##// END OF EJS Templates
feat(archive-cache): objectstore now makes bucket required, and use shards as folders inside it
super-admin -
r5447:ba9c215d default
parent child Browse files
Show More
@@ -1,850 +1,849 b''
1
1
2 ; #########################################
2 ; #########################################
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 ; #########################################
4 ; #########################################
5
5
6 [DEFAULT]
6 [DEFAULT]
7 ; Debug flag sets all loggers to debug, and enables request tracking
7 ; Debug flag sets all loggers to debug, and enables request tracking
8 debug = true
8 debug = true
9
9
10 ; ########################################################################
10 ; ########################################################################
11 ; EMAIL CONFIGURATION
11 ; EMAIL CONFIGURATION
12 ; These settings will be used by the RhodeCode mailing system
12 ; These settings will be used by the RhodeCode mailing system
13 ; ########################################################################
13 ; ########################################################################
14
14
15 ; prefix all emails subjects with given prefix, helps filtering out emails
15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 #email_prefix = [RhodeCode]
16 #email_prefix = [RhodeCode]
17
17
18 ; email FROM address all mails will be sent
18 ; email FROM address all mails will be sent
19 #app_email_from = rhodecode-noreply@localhost
19 #app_email_from = rhodecode-noreply@localhost
20
20
21 #smtp_server = mail.server.com
21 #smtp_server = mail.server.com
22 #smtp_username =
22 #smtp_username =
23 #smtp_password =
23 #smtp_password =
24 #smtp_port =
24 #smtp_port =
25 #smtp_use_tls = false
25 #smtp_use_tls = false
26 #smtp_use_ssl = true
26 #smtp_use_ssl = true
27
27
28 [server:main]
28 [server:main]
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 host = 127.0.0.1
31 host = 127.0.0.1
32 port = 10020
32 port = 10020
33
33
34
34
35 ; ###########################
35 ; ###########################
36 ; GUNICORN APPLICATION SERVER
36 ; GUNICORN APPLICATION SERVER
37 ; ###########################
37 ; ###########################
38
38
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40
40
41 ; Module to use, this setting shouldn't be changed
41 ; Module to use, this setting shouldn't be changed
42 use = egg:gunicorn#main
42 use = egg:gunicorn#main
43
43
44 ; Prefix middleware for RhodeCode.
44 ; Prefix middleware for RhodeCode.
45 ; recommended when using proxy setup.
45 ; recommended when using proxy setup.
46 ; allows to set RhodeCode under a prefix in server.
46 ; allows to set RhodeCode under a prefix in server.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 ; And set your prefix like: `prefix = /custom_prefix`
48 ; And set your prefix like: `prefix = /custom_prefix`
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 ; to make your cookies only work on prefix url
50 ; to make your cookies only work on prefix url
51 [filter:proxy-prefix]
51 [filter:proxy-prefix]
52 use = egg:PasteDeploy#prefix
52 use = egg:PasteDeploy#prefix
53 prefix = /
53 prefix = /
54
54
55 [app:main]
55 [app:main]
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 ; of this file
57 ; of this file
58 ; Each option in the app:main can be overridden by an environment variable
58 ; Each option in the app:main can be overridden by an environment variable
59 ;
59 ;
60 ;To override an option:
60 ;To override an option:
61 ;
61 ;
62 ;RC_<KeyName>
62 ;RC_<KeyName>
63 ;Everything should be uppercase, . and - should be replaced by _.
63 ;Everything should be uppercase, . and - should be replaced by _.
64 ;For example, if you have these configuration settings:
64 ;For example, if you have these configuration settings:
65 ;rc_cache.repo_object.backend = foo
65 ;rc_cache.repo_object.backend = foo
66 ;can be overridden by
66 ;can be overridden by
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68
68
69 use = egg:rhodecode-enterprise-ce
69 use = egg:rhodecode-enterprise-ce
70
70
71 ; enable proxy prefix middleware, defined above
71 ; enable proxy prefix middleware, defined above
72 #filter-with = proxy-prefix
72 #filter-with = proxy-prefix
73
73
74 ; #############
74 ; #############
75 ; DEBUG OPTIONS
75 ; DEBUG OPTIONS
76 ; #############
76 ; #############
77
77
78 pyramid.reload_templates = true
78 pyramid.reload_templates = true
79
79
80 # During development we want to have the debug toolbar enabled
80 # During development we want to have the debug toolbar enabled
81 pyramid.includes =
81 pyramid.includes =
82 pyramid_debugtoolbar
82 pyramid_debugtoolbar
83
83
84 debugtoolbar.hosts = 0.0.0.0/0
84 debugtoolbar.hosts = 0.0.0.0/0
85 debugtoolbar.exclude_prefixes =
85 debugtoolbar.exclude_prefixes =
86 /css
86 /css
87 /fonts
87 /fonts
88 /images
88 /images
89 /js
89 /js
90
90
91 ## RHODECODE PLUGINS ##
91 ## RHODECODE PLUGINS ##
92 rhodecode.includes =
92 rhodecode.includes =
93 rhodecode.api
93 rhodecode.api
94
94
95
95
96 # api prefix url
96 # api prefix url
97 rhodecode.api.url = /_admin/api
97 rhodecode.api.url = /_admin/api
98
98
99 ; enable debug style page
99 ; enable debug style page
100 debug_style = true
100 debug_style = true
101
101
102 ; #################
102 ; #################
103 ; END DEBUG OPTIONS
103 ; END DEBUG OPTIONS
104 ; #################
104 ; #################
105
105
106 ; encryption key used to encrypt social plugin tokens,
106 ; encryption key used to encrypt social plugin tokens,
107 ; remote_urls with credentials etc, if not set it defaults to
107 ; remote_urls with credentials etc, if not set it defaults to
108 ; `beaker.session.secret`
108 ; `beaker.session.secret`
109 #rhodecode.encrypted_values.secret =
109 #rhodecode.encrypted_values.secret =
110
110
111 ; decryption strict mode (enabled by default). It controls if decryption raises
111 ; decryption strict mode (enabled by default). It controls if decryption raises
112 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
112 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
113 #rhodecode.encrypted_values.strict = false
113 #rhodecode.encrypted_values.strict = false
114
114
115 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
115 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
116 ; fernet is safer, and we strongly recommend switching to it.
116 ; fernet is safer, and we strongly recommend switching to it.
117 ; Due to backward compatibility aes is used as default.
117 ; Due to backward compatibility aes is used as default.
118 #rhodecode.encrypted_values.algorithm = fernet
118 #rhodecode.encrypted_values.algorithm = fernet
119
119
120 ; Return gzipped responses from RhodeCode (static files/application)
120 ; Return gzipped responses from RhodeCode (static files/application)
121 gzip_responses = false
121 gzip_responses = false
122
122
123 ; Auto-generate javascript routes file on startup
123 ; Auto-generate javascript routes file on startup
124 generate_js_files = false
124 generate_js_files = false
125
125
126 ; System global default language.
126 ; System global default language.
127 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
127 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
128 lang = en
128 lang = en
129
129
130 ; Perform a full repository scan and import on each server start.
130 ; Perform a full repository scan and import on each server start.
131 ; Setting this to true could lead to a very long startup time.
131 ; Setting this to true could lead to a very long startup time.
132 startup.import_repos = false
132 startup.import_repos = false
133
133
134 ; URL at which the application is running. This is used for Bootstrapping
134 ; URL at which the application is running. This is used for Bootstrapping
135 ; requests in context when no web request is available. Used in ishell, or
135 ; requests in context when no web request is available. Used in ishell, or
136 ; SSH calls. Set this for events to receive proper url for SSH calls.
136 ; SSH calls. Set this for events to receive proper url for SSH calls.
137 app.base_url = http://rhodecode.local
137 app.base_url = http://rhodecode.local
138
138
139 ; Host at which the Service API is running.
139 ; Host at which the Service API is running.
140 app.service_api.host = http://rhodecode.local:10020
140 app.service_api.host = http://rhodecode.local:10020
141
141
142 ; Secret for Service API authentication.
142 ; Secret for Service API authentication.
143 app.service_api.token =
143 app.service_api.token =
144
144
145 ; Unique application ID. Should be a random unique string for security.
145 ; Unique application ID. Should be a random unique string for security.
146 app_instance_uuid = rc-production
146 app_instance_uuid = rc-production
147
147
148 ; Cut off limit for large diffs (size in bytes). If overall diff size on
148 ; Cut off limit for large diffs (size in bytes). If overall diff size on
149 ; commit, or pull request exceeds this limit this diff will be displayed
149 ; commit, or pull request exceeds this limit this diff will be displayed
150 ; partially. E.g 512000 == 512Kb
150 ; partially. E.g 512000 == 512Kb
151 cut_off_limit_diff = 512000
151 cut_off_limit_diff = 512000
152
152
153 ; Cut off limit for large files inside diffs (size in bytes). Each individual
153 ; Cut off limit for large files inside diffs (size in bytes). Each individual
154 ; file inside diff which exceeds this limit will be displayed partially.
154 ; file inside diff which exceeds this limit will be displayed partially.
155 ; E.g 128000 == 128Kb
155 ; E.g 128000 == 128Kb
156 cut_off_limit_file = 128000
156 cut_off_limit_file = 128000
157
157
158 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
158 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
159 vcs_full_cache = true
159 vcs_full_cache = true
160
160
161 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
161 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
162 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
162 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
163 force_https = false
163 force_https = false
164
164
165 ; use Strict-Transport-Security headers
165 ; use Strict-Transport-Security headers
166 use_htsts = false
166 use_htsts = false
167
167
168 ; Set to true if your repos are exposed using the dumb protocol
168 ; Set to true if your repos are exposed using the dumb protocol
169 git_update_server_info = false
169 git_update_server_info = false
170
170
171 ; RSS/ATOM feed options
171 ; RSS/ATOM feed options
172 rss_cut_off_limit = 256000
172 rss_cut_off_limit = 256000
173 rss_items_per_page = 10
173 rss_items_per_page = 10
174 rss_include_diff = false
174 rss_include_diff = false
175
175
176 ; gist URL alias, used to create nicer urls for gist. This should be an
176 ; gist URL alias, used to create nicer urls for gist. This should be an
177 ; url that does rewrites to _admin/gists/{gistid}.
177 ; url that does rewrites to _admin/gists/{gistid}.
178 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
178 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
179 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
179 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
180 gist_alias_url =
180 gist_alias_url =
181
181
182 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
182 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
183 ; used for access.
183 ; used for access.
184 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
184 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
185 ; came from the logged-in user who owns this authentication token.
185 ; came from the logged-in user who owns this authentication token.
186 ; Additionally @TOKEN syntax can be used to bind the view to a specific
186 ; Additionally @TOKEN syntax can be used to bind the view to a specific
187 ; authentication token. Such view would be only accessible when used together
187 ; authentication token. Such view would be only accessible when used together
188 ; with this authentication token
188 ; with this authentication token
189 ; list of all views can be found under `/_admin/permissions/auth_token_access`
189 ; list of all views can be found under `/_admin/permissions/auth_token_access`
190 ; The list should be "," separated and on a single line.
190 ; The list should be "," separated and on a single line.
191 ; Most common views to enable:
191 ; Most common views to enable:
192
192
193 # RepoCommitsView:repo_commit_download
193 # RepoCommitsView:repo_commit_download
194 # RepoCommitsView:repo_commit_patch
194 # RepoCommitsView:repo_commit_patch
195 # RepoCommitsView:repo_commit_raw
195 # RepoCommitsView:repo_commit_raw
196 # RepoCommitsView:repo_commit_raw@TOKEN
196 # RepoCommitsView:repo_commit_raw@TOKEN
197 # RepoFilesView:repo_files_diff
197 # RepoFilesView:repo_files_diff
198 # RepoFilesView:repo_archivefile
198 # RepoFilesView:repo_archivefile
199 # RepoFilesView:repo_file_raw
199 # RepoFilesView:repo_file_raw
200 # GistView:*
200 # GistView:*
201 api_access_controllers_whitelist =
201 api_access_controllers_whitelist =
202
202
203 ; Default encoding used to convert from and to unicode
203 ; Default encoding used to convert from and to unicode
204 ; can be also a comma separated list of encoding in case of mixed encodings
204 ; can be also a comma separated list of encoding in case of mixed encodings
205 default_encoding = UTF-8
205 default_encoding = UTF-8
206
206
207 ; instance-id prefix
207 ; instance-id prefix
208 ; a prefix key for this instance used for cache invalidation when running
208 ; a prefix key for this instance used for cache invalidation when running
209 ; multiple instances of RhodeCode, make sure it's globally unique for
209 ; multiple instances of RhodeCode, make sure it's globally unique for
210 ; all running RhodeCode instances. Leave empty if you don't use it
210 ; all running RhodeCode instances. Leave empty if you don't use it
211 instance_id =
211 instance_id =
212
212
213 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
213 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
214 ; of an authentication plugin also if it is disabled by its settings.
214 ; of an authentication plugin also if it is disabled by its settings.
215 ; This could be useful if you are unable to log in to the system due to broken
215 ; This could be useful if you are unable to log in to the system due to broken
216 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
216 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
217 ; module to log in again and fix the settings.
217 ; module to log in again and fix the settings.
218 ; Available builtin plugin IDs (hash is part of the ID):
218 ; Available builtin plugin IDs (hash is part of the ID):
219 ; egg:rhodecode-enterprise-ce#rhodecode
219 ; egg:rhodecode-enterprise-ce#rhodecode
220 ; egg:rhodecode-enterprise-ce#pam
220 ; egg:rhodecode-enterprise-ce#pam
221 ; egg:rhodecode-enterprise-ce#ldap
221 ; egg:rhodecode-enterprise-ce#ldap
222 ; egg:rhodecode-enterprise-ce#jasig_cas
222 ; egg:rhodecode-enterprise-ce#jasig_cas
223 ; egg:rhodecode-enterprise-ce#headers
223 ; egg:rhodecode-enterprise-ce#headers
224 ; egg:rhodecode-enterprise-ce#crowd
224 ; egg:rhodecode-enterprise-ce#crowd
225
225
226 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
226 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
227
227
228 ; Flag to control loading of legacy plugins in py:/path format
228 ; Flag to control loading of legacy plugins in py:/path format
229 auth_plugin.import_legacy_plugins = true
229 auth_plugin.import_legacy_plugins = true
230
230
231 ; alternative return HTTP header for failed authentication. Default HTTP
231 ; alternative return HTTP header for failed authentication. Default HTTP
232 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
232 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
233 ; handling that causing a series of failed authentication calls.
233 ; handling that causing a series of failed authentication calls.
234 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
234 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
235 ; This will be served instead of default 401 on bad authentication
235 ; This will be served instead of default 401 on bad authentication
236 auth_ret_code =
236 auth_ret_code =
237
237
238 ; use special detection method when serving auth_ret_code, instead of serving
238 ; use special detection method when serving auth_ret_code, instead of serving
239 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
239 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
240 ; and then serve auth_ret_code to clients
240 ; and then serve auth_ret_code to clients
241 auth_ret_code_detection = false
241 auth_ret_code_detection = false
242
242
243 ; locking return code. When repository is locked return this HTTP code. 2XX
243 ; locking return code. When repository is locked return this HTTP code. 2XX
244 ; codes don't break the transactions while 4XX codes do
244 ; codes don't break the transactions while 4XX codes do
245 lock_ret_code = 423
245 lock_ret_code = 423
246
246
247 ; Filesystem location where repositories should be stored
247 ; Filesystem location where repositories should be stored
248 repo_store.path = /var/opt/rhodecode_repo_store
248 repo_store.path = /var/opt/rhodecode_repo_store
249
249
250 ; allows to setup custom hooks in settings page
250 ; allows to setup custom hooks in settings page
251 allow_custom_hooks_settings = true
251 allow_custom_hooks_settings = true
252
252
253 ; Generated license token required for EE edition license.
253 ; Generated license token required for EE edition license.
254 ; New generated token value can be found in Admin > settings > license page.
254 ; New generated token value can be found in Admin > settings > license page.
255 license_token =
255 license_token =
256
256
257 ; This flag hides sensitive information on the license page such as token, and license data
257 ; This flag hides sensitive information on the license page such as token, and license data
258 license.hide_license_info = false
258 license.hide_license_info = false
259
259
260 ; supervisor connection uri, for managing supervisor and logs.
260 ; supervisor connection uri, for managing supervisor and logs.
261 supervisor.uri =
261 supervisor.uri =
262
262
263 ; supervisord group name/id we only want this RC instance to handle
263 ; supervisord group name/id we only want this RC instance to handle
264 supervisor.group_id = dev
264 supervisor.group_id = dev
265
265
266 ; Display extended labs settings
266 ; Display extended labs settings
267 labs_settings_active = true
267 labs_settings_active = true
268
268
269 ; Custom exception store path, defaults to TMPDIR
269 ; Custom exception store path, defaults to TMPDIR
270 ; This is used to store exceptions from RhodeCode in a shared directory
270 ; This is used to store exceptions from RhodeCode in a shared directory
271 #exception_tracker.store_path =
271 #exception_tracker.store_path =
272
272
273 ; Send email with exception details when it happens
273 ; Send email with exception details when it happens
274 #exception_tracker.send_email = false
274 #exception_tracker.send_email = false
275
275
276 ; Comma separated list of recipients for exception emails,
276 ; Comma separated list of recipients for exception emails,
277 ; e.g admin@rhodecode.com,devops@rhodecode.com
277 ; e.g admin@rhodecode.com,devops@rhodecode.com
278 ; Can be left empty, then emails will be sent to ALL super-admins
278 ; Can be left empty, then emails will be sent to ALL super-admins
279 #exception_tracker.send_email_recipients =
279 #exception_tracker.send_email_recipients =
280
280
281 ; optional prefix to Add to email Subject
281 ; optional prefix to Add to email Subject
282 #exception_tracker.email_prefix = [RHODECODE ERROR]
282 #exception_tracker.email_prefix = [RHODECODE ERROR]
283
283
284 ; File store configuration. This is used to store and serve uploaded files
284 ; File store configuration. This is used to store and serve uploaded files
285 file_store.enabled = true
285 file_store.enabled = true
286
286
287 ; Storage backend, available options are: local
287 ; Storage backend, available options are: local
288 file_store.backend = local
288 file_store.backend = local
289
289
290 ; path to store the uploaded binaries and artifacts
290 ; path to store the uploaded binaries and artifacts
291 file_store.storage_path = /var/opt/rhodecode_data/file_store
291 file_store.storage_path = /var/opt/rhodecode_data/file_store
292
292
293
293
294 ; Redis url to acquire/check generation of archives locks
294 ; Redis url to acquire/check generation of archives locks
295 archive_cache.locking.url = redis://redis:6379/1
295 archive_cache.locking.url = redis://redis:6379/1
296
296
297 ; Storage backend, only 'filesystem' and 'objectstore' are available now
297 ; Storage backend, only 'filesystem' and 'objectstore' are available now
298 archive_cache.backend.type = filesystem
298 archive_cache.backend.type = filesystem
299
299
300 ; url for s3 compatible storage that allows to upload artifacts
300 ; url for s3 compatible storage that allows to upload artifacts
301 ; e.g http://minio:9000
301 ; e.g http://minio:9000
302 archive_cache.objectstore.url = http://s3-minio:9000
302 archive_cache.objectstore.url = http://s3-minio:9000
303
303
304 ; key for s3 auth
304 ; key for s3 auth
305 archive_cache.objectstore.key = key
305 archive_cache.objectstore.key = key
306
306
307 ; secret for s3 auth
307 ; secret for s3 auth
308 archive_cache.objectstore.secret = secret
308 archive_cache.objectstore.secret = secret
309
309
310 ; number of sharded buckets to create to distribute archives across
310 ; number of sharded buckets to create to distribute archives across
311 ; default is 8 shards
311 ; default is 8 shards
312 archive_cache.objectstore.bucket_shards = 8
312 archive_cache.objectstore.bucket_shards = 8
313
313
314 ; a top-level bucket to put all other sharded buckets in
314 ; a top-level bucket to put all other shards in
315 ; in case it's empty all buckets will be created in top-level (not recommended)
315 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
316 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
316 archive_cache.objectstore.bucket = rhodecode-archive-cache
317 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
318
317
319 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
318 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
320 archive_cache.objectstore.retry = false
319 archive_cache.objectstore.retry = false
321
320
322 ; number of seconds to wait for next try using retry
321 ; number of seconds to wait for next try using retry
323 archive_cache.objectstore.retry_backoff = 1
322 archive_cache.objectstore.retry_backoff = 1
324
323
325 ; how many times to retry a fetch from this backend
324 ; how many times to retry a fetch from this backend
326 archive_cache.objectstore.retry_attempts = 10
325 archive_cache.objectstore.retry_attempts = 10
327
326
328 ; Default is $cache_dir/archive_cache if not set
327 ; Default is $cache_dir/archive_cache if not set
329 ; Generated repo archives will be cached at this location
328 ; Generated repo archives will be cached at this location
330 ; and served from the cache during subsequent requests for the same archive of
329 ; and served from the cache during subsequent requests for the same archive of
331 ; the repository. This path is important to be shared across filesystems and with
330 ; the repository. This path is important to be shared across filesystems and with
332 ; RhodeCode and vcsserver
331 ; RhodeCode and vcsserver
333 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
332 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
334
333
335 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
334 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
336 archive_cache.filesystem.cache_size_gb = 1
335 archive_cache.filesystem.cache_size_gb = 1
337
336
338 ; Eviction policy used to clear out after cache_size_gb limit is reached
337 ; Eviction policy used to clear out after cache_size_gb limit is reached
339 archive_cache.filesystem.eviction_policy = least-recently-stored
338 archive_cache.filesystem.eviction_policy = least-recently-stored
340
339
341 ; By default cache uses sharding technique, this specifies how many shards are there
340 ; By default cache uses sharding technique, this specifies how many shards are there
342 ; default is 8 shards
341 ; default is 8 shards
343 archive_cache.filesystem.cache_shards = 8
342 archive_cache.filesystem.cache_shards = 8
344
343
345 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
344 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
346 archive_cache.filesystem.retry = false
345 archive_cache.filesystem.retry = false
347
346
348 ; number of seconds to wait for next try using retry
347 ; number of seconds to wait for next try using retry
349 archive_cache.filesystem.retry_backoff = 1
348 archive_cache.filesystem.retry_backoff = 1
350
349
351 ; how many times to retry a fetch from this backend
350 ; how many times to retry a fetch from this backend
352 archive_cache.filesystem.retry_attempts = 10
351 archive_cache.filesystem.retry_attempts = 10
353
352
354
353
355 ; #############
354 ; #############
356 ; CELERY CONFIG
355 ; CELERY CONFIG
357 ; #############
356 ; #############
358
357
359 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
358 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
360
359
361 use_celery = true
360 use_celery = true
362
361
363 ; path to store schedule database
362 ; path to store schedule database
364 #celerybeat-schedule.path =
363 #celerybeat-schedule.path =
365
364
366 ; connection url to the message broker (default redis)
365 ; connection url to the message broker (default redis)
367 celery.broker_url = redis://redis:6379/8
366 celery.broker_url = redis://redis:6379/8
368
367
369 ; results backend to get results for (default redis)
368 ; results backend to get results for (default redis)
370 celery.result_backend = redis://redis:6379/8
369 celery.result_backend = redis://redis:6379/8
371
370
372 ; rabbitmq example
371 ; rabbitmq example
373 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
372 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
374
373
375 ; maximum tasks to execute before worker restart
374 ; maximum tasks to execute before worker restart
376 celery.max_tasks_per_child = 20
375 celery.max_tasks_per_child = 20
377
376
378 ; tasks will never be sent to the queue, but executed locally instead.
377 ; tasks will never be sent to the queue, but executed locally instead.
379 celery.task_always_eager = false
378 celery.task_always_eager = false
380
379
381 ; #############
380 ; #############
382 ; DOGPILE CACHE
381 ; DOGPILE CACHE
383 ; #############
382 ; #############
384
383
385 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
384 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
386 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
385 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
387 cache_dir = /var/opt/rhodecode_data
386 cache_dir = /var/opt/rhodecode_data
388
387
389 ; *********************************************
388 ; *********************************************
390 ; `sql_cache_short` cache for heavy SQL queries
389 ; `sql_cache_short` cache for heavy SQL queries
391 ; Only supported backend is `memory_lru`
390 ; Only supported backend is `memory_lru`
392 ; *********************************************
391 ; *********************************************
393 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
392 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
394 rc_cache.sql_cache_short.expiration_time = 30
393 rc_cache.sql_cache_short.expiration_time = 30
395
394
396
395
397 ; *****************************************************
396 ; *****************************************************
398 ; `cache_repo_longterm` cache for repo object instances
397 ; `cache_repo_longterm` cache for repo object instances
399 ; Only supported backend is `memory_lru`
398 ; Only supported backend is `memory_lru`
400 ; *****************************************************
399 ; *****************************************************
401 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
400 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
402 ; by default we use 30 Days, cache is still invalidated on push
401 ; by default we use 30 Days, cache is still invalidated on push
403 rc_cache.cache_repo_longterm.expiration_time = 2592000
402 rc_cache.cache_repo_longterm.expiration_time = 2592000
404 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
403 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
405 rc_cache.cache_repo_longterm.max_size = 10000
404 rc_cache.cache_repo_longterm.max_size = 10000
406
405
407
406
408 ; *********************************************
407 ; *********************************************
409 ; `cache_general` cache for general purpose use
408 ; `cache_general` cache for general purpose use
410 ; for simplicity use rc.file_namespace backend,
409 ; for simplicity use rc.file_namespace backend,
411 ; for performance and scale use rc.redis
410 ; for performance and scale use rc.redis
412 ; *********************************************
411 ; *********************************************
413 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
412 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
414 rc_cache.cache_general.expiration_time = 43200
413 rc_cache.cache_general.expiration_time = 43200
415 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
414 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
416 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
415 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
417
416
418 ; alternative `cache_general` redis backend with distributed lock
417 ; alternative `cache_general` redis backend with distributed lock
419 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
418 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
420 #rc_cache.cache_general.expiration_time = 300
419 #rc_cache.cache_general.expiration_time = 300
421
420
422 ; redis_expiration_time needs to be greater than expiration_time
421 ; redis_expiration_time needs to be greater than expiration_time
423 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
422 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
424
423
425 #rc_cache.cache_general.arguments.host = localhost
424 #rc_cache.cache_general.arguments.host = localhost
426 #rc_cache.cache_general.arguments.port = 6379
425 #rc_cache.cache_general.arguments.port = 6379
427 #rc_cache.cache_general.arguments.db = 0
426 #rc_cache.cache_general.arguments.db = 0
428 #rc_cache.cache_general.arguments.socket_timeout = 30
427 #rc_cache.cache_general.arguments.socket_timeout = 30
429 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
428 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
430 #rc_cache.cache_general.arguments.distributed_lock = true
429 #rc_cache.cache_general.arguments.distributed_lock = true
431
430
432 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
431 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
433 #rc_cache.cache_general.arguments.lock_auto_renewal = true
432 #rc_cache.cache_general.arguments.lock_auto_renewal = true
434
433
435 ; *************************************************
434 ; *************************************************
436 ; `cache_perms` cache for permission tree, auth TTL
435 ; `cache_perms` cache for permission tree, auth TTL
437 ; for simplicity use rc.file_namespace backend,
436 ; for simplicity use rc.file_namespace backend,
438 ; for performance and scale use rc.redis
437 ; for performance and scale use rc.redis
439 ; *************************************************
438 ; *************************************************
440 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
439 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
441 rc_cache.cache_perms.expiration_time = 3600
440 rc_cache.cache_perms.expiration_time = 3600
442 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
441 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
443 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
442 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
444
443
445 ; alternative `cache_perms` redis backend with distributed lock
444 ; alternative `cache_perms` redis backend with distributed lock
446 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
445 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
447 #rc_cache.cache_perms.expiration_time = 300
446 #rc_cache.cache_perms.expiration_time = 300
448
447
449 ; redis_expiration_time needs to be greater than expiration_time
448 ; redis_expiration_time needs to be greater than expiration_time
450 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
449 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
451
450
452 #rc_cache.cache_perms.arguments.host = localhost
451 #rc_cache.cache_perms.arguments.host = localhost
453 #rc_cache.cache_perms.arguments.port = 6379
452 #rc_cache.cache_perms.arguments.port = 6379
454 #rc_cache.cache_perms.arguments.db = 0
453 #rc_cache.cache_perms.arguments.db = 0
455 #rc_cache.cache_perms.arguments.socket_timeout = 30
454 #rc_cache.cache_perms.arguments.socket_timeout = 30
456 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
455 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
457 #rc_cache.cache_perms.arguments.distributed_lock = true
456 #rc_cache.cache_perms.arguments.distributed_lock = true
458
457
459 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
458 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
460 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
459 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
461
460
462 ; ***************************************************
461 ; ***************************************************
463 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
462 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
464 ; for simplicity use rc.file_namespace backend,
463 ; for simplicity use rc.file_namespace backend,
465 ; for performance and scale use rc.redis
464 ; for performance and scale use rc.redis
466 ; ***************************************************
465 ; ***************************************************
467 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
466 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
468 rc_cache.cache_repo.expiration_time = 2592000
467 rc_cache.cache_repo.expiration_time = 2592000
469 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
468 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
470 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
469 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
471
470
472 ; alternative `cache_repo` redis backend with distributed lock
471 ; alternative `cache_repo` redis backend with distributed lock
473 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
472 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
474 #rc_cache.cache_repo.expiration_time = 2592000
473 #rc_cache.cache_repo.expiration_time = 2592000
475
474
476 ; redis_expiration_time needs to be greater than expiration_time
475 ; redis_expiration_time needs to be greater than expiration_time
477 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
476 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
478
477
479 #rc_cache.cache_repo.arguments.host = localhost
478 #rc_cache.cache_repo.arguments.host = localhost
480 #rc_cache.cache_repo.arguments.port = 6379
479 #rc_cache.cache_repo.arguments.port = 6379
481 #rc_cache.cache_repo.arguments.db = 1
480 #rc_cache.cache_repo.arguments.db = 1
482 #rc_cache.cache_repo.arguments.socket_timeout = 30
481 #rc_cache.cache_repo.arguments.socket_timeout = 30
483 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
482 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
484 #rc_cache.cache_repo.arguments.distributed_lock = true
483 #rc_cache.cache_repo.arguments.distributed_lock = true
485
484
486 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
485 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
487 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
486 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
488
487
489 ; ##############
488 ; ##############
490 ; BEAKER SESSION
489 ; BEAKER SESSION
491 ; ##############
490 ; ##############
492
491
493 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
492 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
494 ; types are file, ext:redis, ext:database, ext:memcached
493 ; types are file, ext:redis, ext:database, ext:memcached
495 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
494 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
496 #beaker.session.type = file
495 #beaker.session.type = file
497 #beaker.session.data_dir = %(here)s/data/sessions
496 #beaker.session.data_dir = %(here)s/data/sessions
498
497
499 ; Redis based sessions
498 ; Redis based sessions
500 beaker.session.type = ext:redis
499 beaker.session.type = ext:redis
501 beaker.session.url = redis://redis:6379/2
500 beaker.session.url = redis://redis:6379/2
502
501
503 ; DB based session, fast, and allows easy management over logged in users
502 ; DB based session, fast, and allows easy management over logged in users
504 #beaker.session.type = ext:database
503 #beaker.session.type = ext:database
505 #beaker.session.table_name = db_session
504 #beaker.session.table_name = db_session
506 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
505 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
507 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
506 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
508 #beaker.session.sa.pool_recycle = 3600
507 #beaker.session.sa.pool_recycle = 3600
509 #beaker.session.sa.echo = false
508 #beaker.session.sa.echo = false
510
509
511 beaker.session.key = rhodecode
510 beaker.session.key = rhodecode
512 beaker.session.secret = develop-rc-uytcxaz
511 beaker.session.secret = develop-rc-uytcxaz
513 beaker.session.lock_dir = /data_ramdisk/lock
512 beaker.session.lock_dir = /data_ramdisk/lock
514
513
515 ; Secure encrypted cookie. Requires AES and AES python libraries
514 ; Secure encrypted cookie. Requires AES and AES python libraries
516 ; you must disable beaker.session.secret to use this
515 ; you must disable beaker.session.secret to use this
517 #beaker.session.encrypt_key = key_for_encryption
516 #beaker.session.encrypt_key = key_for_encryption
518 #beaker.session.validate_key = validation_key
517 #beaker.session.validate_key = validation_key
519
518
520 ; Sets session as invalid (also logging out user) if it has not been
519 ; Sets session as invalid (also logging out user) if it has not been
521 ; accessed for given amount of time in seconds
520 ; accessed for given amount of time in seconds
522 beaker.session.timeout = 2592000
521 beaker.session.timeout = 2592000
523 beaker.session.httponly = true
522 beaker.session.httponly = true
524
523
525 ; Path to use for the cookie. Set to prefix if you use prefix middleware
524 ; Path to use for the cookie. Set to prefix if you use prefix middleware
526 #beaker.session.cookie_path = /custom_prefix
525 #beaker.session.cookie_path = /custom_prefix
527
526
528 ; Set https secure cookie
527 ; Set https secure cookie
529 beaker.session.secure = false
528 beaker.session.secure = false
530
529
531 ; default cookie expiration time in seconds, set to `true` to set expire
530 ; default cookie expiration time in seconds, set to `true` to set expire
532 ; at browser close
531 ; at browser close
533 #beaker.session.cookie_expires = 3600
532 #beaker.session.cookie_expires = 3600
534
533
535 ; #############################
534 ; #############################
536 ; SEARCH INDEXING CONFIGURATION
535 ; SEARCH INDEXING CONFIGURATION
537 ; #############################
536 ; #############################
538
537
539 ; Full text search indexer is available in rhodecode-tools under
538 ; Full text search indexer is available in rhodecode-tools under
540 ; `rhodecode-tools index` command
539 ; `rhodecode-tools index` command
541
540
542 ; WHOOSH Backend, doesn't require additional services to run
541 ; WHOOSH Backend, doesn't require additional services to run
543 ; it works well with a few dozen repos
542 ; it works well with a few dozen repos
544 search.module = rhodecode.lib.index.whoosh
543 search.module = rhodecode.lib.index.whoosh
545 search.location = %(here)s/data/index
544 search.location = %(here)s/data/index
546
545
547 ; ####################
546 ; ####################
548 ; CHANNELSTREAM CONFIG
547 ; CHANNELSTREAM CONFIG
549 ; ####################
548 ; ####################
550
549
551 ; channelstream enables persistent connections and live notification
550 ; channelstream enables persistent connections and live notification
552 ; in the system. It's also used by the chat system
551 ; in the system. It's also used by the chat system
553
552
554 channelstream.enabled = true
553 channelstream.enabled = true
555
554
556 ; server address for channelstream server on the backend
555 ; server address for channelstream server on the backend
557 channelstream.server = channelstream:9800
556 channelstream.server = channelstream:9800
558
557
559 ; location of the channelstream server from outside world
558 ; location of the channelstream server from outside world
560 ; use ws:// for http or wss:// for https. This address needs to be handled
559 ; use ws:// for http or wss:// for https. This address needs to be handled
561 ; by external HTTP server such as Nginx or Apache
560 ; by external HTTP server such as Nginx or Apache
562 ; see Nginx/Apache configuration examples in our docs
561 ; see Nginx/Apache configuration examples in our docs
563 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
562 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
564 channelstream.secret = ENV_GENERATED
563 channelstream.secret = ENV_GENERATED
565 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
564 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
566
565
567 ; Internal application path that Javascript uses to connect into.
566 ; Internal application path that Javascript uses to connect into.
568 ; If you use proxy-prefix the prefix should be added before /_channelstream
567 ; If you use proxy-prefix the prefix should be added before /_channelstream
569 channelstream.proxy_path = /_channelstream
568 channelstream.proxy_path = /_channelstream
570
569
571
570
572 ; ##############################
571 ; ##############################
573 ; MAIN RHODECODE DATABASE CONFIG
572 ; MAIN RHODECODE DATABASE CONFIG
574 ; ##############################
573 ; ##############################
575
574
576 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
575 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
577 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
576 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
578 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
577 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
579 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
578 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
580 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
579 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
581
580
582 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
581 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
583
582
584 ; see sqlalchemy docs for other advanced settings
583 ; see sqlalchemy docs for other advanced settings
585 ; print the sql statements to output
584 ; print the sql statements to output
586 sqlalchemy.db1.echo = false
585 sqlalchemy.db1.echo = false
587
586
588 ; recycle the connections after this amount of seconds
587 ; recycle the connections after this amount of seconds
589 sqlalchemy.db1.pool_recycle = 3600
588 sqlalchemy.db1.pool_recycle = 3600
590
589
591 ; the number of connections to keep open inside the connection pool.
590 ; the number of connections to keep open inside the connection pool.
592 ; 0 indicates no limit
591 ; 0 indicates no limit
593 ; the general calculus with gevent is:
592 ; the general calculus with gevent is:
594 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
593 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
595 ; then increase pool size + max overflow so that they add up to 500.
594 ; then increase pool size + max overflow so that they add up to 500.
596 #sqlalchemy.db1.pool_size = 5
595 #sqlalchemy.db1.pool_size = 5
597
596
598 ; The number of connections to allow in connection pool "overflow", that is
597 ; The number of connections to allow in connection pool "overflow", that is
599 ; connections that can be opened above and beyond the pool_size setting,
598 ; connections that can be opened above and beyond the pool_size setting,
600 ; which defaults to five.
599 ; which defaults to five.
601 #sqlalchemy.db1.max_overflow = 10
600 #sqlalchemy.db1.max_overflow = 10
602
601
603 ; Connection check ping, used to detect broken database connections
602 ; Connection check ping, used to detect broken database connections
604 ; could be enabled to better handle cases if MySQL has gone away errors
603 ; could be enabled to better handle cases if MySQL has gone away errors
605 #sqlalchemy.db1.ping_connection = true
604 #sqlalchemy.db1.ping_connection = true
606
605
607 ; ##########
606 ; ##########
608 ; VCS CONFIG
607 ; VCS CONFIG
609 ; ##########
608 ; ##########
610 vcs.server.enable = true
609 vcs.server.enable = true
611 vcs.server = vcsserver:10010
610 vcs.server = vcsserver:10010
612
611
613 ; Web server connectivity protocol, responsible for web based VCS operations
612 ; Web server connectivity protocol, responsible for web based VCS operations
614 ; Available protocols are:
613 ; Available protocols are:
615 ; `http` - use http-rpc backend (default)
614 ; `http` - use http-rpc backend (default)
616 vcs.server.protocol = http
615 vcs.server.protocol = http
617
616
618 ; Push/Pull operations protocol, available options are:
617 ; Push/Pull operations protocol, available options are:
619 ; `http` - use http-rpc backend (default)
618 ; `http` - use http-rpc backend (default)
620 vcs.scm_app_implementation = http
619 vcs.scm_app_implementation = http
621
620
622 ; Push/Pull operations hooks protocol, available options are:
621 ; Push/Pull operations hooks protocol, available options are:
623 ; `http` - use http-rpc backend (default)
622 ; `http` - use http-rpc backend (default)
624 ; `celery` - use celery based hooks
623 ; `celery` - use celery based hooks
625 vcs.hooks.protocol = http
624 vcs.hooks.protocol = http
626
625
627 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
626 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
628 ; accessible via network.
627 ; accessible via network.
629 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
628 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
630 vcs.hooks.host = *
629 vcs.hooks.host = *
631
630
632 ; Start VCSServer with this instance as a subprocess, useful for development
631 ; Start VCSServer with this instance as a subprocess, useful for development
633 vcs.start_server = false
632 vcs.start_server = false
634
633
635 ; List of enabled VCS backends, available options are:
634 ; List of enabled VCS backends, available options are:
636 ; `hg` - mercurial
635 ; `hg` - mercurial
637 ; `git` - git
636 ; `git` - git
638 ; `svn` - subversion
637 ; `svn` - subversion
639 vcs.backends = hg, git, svn
638 vcs.backends = hg, git, svn
640
639
641 ; Wait this number of seconds before killing connection to the vcsserver
640 ; Wait this number of seconds before killing connection to the vcsserver
642 vcs.connection_timeout = 3600
641 vcs.connection_timeout = 3600
643
642
644 ; Cache flag to cache vcsserver remote calls locally
643 ; Cache flag to cache vcsserver remote calls locally
645 ; It uses cache_region `cache_repo`
644 ; It uses cache_region `cache_repo`
646 vcs.methods.cache = true
645 vcs.methods.cache = true
647
646
648 ; ####################################################
647 ; ####################################################
649 ; Subversion proxy support (mod_dav_svn)
648 ; Subversion proxy support (mod_dav_svn)
650 ; Maps RhodeCode repo groups into SVN paths for Apache
649 ; Maps RhodeCode repo groups into SVN paths for Apache
651 ; ####################################################
650 ; ####################################################
652
651
653 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
652 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
654 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
653 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
655 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
654 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
656 #vcs.svn.compatible_version = 1.8
655 #vcs.svn.compatible_version = 1.8
657
656
658 ; Enable SVN proxy of requests over HTTP
657 ; Enable SVN proxy of requests over HTTP
659 vcs.svn.proxy.enabled = true
658 vcs.svn.proxy.enabled = true
660
659
661 ; host to connect to running SVN subsystem
660 ; host to connect to running SVN subsystem
662 vcs.svn.proxy.host = http://svn:8090
661 vcs.svn.proxy.host = http://svn:8090
663
662
664 ; Enable or disable the config file generation.
663 ; Enable or disable the config file generation.
665 svn.proxy.generate_config = true
664 svn.proxy.generate_config = true
666
665
667 ; Generate config file with `SVNListParentPath` set to `On`.
666 ; Generate config file with `SVNListParentPath` set to `On`.
668 svn.proxy.list_parent_path = true
667 svn.proxy.list_parent_path = true
669
668
670 ; Set location and file name of generated config file.
669 ; Set location and file name of generated config file.
671 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
670 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
672
671
673 ; alternative mod_dav config template. This needs to be a valid mako template
672 ; alternative mod_dav config template. This needs to be a valid mako template
674 ; Example template can be found in the source code:
673 ; Example template can be found in the source code:
675 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
674 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
676 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
675 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
677
676
678 ; Used as a prefix to the `Location` block in the generated config file.
677 ; Used as a prefix to the `Location` block in the generated config file.
679 ; In most cases it should be set to `/`.
678 ; In most cases it should be set to `/`.
680 svn.proxy.location_root = /
679 svn.proxy.location_root = /
681
680
682 ; Command to reload the mod dav svn configuration on change.
681 ; Command to reload the mod dav svn configuration on change.
683 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
682 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
684 ; Make sure user who runs RhodeCode process is allowed to reload Apache
683 ; Make sure user who runs RhodeCode process is allowed to reload Apache
685 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
684 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
686
685
687 ; If the timeout expires before the reload command finishes, the command will
686 ; If the timeout expires before the reload command finishes, the command will
688 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
687 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
689 #svn.proxy.reload_timeout = 10
688 #svn.proxy.reload_timeout = 10
690
689
691 ; ####################
690 ; ####################
692 ; SSH Support Settings
691 ; SSH Support Settings
693 ; ####################
692 ; ####################
694
693
695 ; Defines if a custom authorized_keys file should be created and written on
694 ; Defines if a custom authorized_keys file should be created and written on
696 ; any change of user ssh keys. Setting this to false also disables the possibility
695 ; any change of user ssh keys. Setting this to false also disables the possibility
697 ; of adding SSH keys by users from web interface. Super admins can still
696 ; of adding SSH keys by users from web interface. Super admins can still
698 ; manage SSH Keys.
697 ; manage SSH Keys.
699 ssh.generate_authorized_keyfile = true
698 ssh.generate_authorized_keyfile = true
700
699
701 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
700 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
702 # ssh.authorized_keys_ssh_opts =
701 # ssh.authorized_keys_ssh_opts =
703
702
704 ; Path to the authorized_keys file where the generated entries are placed.
703 ; Path to the authorized_keys file where the generated entries are placed.
705 ; It is possible to have multiple key files specified in `sshd_config` e.g.
704 ; It is possible to have multiple key files specified in `sshd_config` e.g.
706 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
705 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
707 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
706 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
708
707
709 ; Command to execute the SSH wrapper. The binary is available in the
708 ; Command to execute the SSH wrapper. The binary is available in the
710 ; RhodeCode installation directory.
709 ; RhodeCode installation directory.
711 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
710 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
712 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
711 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
713 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
712 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
714
713
715 ; Allow shell when executing the ssh-wrapper command
714 ; Allow shell when executing the ssh-wrapper command
716 ssh.wrapper_cmd_allow_shell = false
715 ssh.wrapper_cmd_allow_shell = false
717
716
718 ; Enables logging, and detailed output sent back to the client during SSH
717 ; Enables logging, and detailed output sent back to the client during SSH
719 ; operations. Useful for debugging, shouldn't be used in production.
718 ; operations. Useful for debugging, shouldn't be used in production.
720 ssh.enable_debug_logging = true
719 ssh.enable_debug_logging = true
721
720
722 ; Paths to binary executable, by default they are the names, but we can
721 ; Paths to binary executable, by default they are the names, but we can
723 ; override them if we want to use a custom one
722 ; override them if we want to use a custom one
724 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
723 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
725 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
724 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
726 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
725 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
727
726
728 ; Enables SSH key generator web interface. Disabling this still allows users
727 ; Enables SSH key generator web interface. Disabling this still allows users
729 ; to add their own keys.
728 ; to add their own keys.
730 ssh.enable_ui_key_generator = true
729 ssh.enable_ui_key_generator = true
731
730
732 ; Statsd client config, this is used to send metrics to statsd
731 ; Statsd client config, this is used to send metrics to statsd
733 ; We recommend setting statsd_exported and scrape them using Prometheus
732 ; We recommend setting statsd_exported and scrape them using Prometheus
734 #statsd.enabled = false
733 #statsd.enabled = false
735 #statsd.statsd_host = 0.0.0.0
734 #statsd.statsd_host = 0.0.0.0
736 #statsd.statsd_port = 8125
735 #statsd.statsd_port = 8125
737 #statsd.statsd_prefix =
736 #statsd.statsd_prefix =
738 #statsd.statsd_ipv6 = false
737 #statsd.statsd_ipv6 = false
739
738
740 ; configure logging automatically at server startup set to false
739 ; configure logging automatically at server startup set to false
741 ; to use the below custom logging config.
740 ; to use the below custom logging config.
742 ; RC_LOGGING_FORMATTER
741 ; RC_LOGGING_FORMATTER
743 ; RC_LOGGING_LEVEL
742 ; RC_LOGGING_LEVEL
744 ; env variables can control the settings for logging in case of autoconfigure
743 ; env variables can control the settings for logging in case of autoconfigure
745
744
746 #logging.autoconfigure = true
745 #logging.autoconfigure = true
747
746
748 ; specify your own custom logging config file to configure logging
747 ; specify your own custom logging config file to configure logging
749 #logging.logging_conf_file = /path/to/custom_logging.ini
748 #logging.logging_conf_file = /path/to/custom_logging.ini
750
749
751 ; Dummy marker to add new entries after.
750 ; Dummy marker to add new entries after.
752 ; Add any custom entries below. Please don't remove this marker.
751 ; Add any custom entries below. Please don't remove this marker.
753 custom.conf = 1
752 custom.conf = 1
754
753
755
754
756 ; #####################
755 ; #####################
757 ; LOGGING CONFIGURATION
756 ; LOGGING CONFIGURATION
758 ; #####################
757 ; #####################
759
758
760 [loggers]
759 [loggers]
761 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
760 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
762
761
763 [handlers]
762 [handlers]
764 keys = console, console_sql
763 keys = console, console_sql
765
764
766 [formatters]
765 [formatters]
767 keys = generic, json, color_formatter, color_formatter_sql
766 keys = generic, json, color_formatter, color_formatter_sql
768
767
769 ; #######
768 ; #######
770 ; LOGGERS
769 ; LOGGERS
771 ; #######
770 ; #######
772 [logger_root]
771 [logger_root]
773 level = NOTSET
772 level = NOTSET
774 handlers = console
773 handlers = console
775
774
776 [logger_sqlalchemy]
775 [logger_sqlalchemy]
777 level = INFO
776 level = INFO
778 handlers = console_sql
777 handlers = console_sql
779 qualname = sqlalchemy.engine
778 qualname = sqlalchemy.engine
780 propagate = 0
779 propagate = 0
781
780
782 [logger_beaker]
781 [logger_beaker]
783 level = DEBUG
782 level = DEBUG
784 handlers =
783 handlers =
785 qualname = beaker.container
784 qualname = beaker.container
786 propagate = 1
785 propagate = 1
787
786
788 [logger_rhodecode]
787 [logger_rhodecode]
789 level = DEBUG
788 level = DEBUG
790 handlers =
789 handlers =
791 qualname = rhodecode
790 qualname = rhodecode
792 propagate = 1
791 propagate = 1
793
792
794 [logger_ssh_wrapper]
793 [logger_ssh_wrapper]
795 level = DEBUG
794 level = DEBUG
796 handlers =
795 handlers =
797 qualname = ssh_wrapper
796 qualname = ssh_wrapper
798 propagate = 1
797 propagate = 1
799
798
800 [logger_celery]
799 [logger_celery]
801 level = DEBUG
800 level = DEBUG
802 handlers =
801 handlers =
803 qualname = celery
802 qualname = celery
804
803
805
804
806 ; ########
805 ; ########
807 ; HANDLERS
806 ; HANDLERS
808 ; ########
807 ; ########
809
808
810 [handler_console]
809 [handler_console]
811 class = StreamHandler
810 class = StreamHandler
812 args = (sys.stderr, )
811 args = (sys.stderr, )
813 level = DEBUG
812 level = DEBUG
814 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
813 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
815 ; This allows sending properly formatted logs to grafana loki or elasticsearch
814 ; This allows sending properly formatted logs to grafana loki or elasticsearch
816 formatter = color_formatter
815 formatter = color_formatter
817
816
818 [handler_console_sql]
817 [handler_console_sql]
819 ; "level = DEBUG" logs SQL queries and results.
818 ; "level = DEBUG" logs SQL queries and results.
820 ; "level = INFO" logs SQL queries.
819 ; "level = INFO" logs SQL queries.
821 ; "level = WARN" logs neither. (Recommended for production systems.)
820 ; "level = WARN" logs neither. (Recommended for production systems.)
822 class = StreamHandler
821 class = StreamHandler
823 args = (sys.stderr, )
822 args = (sys.stderr, )
824 level = WARN
823 level = WARN
825 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
824 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
826 ; This allows sending properly formatted logs to grafana loki or elasticsearch
825 ; This allows sending properly formatted logs to grafana loki or elasticsearch
827 formatter = color_formatter_sql
826 formatter = color_formatter_sql
828
827
829 ; ##########
828 ; ##########
830 ; FORMATTERS
829 ; FORMATTERS
831 ; ##########
830 ; ##########
832
831
833 [formatter_generic]
832 [formatter_generic]
834 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
833 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
835 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
834 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
836 datefmt = %Y-%m-%d %H:%M:%S
835 datefmt = %Y-%m-%d %H:%M:%S
837
836
838 [formatter_color_formatter]
837 [formatter_color_formatter]
839 class = rhodecode.lib.logging_formatter.ColorFormatter
838 class = rhodecode.lib.logging_formatter.ColorFormatter
840 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
839 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
841 datefmt = %Y-%m-%d %H:%M:%S
840 datefmt = %Y-%m-%d %H:%M:%S
842
841
843 [formatter_color_formatter_sql]
842 [formatter_color_formatter_sql]
844 class = rhodecode.lib.logging_formatter.ColorFormatterSql
843 class = rhodecode.lib.logging_formatter.ColorFormatterSql
845 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
844 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
846 datefmt = %Y-%m-%d %H:%M:%S
845 datefmt = %Y-%m-%d %H:%M:%S
847
846
848 [formatter_json]
847 [formatter_json]
849 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
848 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
850 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
849 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,818 +1,817 b''
1
1
2 ; #########################################
2 ; #########################################
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 ; #########################################
4 ; #########################################
5
5
6 [DEFAULT]
6 [DEFAULT]
7 ; Debug flag sets all loggers to debug, and enables request tracking
7 ; Debug flag sets all loggers to debug, and enables request tracking
8 debug = false
8 debug = false
9
9
10 ; ########################################################################
10 ; ########################################################################
11 ; EMAIL CONFIGURATION
11 ; EMAIL CONFIGURATION
12 ; These settings will be used by the RhodeCode mailing system
12 ; These settings will be used by the RhodeCode mailing system
13 ; ########################################################################
13 ; ########################################################################
14
14
15 ; prefix all emails subjects with given prefix, helps filtering out emails
15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 #email_prefix = [RhodeCode]
16 #email_prefix = [RhodeCode]
17
17
18 ; email FROM address all mails will be sent
18 ; email FROM address all mails will be sent
19 #app_email_from = rhodecode-noreply@localhost
19 #app_email_from = rhodecode-noreply@localhost
20
20
21 #smtp_server = mail.server.com
21 #smtp_server = mail.server.com
22 #smtp_username =
22 #smtp_username =
23 #smtp_password =
23 #smtp_password =
24 #smtp_port =
24 #smtp_port =
25 #smtp_use_tls = false
25 #smtp_use_tls = false
26 #smtp_use_ssl = true
26 #smtp_use_ssl = true
27
27
28 [server:main]
28 [server:main]
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 host = 127.0.0.1
31 host = 127.0.0.1
32 port = 10020
32 port = 10020
33
33
34
34
35 ; ###########################
35 ; ###########################
36 ; GUNICORN APPLICATION SERVER
36 ; GUNICORN APPLICATION SERVER
37 ; ###########################
37 ; ###########################
38
38
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40
40
41 ; Module to use, this setting shouldn't be changed
41 ; Module to use, this setting shouldn't be changed
42 use = egg:gunicorn#main
42 use = egg:gunicorn#main
43
43
44 ; Prefix middleware for RhodeCode.
44 ; Prefix middleware for RhodeCode.
45 ; recommended when using proxy setup.
45 ; recommended when using proxy setup.
46 ; allows to set RhodeCode under a prefix in server.
46 ; allows to set RhodeCode under a prefix in server.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 ; And set your prefix like: `prefix = /custom_prefix`
48 ; And set your prefix like: `prefix = /custom_prefix`
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 ; to make your cookies only work on prefix url
50 ; to make your cookies only work on prefix url
51 [filter:proxy-prefix]
51 [filter:proxy-prefix]
52 use = egg:PasteDeploy#prefix
52 use = egg:PasteDeploy#prefix
53 prefix = /
53 prefix = /
54
54
55 [app:main]
55 [app:main]
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 ; of this file
57 ; of this file
58 ; Each option in the app:main can be overridden by an environment variable
58 ; Each option in the app:main can be overridden by an environment variable
59 ;
59 ;
60 ;To override an option:
60 ;To override an option:
61 ;
61 ;
62 ;RC_<KeyName>
62 ;RC_<KeyName>
63 ;Everything should be uppercase, . and - should be replaced by _.
63 ;Everything should be uppercase, . and - should be replaced by _.
64 ;For example, if you have these configuration settings:
64 ;For example, if you have these configuration settings:
65 ;rc_cache.repo_object.backend = foo
65 ;rc_cache.repo_object.backend = foo
66 ;can be overridden by
66 ;can be overridden by
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68
68
69 use = egg:rhodecode-enterprise-ce
69 use = egg:rhodecode-enterprise-ce
70
70
71 ; enable proxy prefix middleware, defined above
71 ; enable proxy prefix middleware, defined above
72 #filter-with = proxy-prefix
72 #filter-with = proxy-prefix
73
73
74 ; encryption key used to encrypt social plugin tokens,
74 ; encryption key used to encrypt social plugin tokens,
75 ; remote_urls with credentials etc, if not set it defaults to
75 ; remote_urls with credentials etc, if not set it defaults to
76 ; `beaker.session.secret`
76 ; `beaker.session.secret`
77 #rhodecode.encrypted_values.secret =
77 #rhodecode.encrypted_values.secret =
78
78
79 ; decryption strict mode (enabled by default). It controls if decryption raises
79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 #rhodecode.encrypted_values.strict = false
81 #rhodecode.encrypted_values.strict = false
82
82
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 ; fernet is safer, and we strongly recommend switching to it.
84 ; fernet is safer, and we strongly recommend switching to it.
85 ; Due to backward compatibility aes is used as default.
85 ; Due to backward compatibility aes is used as default.
86 #rhodecode.encrypted_values.algorithm = fernet
86 #rhodecode.encrypted_values.algorithm = fernet
87
87
88 ; Return gzipped responses from RhodeCode (static files/application)
88 ; Return gzipped responses from RhodeCode (static files/application)
89 gzip_responses = false
89 gzip_responses = false
90
90
91 ; Auto-generate javascript routes file on startup
91 ; Auto-generate javascript routes file on startup
92 generate_js_files = false
92 generate_js_files = false
93
93
94 ; System global default language.
94 ; System global default language.
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 lang = en
96 lang = en
97
97
98 ; Perform a full repository scan and import on each server start.
98 ; Perform a full repository scan and import on each server start.
99 ; Setting this to true could lead to very long startup time.
99 ; Setting this to true could lead to very long startup time.
100 startup.import_repos = false
100 startup.import_repos = false
101
101
102 ; URL at which the application is running. This is used for Bootstrapping
102 ; URL at which the application is running. This is used for Bootstrapping
103 ; requests in context when no web request is available. Used in ishell, or
103 ; requests in context when no web request is available. Used in ishell, or
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 app.base_url = http://rhodecode.local
105 app.base_url = http://rhodecode.local
106
106
107 ; Host at which the Service API is running.
107 ; Host at which the Service API is running.
108 app.service_api.host = http://rhodecode.local:10020
108 app.service_api.host = http://rhodecode.local:10020
109
109
110 ; Secret for Service API authentication.
110 ; Secret for Service API authentication.
111 app.service_api.token =
111 app.service_api.token =
112
112
113 ; Unique application ID. Should be a random unique string for security.
113 ; Unique application ID. Should be a random unique string for security.
114 app_instance_uuid = rc-production
114 app_instance_uuid = rc-production
115
115
116 ; Cut off limit for large diffs (size in bytes). If overall diff size on
116 ; Cut off limit for large diffs (size in bytes). If overall diff size on
117 ; commit, or pull request exceeds this limit this diff will be displayed
117 ; commit, or pull request exceeds this limit this diff will be displayed
118 ; partially. E.g 512000 == 512Kb
118 ; partially. E.g 512000 == 512Kb
119 cut_off_limit_diff = 512000
119 cut_off_limit_diff = 512000
120
120
121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
122 ; file inside diff which exceeds this limit will be displayed partially.
122 ; file inside diff which exceeds this limit will be displayed partially.
123 ; E.g 128000 == 128Kb
123 ; E.g 128000 == 128Kb
124 cut_off_limit_file = 128000
124 cut_off_limit_file = 128000
125
125
126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
127 vcs_full_cache = true
127 vcs_full_cache = true
128
128
129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
131 force_https = false
131 force_https = false
132
132
133 ; use Strict-Transport-Security headers
133 ; use Strict-Transport-Security headers
134 use_htsts = false
134 use_htsts = false
135
135
136 ; Set to true if your repos are exposed using the dumb protocol
136 ; Set to true if your repos are exposed using the dumb protocol
137 git_update_server_info = false
137 git_update_server_info = false
138
138
139 ; RSS/ATOM feed options
139 ; RSS/ATOM feed options
140 rss_cut_off_limit = 256000
140 rss_cut_off_limit = 256000
141 rss_items_per_page = 10
141 rss_items_per_page = 10
142 rss_include_diff = false
142 rss_include_diff = false
143
143
144 ; gist URL alias, used to create nicer urls for gist. This should be an
144 ; gist URL alias, used to create nicer urls for gist. This should be an
145 ; url that does rewrites to _admin/gists/{gistid}.
145 ; url that does rewrites to _admin/gists/{gistid}.
146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
148 gist_alias_url =
148 gist_alias_url =
149
149
150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
151 ; used for access.
151 ; used for access.
152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
152 ; came from the logged in user who owns this authentication token.
153 ; came from the logged in user who owns this authentication token.
153 ; Additionally @TOKEN syntax can be used to bind the view to specific
154 ; Additionally @TOKEN syntax can be used to bind the view to specific
155 ; authentication token. Such view would be only accessible when used together
155 ; authentication token. Such view would be only accessible when used together
156 ; with this authentication token
156 ; with this authentication token
157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
158 ; The list should be "," separated and on a single line.
158 ; The list should be "," separated and on a single line.
159 ; Most common views to enable:
159 ; Most common views to enable:
160
160
161 # RepoCommitsView:repo_commit_download
161 # RepoCommitsView:repo_commit_download
162 # RepoCommitsView:repo_commit_patch
162 # RepoCommitsView:repo_commit_patch
163 # RepoCommitsView:repo_commit_raw
163 # RepoCommitsView:repo_commit_raw
164 # RepoCommitsView:repo_commit_raw@TOKEN
164 # RepoCommitsView:repo_commit_raw@TOKEN
165 # RepoFilesView:repo_files_diff
165 # RepoFilesView:repo_files_diff
166 # RepoFilesView:repo_archivefile
166 # RepoFilesView:repo_archivefile
167 # RepoFilesView:repo_file_raw
167 # RepoFilesView:repo_file_raw
168 # GistView:*
168 # GistView:*
169 api_access_controllers_whitelist =
169 api_access_controllers_whitelist =
170
170
171 ; Default encoding used to convert from and to unicode
171 ; Default encoding used to convert from and to unicode
172 ; can be also a comma separated list of encoding in case of mixed encodings
172 ; can be also a comma separated list of encoding in case of mixed encodings
173 default_encoding = UTF-8
173 default_encoding = UTF-8
174
174
175 ; instance-id prefix
175 ; instance-id prefix
176 ; a prefix key for this instance used for cache invalidation when running
176 ; a prefix key for this instance used for cache invalidation when running
177 ; multiple instances of RhodeCode, make sure it's globally unique for
177 ; multiple instances of RhodeCode, make sure it's globally unique for
178 ; all running RhodeCode instances. Leave empty if you don't use it
178 ; all running RhodeCode instances. Leave empty if you don't use it
179 instance_id =
179 instance_id =
180
180
181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
182 ; of an authentication plugin also if it is disabled by it's settings.
182 ; of an authentication plugin also if it is disabled by it's settings.
183 ; This could be useful if you are unable to log in to the system due to broken
183 ; This could be useful if you are unable to log in to the system due to broken
184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
185 ; module to log in again and fix the settings.
185 ; module to log in again and fix the settings.
186 ; Available builtin plugin IDs (hash is part of the ID):
186 ; Available builtin plugin IDs (hash is part of the ID):
187 ; egg:rhodecode-enterprise-ce#rhodecode
187 ; egg:rhodecode-enterprise-ce#rhodecode
188 ; egg:rhodecode-enterprise-ce#pam
188 ; egg:rhodecode-enterprise-ce#pam
189 ; egg:rhodecode-enterprise-ce#ldap
189 ; egg:rhodecode-enterprise-ce#ldap
190 ; egg:rhodecode-enterprise-ce#jasig_cas
190 ; egg:rhodecode-enterprise-ce#jasig_cas
191 ; egg:rhodecode-enterprise-ce#headers
191 ; egg:rhodecode-enterprise-ce#headers
192 ; egg:rhodecode-enterprise-ce#crowd
192 ; egg:rhodecode-enterprise-ce#crowd
193
193
194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
195
195
196 ; Flag to control loading of legacy plugins in py:/path format
196 ; Flag to control loading of legacy plugins in py:/path format
197 auth_plugin.import_legacy_plugins = true
197 auth_plugin.import_legacy_plugins = true
198
198
199 ; alternative return HTTP header for failed authentication. Default HTTP
199 ; alternative return HTTP header for failed authentication. Default HTTP
200 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
200 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
201 ; handling that causing a series of failed authentication calls.
201 ; handling that causing a series of failed authentication calls.
202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
203 ; This will be served instead of default 401 on bad authentication
203 ; This will be served instead of default 401 on bad authentication
204 auth_ret_code =
204 auth_ret_code =
205
205
206 ; use special detection method when serving auth_ret_code, instead of serving
206 ; use special detection method when serving auth_ret_code, instead of serving
207 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
207 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
208 ; and then serve auth_ret_code to clients
208 ; and then serve auth_ret_code to clients
209 auth_ret_code_detection = false
209 auth_ret_code_detection = false
210
210
211 ; locking return code. When repository is locked return this HTTP code. 2XX
211 ; locking return code. When repository is locked return this HTTP code. 2XX
212 ; codes don't break the transactions while 4XX codes do
212 ; codes don't break the transactions while 4XX codes do
213 lock_ret_code = 423
213 lock_ret_code = 423
214
214
214 ; Filesystem location where repositories should be stored
215 ; Filesystem location where repositories should be stored
216 repo_store.path = /var/opt/rhodecode_repo_store
216 repo_store.path = /var/opt/rhodecode_repo_store
217
217
218 ; allows to setup custom hooks in settings page
218 ; allows to setup custom hooks in settings page
219 allow_custom_hooks_settings = true
219 allow_custom_hooks_settings = true
220
220
221 ; Generated license token required for EE edition license.
221 ; Generated license token required for EE edition license.
222 ; New generated token value can be found in Admin > settings > license page.
222 ; New generated token value can be found in Admin > settings > license page.
223 license_token =
223 license_token =
224
224
225 ; This flag hides sensitive information on the license page such as token, and license data
225 ; This flag hides sensitive information on the license page such as token, and license data
226 license.hide_license_info = false
226 license.hide_license_info = false
227
227
228 ; supervisor connection uri, for managing supervisor and logs.
228 ; supervisor connection uri, for managing supervisor and logs.
229 supervisor.uri =
229 supervisor.uri =
230
230
231 ; supervisord group name/id we only want this RC instance to handle
231 ; supervisord group name/id we only want this RC instance to handle
232 supervisor.group_id = prod
232 supervisor.group_id = prod
233
233
234 ; Display extended labs settings
234 ; Display extended labs settings
235 labs_settings_active = true
235 labs_settings_active = true
236
236
237 ; Custom exception store path, defaults to TMPDIR
237 ; Custom exception store path, defaults to TMPDIR
238 ; This is used to store exception from RhodeCode in shared directory
238 ; This is used to store exception from RhodeCode in shared directory
239 #exception_tracker.store_path =
239 #exception_tracker.store_path =
240
240
241 ; Send email with exception details when it happens
241 ; Send email with exception details when it happens
242 #exception_tracker.send_email = false
242 #exception_tracker.send_email = false
243
243
244 ; Comma separated list of recipients for exception emails,
244 ; Comma separated list of recipients for exception emails,
245 ; e.g admin@rhodecode.com,devops@rhodecode.com
245 ; e.g admin@rhodecode.com,devops@rhodecode.com
246 ; Can be left empty, then emails will be sent to ALL super-admins
246 ; Can be left empty, then emails will be sent to ALL super-admins
247 #exception_tracker.send_email_recipients =
247 #exception_tracker.send_email_recipients =
248
248
249 ; optional prefix to Add to email Subject
249 ; optional prefix to Add to email Subject
250 #exception_tracker.email_prefix = [RHODECODE ERROR]
250 #exception_tracker.email_prefix = [RHODECODE ERROR]
251
251
252 ; File store configuration. This is used to store and serve uploaded files
252 ; File store configuration. This is used to store and serve uploaded files
253 file_store.enabled = true
253 file_store.enabled = true
254
254
255 ; Storage backend, available options are: local
255 ; Storage backend, available options are: local
256 file_store.backend = local
256 file_store.backend = local
257
257
258 ; path to store the uploaded binaries and artifacts
258 ; path to store the uploaded binaries and artifacts
259 file_store.storage_path = /var/opt/rhodecode_data/file_store
259 file_store.storage_path = /var/opt/rhodecode_data/file_store
260
260
261
261
262 ; Redis url to acquire/check generation of archives locks
262 ; Redis url to acquire/check generation of archives locks
263 archive_cache.locking.url = redis://redis:6379/1
263 archive_cache.locking.url = redis://redis:6379/1
264
264
265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
266 archive_cache.backend.type = filesystem
266 archive_cache.backend.type = filesystem
267
267
268 ; url for s3 compatible storage that allows to upload artifacts
268 ; url for s3 compatible storage that allows to upload artifacts
269 ; e.g http://minio:9000
269 ; e.g http://minio:9000
270 archive_cache.objectstore.url = http://s3-minio:9000
270 archive_cache.objectstore.url = http://s3-minio:9000
271
271
272 ; key for s3 auth
272 ; key for s3 auth
273 archive_cache.objectstore.key = key
273 archive_cache.objectstore.key = key
274
274
275 ; secret for s3 auth
275 ; secret for s3 auth
276 archive_cache.objectstore.secret = secret
276 archive_cache.objectstore.secret = secret
277
277
278 ; number of sharded buckets to create to distribute archives across
278 ; number of sharded buckets to create to distribute archives across
279 ; default is 8 shards
279 ; default is 8 shards
280 archive_cache.objectstore.bucket_shards = 8
280 archive_cache.objectstore.bucket_shards = 8
281
281
282 ; a top-level bucket to put all other sharded buckets in
282 ; a top-level bucket to put all other shards in
283 ; in case it's empty all buckets will be created in top-level (not recommended)
283 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
284 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
284 archive_cache.objectstore.bucket = rhodecode-archive-cache
285 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
286
285
287 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
286 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
288 archive_cache.objectstore.retry = false
287 archive_cache.objectstore.retry = false
289
288
290 ; number of seconds to wait for next try using retry
289 ; number of seconds to wait for next try using retry
291 archive_cache.objectstore.retry_backoff = 1
290 archive_cache.objectstore.retry_backoff = 1
292
291
293 ; how many tries to do a retry fetch from this backend
292 ; how many tries to do a retry fetch from this backend
294 archive_cache.objectstore.retry_attempts = 10
293 archive_cache.objectstore.retry_attempts = 10
295
294
296 ; Default is $cache_dir/archive_cache if not set
295 ; Default is $cache_dir/archive_cache if not set
297 ; Generated repo archives will be cached at this location
296 ; Generated repo archives will be cached at this location
298 ; and served from the cache during subsequent requests for the same archive of
297 ; and served from the cache during subsequent requests for the same archive of
299 ; the repository. This path is important to be shared across filesystems and with
298 ; the repository. This path is important to be shared across filesystems and with
300 ; RhodeCode and vcsserver
299 ; RhodeCode and vcsserver
301 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
300 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
302
301
303 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
302 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
304 archive_cache.filesystem.cache_size_gb = 40
303 archive_cache.filesystem.cache_size_gb = 40
305
304
306 ; Eviction policy used to clear out after cache_size_gb limit is reached
305 ; Eviction policy used to clear out after cache_size_gb limit is reached
307 archive_cache.filesystem.eviction_policy = least-recently-stored
306 archive_cache.filesystem.eviction_policy = least-recently-stored
308
307
309 ; By default cache uses sharding technique, this specifies how many shards are there
308 ; By default cache uses sharding technique, this specifies how many shards are there
310 ; default is 8 shards
309 ; default is 8 shards
311 archive_cache.filesystem.cache_shards = 8
310 archive_cache.filesystem.cache_shards = 8
312
311
313 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
312 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
314 archive_cache.filesystem.retry = false
313 archive_cache.filesystem.retry = false
315
314
316 ; number of seconds to wait for next try using retry
315 ; number of seconds to wait for next try using retry
317 archive_cache.filesystem.retry_backoff = 1
316 archive_cache.filesystem.retry_backoff = 1
318
317
319 ; how many tries to do a retry fetch from this backend
318 ; how many tries to do a retry fetch from this backend
320 archive_cache.filesystem.retry_attempts = 10
319 archive_cache.filesystem.retry_attempts = 10
321
320
322
321
323 ; #############
322 ; #############
324 ; CELERY CONFIG
323 ; CELERY CONFIG
325 ; #############
324 ; #############
326
325
327 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
326 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
328
327
329 use_celery = true
328 use_celery = true
330
329
331 ; path to store schedule database
330 ; path to store schedule database
332 #celerybeat-schedule.path =
331 #celerybeat-schedule.path =
333
332
334 ; connection url to the message broker (default redis)
333 ; connection url to the message broker (default redis)
335 celery.broker_url = redis://redis:6379/8
334 celery.broker_url = redis://redis:6379/8
336
335
337 ; results backend to get results for (default redis)
336 ; results backend to get results for (default redis)
338 celery.result_backend = redis://redis:6379/8
337 celery.result_backend = redis://redis:6379/8
339
338
340 ; rabbitmq example
339 ; rabbitmq example
341 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
340 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
342
341
343 ; maximum tasks to execute before worker restart
342 ; maximum tasks to execute before worker restart
344 celery.max_tasks_per_child = 20
343 celery.max_tasks_per_child = 20
345
344
346 ; tasks will never be sent to the queue, but executed locally instead.
345 ; tasks will never be sent to the queue, but executed locally instead.
347 celery.task_always_eager = false
346 celery.task_always_eager = false
348
347
349 ; #############
348 ; #############
350 ; DOGPILE CACHE
349 ; DOGPILE CACHE
351 ; #############
350 ; #############
352
351
353 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
352 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
354 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
353 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
355 cache_dir = /var/opt/rhodecode_data
354 cache_dir = /var/opt/rhodecode_data
356
355
357 ; *********************************************
356 ; *********************************************
358 ; `sql_cache_short` cache for heavy SQL queries
357 ; `sql_cache_short` cache for heavy SQL queries
359 ; Only supported backend is `memory_lru`
358 ; Only supported backend is `memory_lru`
360 ; *********************************************
359 ; *********************************************
361 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
360 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
362 rc_cache.sql_cache_short.expiration_time = 30
361 rc_cache.sql_cache_short.expiration_time = 30
363
362
364
363
365 ; *****************************************************
364 ; *****************************************************
366 ; `cache_repo_longterm` cache for repo object instances
365 ; `cache_repo_longterm` cache for repo object instances
367 ; Only supported backend is `memory_lru`
366 ; Only supported backend is `memory_lru`
368 ; *****************************************************
367 ; *****************************************************
369 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
368 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
370 ; by default we use 30 Days, cache is still invalidated on push
369 ; by default we use 30 Days, cache is still invalidated on push
371 rc_cache.cache_repo_longterm.expiration_time = 2592000
370 rc_cache.cache_repo_longterm.expiration_time = 2592000
372 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
371 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
373 rc_cache.cache_repo_longterm.max_size = 10000
372 rc_cache.cache_repo_longterm.max_size = 10000
374
373
375
374
376 ; *********************************************
375 ; *********************************************
377 ; `cache_general` cache for general purpose use
376 ; `cache_general` cache for general purpose use
378 ; for simplicity use rc.file_namespace backend,
377 ; for simplicity use rc.file_namespace backend,
379 ; for performance and scale use rc.redis
378 ; for performance and scale use rc.redis
380 ; *********************************************
379 ; *********************************************
381 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
380 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
382 rc_cache.cache_general.expiration_time = 43200
381 rc_cache.cache_general.expiration_time = 43200
383 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
382 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
384 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
383 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
385
384
386 ; alternative `cache_general` redis backend with distributed lock
385 ; alternative `cache_general` redis backend with distributed lock
387 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
386 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
388 #rc_cache.cache_general.expiration_time = 300
387 #rc_cache.cache_general.expiration_time = 300
389
388
390 ; redis_expiration_time needs to be greater then expiration_time
389 ; redis_expiration_time needs to be greater then expiration_time
391 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
390 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
392
391
393 #rc_cache.cache_general.arguments.host = localhost
392 #rc_cache.cache_general.arguments.host = localhost
394 #rc_cache.cache_general.arguments.port = 6379
393 #rc_cache.cache_general.arguments.port = 6379
395 #rc_cache.cache_general.arguments.db = 0
394 #rc_cache.cache_general.arguments.db = 0
396 #rc_cache.cache_general.arguments.socket_timeout = 30
395 #rc_cache.cache_general.arguments.socket_timeout = 30
397 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
396 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
398 #rc_cache.cache_general.arguments.distributed_lock = true
397 #rc_cache.cache_general.arguments.distributed_lock = true
399
398
400 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
399 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
401 #rc_cache.cache_general.arguments.lock_auto_renewal = true
400 #rc_cache.cache_general.arguments.lock_auto_renewal = true
402
401
403 ; *************************************************
402 ; *************************************************
404 ; `cache_perms` cache for permission tree, auth TTL
403 ; `cache_perms` cache for permission tree, auth TTL
405 ; for simplicity use rc.file_namespace backend,
404 ; for simplicity use rc.file_namespace backend,
406 ; for performance and scale use rc.redis
405 ; for performance and scale use rc.redis
407 ; *************************************************
406 ; *************************************************
408 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
407 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
409 rc_cache.cache_perms.expiration_time = 3600
408 rc_cache.cache_perms.expiration_time = 3600
410 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
409 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
411 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
410 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
412
411
413 ; alternative `cache_perms` redis backend with distributed lock
412 ; alternative `cache_perms` redis backend with distributed lock
414 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
413 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
415 #rc_cache.cache_perms.expiration_time = 300
414 #rc_cache.cache_perms.expiration_time = 300
416
415
417 ; redis_expiration_time needs to be greater then expiration_time
416 ; redis_expiration_time needs to be greater then expiration_time
418 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
417 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
419
418
420 #rc_cache.cache_perms.arguments.host = localhost
419 #rc_cache.cache_perms.arguments.host = localhost
421 #rc_cache.cache_perms.arguments.port = 6379
420 #rc_cache.cache_perms.arguments.port = 6379
422 #rc_cache.cache_perms.arguments.db = 0
421 #rc_cache.cache_perms.arguments.db = 0
423 #rc_cache.cache_perms.arguments.socket_timeout = 30
422 #rc_cache.cache_perms.arguments.socket_timeout = 30
424 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
423 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
425 #rc_cache.cache_perms.arguments.distributed_lock = true
424 #rc_cache.cache_perms.arguments.distributed_lock = true
426
425
427 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
426 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
428 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
427 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
429
428
430 ; ***************************************************
429 ; ***************************************************
431 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
430 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
432 ; for simplicity use rc.file_namespace backend,
431 ; for simplicity use rc.file_namespace backend,
433 ; for performance and scale use rc.redis
432 ; for performance and scale use rc.redis
434 ; ***************************************************
433 ; ***************************************************
435 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
434 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
436 rc_cache.cache_repo.expiration_time = 2592000
435 rc_cache.cache_repo.expiration_time = 2592000
437 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
436 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
438 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
437 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
439
438
440 ; alternative `cache_repo` redis backend with distributed lock
439 ; alternative `cache_repo` redis backend with distributed lock
441 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
440 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
442 #rc_cache.cache_repo.expiration_time = 2592000
441 #rc_cache.cache_repo.expiration_time = 2592000
443
442
444 ; redis_expiration_time needs to be greater then expiration_time
443 ; redis_expiration_time needs to be greater then expiration_time
445 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
444 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
446
445
447 #rc_cache.cache_repo.arguments.host = localhost
446 #rc_cache.cache_repo.arguments.host = localhost
448 #rc_cache.cache_repo.arguments.port = 6379
447 #rc_cache.cache_repo.arguments.port = 6379
449 #rc_cache.cache_repo.arguments.db = 1
448 #rc_cache.cache_repo.arguments.db = 1
450 #rc_cache.cache_repo.arguments.socket_timeout = 30
449 #rc_cache.cache_repo.arguments.socket_timeout = 30
451 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
450 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
452 #rc_cache.cache_repo.arguments.distributed_lock = true
451 #rc_cache.cache_repo.arguments.distributed_lock = true
453
452
454 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
453 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
455 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
454 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
456
455
457 ; ##############
456 ; ##############
458 ; BEAKER SESSION
457 ; BEAKER SESSION
459 ; ##############
458 ; ##############
460
459
461 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
460 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
462 ; types are file, ext:redis, ext:database, ext:memcached
461 ; types are file, ext:redis, ext:database, ext:memcached
463 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
462 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
464 #beaker.session.type = file
463 #beaker.session.type = file
465 #beaker.session.data_dir = %(here)s/data/sessions
464 #beaker.session.data_dir = %(here)s/data/sessions
466
465
467 ; Redis based sessions
466 ; Redis based sessions
468 beaker.session.type = ext:redis
467 beaker.session.type = ext:redis
469 beaker.session.url = redis://redis:6379/2
468 beaker.session.url = redis://redis:6379/2
470
469
471 ; DB based session, fast, and allows easy management over logged in users
470 ; DB based session, fast, and allows easy management over logged in users
472 #beaker.session.type = ext:database
471 #beaker.session.type = ext:database
473 #beaker.session.table_name = db_session
472 #beaker.session.table_name = db_session
474 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
473 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
475 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
474 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
476 #beaker.session.sa.pool_recycle = 3600
475 #beaker.session.sa.pool_recycle = 3600
477 #beaker.session.sa.echo = false
476 #beaker.session.sa.echo = false
478
477
479 beaker.session.key = rhodecode
478 beaker.session.key = rhodecode
480 beaker.session.secret = production-rc-uytcxaz
479 beaker.session.secret = production-rc-uytcxaz
481 beaker.session.lock_dir = /data_ramdisk/lock
480 beaker.session.lock_dir = /data_ramdisk/lock
482
481
483 ; Secure encrypted cookie. Requires AES and AES python libraries
482 ; Secure encrypted cookie. Requires AES and AES python libraries
484 ; you must disable beaker.session.secret to use this
483 ; you must disable beaker.session.secret to use this
485 #beaker.session.encrypt_key = key_for_encryption
484 #beaker.session.encrypt_key = key_for_encryption
486 #beaker.session.validate_key = validation_key
485 #beaker.session.validate_key = validation_key
487
486
488 ; Sets session as invalid (also logging out user) if it haven not been
487 ; Sets session as invalid (also logging out user) if it haven not been
489 ; accessed for given amount of time in seconds
488 ; accessed for given amount of time in seconds
490 beaker.session.timeout = 2592000
489 beaker.session.timeout = 2592000
491 beaker.session.httponly = true
490 beaker.session.httponly = true
492
491
493 ; Path to use for the cookie. Set to prefix if you use prefix middleware
492 ; Path to use for the cookie. Set to prefix if you use prefix middleware
494 #beaker.session.cookie_path = /custom_prefix
493 #beaker.session.cookie_path = /custom_prefix
495
494
496 ; Set https secure cookie
495 ; Set https secure cookie
497 beaker.session.secure = false
496 beaker.session.secure = false
498
497
499 ; default cookie expiration time in seconds, set to `true` to set expire
498 ; default cookie expiration time in seconds, set to `true` to set expire
500 ; at browser close
499 ; at browser close
501 #beaker.session.cookie_expires = 3600
500 #beaker.session.cookie_expires = 3600
502
501
503 ; #############################
502 ; #############################
504 ; SEARCH INDEXING CONFIGURATION
503 ; SEARCH INDEXING CONFIGURATION
505 ; #############################
504 ; #############################
506
505
507 ; Full text search indexer is available in rhodecode-tools under
506 ; Full text search indexer is available in rhodecode-tools under
508 ; `rhodecode-tools index` command
507 ; `rhodecode-tools index` command
509
508
510 ; WHOOSH Backend, doesn't require additional services to run
509 ; WHOOSH Backend, doesn't require additional services to run
511 ; it works good with few dozen repos
510 ; it works good with few dozen repos
512 search.module = rhodecode.lib.index.whoosh
511 search.module = rhodecode.lib.index.whoosh
513 search.location = %(here)s/data/index
512 search.location = %(here)s/data/index
514
513
515 ; ####################
514 ; ####################
516 ; CHANNELSTREAM CONFIG
515 ; CHANNELSTREAM CONFIG
517 ; ####################
516 ; ####################
518
517
519 ; channelstream enables persistent connections and live notification
518 ; channelstream enables persistent connections and live notification
520 ; in the system. It's also used by the chat system
519 ; in the system. It's also used by the chat system
521
520
522 channelstream.enabled = true
521 channelstream.enabled = true
523
522
524 ; server address for channelstream server on the backend
523 ; server address for channelstream server on the backend
525 channelstream.server = channelstream:9800
524 channelstream.server = channelstream:9800
526
525
527 ; location of the channelstream server from outside world
526 ; location of the channelstream server from outside world
528 ; use ws:// for http or wss:// for https. This address needs to be handled
527 ; use ws:// for http or wss:// for https. This address needs to be handled
529 ; by external HTTP server such as Nginx or Apache
528 ; by external HTTP server such as Nginx or Apache
530 ; see Nginx/Apache configuration examples in our docs
529 ; see Nginx/Apache configuration examples in our docs
531 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
530 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
532 channelstream.secret = ENV_GENERATED
531 channelstream.secret = ENV_GENERATED
533 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
532 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
534
533
535 ; Internal application path that Javascript uses to connect into.
534 ; Internal application path that Javascript uses to connect into.
536 ; If you use proxy-prefix the prefix should be added before /_channelstream
535 ; If you use proxy-prefix the prefix should be added before /_channelstream
537 channelstream.proxy_path = /_channelstream
536 channelstream.proxy_path = /_channelstream
538
537
539
538
540 ; ##############################
539 ; ##############################
541 ; MAIN RHODECODE DATABASE CONFIG
540 ; MAIN RHODECODE DATABASE CONFIG
542 ; ##############################
541 ; ##############################
543
542
544 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
543 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
545 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
544 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
546 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
545 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
547 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
546 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
548 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
547 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
549
548
550 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
549 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
551
550
552 ; see sqlalchemy docs for other advanced settings
551 ; see sqlalchemy docs for other advanced settings
553 ; print the sql statements to output
552 ; print the sql statements to output
554 sqlalchemy.db1.echo = false
553 sqlalchemy.db1.echo = false
555
554
556 ; recycle the connections after this amount of seconds
555 ; recycle the connections after this amount of seconds
557 sqlalchemy.db1.pool_recycle = 3600
556 sqlalchemy.db1.pool_recycle = 3600
558
557
559 ; the number of connections to keep open inside the connection pool.
558 ; the number of connections to keep open inside the connection pool.
560 ; 0 indicates no limit
559 ; 0 indicates no limit
561 ; the general calculus with gevent is:
560 ; the general calculus with gevent is:
562 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
561 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
563 ; then increase pool size + max overflow so that they add up to 500.
562 ; then increase pool size + max overflow so that they add up to 500.
564 #sqlalchemy.db1.pool_size = 5
563 #sqlalchemy.db1.pool_size = 5
565
564
566 ; The number of connections to allow in connection pool "overflow", that is
565 ; The number of connections to allow in connection pool "overflow", that is
567 ; connections that can be opened above and beyond the pool_size setting,
566 ; connections that can be opened above and beyond the pool_size setting,
568 ; which defaults to five.
567 ; which defaults to five.
569 #sqlalchemy.db1.max_overflow = 10
568 #sqlalchemy.db1.max_overflow = 10
570
569
571 ; Connection check ping, used to detect broken database connections
570 ; Connection check ping, used to detect broken database connections
572 ; could be enabled to better handle cases if MySQL has gone away errors
571 ; could be enabled to better handle cases if MySQL has gone away errors
573 #sqlalchemy.db1.ping_connection = true
572 #sqlalchemy.db1.ping_connection = true
574
573
575 ; ##########
574 ; ##########
576 ; VCS CONFIG
575 ; VCS CONFIG
577 ; ##########
576 ; ##########
578 vcs.server.enable = true
577 vcs.server.enable = true
579 vcs.server = vcsserver:10010
578 vcs.server = vcsserver:10010
580
579
581 ; Web server connectivity protocol, responsible for web based VCS operations
580 ; Web server connectivity protocol, responsible for web based VCS operations
582 ; Available protocols are:
581 ; Available protocols are:
583 ; `http` - use http-rpc backend (default)
582 ; `http` - use http-rpc backend (default)
584 vcs.server.protocol = http
583 vcs.server.protocol = http
585
584
586 ; Push/Pull operations protocol, available options are:
585 ; Push/Pull operations protocol, available options are:
587 ; `http` - use http-rpc backend (default)
586 ; `http` - use http-rpc backend (default)
588 vcs.scm_app_implementation = http
587 vcs.scm_app_implementation = http
589
588
590 ; Push/Pull operations hooks protocol, available options are:
589 ; Push/Pull operations hooks protocol, available options are:
591 ; `http` - use http-rpc backend (default)
590 ; `http` - use http-rpc backend (default)
592 ; `celery` - use celery based hooks
591 ; `celery` - use celery based hooks
593 vcs.hooks.protocol = http
592 vcs.hooks.protocol = http
594
593
595 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
594 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
596 ; accessible via network.
595 ; accessible via network.
597 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
596 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
598 vcs.hooks.host = *
597 vcs.hooks.host = *
599
598
600 ; Start VCSServer with this instance as a subprocess, useful for development
599 ; Start VCSServer with this instance as a subprocess, useful for development
601 vcs.start_server = false
600 vcs.start_server = false
602
601
603 ; List of enabled VCS backends, available options are:
602 ; List of enabled VCS backends, available options are:
604 ; `hg` - mercurial
603 ; `hg` - mercurial
605 ; `git` - git
604 ; `git` - git
606 ; `svn` - subversion
605 ; `svn` - subversion
607 vcs.backends = hg, git, svn
606 vcs.backends = hg, git, svn
608
607
609 ; Wait this number of seconds before killing connection to the vcsserver
608 ; Wait this number of seconds before killing connection to the vcsserver
610 vcs.connection_timeout = 3600
609 vcs.connection_timeout = 3600
611
610
612 ; Cache flag to cache vcsserver remote calls locally
611 ; Cache flag to cache vcsserver remote calls locally
613 ; It uses cache_region `cache_repo`
612 ; It uses cache_region `cache_repo`
614 vcs.methods.cache = true
613 vcs.methods.cache = true
615
614
616 ; ####################################################
615 ; ####################################################
617 ; Subversion proxy support (mod_dav_svn)
616 ; Subversion proxy support (mod_dav_svn)
618 ; Maps RhodeCode repo groups into SVN paths for Apache
617 ; Maps RhodeCode repo groups into SVN paths for Apache
619 ; ####################################################
618 ; ####################################################
620
619
621 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
620 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
622 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
621 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
623 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
622 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
624 #vcs.svn.compatible_version = 1.8
623 #vcs.svn.compatible_version = 1.8
625
624
626 ; Enable SVN proxy of requests over HTTP
625 ; Enable SVN proxy of requests over HTTP
627 vcs.svn.proxy.enabled = true
626 vcs.svn.proxy.enabled = true
628
627
629 ; host to connect to running SVN subsystem
628 ; host to connect to running SVN subsystem
630 vcs.svn.proxy.host = http://svn:8090
629 vcs.svn.proxy.host = http://svn:8090
631
630
632 ; Enable or disable the config file generation.
631 ; Enable or disable the config file generation.
633 svn.proxy.generate_config = true
632 svn.proxy.generate_config = true
634
633
635 ; Generate config file with `SVNListParentPath` set to `On`.
634 ; Generate config file with `SVNListParentPath` set to `On`.
636 svn.proxy.list_parent_path = true
635 svn.proxy.list_parent_path = true
637
636
638 ; Set location and file name of generated config file.
637 ; Set location and file name of generated config file.
639 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
638 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
640
639
641 ; alternative mod_dav config template. This needs to be a valid mako template
640 ; alternative mod_dav config template. This needs to be a valid mako template
642 ; Example template can be found in the source code:
641 ; Example template can be found in the source code:
643 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
642 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
644 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
643 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
645
644
646 ; Used as a prefix to the `Location` block in the generated config file.
645 ; Used as a prefix to the `Location` block in the generated config file.
647 ; In most cases it should be set to `/`.
646 ; In most cases it should be set to `/`.
648 svn.proxy.location_root = /
647 svn.proxy.location_root = /
649
648
650 ; Command to reload the mod dav svn configuration on change.
649 ; Command to reload the mod dav svn configuration on change.
651 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
650 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
652 ; Make sure user who runs RhodeCode process is allowed to reload Apache
651 ; Make sure user who runs RhodeCode process is allowed to reload Apache
653 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
652 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
654
653
655 ; If the timeout expires before the reload command finishes, the command will
654 ; If the timeout expires before the reload command finishes, the command will
656 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
655 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
657 #svn.proxy.reload_timeout = 10
656 #svn.proxy.reload_timeout = 10
658
657
659 ; ####################
658 ; ####################
660 ; SSH Support Settings
659 ; SSH Support Settings
661 ; ####################
660 ; ####################
662
661
663 ; Defines if a custom authorized_keys file should be created and written on
662 ; Defines if a custom authorized_keys file should be created and written on
664 ; any change user ssh keys. Setting this to false also disables possibility
663 ; any change user ssh keys. Setting this to false also disables possibility
665 ; of adding SSH keys by users from web interface. Super admins can still
664 ; of adding SSH keys by users from web interface. Super admins can still
666 ; manage SSH Keys.
665 ; manage SSH Keys.
667 ssh.generate_authorized_keyfile = true
666 ssh.generate_authorized_keyfile = true
668
667
669 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
668 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
670 # ssh.authorized_keys_ssh_opts =
669 # ssh.authorized_keys_ssh_opts =
671
670
672 ; Path to the authorized_keys file where the generate entries are placed.
671 ; Path to the authorized_keys file where the generate entries are placed.
673 ; It is possible to have multiple key files specified in `sshd_config` e.g.
672 ; It is possible to have multiple key files specified in `sshd_config` e.g.
674 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
673 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
675 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
674 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
676
675
677 ; Command to execute the SSH wrapper. The binary is available in the
676 ; Command to execute the SSH wrapper. The binary is available in the
678 ; RhodeCode installation directory.
677 ; RhodeCode installation directory.
679 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
678 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
680 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
679 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
681 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
680 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
682
681
683 ; Allow shell when executing the ssh-wrapper command
682 ; Allow shell when executing the ssh-wrapper command
684 ssh.wrapper_cmd_allow_shell = false
683 ssh.wrapper_cmd_allow_shell = false
685
684
686 ; Enables logging, and detailed output send back to the client during SSH
685 ; Enables logging, and detailed output send back to the client during SSH
687 ; operations. Useful for debugging, shouldn't be used in production.
686 ; operations. Useful for debugging, shouldn't be used in production.
688 ssh.enable_debug_logging = false
687 ssh.enable_debug_logging = false
689
688
690 ; Paths to binary executable, by default they are the names, but we can
689 ; Paths to binary executable, by default they are the names, but we can
691 ; override them if we want to use a custom one
690 ; override them if we want to use a custom one
692 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
691 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
693 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
692 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
694 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
693 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
695
694
696 ; Enables SSH key generator web interface. Disabling this still allows users
695 ; Enables SSH key generator web interface. Disabling this still allows users
697 ; to add their own keys.
696 ; to add their own keys.
698 ssh.enable_ui_key_generator = true
697 ssh.enable_ui_key_generator = true
699
698
700 ; Statsd client config, this is used to send metrics to statsd
699 ; Statsd client config, this is used to send metrics to statsd
701 ; We recommend setting statsd_exported and scrape them using Prometheus
700 ; We recommend setting statsd_exported and scrape them using Prometheus
702 #statsd.enabled = false
701 #statsd.enabled = false
703 #statsd.statsd_host = 0.0.0.0
702 #statsd.statsd_host = 0.0.0.0
704 #statsd.statsd_port = 8125
703 #statsd.statsd_port = 8125
705 #statsd.statsd_prefix =
704 #statsd.statsd_prefix =
706 #statsd.statsd_ipv6 = false
705 #statsd.statsd_ipv6 = false
707
706
708 ; configure logging automatically at server startup set to false
707 ; configure logging automatically at server startup set to false
709 ; to use the below custom logging config.
708 ; to use the below custom logging config.
710 ; RC_LOGGING_FORMATTER
709 ; RC_LOGGING_FORMATTER
711 ; RC_LOGGING_LEVEL
710 ; RC_LOGGING_LEVEL
712 ; env variables can control the settings for logging in case of autoconfigure
711 ; env variables can control the settings for logging in case of autoconfigure
713
712
714 #logging.autoconfigure = true
713 #logging.autoconfigure = true
715
714
716 ; specify your own custom logging config file to configure logging
715 ; specify your own custom logging config file to configure logging
717 #logging.logging_conf_file = /path/to/custom_logging.ini
716 #logging.logging_conf_file = /path/to/custom_logging.ini
718
717
719 ; Dummy marker to add new entries after.
718 ; Dummy marker to add new entries after.
720 ; Add any custom entries below. Please don't remove this marker.
719 ; Add any custom entries below. Please don't remove this marker.
721 custom.conf = 1
720 custom.conf = 1
722
721
723
722
724 ; #####################
723 ; #####################
725 ; LOGGING CONFIGURATION
724 ; LOGGING CONFIGURATION
726 ; #####################
725 ; #####################
727
726
728 [loggers]
727 [loggers]
729 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
728 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
730
729
731 [handlers]
730 [handlers]
732 keys = console, console_sql
731 keys = console, console_sql
733
732
734 [formatters]
733 [formatters]
735 keys = generic, json, color_formatter, color_formatter_sql
734 keys = generic, json, color_formatter, color_formatter_sql
736
735
737 ; #######
736 ; #######
738 ; LOGGERS
737 ; LOGGERS
739 ; #######
738 ; #######
740 [logger_root]
739 [logger_root]
741 level = NOTSET
740 level = NOTSET
742 handlers = console
741 handlers = console
743
742
744 [logger_sqlalchemy]
743 [logger_sqlalchemy]
745 level = INFO
744 level = INFO
746 handlers = console_sql
745 handlers = console_sql
747 qualname = sqlalchemy.engine
746 qualname = sqlalchemy.engine
748 propagate = 0
747 propagate = 0
749
748
750 [logger_beaker]
749 [logger_beaker]
751 level = DEBUG
750 level = DEBUG
752 handlers =
751 handlers =
753 qualname = beaker.container
752 qualname = beaker.container
754 propagate = 1
753 propagate = 1
755
754
756 [logger_rhodecode]
755 [logger_rhodecode]
757 level = DEBUG
756 level = DEBUG
758 handlers =
757 handlers =
759 qualname = rhodecode
758 qualname = rhodecode
760 propagate = 1
759 propagate = 1
761
760
762 [logger_ssh_wrapper]
761 [logger_ssh_wrapper]
763 level = DEBUG
762 level = DEBUG
764 handlers =
763 handlers =
765 qualname = ssh_wrapper
764 qualname = ssh_wrapper
766 propagate = 1
765 propagate = 1
767
766
768 [logger_celery]
767 [logger_celery]
769 level = DEBUG
768 level = DEBUG
770 handlers =
769 handlers =
771 qualname = celery
770 qualname = celery
772
771
773
772
774 ; ########
773 ; ########
775 ; HANDLERS
774 ; HANDLERS
776 ; ########
775 ; ########
777
776
778 [handler_console]
777 [handler_console]
779 class = StreamHandler
778 class = StreamHandler
780 args = (sys.stderr, )
779 args = (sys.stderr, )
781 level = INFO
780 level = INFO
782 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
781 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
783 ; This allows sending properly formatted logs to grafana loki or elasticsearch
782 ; This allows sending properly formatted logs to grafana loki or elasticsearch
784 formatter = generic
783 formatter = generic
785
784
786 [handler_console_sql]
785 [handler_console_sql]
787 ; "level = DEBUG" logs SQL queries and results.
786 ; "level = DEBUG" logs SQL queries and results.
788 ; "level = INFO" logs SQL queries.
787 ; "level = INFO" logs SQL queries.
789 ; "level = WARN" logs neither. (Recommended for production systems.)
788 ; "level = WARN" logs neither. (Recommended for production systems.)
790 class = StreamHandler
789 class = StreamHandler
791 args = (sys.stderr, )
790 args = (sys.stderr, )
792 level = WARN
791 level = WARN
793 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
792 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
794 ; This allows sending properly formatted logs to grafana loki or elasticsearch
793 ; This allows sending properly formatted logs to grafana loki or elasticsearch
795 formatter = generic
794 formatter = generic
796
795
797 ; ##########
796 ; ##########
798 ; FORMATTERS
797 ; FORMATTERS
799 ; ##########
798 ; ##########
800
799
801 [formatter_generic]
800 [formatter_generic]
802 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
801 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
803 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
802 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
804 datefmt = %Y-%m-%d %H:%M:%S
803 datefmt = %Y-%m-%d %H:%M:%S
805
804
806 [formatter_color_formatter]
805 [formatter_color_formatter]
807 class = rhodecode.lib.logging_formatter.ColorFormatter
806 class = rhodecode.lib.logging_formatter.ColorFormatter
808 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
807 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
809 datefmt = %Y-%m-%d %H:%M:%S
808 datefmt = %Y-%m-%d %H:%M:%S
810
809
811 [formatter_color_formatter_sql]
810 [formatter_color_formatter_sql]
812 class = rhodecode.lib.logging_formatter.ColorFormatterSql
811 class = rhodecode.lib.logging_formatter.ColorFormatterSql
813 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
812 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
814 datefmt = %Y-%m-%d %H:%M:%S
813 datefmt = %Y-%m-%d %H:%M:%S
815
814
816 [formatter_json]
815 [formatter_json]
817 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
816 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
818 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
817 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,222 +1,222 b''
1 # Copyright (C) 2010-2023 RhodeCode GmbH
1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 #
2 #
3 # This program is free software: you can redistribute it and/or modify
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
5 # (only), as published by the Free Software Foundation.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU Affero General Public License
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
14 #
15 # This program is dual-licensed. If you wish to learn more about the
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
18
19 import os
19 import os
20 import tempfile
20 import tempfile
21 import logging
21 import logging
22
22
23 from pyramid.settings import asbool
23 from pyramid.settings import asbool
24
24
25 from rhodecode.config.settings_maker import SettingsMaker
25 from rhodecode.config.settings_maker import SettingsMaker
26 from rhodecode.config import utils as config_utils
26 from rhodecode.config import utils as config_utils
27
27
28 log = logging.getLogger(__name__)
28 log = logging.getLogger(__name__)
29
29
30
30
31 def sanitize_settings_and_apply_defaults(global_config, settings):
31 def sanitize_settings_and_apply_defaults(global_config, settings):
32 """
32 """
33 Applies settings defaults and does all type conversion.
33 Applies settings defaults and does all type conversion.
34
34
35 We would move all settings parsing and preparation into this place, so that
35 We would move all settings parsing and preparation into this place, so that
36 we have only one place left which deals with this part. The remaining parts
36 we have only one place left which deals with this part. The remaining parts
37 of the application would start to rely fully on well-prepared settings.
37 of the application would start to rely fully on well-prepared settings.
38
38
39 This piece would later be split up per topic to avoid a big fat monster
39 This piece would later be split up per topic to avoid a big fat monster
40 function.
40 function.
41 """
41 """
42 jn = os.path.join
42 jn = os.path.join
43
43
44 global_settings_maker = SettingsMaker(global_config)
44 global_settings_maker = SettingsMaker(global_config)
45 global_settings_maker.make_setting('debug', default=False, parser='bool')
45 global_settings_maker.make_setting('debug', default=False, parser='bool')
46 debug_enabled = asbool(global_config.get('debug'))
46 debug_enabled = asbool(global_config.get('debug'))
47
47
48 settings_maker = SettingsMaker(settings)
48 settings_maker = SettingsMaker(settings)
49
49
50 settings_maker.make_setting(
50 settings_maker.make_setting(
51 'logging.autoconfigure',
51 'logging.autoconfigure',
52 default=False,
52 default=False,
53 parser='bool')
53 parser='bool')
54
54
55 logging_conf = jn(os.path.dirname(global_config.get('__file__')), 'logging.ini')
55 logging_conf = jn(os.path.dirname(global_config.get('__file__')), 'logging.ini')
56 settings_maker.enable_logging(logging_conf, level='INFO' if debug_enabled else 'DEBUG')
56 settings_maker.enable_logging(logging_conf, level='INFO' if debug_enabled else 'DEBUG')
57
57
58 # Default includes, possible to change as a user
58 # Default includes, possible to change as a user
59 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
59 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
60 log.debug(
60 log.debug(
61 "Using the following pyramid.includes: %s",
61 "Using the following pyramid.includes: %s",
62 pyramid_includes)
62 pyramid_includes)
63
63
64 settings_maker.make_setting('rhodecode.edition', 'Community Edition')
64 settings_maker.make_setting('rhodecode.edition', 'Community Edition')
65 settings_maker.make_setting('rhodecode.edition_id', 'CE')
65 settings_maker.make_setting('rhodecode.edition_id', 'CE')
66
66
67 if 'mako.default_filters' not in settings:
67 if 'mako.default_filters' not in settings:
68 # set custom default filters if we don't have it defined
68 # set custom default filters if we don't have it defined
69 settings['mako.imports'] = 'from rhodecode.lib.base import h_filter'
69 settings['mako.imports'] = 'from rhodecode.lib.base import h_filter'
70 settings['mako.default_filters'] = 'h_filter'
70 settings['mako.default_filters'] = 'h_filter'
71
71
72 if 'mako.directories' not in settings:
72 if 'mako.directories' not in settings:
73 mako_directories = settings.setdefault('mako.directories', [
73 mako_directories = settings.setdefault('mako.directories', [
74 # Base templates of the original application
74 # Base templates of the original application
75 'rhodecode:templates',
75 'rhodecode:templates',
76 ])
76 ])
77 log.debug(
77 log.debug(
78 "Using the following Mako template directories: %s",
78 "Using the following Mako template directories: %s",
79 mako_directories)
79 mako_directories)
80
80
81 # NOTE(marcink): fix redis requirement for schema of connection since 3.X
81 # NOTE(marcink): fix redis requirement for schema of connection since 3.X
82 if 'beaker.session.type' in settings and settings['beaker.session.type'] == 'ext:redis':
82 if 'beaker.session.type' in settings and settings['beaker.session.type'] == 'ext:redis':
83 raw_url = settings['beaker.session.url']
83 raw_url = settings['beaker.session.url']
84 if not raw_url.startswith(('redis://', 'rediss://', 'unix://')):
84 if not raw_url.startswith(('redis://', 'rediss://', 'unix://')):
85 settings['beaker.session.url'] = 'redis://' + raw_url
85 settings['beaker.session.url'] = 'redis://' + raw_url
86
86
87 settings_maker.make_setting('__file__', global_config.get('__file__'))
87 settings_maker.make_setting('__file__', global_config.get('__file__'))
88
88
89 # TODO: johbo: Re-think this, usually the call to config.include
89 # TODO: johbo: Re-think this, usually the call to config.include
90 # should allow to pass in a prefix.
90 # should allow to pass in a prefix.
91 settings_maker.make_setting('rhodecode.api.url', '/_admin/api')
91 settings_maker.make_setting('rhodecode.api.url', '/_admin/api')
92
92
93 # Sanitize generic settings.
93 # Sanitize generic settings.
94 settings_maker.make_setting('default_encoding', 'UTF-8', parser='list')
94 settings_maker.make_setting('default_encoding', 'UTF-8', parser='list')
95 settings_maker.make_setting('gzip_responses', False, parser='bool')
95 settings_maker.make_setting('gzip_responses', False, parser='bool')
96 settings_maker.make_setting('startup.import_repos', 'false', parser='bool')
96 settings_maker.make_setting('startup.import_repos', 'false', parser='bool')
97
97
98 # statsd
98 # statsd
99 settings_maker.make_setting('statsd.enabled', False, parser='bool')
99 settings_maker.make_setting('statsd.enabled', False, parser='bool')
100 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
100 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
101 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
101 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
102 settings_maker.make_setting('statsd.statsd_prefix', '')
102 settings_maker.make_setting('statsd.statsd_prefix', '')
103 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
103 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
104
104
105 settings_maker.make_setting('vcs.svn.compatible_version', '')
105 settings_maker.make_setting('vcs.svn.compatible_version', '')
106 settings_maker.make_setting('vcs.svn.proxy.enabled', True, parser='bool')
106 settings_maker.make_setting('vcs.svn.proxy.enabled', True, parser='bool')
107 settings_maker.make_setting('vcs.svn.proxy.host', 'http://svn:8090', parser='string')
107 settings_maker.make_setting('vcs.svn.proxy.host', 'http://svn:8090', parser='string')
108 settings_maker.make_setting('vcs.hooks.protocol', 'http')
108 settings_maker.make_setting('vcs.hooks.protocol', 'http')
109 settings_maker.make_setting('vcs.hooks.host', '*')
109 settings_maker.make_setting('vcs.hooks.host', '*')
110 settings_maker.make_setting('vcs.scm_app_implementation', 'http')
110 settings_maker.make_setting('vcs.scm_app_implementation', 'http')
111 settings_maker.make_setting('vcs.server', '')
111 settings_maker.make_setting('vcs.server', '')
112 settings_maker.make_setting('vcs.server.protocol', 'http')
112 settings_maker.make_setting('vcs.server.protocol', 'http')
113 settings_maker.make_setting('vcs.server.enable', 'true', parser='bool')
113 settings_maker.make_setting('vcs.server.enable', 'true', parser='bool')
114 settings_maker.make_setting('vcs.hooks.direct_calls', 'false', parser='bool')
114 settings_maker.make_setting('vcs.hooks.direct_calls', 'false', parser='bool')
115 settings_maker.make_setting('vcs.start_server', 'false', parser='bool')
115 settings_maker.make_setting('vcs.start_server', 'false', parser='bool')
116 settings_maker.make_setting('vcs.backends', 'hg, git, svn', parser='list')
116 settings_maker.make_setting('vcs.backends', 'hg, git, svn', parser='list')
117 settings_maker.make_setting('vcs.connection_timeout', 3600, parser='int')
117 settings_maker.make_setting('vcs.connection_timeout', 3600, parser='int')
118
118
119 settings_maker.make_setting('vcs.methods.cache', True, parser='bool')
119 settings_maker.make_setting('vcs.methods.cache', True, parser='bool')
120
120
121 # repo_store path
121 # repo_store path
122 settings_maker.make_setting('repo_store.path', '/var/opt/rhodecode_repo_store')
122 settings_maker.make_setting('repo_store.path', '/var/opt/rhodecode_repo_store')
123 # Support legacy values of vcs.scm_app_implementation. Legacy
123 # Support legacy values of vcs.scm_app_implementation. Legacy
124 # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http', or
124 # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http', or
125 # disabled since 4.13 'vcsserver.scm_app' which is now mapped to 'http'.
125 # disabled since 4.13 'vcsserver.scm_app' which is now mapped to 'http'.
126 scm_app_impl = settings['vcs.scm_app_implementation']
126 scm_app_impl = settings['vcs.scm_app_implementation']
127 if scm_app_impl in ['rhodecode.lib.middleware.utils.scm_app_http', 'vcsserver.scm_app']:
127 if scm_app_impl in ['rhodecode.lib.middleware.utils.scm_app_http', 'vcsserver.scm_app']:
128 settings['vcs.scm_app_implementation'] = 'http'
128 settings['vcs.scm_app_implementation'] = 'http'
129
129
130 settings_maker.make_setting('appenlight', False, parser='bool')
130 settings_maker.make_setting('appenlight', False, parser='bool')
131
131
132 temp_store = tempfile.gettempdir()
132 temp_store = tempfile.gettempdir()
133 tmp_cache_dir = jn(temp_store, 'rc_cache')
133 tmp_cache_dir = jn(temp_store, 'rc_cache')
134
134
135 # save default, cache dir, and use it for all backends later.
135 # save default, cache dir, and use it for all backends later.
136 default_cache_dir = settings_maker.make_setting(
136 default_cache_dir = settings_maker.make_setting(
137 'cache_dir',
137 'cache_dir',
138 default=tmp_cache_dir, default_when_empty=True,
138 default=tmp_cache_dir, default_when_empty=True,
139 parser='dir:ensured')
139 parser='dir:ensured')
140
140
141 # exception store cache
141 # exception store cache
142 settings_maker.make_setting(
142 settings_maker.make_setting(
143 'exception_tracker.store_path',
143 'exception_tracker.store_path',
144 default=jn(default_cache_dir, 'exc_store'), default_when_empty=True,
144 default=jn(default_cache_dir, 'exc_store'), default_when_empty=True,
145 parser='dir:ensured'
145 parser='dir:ensured'
146 )
146 )
147
147
148 settings_maker.make_setting(
148 settings_maker.make_setting(
149 'celerybeat-schedule.path',
149 'celerybeat-schedule.path',
150 default=jn(default_cache_dir, 'celerybeat_schedule', 'celerybeat-schedule.db'), default_when_empty=True,
150 default=jn(default_cache_dir, 'celerybeat_schedule', 'celerybeat-schedule.db'), default_when_empty=True,
151 parser='file:ensured'
151 parser='file:ensured'
152 )
152 )
153
153
154 settings_maker.make_setting('exception_tracker.send_email', False, parser='bool')
154 settings_maker.make_setting('exception_tracker.send_email', False, parser='bool')
155 settings_maker.make_setting('exception_tracker.email_prefix', '[RHODECODE ERROR]', default_when_empty=True)
155 settings_maker.make_setting('exception_tracker.email_prefix', '[RHODECODE ERROR]', default_when_empty=True)
156
156
157 # sessions, ensure file since no-value is memory
157 # sessions, ensure file since no-value is memory
158 settings_maker.make_setting('beaker.session.type', 'file')
158 settings_maker.make_setting('beaker.session.type', 'file')
159 settings_maker.make_setting('beaker.session.data_dir', jn(default_cache_dir, 'session_data'))
159 settings_maker.make_setting('beaker.session.data_dir', jn(default_cache_dir, 'session_data'))
160
160
161 # cache_general
161 # cache_general
162 settings_maker.make_setting('rc_cache.cache_general.backend', 'dogpile.cache.rc.file_namespace')
162 settings_maker.make_setting('rc_cache.cache_general.backend', 'dogpile.cache.rc.file_namespace')
163 settings_maker.make_setting('rc_cache.cache_general.expiration_time', 60 * 60 * 12, parser='int')
163 settings_maker.make_setting('rc_cache.cache_general.expiration_time', 60 * 60 * 12, parser='int')
164 settings_maker.make_setting('rc_cache.cache_general.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_general.db'))
164 settings_maker.make_setting('rc_cache.cache_general.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_general.db'))
165
165
166 # cache_perms
166 # cache_perms
167 settings_maker.make_setting('rc_cache.cache_perms.backend', 'dogpile.cache.rc.file_namespace')
167 settings_maker.make_setting('rc_cache.cache_perms.backend', 'dogpile.cache.rc.file_namespace')
168 settings_maker.make_setting('rc_cache.cache_perms.expiration_time', 60 * 60, parser='int')
168 settings_maker.make_setting('rc_cache.cache_perms.expiration_time', 60 * 60, parser='int')
169 settings_maker.make_setting('rc_cache.cache_perms.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_perms_db'))
169 settings_maker.make_setting('rc_cache.cache_perms.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_perms_db'))
170
170
171 # cache_repo
171 # cache_repo
172 settings_maker.make_setting('rc_cache.cache_repo.backend', 'dogpile.cache.rc.file_namespace')
172 settings_maker.make_setting('rc_cache.cache_repo.backend', 'dogpile.cache.rc.file_namespace')
173 settings_maker.make_setting('rc_cache.cache_repo.expiration_time', 60 * 60 * 24 * 30, parser='int')
173 settings_maker.make_setting('rc_cache.cache_repo.expiration_time', 60 * 60 * 24 * 30, parser='int')
174 settings_maker.make_setting('rc_cache.cache_repo.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_repo_db'))
174 settings_maker.make_setting('rc_cache.cache_repo.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_repo_db'))
175
175
176 # cache_license
176 # cache_license
177 settings_maker.make_setting('rc_cache.cache_license.backend', 'dogpile.cache.rc.file_namespace')
177 settings_maker.make_setting('rc_cache.cache_license.backend', 'dogpile.cache.rc.file_namespace')
178 settings_maker.make_setting('rc_cache.cache_license.expiration_time', 60 * 5, parser='int')
178 settings_maker.make_setting('rc_cache.cache_license.expiration_time', 60 * 5, parser='int')
179 settings_maker.make_setting('rc_cache.cache_license.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_license_db'))
179 settings_maker.make_setting('rc_cache.cache_license.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_license_db'))
180
180
181 # cache_repo_longterm memory, 96H
181 # cache_repo_longterm memory, 96H
182 settings_maker.make_setting('rc_cache.cache_repo_longterm.backend', 'dogpile.cache.rc.memory_lru')
182 settings_maker.make_setting('rc_cache.cache_repo_longterm.backend', 'dogpile.cache.rc.memory_lru')
183 settings_maker.make_setting('rc_cache.cache_repo_longterm.expiration_time', 345600, parser='int')
183 settings_maker.make_setting('rc_cache.cache_repo_longterm.expiration_time', 345600, parser='int')
184 settings_maker.make_setting('rc_cache.cache_repo_longterm.max_size', 10000, parser='int')
184 settings_maker.make_setting('rc_cache.cache_repo_longterm.max_size', 10000, parser='int')
185
185
186 # sql_cache_short
186 # sql_cache_short
187 settings_maker.make_setting('rc_cache.sql_cache_short.backend', 'dogpile.cache.rc.memory_lru')
187 settings_maker.make_setting('rc_cache.sql_cache_short.backend', 'dogpile.cache.rc.memory_lru')
188 settings_maker.make_setting('rc_cache.sql_cache_short.expiration_time', 30, parser='int')
188 settings_maker.make_setting('rc_cache.sql_cache_short.expiration_time', 30, parser='int')
189 settings_maker.make_setting('rc_cache.sql_cache_short.max_size', 10000, parser='int')
189 settings_maker.make_setting('rc_cache.sql_cache_short.max_size', 10000, parser='int')
190
190
191 # archive_cache
191 # archive_cache
192 settings_maker.make_setting('archive_cache.locking.url', 'redis://redis:6379/1')
192 settings_maker.make_setting('archive_cache.locking.url', 'redis://redis:6379/1')
193 settings_maker.make_setting('archive_cache.backend.type', 'filesystem')
193 settings_maker.make_setting('archive_cache.backend.type', 'filesystem')
194
194
195 settings_maker.make_setting('archive_cache.filesystem.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
195 settings_maker.make_setting('archive_cache.filesystem.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
196 settings_maker.make_setting('archive_cache.filesystem.cache_shards', 8, parser='int')
196 settings_maker.make_setting('archive_cache.filesystem.cache_shards', 8, parser='int')
197 settings_maker.make_setting('archive_cache.filesystem.cache_size_gb', 10, parser='float')
197 settings_maker.make_setting('archive_cache.filesystem.cache_size_gb', 10, parser='float')
198 settings_maker.make_setting('archive_cache.filesystem.eviction_policy', 'least-recently-stored')
198 settings_maker.make_setting('archive_cache.filesystem.eviction_policy', 'least-recently-stored')
199
199
200 settings_maker.make_setting('archive_cache.filesystem.retry', False, parser='bool')
200 settings_maker.make_setting('archive_cache.filesystem.retry', False, parser='bool')
201 settings_maker.make_setting('archive_cache.filesystem.retry_backoff', 1, parser='int')
201 settings_maker.make_setting('archive_cache.filesystem.retry_backoff', 1, parser='int')
202 settings_maker.make_setting('archive_cache.filesystem.retry_attempts', 10, parser='int')
202 settings_maker.make_setting('archive_cache.filesystem.retry_attempts', 10, parser='int')
203
203
204 settings_maker.make_setting('archive_cache.objectstore.url', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
204 settings_maker.make_setting('archive_cache.objectstore.url', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
205 settings_maker.make_setting('archive_cache.objectstore.key', '')
205 settings_maker.make_setting('archive_cache.objectstore.key', '')
206 settings_maker.make_setting('archive_cache.objectstore.secret', '')
206 settings_maker.make_setting('archive_cache.objectstore.secret', '')
207 settings_maker.make_setting('archive_cache.objectstore.bucket_root', 'rhodecode-archive-cache')
207 settings_maker.make_setting('archive_cache.objectstore.bucket', 'rhodecode-archive-cache', default_when_empty=True,)
208 settings_maker.make_setting('archive_cache.objectstore.bucket_shards', 8, parser='int')
208 settings_maker.make_setting('archive_cache.objectstore.bucket_shards', 8, parser='int')
209
209
210 settings_maker.make_setting('archive_cache.objectstore.cache_size_gb', 10, parser='float')
210 settings_maker.make_setting('archive_cache.objectstore.cache_size_gb', 10, parser='float')
211 settings_maker.make_setting('archive_cache.objectstore.eviction_policy', 'least-recently-stored')
211 settings_maker.make_setting('archive_cache.objectstore.eviction_policy', 'least-recently-stored')
212
212
213 settings_maker.make_setting('archive_cache.objectstore.retry', False, parser='bool')
213 settings_maker.make_setting('archive_cache.objectstore.retry', False, parser='bool')
214 settings_maker.make_setting('archive_cache.objectstore.retry_backoff', 1, parser='int')
214 settings_maker.make_setting('archive_cache.objectstore.retry_backoff', 1, parser='int')
215 settings_maker.make_setting('archive_cache.objectstore.retry_attempts', 10, parser='int')
215 settings_maker.make_setting('archive_cache.objectstore.retry_attempts', 10, parser='int')
216
216
217 settings_maker.env_expand()
217 settings_maker.env_expand()
218
218
219 # configure instance id
219 # configure instance id
220 config_utils.set_instance_id(settings)
220 config_utils.set_instance_id(settings)
221
221
222 return settings
222 return settings
@@ -1,352 +1,355 b''
1 # Copyright (C) 2015-2024 RhodeCode GmbH
1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 #
2 #
3 # This program is free software: you can redistribute it and/or modify
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
5 # (only), as published by the Free Software Foundation.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU Affero General Public License
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
14 #
15 # This program is dual-licensed. If you wish to learn more about the
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
18
19 import os
19 import os
20 import functools
20 import functools
21 import logging
21 import logging
22 import typing
22 import typing
23 import time
23 import time
24 import zlib
24 import zlib
25
25
26 from ...ext_json import json
26 from ...ext_json import json
27 from ..utils import StatsDB, NOT_GIVEN, ShardFileReader, EVICTION_POLICY, format_size
27 from ..utils import StatsDB, NOT_GIVEN, ShardFileReader, EVICTION_POLICY, format_size
28 from ..lock import GenerationLock
28 from ..lock import GenerationLock
29
29
30 log = logging.getLogger(__name__)
30 log = logging.getLogger(__name__)
31
31
32
32
33 class BaseShard:
33 class BaseShard:
34 storage_type: str = ''
34 storage_type: str = ''
35 fs = None
35 fs = None
36
36
37 @classmethod
37 @classmethod
38 def hash(cls, key):
38 def hash(cls, key):
39 """Compute portable hash for `key`.
39 """Compute portable hash for `key`.
40
40
41 :param key: key to hash
41 :param key: key to hash
42 :return: hash value
42 :return: hash value
43
43
44 """
44 """
45 mask = 0xFFFFFFFF
45 mask = 0xFFFFFFFF
46 return zlib.adler32(key.encode('utf-8')) & mask # noqa
46 return zlib.adler32(key.encode('utf-8')) & mask # noqa
47
47
48 def _write_file(self, full_path, read_iterator, mode):
48 def _write_file(self, full_path, read_iterator, mode):
49 raise NotImplementedError
49 raise NotImplementedError
50
50
51 def _get_keyfile(self, key):
51 def _get_keyfile(self, key):
52 raise NotImplementedError
52 raise NotImplementedError
53
53
54 def random_filename(self):
54 def random_filename(self):
55 raise NotImplementedError
55 raise NotImplementedError
56
56
57 def _store(self, key, value_reader, metadata, mode):
57 def _store(self, key, value_reader, metadata, mode):
58 (filename, # hash-name
58 (filename, # hash-name
59 full_path # full-path/hash-name
59 full_path # full-path/hash-name
60 ) = self.random_filename()
60 ) = self.random_filename()
61
61
62 key_file, key_file_path = self._get_keyfile(key)
62 key_file, key_file_path = self._get_keyfile(key)
63
63
64 # STORE METADATA
64 # STORE METADATA
65 _metadata = {
65 _metadata = {
66 "version": "v1",
66 "version": "v1",
67
67
68 "key_file": key_file, # this is the .key.json file storing meta
68 "key_file": key_file, # this is the .key.json file storing meta
69 "key_file_path": key_file_path, # full path to key_file
69 "key_file_path": key_file_path, # full path to key_file
70 "archive_key": key, # original name we stored archive under, e.g my-archive.zip
70 "archive_key": key, # original name we stored archive under, e.g my-archive.zip
71 "archive_filename": filename, # the actual filename we stored that file under
71 "archive_filename": filename, # the actual filename we stored that file under
72 "archive_full_path": full_path,
72 "archive_full_path": full_path,
73
73
74 "store_time": time.time(),
74 "store_time": time.time(),
75 "access_count": 0,
75 "access_count": 0,
76 "access_time": 0,
76 "access_time": 0,
77
77
78 "size": 0
78 "size": 0
79 }
79 }
80 if metadata:
80 if metadata:
81 _metadata.update(metadata)
81 _metadata.update(metadata)
82
82
83 read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
83 read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
84 size, sha256 = self._write_file(full_path, read_iterator, mode)
84 size, sha256 = self._write_file(full_path, read_iterator, mode)
85 _metadata['size'] = size
85 _metadata['size'] = size
86 _metadata['sha256'] = sha256
86 _metadata['sha256'] = sha256
87
87
88 # after archive is finished, we create a key to save the presence of the binary file
88 # after archive is finished, we create a key to save the presence of the binary file
89 with self.fs.open(key_file_path, 'wb') as f:
89 with self.fs.open(key_file_path, 'wb') as f:
90 f.write(json.dumps(_metadata))
90 f.write(json.dumps(_metadata))
91
91
92 return key, filename, size, _metadata
92 return key, filename, size, _metadata
93
93
94 def _fetch(self, key, retry, retry_attempts, retry_backoff):
94 def _fetch(self, key, retry, retry_attempts, retry_backoff):
95 if retry is NOT_GIVEN:
95 if retry is NOT_GIVEN:
96 retry = False
96 retry = False
97 if retry_attempts is NOT_GIVEN:
97 if retry_attempts is NOT_GIVEN:
98 retry_attempts = 0
98 retry_attempts = 0
99
99
100 if retry and retry_attempts > 0:
100 if retry and retry_attempts > 0:
101 for attempt in range(1, retry_attempts + 1):
101 for attempt in range(1, retry_attempts + 1):
102 if key in self:
102 if key in self:
103 break
103 break
104 # we didn't find the key, wait retry_backoff N seconds, and re-check
104 # we didn't find the key, wait retry_backoff N seconds, and re-check
105 time.sleep(retry_backoff)
105 time.sleep(retry_backoff)
106
106
107 if key not in self:
107 if key not in self:
108 log.exception(f'requested key={key} not found in {self} retry={retry}, attempts={retry_attempts}')
108 log.exception(f'requested key={key} not found in {self} retry={retry}, attempts={retry_attempts}')
109 raise KeyError(key)
109 raise KeyError(key)
110
110
111 key_file, key_file_path = self._get_keyfile(key)
111 key_file, key_file_path = self._get_keyfile(key)
112 with self.fs.open(key_file_path, 'rb') as f:
112 with self.fs.open(key_file_path, 'rb') as f:
113 metadata = json.loads(f.read())
113 metadata = json.loads(f.read())
114
114
115 archive_path = metadata['archive_full_path']
115 archive_path = metadata['archive_full_path']
116
116
117 try:
117 try:
118 return ShardFileReader(self.fs.open(archive_path, 'rb')), metadata
118 return ShardFileReader(self.fs.open(archive_path, 'rb')), metadata
119 finally:
119 finally:
120 # update usage stats, count and accessed
120 # update usage stats, count and accessed
121 metadata["access_count"] = metadata.get("access_count", 0) + 1
121 metadata["access_count"] = metadata.get("access_count", 0) + 1
122 metadata["access_time"] = time.time()
122 metadata["access_time"] = time.time()
123 log.debug('Updated %s with access snapshot, access_count=%s access_time=%s',
123 log.debug('Updated %s with access snapshot, access_count=%s access_time=%s',
124 key_file, metadata['access_count'], metadata['access_time'])
124 key_file, metadata['access_count'], metadata['access_time'])
125 with self.fs.open(key_file_path, 'wb') as f:
125 with self.fs.open(key_file_path, 'wb') as f:
126 f.write(json.dumps(metadata))
126 f.write(json.dumps(metadata))
127
127
128 def _remove(self, key):
128 def _remove(self, key):
129 if key not in self:
129 if key not in self:
130 log.exception(f'requested key={key} not found in {self}')
130 log.exception(f'requested key={key} not found in {self}')
131 raise KeyError(key)
131 raise KeyError(key)
132
132
133 key_file, key_file_path = self._get_keyfile(key)
133 key_file, key_file_path = self._get_keyfile(key)
134 with self.fs.open(key_file_path, 'rb') as f:
134 with self.fs.open(key_file_path, 'rb') as f:
135 metadata = json.loads(f.read())
135 metadata = json.loads(f.read())
136
136
137 archive_path = metadata['archive_full_path']
137 archive_path = metadata['archive_full_path']
138 self.fs.rm(archive_path)
138 self.fs.rm(archive_path)
139 self.fs.rm(key_file_path)
139 self.fs.rm(key_file_path)
140 return 1
140 return 1
141
141
142 @property
142 @property
143 def storage_medium(self):
143 def storage_medium(self):
144 return getattr(self, self.storage_type)
144 return getattr(self, self.storage_type)
145
145
146 @property
146 @property
147 def key_suffix(self):
147 def key_suffix(self):
148 return 'key.json'
148 return 'key.json'
149
149
150 def __contains__(self, key):
150 def __contains__(self, key):
151 """Return `True` if `key` matching item is found in cache.
151 """Return `True` if `key` matching item is found in cache.
152
152
153 :param key: key matching item
153 :param key: key matching item
154 :return: True if key matching item
154 :return: True if key matching item
155
155
156 """
156 """
157 key_file, key_file_path = self._get_keyfile(key)
157 key_file, key_file_path = self._get_keyfile(key)
158 return self.fs.exists(key_file_path)
158 return self.fs.exists(key_file_path)
159
159
160
160
161 class BaseCache:
161 class BaseCache:
162 _locking_url: str = ''
162 _locking_url: str = ''
163 _storage_path: str = ''
163 _storage_path: str = ''
164 _config = {}
164 _config = {}
165 retry = False
165 retry = False
166 retry_attempts = 0
166 retry_attempts = 0
167 retry_backoff = 1
167 retry_backoff = 1
168 _shards = tuple()
168 _shards = tuple()
169 shard_cls = BaseShard
169
170
170 def __contains__(self, key):
171 def __contains__(self, key):
171 """Return `True` if `key` matching item is found in cache.
172 """Return `True` if `key` matching item is found in cache.
172
173
173 :param key: key matching item
174 :param key: key matching item
174 :return: True if key matching item
175 :return: True if key matching item
175
176
176 """
177 """
177 return self.has_key(key)
178 return self.has_key(key)
178
179
179 def __repr__(self):
180 def __repr__(self):
180 return f'<{self.__class__.__name__}(storage={self._storage_path})>'
181 return f'<{self.__class__.__name__}(storage={self._storage_path})>'
181
182
182 @classmethod
183 @classmethod
183 def gb_to_bytes(cls, gb):
184 def gb_to_bytes(cls, gb):
184 return gb * (1024 ** 3)
185 return gb * (1024 ** 3)
185
186
186 @property
187 @property
187 def storage_path(self):
188 def storage_path(self):
188 return self._storage_path
189 return self._storage_path
189
190
190 @classmethod
191 @classmethod
191 def get_stats_db(cls):
192 def get_stats_db(cls):
192 return StatsDB()
193 return StatsDB()
193
194
194 def get_conf(self, key, pop=False):
195 def get_conf(self, key, pop=False):
195 if key not in self._config:
196 if key not in self._config:
196 raise ValueError(f"No configuration key '{key}', please make sure it exists in archive_cache config")
197 raise ValueError(f"No configuration key '{key}', please make sure it exists in archive_cache config")
197 val = self._config[key]
198 val = self._config[key]
198 if pop:
199 if pop:
199 del self._config[key]
200 del self._config[key]
200 return val
201 return val
201
202
202 def _get_shard(self, key):
203 def _get_shard(self, key) -> shard_cls:
203 raise NotImplementedError
204 index = self._hash(key) % self._shard_count
205 shard = self._shards[index]
206 return shard
204
207
205 def _get_size(self, shard, archive_path):
208 def _get_size(self, shard, archive_path):
206 raise NotImplementedError
209 raise NotImplementedError
207
210
208 def store(self, key, value_reader, metadata=None):
211 def store(self, key, value_reader, metadata=None):
209 shard = self._get_shard(key)
212 shard = self._get_shard(key)
210 return shard.store(key, value_reader, metadata)
213 return shard.store(key, value_reader, metadata)
211
214
212 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN) -> tuple[typing.BinaryIO, dict]:
215 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN) -> tuple[typing.BinaryIO, dict]:
213 """
216 """
214 Return file handle corresponding to `key` from specific shard cache.
217 Return file handle corresponding to `key` from specific shard cache.
215 """
218 """
216 if retry is NOT_GIVEN:
219 if retry is NOT_GIVEN:
217 retry = self.retry
220 retry = self.retry
218 if retry_attempts is NOT_GIVEN:
221 if retry_attempts is NOT_GIVEN:
219 retry_attempts = self.retry_attempts
222 retry_attempts = self.retry_attempts
220 retry_backoff = self.retry_backoff
223 retry_backoff = self.retry_backoff
221
224
222 shard = self._get_shard(key)
225 shard = self._get_shard(key)
223 return shard.fetch(key, retry=retry, retry_attempts=retry_attempts, retry_backoff=retry_backoff)
226 return shard.fetch(key, retry=retry, retry_attempts=retry_attempts, retry_backoff=retry_backoff)
224
227
225 def remove(self, key):
228 def remove(self, key):
226 shard = self._get_shard(key)
229 shard = self._get_shard(key)
227 return shard.remove(key)
230 return shard.remove(key)
228
231
229 def has_key(self, archive_key):
232 def has_key(self, archive_key):
230 """Return `True` if `key` matching item is found in cache.
233 """Return `True` if `key` matching item is found in cache.
231
234
232 :param archive_key: key for item, this is a unique archive name we want to store data under. e.g my-archive-svn.zip
235 :param archive_key: key for item, this is a unique archive name we want to store data under. e.g my-archive-svn.zip
233 :return: True if key is found
236 :return: True if key is found
234
237
235 """
238 """
236 shard = self._get_shard(archive_key)
239 shard = self._get_shard(archive_key)
237 return archive_key in shard
240 return archive_key in shard
238
241
239 def iter_keys(self):
242 def iter_keys(self):
240 for shard in self._shards:
243 for shard in self._shards:
241 if shard.fs.exists(shard.storage_medium):
244 if shard.fs.exists(shard.storage_medium):
242 for path, _dirs, _files in shard.fs.walk(shard.storage_medium):
245 for path, _dirs, _files in shard.fs.walk(shard.storage_medium):
243 for key_file_path in _files:
246 for key_file_path in _files:
244 if key_file_path.endswith(shard.key_suffix):
247 if key_file_path.endswith(shard.key_suffix):
245 yield shard, key_file_path
248 yield shard, key_file_path
246
249
247 def get_lock(self, lock_key):
250 def get_lock(self, lock_key):
248 return GenerationLock(lock_key, self._locking_url)
251 return GenerationLock(lock_key, self._locking_url)
249
252
250 def evict(self, policy=None, size_limit=None) -> dict:
253 def evict(self, policy=None, size_limit=None) -> dict:
251 """
254 """
252 Remove old items based on the conditions
255 Remove old items based on the conditions
253
256
254
257
255 explanation of this algo:
258 explanation of this algo:
256 iterate over each shard, then for each shard iterate over the .key files
259 iterate over each shard, then for each shard iterate over the .key files
257 read the key files metadata stored. This gives us a full list of keys, cached_archived, their size and
260 read the key files metadata stored. This gives us a full list of keys, cached_archived, their size and
258 access data, time creation, and access counts.
261 access data, time creation, and access counts.
259
262
260 Store that into a memory DB in order we can run different sorting strategies easily.
263 Store that into a memory DB in order we can run different sorting strategies easily.
261 Summing the size is a sum sql query.
264 Summing the size is a sum sql query.
262
265
263 Then we run a sorting strategy based on eviction policy.
266 Then we run a sorting strategy based on eviction policy.
264 We iterate over sorted keys, and remove each checking if we hit the overall limit.
267 We iterate over sorted keys, and remove each checking if we hit the overall limit.
265 """
268 """
266 removal_info = {
269 removal_info = {
267 "removed_items": 0,
270 "removed_items": 0,
268 "removed_size": 0
271 "removed_size": 0
269 }
272 }
270 policy = policy or self._eviction_policy
273 policy = policy or self._eviction_policy
271 size_limit = size_limit or self._cache_size_limit
274 size_limit = size_limit or self._cache_size_limit
272
275
273 select_policy = EVICTION_POLICY[policy]['evict']
276 select_policy = EVICTION_POLICY[policy]['evict']
274
277
275 log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
278 log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
276 policy, format_size(size_limit))
279 policy, format_size(size_limit))
277
280
278 if select_policy is None:
281 if select_policy is None:
279 return removal_info
282 return removal_info
280
283
281 db = self.get_stats_db()
284 db = self.get_stats_db()
282
285
283 data = []
286 data = []
284 cnt = 1
287 cnt = 1
285
288
286 for shard, key_file in self.iter_keys():
289 for shard, key_file in self.iter_keys():
287 with shard.fs.open(os.path.join(shard.storage_medium, key_file), 'rb') as f:
290 with shard.fs.open(os.path.join(shard.storage_medium, key_file), 'rb') as f:
288 metadata = json.loads(f.read())
291 metadata = json.loads(f.read())
289
292
290 key_file_path = os.path.join(shard.storage_medium, key_file)
293 key_file_path = os.path.join(shard.storage_medium, key_file)
291
294
292 archive_key = metadata['archive_key']
295 archive_key = metadata['archive_key']
293 archive_path = metadata['archive_full_path']
296 archive_path = metadata['archive_full_path']
294
297
295 size = metadata.get('size')
298 size = metadata.get('size')
296 if not size:
299 if not size:
297 # in case we don't have size re-calc it...
300 # in case we don't have size re-calc it...
298 size = self._get_size(shard, archive_path)
301 size = self._get_size(shard, archive_path)
299
302
300 data.append([
303 data.append([
301 cnt,
304 cnt,
302 key_file,
305 key_file,
303 key_file_path,
306 key_file_path,
304 archive_key,
307 archive_key,
305 archive_path,
308 archive_path,
306 metadata.get('store_time', 0),
309 metadata.get('store_time', 0),
307 metadata.get('access_time', 0),
310 metadata.get('access_time', 0),
308 metadata.get('access_count', 0),
311 metadata.get('access_count', 0),
309 size,
312 size,
310 ])
313 ])
311 cnt += 1
314 cnt += 1
312
315
313 # Insert bulk data using executemany
316 # Insert bulk data using executemany
314 db.bulk_insert(data)
317 db.bulk_insert(data)
315
318
316 total_size = db.get_total_size()
319 total_size = db.get_total_size()
317 log.debug('Analyzed %s keys, occupying: %s, running eviction to match %s',
320 log.debug('Analyzed %s keys, occupying: %s, running eviction to match %s',
318 len(data), format_size(total_size), format_size(size_limit))
321 len(data), format_size(total_size), format_size(size_limit))
319
322
320 removed_items = 0
323 removed_items = 0
321 removed_size = 0
324 removed_size = 0
322 for key_file, archive_key, size in db.get_sorted_keys(select_policy):
325 for key_file, archive_key, size in db.get_sorted_keys(select_policy):
323 # simulate removal impact BEFORE removal
326 # simulate removal impact BEFORE removal
324 total_size -= size
327 total_size -= size
325
328
326 if total_size <= size_limit:
329 if total_size <= size_limit:
327 # we obtained what we wanted...
330 # we obtained what we wanted...
328 break
331 break
329
332
330 self.remove(archive_key)
333 self.remove(archive_key)
331 removed_items += 1
334 removed_items += 1
332 removed_size += size
335 removed_size += size
333 removal_info['removed_items'] = removed_items
336 removal_info['removed_items'] = removed_items
334 removal_info['removed_size'] = removed_size
337 removal_info['removed_size'] = removed_size
335 log.debug('Removed %s cache archives, and reduced size by: %s',
338 log.debug('Removed %s cache archives, and reduced size by: %s',
336 removed_items, format_size(removed_size))
339 removed_items, format_size(removed_size))
337 return removal_info
340 return removal_info
338
341
339 def get_statistics(self):
342 def get_statistics(self):
340 total_files = 0
343 total_files = 0
341 total_size = 0
344 total_size = 0
342 meta = {}
345 meta = {}
343
346
344 for shard, key_file in self.iter_keys():
347 for shard, key_file in self.iter_keys():
345 json_key = f"{shard.storage_medium}/{key_file}"
348 json_key = f"{shard.storage_medium}/{key_file}"
346 with shard.fs.open(json_key, 'rb') as f:
349 with shard.fs.open(json_key, 'rb') as f:
347 total_files += 1
350 total_files += 1
348 metadata = json.loads(f.read())
351 metadata = json.loads(f.read())
349 total_size += metadata['size']
352 total_size += metadata['size']
350
353
351 return total_files, total_size, meta
354 return total_files, total_size, meta
352
355
@@ -1,167 +1,174 b''
1 # Copyright (C) 2015-2024 RhodeCode GmbH
1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 #
2 #
3 # This program is free software: you can redistribute it and/or modify
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
5 # (only), as published by the Free Software Foundation.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU Affero General Public License
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
14 #
15 # This program is dual-licensed. If you wish to learn more about the
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
18
19 import codecs
19 import codecs
20 import hashlib
20 import hashlib
21 import logging
21 import logging
22 import os
22 import os
23
23
24 import fsspec
24 import fsspec
25
25
26 from .base import BaseCache, BaseShard
26 from .base import BaseCache, BaseShard
27 from ..utils import ShardFileReader, NOT_GIVEN
27 from ..utils import ShardFileReader, NOT_GIVEN
28 from ...type_utils import str2bool
28 from ...type_utils import str2bool
29
29
30 log = logging.getLogger(__name__)
30 log = logging.getLogger(__name__)
31
31
32
32
33 class FileSystemShard(BaseShard):
33 class FileSystemShard(BaseShard):
34
34
35 def __init__(self, index, directory, **settings):
35 def __init__(self, index, directory, directory_folder, fs, **settings):
36 self._index = index
36 self._index = index
37 self._directory = directory
37 self._directory = directory
38 self._directory_folder = directory_folder
38 self.storage_type = 'directory'
39 self.storage_type = 'directory'
39 self.fs = fsspec.filesystem('file')
40
41 self.fs = fs
40
42
41 @property
43 @property
42 def directory(self):
44 def directory(self):
43 """Cache directory."""
45 """Cache directory final path."""
44 return self._directory
46 return os.path.join(self._directory, self._directory_folder)
45
47
46 def _get_keyfile(self, archive_key) -> tuple[str, str]:
48 def _get_keyfile(self, archive_key) -> tuple[str, str]:
47 key_file = f'{archive_key}.{self.key_suffix}'
49 key_file = f'{archive_key}.{self.key_suffix}'
48 return key_file, os.path.join(self.directory, key_file)
50 return key_file, os.path.join(self.directory, key_file)
49
51
50 def _get_writer(self, path, mode):
52 def _get_writer(self, path, mode):
51 for count in range(1, 11):
53 for count in range(1, 11):
52 try:
54 try:
53 # Another cache may have deleted the directory before
55 # Another cache may have deleted the directory before
54 # the file could be opened.
56 # the file could be opened.
55 return self.fs.open(path, mode)
57 return self.fs.open(path, mode)
56 except OSError:
58 except OSError:
57 if count == 10:
59 if count == 10:
58 # Give up after 10 tries to open the file.
60 # Give up after 10 tries to open the file.
59 raise
61 raise
60 continue
62 continue
61
63
62 def _write_file(self, full_path, iterator, mode):
64 def _write_file(self, full_path, iterator, mode):
63 # ensure dir exists
65 # ensure dir exists
64 destination, _ = os.path.split(full_path)
66 destination, _ = os.path.split(full_path)
65 if not self.fs.exists(destination):
67 if not self.fs.exists(destination):
66 self.fs.makedirs(destination)
68 self.fs.makedirs(destination)
67
69
68 writer = self._get_writer(full_path, mode)
70 writer = self._get_writer(full_path, mode)
69
71
70 digest = hashlib.sha256()
72 digest = hashlib.sha256()
71 with writer:
73 with writer:
72 size = 0
74 size = 0
73 for chunk in iterator:
75 for chunk in iterator:
74 size += len(chunk)
76 size += len(chunk)
75 digest.update(chunk)
77 digest.update(chunk)
76 writer.write(chunk)
78 writer.write(chunk)
77 writer.flush()
79 writer.flush()
78 # Get the file descriptor
80 # Get the file descriptor
79 fd = writer.fileno()
81 fd = writer.fileno()
80
82
81 # Sync the file descriptor to disk, helps with NFS cases...
83 # Sync the file descriptor to disk, helps with NFS cases...
82 os.fsync(fd)
84 os.fsync(fd)
83 sha256 = digest.hexdigest()
85 sha256 = digest.hexdigest()
84 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
86 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
85 return size, sha256
87 return size, sha256
86
88
87 def store(self, key, value_reader, metadata: dict | None = None):
89 def store(self, key, value_reader, metadata: dict | None = None):
88 return self._store(key, value_reader, metadata, mode='xb')
90 return self._store(key, value_reader, metadata, mode='xb')
89
91
90 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
92 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
91 return self._fetch(key, retry, retry_attempts, retry_backoff)
93 return self._fetch(key, retry, retry_attempts, retry_backoff)
92
94
93 def remove(self, key):
95 def remove(self, key):
94 return self._remove(key)
96 return self._remove(key)
95
97
96 def random_filename(self):
98 def random_filename(self):
97 """Return filename and full-path tuple for file storage.
99 """Return filename and full-path tuple for file storage.
98
100
99 Filename will be a randomly generated 28 character hexadecimal string
101 Filename will be a randomly generated 28 character hexadecimal string
100 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
102 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
101 reduce the size of directories. On older filesystems, lookups in
103 reduce the size of directories. On older filesystems, lookups in
102 directories with many files may be slow.
104 directories with many files may be slow.
103 """
105 """
104
106
105 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
107 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
106
108
107 archive_name = hex_name[4:] + '.archive_cache'
109 archive_name = hex_name[4:] + '.archive_cache'
108 filename = f"{hex_name[:2]}/{hex_name[2:4]}/{archive_name}"
110 filename = f"{hex_name[:2]}/{hex_name[2:4]}/{archive_name}"
109
111
110 full_path = os.path.join(self.directory, filename)
112 full_path = os.path.join(self.directory, filename)
111 return archive_name, full_path
113 return archive_name, full_path
112
114
113 def __repr__(self):
115 def __repr__(self):
114 return f'{self.__class__.__name__}(index={self._index}, dir={self.directory})'
116 return f'{self.__class__.__name__}(index={self._index}, dir={self.directory})'
115
117
116
118
117 class FileSystemFanoutCache(BaseCache):
119 class FileSystemFanoutCache(BaseCache):
118 shard_name = 'shard_%03d'
120 shard_name = 'shard_%03d'
121 shard_cls = FileSystemShard
119
122
120 def __init__(self, locking_url, **settings):
123 def __init__(self, locking_url, **settings):
121 """
124 """
122 Initialize file system cache instance.
125 Initialize file system cache instance.
123
126
124 :param str locking_url: redis url for a lock
127 :param str locking_url: redis url for a lock
125 :param settings: settings dict
128 :param settings: settings dict
126
129
127 """
130 """
128 self._locking_url = locking_url
131 self._locking_url = locking_url
129 self._config = settings
132 self._config = settings
130 cache_dir = self.get_conf('archive_cache.filesystem.store_dir')
133 cache_dir = self.get_conf('archive_cache.filesystem.store_dir')
131 directory = str(cache_dir)
134 directory = str(cache_dir)
132 directory = os.path.expanduser(directory)
135 directory = os.path.expanduser(directory)
133 directory = os.path.expandvars(directory)
136 directory = os.path.expandvars(directory)
134 self._directory = directory
137 self._directory = directory
135 self._storage_path = directory
138 self._storage_path = directory # common path for all from BaseCache
136
139
137 # check if it's ok to write, and re-create the archive cache
140 self._shard_count = int(self.get_conf('archive_cache.filesystem.cache_shards', pop=True))
138 if not os.path.isdir(self._directory):
141 if self._shard_count < 1:
139 os.makedirs(self._directory, exist_ok=True)
142 raise ValueError('cache_shards must be 1 or more')
140
141 self._count = int(self.get_conf('archive_cache.filesystem.cache_shards', pop=True))
142
143
143 self._eviction_policy = self.get_conf('archive_cache.filesystem.eviction_policy', pop=True)
144 self._eviction_policy = self.get_conf('archive_cache.filesystem.eviction_policy', pop=True)
144 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.filesystem.cache_size_gb')))
145 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.filesystem.cache_size_gb')))
145
146
146 self.retry = str2bool(self.get_conf('archive_cache.filesystem.retry', pop=True))
147 self.retry = str2bool(self.get_conf('archive_cache.filesystem.retry', pop=True))
147 self.retry_attempts = int(self.get_conf('archive_cache.filesystem.retry_attempts', pop=True))
148 self.retry_attempts = int(self.get_conf('archive_cache.filesystem.retry_attempts', pop=True))
148 self.retry_backoff = int(self.get_conf('archive_cache.filesystem.retry_backoff', pop=True))
149 self.retry_backoff = int(self.get_conf('archive_cache.filesystem.retry_backoff', pop=True))
149
150
150 log.debug('Initializing archival cache instance under %s', self._directory)
151 log.debug('Initializing %s archival cache instance under %s', self)
152 fs = fsspec.filesystem('file')
153 # check if it's ok to write, and re-create the archive cache main dir
154 # A directory is the virtual equivalent of a physical file cabinet.
155 # In other words, it's a container for organizing digital data.
156 # Unlike a folder, which can only store files, a directory can store files,
157 # subdirectories, and other directories.
158 if not fs.exists(self._directory):
159 fs.makedirs(self._directory, exist_ok=True)
160
151 self._shards = tuple(
161 self._shards = tuple(
152 FileSystemShard(
162 self.shard_cls(
153 index=num,
163 index=num,
154 directory=os.path.join(directory, self.shard_name % num),
164 directory=directory,
165 directory_folder=self.shard_name % num,
166 fs=fs,
155 **settings,
167 **settings,
156 )
168 )
157 for num in range(self._count)
169 for num in range(self._shard_count)
158 )
170 )
159 self._hash = self._shards[0].hash
171 self._hash = self._shards[0].hash
160
172
161 def _get_shard(self, key) -> FileSystemShard:
162 index = self._hash(key) % self._count
163 shard = self._shards[index]
164 return shard
165
166 def _get_size(self, shard, archive_path):
173 def _get_size(self, shard, archive_path):
167 return os.stat(archive_path).st_size
174 return os.stat(archive_path).st_size
@@ -1,158 +1,164 b''
1 # Copyright (C) 2015-2024 RhodeCode GmbH
1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 #
2 #
3 # This program is free software: you can redistribute it and/or modify
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
5 # (only), as published by the Free Software Foundation.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU Affero General Public License
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
14 #
15 # This program is dual-licensed. If you wish to learn more about the
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
18
19 import codecs
19 import codecs
20 import hashlib
20 import hashlib
21 import logging
21 import logging
22 import os
22 import os
23
23
24 import fsspec
24 import fsspec
25
25
26 from .base import BaseCache, BaseShard
26 from .base import BaseCache, BaseShard
27 from ..utils import ShardFileReader, NOT_GIVEN
27 from ..utils import ShardFileReader, NOT_GIVEN
28 from ...type_utils import str2bool
28 from ...type_utils import str2bool
29
29
30 log = logging.getLogger(__name__)
30 log = logging.getLogger(__name__)
31
31
32
32
33 class S3Shard(BaseShard):
33 class S3Shard(BaseShard):
34
34
35 def __init__(self, index, bucket, **settings):
35 def __init__(self, index, bucket, bucket_folder, fs, **settings):
36 self._index = index
36 self._index = index
37 self._bucket = bucket
37 self._bucket_folder = bucket_folder
38 self.storage_type = 'bucket'
38 self.storage_type = 'bucket'
39 self._bucket_main = bucket
39
40
40 endpoint_url = settings.pop('archive_cache.objectstore.url')
41 self.fs = fs
41 key = settings.pop('archive_cache.objectstore.key')
42 secret = settings.pop('archive_cache.objectstore.secret')
43
44 # TODO: Add it all over the place...
45 self._bucket_root = settings.pop('archive_cache.objectstore.bucket_root')
46
47 self.fs = fsspec.filesystem('s3', anon=False, endpoint_url=endpoint_url, key=key, secret=secret)
48
42
49 @property
43 @property
50 def bucket(self):
44 def bucket(self):
51 """Cache bucket."""
45 """Cache bucket final path."""
52 return os.path.join(self._bucket_root, self._bucket)
46 return os.path.join(self._bucket_main, self._bucket_folder)
53
47
54 def _get_keyfile(self, archive_key) -> tuple[str, str]:
48 def _get_keyfile(self, archive_key) -> tuple[str, str]:
55 key_file = f'{archive_key}-{self.key_suffix}'
49 key_file = f'{archive_key}-{self.key_suffix}'
56 return key_file, os.path.join(self.bucket, key_file)
50 return key_file, os.path.join(self.bucket, key_file)
57
51
58 def _get_writer(self, path, mode):
52 def _get_writer(self, path, mode):
59 return self.fs.open(path, 'wb')
53 return self.fs.open(path, 'wb')
60
54
61 def _write_file(self, full_path, iterator, mode):
55 def _write_file(self, full_path, iterator, mode):
62 if self._bucket_root:
63 if not self.fs.exists(self._bucket_root):
64 self.fs.mkdir(self._bucket_root)
65
56
66 # ensure bucket exists
57 # ensure folder in bucket exists
67 destination = self.bucket
58 destination = self.bucket
68 if not self.fs.exists(destination):
59 if not self.fs.exists(destination):
69 self.fs.mkdir(destination, s3_additional_kwargs={})
60 self.fs.mkdir(destination, s3_additional_kwargs={})
70
61
71 writer = self._get_writer(full_path, mode)
62 writer = self._get_writer(full_path, mode)
72
63
73 digest = hashlib.sha256()
64 digest = hashlib.sha256()
74 with writer:
65 with writer:
75 size = 0
66 size = 0
76 for chunk in iterator:
67 for chunk in iterator:
77 size += len(chunk)
68 size += len(chunk)
78 digest.update(chunk)
69 digest.update(chunk)
79 writer.write(chunk)
70 writer.write(chunk)
80
71
81 sha256 = digest.hexdigest()
72 sha256 = digest.hexdigest()
82 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
73 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
83 return size, sha256
74 return size, sha256
84
75
85 def store(self, key, value_reader, metadata: dict | None = None):
76 def store(self, key, value_reader, metadata: dict | None = None):
86 return self._store(key, value_reader, metadata, mode='wb')
77 return self._store(key, value_reader, metadata, mode='wb')
87
78
88 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
79 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
89 return self._fetch(key, retry, retry_attempts, retry_backoff)
80 return self._fetch(key, retry, retry_attempts, retry_backoff)
90
81
91 def remove(self, key):
82 def remove(self, key):
92 return self._remove(key)
83 return self._remove(key)
93
84
94 def random_filename(self):
85 def random_filename(self):
95 """Return filename and full-path tuple for file storage.
86 """Return filename and full-path tuple for file storage.
96
87
97 Filename will be a randomly generated 28 character hexadecimal string
88 Filename will be a randomly generated 28 character hexadecimal string
98 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
89 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
99 reduce the size of directories. On older filesystems, lookups in
90 reduce the size of directories. On older filesystems, lookups in
100 directories with many files may be slow.
91 directories with many files may be slow.
101 """
92 """
102
93
103 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
94 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
104
95
105 archive_name = hex_name[4:] + '.archive_cache'
96 archive_name = hex_name[4:] + '.archive_cache'
106 filename = f"{hex_name[:2]}-{hex_name[2:4]}-{archive_name}"
97 filename = f"{hex_name[:2]}-{hex_name[2:4]}-{archive_name}"
107
98
108 full_path = os.path.join(self.bucket, filename)
99 full_path = os.path.join(self.bucket, filename)
109 return archive_name, full_path
100 return archive_name, full_path
110
101
111 def __repr__(self):
102 def __repr__(self):
112 return f'{self.__class__.__name__}(index={self._index}, bucket={self.bucket})'
103 return f'{self.__class__.__name__}(index={self._index}, bucket={self.bucket})'
113
104
114
105
115 class ObjectStoreCache(BaseCache):
106 class ObjectStoreCache(BaseCache):
116 shard_name = 'shard-bucket-%03d'
107 shard_name = 'shard-%03d'
108 shard_cls = S3Shard
117
109
118 def __init__(self, locking_url, **settings):
110 def __init__(self, locking_url, **settings):
119 """
111 """
120 Initialize objectstore cache instance.
112 Initialize objectstore cache instance.
121
113
122 :param str locking_url: redis url for a lock
114 :param str locking_url: redis url for a lock
123 :param settings: settings dict
115 :param settings: settings dict
124
116
125 """
117 """
126 self._locking_url = locking_url
118 self._locking_url = locking_url
127 self._config = settings
119 self._config = settings
128
120
129 objectstore_url = self.get_conf('archive_cache.objectstore.url')
121 objectstore_url = self.get_conf('archive_cache.objectstore.url')
130 self._storage_path = objectstore_url
122 self._storage_path = objectstore_url # common path for all from BaseCache
131
123
132 self._count = int(self.get_conf('archive_cache.objectstore.bucket_shards', pop=True))
124 self._shard_count = int(self.get_conf('archive_cache.objectstore.bucket_shards', pop=True))
125 if self._shard_count < 1:
126 raise ValueError('cache_shards must be 1 or more')
127
128 self._bucket = settings.pop('archive_cache.objectstore.bucket')
129 if not self._bucket:
130 raise ValueError('archive_cache.objectstore.bucket needs to have a value')
133
131
134 self._eviction_policy = self.get_conf('archive_cache.objectstore.eviction_policy', pop=True)
132 self._eviction_policy = self.get_conf('archive_cache.objectstore.eviction_policy', pop=True)
135 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.objectstore.cache_size_gb')))
133 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.objectstore.cache_size_gb')))
136
134
137 self.retry = str2bool(self.get_conf('archive_cache.objectstore.retry', pop=True))
135 self.retry = str2bool(self.get_conf('archive_cache.objectstore.retry', pop=True))
138 self.retry_attempts = int(self.get_conf('archive_cache.objectstore.retry_attempts', pop=True))
136 self.retry_attempts = int(self.get_conf('archive_cache.objectstore.retry_attempts', pop=True))
139 self.retry_backoff = int(self.get_conf('archive_cache.objectstore.retry_backoff', pop=True))
137 self.retry_backoff = int(self.get_conf('archive_cache.objectstore.retry_backoff', pop=True))
140
138
141 log.debug('Initializing archival cache instance under %s', objectstore_url)
139 endpoint_url = settings.pop('archive_cache.objectstore.url')
140 key = settings.pop('archive_cache.objectstore.key')
141 secret = settings.pop('archive_cache.objectstore.secret')
142
143 log.debug('Initializing %s archival cache instance under %s', self)
144
145 fs = fsspec.filesystem('s3', anon=False, endpoint_url=endpoint_url, key=key, secret=secret)
146
147 # init main bucket
148 if not fs.exists(self._bucket):
149 fs.mkdir(self._bucket)
150
142 self._shards = tuple(
151 self._shards = tuple(
143 S3Shard(
152 self.shard_cls(
144 index=num,
153 index=num,
145 bucket=self.shard_name % num,
154 bucket=self._bucket,
155 bucket_folder=self.shard_name % num,
156 fs=fs,
146 **settings,
157 **settings,
147 )
158 )
148 for num in range(self._count)
159 for num in range(self._shard_count)
149 )
160 )
150 self._hash = self._shards[0].hash
161 self._hash = self._shards[0].hash
151
162
152 def _get_shard(self, key) -> S3Shard:
153 index = self._hash(key) % self._count
154 shard = self._shards[index]
155 return shard
156
157 def _get_size(self, shard, archive_path):
163 def _get_size(self, shard, archive_path):
158 return shard.fs.info(archive_path)['size']
164 return shard.fs.info(archive_path)['size']
@@ -1,832 +1,831 b''
1
1
2 ; #########################################
2 ; #########################################
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 ; #########################################
4 ; #########################################
5
5
6 [DEFAULT]
6 [DEFAULT]
7 ; Debug flag sets all loggers to debug, and enables request tracking
7 ; Debug flag sets all loggers to debug, and enables request tracking
8 debug = true
8 debug = true
9
9
10 ; ########################################################################
10 ; ########################################################################
11 ; EMAIL CONFIGURATION
11 ; EMAIL CONFIGURATION
12 ; These settings will be used by the RhodeCode mailing system
12 ; These settings will be used by the RhodeCode mailing system
13 ; ########################################################################
13 ; ########################################################################
14
14
15 ; prefix all emails subjects with given prefix, helps filtering out emails
15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 #email_prefix = [RhodeCode]
16 #email_prefix = [RhodeCode]
17
17
18 ; email FROM address all mails will be sent
18 ; email FROM address all mails will be sent
19 #app_email_from = rhodecode-noreply@localhost
19 #app_email_from = rhodecode-noreply@localhost
20
20
21 #smtp_server = mail.server.com
21 #smtp_server = mail.server.com
22 #smtp_username =
22 #smtp_username =
23 #smtp_password =
23 #smtp_password =
24 #smtp_port =
24 #smtp_port =
25 #smtp_use_tls = false
25 #smtp_use_tls = false
26 #smtp_use_ssl = true
26 #smtp_use_ssl = true
27
27
28 [server:main]
28 [server:main]
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 host = 127.0.0.1
31 host = 127.0.0.1
32 port = 10020
32 port = 10020
33
33
34
34
35 ; ###########################
35 ; ###########################
36 ; GUNICORN APPLICATION SERVER
36 ; GUNICORN APPLICATION SERVER
37 ; ###########################
37 ; ###########################
38
38
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40
40
41 ; Module to use, this setting shouldn't be changed
41 ; Module to use, this setting shouldn't be changed
42 use = egg:gunicorn#main
42 use = egg:gunicorn#main
43
43
44 ; Prefix middleware for RhodeCode.
44 ; Prefix middleware for RhodeCode.
45 ; recommended when using proxy setup.
45 ; recommended when using proxy setup.
46 ; allows to set RhodeCode under a prefix in server.
46 ; allows to set RhodeCode under a prefix in server.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 ; And set your prefix like: `prefix = /custom_prefix`
48 ; And set your prefix like: `prefix = /custom_prefix`
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 ; to make your cookies only work on prefix url
50 ; to make your cookies only work on prefix url
51 [filter:proxy-prefix]
51 [filter:proxy-prefix]
52 use = egg:PasteDeploy#prefix
52 use = egg:PasteDeploy#prefix
53 prefix = /
53 prefix = /
54
54
55 [app:main]
55 [app:main]
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 ; of this file
57 ; of this file
58 ; Each option in the app:main can be override by an environmental variable
58 ; Each option in the app:main can be override by an environmental variable
59 ;
59 ;
60 ;To override an option:
60 ;To override an option:
61 ;
61 ;
62 ;RC_<KeyName>
62 ;RC_<KeyName>
63 ;Everything should be uppercase, . and - should be replaced by _.
63 ;Everything should be uppercase, . and - should be replaced by _.
64 ;For example, if you have these configuration settings:
64 ;For example, if you have these configuration settings:
65 ;rc_cache.repo_object.backend = foo
65 ;rc_cache.repo_object.backend = foo
66 ;can be overridden by
66 ;can be overridden by
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68
68
69 use = egg:rhodecode-enterprise-ce
69 use = egg:rhodecode-enterprise-ce
70
70
71 ; enable proxy prefix middleware, defined above
71 ; enable proxy prefix middleware, defined above
72 #filter-with = proxy-prefix
72 #filter-with = proxy-prefix
73
73
74 ; encryption key used to encrypt social plugin tokens,
74 ; encryption key used to encrypt social plugin tokens,
75 ; remote_urls with credentials etc, if not set it defaults to
75 ; remote_urls with credentials etc, if not set it defaults to
76 ; `beaker.session.secret`
76 ; `beaker.session.secret`
77 #rhodecode.encrypted_values.secret =
77 #rhodecode.encrypted_values.secret =
78
78
79 ; decryption strict mode (enabled by default). It controls if decryption raises
79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 #rhodecode.encrypted_values.strict = false
81 #rhodecode.encrypted_values.strict = false
82
82
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 ; fernet is safer, and we strongly recommend switching to it.
84 ; fernet is safer, and we strongly recommend switching to it.
85 ; Due to backward compatibility aes is used as default.
85 ; Due to backward compatibility aes is used as default.
86 #rhodecode.encrypted_values.algorithm = fernet
86 #rhodecode.encrypted_values.algorithm = fernet
87
87
88 ; Return gzipped responses from RhodeCode (static files/application)
88 ; Return gzipped responses from RhodeCode (static files/application)
89 gzip_responses = false
89 gzip_responses = false
90
90
91 ; Auto-generate javascript routes file on startup
91 ; Auto-generate javascript routes file on startup
92 generate_js_files = false
92 generate_js_files = false
93
93
94 ; System global default language.
94 ; System global default language.
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 lang = en
96 lang = en
97
97
98 ; Perform a full repository scan and import on each server start.
98 ; Perform a full repository scan and import on each server start.
99 ; Settings this to true could lead to very long startup time.
99 ; Settings this to true could lead to very long startup time.
100 startup.import_repos = true
100 startup.import_repos = true
101
101
102 ; URL at which the application is running. This is used for Bootstrapping
102 ; URL at which the application is running. This is used for Bootstrapping
103 ; requests in context when no web request is available. Used in ishell, or
103 ; requests in context when no web request is available. Used in ishell, or
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 app.base_url = http://rhodecode.local
105 app.base_url = http://rhodecode.local
106
106
107 ; Host at which the Service API is running.
107 ; Host at which the Service API is running.
108 app.service_api.host = http://rhodecode.local:10020
108 app.service_api.host = http://rhodecode.local:10020
109
109
110 ; Secret for Service API authentication.
110 ; Secret for Service API authentication.
111 app.service_api.token =
111 app.service_api.token =
112
112
113 ; Unique application ID. Should be a random unique string for security.
113 ; Unique application ID. Should be a random unique string for security.
114 app_instance_uuid = rc-production
114 app_instance_uuid = rc-production
115
115
116 ; Cut off limit for large diffs (size in bytes). If overall diff size on
116 ; Cut off limit for large diffs (size in bytes). If overall diff size on
117 ; commit, or pull request exceeds this limit this diff will be displayed
117 ; commit, or pull request exceeds this limit this diff will be displayed
118 ; partially. E.g 512000 == 512Kb
118 ; partially. E.g 512000 == 512Kb
119 cut_off_limit_diff = 1024000
119 cut_off_limit_diff = 1024000
120
120
121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
122 ; file inside diff which exceeds this limit will be displayed partially.
122 ; file inside diff which exceeds this limit will be displayed partially.
123 ; E.g 128000 == 128Kb
123 ; E.g 128000 == 128Kb
124 cut_off_limit_file = 256000
124 cut_off_limit_file = 256000
125
125
126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
127 vcs_full_cache = false
127 vcs_full_cache = false
128
128
129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
131 force_https = false
131 force_https = false
132
132
133 ; use Strict-Transport-Security headers
133 ; use Strict-Transport-Security headers
134 use_htsts = false
134 use_htsts = false
135
135
136 ; Set to true if your repos are exposed using the dumb protocol
136 ; Set to true if your repos are exposed using the dumb protocol
137 git_update_server_info = false
137 git_update_server_info = false
138
138
139 ; RSS/ATOM feed options
139 ; RSS/ATOM feed options
140 rss_cut_off_limit = 256000
140 rss_cut_off_limit = 256000
141 rss_items_per_page = 10
141 rss_items_per_page = 10
142 rss_include_diff = false
142 rss_include_diff = false
143
143
144 ; gist URL alias, used to create nicer urls for gist. This should be an
144 ; gist URL alias, used to create nicer urls for gist. This should be an
145 ; url that does rewrites to _admin/gists/{gistid}.
145 ; url that does rewrites to _admin/gists/{gistid}.
146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
148 gist_alias_url =
148 gist_alias_url =
149
149
150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
151 ; used for access.
151 ; used for access.
152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
153 ; came from the the logged in user who own this authentication token.
153 ; came from the the logged in user who own this authentication token.
154 ; Additionally @TOKEN syntax can be used to bound the view to specific
154 ; Additionally @TOKEN syntax can be used to bound the view to specific
155 ; authentication token. Such view would be only accessible when used together
155 ; authentication token. Such view would be only accessible when used together
156 ; with this authentication token
156 ; with this authentication token
157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
158 ; The list should be "," separated and on a single line.
158 ; The list should be "," separated and on a single line.
159 ; Most common views to enable:
159 ; Most common views to enable:
160
160
161 # RepoCommitsView:repo_commit_download
161 # RepoCommitsView:repo_commit_download
162 # RepoCommitsView:repo_commit_patch
162 # RepoCommitsView:repo_commit_patch
163 # RepoCommitsView:repo_commit_raw
163 # RepoCommitsView:repo_commit_raw
164 # RepoCommitsView:repo_commit_raw@TOKEN
164 # RepoCommitsView:repo_commit_raw@TOKEN
165 # RepoFilesView:repo_files_diff
165 # RepoFilesView:repo_files_diff
166 # RepoFilesView:repo_archivefile
166 # RepoFilesView:repo_archivefile
167 # RepoFilesView:repo_file_raw
167 # RepoFilesView:repo_file_raw
168 # GistView:*
168 # GistView:*
169 api_access_controllers_whitelist =
169 api_access_controllers_whitelist =
170
170
171 ; Default encoding used to convert from and to unicode
171 ; Default encoding used to convert from and to unicode
172 ; can be also a comma separated list of encoding in case of mixed encodings
172 ; can be also a comma separated list of encoding in case of mixed encodings
173 default_encoding = UTF-8
173 default_encoding = UTF-8
174
174
175 ; instance-id prefix
175 ; instance-id prefix
176 ; a prefix key for this instance used for cache invalidation when running
176 ; a prefix key for this instance used for cache invalidation when running
177 ; multiple instances of RhodeCode, make sure it's globally unique for
177 ; multiple instances of RhodeCode, make sure it's globally unique for
178 ; all running RhodeCode instances. Leave empty if you don't use it
178 ; all running RhodeCode instances. Leave empty if you don't use it
179 instance_id =
179 instance_id =
180
180
181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
182 ; of an authentication plugin also if it is disabled by it's settings.
182 ; of an authentication plugin also if it is disabled by it's settings.
183 ; This could be useful if you are unable to log in to the system due to broken
183 ; This could be useful if you are unable to log in to the system due to broken
184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
185 ; module to log in again and fix the settings.
185 ; module to log in again and fix the settings.
186 ; Available builtin plugin IDs (hash is part of the ID):
186 ; Available builtin plugin IDs (hash is part of the ID):
187 ; egg:rhodecode-enterprise-ce#rhodecode
187 ; egg:rhodecode-enterprise-ce#rhodecode
188 ; egg:rhodecode-enterprise-ce#pam
188 ; egg:rhodecode-enterprise-ce#pam
189 ; egg:rhodecode-enterprise-ce#ldap
189 ; egg:rhodecode-enterprise-ce#ldap
190 ; egg:rhodecode-enterprise-ce#jasig_cas
190 ; egg:rhodecode-enterprise-ce#jasig_cas
191 ; egg:rhodecode-enterprise-ce#headers
191 ; egg:rhodecode-enterprise-ce#headers
192 ; egg:rhodecode-enterprise-ce#crowd
192 ; egg:rhodecode-enterprise-ce#crowd
193
193
194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
195
195
196 ; Flag to control loading of legacy plugins in py:/path format
196 ; Flag to control loading of legacy plugins in py:/path format
197 auth_plugin.import_legacy_plugins = true
197 auth_plugin.import_legacy_plugins = true
198
198
199 ; alternative return HTTP header for failed authentication. Default HTTP
199 ; alternative return HTTP header for failed authentication. Default HTTP
200 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
200 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
201 ; handling that causing a series of failed authentication calls.
201 ; handling that causing a series of failed authentication calls.
202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
203 ; This will be served instead of default 401 on bad authentication
203 ; This will be served instead of default 401 on bad authentication
204 auth_ret_code =
204 auth_ret_code =
205
205
206 ; use special detection method when serving auth_ret_code, instead of serving
206 ; use special detection method when serving auth_ret_code, instead of serving
207 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
207 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
208 ; and then serve auth_ret_code to clients
208 ; and then serve auth_ret_code to clients
209 auth_ret_code_detection = false
209 auth_ret_code_detection = false
210
210
211 ; locking return code. When repository is locked return this HTTP code. 2XX
211 ; locking return code. When repository is locked return this HTTP code. 2XX
212 ; codes don't break the transactions while 4XX codes do
212 ; codes don't break the transactions while 4XX codes do
213 lock_ret_code = 423
213 lock_ret_code = 423
214
214
215 ; Filesystem location were repositories should be stored
215 ; Filesystem location were repositories should be stored
216 repo_store.path = /var/opt/rhodecode_repo_store
216 repo_store.path = /var/opt/rhodecode_repo_store
217
217
218 ; allows to setup custom hooks in settings page
218 ; allows to setup custom hooks in settings page
219 allow_custom_hooks_settings = true
219 allow_custom_hooks_settings = true
220
220
221 ; Generated license token required for EE edition license.
221 ; Generated license token required for EE edition license.
222 ; New generated token value can be found in Admin > settings > license page.
222 ; New generated token value can be found in Admin > settings > license page.
223 license_token = abra-cada-bra1-rce3
223 license_token = abra-cada-bra1-rce3
224
224
225 ; This flag hides sensitive information on the license page such as token, and license data
225 ; This flag hides sensitive information on the license page such as token, and license data
226 license.hide_license_info = false
226 license.hide_license_info = false
227
227
228 ; supervisor connection uri, for managing supervisor and logs.
228 ; supervisor connection uri, for managing supervisor and logs.
229 supervisor.uri =
229 supervisor.uri =
230
230
231 ; supervisord group name/id we only want this RC instance to handle
231 ; supervisord group name/id we only want this RC instance to handle
232 supervisor.group_id = dev
232 supervisor.group_id = dev
233
233
234 ; Display extended labs settings
234 ; Display extended labs settings
235 labs_settings_active = true
235 labs_settings_active = true
236
236
237 ; Custom exception store path, defaults to TMPDIR
237 ; Custom exception store path, defaults to TMPDIR
238 ; This is used to store exception from RhodeCode in shared directory
238 ; This is used to store exception from RhodeCode in shared directory
239 #exception_tracker.store_path =
239 #exception_tracker.store_path =
240
240
241 ; Send email with exception details when it happens
241 ; Send email with exception details when it happens
242 #exception_tracker.send_email = false
242 #exception_tracker.send_email = false
243
243
244 ; Comma separated list of recipients for exception emails,
244 ; Comma separated list of recipients for exception emails,
245 ; e.g admin@rhodecode.com,devops@rhodecode.com
245 ; e.g admin@rhodecode.com,devops@rhodecode.com
246 ; Can be left empty, then emails will be sent to ALL super-admins
246 ; Can be left empty, then emails will be sent to ALL super-admins
247 #exception_tracker.send_email_recipients =
247 #exception_tracker.send_email_recipients =
248
248
249 ; optional prefix to Add to email Subject
249 ; optional prefix to Add to email Subject
250 #exception_tracker.email_prefix = [RHODECODE ERROR]
250 #exception_tracker.email_prefix = [RHODECODE ERROR]
251
251
252 ; File store configuration. This is used to store and serve uploaded files
252 ; File store configuration. This is used to store and serve uploaded files
253 file_store.enabled = true
253 file_store.enabled = true
254
254
255 ; Storage backend, available options are: local
255 ; Storage backend, available options are: local
256 file_store.backend = local
256 file_store.backend = local
257
257
258 ; path to store the uploaded binaries and artifacts
258 ; path to store the uploaded binaries and artifacts
259 file_store.storage_path = /var/opt/rhodecode_data/file_store
259 file_store.storage_path = /var/opt/rhodecode_data/file_store
260
260
261
261
262 ; Redis url to acquire/check generation of archives locks
262 ; Redis url to acquire/check generation of archives locks
263 archive_cache.locking.url = redis://redis:6379/1
263 archive_cache.locking.url = redis://redis:6379/1
264
264
265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
266 archive_cache.backend.type = filesystem
266 archive_cache.backend.type = filesystem
267
267
268 ; url for s3 compatible storage that allows to upload artifacts
268 ; url for s3 compatible storage that allows to upload artifacts
269 ; e.g http://minio:9000
269 ; e.g http://minio:9000
270 archive_cache.objectstore.url = http://s3-minio:9000
270 archive_cache.objectstore.url = http://s3-minio:9000
271
271
272 ; key for s3 auth
272 ; key for s3 auth
273 archive_cache.objectstore.key = key
273 archive_cache.objectstore.key = key
274
274
275 ; secret for s3 auth
275 ; secret for s3 auth
276 archive_cache.objectstore.secret = secret
276 archive_cache.objectstore.secret = secret
277
277
278 ; number of sharded buckets to create to distribute archives across
278 ; number of sharded buckets to create to distribute archives across
279 ; default is 8 shards
279 ; default is 8 shards
280 archive_cache.objectstore.bucket_shards = 8
280 archive_cache.objectstore.bucket_shards = 8
281
281
282 ; a top-level bucket to put all other sharded buckets in
282 ; a top-level bucket to put all other shards in
283 ; in case it's empty all buckets will be created in top-level (not recommended)
283 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
284 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
284 archive_cache.objectstore.bucket = rhodecode-archive-cache
285 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
286
285
287 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
286 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
288 archive_cache.objectstore.retry = false
287 archive_cache.objectstore.retry = false
289
288
290 ; number of seconds to wait for next try using retry
289 ; number of seconds to wait for next try using retry
291 archive_cache.objectstore.retry_backoff = 1
290 archive_cache.objectstore.retry_backoff = 1
292
291
293 ; how many tries do do a retry fetch from this backend
292 ; how many tries do do a retry fetch from this backend
294 archive_cache.objectstore.retry_attempts = 10
293 archive_cache.objectstore.retry_attempts = 10
295
294
296 ; Default is $cache_dir/archive_cache if not set
295 ; Default is $cache_dir/archive_cache if not set
297 ; Generated repo archives will be cached at this location
296 ; Generated repo archives will be cached at this location
298 ; and served from the cache during subsequent requests for the same archive of
297 ; and served from the cache during subsequent requests for the same archive of
299 ; the repository. This path is important to be shared across filesystems and with
298 ; the repository. This path is important to be shared across filesystems and with
300 ; RhodeCode and vcsserver
299 ; RhodeCode and vcsserver
301 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
300 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
302
301
303 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
302 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
304 archive_cache.filesystem.cache_size_gb = 2
303 archive_cache.filesystem.cache_size_gb = 2
305
304
306 ; Eviction policy used to clear out after cache_size_gb limit is reached
305 ; Eviction policy used to clear out after cache_size_gb limit is reached
307 archive_cache.filesystem.eviction_policy = least-recently-stored
306 archive_cache.filesystem.eviction_policy = least-recently-stored
308
307
309 ; By default cache uses sharding technique, this specifies how many shards are there
308 ; By default cache uses sharding technique, this specifies how many shards are there
310 ; default is 8 shards
309 ; default is 8 shards
311 archive_cache.filesystem.cache_shards = 8
310 archive_cache.filesystem.cache_shards = 8
312
311
313 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
312 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
314 archive_cache.filesystem.retry = false
313 archive_cache.filesystem.retry = false
315
314
316 ; number of seconds to wait for next try using retry
315 ; number of seconds to wait for next try using retry
317 archive_cache.filesystem.retry_backoff = 1
316 archive_cache.filesystem.retry_backoff = 1
318
317
319 ; how many tries do do a retry fetch from this backend
318 ; how many tries do do a retry fetch from this backend
320 archive_cache.filesystem.retry_attempts = 10
319 archive_cache.filesystem.retry_attempts = 10
321
320
322
321
323 ; #############
322 ; #############
324 ; CELERY CONFIG
323 ; CELERY CONFIG
325 ; #############
324 ; #############
326
325
327 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
326 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
328
327
329 use_celery = false
328 use_celery = false
330
329
331 ; path to store schedule database
330 ; path to store schedule database
332 #celerybeat-schedule.path =
331 #celerybeat-schedule.path =
333
332
334 ; connection url to the message broker (default redis)
333 ; connection url to the message broker (default redis)
335 celery.broker_url = redis://redis:6379/8
334 celery.broker_url = redis://redis:6379/8
336
335
337 ; results backend to get results for (default redis)
336 ; results backend to get results for (default redis)
338 celery.result_backend = redis://redis:6379/8
337 celery.result_backend = redis://redis:6379/8
339
338
340 ; rabbitmq example
339 ; rabbitmq example
341 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
340 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
342
341
343 ; maximum tasks to execute before worker restart
342 ; maximum tasks to execute before worker restart
344 celery.max_tasks_per_child = 20
343 celery.max_tasks_per_child = 20
345
344
346 ; tasks will never be sent to the queue, but executed locally instead.
345 ; tasks will never be sent to the queue, but executed locally instead.
347 celery.task_always_eager = true
346 celery.task_always_eager = true
348 celery.task_store_eager_result = true
347 celery.task_store_eager_result = true
349
348
350 ; #############
349 ; #############
351 ; DOGPILE CACHE
350 ; DOGPILE CACHE
352 ; #############
351 ; #############
353
352
354 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
353 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
355 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
354 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
356 cache_dir = %(here)s/rc-test-data
355 cache_dir = %(here)s/rc-test-data
357
356
358 ; *********************************************
357 ; *********************************************
359 ; `sql_cache_short` cache for heavy SQL queries
358 ; `sql_cache_short` cache for heavy SQL queries
360 ; Only supported backend is `memory_lru`
359 ; Only supported backend is `memory_lru`
361 ; *********************************************
360 ; *********************************************
362 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
361 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
363 rc_cache.sql_cache_short.expiration_time = 0
362 rc_cache.sql_cache_short.expiration_time = 0
364
363
365
364
366 ; *****************************************************
365 ; *****************************************************
367 ; `cache_repo_longterm` cache for repo object instances
366 ; `cache_repo_longterm` cache for repo object instances
368 ; Only supported backend is `memory_lru`
367 ; Only supported backend is `memory_lru`
369 ; *****************************************************
368 ; *****************************************************
370 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
369 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
371 ; by default we use 30 Days, cache is still invalidated on push
370 ; by default we use 30 Days, cache is still invalidated on push
372 rc_cache.cache_repo_longterm.expiration_time = 2592000
371 rc_cache.cache_repo_longterm.expiration_time = 2592000
373 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
372 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
374 rc_cache.cache_repo_longterm.max_size = 10000
373 rc_cache.cache_repo_longterm.max_size = 10000
375
374
376
375
377 ; *********************************************
376 ; *********************************************
378 ; `cache_general` cache for general purpose use
377 ; `cache_general` cache for general purpose use
379 ; for simplicity use rc.file_namespace backend,
378 ; for simplicity use rc.file_namespace backend,
380 ; for performance and scale use rc.redis
379 ; for performance and scale use rc.redis
381 ; *********************************************
380 ; *********************************************
382 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
381 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
383 rc_cache.cache_general.expiration_time = 43200
382 rc_cache.cache_general.expiration_time = 43200
384 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
383 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
385 rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db
384 rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db
386
385
387 ; alternative `cache_general` redis backend with distributed lock
386 ; alternative `cache_general` redis backend with distributed lock
388 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
387 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
389 #rc_cache.cache_general.expiration_time = 300
388 #rc_cache.cache_general.expiration_time = 300
390
389
391 ; redis_expiration_time needs to be greater then expiration_time
390 ; redis_expiration_time needs to be greater then expiration_time
392 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
391 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
393
392
394 #rc_cache.cache_general.arguments.host = localhost
393 #rc_cache.cache_general.arguments.host = localhost
395 #rc_cache.cache_general.arguments.port = 6379
394 #rc_cache.cache_general.arguments.port = 6379
396 #rc_cache.cache_general.arguments.db = 0
395 #rc_cache.cache_general.arguments.db = 0
397 #rc_cache.cache_general.arguments.socket_timeout = 30
396 #rc_cache.cache_general.arguments.socket_timeout = 30
398 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
397 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
399 #rc_cache.cache_general.arguments.distributed_lock = true
398 #rc_cache.cache_general.arguments.distributed_lock = true
400
399
401 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
400 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
402 #rc_cache.cache_general.arguments.lock_auto_renewal = true
401 #rc_cache.cache_general.arguments.lock_auto_renewal = true
403
402
404 ; *************************************************
403 ; *************************************************
405 ; `cache_perms` cache for permission tree, auth TTL
404 ; `cache_perms` cache for permission tree, auth TTL
406 ; for simplicity use rc.file_namespace backend,
405 ; for simplicity use rc.file_namespace backend,
407 ; for performance and scale use rc.redis
406 ; for performance and scale use rc.redis
408 ; *************************************************
407 ; *************************************************
409 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
408 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
410 rc_cache.cache_perms.expiration_time = 0
409 rc_cache.cache_perms.expiration_time = 0
411 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
410 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
412 rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db
411 rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db
413
412
414 ; alternative `cache_perms` redis backend with distributed lock
413 ; alternative `cache_perms` redis backend with distributed lock
415 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
414 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
416 #rc_cache.cache_perms.expiration_time = 300
415 #rc_cache.cache_perms.expiration_time = 300
417
416
418 ; redis_expiration_time needs to be greater then expiration_time
417 ; redis_expiration_time needs to be greater then expiration_time
419 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
418 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
420
419
421 #rc_cache.cache_perms.arguments.host = localhost
420 #rc_cache.cache_perms.arguments.host = localhost
422 #rc_cache.cache_perms.arguments.port = 6379
421 #rc_cache.cache_perms.arguments.port = 6379
423 #rc_cache.cache_perms.arguments.db = 0
422 #rc_cache.cache_perms.arguments.db = 0
424 #rc_cache.cache_perms.arguments.socket_timeout = 30
423 #rc_cache.cache_perms.arguments.socket_timeout = 30
425 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
424 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
426 #rc_cache.cache_perms.arguments.distributed_lock = true
425 #rc_cache.cache_perms.arguments.distributed_lock = true
427
426
428 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
427 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
429 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
428 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
430
429
431 ; ***************************************************
430 ; ***************************************************
432 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
431 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
433 ; for simplicity use rc.file_namespace backend,
432 ; for simplicity use rc.file_namespace backend,
434 ; for performance and scale use rc.redis
433 ; for performance and scale use rc.redis
435 ; ***************************************************
434 ; ***************************************************
436 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
435 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
437 rc_cache.cache_repo.expiration_time = 2592000
436 rc_cache.cache_repo.expiration_time = 2592000
438 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
437 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
439 rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db
438 rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db
440
439
441 ; alternative `cache_repo` redis backend with distributed lock
440 ; alternative `cache_repo` redis backend with distributed lock
442 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
441 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
443 #rc_cache.cache_repo.expiration_time = 2592000
442 #rc_cache.cache_repo.expiration_time = 2592000
444
443
445 ; redis_expiration_time needs to be greater then expiration_time
444 ; redis_expiration_time needs to be greater then expiration_time
446 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
445 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
447
446
448 #rc_cache.cache_repo.arguments.host = localhost
447 #rc_cache.cache_repo.arguments.host = localhost
449 #rc_cache.cache_repo.arguments.port = 6379
448 #rc_cache.cache_repo.arguments.port = 6379
450 #rc_cache.cache_repo.arguments.db = 1
449 #rc_cache.cache_repo.arguments.db = 1
451 #rc_cache.cache_repo.arguments.socket_timeout = 30
450 #rc_cache.cache_repo.arguments.socket_timeout = 30
452 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
451 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
453 #rc_cache.cache_repo.arguments.distributed_lock = true
452 #rc_cache.cache_repo.arguments.distributed_lock = true
454
453
455 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
454 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
456 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
455 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
457
456
458 ; ##############
457 ; ##############
459 ; BEAKER SESSION
458 ; BEAKER SESSION
460 ; ##############
459 ; ##############
461
460
462 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
461 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
463 ; types are file, ext:redis, ext:database, ext:memcached
462 ; types are file, ext:redis, ext:database, ext:memcached
464 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
463 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
465 beaker.session.type = file
464 beaker.session.type = file
466 beaker.session.data_dir = %(here)s/rc-tests/data/sessions
465 beaker.session.data_dir = %(here)s/rc-tests/data/sessions
467
466
468 ; Redis based sessions
467 ; Redis based sessions
469 #beaker.session.type = ext:redis
468 #beaker.session.type = ext:redis
470 #beaker.session.url = redis://redis:6379/2
469 #beaker.session.url = redis://redis:6379/2
471
470
472 ; DB based session, fast, and allows easy management over logged in users
471 ; DB based session, fast, and allows easy management over logged in users
473 #beaker.session.type = ext:database
472 #beaker.session.type = ext:database
474 #beaker.session.table_name = db_session
473 #beaker.session.table_name = db_session
475 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
474 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
476 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
475 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
477 #beaker.session.sa.pool_recycle = 3600
476 #beaker.session.sa.pool_recycle = 3600
478 #beaker.session.sa.echo = false
477 #beaker.session.sa.echo = false
479
478
480 beaker.session.key = rhodecode
479 beaker.session.key = rhodecode
481 beaker.session.secret = test-rc-uytcxaz
480 beaker.session.secret = test-rc-uytcxaz
482 beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock
481 beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock
483
482
484 ; Secure encrypted cookie. Requires AES and AES python libraries
483 ; Secure encrypted cookie. Requires AES and AES python libraries
485 ; you must disable beaker.session.secret to use this
484 ; you must disable beaker.session.secret to use this
486 #beaker.session.encrypt_key = key_for_encryption
485 #beaker.session.encrypt_key = key_for_encryption
487 #beaker.session.validate_key = validation_key
486 #beaker.session.validate_key = validation_key
488
487
489 ; Sets session as invalid (also logging out user) if it haven not been
488 ; Sets session as invalid (also logging out user) if it haven not been
490 ; accessed for given amount of time in seconds
489 ; accessed for given amount of time in seconds
491 beaker.session.timeout = 2592000
490 beaker.session.timeout = 2592000
492 beaker.session.httponly = true
491 beaker.session.httponly = true
493
492
494 ; Path to use for the cookie. Set to prefix if you use prefix middleware
493 ; Path to use for the cookie. Set to prefix if you use prefix middleware
495 #beaker.session.cookie_path = /custom_prefix
494 #beaker.session.cookie_path = /custom_prefix
496
495
497 ; Set https secure cookie
496 ; Set https secure cookie
498 beaker.session.secure = false
497 beaker.session.secure = false
499
498
500 ; default cookie expiration time in seconds, set to `true` to set expire
499 ; default cookie expiration time in seconds, set to `true` to set expire
501 ; at browser close
500 ; at browser close
502 #beaker.session.cookie_expires = 3600
501 #beaker.session.cookie_expires = 3600
503
502
504 ; #############################
503 ; #############################
505 ; SEARCH INDEXING CONFIGURATION
504 ; SEARCH INDEXING CONFIGURATION
506 ; #############################
505 ; #############################
507
506
508 ; Full text search indexer is available in rhodecode-tools under
507 ; Full text search indexer is available in rhodecode-tools under
509 ; `rhodecode-tools index` command
508 ; `rhodecode-tools index` command
510
509
511 ; WHOOSH Backend, doesn't require additional services to run
510 ; WHOOSH Backend, doesn't require additional services to run
512 ; it works good with few dozen repos
511 ; it works good with few dozen repos
513 search.module = rhodecode.lib.index.whoosh
512 search.module = rhodecode.lib.index.whoosh
514 search.location = %(here)s/rc-tests/data/index
513 search.location = %(here)s/rc-tests/data/index
515
514
516 ; ####################
515 ; ####################
517 ; CHANNELSTREAM CONFIG
516 ; CHANNELSTREAM CONFIG
518 ; ####################
517 ; ####################
519
518
520 ; channelstream enables persistent connections and live notification
519 ; channelstream enables persistent connections and live notification
521 ; in the system. It's also used by the chat system
520 ; in the system. It's also used by the chat system
522
521
523 channelstream.enabled = false
522 channelstream.enabled = false
524
523
525 ; server address for channelstream server on the backend
524 ; server address for channelstream server on the backend
526 channelstream.server = channelstream:9800
525 channelstream.server = channelstream:9800
527
526
528 ; location of the channelstream server from outside world
527 ; location of the channelstream server from outside world
529 ; use ws:// for http or wss:// for https. This address needs to be handled
528 ; use ws:// for http or wss:// for https. This address needs to be handled
530 ; by external HTTP server such as Nginx or Apache
529 ; by external HTTP server such as Nginx or Apache
531 ; see Nginx/Apache configuration examples in our docs
530 ; see Nginx/Apache configuration examples in our docs
532 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
531 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
533 channelstream.secret = ENV_GENERATED
532 channelstream.secret = ENV_GENERATED
534 channelstream.history.location = %(here)s/rc-tests/channelstream_history
533 channelstream.history.location = %(here)s/rc-tests/channelstream_history
535
534
536 ; Internal application path that Javascript uses to connect into.
535 ; Internal application path that Javascript uses to connect into.
537 ; If you use proxy-prefix the prefix should be added before /_channelstream
536 ; If you use proxy-prefix the prefix should be added before /_channelstream
538 channelstream.proxy_path = /_channelstream
537 channelstream.proxy_path = /_channelstream
539
538
540
539
541 ; ##############################
540 ; ##############################
542 ; MAIN RHODECODE DATABASE CONFIG
541 ; MAIN RHODECODE DATABASE CONFIG
543 ; ##############################
542 ; ##############################
544
543
545 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
544 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
546 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
545 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
547 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
546 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
548 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
547 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
549 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
548 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
550
549
551 sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30
550 sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30
552
551
553 ; see sqlalchemy docs for other advanced settings
552 ; see sqlalchemy docs for other advanced settings
554 ; print the sql statements to output
553 ; print the sql statements to output
555 sqlalchemy.db1.echo = false
554 sqlalchemy.db1.echo = false
556
555
557 ; recycle the connections after this amount of seconds
556 ; recycle the connections after this amount of seconds
558 sqlalchemy.db1.pool_recycle = 3600
557 sqlalchemy.db1.pool_recycle = 3600
559
558
560 ; the number of connections to keep open inside the connection pool.
559 ; the number of connections to keep open inside the connection pool.
561 ; 0 indicates no limit
560 ; 0 indicates no limit
562 ; the general calculus with gevent is:
561 ; the general calculus with gevent is:
563 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
562 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
564 ; then increase pool size + max overflow so that they add up to 500.
563 ; then increase pool size + max overflow so that they add up to 500.
565 #sqlalchemy.db1.pool_size = 5
564 #sqlalchemy.db1.pool_size = 5
566
565
567 ; The number of connections to allow in connection pool "overflow", that is
566 ; The number of connections to allow in connection pool "overflow", that is
568 ; connections that can be opened above and beyond the pool_size setting,
567 ; connections that can be opened above and beyond the pool_size setting,
569 ; which defaults to five.
568 ; which defaults to five.
570 #sqlalchemy.db1.max_overflow = 10
569 #sqlalchemy.db1.max_overflow = 10
571
570
572 ; Connection check ping, used to detect broken database connections
571 ; Connection check ping, used to detect broken database connections
573 ; could be enabled to better handle cases if MySQL has gone away errors
572 ; could be enabled to better handle cases if MySQL has gone away errors
574 #sqlalchemy.db1.ping_connection = true
573 #sqlalchemy.db1.ping_connection = true
575
574
576 ; ##########
575 ; ##########
577 ; VCS CONFIG
576 ; VCS CONFIG
578 ; ##########
577 ; ##########
579 vcs.server.enable = true
578 vcs.server.enable = true
580 vcs.server = vcsserver:10010
579 vcs.server = vcsserver:10010
581
580
582 ; Web server connectivity protocol, responsible for web based VCS operations
581 ; Web server connectivity protocol, responsible for web based VCS operations
583 ; Available protocols are:
582 ; Available protocols are:
584 ; `http` - use http-rpc backend (default)
583 ; `http` - use http-rpc backend (default)
585 vcs.server.protocol = http
584 vcs.server.protocol = http
586
585
587 ; Push/Pull operations protocol, available options are:
586 ; Push/Pull operations protocol, available options are:
588 ; `http` - use http-rpc backend (default)
587 ; `http` - use http-rpc backend (default)
589 vcs.scm_app_implementation = http
588 vcs.scm_app_implementation = http
590
589
591 ; Push/Pull operations hooks protocol, available options are:
590 ; Push/Pull operations hooks protocol, available options are:
592 ; `http` - use http-rpc backend (default)
591 ; `http` - use http-rpc backend (default)
593 ; `celery` - use celery based hooks
592 ; `celery` - use celery based hooks
594 vcs.hooks.protocol = http
593 vcs.hooks.protocol = http
595
594
596 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
595 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
597 ; accessible via network.
596 ; accessible via network.
598 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
597 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
599 vcs.hooks.host = *
598 vcs.hooks.host = *
600
599
601 ; Start VCSServer with this instance as a subprocess, useful for development
600 ; Start VCSServer with this instance as a subprocess, useful for development
602 vcs.start_server = false
601 vcs.start_server = false
603
602
604 ; List of enabled VCS backends, available options are:
603 ; List of enabled VCS backends, available options are:
605 ; `hg` - mercurial
604 ; `hg` - mercurial
606 ; `git` - git
605 ; `git` - git
607 ; `svn` - subversion
606 ; `svn` - subversion
608 vcs.backends = hg, git, svn
607 vcs.backends = hg, git, svn
609
608
610 ; Wait this number of seconds before killing connection to the vcsserver
609 ; Wait this number of seconds before killing connection to the vcsserver
611 vcs.connection_timeout = 3600
610 vcs.connection_timeout = 3600
612
611
613 ; Cache flag to cache vcsserver remote calls locally
612 ; Cache flag to cache vcsserver remote calls locally
614 ; It uses cache_region `cache_repo`
613 ; It uses cache_region `cache_repo`
615 vcs.methods.cache = false
614 vcs.methods.cache = false
616
615
617 ; ####################################################
616 ; ####################################################
618 ; Subversion proxy support (mod_dav_svn)
617 ; Subversion proxy support (mod_dav_svn)
619 ; Maps RhodeCode repo groups into SVN paths for Apache
618 ; Maps RhodeCode repo groups into SVN paths for Apache
620 ; ####################################################
619 ; ####################################################
621
620
622 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
621 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
623 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
622 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
624 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
623 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
625 #vcs.svn.compatible_version = 1.8
624 #vcs.svn.compatible_version = 1.8
626
625
627 ; Enable SVN proxy of requests over HTTP
626 ; Enable SVN proxy of requests over HTTP
628 vcs.svn.proxy.enabled = true
627 vcs.svn.proxy.enabled = true
629
628
630 ; host to connect to running SVN subsystem
629 ; host to connect to running SVN subsystem
631 vcs.svn.proxy.host = http://svn:8090
630 vcs.svn.proxy.host = http://svn:8090
632
631
633 ; Enable or disable the config file generation.
632 ; Enable or disable the config file generation.
634 svn.proxy.generate_config = false
633 svn.proxy.generate_config = false
635
634
636 ; Generate config file with `SVNListParentPath` set to `On`.
635 ; Generate config file with `SVNListParentPath` set to `On`.
637 svn.proxy.list_parent_path = true
636 svn.proxy.list_parent_path = true
638
637
639 ; Set location and file name of generated config file.
638 ; Set location and file name of generated config file.
640 svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf
639 svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf
641
640
642 ; alternative mod_dav config template. This needs to be a valid mako template
641 ; alternative mod_dav config template. This needs to be a valid mako template
643 ; Example template can be found in the source code:
642 ; Example template can be found in the source code:
644 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
643 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
645 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
644 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
646
645
647 ; Used as a prefix to the `Location` block in the generated config file.
646 ; Used as a prefix to the `Location` block in the generated config file.
648 ; In most cases it should be set to `/`.
647 ; In most cases it should be set to `/`.
649 svn.proxy.location_root = /
648 svn.proxy.location_root = /
650
649
651 ; Command to reload the mod dav svn configuration on change.
650 ; Command to reload the mod dav svn configuration on change.
652 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
651 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
653 ; Make sure user who runs RhodeCode process is allowed to reload Apache
652 ; Make sure user who runs RhodeCode process is allowed to reload Apache
654 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
653 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
655
654
656 ; If the timeout expires before the reload command finishes, the command will
655 ; If the timeout expires before the reload command finishes, the command will
657 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
656 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
658 #svn.proxy.reload_timeout = 10
657 #svn.proxy.reload_timeout = 10
659
658
660 ; ####################
659 ; ####################
661 ; SSH Support Settings
660 ; SSH Support Settings
662 ; ####################
661 ; ####################
663
662
664 ; Defines if a custom authorized_keys file should be created and written on
663 ; Defines if a custom authorized_keys file should be created and written on
665 ; any change user ssh keys. Setting this to false also disables possibility
664 ; any change user ssh keys. Setting this to false also disables possibility
666 ; of adding SSH keys by users from web interface. Super admins can still
665 ; of adding SSH keys by users from web interface. Super admins can still
667 ; manage SSH Keys.
666 ; manage SSH Keys.
668 ssh.generate_authorized_keyfile = true
667 ssh.generate_authorized_keyfile = true
669
668
670 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
669 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
671 # ssh.authorized_keys_ssh_opts =
670 # ssh.authorized_keys_ssh_opts =
672
671
673 ; Path to the authorized_keys file where the generate entries are placed.
672 ; Path to the authorized_keys file where the generate entries are placed.
674 ; It is possible to have multiple key files specified in `sshd_config` e.g.
673 ; It is possible to have multiple key files specified in `sshd_config` e.g.
675 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
674 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
676 ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode
675 ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode
677
676
678 ; Command to execute the SSH wrapper. The binary is available in the
677 ; Command to execute the SSH wrapper. The binary is available in the
679 ; RhodeCode installation directory.
678 ; RhodeCode installation directory.
680 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
679 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
681 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
680 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
682 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
681 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
683
682
684 ; Allow shell when executing the ssh-wrapper command
683 ; Allow shell when executing the ssh-wrapper command
685 ssh.wrapper_cmd_allow_shell = false
684 ssh.wrapper_cmd_allow_shell = false
686
685
687 ; Enables logging, and detailed output send back to the client during SSH
686 ; Enables logging, and detailed output send back to the client during SSH
688 ; operations. Useful for debugging, shouldn't be used in production.
687 ; operations. Useful for debugging, shouldn't be used in production.
689 ssh.enable_debug_logging = true
688 ssh.enable_debug_logging = true
690
689
691 ; Paths to binary executable, by default they are the names, but we can
690 ; Paths to binary executable, by default they are the names, but we can
692 ; override them if we want to use a custom one
691 ; override them if we want to use a custom one
693 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
692 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
694 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
693 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
695 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
694 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
696
695
697 ; Enables SSH key generator web interface. Disabling this still allows users
696 ; Enables SSH key generator web interface. Disabling this still allows users
698 ; to add their own keys.
697 ; to add their own keys.
699 ssh.enable_ui_key_generator = true
698 ssh.enable_ui_key_generator = true
700
699
701 ; Statsd client config, this is used to send metrics to statsd
700 ; Statsd client config, this is used to send metrics to statsd
702 ; We recommend setting statsd_exported and scrape them using Prometheus
701 ; We recommend setting statsd_exported and scrape them using Prometheus
703 #statsd.enabled = false
702 #statsd.enabled = false
704 #statsd.statsd_host = 0.0.0.0
703 #statsd.statsd_host = 0.0.0.0
705 #statsd.statsd_port = 8125
704 #statsd.statsd_port = 8125
706 #statsd.statsd_prefix =
705 #statsd.statsd_prefix =
707 #statsd.statsd_ipv6 = false
706 #statsd.statsd_ipv6 = false
708
707
709 ; configure logging automatically at server startup set to false
708 ; configure logging automatically at server startup set to false
710 ; to use the below custom logging config.
709 ; to use the below custom logging config.
711 ; RC_LOGGING_FORMATTER
710 ; RC_LOGGING_FORMATTER
712 ; RC_LOGGING_LEVEL
711 ; RC_LOGGING_LEVEL
713 ; env variables can control the settings for logging in case of autoconfigure
712 ; env variables can control the settings for logging in case of autoconfigure
714
713
715 logging.autoconfigure = false
714 logging.autoconfigure = false
716
715
717 ; specify your own custom logging config file to configure logging
716 ; specify your own custom logging config file to configure logging
718 #logging.logging_conf_file = /path/to/custom_logging.ini
717 #logging.logging_conf_file = /path/to/custom_logging.ini
719
718
720 ; Dummy marker to add new entries after.
719 ; Dummy marker to add new entries after.
721 ; Add any custom entries below. Please don't remove this marker.
720 ; Add any custom entries below. Please don't remove this marker.
722 custom.conf = 1
721 custom.conf = 1
723
722
724
723
725 ; #####################
724 ; #####################
726 ; LOGGING CONFIGURATION
725 ; LOGGING CONFIGURATION
727 ; #####################
726 ; #####################
728
727
729 [loggers]
728 [loggers]
730 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile
729 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile
731
730
732 [handlers]
731 [handlers]
733 keys = console, console_sql
732 keys = console, console_sql
734
733
735 [formatters]
734 [formatters]
736 keys = generic, json, color_formatter, color_formatter_sql
735 keys = generic, json, color_formatter, color_formatter_sql
737
736
738 ; #######
737 ; #######
739 ; LOGGERS
738 ; LOGGERS
740 ; #######
739 ; #######
741 [logger_root]
740 [logger_root]
742 level = NOTSET
741 level = NOTSET
743 handlers = console
742 handlers = console
744
743
745 [logger_routes]
744 [logger_routes]
746 level = DEBUG
745 level = DEBUG
747 handlers =
746 handlers =
748 qualname = routes.middleware
747 qualname = routes.middleware
749 ## "level = DEBUG" logs the route matched and routing variables.
748 ## "level = DEBUG" logs the route matched and routing variables.
750 propagate = 1
749 propagate = 1
751
750
752 [logger_sqlalchemy]
751 [logger_sqlalchemy]
753 level = INFO
752 level = INFO
754 handlers = console_sql
753 handlers = console_sql
755 qualname = sqlalchemy.engine
754 qualname = sqlalchemy.engine
756 propagate = 0
755 propagate = 0
757
756
758 [logger_beaker]
757 [logger_beaker]
759 level = DEBUG
758 level = DEBUG
760 handlers =
759 handlers =
761 qualname = beaker.container
760 qualname = beaker.container
762 propagate = 1
761 propagate = 1
763
762
764 [logger_dogpile]
763 [logger_dogpile]
765 level = INFO
764 level = INFO
766 handlers = console
765 handlers = console
767 qualname = dogpile
766 qualname = dogpile
768 propagate = 1
767 propagate = 1
769
768
770 [logger_rhodecode]
769 [logger_rhodecode]
771 level = DEBUG
770 level = DEBUG
772 handlers =
771 handlers =
773 qualname = rhodecode
772 qualname = rhodecode
774 propagate = 1
773 propagate = 1
775
774
776 [logger_ssh_wrapper]
775 [logger_ssh_wrapper]
777 level = DEBUG
776 level = DEBUG
778 handlers =
777 handlers =
779 qualname = ssh_wrapper
778 qualname = ssh_wrapper
780 propagate = 1
779 propagate = 1
781
780
782 [logger_celery]
781 [logger_celery]
783 level = DEBUG
782 level = DEBUG
784 handlers =
783 handlers =
785 qualname = celery
784 qualname = celery
786
785
787
786
788 ; ########
787 ; ########
789 ; HANDLERS
788 ; HANDLERS
790 ; ########
789 ; ########
791
790
792 [handler_console]
791 [handler_console]
793 class = StreamHandler
792 class = StreamHandler
794 args = (sys.stderr, )
793 args = (sys.stderr, )
795 level = DEBUG
794 level = DEBUG
796 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
795 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
797 ; This allows sending properly formatted logs to grafana loki or elasticsearch
796 ; This allows sending properly formatted logs to grafana loki or elasticsearch
798 formatter = generic
797 formatter = generic
799
798
800 [handler_console_sql]
799 [handler_console_sql]
801 ; "level = DEBUG" logs SQL queries and results.
800 ; "level = DEBUG" logs SQL queries and results.
802 ; "level = INFO" logs SQL queries.
801 ; "level = INFO" logs SQL queries.
803 ; "level = WARN" logs neither. (Recommended for production systems.)
802 ; "level = WARN" logs neither. (Recommended for production systems.)
804 class = StreamHandler
803 class = StreamHandler
805 args = (sys.stderr, )
804 args = (sys.stderr, )
806 level = WARN
805 level = WARN
807 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
806 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
808 ; This allows sending properly formatted logs to grafana loki or elasticsearch
807 ; This allows sending properly formatted logs to grafana loki or elasticsearch
809 formatter = generic
808 formatter = generic
810
809
811 ; ##########
810 ; ##########
812 ; FORMATTERS
811 ; FORMATTERS
813 ; ##########
812 ; ##########
814
813
815 [formatter_generic]
814 [formatter_generic]
816 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
815 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
817 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
816 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
818 datefmt = %Y-%m-%d %H:%M:%S
817 datefmt = %Y-%m-%d %H:%M:%S
819
818
820 [formatter_color_formatter]
819 [formatter_color_formatter]
821 class = rhodecode.lib.logging_formatter.ColorFormatter
820 class = rhodecode.lib.logging_formatter.ColorFormatter
822 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
821 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
823 datefmt = %Y-%m-%d %H:%M:%S
822 datefmt = %Y-%m-%d %H:%M:%S
824
823
825 [formatter_color_formatter_sql]
824 [formatter_color_formatter_sql]
826 class = rhodecode.lib.logging_formatter.ColorFormatterSql
825 class = rhodecode.lib.logging_formatter.ColorFormatterSql
827 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
826 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
828 datefmt = %Y-%m-%d %H:%M:%S
827 datefmt = %Y-%m-%d %H:%M:%S
829
828
830 [formatter_json]
829 [formatter_json]
831 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
830 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
832 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
831 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
General Comments 0
You need to be logged in to leave comments. Login now