feat(archive-cache): added extra info on number of evicted caches
super-admin
r5434:6da054fb default
@@ -1,348 +1,352 @@
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import os
20 20 import functools
21 21 import logging
22 22 import typing
23 23 import time
24 24 import zlib
25 25
26 26 from ...ext_json import json
27 27 from ..utils import StatsDB, NOT_GIVEN, ShardFileReader, EVICTION_POLICY, format_size
28 28 from ..lock import GenerationLock
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class BaseShard:
34 34 storage_type: str = ''
35 35 fs = None
36 36
37 37 @classmethod
38 38 def hash(cls, key):
39 39 """Compute portable hash for `key`.
40 40
41 41 :param key: key to hash
42 42 :return: hash value
43 43
44 44 """
45 45 mask = 0xFFFFFFFF
46 46 return zlib.adler32(key.encode('utf-8')) & mask # noqa
47 47
48 48 def _write_file(self, full_path, read_iterator, mode):
49 49 raise NotImplementedError
50 50
51 51 def _get_keyfile(self, key):
52 52 raise NotImplementedError
53 53
54 54 def random_filename(self):
55 55 raise NotImplementedError
56 56
57 57 def _store(self, key, value_reader, metadata, mode):
58 58 (filename, # hash-name
59 59 full_path # full-path/hash-name
60 60 ) = self.random_filename()
61 61
62 62 key_file, key_file_path = self._get_keyfile(key)
63 63
64 64 # STORE METADATA
65 65 _metadata = {
66 66 "version": "v1",
67 67
68 68 "key_file": key_file, # this is the .key.json file storing meta
69 69 "key_file_path": key_file_path, # full path to key_file
70 70 "archive_key": key, # original name we stored archive under, e.g my-archive.zip
71 71 "archive_filename": filename, # the actual filename we stored that file under
72 72 "archive_full_path": full_path,
73 73
74 74 "store_time": time.time(),
75 75 "access_count": 0,
76 76 "access_time": 0,
77 77
78 78 "size": 0
79 79 }
80 80 if metadata:
81 81 _metadata.update(metadata)
82 82
83 83 read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
84 84 size, sha256 = self._write_file(full_path, read_iterator, mode)
85 85 _metadata['size'] = size
86 86 _metadata['sha256'] = sha256
87 87
88 88 # after the archive is written, we create a key file to record the presence of the binary file
89 89 with self.fs.open(key_file_path, 'wb') as f:
90 90 f.write(json.dumps(_metadata))
91 91
92 92 return key, filename, size, _metadata
93 93
94 94 def _fetch(self, key, retry, retry_attempts, retry_backoff):
95 95 if retry is NOT_GIVEN:
96 96 retry = False
97 97 if retry_attempts is NOT_GIVEN:
98 98 retry_attempts = 0
99 99
100 100 if retry and retry_attempts > 0:
101 101 for attempt in range(1, retry_attempts + 1):
102 102 if key in self:
103 103 break
104 104 # we didn't find the key; wait retry_backoff seconds and re-check
105 105 time.sleep(retry_backoff)
106 106
107 107 if key not in self:
108 108 log.exception(f'requested key={key} not found in {self} retry={retry}, attempts={retry_attempts}')
109 109 raise KeyError(key)
110 110
111 111 key_file, key_file_path = self._get_keyfile(key)
112 112 with self.fs.open(key_file_path, 'rb') as f:
113 113 metadata = json.loads(f.read())
114 114
115 115 archive_path = metadata['archive_full_path']
116 116
117 117 try:
118 118 return ShardFileReader(self.fs.open(archive_path, 'rb')), metadata
119 119 finally:
120 120 # update usage stats, count and accessed
121 121 metadata["access_count"] = metadata.get("access_count", 0) + 1
122 122 metadata["access_time"] = time.time()
123 123 log.debug('Updated %s with access snapshot, access_count=%s access_time=%s',
124 124 key_file, metadata['access_count'], metadata['access_time'])
125 125 with self.fs.open(key_file_path, 'wb') as f:
126 126 f.write(json.dumps(metadata))
127 127
128 128 def _remove(self, key):
129 129 if key not in self:
130 130 log.exception(f'requested key={key} not found in {self}')
131 131 raise KeyError(key)
132 132
133 133 key_file, key_file_path = self._get_keyfile(key)
134 134 with self.fs.open(key_file_path, 'rb') as f:
135 135 metadata = json.loads(f.read())
136 136
137 137 archive_path = metadata['archive_full_path']
138 138 self.fs.rm(archive_path)
139 139 self.fs.rm(key_file_path)
140 140 return 1
141 141
142 142 @property
143 143 def storage_medium(self):
144 144 return getattr(self, self.storage_type)
145 145
146 146 @property
147 147 def key_suffix(self):
148 148 return 'key.json'
149 149
150 150 def __contains__(self, key):
151 151 """Return `True` if `key` matching item is found in cache.
152 152
153 153 :param key: key matching item
154 154 :return: True if key matching item
155 155
156 156 """
157 157 key_file, key_file_path = self._get_keyfile(key)
158 158 return self.fs.exists(key_file_path)
159 159
160 160
161 161 class BaseCache:
162 162 _locking_url: str = ''
163 163 _storage_path: str = ''
164 164 _config = {}
165 165 retry = False
166 166 retry_attempts = 0
167 167 retry_backoff = 1
168 168 _shards = tuple()
169 169
170 170 def __contains__(self, key):
171 171 """Return `True` if `key` matching item is found in cache.
172 172
173 173 :param key: key matching item
174 174 :return: True if key matching item
175 175
176 176 """
177 177 return self.has_key(key)
178 178
179 179 def __repr__(self):
180 180 return f'<{self.__class__.__name__}(storage={self._storage_path})>'
181 181
182 182 @classmethod
183 183 def gb_to_bytes(cls, gb):
184 184 return gb * (1024 ** 3)
185 185
186 186 @property
187 187 def storage_path(self):
188 188 return self._storage_path
189 189
190 190 @classmethod
191 191 def get_stats_db(cls):
192 192 return StatsDB()
193 193
194 194 def get_conf(self, key, pop=False):
195 195 if key not in self._config:
196 196 raise ValueError(f"No configuration key '{key}', please make sure it exists in archive_cache config")
197 197 val = self._config[key]
198 198 if pop:
199 199 del self._config[key]
200 200 return val
201 201
202 202 def _get_shard(self, key):
203 203 raise NotImplementedError
204 204
205 205 def _get_size(self, shard, archive_path):
206 206 raise NotImplementedError
207 207
208 208 def store(self, key, value_reader, metadata=None):
209 209 shard = self._get_shard(key)
210 210 return shard.store(key, value_reader, metadata)
211 211
212 212 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN) -> tuple[typing.BinaryIO, dict]:
213 213 """
214 214 Return file handle corresponding to `key` from specific shard cache.
215 215 """
216 216 if retry is NOT_GIVEN:
217 217 retry = self.retry
218 218 if retry_attempts is NOT_GIVEN:
219 219 retry_attempts = self.retry_attempts
220 220 retry_backoff = self.retry_backoff
221 221
222 222 shard = self._get_shard(key)
223 223 return shard.fetch(key, retry=retry, retry_attempts=retry_attempts, retry_backoff=retry_backoff)
224 224
225 225 def remove(self, key):
226 226 shard = self._get_shard(key)
227 227 return shard.remove(key)
228 228
229 229 def has_key(self, archive_key):
230 230 """Return `True` if `key` matching item is found in cache.
231 231
232 232 :param archive_key: key for item, this is a unique archive name we want to store data under. e.g my-archive-svn.zip
233 233 :return: True if key is found
234 234
235 235 """
236 236 shard = self._get_shard(archive_key)
237 237 return archive_key in shard
238 238
239 239 def iter_keys(self):
240 240 for shard in self._shards:
241 241 if shard.fs.exists(shard.storage_medium):
242 242 for path, _dirs, _files in shard.fs.walk(shard.storage_medium):
243 243 for key_file_path in _files:
244 244 if key_file_path.endswith(shard.key_suffix):
245 245 yield shard, key_file_path
246 246
247 247 def get_lock(self, lock_key):
248 248 return GenerationLock(lock_key, self._locking_url)
249 249
250 def evict(self, policy=None, size_limit=None) -> int:
250 def evict(self, policy=None, size_limit=None) -> dict:
251 251 """
252 252 Remove old items based on the conditions
253 253
254 254
255 255 explanation of this algo:
256 256 iterate over each shard, then for each shard iterate over the .key files
257 257 and read the metadata they store. This gives us a full list of keys and cached archives, with their sizes,
258 258 access data, creation times, and access counts.
259 259
260 Store that into a memory DB so we can run different sorting strategies easily.
260 Store that into a memory DB in order to run different sorting strategies easily.
261 261 Summing the sizes is then a single SQL query.
262 262
263 263 Then we run a sorting strategy based on eviction policy.
264 264 We iterate over the sorted keys, removing each one and checking whether we are back under the overall limit.
265 265 """
266
266 removal_info = {
267 "removed_items": 0,
268 "removed_size": 0
269 }
267 270 policy = policy or self._eviction_policy
268 271 size_limit = size_limit or self._cache_size_limit
269 272
270 273 select_policy = EVICTION_POLICY[policy]['evict']
271 274
272 275 log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
273 276 policy, format_size(size_limit))
274 277
275 278 if select_policy is None:
276 return 0
279 return removal_info
277 280
278 281 db = self.get_stats_db()
279 282
280 283 data = []
281 284 cnt = 1
282 285
283 286 for shard, key_file in self.iter_keys():
284 287 with shard.fs.open(os.path.join(shard.storage_medium, key_file), 'rb') as f:
285 288 metadata = json.loads(f.read())
286 289
287 290 key_file_path = os.path.join(shard.storage_medium, key_file)
288 291
289 292 archive_key = metadata['archive_key']
290 293 archive_path = metadata['archive_full_path']
291 294
292 295 size = metadata.get('size')
293 296 if not size:
294 297 # in case we don't have size re-calc it...
295 298 size = self._get_size(shard, archive_path)
296 299
297 300 data.append([
298 301 cnt,
299 302 key_file,
300 303 key_file_path,
301 304 archive_key,
302 305 archive_path,
303 306 metadata.get('store_time', 0),
304 307 metadata.get('access_time', 0),
305 308 metadata.get('access_count', 0),
306 309 size,
307 310 ])
308 311 cnt += 1
309 312
310 313 # Insert bulk data using executemany
311 314 db.bulk_insert(data)
312 315
313 316 total_size = db.get_total_size()
314 317 log.debug('Analyzed %s keys, occupying: %s, running eviction to match %s',
315 318 len(data), format_size(total_size), format_size(size_limit))
316 319
317 320 removed_items = 0
318 321 removed_size = 0
319 322 for key_file, archive_key, size in db.get_sorted_keys(select_policy):
320 323 # simulate removal impact BEFORE removal
321 324 total_size -= size
322 325
323 326 if total_size <= size_limit:
324 327 # we obtained what we wanted...
325 328 break
326 329
327 330 self.remove(archive_key)
328 331 removed_items += 1
329 332 removed_size += size
330
333 removal_info['removed_items'] = removed_items
334 removal_info['removed_size'] = removed_size
331 335 log.debug('Removed %s cache archives, and reduced size by: %s',
332 336 removed_items, format_size(removed_size))
333 return removed_items
337 return removal_info
334 338
335 339 def get_statistics(self):
336 340 total_files = 0
337 341 total_size = 0
338 342 meta = {}
339 343
340 344 for shard, key_file in self.iter_keys():
341 345 json_key = f"{shard.storage_medium}/{key_file}"
342 346 with shard.fs.open(json_key, 'rb') as f:
343 347 total_files += 1
344 348 metadata = json.loads(f.read())
345 349 total_size += metadata['size']
346 350
347 351 return total_files, total_size, meta
348 352
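Note on the API change above: evict() now returns a dict with `removed_items` and `removed_size` instead of a bare count. A minimal sketch of a caller consuming the new contract (the `report_eviction` helper is hypothetical and not part of this commit; `cache` stands for any configured BaseCache subclass instance):

def report_eviction(cache):
    # evict() returns {'removed_items': int, 'removed_size': int};
    # gb_to_bytes() converts a human-friendly GB limit into bytes.
    removal_info = cache.evict(size_limit=cache.gb_to_bytes(0.005))  # ~5 MB
    print(f"evicted {removal_info['removed_items']} archives, "
          f"freed {removal_info['removed_size']} bytes")
    return removal_info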
@@ -1,105 +1,105 @@
1 1 # Copyright (C) 2016-2023 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import time
20 20 import pytest
21 21 import rhodecode
22 22 import os
23 23 import shutil
24 24 from tempfile import mkdtemp
25 25
26 26 from rhodecode.lib import archive_cache
27 27
28 28
29 29 def file_reader(temp_store):
30 30 with open(temp_store, 'w') as f:
31 31 for cnt in range(10000):
32 32 f.write(str(cnt))
33 33 return open(temp_store, 'rb')
34 34
35 35
36 36 @pytest.fixture()
37 37 def d_cache_instance(ini_settings):
38 38 config = ini_settings
39 39 d_cache = archive_cache.get_archival_cache_store(config=config, always_init=True)
40 40 return d_cache
41 41
42 42
43 43 @pytest.mark.usefixtures('app')
44 44 class TestArchiveCaches(object):
45 45
46 46 def test_archivecache_empty_stats(self, d_cache_instance):
47 47 d_cache = d_cache_instance
48 48 shutil.rmtree(d_cache._directory)
49 49
50 50 stats = d_cache.get_statistics()
51 51 assert (0, 0, {}) == stats
52 52
53 53 def test_archivecache_store_keys(self, d_cache_instance, tmp_path):
54 54 d_cache = d_cache_instance
55 55 shutil.rmtree(d_cache._directory)
56 56
57 57 for n in range(100):
58 58
59 59 archive_name = f'my-archive-abc-{n}.zip'
60 60 temp_archive_path = os.path.join(tmp_path, archive_name)
61 61 d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})
62 62 reader, meta = d_cache.fetch(archive_name)
63 63 content = reader.read()
64 64 assert content == open(temp_archive_path, 'rb').read()
65 65
66 66 stats = d_cache.get_statistics()
67 67 assert (100, 3889000, {}) == stats
68 68
69 69 def test_archivecache_remove_keys(self, d_cache_instance, tmp_path):
70 70 d_cache = d_cache_instance
71 71 shutil.rmtree(d_cache._directory)
72 72
73 73 n = 1
74 74 archive_name = f'my-archive-abc-{n}.zip'
75 75 temp_archive_path = os.path.join(tmp_path, archive_name)
76 76
77 77 d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})
78 78 stats = d_cache.get_statistics()
79 79 assert (1, 38890, {}) == stats
80 80
81 81 assert 1 == d_cache.remove(archive_name)
82 82
83 83 stats = d_cache.get_statistics()
84 84 assert (0, 0, {}) == stats
85 85
86 86 def test_archivecache_evict_keys(self, d_cache_instance, tmp_path):
87 87 d_cache = d_cache_instance
88 88 shutil.rmtree(d_cache._directory)
89 89 tries = 500
90 90 for n in range(tries):
91 91
92 92 archive_name = f'my-archive-abc-{n}.zip'
93 93 temp_archive_path = os.path.join(tmp_path, archive_name)
94 94 d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})
95 95
96 96 stats = d_cache.get_statistics()
97 97 assert (tries, 19445000, {}) == stats
98 98 evict_to = 0.005 # around (5mb)
99 99 evicted_items = d_cache.evict(size_limit=d_cache.gb_to_bytes(evict_to))
100 100 evicted = 361
101 assert evicted == evicted_items
101 assert {'removed_items': evicted, 'removed_size': 14039290} == evicted_items
102 102
103 103 stats = d_cache.get_statistics()
104 104 assert (tries - evicted, 5405710, {}) == stats
105 105
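The magic numbers asserted above follow directly from file_reader(), which writes the concatenated decimal digits of 0..9999 into each archive, so an archive's size is a pure digit count. A quick self-contained check of the arithmetic, using only what the tests themselves define:

# per archive: 10 one-digit + 90 two-digit + 900 three-digit + 9000 four-digit numbers
size = sum(len(str(n)) for n in range(10000))
assert size == 38890                     # single-archive size asserted in the remove test
assert 100 * size == 3889000             # store test: 100 archives
assert 500 * size == 19445000            # evict test: 500 archives before eviction
assert 361 * size == 14039290            # removed_size reported by evict()
assert (500 - 361) * size == 5405710     # size remaining after eviction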
@@ -1,827 +1,827 @@
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG. This applies mostly to a development setup;
30 30 ; host and port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows to set RhodeCode under a prefix in server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in app:main can be overridden by an environment variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; encryption key used to encrypt social plugin tokens,
75 75 ; remote_urls with credentials etc, if not set it defaults to
76 76 ; `beaker.session.secret`
77 77 #rhodecode.encrypted_values.secret =
78 78
79 79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 81 #rhodecode.encrypted_values.strict = false
82 82
83 83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 84 ; fernet is safer, and we strongly recommend switching to it.
85 85 ; Due to backward compatibility aes is used as default.
86 86 #rhodecode.encrypted_values.algorithm = fernet
87 87
88 88 ; Return gzipped responses from RhodeCode (static files/application)
89 89 gzip_responses = false
90 90
91 91 ; Auto-generate javascript routes file on startup
92 92 generate_js_files = false
93 93
94 94 ; System global default language.
95 95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 96 lang = en
97 97
98 98 ; Perform a full repository scan and import on each server start.
99 99 ; Setting this to true could lead to a very long startup time.
100 100 startup.import_repos = true
101 101
102 102 ; URL at which the application is running. This is used for Bootstrapping
103 103 ; requests in context when no web request is available. Used in ishell, or
104 104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 105 app.base_url = http://rhodecode.local
106 106
107 107 ; Host at which the Service API is running.
108 108 app.service_api.host = http://rhodecode.local:10020
109 109
110 110 ; Secret for Service API authentication.
111 111 app.service_api.token =
112 112
113 113 ; Unique application ID. Should be a random unique string for security.
114 114 app_instance_uuid = rc-production
115 115
116 116 ; Cut off limit for large diffs (size in bytes). If overall diff size on
117 117 ; commit, or pull request exceeds this limit this diff will be displayed
118 118 ; partially. E.g 512000 == 512Kb
119 119 cut_off_limit_diff = 1024000
120 120
121 121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
122 122 ; file inside diff which exceeds this limit will be displayed partially.
123 123 ; E.g 128000 == 128Kb
124 124 cut_off_limit_file = 256000
125 125
126 126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
127 127 vcs_full_cache = false
128 128
129 129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
130 130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
131 131 force_https = false
132 132
133 133 ; use Strict-Transport-Security headers
134 134 use_htsts = false
135 135
136 136 ; Set to true if your repos are exposed using the dumb protocol
137 137 git_update_server_info = false
138 138
139 139 ; RSS/ATOM feed options
140 140 rss_cut_off_limit = 256000
141 141 rss_items_per_page = 10
142 142 rss_include_diff = false
143 143
144 144 ; gist URL alias, used to create nicer URLs for gists. This should be a
145 145 ; URL that does rewrites to _admin/gists/{gistid}.
146 146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
147 147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
148 148 gist_alias_url =
149 149
150 150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
151 151 ; used for access.
152 152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
153 153 ; came from the logged-in user who owns this authentication token.
154 154 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
155 155 ; authentication token. Such a view would only be accessible when used together
156 156 ; with this authentication token
157 157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
158 158 ; The list should be "," separated and on a single line.
159 159 ; Most common views to enable:
160 160
161 161 # RepoCommitsView:repo_commit_download
162 162 # RepoCommitsView:repo_commit_patch
163 163 # RepoCommitsView:repo_commit_raw
164 164 # RepoCommitsView:repo_commit_raw@TOKEN
165 165 # RepoFilesView:repo_files_diff
166 166 # RepoFilesView:repo_archivefile
167 167 # RepoFilesView:repo_file_raw
168 168 # GistView:*
169 169 api_access_controllers_whitelist =
170 170
171 171 ; Default encoding used to convert from and to unicode
172 172 ; can be also a comma separated list of encoding in case of mixed encodings
173 173 default_encoding = UTF-8
174 174
175 175 ; instance-id prefix
176 176 ; a prefix key for this instance used for cache invalidation when running
177 177 ; multiple instances of RhodeCode, make sure it's globally unique for
178 178 ; all running RhodeCode instances. Leave empty if you don't use it
179 179 instance_id =
180 180
181 181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
182 182 ; of an authentication plugin even if it is disabled by its settings.
183 183 ; This could be useful if you are unable to log in to the system due to broken
184 184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
185 185 ; module to log in again and fix the settings.
186 186 ; Available builtin plugin IDs (hash is part of the ID):
187 187 ; egg:rhodecode-enterprise-ce#rhodecode
188 188 ; egg:rhodecode-enterprise-ce#pam
189 189 ; egg:rhodecode-enterprise-ce#ldap
190 190 ; egg:rhodecode-enterprise-ce#jasig_cas
191 191 ; egg:rhodecode-enterprise-ce#headers
192 192 ; egg:rhodecode-enterprise-ce#crowd
193 193
194 194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
195 195
196 196 ; Flag to control loading of legacy plugins in py:/path format
197 197 auth_plugin.import_legacy_plugins = true
198 198
199 199 ; alternative return HTTP header for failed authentication. Default HTTP
200 200 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
201 201 ; handling that, causing a series of failed authentication calls.
202 202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
203 203 ; This will be served instead of default 401 on bad authentication
204 204 auth_ret_code =
205 205
206 206 ; use special detection method when serving auth_ret_code, instead of serving
207 207 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
208 208 ; and then serve auth_ret_code to clients
209 209 auth_ret_code_detection = false
210 210
211 211 ; locking return code. When repository is locked return this HTTP code. 2XX
212 212 ; codes don't break the transactions while 4XX codes do
213 213 lock_ret_code = 423
214 214
215 215 ; Filesystem location where repositories should be stored
216 216 repo_store.path = /var/opt/rhodecode_repo_store
217 217
218 218 ; allows setting up custom hooks in the settings page
219 219 allow_custom_hooks_settings = true
220 220
221 221 ; Generated license token required for EE edition license.
222 222 ; New generated token value can be found in Admin > settings > license page.
223 223 license_token = abra-cada-bra1-rce3
224 224
225 225 ; This flag hides sensitive information on the license page such as token, and license data
226 226 license.hide_license_info = false
227 227
228 228 ; supervisor connection uri, for managing supervisor and logs.
229 229 supervisor.uri =
230 230
231 231 ; supervisord group name/id we only want this RC instance to handle
232 232 supervisor.group_id = dev
233 233
234 234 ; Display extended labs settings
235 235 labs_settings_active = true
236 236
237 237 ; Custom exception store path, defaults to TMPDIR
238 238 ; This is used to store exceptions from RhodeCode in a shared directory
239 239 #exception_tracker.store_path =
240 240
241 241 ; Send email with exception details when it happens
242 242 #exception_tracker.send_email = false
243 243
244 244 ; Comma separated list of recipients for exception emails,
245 245 ; e.g admin@rhodecode.com,devops@rhodecode.com
246 246 ; Can be left empty, then emails will be sent to ALL super-admins
247 247 #exception_tracker.send_email_recipients =
248 248
249 249 ; optional prefix to Add to email Subject
250 250 #exception_tracker.email_prefix = [RHODECODE ERROR]
251 251
252 252 ; File store configuration. This is used to store and serve uploaded files
253 253 file_store.enabled = true
254 254
255 255 ; Storage backend, available options are: local
256 256 file_store.backend = local
257 257
258 258 ; path to store the uploaded binaries and artifacts
259 259 file_store.storage_path = /var/opt/rhodecode_data/file_store
260 260
261 261
262 262 ; Redis url to acquire/check generation of archives locks
263 263 archive_cache.locking.url = redis://redis:6379/1
264 264
265 265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
266 266 archive_cache.backend.type = filesystem
267 267
268 268 ; url for s3 compatible storage that allows to upload artifacts
269 269 ; e.g http://minio:9000
270 270 archive_cache.objectstore.url = http://s3-minio:9000
271 271
272 272 ; key for s3 auth
273 273 archive_cache.objectstore.key = key
274 274
275 275 ; secret for s3 auth
276 276 archive_cache.objectstore.secret = secret
277 277
278 278 ; number of sharded buckets to create to distribute archives across
279 279 ; default is 8 shards
280 280 archive_cache.objectstore.bucket_shards = 8
281 281
282 282 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
283 283 archive_cache.objectstore.retry = false
284 284
285 285 ; number of seconds to wait for next try using retry
286 286 archive_cache.objectstore.retry_backoff = 1
287 287
288 288 ; how many times to retry a fetch from this backend
289 289 archive_cache.objectstore.retry_attempts = 10
290 290
291 291 ; Default is $cache_dir/archive_cache if not set
292 292 ; Generated repo archives will be cached at this location
293 293 ; and served from the cache during subsequent requests for the same archive of
294 294 ; the repository. It is important that this path is shared across filesystems and with
295 295 ; RhodeCode and vcsserver
296 296 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
297 297
298 298 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
299 299 archive_cache.filesystem.cache_size_gb = 2
300 300
301 301 ; Eviction policy used to clear out after cache_size_gb limit is reached
302 302 archive_cache.filesystem.eviction_policy = least-recently-stored
303 303
304 304 ; By default cache uses sharding technique, this specifies how many shards are there
305 305 ; default is 8 shards
306 306 archive_cache.filesystem.cache_shards = 8
307 307
308 308 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
309 309 archive_cache.filesystem.retry = false
310 310
311 311 ; number of seconds to wait for next try using retry
312 312 archive_cache.filesystem.retry_backoff = 1
313 313
314 314 ; how many times to retry a fetch from this backend
315 315 archive_cache.filesystem.retry_attempts = 10
316 316
317 317
318 318 ; #############
319 319 ; CELERY CONFIG
320 320 ; #############
321 321
322 322 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
323 323
324 324 use_celery = false
325 325
326 326 ; path to store schedule database
327 327 #celerybeat-schedule.path =
328 328
329 329 ; connection url to the message broker (default redis)
330 330 celery.broker_url = redis://redis:6379/8
331 331
332 332 ; results backend to get results for (default redis)
333 333 celery.result_backend = redis://redis:6379/8
334 334
335 335 ; rabbitmq example
336 336 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
337 337
338 338 ; maximum tasks to execute before worker restart
339 339 celery.max_tasks_per_child = 20
340 340
341 341 ; tasks will never be sent to the queue, but executed locally instead.
342 342 celery.task_always_eager = true
343 343 celery.task_store_eager_result = true
344 344
345 345 ; #############
346 346 ; DOGPILE CACHE
347 347 ; #############
348 348
349 349 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
350 350 ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
351 cache_dir = /var/opt/rhodecode_data
351 cache_dir = %(here)s/rc-test-data
352 352
353 353 ; *********************************************
354 354 ; `sql_cache_short` cache for heavy SQL queries
355 355 ; Only supported backend is `memory_lru`
356 356 ; *********************************************
357 357 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
358 358 rc_cache.sql_cache_short.expiration_time = 0
359 359
360 360
361 361 ; *****************************************************
362 362 ; `cache_repo_longterm` cache for repo object instances
363 363 ; Only supported backend is `memory_lru`
364 364 ; *****************************************************
365 365 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
366 366 ; by default we use 30 Days, cache is still invalidated on push
367 367 rc_cache.cache_repo_longterm.expiration_time = 2592000
368 368 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
369 369 rc_cache.cache_repo_longterm.max_size = 10000
370 370
371 371
372 372 ; *********************************************
373 373 ; `cache_general` cache for general purpose use
374 374 ; for simplicity use rc.file_namespace backend,
375 375 ; for performance and scale use rc.redis
376 376 ; *********************************************
377 377 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
378 378 rc_cache.cache_general.expiration_time = 43200
379 379 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
380 380 rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db
381 381
382 382 ; alternative `cache_general` redis backend with distributed lock
383 383 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
384 384 #rc_cache.cache_general.expiration_time = 300
385 385
386 386 ; redis_expiration_time needs to be greater than expiration_time
387 387 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
388 388
389 389 #rc_cache.cache_general.arguments.host = localhost
390 390 #rc_cache.cache_general.arguments.port = 6379
391 391 #rc_cache.cache_general.arguments.db = 0
392 392 #rc_cache.cache_general.arguments.socket_timeout = 30
393 393 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
394 394 #rc_cache.cache_general.arguments.distributed_lock = true
395 395
396 396 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
397 397 #rc_cache.cache_general.arguments.lock_auto_renewal = true
398 398
399 399 ; *************************************************
400 400 ; `cache_perms` cache for permission tree, auth TTL
401 401 ; for simplicity use rc.file_namespace backend,
402 402 ; for performance and scale use rc.redis
403 403 ; *************************************************
404 404 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
405 405 rc_cache.cache_perms.expiration_time = 0
406 406 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
407 407 rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db
408 408
409 409 ; alternative `cache_perms` redis backend with distributed lock
410 410 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
411 411 #rc_cache.cache_perms.expiration_time = 300
412 412
413 413 ; redis_expiration_time needs to be greater than expiration_time
414 414 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
415 415
416 416 #rc_cache.cache_perms.arguments.host = localhost
417 417 #rc_cache.cache_perms.arguments.port = 6379
418 418 #rc_cache.cache_perms.arguments.db = 0
419 419 #rc_cache.cache_perms.arguments.socket_timeout = 30
420 420 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
421 421 #rc_cache.cache_perms.arguments.distributed_lock = true
422 422
423 423 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
424 424 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
425 425
426 426 ; ***************************************************
427 427 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
428 428 ; for simplicity use rc.file_namespace backend,
429 429 ; for performance and scale use rc.redis
430 430 ; ***************************************************
431 431 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
432 432 rc_cache.cache_repo.expiration_time = 2592000
433 433 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
434 434 rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db
435 435
436 436 ; alternative `cache_repo` redis backend with distributed lock
437 437 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
438 438 #rc_cache.cache_repo.expiration_time = 2592000
439 439
440 440 ; redis_expiration_time needs to be greater than expiration_time
441 441 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
442 442
443 443 #rc_cache.cache_repo.arguments.host = localhost
444 444 #rc_cache.cache_repo.arguments.port = 6379
445 445 #rc_cache.cache_repo.arguments.db = 1
446 446 #rc_cache.cache_repo.arguments.socket_timeout = 30
447 447 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
448 448 #rc_cache.cache_repo.arguments.distributed_lock = true
449 449
450 450 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
451 451 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
452 452
453 453 ; ##############
454 454 ; BEAKER SESSION
455 455 ; ##############
456 456
457 457 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
458 458 ; types are file, ext:redis, ext:database, ext:memcached
459 459 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
460 460 beaker.session.type = file
461 461 beaker.session.data_dir = %(here)s/rc-tests/data/sessions
462 462
463 463 ; Redis based sessions
464 464 #beaker.session.type = ext:redis
465 465 #beaker.session.url = redis://redis:6379/2
466 466
467 467 ; DB based session, fast, and allows easy management over logged in users
468 468 #beaker.session.type = ext:database
469 469 #beaker.session.table_name = db_session
470 470 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
471 471 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
472 472 #beaker.session.sa.pool_recycle = 3600
473 473 #beaker.session.sa.echo = false
474 474
475 475 beaker.session.key = rhodecode
476 476 beaker.session.secret = test-rc-uytcxaz
477 477 beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock
478 478
479 479 ; Secure encrypted cookie. Requires AES and AES python libraries
480 480 ; you must disable beaker.session.secret to use this
481 481 #beaker.session.encrypt_key = key_for_encryption
482 482 #beaker.session.validate_key = validation_key
483 483
484 484 ; Sets session as invalid (also logging out the user) if it has not been
485 485 ; accessed for a given amount of time in seconds
486 486 beaker.session.timeout = 2592000
487 487 beaker.session.httponly = true
488 488
489 489 ; Path to use for the cookie. Set to prefix if you use prefix middleware
490 490 #beaker.session.cookie_path = /custom_prefix
491 491
492 492 ; Set https secure cookie
493 493 beaker.session.secure = false
494 494
495 495 ; default cookie expiration time in seconds, set to `true` to expire
496 496 ; at browser close
497 497 #beaker.session.cookie_expires = 3600
498 498
499 499 ; #############################
500 500 ; SEARCH INDEXING CONFIGURATION
501 501 ; #############################
502 502
503 503 ; Full text search indexer is available in rhodecode-tools under
504 504 ; `rhodecode-tools index` command
505 505
506 506 ; WHOOSH Backend, doesn't require additional services to run
507 507 ; it works well with a few dozen repos
508 508 search.module = rhodecode.lib.index.whoosh
509 509 search.location = %(here)s/rc-tests/data/index
510 510
511 511 ; ####################
512 512 ; CHANNELSTREAM CONFIG
513 513 ; ####################
514 514
515 515 ; channelstream enables persistent connections and live notification
516 516 ; in the system. It's also used by the chat system
517 517
518 518 channelstream.enabled = false
519 519
520 520 ; server address for channelstream server on the backend
521 521 channelstream.server = channelstream:9800
522 522
523 523 ; location of the channelstream server from outside world
524 524 ; use ws:// for http or wss:// for https. This address needs to be handled
525 525 ; by external HTTP server such as Nginx or Apache
526 526 ; see Nginx/Apache configuration examples in our docs
527 527 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
528 528 channelstream.secret = ENV_GENERATED
529 529 channelstream.history.location = %(here)s/rc-tests/channelstream_history
530 530
531 531 ; Internal application path that Javascript uses to connect into.
532 532 ; If you use proxy-prefix the prefix should be added before /_channelstream
533 533 channelstream.proxy_path = /_channelstream
534 534
535 535
536 536 ; ##############################
537 537 ; MAIN RHODECODE DATABASE CONFIG
538 538 ; ##############################
539 539
540 540 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
541 541 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
542 542 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
543 543 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
544 544 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
545 545
546 546 sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30
547 547
548 548 ; see sqlalchemy docs for other advanced settings
549 549 ; print the sql statements to output
550 550 sqlalchemy.db1.echo = false
551 551
552 552 ; recycle the connections after this amount of seconds
553 553 sqlalchemy.db1.pool_recycle = 3600
554 554
555 555 ; the number of connections to keep open inside the connection pool.
556 556 ; 0 indicates no limit
557 557 ; the general calculus with gevent is:
558 558 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
559 559 ; then increase pool size + max overflow so that they add up to 500.
560 560 #sqlalchemy.db1.pool_size = 5
561 561
562 562 ; The number of connections to allow in connection pool "overflow", that is
563 563 ; connections that can be opened above and beyond the pool_size setting,
564 564 ; which defaults to five.
565 565 #sqlalchemy.db1.max_overflow = 10
566 566
567 567 ; Connection check ping, used to detect broken database connections
568 568 ; could be enabled to better handle cases if MySQL has gone away errors
569 569 #sqlalchemy.db1.ping_connection = true
570 570
571 571 ; ##########
572 572 ; VCS CONFIG
573 573 ; ##########
574 574 vcs.server.enable = true
575 575 vcs.server = vcsserver:10010
576 576
577 577 ; Web server connectivity protocol, responsible for web based VCS operations
578 578 ; Available protocols are:
579 579 ; `http` - use http-rpc backend (default)
580 580 vcs.server.protocol = http
581 581
582 582 ; Push/Pull operations protocol, available options are:
583 583 ; `http` - use http-rpc backend (default)
584 584 vcs.scm_app_implementation = http
585 585
586 586 ; Push/Pull operations hooks protocol, available options are:
587 587 ; `http` - use http-rpc backend (default)
588 588 ; `celery` - use celery based hooks
589 589 vcs.hooks.protocol = http
590 590
591 591 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
592 592 ; accessible via network.
593 593 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
594 594 vcs.hooks.host = *
595 595
596 596 ; Start VCSServer with this instance as a subprocess, useful for development
597 597 vcs.start_server = false
598 598
599 599 ; List of enabled VCS backends, available options are:
600 600 ; `hg` - mercurial
601 601 ; `git` - git
602 602 ; `svn` - subversion
603 603 vcs.backends = hg, git, svn
604 604
605 605 ; Wait this number of seconds before killing connection to the vcsserver
606 606 vcs.connection_timeout = 3600
607 607
608 608 ; Cache flag to cache vcsserver remote calls locally
609 609 ; It uses cache_region `cache_repo`
610 610 vcs.methods.cache = false
611 611
612 612 ; ####################################################
613 613 ; Subversion proxy support (mod_dav_svn)
614 614 ; Maps RhodeCode repo groups into SVN paths for Apache
615 615 ; ####################################################
616 616
617 617 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
618 618 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
619 619 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
620 620 #vcs.svn.compatible_version = 1.8
621 621
622 622 ; Enable SVN proxy of requests over HTTP
623 623 vcs.svn.proxy.enabled = true
624 624
625 625 ; host to connect to running SVN subsystem
626 626 vcs.svn.proxy.host = http://svn:8090
627 627
628 628 ; Enable or disable the config file generation.
629 629 svn.proxy.generate_config = false
630 630
631 631 ; Generate config file with `SVNListParentPath` set to `On`.
632 632 svn.proxy.list_parent_path = true
633 633
634 634 ; Set location and file name of generated config file.
635 635 svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf
636 636
637 637 ; alternative mod_dav config template. This needs to be a valid mako template
638 638 ; Example template can be found in the source code:
639 639 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
640 640 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
641 641
642 642 ; Used as a prefix to the `Location` block in the generated config file.
643 643 ; In most cases it should be set to `/`.
644 644 svn.proxy.location_root = /
645 645
646 646 ; Command to reload the mod dav svn configuration on change.
647 647 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
648 648 ; Make sure user who runs RhodeCode process is allowed to reload Apache
649 649 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
650 650
651 651 ; If the timeout expires before the reload command finishes, the command will
652 652 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
653 653 #svn.proxy.reload_timeout = 10
654 654
655 655 ; ####################
656 656 ; SSH Support Settings
657 657 ; ####################
658 658
659 659 ; Defines if a custom authorized_keys file should be created and written on
660 660 ; any change to user SSH keys. Setting this to false also disables the possibility
661 661 ; of adding SSH keys by users from the web interface. Super admins can still
662 662 ; manage SSH Keys.
663 663 ssh.generate_authorized_keyfile = true
664 664
665 665 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
666 666 # ssh.authorized_keys_ssh_opts =
667 667
668 668 ; Path to the authorized_keys file where the generate entries are placed.
669 669 ; It is possible to have multiple key files specified in `sshd_config` e.g.
670 670 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
671 671 ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode
672 672
673 673 ; Command to execute the SSH wrapper. The binary is available in the
674 674 ; RhodeCode installation directory.
675 675 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
676 676 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
677 677 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
678 678
679 679 ; Allow shell when executing the ssh-wrapper command
680 680 ssh.wrapper_cmd_allow_shell = false
681 681
682 682 ; Enables logging, and detailed output sent back to the client during SSH
683 683 ; operations. Useful for debugging, shouldn't be used in production.
684 ssh.enable_debug_logging = false
684 ssh.enable_debug_logging = true
685 685
686 686 ; Paths to binary executable, by default they are the names, but we can
687 687 ; override them if we want to use a custom one
688 688 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
689 689 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
690 690 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
691 691
692 692 ; Enables SSH key generator web interface. Disabling this still allows users
693 693 ; to add their own keys.
694 694 ssh.enable_ui_key_generator = true
695 695
696 696 ; Statsd client config, this is used to send metrics to statsd
697 697 ; We recommend setting statsd_exported and scrape them using Prometheus
698 698 #statsd.enabled = false
699 699 #statsd.statsd_host = 0.0.0.0
700 700 #statsd.statsd_port = 8125
701 701 #statsd.statsd_prefix =
702 702 #statsd.statsd_ipv6 = false
703 703
704 704 ; configure logging automatically at server startup set to false
705 705 ; to use the below custom logging config.
706 706 ; RC_LOGGING_FORMATTER
707 707 ; RC_LOGGING_LEVEL
708 708 ; env variables can control the settings for logging in case of autoconfigure
709 709
710 710 logging.autoconfigure = false
711 711
712 712 ; specify your own custom logging config file to configure logging
713 713 #logging.logging_conf_file = /path/to/custom_logging.ini
714 714
715 715 ; Dummy marker to add new entries after.
716 716 ; Add any custom entries below. Please don't remove this marker.
717 717 custom.conf = 1
718 718
719 719
720 720 ; #####################
721 721 ; LOGGING CONFIGURATION
722 722 ; #####################
723 723
724 724 [loggers]
725 725 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile
726 726
727 727 [handlers]
728 728 keys = console, console_sql
729 729
730 730 [formatters]
731 731 keys = generic, json, color_formatter, color_formatter_sql
732 732
733 733 ; #######
734 734 ; LOGGERS
735 735 ; #######
736 736 [logger_root]
737 737 level = NOTSET
738 738 handlers = console
739 739
740 740 [logger_routes]
741 741 level = DEBUG
742 742 handlers =
743 743 qualname = routes.middleware
744 744 ## "level = DEBUG" logs the route matched and routing variables.
745 745 propagate = 1
746 746
747 747 [logger_sqlalchemy]
748 748 level = INFO
749 749 handlers = console_sql
750 750 qualname = sqlalchemy.engine
751 751 propagate = 0
752 752
753 753 [logger_beaker]
754 754 level = DEBUG
755 755 handlers =
756 756 qualname = beaker.container
757 757 propagate = 1
758 758
759 759 [logger_dogpile]
760 760 level = INFO
761 761 handlers = console
762 762 qualname = dogpile
763 763 propagate = 1
764 764
765 765 [logger_rhodecode]
766 766 level = DEBUG
767 767 handlers =
768 768 qualname = rhodecode
769 769 propagate = 1
770 770
771 771 [logger_ssh_wrapper]
772 772 level = DEBUG
773 773 handlers =
774 774 qualname = ssh_wrapper
775 775 propagate = 1
776 776
777 777 [logger_celery]
778 778 level = DEBUG
779 779 handlers =
780 780 qualname = celery
781 781
782 782
783 783 ; ########
784 784 ; HANDLERS
785 785 ; ########
786 786
787 787 [handler_console]
788 788 class = StreamHandler
789 789 args = (sys.stderr, )
790 790 level = DEBUG
791 791 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
792 792 ; This allows sending properly formatted logs to grafana loki or elasticsearch
793 793 formatter = generic
794 794
795 795 [handler_console_sql]
796 796 ; "level = DEBUG" logs SQL queries and results.
797 797 ; "level = INFO" logs SQL queries.
798 798 ; "level = WARN" logs neither. (Recommended for production systems.)
799 799 class = StreamHandler
800 800 args = (sys.stderr, )
801 801 level = WARN
802 802 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
803 803 ; This allows sending properly formatted logs to grafana loki or elasticsearch
804 804 formatter = generic
805 805
806 806 ; ##########
807 807 ; FORMATTERS
808 808 ; ##########
809 809
810 810 [formatter_generic]
811 811 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
812 812 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
813 813 datefmt = %Y-%m-%d %H:%M:%S
814 814
815 815 [formatter_color_formatter]
816 816 class = rhodecode.lib.logging_formatter.ColorFormatter
817 817 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
818 818 datefmt = %Y-%m-%d %H:%M:%S
819 819
820 820 [formatter_color_formatter_sql]
821 821 class = rhodecode.lib.logging_formatter.ColorFormatterSql
822 822 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
823 823 datefmt = %Y-%m-%d %H:%M:%S
824 824
825 825 [formatter_json]
826 826 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
827 827 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
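For reference, the archive_cache retry knobs in this config map onto the BaseShard._fetch() loop from the first hunk: each failed presence check sleeps retry_backoff seconds, up to retry_attempts times, before KeyError is raised. A small sketch of the worst-case wait implied by the values above (values copied from this file; the calculation is illustrative, not part of the commit):

retry_attempts = 10  # archive_cache.filesystem.retry_attempts
retry_backoff = 1    # archive_cache.filesystem.retry_backoff, in seconds
worst_case_wait = retry_attempts * retry_backoff
assert worst_case_wait == 10  # seconds a fetch of a missing key can block when retry = true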