feat(archive-cache): added extra info on number of evicted caches
super-admin
r5434:6da054fb default
@@ -1,348 +1,352 @@
 # Copyright (C) 2015-2024 RhodeCode GmbH
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License, version 3
 # (only), as published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/

 import os
 import functools
 import logging
 import typing
 import time
 import zlib

 from ...ext_json import json
 from ..utils import StatsDB, NOT_GIVEN, ShardFileReader, EVICTION_POLICY, format_size
 from ..lock import GenerationLock

 log = logging.getLogger(__name__)


 class BaseShard:
     storage_type: str = ''
     fs = None

     @classmethod
     def hash(cls, key):
         """Compute portable hash for `key`.

         :param key: key to hash
         :return: hash value

         """
         mask = 0xFFFFFFFF
         return zlib.adler32(key.encode('utf-8')) & mask  # noqa

     def _write_file(self, full_path, read_iterator, mode):
         raise NotImplementedError

     def _get_keyfile(self, key):
         raise NotImplementedError

     def random_filename(self):
         raise NotImplementedError

     def _store(self, key, value_reader, metadata, mode):
         (filename,  # hash-name
          full_path  # full-path/hash-name
          ) = self.random_filename()

         key_file, key_file_path = self._get_keyfile(key)

         # STORE METADATA
         _metadata = {
             "version": "v1",

             "key_file": key_file,  # this is the .key.json file storing meta
             "key_file_path": key_file_path,  # full path to key_file
             "archive_key": key,  # original name we stored archive under, e.g my-archive.zip
             "archive_filename": filename,  # the actual filename we stored that file under
             "archive_full_path": full_path,

             "store_time": time.time(),
             "access_count": 0,
             "access_time": 0,

             "size": 0
         }
         if metadata:
             _metadata.update(metadata)

         read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
         size, sha256 = self._write_file(full_path, read_iterator, mode)
         _metadata['size'] = size
         _metadata['sha256'] = sha256

         # after archive is finished, we create a key to save the presence of the binary file
         with self.fs.open(key_file_path, 'wb') as f:
             f.write(json.dumps(_metadata))

         return key, filename, size, _metadata

     def _fetch(self, key, retry, retry_attempts, retry_backoff):
         if retry is NOT_GIVEN:
             retry = False
         if retry_attempts is NOT_GIVEN:
             retry_attempts = 0

         if retry and retry_attempts > 0:
             for attempt in range(1, retry_attempts + 1):
                 if key in self:
                     break
                 # we didn't find the key, wait retry_backoff N seconds, and re-check
                 time.sleep(retry_backoff)

         if key not in self:
             log.exception(f'requested key={key} not found in {self} retry={retry}, attempts={retry_attempts}')
             raise KeyError(key)

         key_file, key_file_path = self._get_keyfile(key)
         with self.fs.open(key_file_path, 'rb') as f:
             metadata = json.loads(f.read())

         archive_path = metadata['archive_full_path']

         try:
             return ShardFileReader(self.fs.open(archive_path, 'rb')), metadata
         finally:
             # update usage stats, count and accessed
             metadata["access_count"] = metadata.get("access_count", 0) + 1
             metadata["access_time"] = time.time()
             log.debug('Updated %s with access snapshot, access_count=%s access_time=%s',
                       key_file, metadata['access_count'], metadata['access_time'])
             with self.fs.open(key_file_path, 'wb') as f:
                 f.write(json.dumps(metadata))

     def _remove(self, key):
         if key not in self:
             log.exception(f'requested key={key} not found in {self}')
             raise KeyError(key)

         key_file, key_file_path = self._get_keyfile(key)
         with self.fs.open(key_file_path, 'rb') as f:
             metadata = json.loads(f.read())

         archive_path = metadata['archive_full_path']
         self.fs.rm(archive_path)
         self.fs.rm(key_file_path)
         return 1

     @property
     def storage_medium(self):
         return getattr(self, self.storage_type)

     @property
     def key_suffix(self):
         return 'key.json'

     def __contains__(self, key):
         """Return `True` if `key` matching item is found in cache.

         :param key: key matching item
         :return: True if key matching item

         """
         key_file, key_file_path = self._get_keyfile(key)
         return self.fs.exists(key_file_path)

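`BaseShard.hash()` above produces a portable 32-bit hash, and the concrete shard caches (not part of this hunk) use it to route a key to one of the configured shards. A minimal sketch of such routing, assuming plain modulo over the shard count (an assumption, since `_get_shard()` is left abstract in this file):

```python
import zlib

def portable_hash(key: str) -> int:
    # mirrors BaseShard.hash(): adler32 masked to an unsigned 32-bit value,
    # so the result is stable across platforms and Python builds
    return zlib.adler32(key.encode('utf-8')) & 0xFFFFFFFF

# hypothetical routing over 8 shards, matching the
# archive_cache.filesystem.cache_shards default in the config further below
num_shards = 8
shard_index = portable_hash('my-archive-abc-1.zip') % num_shards
```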
 class BaseCache:
     _locking_url: str = ''
     _storage_path: str = ''
     _config = {}
     retry = False
     retry_attempts = 0
     retry_backoff = 1
     _shards = tuple()

     def __contains__(self, key):
         """Return `True` if `key` matching item is found in cache.

         :param key: key matching item
         :return: True if key matching item

         """
         return self.has_key(key)

     def __repr__(self):
         return f'<{self.__class__.__name__}(storage={self._storage_path})>'

     @classmethod
     def gb_to_bytes(cls, gb):
         return gb * (1024 ** 3)

     @property
     def storage_path(self):
         return self._storage_path

     @classmethod
     def get_stats_db(cls):
         return StatsDB()

     def get_conf(self, key, pop=False):
         if key not in self._config:
             raise ValueError(f"No configuration key '{key}', please make sure it exists in archive_cache config")
         val = self._config[key]
         if pop:
             del self._config[key]
         return val

     def _get_shard(self, key):
         raise NotImplementedError

     def _get_size(self, shard, archive_path):
         raise NotImplementedError

     def store(self, key, value_reader, metadata=None):
         shard = self._get_shard(key)
         return shard.store(key, value_reader, metadata)

     def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN) -> tuple[typing.BinaryIO, dict]:
         """
         Return file handle corresponding to `key` from specific shard cache.
         """
         if retry is NOT_GIVEN:
             retry = self.retry
         if retry_attempts is NOT_GIVEN:
             retry_attempts = self.retry_attempts
         retry_backoff = self.retry_backoff

         shard = self._get_shard(key)
         return shard.fetch(key, retry=retry, retry_attempts=retry_attempts, retry_backoff=retry_backoff)

     def remove(self, key):
         shard = self._get_shard(key)
         return shard.remove(key)

     def has_key(self, archive_key):
         """Return `True` if `key` matching item is found in cache.

         :param archive_key: key for item, this is a unique archive name we want to store data under. e.g my-archive-svn.zip
         :return: True if key is found

         """
         shard = self._get_shard(archive_key)
         return archive_key in shard

     def iter_keys(self):
         for shard in self._shards:
             if shard.fs.exists(shard.storage_medium):
                 for path, _dirs, _files in shard.fs.walk(shard.storage_medium):
                     for key_file_path in _files:
                         if key_file_path.endswith(shard.key_suffix):
                             yield shard, key_file_path

     def get_lock(self, lock_key):
         return GenerationLock(lock_key, self._locking_url)

-    def evict(self, policy=None, size_limit=None) -> int:
+    def evict(self, policy=None, size_limit=None) -> dict:
         """
         Remove old items based on the conditions


         explanation of this algo:
         iterate over each shard, then for each shard iterate over the .key files
         read the key files metadata stored. This gives us a full list of keys, cached_archived, their size and
         access data, time creation, and access counts.

-        Store that into a memory DB so we can run different sorting strategies easily.
+        Store that into a memory DB in order we can run different sorting strategies easily.
         Summing the size is a sum sql query.

         Then we run a sorting strategy based on eviction policy.
         We iterate over sorted keys, and remove each checking if we hit the overall limit.
         """
-
+        removal_info = {
+            "removed_items": 0,
+            "removed_size": 0
+        }
         policy = policy or self._eviction_policy
         size_limit = size_limit or self._cache_size_limit

         select_policy = EVICTION_POLICY[policy]['evict']

         log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
                   policy, format_size(size_limit))

         if select_policy is None:
-            return 0
+            return removal_info

         db = self.get_stats_db()

         data = []
         cnt = 1

         for shard, key_file in self.iter_keys():
             with shard.fs.open(os.path.join(shard.storage_medium, key_file), 'rb') as f:
                 metadata = json.loads(f.read())

             key_file_path = os.path.join(shard.storage_medium, key_file)

             archive_key = metadata['archive_key']
             archive_path = metadata['archive_full_path']

             size = metadata.get('size')
             if not size:
                 # in case we don't have size re-calc it...
                 size = self._get_size(shard, archive_path)

             data.append([
                 cnt,
                 key_file,
                 key_file_path,
                 archive_key,
                 archive_path,
                 metadata.get('store_time', 0),
                 metadata.get('access_time', 0),
                 metadata.get('access_count', 0),
                 size,
             ])
             cnt += 1

         # Insert bulk data using executemany
         db.bulk_insert(data)

         total_size = db.get_total_size()
         log.debug('Analyzed %s keys, occupying: %s, running eviction to match %s',
                   len(data), format_size(total_size), format_size(size_limit))

         removed_items = 0
         removed_size = 0
         for key_file, archive_key, size in db.get_sorted_keys(select_policy):
             # simulate removal impact BEFORE removal
             total_size -= size

             if total_size <= size_limit:
                 # we obtained what we wanted...
                 break

             self.remove(archive_key)
             removed_items += 1
             removed_size += size
-
+        removal_info['removed_items'] = removed_items
+        removal_info['removed_size'] = removed_size
         log.debug('Removed %s cache archives, and reduced size by: %s',
                   removed_items, format_size(removed_size))
-        return removed_items
+        return removal_info

     def get_statistics(self):
         total_files = 0
         total_size = 0
         meta = {}

         for shard, key_file in self.iter_keys():
             json_key = f"{shard.storage_medium}/{key_file}"
             with shard.fs.open(json_key, 'rb') as f:
                 total_files += 1
                 metadata = json.loads(f.read())
                 total_size += metadata['size']

         return total_files, total_size, meta

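The behavior change in this commit is `evict()`'s return value: a bare count of removed archives becomes a dict carrying both counters. A minimal sketch of a caller adapting to the new shape, where `d_cache` stands for an already-configured cache instance (an assumption here, e.g. the object returned by `archive_cache.get_archival_cache_store()` in the tests below):

```python
def report_eviction(d_cache) -> None:
    # evict down to ~5 MB, as the eviction test below does
    removal_info = d_cache.evict(size_limit=d_cache.gb_to_bytes(0.005))
    # before this commit evict() returned a bare int (number of removed
    # archives); it now returns both counters in one dict
    print(f"evicted {removal_info['removed_items']} archives, "
          f"freed {removal_info['removed_size']} bytes")
```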
@@ -1,105 +1,105 @@
 # Copyright (C) 2016-2023 RhodeCode GmbH
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License, version 3
 # (only), as published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/

 import time
 import pytest
 import rhodecode
 import os
 import shutil
 from tempfile import mkdtemp

 from rhodecode.lib import archive_cache


 def file_reader(temp_store):
     with open(temp_store, 'w') as f:
         for cnt in range(10000):
             f.write(str(cnt))
     return open(temp_store, 'rb')


 @pytest.fixture()
 def d_cache_instance(ini_settings):
     config = ini_settings
     d_cache = archive_cache.get_archival_cache_store(config=config, always_init=True)
     return d_cache


 @pytest.mark.usefixtures('app')
 class TestArchiveCaches(object):

     def test_archivecache_empty_stats(self, d_cache_instance):
         d_cache = d_cache_instance
         shutil.rmtree(d_cache._directory)

         stats = d_cache.get_statistics()
         assert (0, 0, {}) == stats

     def test_archivecache_store_keys(self, d_cache_instance, tmp_path):
         d_cache = d_cache_instance
         shutil.rmtree(d_cache._directory)

         for n in range(100):

             archive_name = f'my-archive-abc-{n}.zip'
             temp_archive_path = os.path.join(tmp_path, archive_name)
             d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})
             reader, meta = d_cache.fetch(archive_name)
             content = reader.read()
             assert content == open(temp_archive_path, 'rb').read()

         stats = d_cache.get_statistics()
         assert (100, 3889000, {}) == stats

     def test_archivecache_remove_keys(self, d_cache_instance, tmp_path):
         d_cache = d_cache_instance
         shutil.rmtree(d_cache._directory)

         n = 1
         archive_name = f'my-archive-abc-{n}.zip'
         temp_archive_path = os.path.join(tmp_path, archive_name)

         d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})
         stats = d_cache.get_statistics()
         assert (1, 38890, {}) == stats

         assert 1 == d_cache.remove(archive_name)

         stats = d_cache.get_statistics()
         assert (0, 0, {}) == stats

     def test_archivecache_evict_keys(self, d_cache_instance, tmp_path):
         d_cache = d_cache_instance
         shutil.rmtree(d_cache._directory)
         tries = 500
         for n in range(tries):

             archive_name = f'my-archive-abc-{n}.zip'
             temp_archive_path = os.path.join(tmp_path, archive_name)
             d_cache.store(archive_name, file_reader(temp_archive_path), {'foo': 'bar'})

         stats = d_cache.get_statistics()
         assert (tries, 19445000, {}) == stats
         evict_to = 0.005  # around (5mb)
         evicted_items = d_cache.evict(size_limit=d_cache.gb_to_bytes(evict_to))
         evicted = 361
-        assert evicted == evicted_items
+        assert {'removed_items': evicted, 'removed_size': 14039290} == evicted_items

         stats = d_cache.get_statistics()
         assert (tries - evicted, 5405710, {}) == stats

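The constants asserted above follow from simple arithmetic: `file_reader()` writes `str(cnt)` for `cnt in range(10000)`, which is 10*1 + 90*2 + 900*3 + 9000*4 = 38890 bytes per archive, hence the `(1, 38890, {})` and `(tries, 19445000, {})` statistics. A self-contained sketch that mirrors (rather than imports) the eviction loop and reproduces the expected eviction dict:

```python
# values taken from test_archivecache_evict_keys; the loop mirrors evict()'s
# "simulate removal impact BEFORE removal" logic
item_size = 38890                    # bytes per archive written by file_reader()
tries = 500
size_limit = int(0.005 * 1024 ** 3)  # gb_to_bytes(0.005) ~= 5368709 bytes

total_size = tries * item_size       # 19445000, matches get_statistics()
removed_items = removed_size = 0
for _ in range(tries):
    total_size -= item_size          # simulated impact, checked before removing
    if total_size <= size_limit:
        break                        # the archive that trips the limit stays cached
    removed_items += 1
    removed_size += item_size

assert removed_items == 361          # matches the expected `evicted` count
assert removed_size == 14039290      # matches the expected 'removed_size'
assert (tries - removed_items) * item_size == 5405710  # size left in the cache
```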
@@ -1,827 +1,827 @@
 
 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
 ; #########################################
 
 [DEFAULT]
 ; Debug flag sets all loggers to debug, and enables request tracking
 debug = true
 
 ; ########################################################################
 ; EMAIL CONFIGURATION
 ; These settings will be used by the RhodeCode mailing system
 ; ########################################################################
 
 ; prefix all email subjects with given prefix, helps filtering out emails
 #email_prefix = [RhodeCode]
 
 ; email FROM address all mails will be sent from
 #app_email_from = rhodecode-noreply@localhost
 
 #smtp_server = mail.server.com
 #smtp_username =
 #smtp_password =
 #smtp_port =
 #smtp_use_tls = false
 #smtp_use_ssl = true
 
 [server:main]
 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
 ; Host and port for gunicorn are controlled by gunicorn_conf.py
 host = 127.0.0.1
 port = 10020
 
 
 ; ###########################
 ; GUNICORN APPLICATION SERVER
 ; ###########################
 
 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
 
 ; Module to use, this setting shouldn't be changed
 use = egg:gunicorn#main
 
 ; Prefix middleware for RhodeCode.
 ; recommended when using proxy setup.
 ; allows setting RhodeCode under a prefix on the server.
 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
 ; And set your prefix like: `prefix = /custom_prefix`
 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
 ; to make your cookies only work on the prefix url
 [filter:proxy-prefix]
 use = egg:PasteDeploy#prefix
 prefix = /
 
 [app:main]
 ; The %(here)s variable will be replaced with the absolute path of parent directory
 ; of this file
 ; Each option in app:main can be overridden by an environment variable
 ;
 ;To override an option:
 ;
 ;RC_<KeyName>
 ;Everything should be uppercase, . and - should be replaced by _.
 ;For example, if you have these configuration settings:
 ;rc_cache.repo_object.backend = foo
 ;can be overridden by
 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
 
 use = egg:rhodecode-enterprise-ce
 
 ; enable proxy prefix middleware, defined above
 #filter-with = proxy-prefix
 
 ; encryption key used to encrypt social plugin tokens,
 ; remote_urls with credentials etc, if not set it defaults to
 ; `beaker.session.secret`
 #rhodecode.encrypted_values.secret =
 
 ; decryption strict mode (enabled by default). It controls if decryption raises
 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
 #rhodecode.encrypted_values.strict = false
 
 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
 ; fernet is safer, and we strongly recommend switching to it.
 ; Due to backward compatibility aes is used as default.
 #rhodecode.encrypted_values.algorithm = fernet
 
 ; Return gzipped responses from RhodeCode (static files/application)
 gzip_responses = false
 
 ; Auto-generate javascript routes file on startup
 generate_js_files = false
 
 ; System global default language.
 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
 lang = en
 
 ; Perform a full repository scan and import on each server start.
 ; Setting this to true could lead to very long startup time.
 startup.import_repos = true
 
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
 app.base_url = http://rhodecode.local
 
 ; Host at which the Service API is running.
 app.service_api.host = http://rhodecode.local:10020
 
 ; Secret for Service API authentication.
 app.service_api.token =
 
 ; Unique application ID. Should be a random unique string for security.
 app_instance_uuid = rc-production
 
 ; Cut off limit for large diffs (size in bytes). If overall diff size on
 ; commit, or pull request exceeds this limit this diff will be displayed
 ; partially. E.g 512000 == 512Kb
 cut_off_limit_diff = 1024000
 
 ; Cut off limit for large files inside diffs (size in bytes). Each individual
 ; file inside diff which exceeds this limit will be displayed partially.
 ; E.g 128000 == 128Kb
 cut_off_limit_file = 256000
 
 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
 vcs_full_cache = false
 
 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
 force_https = false
 
 ; use Strict-Transport-Security headers
 use_htsts = false
 
 ; Set to true if your repos are exposed using the dumb protocol
 git_update_server_info = false
 
 ; RSS/ATOM feed options
 rss_cut_off_limit = 256000
 rss_items_per_page = 10
 rss_include_diff = false
 
 ; gist URL alias, used to create nicer urls for gist. This should be an
 ; url that does rewrites to _admin/gists/{gistid}.
 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
 gist_alias_url =
 
 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
 ; used for access.
 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
 ; came from the logged-in user who owns this authentication token.
 ; Additionally @TOKEN syntax can be used to bind the view to a specific
 ; authentication token. Such a view would only be accessible when used together
 ; with this authentication token
 ; list of all views can be found under `/_admin/permissions/auth_token_access`
 ; The list should be "," separated and on a single line.
 ; Most common views to enable:
 
 # RepoCommitsView:repo_commit_download
 # RepoCommitsView:repo_commit_patch
 # RepoCommitsView:repo_commit_raw
 # RepoCommitsView:repo_commit_raw@TOKEN
 # RepoFilesView:repo_files_diff
 # RepoFilesView:repo_archivefile
 # RepoFilesView:repo_file_raw
 # GistView:*
 api_access_controllers_whitelist =
 
 ; Default encoding used to convert from and to unicode
 ; can also be a comma separated list of encodings in case of mixed encodings
 default_encoding = UTF-8
 
 ; instance-id prefix
 ; a prefix key for this instance used for cache invalidation when running
 ; multiple instances of RhodeCode, make sure it's globally unique for
 ; all running RhodeCode instances. Leave empty if you don't use it
 instance_id =
 
 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
 ; of an authentication plugin also if it is disabled by its settings.
 ; This could be useful if you are unable to log in to the system due to broken
 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
 ; module to log in again and fix the settings.
 ; Available builtin plugin IDs (hash is part of the ID):
 ; egg:rhodecode-enterprise-ce#rhodecode
 ; egg:rhodecode-enterprise-ce#pam
 ; egg:rhodecode-enterprise-ce#ldap
 ; egg:rhodecode-enterprise-ce#jasig_cas
 ; egg:rhodecode-enterprise-ce#headers
 ; egg:rhodecode-enterprise-ce#crowd
 
 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
 
 ; Flag to control loading of legacy plugins in py:/path format
 auth_plugin.import_legacy_plugins = true
 
 ; alternative return HTTP header for failed authentication. Default HTTP
 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble with
 ; handling that, causing a series of failed authentication calls.
 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
 ; This will be served instead of default 401 on bad authentication
 auth_ret_code =
 
 ; use special detection method when serving auth_ret_code, instead of serving
 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
 ; and then serve auth_ret_code to clients
 auth_ret_code_detection = false
 
 ; locking return code. When repository is locked return this HTTP code. 2XX
 ; codes don't break the transactions while 4XX codes do
 lock_ret_code = 423
 
 ; Filesystem location where repositories should be stored
 repo_store.path = /var/opt/rhodecode_repo_store
 
 ; allows setting up custom hooks in the settings page
 allow_custom_hooks_settings = true
 
 ; Generated license token required for EE edition license.
 ; New generated token value can be found in Admin > settings > license page.
 license_token = abra-cada-bra1-rce3
 
 ; This flag hides sensitive information on the license page such as token, and license data
 license.hide_license_info = false
 
 ; supervisor connection uri, for managing supervisor and logs.
 supervisor.uri =
 
 ; supervisord group name/id we only want this RC instance to handle
 supervisor.group_id = dev
 
 ; Display extended labs settings
 labs_settings_active = true
 
 ; Custom exception store path, defaults to TMPDIR
 ; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =
 
 ; Send email with exception details when it happens
 #exception_tracker.send_email = false
 
 ; Comma separated list of recipients for exception emails,
 ; e.g admin@rhodecode.com,devops@rhodecode.com
 ; Can be left empty, then emails will be sent to ALL super-admins
 #exception_tracker.send_email_recipients =
 
 ; optional prefix to add to the email Subject
 #exception_tracker.email_prefix = [RHODECODE ERROR]
 
 ; File store configuration. This is used to store and serve uploaded files
 file_store.enabled = true
 
 ; Storage backend, available options are: local
 file_store.backend = local
 
 ; path to store the uploaded binaries and artifacts
 file_store.storage_path = /var/opt/rhodecode_data/file_store
 
 
 ; Redis url to acquire/check generation of archives locks
 archive_cache.locking.url = redis://redis:6379/1
 
 ; Storage backend, only 'filesystem' and 'objectstore' are available now
 archive_cache.backend.type = filesystem
 
 ; url for s3 compatible storage that allows uploading artifacts
 ; e.g http://minio:9000
 archive_cache.objectstore.url = http://s3-minio:9000
 
 ; key for s3 auth
 archive_cache.objectstore.key = key
 
 ; secret for s3 auth
 archive_cache.objectstore.secret = secret
 
 ; number of sharded buckets to create to distribute archives across
 ; default is 8 shards
 archive_cache.objectstore.bucket_shards = 8
 
 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
 archive_cache.objectstore.retry = false
 
 ; number of seconds to wait for next try using retry
 archive_cache.objectstore.retry_backoff = 1
 
 ; how many fetch retries to attempt against this backend
 archive_cache.objectstore.retry_attempts = 10
 
 ; Default is $cache_dir/archive_cache if not set
 ; Generated repo archives will be cached at this location
 ; and served from the cache during subsequent requests for the same archive of
 ; the repository. This path is important to be shared across filesystems and with
 ; RhodeCode and vcsserver
 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
 
 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
 archive_cache.filesystem.cache_size_gb = 2
 
 ; Eviction policy used to clear out after cache_size_gb limit is reached
 archive_cache.filesystem.eviction_policy = least-recently-stored
 
 ; By default cache uses sharding technique, this specifies how many shards are there
 ; default is 8 shards
 archive_cache.filesystem.cache_shards = 8
 
 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
 archive_cache.filesystem.retry = false
 
 ; number of seconds to wait for next try using retry
 archive_cache.filesystem.retry_backoff = 1
 
 ; how many fetch retries to attempt against this backend
 archive_cache.filesystem.retry_attempts = 10
 
 
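The `retry*` options above feed the polling loop in `BaseShard._fetch()` from the first hunk of this commit. A standalone sketch of that behavior, where `exists` is a hypothetical stand-in for the real `key in shard` check:

```python
import time

def fetch_with_retry(key, exists, retry=False, retry_attempts=10, retry_backoff=1):
    # mirrors BaseShard._fetch(): poll up to retry_attempts times, sleeping
    # retry_backoff seconds between checks, then give up with KeyError
    if retry and retry_attempts > 0:
        for _attempt in range(1, retry_attempts + 1):
            if exists(key):
                break
            # the archive may still be being generated by another worker;
            # wait and re-check
            time.sleep(retry_backoff)
    if not exists(key):
        raise KeyError(key)
    return key  # the real code returns a reader plus the key-file metadata
```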
318 ; #############
318 ; #############
319 ; CELERY CONFIG
319 ; CELERY CONFIG
320 ; #############
320 ; #############
321
321
322 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
322 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
323
323
324 use_celery = false
324 use_celery = false
325
325
326 ; path to store schedule database
326 ; path to store schedule database
327 #celerybeat-schedule.path =
327 #celerybeat-schedule.path =
328
328
329 ; connection url to the message broker (default redis)
329 ; connection url to the message broker (default redis)
330 celery.broker_url = redis://redis:6379/8
330 celery.broker_url = redis://redis:6379/8
331
331
332 ; results backend to get results for (default redis)
332 ; results backend to get results for (default redis)
333 celery.result_backend = redis://redis:6379/8
333 celery.result_backend = redis://redis:6379/8
334
334
335 ; rabbitmq example
335 ; rabbitmq example
336 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
336 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
337
337
338 ; maximum tasks to execute before worker restart
338 ; maximum tasks to execute before worker restart
339 celery.max_tasks_per_child = 20
339 celery.max_tasks_per_child = 20
340
340
341 ; tasks will never be sent to the queue, but executed locally instead.
341 ; tasks will never be sent to the queue, but executed locally instead.
342 celery.task_always_eager = true
342 celery.task_always_eager = true
343 celery.task_store_eager_result = true
343 celery.task_store_eager_result = true

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance,
; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
cache_dir = %(here)s/rc-test-data

; *********************************************
; `sql_cache_short` cache for heavy SQL queries
; Only supported backend is `memory_lru`
; *********************************************
rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
rc_cache.sql_cache_short.expiration_time = 0


; *****************************************************
; `cache_repo_longterm` cache for repo object instances
; Only supported backend is `memory_lru`
; *****************************************************
rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
; by default we use 30 days; the cache is still invalidated on push
rc_cache.cache_repo_longterm.expiration_time = 2592000
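; (2592000 s = 30 days * 24 h * 3600 s)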
; max items in the LRU cache; set to a smaller number to save memory, letting least recently used entries be evicted
rc_cache.cache_repo_longterm.max_size = 10000


; *********************************************
; `cache_general` cache for general purpose use
; for simplicity use the rc.file_namespace backend,
; for performance and scale use rc.redis
; *********************************************
rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_general.expiration_time = 43200
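; (43200 s = 12 hours)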
; file cache store path. Defaults to the `cache_dir` value, or the tempdir if neither is set
rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db

; alternative `cache_general` redis backend with distributed lock
#rc_cache.cache_general.backend = dogpile.cache.rc.redis
#rc_cache.cache_general.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_general.arguments.redis_expiration_time = 7200

#rc_cache.cache_general.arguments.host = localhost
#rc_cache.cache_general.arguments.port = 6379
#rc_cache.cache_general.arguments.db = 0
#rc_cache.cache_general.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_general.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_general.arguments.lock_auto_renewal = true

; *************************************************
; `cache_perms` cache for permission tree, auth TTL
; for simplicity use the rc.file_namespace backend,
; for performance and scale use rc.redis
; *************************************************
rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_perms.expiration_time = 0
; file cache store path. Defaults to the `cache_dir` value, or the tempdir if neither is set
rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db

; alternative `cache_perms` redis backend with distributed lock
#rc_cache.cache_perms.backend = dogpile.cache.rc.redis
#rc_cache.cache_perms.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_perms.arguments.redis_expiration_time = 7200

#rc_cache.cache_perms.arguments.host = localhost
#rc_cache.cache_perms.arguments.port = 6379
#rc_cache.cache_perms.arguments.db = 0
#rc_cache.cache_perms.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_perms.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_perms.arguments.lock_auto_renewal = true

; ***************************************************
; `cache_repo` cache for file tree, Readme, RSS FEEDS
; for simplicity use the rc.file_namespace backend,
; for performance and scale use rc.redis
; ***************************************************
rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_repo.expiration_time = 2592000
; file cache store path. Defaults to the `cache_dir` value, or the tempdir if neither is set
rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db

; alternative `cache_repo` redis backend with distributed lock
#rc_cache.cache_repo.backend = dogpile.cache.rc.redis
#rc_cache.cache_repo.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
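; (2678400 s = 31 days, i.e. one day longer than the 30-day expiration_time above)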

#rc_cache.cache_repo.arguments.host = localhost
#rc_cache.cache_repo.arguments.port = 6379
#rc_cache.cache_repo.arguments.db = 1
#rc_cache.cache_repo.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_repo.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_repo.arguments.lock_auto_renewal = true

; ##############
; BEAKER SESSION
; ##############

; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed
; types are file, ext:redis, ext:database, ext:memcached
; The fastest ones are ext:redis and ext:database; DO NOT use the memory type for sessions
beaker.session.type = file
beaker.session.data_dir = %(here)s/rc-tests/data/sessions

; Redis based sessions
#beaker.session.type = ext:redis
#beaker.session.url = redis://redis:6379/2

; DB based session; fast, and allows easy management of logged-in users
#beaker.session.type = ext:database
#beaker.session.table_name = db_session
#beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
#beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
#beaker.session.sa.pool_recycle = 3600
#beaker.session.sa.echo = false

beaker.session.key = rhodecode
beaker.session.secret = test-rc-uytcxaz
beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock

; Secure encrypted cookie. Requires the AES Python libraries
; you must disable beaker.session.secret to use this
#beaker.session.encrypt_key = key_for_encryption
#beaker.session.validate_key = validation_key

; Sets the session as invalid (also logging out the user) if it has not been
; accessed for the given amount of time in seconds
beaker.session.timeout = 2592000
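; (2592000 s = 30 days of inactivity before the session is invalidated)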
beaker.session.httponly = true

; Path to use for the cookie. Set to the prefix if you use prefix middleware
#beaker.session.cookie_path = /custom_prefix

; Set the https-only (secure) flag on the session cookie; enable when serving over HTTPS
beaker.session.secure = false

; default cookie expiration time in seconds; set to `true` to expire
; at browser close
#beaker.session.cookie_expires = 3600

; #############################
; SEARCH INDEXING CONFIGURATION
; #############################

; The full text search indexer is available in rhodecode-tools under the
; `rhodecode-tools index` command

; WHOOSH backend, doesn't require additional services to run
; it works well with a few dozen repos
search.module = rhodecode.lib.index.whoosh
search.location = %(here)s/rc-tests/data/index

; ####################
; CHANNELSTREAM CONFIG
; ####################

; channelstream enables persistent connections and live notifications
; in the system. It's also used by the chat system

channelstream.enabled = false

; server address for the channelstream server on the backend
channelstream.server = channelstream:9800

; location of the channelstream server from the outside world
; use ws:// for http or wss:// for https. This address needs to be handled
; by an external HTTP server such as Nginx or Apache
; see Nginx/Apache configuration examples in our docs
channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
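; A minimal Nginx sketch for proxying the ws_url above (illustrative only, not
; part of this file; the upstream mirrors channelstream.server and your actual
; server_name will differ -- see our docs for complete examples):
;   location /_channelstream {
;       proxy_pass http://channelstream:9800;
;       proxy_http_version 1.1;
;       proxy_set_header Upgrade $http_upgrade;
;       proxy_set_header Connection "upgrade";
;   }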
channelstream.secret = ENV_GENERATED
channelstream.history.location = %(here)s/rc-tests/channelstream_history

; Internal application path that the JavaScript connects to.
; If you use proxy-prefix the prefix should be added before /_channelstream
channelstream.proxy_path = /_channelstream


; ##############################
; MAIN RHODECODE DATABASE CONFIG
; ##############################

#sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
#sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
#sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
; pymysql is an alternative driver for MySQL; use it in case of problems with the default one
#sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode

sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30

; see the sqlalchemy docs for other advanced settings
; print the sql statements to output
sqlalchemy.db1.echo = false

; recycle the connections after this amount of seconds
sqlalchemy.db1.pool_recycle = 3600

; the number of connections to keep open inside the connection pool.
; 0 indicates no limit
; the general calculus with gevent is:
; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
; then increase pool size + max overflow so that they add up to 500.
#sqlalchemy.db1.pool_size = 5

; The number of connections to allow in connection pool "overflow", that is
; connections that can be opened above and beyond the pool_size setting,
; which defaults to five.
#sqlalchemy.db1.max_overflow = 10
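; Worked example of the gevent calculus above: to serve 500 concurrent
; greenlets you could set, e.g. (values are illustrative):
;   sqlalchemy.db1.pool_size = 400
;   sqlalchemy.db1.max_overflow = 100
; so that 400 + 100 = 500 connections are available in total.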

; Connection check ping, used to detect broken database connections
; can be enabled to better handle "MySQL has gone away" errors
#sqlalchemy.db1.ping_connection = true

; ##########
; VCS CONFIG
; ##########
vcs.server.enable = true
vcs.server = vcsserver:10010

; Web server connectivity protocol, responsible for web based VCS operations
; Available protocols are:
; `http` - use http-rpc backend (default)
vcs.server.protocol = http

; Push/Pull operations protocol, available options are:
; `http` - use http-rpc backend (default)
vcs.scm_app_implementation = http

; Push/Pull operations hooks protocol, available options are:
; `http` - use http-rpc backend (default)
; `celery` - use celery based hooks
vcs.hooks.protocol = http
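; Note (an assumption, not stated in this file): the `celery` hooks protocol
; only makes sense together with use_celery = true and a running worker, since
; hook execution is then dispatched as celery tasks.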

; Host on which this instance is listening for hooks. vcsserver will call this host to execute
; pull/push hooks, so it should be accessible over the network.
; Use vcs.hooks.host = "*" to bind to the current hostname (for Docker)
vcs.hooks.host = *

; Start VCSServer with this instance as a subprocess, useful for development
vcs.start_server = false

; List of enabled VCS backends, available options are:
; `hg` - mercurial
; `git` - git
; `svn` - subversion
vcs.backends = hg, git, svn

; Wait this number of seconds before killing the connection to the vcsserver
vcs.connection_timeout = 3600

; Cache flag to cache vcsserver remote calls locally
; It uses the cache region `cache_repo`
vcs.methods.cache = false

; ####################################################
; Subversion proxy support (mod_dav_svn)
; Maps RhodeCode repo groups into SVN paths for Apache
; ####################################################

; Compatibility version when creating SVN repositories. Defaults to the newest version when commented out.
; Set a numeric version matching your current SVN, e.g. 1.8 or 1.12
; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
#vcs.svn.compatible_version = 1.8

; Enable SVN proxy of requests over HTTP
vcs.svn.proxy.enabled = true

; host to connect to the running SVN subsystem
vcs.svn.proxy.host = http://svn:8090

; Enable or disable the config file generation.
svn.proxy.generate_config = false

; Generate the config file with `SVNListParentPath` set to `On`.
svn.proxy.list_parent_path = true

; Set the location and file name of the generated config file.
svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf

; alternative mod_dav config template. This needs to be a valid mako template
; Example template can be found in the source code:
; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
#svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako

; Used as a prefix to the `Location` block in the generated config file.
; In most cases it should be set to `/`.
svn.proxy.location_root = /

; Command to reload the mod dav svn configuration on change.
; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
; Make sure the user who runs the RhodeCode process is allowed to reload Apache
#svn.proxy.reload_cmd = /etc/init.d/apache2 reload

; If the timeout expires before the reload command finishes, the command will
; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
#svn.proxy.reload_timeout = 10

; ####################
; SSH Support Settings
; ####################

; Defines if a custom authorized_keys file should be created and written on
; any change of user ssh keys. Setting this to false also disables the
; possibility of users adding SSH keys from the web interface. Super admins
; can still manage SSH Keys.
ssh.generate_authorized_keyfile = true

; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
# ssh.authorized_keys_ssh_opts =

; Path to the authorized_keys file where the generated entries are placed.
; It is possible to have multiple key files specified in `sshd_config`, e.g.
; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode

; Command to execute the SSH wrapper. The binary is available in the
; RhodeCode installation directory.
; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper

; Allow shell when executing the ssh-wrapper command
ssh.wrapper_cmd_allow_shell = false

; Enables logging, and detailed output sent back to the client during SSH
; operations. Useful for debugging; shouldn't be used in production.
ssh.enable_debug_logging = true

; Paths to binary executables; by default they are just the names, but we can
; override them if we want to use a custom one
ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve

; Enables the SSH key generator web interface. Disabling this still allows users
; to add their own keys.
ssh.enable_ui_key_generator = true

; Statsd client config; this is used to send metrics to statsd
; We recommend running statsd_exporter and scraping the metrics with Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; Configure logging automatically at server startup; set to false
; to use the custom logging config below.
; The env variables
; RC_LOGGING_FORMATTER
; RC_LOGGING_LEVEL
; can control the settings for logging in case of autoconfigure

logging.autoconfigure = false
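; Illustrative only (assuming autoconfigure = true, and that the env variables
; accept the level and formatter names used later in this file):
;   RC_LOGGING_LEVEL=DEBUG RC_LOGGING_FORMATTER=json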

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; Dummy marker to add new entries after.
; Add any custom entries below. Please don't remove this marker.
custom.conf = 1


; #####################
; LOGGING CONFIGURATION
; #####################

[loggers]
keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile

[handlers]
keys = console, console_sql

[formatters]
keys = generic, json, color_formatter, color_formatter_sql

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_routes]
level = DEBUG
handlers =
qualname = routes.middleware
; "level = DEBUG" logs the route matched and routing variables.
propagate = 1

[logger_sqlalchemy]
level = INFO
handlers = console_sql
qualname = sqlalchemy.engine
propagate = 0

[logger_beaker]
level = DEBUG
handlers =
qualname = beaker.container
propagate = 1

[logger_dogpile]
level = INFO
handlers = console
qualname = dogpile
propagate = 1

[logger_rhodecode]
level = DEBUG
handlers =
qualname = rhodecode
propagate = 1

[logger_ssh_wrapper]
level = DEBUG
handlers =
qualname = ssh_wrapper
propagate = 1

[logger_celery]
level = DEBUG
handlers =
qualname = celery


; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = DEBUG
; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = generic

[handler_console_sql]
; "level = DEBUG" logs SQL queries and results.
; "level = INFO" logs SQL queries.
; "level = WARN" logs neither. (Recommended for production systems.)
class = StreamHandler
args = (sys.stderr, )
level = WARN
; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = generic

; ##########
; FORMATTERS
; ##########

[formatter_generic]
class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter]
class = rhodecode.lib.logging_formatter.ColorFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter_sql]
class = rhodecode.lib.logging_formatter.ColorFormatterSql
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_json]
format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
class = rhodecode.lib._vendor.jsonlogger.JsonFormatter