@@ -0,0 +1,269 b''
# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import os
import logging

import fsspec  # noqa

from rhodecode.lib.ext_json import json

from rhodecode.apps.file_store.utils import sha256_safe, ShardFileReader, get_uid_filename
from rhodecode.apps.file_store.extensions import resolve_extensions
from rhodecode.apps.file_store.exceptions import FileNotAllowedException, FileOverSizeException  # noqa: F401

log = logging.getLogger(__name__)


class BaseShard:

    metadata_suffix: str = '.metadata'
    storage_type: str = ''
    fs = None

    @property
    def storage_medium(self):
        if not self.storage_type:
            raise ValueError('No storage type set for this shard (storage_type="")')
        return getattr(self, self.storage_type)

    def __contains__(self, key):
        full_path = self.store_path(key)
        return self.fs.exists(full_path)

    def metadata_convert(self, uid_filename, metadata):
        return metadata

    def get_metadata_filename(self, uid_filename) -> tuple[str, str]:
        metadata_file: str = f'{uid_filename}{self.metadata_suffix}'
        return metadata_file, self.store_path(metadata_file)

    def get_metadata(self, uid_filename, ignore_missing=False) -> dict:
        _metadata_file, metadata_file_path = self.get_metadata_filename(uid_filename)
        if ignore_missing and not self.fs.exists(metadata_file_path):
            return {}

        with self.fs.open(metadata_file_path, 'rb') as f:
            metadata = json.loads(f.read())

        metadata = self.metadata_convert(uid_filename, metadata)
        return metadata

    def _store(self, key: str, uid_key: str, value_reader, max_filesize: int | None = None, metadata: dict | None = None, **kwargs):
        raise NotImplementedError

    def store(self, key: str, uid_key: str, value_reader, max_filesize: int | None = None, metadata: dict | None = None, **kwargs):
        return self._store(key, uid_key, value_reader, max_filesize, metadata, **kwargs)

    def _fetch(self, key, presigned_url_expires: int = 0):
        raise NotImplementedError

    def fetch(self, key, **kwargs) -> tuple[ShardFileReader, dict]:
        return self._fetch(key)

    def _delete(self, key):
        if key not in self:
            log.error(f'requested key={key} not found in {self}')
            raise KeyError(key)

        metadata = self.get_metadata(key)
        _metadata_file, metadata_file_path = self.get_metadata_filename(key)
        artifact_file_path = metadata['filename_uid_path']
        self.fs.rm(artifact_file_path)
        self.fs.rm(metadata_file_path)

        return 1

    def delete(self, key):
        raise NotImplementedError

    def store_path(self, uid_filename):
        raise NotImplementedError


class BaseFileStoreBackend:
    _shards = tuple()
    _shard_cls = BaseShard
    _config: dict | None = None
    _storage_path: str = ''

    def __init__(self, settings, extension_groups=None):
        self._config = settings
        extension_groups = extension_groups or ['any']
        self.extensions = resolve_extensions([], groups=extension_groups)

    def __contains__(self, key):
        return self.filename_exists(key)

    def __repr__(self):
        return f'<{self.__class__.__name__}(storage={self.storage_path})>'

    @property
    def storage_path(self):
        return self._storage_path

    @classmethod
    def get_shard_index(cls, filename: str, num_shards) -> int:
        # Generate a hash value from the filename
        hash_value = sha256_safe(filename)

        # Convert the hash value to an integer
        hash_int = int(hash_value, 16)

        # Map the hash integer to a shard index between 0 and num_shards - 1
        shard_number = hash_int % num_shards

        return shard_number

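The shard routing is worth seeing in isolation. `sha256_safe` presumably hex-digests the UTF-8 encoded filename (its definition is not part of this change), and the digest is reduced modulo the shard count. A minimal standalone sketch of the same mapping using `hashlib` directly:

```python
import hashlib

def shard_index(filename: str, num_shards: int) -> int:
    # hex digest -> big integer -> modulo shard count
    hash_int = int(hashlib.sha256(filename.encode('utf-8')).hexdigest(), 16)
    return hash_int % num_shards

# deterministic: the same filename always resolves to the same shard,
# and the result is always in [0, num_shards)
assert shard_index('my-artifact.bin', 8) == shard_index('my-artifact.bin', 8)
assert 0 <= shard_index('my-artifact.bin', 8) < 8
```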
    @classmethod
    def apply_counter(cls, counter: int, filename: str) -> str:
        """
        Apply a counter to the filename.

        :param counter: The counter value to apply.
        :param filename: The original filename.
        :return: The modified filename with the counter.
        """
        name_counted = f'{counter:d}-{filename}'
        return name_counted

    def _get_shard(self, key) -> _shard_cls:
        index = self.get_shard_index(key, len(self._shards))
        shard = self._shards[index]
        return shard

    def get_conf(self, key, pop=False):
        if key not in self._config:
            raise ValueError(
                f"No configuration key '{key}', please make sure it exists in filestore config")
        val = self._config[key]
        if pop:
            del self._config[key]
        return val

    def filename_allowed(self, filename, extensions=None):
        """Checks if a filename has an allowed extension

        :param filename: base name of file
        :param extensions: iterable of extensions (or self.extensions)
        """
        _, ext = os.path.splitext(filename)
        return self.extension_allowed(ext, extensions)

    def extension_allowed(self, ext, extensions=None):
        """
        Checks if an extension is permitted. Both e.g. ".jpg" and
        "jpg" can be passed in. Extension lookup is case-insensitive.

        :param ext: extension to check
        :param extensions: iterable of extensions to validate against (or self.extensions)
        """
        def normalize_ext(_ext):
            if _ext.startswith('.'):
                _ext = _ext[1:]
            return _ext.lower()

        extensions = extensions or self.extensions
        if not extensions:
            return True

        ext = normalize_ext(ext)

        return ext in [normalize_ext(x) for x in extensions]

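To illustrate the normalization: dotted, undotted, and mixed-case extensions all compare equal (`backend` is a hypothetical instance; an empty `extensions` argument falls back to `self.extensions`, and with nothing configured anywhere everything is allowed):

```python
backend.extension_allowed('.JPG', extensions=['jpg', 'png'])  # True
backend.extension_allowed('jpg', extensions=['.JPG'])         # True
backend.extension_allowed('.exe', extensions=['jpg', 'png'])  # False
```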
    def filename_exists(self, uid_filename):
        shard = self._get_shard(uid_filename)
        return uid_filename in shard

    def store_path(self, uid_filename):
        """
        Returns absolute file path of the uid_filename
        """
        shard = self._get_shard(uid_filename)
        return shard.store_path(uid_filename)

    def store_metadata(self, uid_filename):
        shard = self._get_shard(uid_filename)
        return shard.get_metadata_filename(uid_filename)

    def store(self, filename, value_reader, extensions=None, metadata=None, max_filesize=None, randomized_name=True, **kwargs):
        extensions = extensions or self.extensions

        if not self.filename_allowed(filename, extensions):
            msg = f'filename {filename} does not have an allowed extension (allowed: {extensions})'
            raise FileNotAllowedException(msg)

        # TODO: verify whether no_body_seek is still needed; it is used by the
        # stream-mode importer somehow
        # no_body_seek = kwargs.pop('no_body_seek', False)
        # if no_body_seek:
        #     pass
        # else:
        #     value_reader.seek(0)

        uid_filename = kwargs.pop('uid_filename', None)
        if uid_filename is None:
            uid_filename = get_uid_filename(filename, randomized=randomized_name)

        shard = self._get_shard(uid_filename)

        return shard.store(filename, uid_filename, value_reader, max_filesize, metadata, **kwargs)

    def import_to_store(self, value_reader, org_filename, uid_filename, metadata, **kwargs):
        shard = self._get_shard(uid_filename)
        max_filesize = None
        return shard.store(org_filename, uid_filename, value_reader, max_filesize, metadata, import_mode=True)

    def delete(self, uid_filename):
        shard = self._get_shard(uid_filename)
        return shard.delete(uid_filename)

    def fetch(self, uid_filename) -> tuple[ShardFileReader, dict]:
        shard = self._get_shard(uid_filename)
        return shard.fetch(uid_filename)

    def get_metadata(self, uid_filename, ignore_missing=False) -> dict:
        shard = self._get_shard(uid_filename)
        return shard.get_metadata(uid_filename, ignore_missing=ignore_missing)

    def iter_keys(self):
        for shard in self._shards:
            if shard.fs.exists(shard.storage_medium):
                for _path, _dirs, _files in shard.fs.walk(shard.storage_medium):
                    for key_file_path in _files:
                        if key_file_path.endswith(shard.metadata_suffix):
                            yield shard, key_file_path

    def iter_artifacts(self):
        for shard, key_file in self.iter_keys():
            json_key = f"{shard.storage_medium}/{key_file}"
            with shard.fs.open(json_key, 'rb') as f:
                yield shard, json.loads(f.read())['filename_uid']

    def get_statistics(self):
        total_files = 0
        total_size = 0
        meta = {}

        for shard, key_file in self.iter_keys():
            json_key = f"{shard.storage_medium}/{key_file}"
            with shard.fs.open(json_key, 'rb') as f:
                total_files += 1
                metadata = json.loads(f.read())
                total_size += metadata['size']

        return total_files, total_size, meta
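A rough usage sketch of the public API defined above; the backend construction is elided here (see the concrete backends below), and the metadata shape follows the tests later in this changeset:

```python
with open('report.pdf', 'rb') as value_reader:
    uid_filename, metadata = backend.store(
        'report.pdf', value_reader,
        metadata={'user_uploaded': {'username': 'user1'}},
        max_filesize=10 * 1024 ** 2)

reader, metadata = backend.fetch(uid_filename)  # ShardFileReader + metadata dict
data = reader.read()

backend.delete(uid_filename)
```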
@@ -0,0 +1,183 b''
# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import os
import hashlib
import functools
import time
import logging

from .. import config_keys
from ..exceptions import FileOverSizeException
from ..backends.base import BaseFileStoreBackend, fsspec, BaseShard, ShardFileReader

from ....lib.ext_json import json

log = logging.getLogger(__name__)


class FileSystemShard(BaseShard):
    METADATA_VER = 'v2'
    BACKEND_TYPE = config_keys.backend_filesystem
    storage_type: str = 'directory'

    def __init__(self, index, directory, directory_folder, fs, **settings):
        self._index: int = index
        self._directory: str = directory
        self._directory_folder: str = directory_folder
        self.fs = fs

    @property
    def directory(self) -> str:
        """Cache directory final path."""
        return os.path.join(self._directory, self._directory_folder)

    def _write_file(self, full_path, iterator, max_filesize, mode='wb'):

        # ensure dir exists
        destination, _ = os.path.split(full_path)
        if not self.fs.exists(destination):
            self.fs.makedirs(destination)

        writer = self.fs.open(full_path, mode)

        digest = hashlib.sha256()
        oversize_cleanup = False
        with writer:
            size = 0
            for chunk in iterator:
                size += len(chunk)
                digest.update(chunk)
                writer.write(chunk)

                if max_filesize and size > max_filesize:
                    oversize_cleanup = True
                    # free up the copied file, and raise exc
                    break

            writer.flush()
            # Get the file descriptor
            fd = writer.fileno()

            # Sync the file descriptor to disk, helps with NFS cases...
            os.fsync(fd)

        if oversize_cleanup:
            self.fs.rm(full_path)
            raise FileOverSizeException(f'given file is over size limit ({max_filesize}): {full_path}')

        sha256 = digest.hexdigest()
        log.debug('written new artifact under %s, sha256: %s', full_path, sha256)
        return size, sha256

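The write loop hashes while it copies and aborts once the running size passes the cap, so an oversized upload is never read twice. The same pattern with plain file I/O, as a self-contained sketch:

```python
import hashlib
import os

def write_capped(dst_path: str, chunks, max_filesize: int | None = None):
    digest = hashlib.sha256()
    size = 0
    with open(dst_path, 'wb') as writer:
        for chunk in chunks:
            size += len(chunk)
            digest.update(chunk)
            writer.write(chunk)
            if max_filesize and size > max_filesize:
                break  # stop copying; the partial file is removed below
        writer.flush()
        os.fsync(writer.fileno())  # push to stable storage (NFS-friendly)
    if max_filesize and size > max_filesize:
        os.unlink(dst_path)
        raise ValueError(f'over size limit ({max_filesize}): {dst_path}')
    return size, digest.hexdigest()
```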
    def _store(self, key: str, uid_key, value_reader, max_filesize: int | None = None, metadata: dict | None = None, **kwargs):

        filename = key
        uid_filename = uid_key
        full_path = self.store_path(uid_filename)

        # STORE METADATA
        _metadata = {
            "version": self.METADATA_VER,
            "store_type": self.BACKEND_TYPE,

            "filename": filename,
            "filename_uid_path": full_path,
            "filename_uid": uid_filename,
            "sha256": "",  # NOTE: filled in by reader iteration

            "store_time": time.time(),

            "size": 0
        }

        if metadata:
            if kwargs.pop('import_mode', False):
                # in import mode, we don't need to compute metadata, we just take the old version
                _metadata["import_mode"] = True
            else:
                _metadata.update(metadata)

        read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
        size, sha256 = self._write_file(full_path, read_iterator, max_filesize)
        _metadata['size'] = size
        _metadata['sha256'] = sha256

        # after storing the artifact, we write its metadata file next to it
        _metadata_file, metadata_file_path = self.get_metadata_filename(uid_key)

        with self.fs.open(metadata_file_path, 'wb') as f:
            f.write(json.dumps(_metadata))

        return uid_filename, _metadata

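The `read_iterator` line uses the two-argument form of `iter()`: it keeps calling `value_reader.read(2**22)` (4 MiB chunks) until the sentinel `b''` signals EOF. In isolation:

```python
import functools
import io

reader = io.BytesIO(b'abcdefgh')
# call reader.read(3) repeatedly until it returns the sentinel b''
chunks = list(iter(functools.partial(reader.read, 3), b''))
assert chunks == [b'abc', b'def', b'gh']
```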
    def store_path(self, uid_filename):
        """
        Returns absolute file path of the uid_filename
        """
        return os.path.join(self._directory, self._directory_folder, uid_filename)

    def _fetch(self, key, presigned_url_expires: int = 0):
        if key not in self:
            log.error(f'requested key={key} not found in {self}')
            raise KeyError(key)

        metadata = self.get_metadata(key)

        file_path = metadata['filename_uid_path']
        if presigned_url_expires and presigned_url_expires > 0:
            metadata['url'] = self.fs.url(file_path, expires=presigned_url_expires)

        return ShardFileReader(self.fs.open(file_path, 'rb')), metadata

    def delete(self, key):
        return self._delete(key)


class FileSystemBackend(BaseFileStoreBackend):
    shard_name: str = 'shard_{:03d}'
    _shard_cls = FileSystemShard

    def __init__(self, settings):
        super().__init__(settings)

        store_dir = self.get_conf(config_keys.filesystem_storage_path)
        directory = os.path.expanduser(store_dir)

        self._directory = directory
        self._storage_path = directory  # common path for all from BaseCache
        self._shard_count = int(self.get_conf(config_keys.filesystem_shards, pop=True))
        if self._shard_count < 1:
            raise ValueError(f'{config_keys.filesystem_shards} must be 1 or more')

        log.debug('Initializing %s file_store instance', self)
        fs = fsspec.filesystem('file')

        if not fs.exists(self._directory):
            fs.makedirs(self._directory, exist_ok=True)

        self._shards = tuple(
            self._shard_cls(
                index=num,
                directory=directory,
                directory_folder=self.shard_name.format(num),
                fs=fs,
                **settings,
            )
            for num in range(self._shard_count)
        )
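With the shard naming above, a 4-shard store rooted at an illustrative path would look like this on disk (uid filenames shortened):

```
/var/opt/rhodecode_data/file_store/
├── shard_000/
│   ├── 9a1f...-report.pdf
│   └── 9a1f...-report.pdf.metadata
├── shard_001/
├── shard_002/
└── shard_003/
```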
@@ -0,0 +1,278 b''
# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import errno
import os
import hashlib
import functools
import time
import logging

from .. import config_keys
from ..exceptions import FileOverSizeException
from ..backends.base import BaseFileStoreBackend, fsspec, BaseShard, ShardFileReader

from ....lib.ext_json import json

log = logging.getLogger(__name__)


class LegacyFileSystemShard(BaseShard):
    # legacy version
    METADATA_VER = 'v2'
    BACKEND_TYPE = config_keys.backend_legacy_filesystem
    storage_type: str = 'dir_struct'

    # legacy suffix
    metadata_suffix: str = '.meta'

    @classmethod
    def _sub_store_from_filename(cls, filename):
        return filename[:2]

    @classmethod
    def apply_counter(cls, counter, filename):
        name_counted = '%d-%s' % (counter, filename)
        return name_counted

    @classmethod
    def safe_make_dirs(cls, dir_path):
        if not os.path.exists(dir_path):
            try:
                os.makedirs(dir_path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            return

    @classmethod
    def resolve_name(cls, name, directory):
        """
        Resolves a unique name and the correct path. Initially the file gets
        a 0- prefix; if a file with that name already exists, the numeric
        prefix is incremented until a free name is found, e.g.
        0-test.jpg -> 1-test.jpg.

        :param name: base name of file
        :param directory: absolute directory path
        """

        counter = 0
        while True:
            name_counted = cls.apply_counter(counter, name)

            # sub_store prefix to optimize disk usage, e.g some_path/ab/final_file
            sub_store: str = cls._sub_store_from_filename(name_counted)
            sub_store_path: str = os.path.join(directory, sub_store)
            cls.safe_make_dirs(sub_store_path)

            path = os.path.join(sub_store_path, name_counted)
            if not os.path.exists(path):
                return name_counted, path
            counter += 1

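A quick check of the counter behaviour (assuming a RhodeCode environment where this module imports):

```python
import os
import tempfile

from rhodecode.apps.file_store.backends.filesystem_legacy import LegacyFileSystemShard

directory = tempfile.mkdtemp()
name, path = LegacyFileSystemShard.resolve_name('test.jpg', directory)
assert name == '0-test.jpg'
# the sub-store is the first two characters of the counted name, here '0-'
assert path == os.path.join(directory, '0-', '0-test.jpg')

open(path, 'wb').close()  # occupy the slot; the next resolution bumps the counter
name2, _path2 = LegacyFileSystemShard.resolve_name('test.jpg', directory)
assert name2 == '1-test.jpg'
```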
    def __init__(self, index, directory, directory_folder, fs, **settings):
        self._index: int = index
        self._directory: str = directory
        self._directory_folder: str = directory_folder
        self.fs = fs

    @property
    def dir_struct(self) -> str:
        """Cache directory final path."""
        return os.path.join(self._directory, '0-')

    def _write_file(self, full_path, iterator, max_filesize, mode='wb'):

        # ensure dir exists
        destination, _ = os.path.split(full_path)
        if not self.fs.exists(destination):
            self.fs.makedirs(destination)

        writer = self.fs.open(full_path, mode)

        digest = hashlib.sha256()
        oversize_cleanup = False
        with writer:
            size = 0
            for chunk in iterator:
                size += len(chunk)
                digest.update(chunk)
                writer.write(chunk)

                if max_filesize and size > max_filesize:
                    # free up the copied file, and raise exc
                    oversize_cleanup = True
                    break

            writer.flush()
            # Get the file descriptor
            fd = writer.fileno()

            # Sync the file descriptor to disk, helps with NFS cases...
            os.fsync(fd)

        if oversize_cleanup:
            self.fs.rm(full_path)
            raise FileOverSizeException(f'given file is over size limit ({max_filesize}): {full_path}')

        sha256 = digest.hexdigest()
        log.debug('written new artifact under %s, sha256: %s', full_path, sha256)
        return size, sha256

    def _store(self, key: str, uid_key, value_reader, max_filesize: int | None = None, metadata: dict | None = None, **kwargs):

        filename = key
        uid_filename = uid_key

        # NOTE: also applies the N- counter prefix...
        uid_filename, full_path = self.resolve_name(uid_filename, self._directory)

        # STORE METADATA
        # TODO: make this format compatible and backward-proof
        _metadata = {
            "version": self.METADATA_VER,

            "filename": filename,
            "filename_uid_path": full_path,
            "filename_uid": uid_filename,
            "sha256": "",  # NOTE: filled in by reader iteration

            "store_time": time.time(),

            "size": 0
        }
        if metadata:
            _metadata.update(metadata)

        read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
        size, sha256 = self._write_file(full_path, read_iterator, max_filesize)
        _metadata['size'] = size
        _metadata['sha256'] = sha256

        # after storing the artifact, we write its metadata file next to it
        _metadata_file, metadata_file_path = self.get_metadata_filename(uid_filename)

        with self.fs.open(metadata_file_path, 'wb') as f:
            f.write(json.dumps(_metadata))

        return uid_filename, _metadata

    def store_path(self, uid_filename):
        """
        Returns absolute file path of the uid_filename
        """
        prefix_dir = ''
        if '/' in uid_filename:
            prefix_dir, filename = uid_filename.split('/')
            sub_store = self._sub_store_from_filename(filename)
        else:
            sub_store = self._sub_store_from_filename(uid_filename)

        return os.path.join(self._directory, prefix_dir, sub_store, uid_filename)

    def metadata_convert(self, uid_filename, metadata):
        # NOTE: backward-compat mode; this handles files created on pre-5.2 systems
        if 'meta_ver' in metadata:
            full_path = self.store_path(uid_filename)
            metadata = {
                "_converted": True,
                "_org": metadata,
                "version": self.METADATA_VER,
                "store_type": self.BACKEND_TYPE,

                "filename": metadata['filename'],
                "filename_uid_path": full_path,
                "filename_uid": uid_filename,
                "sha256": metadata['sha256'],

                "store_time": metadata['time'],

                "size": metadata['size']
            }
        return metadata

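For a concrete picture of the conversion, a hypothetical pre-5.2 `.meta` payload and what it becomes (`shard` being an instantiated `LegacyFileSystemShard`):

```python
old = {
    'meta_ver': 1,  # presence of this key triggers the conversion
    'filename': 'report.pdf',
    'sha256': '9f86d081884c7d65...',
    'time': 1600000000.0,
    'size': 1024,
}
converted = shard.metadata_convert('0-report.pdf', old)
# the v2 shape keeps the original dict under '_org' and renames the keys
assert converted['_converted'] is True
assert converted['store_time'] == old['time']
assert converted['filename_uid'] == '0-report.pdf'
```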
    def _fetch(self, key, presigned_url_expires: int = 0):
        if key not in self:
            log.error(f'requested key={key} not found in {self}')
            raise KeyError(key)

        metadata = self.get_metadata(key)

        file_path = metadata['filename_uid_path']
        if presigned_url_expires and presigned_url_expires > 0:
            metadata['url'] = self.fs.url(file_path, expires=presigned_url_expires)

        return ShardFileReader(self.fs.open(file_path, 'rb')), metadata

    def delete(self, key):
        return self._delete(key)

    def _delete(self, key):
        if key not in self:
            log.error(f'requested key={key} not found in {self}')
            raise KeyError(key)

        metadata = self.get_metadata(key)
        metadata_file, metadata_file_path = self.get_metadata_filename(key)
        artifact_file_path = metadata['filename_uid_path']
        self.fs.rm(artifact_file_path)
        self.fs.rm(metadata_file_path)

        return 1

    def get_metadata_filename(self, uid_filename) -> tuple[str, str]:

        metadata_file: str = f'{uid_filename}{self.metadata_suffix}'
        uid_path_in_store = self.store_path(uid_filename)

        metadata_file_path = f'{uid_path_in_store}{self.metadata_suffix}'
        return metadata_file, metadata_file_path


class LegacyFileSystemBackend(BaseFileStoreBackend):
    _shard_cls = LegacyFileSystemShard

    def __init__(self, settings):
        super().__init__(settings)

        store_dir = self.get_conf(config_keys.legacy_filesystem_storage_path)
        directory = os.path.expanduser(store_dir)

        self._directory = directory
        self._storage_path = directory  # common path for all from BaseCache

        log.debug('Initializing %s file_store instance', self)
        fs = fsspec.filesystem('file')

        if not fs.exists(self._directory):
            fs.makedirs(self._directory, exist_ok=True)

        # legacy system uses a single shard
        self._shards = tuple(
            [
                self._shard_cls(
                    index=0,
                    directory=directory,
                    directory_folder='',
                    fs=fs,
                    **settings,
                )
            ]
        )

    @classmethod
    def get_shard_index(cls, filename: str, num_shards) -> int:
        # the legacy filesystem store doesn't shard; everything maps to shard 0
        return 0
@@ -0,0 +1,184 b''
# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import os
import hashlib
import functools
import time
import logging

from .. import config_keys
from ..exceptions import FileOverSizeException
from ..backends.base import BaseFileStoreBackend, fsspec, BaseShard, ShardFileReader

from ....lib.ext_json import json

log = logging.getLogger(__name__)


class S3Shard(BaseShard):
    METADATA_VER = 'v2'
    BACKEND_TYPE = config_keys.backend_objectstore
    storage_type: str = 'bucket'

    def __init__(self, index, bucket, bucket_folder, fs, **settings):
        self._index: int = index
        self._bucket_main: str = bucket
        self._bucket_folder: str = bucket_folder

        self.fs = fs

    @property
    def bucket(self) -> str:
        """Cache bucket final path."""
        return os.path.join(self._bucket_main, self._bucket_folder)

    def _write_file(self, full_path, iterator, max_filesize, mode='wb'):

        # ensure dir exists
        destination, _ = os.path.split(full_path)
        if not self.fs.exists(destination):
            self.fs.makedirs(destination)

        writer = self.fs.open(full_path, mode)

        digest = hashlib.sha256()
        oversize_cleanup = False
        with writer:
            size = 0
            for chunk in iterator:
                size += len(chunk)
                digest.update(chunk)
                writer.write(chunk)

                if max_filesize and size > max_filesize:
                    oversize_cleanup = True
                    # free up the copied file, and raise exc
                    break

        if oversize_cleanup:
            self.fs.rm(full_path)
            raise FileOverSizeException(f'given file is over size limit ({max_filesize}): {full_path}')

        sha256 = digest.hexdigest()
        log.debug('written new artifact under %s, sha256: %s', full_path, sha256)
        return size, sha256

    def _store(self, key: str, uid_key, value_reader, max_filesize: int | None = None, metadata: dict | None = None, **kwargs):

        filename = key
        uid_filename = uid_key
        full_path = self.store_path(uid_filename)

        # STORE METADATA
        _metadata = {
            "version": self.METADATA_VER,
            "store_type": self.BACKEND_TYPE,

            "filename": filename,
            "filename_uid_path": full_path,
            "filename_uid": uid_filename,
            "sha256": "",  # NOTE: filled in by reader iteration

            "store_time": time.time(),

            "size": 0
        }

        if metadata:
            if kwargs.pop('import_mode', False):
                # in import mode, we don't need to compute metadata, we just take the old version
                _metadata["import_mode"] = True
            else:
                _metadata.update(metadata)

        read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
        size, sha256 = self._write_file(full_path, read_iterator, max_filesize)
        _metadata['size'] = size
        _metadata['sha256'] = sha256

        # after storing the artifact, we write its metadata file next to it
        metadata_file, metadata_file_path = self.get_metadata_filename(uid_key)

        with self.fs.open(metadata_file_path, 'wb') as f:
            f.write(json.dumps(_metadata))

        return uid_filename, _metadata

    def store_path(self, uid_filename):
        """
        Returns absolute file path of the uid_filename
        """
        return os.path.join(self._bucket_main, self._bucket_folder, uid_filename)

    def _fetch(self, key, presigned_url_expires: int = 0):
        if key not in self:
            log.error(f'requested key={key} not found in {self}')
            raise KeyError(key)

        metadata_file, metadata_file_path = self.get_metadata_filename(key)
        with self.fs.open(metadata_file_path, 'rb') as f:
            metadata = json.loads(f.read())

        file_path = metadata['filename_uid_path']
        if presigned_url_expires and presigned_url_expires > 0:
            metadata['url'] = self.fs.url(file_path, expires=presigned_url_expires)

        return ShardFileReader(self.fs.open(file_path, 'rb')), metadata

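The `presigned_url_expires` branch relies on the s3fs `url()` helper, which signs a time-limited GET URL for an object. A sketch, reusing the MinIO endpoint and credentials from the migration example later in this changeset (the object path is made up):

```python
import fsspec  # requires the s3fs package for the 's3' protocol

fs = fsspec.filesystem(
    's3', anon=False, endpoint_url='http://s3-minio:9000',
    key='s3admin', secret='s3secret4')

# what metadata['url'] carries when presigned_url_expires is set
signed_url = fs.url('rhodecode-file-store/shard-000/some-uid.bin', expires=3600)
```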
    def delete(self, key):
        return self._delete(key)


class ObjectStoreBackend(BaseFileStoreBackend):
    shard_name: str = 'shard-{:03d}'
    _shard_cls = S3Shard

    def __init__(self, settings):
        super().__init__(settings)

        self._shard_count = int(self.get_conf(config_keys.objectstore_bucket_shards, pop=True))
        if self._shard_count < 1:
            raise ValueError(f'{config_keys.objectstore_bucket_shards} must be 1 or more')

        self._bucket = settings.pop(config_keys.objectstore_bucket)
        if not self._bucket:
            raise ValueError(f'{config_keys.objectstore_bucket} needs to have a value')

        objectstore_url = self.get_conf(config_keys.objectstore_url)
        key = settings.pop(config_keys.objectstore_key)
        secret = settings.pop(config_keys.objectstore_secret)

        self._storage_path = objectstore_url  # common path for all from BaseCache
        log.debug('Initializing %s file_store instance', self)
        fs = fsspec.filesystem('s3', anon=False, endpoint_url=objectstore_url, key=key, secret=secret)

        # init main bucket
        if not fs.exists(self._bucket):
            fs.mkdir(self._bucket)

        self._shards = tuple(
            self._shard_cls(
                index=num,
                bucket=self._bucket,
                bucket_folder=self.shard_name.format(num),
                fs=fs,
                **settings,
            )
            for num in range(self._shard_count)
        )
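For reference, a settings dict that would drive this backend; the key strings are taken from the `migrate_func` example later in this changeset, except the shards key, whose exact name is an assumption (it maps to `config_keys.objectstore_bucket_shards`):

```python
settings = {
    'file_store.backend.type': 'objectstore',
    'file_store.objectstore.url': 'http://s3-minio:9000',
    'file_store.objectstore.bucket': 'rhodecode-file-store',
    'file_store.objectstore.bucket_shards': 8,  # assumed key name
    'file_store.objectstore.key': 's3admin',
    'file_store.objectstore.secret': 's3secret4',
}
# objects then land under s3://rhodecode-file-store/shard-000/ ... shard-007/
```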
@@ -0,0 +1,128 b''
# Copyright (C) 2010-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import pytest

from rhodecode.apps import file_store
from rhodecode.apps.file_store import config_keys
from rhodecode.apps.file_store.backends.filesystem_legacy import LegacyFileSystemBackend
from rhodecode.apps.file_store.backends.filesystem import FileSystemBackend
from rhodecode.apps.file_store.backends.objectstore import ObjectStoreBackend
from rhodecode.apps.file_store.exceptions import FileNotAllowedException, FileOverSizeException

from rhodecode.apps.file_store import utils as store_utils
from rhodecode.apps.file_store.tests import random_binary_file, file_store_instance


class TestFileStoreBackends:

    @pytest.mark.parametrize('backend_type, expected_instance', [
        (config_keys.backend_legacy_filesystem, LegacyFileSystemBackend),
        (config_keys.backend_filesystem, FileSystemBackend),
        (config_keys.backend_objectstore, ObjectStoreBackend),
    ])
    def test_get_backend(self, backend_type, expected_instance, ini_settings):
        config = ini_settings
        config[config_keys.backend_type] = backend_type
        f_store = store_utils.get_filestore_backend(config=config, always_init=True)
        assert isinstance(f_store, expected_instance)

    @pytest.mark.parametrize('backend_type, expected_instance', [
        (config_keys.backend_legacy_filesystem, LegacyFileSystemBackend),
        (config_keys.backend_filesystem, FileSystemBackend),
        (config_keys.backend_objectstore, ObjectStoreBackend),
    ])
    def test_store_and_read(self, backend_type, expected_instance, ini_settings, random_binary_file):
        filename, temp_file = random_binary_file
        config = ini_settings
        config[config_keys.backend_type] = backend_type
        f_store = store_utils.get_filestore_backend(config=config, always_init=True)
        metadata = {
            'user_uploaded': {
                'username': 'user1',
                'user_id': 10,
                'ip': '10.20.30.40'
            }
        }
        store_fid, metadata = f_store.store(filename, temp_file, extra_metadata=metadata)
        assert store_fid
        assert metadata

        # read-after-write check
        reader, metadata2 = f_store.fetch(store_fid)
        assert reader
        assert metadata2['filename'] == filename

    @pytest.mark.parametrize('backend_type, expected_instance', [
        (config_keys.backend_legacy_filesystem, LegacyFileSystemBackend),
        (config_keys.backend_filesystem, FileSystemBackend),
        (config_keys.backend_objectstore, ObjectStoreBackend),
    ])
    def test_store_file_not_allowed(self, backend_type, expected_instance, ini_settings, random_binary_file):
        filename, temp_file = random_binary_file
        config = ini_settings
        config[config_keys.backend_type] = backend_type
        f_store = store_utils.get_filestore_backend(config=config, always_init=True)
        with pytest.raises(FileNotAllowedException):
            f_store.store('notallowed.exe', temp_file, extensions=['.txt'])

    @pytest.mark.parametrize('backend_type, expected_instance', [
        (config_keys.backend_legacy_filesystem, LegacyFileSystemBackend),
        (config_keys.backend_filesystem, FileSystemBackend),
        (config_keys.backend_objectstore, ObjectStoreBackend),
    ])
    def test_store_file_over_size(self, backend_type, expected_instance, ini_settings, random_binary_file):
        filename, temp_file = random_binary_file
        config = ini_settings
        config[config_keys.backend_type] = backend_type
        f_store = store_utils.get_filestore_backend(config=config, always_init=True)
        with pytest.raises(FileOverSizeException):
            f_store.store('toobig.exe', temp_file, extensions=['.exe'], max_filesize=124)

    @pytest.mark.parametrize('backend_type, expected_instance, extra_conf', [
        (config_keys.backend_legacy_filesystem, LegacyFileSystemBackend, {}),
        (config_keys.backend_filesystem, FileSystemBackend, {config_keys.filesystem_storage_path: '/tmp/test-fs-store'}),
        (config_keys.backend_objectstore, ObjectStoreBackend, {config_keys.objectstore_bucket: 'test-bucket'}),
    ])
    def test_store_stats_and_keys(self, backend_type, expected_instance, extra_conf, ini_settings, random_binary_file):
        config = ini_settings
        config[config_keys.backend_type] = backend_type
        config.update(extra_conf)

        f_store = store_utils.get_filestore_backend(config=config, always_init=True)

        # purge storage before running
        for shard, k in f_store.iter_artifacts():
            f_store.delete(k)

        for i in range(10):
            filename, temp_file = random_binary_file

            metadata = {
                'user_uploaded': {
                    'username': 'user1',
                    'user_id': 10,
                    'ip': '10.20.30.40'
                }
            }
            store_fid, metadata = f_store.store(filename, temp_file, extra_metadata=metadata)
            assert store_fid
            assert metadata

        cnt, size, meta = f_store.get_statistics()
        assert cnt == 10
        assert 10 == len(list(f_store.iter_keys()))
@@ -0,0 +1,52 b''
# Copyright (C) 2010-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import pytest

from rhodecode.apps.file_store import utils as store_utils
from rhodecode.apps.file_store import config_keys
from rhodecode.apps.file_store.tests import generate_random_filename


@pytest.fixture()
def file_store_filesystem_instance(ini_settings):
    config = ini_settings
    config[config_keys.backend_type] = config_keys.backend_filesystem
    f_store = store_utils.get_filestore_backend(config=config, always_init=True)
    return f_store


class TestFileStoreFileSystemBackend:

    @pytest.mark.parametrize('filename', [generate_random_filename() for _ in range(10)])
    def test_get_shard_number(self, filename, file_store_filesystem_instance):
        shard_number = file_store_filesystem_instance.get_shard_index(filename, len(file_store_filesystem_instance._shards))
        # Check that the shard number is in the valid range [0, number of shards)
        assert 0 <= shard_number < len(file_store_filesystem_instance._shards)

    @pytest.mark.parametrize('filename, expected_shard_num', [
        ('my-name-1', 3),
        ('my-name-2', 2),
        ('my-name-3', 4),
        ('my-name-4', 1),

        ('rhodecode-enterprise-ce', 5),
        ('rhodecode-enterprise-ee', 6),
    ])
    def test_get_shard_number_consistency(self, filename, expected_shard_num, file_store_filesystem_instance):
        shard_number = file_store_filesystem_instance.get_shard_index(filename, len(file_store_filesystem_instance._shards))
        assert expected_shard_num == shard_number
@@ -0,0 +1,17 b''
# Copyright (C) 2010-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
\ No newline at end of file
@@ -0,0 +1,52 b''
# Copyright (C) 2010-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import pytest

from rhodecode.apps.file_store import utils as store_utils
from rhodecode.apps.file_store import config_keys
from rhodecode.apps.file_store.tests import generate_random_filename


@pytest.fixture()
def file_store_legacy_instance(ini_settings):
    config = ini_settings
    config[config_keys.backend_type] = config_keys.backend_legacy_filesystem
    f_store = store_utils.get_filestore_backend(config=config, always_init=True)
    return f_store


class TestFileStoreLegacyBackend:

    @pytest.mark.parametrize('filename', [generate_random_filename() for _ in range(10)])
    def test_get_shard_number(self, filename, file_store_legacy_instance):
        shard_number = file_store_legacy_instance.get_shard_index(filename, len(file_store_legacy_instance._shards))
        # Check that the shard number is always 0; the legacy filesystem store doesn't use shards
        assert shard_number == 0

    @pytest.mark.parametrize('filename, expected_shard_num', [
        ('my-name-1', 0),
        ('my-name-2', 0),
        ('my-name-3', 0),
        ('my-name-4', 0),

        ('rhodecode-enterprise-ce', 0),
        ('rhodecode-enterprise-ee', 0),
    ])
    def test_get_shard_number_consistency(self, filename, expected_shard_num, file_store_legacy_instance):
        shard_number = file_store_legacy_instance.get_shard_index(filename, len(file_store_legacy_instance._shards))
        assert expected_shard_num == shard_number
@@ -0,0 +1,52 b''
# Copyright (C) 2010-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import pytest

from rhodecode.apps.file_store import utils as store_utils
from rhodecode.apps.file_store import config_keys
from rhodecode.apps.file_store.tests import generate_random_filename


@pytest.fixture()
def file_store_objectstore_instance(ini_settings):
    config = ini_settings
    config[config_keys.backend_type] = config_keys.backend_objectstore
    f_store = store_utils.get_filestore_backend(config=config, always_init=True)
    return f_store


class TestFileStoreObjectStoreBackend:

    @pytest.mark.parametrize('filename', [generate_random_filename() for _ in range(10)])
    def test_get_shard_number(self, filename, file_store_objectstore_instance):
        shard_number = file_store_objectstore_instance.get_shard_index(filename, len(file_store_objectstore_instance._shards))
        # Check that the shard number is in the valid range [0, number of shards)
        assert 0 <= shard_number < len(file_store_objectstore_instance._shards)

    @pytest.mark.parametrize('filename, expected_shard_num', [
        ('my-name-1', 3),
        ('my-name-2', 2),
        ('my-name-3', 4),
        ('my-name-4', 1),

        ('rhodecode-enterprise-ce', 5),
        ('rhodecode-enterprise-ee', 6),
    ])
    def test_get_shard_number_consistency(self, filename, expected_shard_num, file_store_objectstore_instance):
        shard_number = file_store_objectstore_instance.get_shard_index(filename, len(file_store_objectstore_instance._shards))
        assert expected_shard_num == shard_number
@@ -0,0 +1,122 b''
# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import sys
import logging

import click

from rhodecode.lib.pyramid_utils import bootstrap
from rhodecode.lib.ext_json import json
from rhodecode.model.db import FileStore
from rhodecode.apps.file_store import utils as store_utils

log = logging.getLogger(__name__)


@click.command()
@click.argument('ini_path', type=click.Path(exists=True))
@click.argument('file_uid')
@click.option(
    '--source-backend-conf',
    type=click.Path(exists=True, dir_okay=False, readable=True),
    help='Source backend config file path in a json format'
)
@click.option(
    '--dest-backend-conf',
    type=click.Path(exists=True, dir_okay=False, readable=True),
    help='Destination backend config file path in a json format'
)
def main(ini_path, file_uid, source_backend_conf, dest_backend_conf):
    return command(ini_path, file_uid, source_backend_conf, dest_backend_conf)


_source_settings = {}

_dest_settings = {}


def command(ini_path, file_uid, source_backend_conf, dest_backend_conf):
    with bootstrap(ini_path, env={'RC_CMD_SETUP_RC': '1'}) as env:
        migrate_func(env, file_uid, source_backend_conf, dest_backend_conf)


def migrate_func(env, file_uid, source_backend_conf=None, dest_backend_conf=None):
    """

    Example usage::

        from rhodecode.lib.rc_commands import migrate_artifact
        migrate_artifact._source_settings = {
            'file_store.backend.type': 'filesystem_v1',
            'file_store.filesystem_v1.storage_path': '/var/opt/rhodecode_data/file_store',
        }
        migrate_artifact._dest_settings = {
            'file_store.backend.type': 'objectstore',
            'file_store.objectstore.url': 'http://s3-minio:9000',
            'file_store.objectstore.bucket': 'rhodecode-file-store',
            'file_store.objectstore.key': 's3admin',
            'file_store.objectstore.secret': 's3secret4',
            'file_store.objectstore.region': 'eu-central-1',
        }
        for db_obj in FileStore.query().all():
            migrate_artifact.migrate_func({}, db_obj.file_uid)

    """

    try:
        from rc_ee.api.views.store_api import _store_file
    except ImportError:
        click.secho('ERROR: Unable to import store_api. '
                    'store_api is only available in EE edition of RhodeCode',
                    fg='red')
        sys.exit(-1)

    source_settings = _source_settings
    if source_backend_conf:
        with open(source_backend_conf, 'rb') as f:
            source_settings = json.loads(f.read())
    dest_settings = _dest_settings
    if dest_backend_conf:
        with open(dest_backend_conf, 'rb') as f:
            dest_settings = json.loads(f.read())

    if file_uid.isnumeric():
        file_store_db_obj = FileStore.query() \
            .filter(FileStore.file_store_id == file_uid) \
            .scalar()
    else:
        file_store_db_obj = FileStore.query() \
            .filter(FileStore.file_uid == file_uid) \
            .scalar()
    if not file_store_db_obj:
        click.secho(f'ERROR: Unable to fetch artifact from database file_uid={file_uid}',
                    fg='red')
        sys.exit(-1)

    uid_filename = file_store_db_obj.file_uid
    org_filename = file_store_db_obj.file_display_name
    click.secho(f'Attempting to migrate artifact {uid_filename}, filename: {org_filename}', fg='green')

    # get the source file store backend based on the provided settings
    origin_f_store = store_utils.get_filestore_backend(source_settings, always_init=True)
|
117 | reader, metadata = origin_f_store.fetch(uid_filename) | |||
|
118 | ||||
|
119 | target_f_store = store_utils.get_filestore_backend(dest_settings, always_init=True) | |||
|
120 | target_f_store.import_to_store(reader, org_filename, uid_filename, metadata) | |||
|
121 | ||||
|
122 | click.secho(f'Migrated artifact {uid_filename}, filename: {org_filename} into {target_f_store} storage', fg='green') |
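
The docstring above shows how to migrate a single artifact from Python; driving migrate_func in a loop converts the whole store. A sketch, assuming `_source_settings` and `_dest_settings` have already been filled in exactly as in that docstring::

    from rhodecode.lib.rc_commands import migrate_artifact
    from rhodecode.model.db import FileStore

    # move every artifact payload from the source backend to the destination;
    # the module-level settings dicts select the two backends
    for db_obj in FileStore.query().all():
        migrate_artifact.migrate_func({}, db_obj.file_uid)
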
@@ -281,15 +281,56 @@ labs_settings_active = true
 ; optional prefix to Add to email Subject
 #exception_tracker.email_prefix = [RHODECODE ERROR]
 
-; File store configuration. This is used to store and serve uploaded files
-file_store.enabled = true
+; NOTE: this setting IS DEPRECATED:
+; file_store backend is always enabled
+#file_store.enabled = true
 
+; NOTE: this setting IS DEPRECATED:
+; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
 ; Storage backend, available options are: local
-file_store.backend = local
+#file_store.backend = local
 
+; NOTE: this setting IS DEPRECATED:
+; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
 ; path to store the uploaded binaries and artifacts
-file_store.storage_path = /var/opt/rhodecode_data/file_store
+#file_store.storage_path = /var/opt/rhodecode_data/file_store
+
+; Artifacts file-store, used to store comment attachments and artifact uploads.
+; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
+; filesystem_v1 is backwards compatible with the pre-5.1 storage layout
+; new installations should choose filesystem_v2 or objectstore (s3-based); pick filesystem_v1 when migrating from
+; previous installations to keep the artifacts without the need for a migration
+#file_store.backend.type = filesystem_v2
+
+; filesystem_v1 options...
+#file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store
+
+; filesystem_v2 options...
+#file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store
+#file_store.filesystem_v2.shards = 8
 
+; objectstore options...
+; url for s3 compatible storage that allows to upload artifacts
+; e.g. http://minio:9000
+#file_store.backend.type = objectstore
+#file_store.objectstore.url = http://s3-minio:9000
+
+; a top-level bucket to put all other shards in
+; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
+#file_store.objectstore.bucket = rhodecode-file-store
+
+; number of sharded buckets to create to distribute archives across
+; default is 8 shards
+#file_store.objectstore.bucket_shards = 8
+
+; key for s3 auth
+#file_store.objectstore.key = s3admin
+
+; secret for s3 auth
+#file_store.objectstore.secret = s3secret4
+
+; region for s3 storage
+#file_store.objectstore.region = eu-central-1
 
 ; Redis url to acquire/check generation of archives locks
 archive_cache.locking.url = redis://redis:6379/1
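
The objectstore values above can be smoke-tested outside RhodeCode with the same s3fs library the reworked utils module imports. The endpoint, key, secret and bucket below simply mirror the sample ini values and are not real credentials::

    import s3fs

    fs = s3fs.S3FileSystem(
        key='s3admin', secret='s3secret4',
        client_kwargs={'endpoint_url': 'http://s3-minio:9000'})
    # with the default of 8 bucket shards, shard folders live under the bucket
    print(fs.ls('rhodecode-file-store'))
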
@@ -249,15 +249,56 @@ labs_settings_active = true
 ; optional prefix to Add to email Subject
 #exception_tracker.email_prefix = [RHODECODE ERROR]
 
-; File store configuration. This is used to store and serve uploaded files
-file_store.enabled = true
+; NOTE: this setting IS DEPRECATED:
+; file_store backend is always enabled
+#file_store.enabled = true
 
+; NOTE: this setting IS DEPRECATED:
+; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
 ; Storage backend, available options are: local
-file_store.backend = local
+#file_store.backend = local
 
+; NOTE: this setting IS DEPRECATED:
+; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
 ; path to store the uploaded binaries and artifacts
-file_store.storage_path = /var/opt/rhodecode_data/file_store
+#file_store.storage_path = /var/opt/rhodecode_data/file_store
+
+; Artifacts file-store, used to store comment attachments and artifact uploads.
+; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
+; filesystem_v1 is backwards compatible with the pre-5.1 storage layout
+; new installations should choose filesystem_v2 or objectstore (s3-based); pick filesystem_v1 when migrating from
+; previous installations to keep the artifacts without the need for a migration
+#file_store.backend.type = filesystem_v2
+
+; filesystem_v1 options...
+#file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store
+
+; filesystem_v2 options...
+#file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store
+#file_store.filesystem_v2.shards = 8
 
+; objectstore options...
+; url for s3 compatible storage that allows to upload artifacts
+; e.g. http://minio:9000
+#file_store.backend.type = objectstore
+#file_store.objectstore.url = http://s3-minio:9000
+
+; a top-level bucket to put all other shards in
+; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
+#file_store.objectstore.bucket = rhodecode-file-store
+
+; number of sharded buckets to create to distribute archives across
+; default is 8 shards
+#file_store.objectstore.bucket_shards = 8
+
+; key for s3 auth
+#file_store.objectstore.key = s3admin
+
+; secret for s3 auth
+#file_store.objectstore.secret = s3secret4
+
+; region for s3 storage
+#file_store.objectstore.region = eu-central-1
 
 ; Redis url to acquire/check generation of archives locks
 archive_cache.locking.url = redis://redis:6379/1
@@ -33,7 +33,7 @@ from rhodecode.lib.ext_json import json
 from rhodecode.lib.utils2 import safe_int
 from rhodecode.model.db import UserIpMap
 from rhodecode.model.scm import ScmModel
-from rhodecode.apps.file_store import utils
+from rhodecode.apps.file_store import utils as store_utils
 from rhodecode.apps.file_store.exceptions import FileNotAllowedException, \
     FileOverSizeException
 
@@ -171,11 +171,17 @@ class AdminSystemInfoSettingsView(BaseAp
             (_('Gist storage info'), val('storage_gist')['text'], state('storage_gist')),
             ('', '', ''), # spacer
 
-            (_('Ar
+            (_('Artifacts storage backend'), val('storage_artifacts')['type'], state('storage_artifacts')),
+            (_('Artifacts storage location'), val('storage_artifacts')['path'], state('storage_artifacts')),
+            (_('Artifacts info'), val('storage_artifacts')['text'], state('storage_artifacts')),
+            ('', '', ''), # spacer
+
+            (_('Archive cache storage backend'), val('storage_archive')['type'], state('storage_archive')),
             (_('Archive cache storage location'), val('storage_archive')['path'], state('storage_archive')),
             (_('Archive cache info'), val('storage_archive')['text'], state('storage_archive')),
             ('', '', ''), # spacer
 
+
             (_('Temp storage location'), val('storage_temp')['path'], state('storage_temp')),
             (_('Temp storage info'), val('storage_temp')['text'], state('storage_temp')),
             ('', '', ''), # spacer
@@ -16,7 +16,8 @@
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 import os
-from rhodecode.apps.file_store import config_keys
+
+
 from rhodecode.config.settings_maker import SettingsMaker
 
 
@@ -24,18 +25,48 @@ def _sanitize_settings_and_apply_default
     """
     Set defaults, convert to python types and validate settings.
    """
+    from rhodecode.apps.file_store import config_keys
+
+    # translate "legacy" params into new config
+    settings.pop(config_keys.deprecated_enabled, True)
+    if config_keys.deprecated_backend in settings:
+        # if legacy backend key is detected we use "legacy" backward compat setting
+        settings.pop(config_keys.deprecated_backend)
+        settings[config_keys.backend_type] = config_keys.backend_legacy_filesystem
+
+    if config_keys.deprecated_store_path in settings:
+        store_path = settings.pop(config_keys.deprecated_store_path)
+        settings[config_keys.legacy_filesystem_storage_path] = store_path
+
     settings_maker = SettingsMaker(settings)
 
-    settings_maker.make_setting(config_keys.enabled, True, parser='bool')
-    settings_maker.make_setting(config_keys.backend, 'local')
+    default_cache_dir = settings['cache_dir']
+    default_store_dir = os.path.join(default_cache_dir, 'artifacts_filestore')
+
+    # set default backend
+    settings_maker.make_setting(config_keys.backend_type, config_keys.backend_legacy_filesystem)
+
+    # legacy filesystem defaults
+    settings_maker.make_setting(config_keys.legacy_filesystem_storage_path, default_store_dir, default_when_empty=True)
 
-    default_store = os.path.join(os.path.dirname(settings['__file__']), 'upload_store')
-    settings_maker.make_setting(config_keys.store_path, default_store)
+    # filesystem defaults
+    settings_maker.make_setting(config_keys.filesystem_storage_path, default_store_dir, default_when_empty=True)
+    settings_maker.make_setting(config_keys.filesystem_shards, 8, parser='int')
+
+    # objectstore defaults
+    settings_maker.make_setting(config_keys.objectstore_url, 'http://s3-minio:9000')
+    settings_maker.make_setting(config_keys.objectstore_bucket, 'rhodecode-artifacts-filestore')
+    settings_maker.make_setting(config_keys.objectstore_bucket_shards, 8, parser='int')
+
+    settings_maker.make_setting(config_keys.objectstore_region, '')
+    settings_maker.make_setting(config_keys.objectstore_key, '')
+    settings_maker.make_setting(config_keys.objectstore_secret, '')
 
     settings_maker.env_expand()
 
 
 def includeme(config):
+
     from rhodecode.apps.file_store.views import FileStoreView
 
     settings = config.registry.settings
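
The translation block above is what keeps a pre-5.1 ini working unchanged. An illustrative before/after, assuming the function takes the merged settings dict as its only argument::

    settings = {
        'cache_dir': '/var/opt/rhodecode_data',
        'file_store.enabled': 'true',     # deprecated, dropped
        'file_store.backend': 'local',    # deprecated, mapped to filesystem_v1
        'file_store.storage_path': '/var/opt/rhodecode_data/file_store',
    }
    _sanitize_settings_and_apply_defaults(settings)

    assert settings['file_store.backend.type'] == 'filesystem_v1'
    assert settings['file_store.filesystem_v1.storage_path'] == '/var/opt/rhodecode_data/file_store'
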
@@ -20,6 +20,38 @@
 # Definition of setting keys used to configure this module. Defined here to
 # avoid repetition of keys throughout the module.
 
-enabled = 'file_store.enabled'
-
-store_path = 'file_store.storage_path'
+# Old, deprecated keys that are no longer used
+deprecated_enabled = 'file_store.enabled'
+deprecated_backend = 'file_store.backend'
+deprecated_store_path = 'file_store.storage_path'
+
+
+backend_type = 'file_store.backend.type'
+
+backend_legacy_filesystem = 'filesystem_v1'
+backend_filesystem = 'filesystem_v2'
+backend_objectstore = 'objectstore'
+
+backend_types = [
+    backend_legacy_filesystem,
+    backend_filesystem,
+    backend_objectstore,
+]
+
+# filesystem_v1 legacy
+legacy_filesystem_storage_path = 'file_store.filesystem_v1.storage_path'
+
+
+# filesystem_v2 new option
+filesystem_storage_path = 'file_store.filesystem_v2.storage_path'
+filesystem_shards = 'file_store.filesystem_v2.shards'
+
+# objectstore
+objectstore_url = 'file_store.objectstore.url'
+objectstore_bucket = 'file_store.objectstore.bucket'
+objectstore_bucket_shards = 'file_store.objectstore.bucket_shards'
+
+objectstore_region = 'file_store.objectstore.region'
+objectstore_key = 'file_store.objectstore.key'
+objectstore_secret = 'file_store.objectstore.secret'
+
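
Because every backend reads its options through these constants, a configuration can also be assembled in code without an ini file; the values below reuse the sample credentials that appear elsewhere in this changeset::

    from rhodecode.apps.file_store import config_keys

    config = {
        config_keys.backend_type: config_keys.backend_objectstore,
        config_keys.objectstore_url: 'http://s3-minio:9000',
        config_keys.objectstore_bucket: 'rhodecode-file-store',
        config_keys.objectstore_key: 's3admin',
        config_keys.objectstore_secret: 's3secret4',
    }
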
@@ -16,3 +16,42 @@
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 
+import os
+import random
+import tempfile
+import string
+
+import pytest
+
+from rhodecode.apps.file_store import utils as store_utils
+
+
+@pytest.fixture()
+def file_store_instance(ini_settings):
+    config = ini_settings
+    f_store = store_utils.get_filestore_backend(config=config, always_init=True)
+    return f_store
+
+
+@pytest.fixture
+def random_binary_file():
+    # Generate random binary data
+    data = bytearray(random.getrandbits(8) for _ in range(1024 * 512))  # 512 KB of random data
+
+    # Create a temporary file
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
+    filename = temp_file.name
+
+    try:
+        # Write the random binary data to the file
+        temp_file.write(data)
+        temp_file.seek(0)  # Rewind the file pointer to the beginning
+        yield filename, temp_file
+    finally:
+        # Close and delete the temporary file after the test
+        temp_file.close()
+        os.remove(filename)
+
+
+def generate_random_filename(length=10):
+    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
\ No newline at end of file
@@ -15,13 +15,16 @@
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
+
 import os
+
 import pytest
 
 from rhodecode.lib.ext_json import json
 from rhodecode.model.auth_token import AuthTokenModel
 from rhodecode.model.db import Session, FileStore, Repository, User
-from rhodecode.apps.file_store import utils
+from rhodecode.apps.file_store import utils as store_utils
+from rhodecode.apps.file_store import config_keys
 
 from rhodecode.tests import TestController
 from rhodecode.tests.routes import route_path
@@ -29,27 +32,61 @@ from rhodecode.tests.routes import route
 
 class TestFileStoreViews(TestController):
 
+    @pytest.fixture()
+    def create_artifact_factory(self, tmpdir, ini_settings):
+
+        def factory(user_id, content, f_name='example.txt'):
+
+            config = ini_settings
+            config[config_keys.backend_type] = config_keys.backend_legacy_filesystem
+
+            f_store = store_utils.get_filestore_backend(config)
+
+            filesystem_file = os.path.join(str(tmpdir), f_name)
+            with open(filesystem_file, 'wt') as f:
+                f.write(content)
+
+            with open(filesystem_file, 'rb') as f:
+                store_uid, metadata = f_store.store(f_name, f, metadata={'filename': f_name})
+            os.remove(filesystem_file)
+
+            entry = FileStore.create(
+                file_uid=store_uid, filename=metadata["filename"],
+                file_hash=metadata["sha256"], file_size=metadata["size"],
+                file_display_name='file_display_name',
+                file_description='repo artifact `{}`'.format(metadata["filename"]),
+                check_acl=True, user_id=user_id,
+            )
+            Session().add(entry)
+            Session().commit()
+            return entry
+        return factory
+
     @pytest.mark.parametrize("fid, content, exists", [
         ('abcde-0.jpg', "xxxxx", True),
         ('abcde-0.exe', "1234567", True),
         ('abcde-0.jpg', "xxxxx", False),
     ])
-    def test_get_files_from_store(self, fid, content, exists, tmpdir, user_util):
+    def test_get_files_from_store(self, fid, content, exists, tmpdir, user_util, ini_settings):
         user = self.log_user()
         user_id = user['user_id']
         repo_id = user_util.create_repo().repo_id
-        store_path = self.app._pyramid_settings[config_keys.store_path]
+
+        config = ini_settings
+        config[config_keys.backend_type] = config_keys.backend_legacy_filesystem
+
         store_uid = fid
 
         if exists:
             status = 200
-            store = utils.get_file_storage({config_keys.store_path: store_path})
+            f_store = store_utils.get_filestore_backend(config)
             filesystem_file = os.path.join(str(tmpdir), fid)
             with open(filesystem_file, 'wt') as f:
                 f.write(content)
 
             with open(filesystem_file, 'rb') as f:
-                store_uid, metadata = store.save_file(f, fid, extra_metadata={'filename': fid})
+                store_uid, metadata = f_store.store(fid, f, metadata={'filename': fid})
+            os.remove(filesystem_file)
 
             entry = FileStore.create(
                 file_uid=store_uid, filename=metadata["filename"],
@@ -69,14 +106,10 @@ class TestFileStoreViews(TestController)
 
         if exists:
             assert response.text == content
-            file_store_path = os.path.dirname(store.resolve_name(store_uid, store_path)[1])
-            metadata_file = os.path.join(file_store_path, store_uid + '.meta')
-            assert os.path.exists(metadata_file)
-            with open(metadata_file, 'rb') as f:
-                json_data = json.loads(f.read())
 
-            assert json_data
-            assert 'size' in json_data
+            metadata = f_store.get_metadata(store_uid)
+
+            assert 'size' in metadata
 
     def test_upload_files_without_content_to_store(self):
         self.log_user()
@@ -112,32 +145,6 @@ class TestFileStoreViews(TestController)
 
         assert response.json['store_fid']
 
-    @pytest.fixture()
-    def create_artifact_factory(self, tmpdir):
-        def factory(user_id, content):
-            store_path = self.app._pyramid_settings[config_keys.store_path]
-            store = utils.get_file_storage({config_keys.store_path: store_path})
-            fid = 'example.txt'
-
-            filesystem_file = os.path.join(str(tmpdir), fid)
-            with open(filesystem_file, 'wt') as f:
-                f.write(content)
-
-            with open(filesystem_file, 'rb') as f:
-                store_uid, metadata = store.save_file(f, fid, extra_metadata={'filename': fid})
-
-            entry = FileStore.create(
-                file_uid=store_uid, filename=metadata["filename"],
-                file_hash=metadata["sha256"], file_size=metadata["size"],
-                file_display_name='file_display_name',
-                file_description='repo artifact `{}`'.format(metadata["filename"]),
-                check_acl=True, user_id=user_id,
-            )
-            Session().add(entry)
-            Session().commit()
-            return entry
-        return factory
-
     def test_download_file_non_scoped(self, user_util, create_artifact_factory):
         user = self.log_user()
         user_id = user['user_id']
@@ -19,21 +19,84 @@
 import io
 import uuid
 import pathlib
+import s3fs
+
+from rhodecode.lib.hash_utils import sha256_safe
+from rhodecode.apps.file_store import config_keys
+
+
+file_store_meta = None
+
+
+def get_filestore_config(config) -> dict:
+
+    final_config = {}
+
+    for k, v in config.items():
+        if k.startswith('file_store'):
+            final_config[k] = v
+
+    return final_config
 
 
-def get_file_storage(settings):
-    from rhodecode.apps.file_store.backends.local_store import LocalFileStorage
-    from rhodecode.apps.file_store import config_keys
-    store_path = settings.get(config_keys.store_path)
-    return LocalFileStorage(base_path=store_path)
+def get_filestore_backend(config, always_init=False):
+    """
+
+    usage::
+        from rhodecode.apps.file_store.utils import get_filestore_backend
+        f_store = get_filestore_backend(config=CONFIG)
+
+    :param config:
+    :param always_init:
+    :return:
+    """
+
+    global file_store_meta
+    if file_store_meta is not None and not always_init:
+        return file_store_meta
+
+    config = get_filestore_config(config)
+    backend = config[config_keys.backend_type]
+
+    match backend:
+        case config_keys.backend_legacy_filesystem:
+            # Legacy backward compatible storage
+            from rhodecode.apps.file_store.backends.filesystem_legacy import LegacyFileSystemBackend
+            d_cache = LegacyFileSystemBackend(
+                settings=config
+            )
+        case config_keys.backend_filesystem:
+            from rhodecode.apps.file_store.backends.filesystem import FileSystemBackend
+            d_cache = FileSystemBackend(
+                settings=config
+            )
+        case config_keys.backend_objectstore:
+            from rhodecode.apps.file_store.backends.objectstore import ObjectStoreBackend
+            d_cache = ObjectStoreBackend(
+                settings=config
+            )
+        case _:
+            raise ValueError(
+                f'file_store.backend.type only supports "{config_keys.backend_types}" got {backend}'
+            )
+
+    cache_meta = d_cache
+    return cache_meta
 
 
 def splitext(filename):
-    ext = ''.join(pathlib.Path(filename).suffixes)
+    final_ext = []
+    for suffix in pathlib.Path(filename).suffixes:
+        if not suffix.isascii():
+            continue
+
+        suffix = " ".join(suffix.split()).replace(" ", "")
+        final_ext.append(suffix)
+    ext = ''.join(final_ext)
     return filename, ext
 
 
-def uid_filename(filename, randomized=True):
+def get_uid_filename(filename, randomized=True):
     """
     Generates a randomized or stable (uuid) filename,
     preserving the original extension.
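
The rewritten splitext above is a hardening change: non-ASCII suffixes are dropped and whitespace inside a suffix is squeezed out, so extension checks cannot be dodged with lookalike or padded extensions. For example::

    splitext('data.tar.gz')   # -> ('data.tar.gz', '.tar.gz')
    splitext('shell.ph p')    # whitespace removed from the suffix, ext becomes '.php'
    splitext('note.тхт')      # non-ascii suffix is skipped, ext becomes ''
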
@@ -46,10 +109,37 @@ def uid_filename(filename, randomized=Tr
     if randomized:
         uid = uuid.uuid4()
     else:
-        hash_key = '{}.{}'.format(filename, 'store')
+        store_suffix = "store"
+        hash_key = f'{filename}.{store_suffix}'
         uid = uuid.uuid5(uuid.NAMESPACE_URL, hash_key)
     return str(uid) + ext.lower()
 
 
 def bytes_to_file_obj(bytes_data):
-    return io.StringIO(bytes_data)
+    return io.BytesIO(bytes_data)
+
+
+class ShardFileReader:
+
+    def __init__(self, file_like_reader):
+        self._file_like_reader = file_like_reader
+
+    def __getattr__(self, item):
+        if isinstance(self._file_like_reader, s3fs.core.S3File):
+            match item:
+                case 'name':
+                    # S3 FileWrapper doesn't support the `name` attribute, and we use it
+                    return self._file_like_reader.full_name
+                case _:
+                    return getattr(self._file_like_reader, item)
+        else:
+            return getattr(self._file_like_reader, item)
+
+
+def archive_iterator(_reader, block_size: int = 4096 * 512):
+    # 4096 * 512 = 2MB blocks
+    while True:
+        data = _reader.read(block_size)
+        if not data:
+            break
+        yield data
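
Together these helpers let a backend stream an artifact without first materializing it on local disk, which is exactly what the reworked FileStoreView below relies on. A rough sketch of the flow, where CONFIG and file_uid are assumed to be the parsed ini settings and an existing artifact uid::

    f_store = get_filestore_backend(config=CONFIG)
    reader, metadata = f_store.fetch(file_uid)    # reader may wrap an s3fs.S3File
    with open('/tmp/artifact.bin', 'wb') as out:
        for chunk in archive_iterator(reader):    # 2MB blocks by default
            out.write(chunk)
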
@@ -17,12 +17,11 @@
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 import logging
 
-
-from pyramid.response import FileResponse
+from pyramid.response import Response
 from pyramid.httpexceptions import HTTPFound, HTTPNotFound
 
 from rhodecode.apps._base import BaseAppView
-from rhodecode.apps.file_store import utils
+from rhodecode.apps.file_store import utils as store_utils
 from rhodecode.apps.file_store.exceptions import (
     FileNotAllowedException, FileOverSizeException)
 
@@ -31,6 +30,7 @@ from rhodecode.lib import audit_logger
 from rhodecode.lib.auth import (
     CSRFRequired, NotAnonymous, HasRepoPermissionAny, HasRepoGroupPermissionAny,
     LoginRequired)
+from rhodecode.lib.str_utils import header_safe_str
 from rhodecode.lib.vcs.conf.mtypes import get_mimetypes_db
 from rhodecode.model.db import Session, FileStore, UserApiKeys
 
@@ -42,7 +42,7 @@ class FileStoreView(BaseAppView):
 
     def load_default_context(self):
         c = self._get_local_tmpl_context()
-        self.storage = utils.get_file_storage(self.request.registry.settings)
+        self.f_store = store_utils.get_filestore_backend(self.request.registry.settings)
         return c
 
     def _guess_type(self, file_name):
@@ -55,8 +55,8 @@ class FileStoreView(BaseAppView):
         return _content_type, _encoding
 
     def _serve_file(self, file_uid):
-        if not self.storage.filename_exists(file_uid):
-            store_path = self.storage.store_path(file_uid)
+        if not self.f_store.filename_exists(file_uid):
+            store_path = self.f_store.store_path(file_uid)
             log.warning('File with FID:%s not found in the store under `%s`',
                         file_uid, store_path)
             raise HTTPNotFound()
@@ -98,28 +98,25 @@ class FileStoreView(BaseAppView):
 
         FileStore.bump_access_counter(file_uid)
 
-        file_path = self.storage.store_path(file_uid)
+        file_name = db_obj.file_display_name
         content_type = 'application/octet-stream'
-        content_encoding = None
 
-        _content_type, _encoding = self._guess_type(file_path)
+        _content_type, _encoding = self._guess_type(file_name)
         if _content_type:
             content_type = _content_type
 
         # For file store we don't submit any session data, this logic tells the
         # Session lib to skip it
         setattr(self.request, '_file_response', True)
-        response = FileResponse(
-            file_path, request=self.request,
-            content_type=content_type, content_encoding=content_encoding)
+        reader, _meta = self.f_store.fetch(file_uid)
 
-        file_name = db_obj.file_display_name
+        response = Response(app_iter=store_utils.archive_iterator(reader))
 
-        response.headers["Content-Disposition"] = (
-
-        )
+        response.content_type = str(content_type)
+        response.content_disposition = f'attachment; filename="{header_safe_str(file_name)}"'
+
         response.headers["X-RC-Artifact-Id"] = str(db_obj.file_store_id)
-        response.headers["X-RC-Artifact-Desc"] = str(db_obj.file_description)
+        response.headers["X-RC-Artifact-Desc"] = header_safe_str(db_obj.file_description)
         response.headers["X-RC-Artifact-Sha256"] = str(db_obj.file_hash)
         return response
 
@@ -147,8 +144,8 @@ class FileStoreView(BaseAppView):
                 'user_id': self._rhodecode_user.user_id,
                 'ip': self._rhodecode_user.ip_addr}}
         try:
-            store_uid, metadata = self.storage.save_file(
-                file_obj.file, filename, extra_metadata=metadata)
+            store_uid, metadata = self.f_store.store(
+                filename, file_obj.file, extra_metadata=metadata)
         except FileNotAllowedException:
             return {'store_fid': None,
                     'access_path': None,
@@ -182,7 +179,7 @@ class FileStoreView(BaseAppView):
     def download_file(self):
         self.load_default_context()
         file_uid = self.request.matchdict['fid']
-        log.debug('Requesting FID:%s from store %s', file_uid, self.storage)
+        log.debug('Requesting FID:%s from store %s', file_uid, self.f_store)
         return self._serve_file(file_uid)
 
     # in addition to @LoginRequired ACL is checked by scopes
@@ -601,26 +601,26 @@ class RepoCommitsView(RepoAppView):
         max_file_size = 10 * 1024 * 1024  # 10MB, also validated via dropzone.js
 
         try:
-            storage = store_utils.get_file_storage(self.request.registry.settings)
-            store_uid, metadata = storage.save_file(
-                file_obj.file, filename, extra_metadata=metadata,
+            f_store = store_utils.get_filestore_backend(self.request.registry.settings)
+            store_uid, metadata = f_store.store(
+                filename, file_obj.file, metadata=metadata,
                 extensions=allowed_extensions, max_filesize=max_file_size)
         except FileNotAllowedException:
             self.request.response.status = 400
             permitted_extensions = ', '.join(allowed_extensions)
-            error_msg = 'File `{}` is not allowed. ' \
-                        'Only following extensions are permitted: {}'.format(
-                            filename, permitted_extensions)
+            error_msg = f'File `{filename}` is not allowed. ' \
+                        f'Only following extensions are permitted: {permitted_extensions}'
+
             return {'store_fid': None,
                     'access_path': None,
                     'error': error_msg}
         except FileOverSizeException:
             self.request.response.status = 400
             limit_mb = h.format_byte_size_binary(max_file_size)
+            error_msg = f'File {filename} is exceeding allowed limit of {limit_mb}.'
             return {'store_fid': None,
                     'access_path': None,
-                    'error': 'File {} is exceeding allowed limit of {}.'.format(
-                        filename, limit_mb)}
+                    'error': error_msg}
 
         try:
             entry = FileStore.create(
@@ -48,7 +48,7 @@ from rhodecode.lib.codeblocks import (
     filenode_as_lines_tokens, filenode_as_annotated_lines_tokens)
 from rhodecode.lib.utils2 import convert_line_endings, detect_mode
 from rhodecode.lib.type_utils import str2bool
-from rhodecode.lib.str_utils import safe_str, safe_int
+from rhodecode.lib.str_utils import safe_str, safe_int, header_safe_str
 from rhodecode.lib.auth import (
     LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired)
 from rhodecode.lib.vcs import path as vcspath
@@ -820,7 +820,7 @@ class RepoFilesView(RepoAppView):
             "filename=\"{}\"; " \
             "filename*=UTF-8\'\'{}".format(safe_path, encoded_path)
 
-        return safe_bytes(headers).decode('latin-1', errors='replace')
+        return header_safe_str(headers)
 
     @LoginRequired()
     @HasRepoPermissionAnyDecorator(
@@ -29,7 +29,7 @@ from rhodecode.lib import audit_logger
 from rhodecode.lib.auth import (
     LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired,
     HasRepoPermissionAny)
-from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError
+from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError, AttachedArtifactsError
 from rhodecode.lib.utils2 import safe_int
 from rhodecode.lib.vcs import RepositoryError
 from rhodecode.model.db import Session, UserFollowing, User, Repository
@@ -136,6 +136,9 @@ class RepoSettingsAdvancedView(RepoAppVi
         elif handle_forks == 'delete_forks':
             handle_forks = 'delete'
 
+        repo_advanced_url = h.route_path(
+            'edit_repo_advanced', repo_name=self.db_repo_name,
+            _anchor='advanced-delete')
         try:
             old_data = self.db_repo.get_api_data()
             RepoModel().delete(self.db_repo, forks=handle_forks)
@@ -158,9 +161,6 @@ class RepoSettingsAdvancedView(RepoAppVi
                     category='success')
             Session().commit()
         except AttachedForksError:
-            repo_advanced_url = h.route_path(
-                'edit_repo_advanced', repo_name=self.db_repo_name,
-                _anchor='advanced-delete')
             delete_anchor = h.link_to(_('detach or delete'), repo_advanced_url)
             h.flash(_('Cannot delete `{repo}` it still contains attached forks. '
                       'Try using {delete_or_detach} option.')
@@ -171,9 +171,6 @@ class RepoSettingsAdvancedView(RepoAppVi
             raise HTTPFound(repo_advanced_url)
 
         except AttachedPullRequestsError:
-            repo_advanced_url = h.route_path(
-                'edit_repo_advanced', repo_name=self.db_repo_name,
-                _anchor='advanced-delete')
             attached_prs = len(self.db_repo.pull_requests_source +
                                self.db_repo.pull_requests_target)
             h.flash(
@@ -184,6 +181,16 @@ class RepoSettingsAdvancedView(RepoAppVi
             # redirect to advanced for forks handle action ?
             raise HTTPFound(repo_advanced_url)
 
+        except AttachedArtifactsError:
+
+            attached_artifacts = len(self.db_repo.artifacts)
+            h.flash(
+                _('Cannot delete `{repo}` it still contains {num} attached artifacts. '
+                  'Consider archiving the repository instead.').format(
+                    repo=self.db_repo_name, num=attached_artifacts), category='warning')
+
+            # redirect to advanced for forks handle action ?
+            raise HTTPFound(repo_advanced_url)
         except Exception:
             log.exception("Exception during deletion of repository")
             h.flash(_('An error occurred during deletion of `%s`')
@@ -206,7 +206,7 @@ def sanitize_settings_and_apply_defaults
     settings_maker.make_setting('archive_cache.filesystem.retry_backoff', 1, parser='int')
     settings_maker.make_setting('archive_cache.filesystem.retry_attempts', 10, parser='int')
 
-    settings_maker.make_setting('archive_cache.objectstore.url',
+    settings_maker.make_setting('archive_cache.objectstore.url', 'http://s3-minio:9000', default_when_empty=True,)
     settings_maker.make_setting('archive_cache.objectstore.key', '')
     settings_maker.make_setting('archive_cache.objectstore.secret', '')
     settings_maker.make_setting('archive_cache.objectstore.region', 'eu-central-1')
@@ -16,7 +16,6 @@
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 
-import os
 import logging
 import rhodecode
 import collections
@@ -30,6 +29,21 @@ from rhodecode.lib.vcs import connect_vc
 log = logging.getLogger(__name__)
 
 
+def propagate_rhodecode_config(global_config, settings, config):
+    # Store the settings to make them available to other modules.
+    settings_merged = global_config.copy()
+    settings_merged.update(settings)
+    if config:
+        settings_merged.update(config)
+
+    rhodecode.PYRAMID_SETTINGS = settings_merged
+    rhodecode.CONFIG = settings_merged
+
+    if 'default_user_id' not in rhodecode.CONFIG:
+        rhodecode.CONFIG['default_user_id'] = utils.get_default_user_id()
+    log.debug('set rhodecode.CONFIG data')
+
+
 def load_pyramid_environment(global_config, settings):
     # Some parts of the code expect a merge of global and app settings.
     settings_merged = global_config.copy()
75 |
|
89 | |||
76 | utils.configure_vcs(settings) |
|
90 | utils.configure_vcs(settings) | |
77 |
|
91 | |||
78 | # Store the settings to make them available to other modules. |
|
92 | # first run, to store data... | |
79 |
|
93 | propagate_rhodecode_config(global_config, settings, {}) | ||
80 | rhodecode.PYRAMID_SETTINGS = settings_merged |
|
|||
81 | rhodecode.CONFIG = settings_merged |
|
|||
82 | rhodecode.CONFIG['default_user_id'] = utils.get_default_user_id() |
|
|||
83 |
|
94 | |||
84 | if vcs_server_enabled: |
|
95 | if vcs_server_enabled: | |
85 | connect_vcs(vcs_server_uri, utils.get_vcs_server_protocol(settings)) |
|
96 | connect_vcs(vcs_server_uri, utils.get_vcs_server_protocol(settings)) |
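
Extracting propagate_rhodecode_config makes the call ordering explicit: an early pass during environment load with an empty overlay, then a final pass once the Pyramid registry settings exist (see make_pyramid_app below)::

    # first run, during load_pyramid_environment: no registry yet
    propagate_rhodecode_config(global_config, settings, {})

    # final run, in make_pyramid_app once the registry is populated
    propagate_rhodecode_config(global_config, settings, config.registry.settings)
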
@@ -115,6 +115,9 @@ def make_pyramid_app(global_config, **se
     celery_settings = get_celery_config(settings)
     config.configure_celery(celery_settings)
 
+    # final config set...
+    propagate_rhodecode_config(global_config, settings, config.registry.settings)
+
     # creating the app uses a connection - return it after we are done
     meta.Session.remove()
 
@@ -80,6 +80,10 @@ class AttachedPullRequestsError(Exceptio
     pass
 
 
+class AttachedArtifactsError(Exception):
+    pass
+
+
 class RepoGroupAssignmentError(Exception):
     pass
 
@@ -81,7 +81,7 @@ from rhodecode.lib.action_parser import 
 from rhodecode.lib.html_filters import sanitize_html
 from rhodecode.lib.pagination import Page, RepoPage, SqlPage
 from rhodecode.lib import ext_json
-from rhodecode.lib.ext_json import json
+from rhodecode.lib.ext_json import json, formatted_str_json
 from rhodecode.lib.str_utils import safe_bytes, convert_special_chars, base64_to_str
 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
 from rhodecode.lib.str_utils import safe_str
1416 | return "data:image/svg+xml;base64,{}".format(img_data) |
|
1416 | return "data:image/svg+xml;base64,{}".format(img_data) | |
1417 |
|
1417 | |||
1418 |
|
1418 | |||
1419 |
def initials_gravatar(request, email_address, first_name, last_name, size=30 |
|
1419 | def initials_gravatar(request, email_address, first_name, last_name, size=30): | |
1420 |
|
1420 | |||
1421 | svg_type = None |
|
1421 | svg_type = None | |
1422 | if email_address == User.DEFAULT_USER_EMAIL: |
|
1422 | if email_address == User.DEFAULT_USER_EMAIL: | |
1423 | svg_type = 'default_user' |
|
1423 | svg_type = 'default_user' | |
1424 |
|
1424 | |||
1425 | klass = InitialsGravatar(email_address, first_name, last_name, size) |
|
1425 | klass = InitialsGravatar(email_address, first_name, last_name, size) | |
1426 |
|
1426 | return klass.generate_svg(svg_type=svg_type) | ||
1427 | if store_on_disk: |
|
|||
1428 | from rhodecode.apps.file_store import utils as store_utils |
|
|||
1429 | from rhodecode.apps.file_store.exceptions import FileNotAllowedException, \ |
|
|||
1430 | FileOverSizeException |
|
|||
1431 | from rhodecode.model.db import Session |
|
|||
1432 |
|
||||
1433 | image_key = md5_safe(email_address.lower() |
|
|||
1434 | + first_name.lower() + last_name.lower()) |
|
|||
1435 |
|
||||
1436 | storage = store_utils.get_file_storage(request.registry.settings) |
|
|||
1437 | filename = '{}.svg'.format(image_key) |
|
|||
1438 | subdir = 'gravatars' |
|
|||
1439 | # since final name has a counter, we apply the 0 |
|
|||
1440 | uid = storage.apply_counter(0, store_utils.uid_filename(filename, randomized=False)) |
|
|||
1441 | store_uid = os.path.join(subdir, uid) |
|
|||
1442 |
|
||||
1443 | db_entry = FileStore.get_by_store_uid(store_uid) |
|
|||
1444 | if db_entry: |
|
|||
1445 | return request.route_path('download_file', fid=store_uid) |
|
|||
1446 |
|
||||
1447 | img_data = klass.get_img_data(svg_type=svg_type) |
|
|||
1448 | img_file = store_utils.bytes_to_file_obj(img_data) |
|
|||
1449 |
|
||||
1450 | try: |
|
|||
1451 | store_uid, metadata = storage.save_file( |
|
|||
1452 | img_file, filename, directory=subdir, |
|
|||
1453 | extensions=['.svg'], randomized_name=False) |
|
|||
1454 | except (FileNotAllowedException, FileOverSizeException): |
|
|||
1455 | raise |
|
|||
1456 |
|
||||
1457 | try: |
|
|||
1458 | entry = FileStore.create( |
|
|||
1459 | file_uid=store_uid, filename=metadata["filename"], |
|
|||
1460 | file_hash=metadata["sha256"], file_size=metadata["size"], |
|
|||
1461 | file_display_name=filename, |
|
|||
1462 | file_description=f'user gravatar `{safe_str(filename)}`', |
|
|||
1463 | hidden=True, check_acl=False, user_id=1 |
|
|||
1464 | ) |
|
|||
1465 | Session().add(entry) |
|
|||
1466 | Session().commit() |
|
|||
1467 | log.debug('Stored upload in DB as %s', entry) |
|
|||
1468 | except Exception: |
|
|||
1469 | raise |
|
|||
1470 |
|
||||
1471 | return request.route_path('download_file', fid=store_uid) |
|
|||
1472 |
|
||||
1473 | else: |
|
|||
1474 | return klass.generate_svg(svg_type=svg_type) |
|
|||
1475 |
|
1427 | |||
1476 |
|
1428 | |||
1477 | def gravatar_external(request, gravatar_url_tmpl, email_address, size=30): |
|
1429 | def gravatar_external(request, gravatar_url_tmpl, email_address, size=30): |
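With the store_on_disk branch removed, the helper no longer round-trips through the file store and DB per avatar; it always answers with an inline data URI. A minimal standalone sketch of that data-URI flow, using an illustrative SVG template rather than RhodeCode's own markup:

import base64

def initials_svg_data_uri(first_name: str, last_name: str, size: int = 30) -> str:
    # build a tiny SVG with the user's initials; markup and color are illustrative
    initials = (first_name[:1] + last_name[:1]).upper() or '?'
    svg = (
        f'<svg xmlns="http://www.w3.org/2000/svg" width="{size}" height="{size}">'
        f'<rect width="100%" height="100%" fill="#596a79"/>'
        f'<text x="50%" y="50%" dominant-baseline="central" text-anchor="middle" '
        f'fill="#fff" font-size="{size // 2}">{initials}</text></svg>'
    )
    img_data = base64.b64encode(svg.encode('utf-8')).decode('ascii')
    # same return shape as InitialsGravatar.generate_svg() above
    return "data:image/svg+xml;base64,{}".format(img_data)

print(initials_svg_data_uri('Ada', 'Lovelace'))  # data:image/svg+xml;base64,...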
@@ -91,15 +91,14 @@ def command(ini_path, filename, file_pat

     auth_user = db_user.AuthUser(ip_addr='127.0.0.1')

-    storage = store_utils.get_file_storage(request.registry.settings)
+    f_store = store_utils.get_filestore_backend(request.registry.settings)

     with open(file_path, 'rb') as f:
         click.secho(f'Adding new artifact from path: `{file_path}`',
                     fg='green')

         file_data = _store_file(
-            storage, auth_user, filename, content=None, check_acl=True,
+            f_store, auth_user, filename, content=None, check_acl=True,
             file_obj=f, description=description,
             scope_repo_id=repo.repo_id)
-        click.secho(f'File Data: {file_data}',
-                    fg='green')
+        click.secho(f'File Data: {file_data}', fg='green')
@@ -181,3 +181,7 @@ def splitnewlines(text: bytes):
     else:
         lines[-1] = lines[-1][:-1]
     return lines
+
+
+def header_safe_str(val):
+    return safe_bytes(val).decode('latin-1', errors='replace')
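The new header_safe_str leans on the fact that latin-1 maps every possible byte, so the decode can never raise: multi-byte UTF-8 degrades to mojibake instead of crashing header serialization. A self-contained sketch, where safe_bytes is re-implemented as the usual str-to-utf-8 helper (an assumption about rhodecode.lib.str_utils):

def safe_bytes(val) -> bytes:
    # assumed behaviour: pass bytes through, utf-8 encode anything else
    return val if isinstance(val, bytes) else str(val).encode('utf-8')

def header_safe_str(val):
    return safe_bytes(val).decode('latin-1', errors='replace')

print(header_safe_str('plain-ascii'))   # unchanged
print(header_safe_str('naïve value'))   # utf-8 bytes decode as latin-1 mojibake, no exception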
@@ -396,17 +396,18 @@ def storage_inodes():


 @register_sysinfo
-def storage_archives():
+def storage_artifacts():
     import rhodecode
     from rhodecode.lib.helpers import format_byte_size_binary
     from rhodecode.lib.archive_cache import get_archival_cache_store

     backend_type = rhodecode.ConfigGet().get_str('archive_cache.backend.type')

     value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type)
     state = STATE_OK_DEFAULT
     try:
         d_cache = get_archival_cache_store(config=rhodecode.CONFIG)
+        backend_type = str(d_cache)

         total_files, total_size, _directory_stats = d_cache.get_statistics()

@@ -415,7 +416,8 @@ def storage_archives():
             'used': total_size,
             'total': total_size,
             'items': total_files,
-            'path': d_cache.storage_path
+            'path': d_cache.storage_path,
+            'type': backend_type
         })

     except Exception as e:
@@ -425,8 +427,44 @@ def storage_archives():
     human_value = value.copy()
     human_value['used'] = format_byte_size_binary(value['used'])
     human_value['total'] = format_byte_size_binary(value['total'])
-    human_value['text'] = "{} ({} items)".format(
-        human_value['used'], value['items'])
+    human_value['text'] = f"{human_value['used']} ({value['items']} items)"
+
+    return SysInfoRes(value=value, state=state, human_value=human_value)
+
+
+@register_sysinfo
+def storage_archives():
+    import rhodecode
+    from rhodecode.lib.helpers import format_byte_size_binary
+    import rhodecode.apps.file_store.utils as store_utils
+    from rhodecode import CONFIG
+
+    backend_type = rhodecode.ConfigGet().get_str(store_utils.config_keys.backend_type)
+
+    value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type)
+    state = STATE_OK_DEFAULT
+    try:
+        f_store = store_utils.get_filestore_backend(config=CONFIG)
+        backend_type = str(f_store)
+        total_files, total_size, _directory_stats = f_store.get_statistics()
+
+        value.update({
+            'percent': 100,
+            'used': total_size,
+            'total': total_size,
+            'items': total_files,
+            'path': f_store.storage_path,
+            'type': backend_type
+        })
+
+    except Exception as e:
+        log.exception('failed to fetch archive cache storage')
+        state = {'message': str(e), 'type': STATE_ERR}
+
+    human_value = value.copy()
+    human_value['used'] = format_byte_size_binary(value['used'])
+    human_value['total'] = format_byte_size_binary(value['total'])
+    human_value['text'] = f"{human_value['used']} ({value['items']} items)"

     return SysInfoRes(value=value, state=state, human_value=human_value)

@@ -798,6 +836,7 @@ def get_system_info(environ):
         'storage': SysInfo(storage)(),
         'storage_inodes': SysInfo(storage_inodes)(),
         'storage_archive': SysInfo(storage_archives)(),
+        'storage_artifacts': SysInfo(storage_artifacts)(),
         'storage_gist': SysInfo(storage_gist)(),
         'storage_temp': SysInfo(storage_temp)(),

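The register/collect pattern behind @register_sysinfo and SysInfo, reduced to a standalone sketch; the registry and wrapper below are simplified stand-ins, not RhodeCode's actual implementation:

STATE_OK_DEFAULT = {'message': None, 'type': 'ok'}
_registry = {}

def register_sysinfo(func):
    # stand-in for the decorator above: remember each check by name
    _registry[func.__name__] = func
    return func

class SysInfoRes:
    def __init__(self, value, state, human_value):
        self.value, self.state, self.human_value = value, state, human_value

@register_sysinfo
def storage_artifacts():
    value = dict(percent=0, used=0, total=0, items=0, path='', text='', type='filesystem_v1')
    return SysInfoRes(value=value, state=STATE_OK_DEFAULT, human_value=value.copy())

# get_system_info-style collection: run every registered check, keep its value dict
snapshot = {name: check().value for name, check in _registry.items()}
print(snapshot['storage_artifacts'])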
@@ -5849,8 +5849,7 @@ class FileStore(Base, BaseModel):
             .filter(FileStoreMetadata.file_store_meta_key == key) \
             .scalar()
         if has_key:
-            msg = 'key `{}` already defined under section `{}` for this file.' \
-                .format(key, section)
+            msg = f'key `{key}` already defined under section `{section}` for this file.'
             raise ArtifactMetadataDuplicate(msg, err_section=section, err_key=key)

         # NOTE(marcink): raises ArtifactMetadataBadValueType
@@ -5949,7 +5948,7 @@ class FileStoreMetadata(Base, BaseModel)
     def valid_value_type(cls, value):
         if value.split('.')[0] not in cls.SETTINGS_TYPES:
             raise ArtifactMetadataBadValueType(
-                'value_type must be one of %s got %s' % (cls.SETTINGS_TYPES.keys(), value))
+                f'value_type must be one of {cls.SETTINGS_TYPES.keys()} got {value}')

     @hybrid_property
     def file_store_meta_section(self):
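valid_value_type only inspects the prefix before the first dot, so dotted suffixes still validate. A sketch with an assumed SETTINGS_TYPES mapping (the real keys live on the model class):

SETTINGS_TYPES = {'str': str, 'int': int, 'unicode': str, 'bool': bool, 'list': list}

class ArtifactMetadataBadValueType(ValueError):
    pass

def valid_value_type(value: str):
    # only the prefix before the first dot is validated
    if value.split('.')[0] not in SETTINGS_TYPES:
        raise ArtifactMetadataBadValueType(
            f'value_type must be one of {SETTINGS_TYPES.keys()} got {value}')

valid_value_type('str')                # passes
valid_value_type('unicode.encrypted')  # passes: 'unicode' is the checked prefix
try:
    valid_value_type('float')
except ArtifactMetadataBadValueType as e:
    print(e)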
@@ -31,7 +31,7 @@ from zope.cachedescriptors.property impo
 from rhodecode import events
 from rhodecode.lib.auth import HasUserGroupPermissionAny
 from rhodecode.lib.caching_query import FromCache
-from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError
+from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError, AttachedArtifactsError
 from rhodecode.lib import hooks_base
 from rhodecode.lib.user_log_filter import user_log_filter
 from rhodecode.lib.utils import make_db_config
@@ -736,7 +736,7 @@ class RepoModel(BaseModel):
         log.error(traceback.format_exc())
         raise

-    def delete(self, repo, forks=None, pull_requests=None, fs_remove=True, cur_user=None):
+    def delete(self, repo, forks=None, pull_requests=None, artifacts=None, fs_remove=True, cur_user=None):
         """
         Delete given repository, forks parameter defines what do do with
         attached forks. Throws AttachedForksError if deleted repo has attached
@@ -745,6 +745,7 @@ class RepoModel(BaseModel):
         :param repo:
         :param forks: str 'delete' or 'detach'
         :param pull_requests: str 'delete' or None
+        :param artifacts: str 'delete' or None
         :param fs_remove: remove(archive) repo from filesystem
         """
         if not cur_user:
@@ -767,6 +768,13 @@ class RepoModel(BaseModel):
         if pull_requests != 'delete' and (pr_sources or pr_targets):
             raise AttachedPullRequestsError()

+        artifacts_objs = repo.artifacts
+        if artifacts == 'delete':
+            for a in artifacts_objs:
+                self.sa.delete(a)
+        elif [a for a in artifacts_objs]:
+            raise AttachedArtifactsError()
+
         old_repo_dict = repo.get_dict()
         events.trigger(events.RepoPreDeleteEvent(repo))
         try:
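Callers now opt in to artifact removal the same way they already do for pull requests; with artifacts left as None, delete() refuses when any artifact is attached. A sketch of that calling convention (the rhodecode.model.repo import path is an assumption; AttachedArtifactsError comes from rhodecode.lib.exceptions as shown above):

from rhodecode.model.repo import RepoModel  # assumed module path
from rhodecode.lib.exceptions import AttachedArtifactsError

def remove_repo(repo_name, force_artifacts=False):
    try:
        RepoModel().delete(
            repo_name,
            pull_requests='delete',
            artifacts='delete' if force_artifacts else None)
    except AttachedArtifactsError:
        # surfaced to the UI, which suggests archiving instead (template change below)
        raise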
@@ -557,10 +557,10 @@ class UserModel(BaseModel):
         elif handle_mode == 'delete':
             from rhodecode.apps.file_store import utils as store_utils
             request = get_current_request()
-            storage = store_utils.get_file_storage(request.registry.settings)
+            f_store = store_utils.get_filestore_backend(request.registry.settings)
             for a in artifacts:
                 file_uid = a.file_uid
-                storage.delete(file_uid)
+                f_store.delete(file_uid)
                 self.sa.delete(a)

         left_overs = False
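The 'delete' handle mode removes each artifact's payload from the file-store backend before dropping its DB row. A standalone sketch of that two-step loop, with stand-ins for the backend and the SQLAlchemy session:

class FakeStore:
    # stands in for store_utils.get_filestore_backend(...)
    def delete(self, file_uid):
        print(f'removed payload {file_uid}')

class FakeSession:
    # stands in for the SQLAlchemy session (self.sa)
    def delete(self, row):
        print(f'removed db row {row.file_uid}')

class Artifact:
    def __init__(self, file_uid):
        self.file_uid = file_uid

f_store, sa = FakeStore(), FakeSession()
for a in [Artifact('gravatars/a.svg'), Artifact('artifacts/b.tar.gz')]:
    f_store.delete(a.file_uid)  # payload first...
    sa.delete(a)                # ...then the metadata row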
@@ -215,18 +215,35 @@
                 %endif
             </td>
         </tr>
+
         <% attached_prs = len(c.rhodecode_db_repo.pull_requests_source + c.rhodecode_db_repo.pull_requests_target) %>
         % if c.rhodecode_db_repo.pull_requests_source or c.rhodecode_db_repo.pull_requests_target:
         <tr>
             <td>
                 ${_ungettext('This repository has %s attached pull request.', 'This repository has %s attached pull requests.', attached_prs) % attached_prs}
                 <br/>
-                ${_('Consider to archive this repository instead.')}
+                <br/>
+                <strong>${_('Consider to archive this repository instead.')}</strong>
             </td>
             <td></td>
             <td></td>
         </tr>
         % endif
+
+        <% attached_artifacts = len(c.rhodecode_db_repo.artifacts) %>
+        % if attached_artifacts:
+        <tr>
+            <td>
+                ${_ungettext('This repository has %s attached artifact.', 'This repository has %s attached artifacts.', attached_artifacts) % attached_artifacts}
+                <br/>
+                <br/>
+                <strong>${_('Consider to archive this repository instead.')}</strong>
+            </td>
+            <td></td>
+            <td></td>
+        </tr>
+        % endif
+
     </table>
     <div style="margin: 0 0 20px 0" class="fake-space"></div>

@@ -305,7 +305,7 @@ class Fixture(object):
         return r

     def destroy_repo(self, repo_name, **kwargs):
-        RepoModel().delete(repo_name, pull_requests='delete', **kwargs)
+        RepoModel().delete(repo_name, pull_requests='delete', artifacts='delete', **kwargs)
         Session().commit()

     def destroy_repo_on_filesystem(self, repo_name):
311 | def destroy_repo_on_filesystem(self, repo_name): |
@@ -36,7 +36,7 b' port = 10020' | |||||
36 | ; GUNICORN APPLICATION SERVER |
|
36 | ; GUNICORN APPLICATION SERVER | |
37 | ; ########################### |
|
37 | ; ########################### | |
38 |
|
38 | |||
39 |
; run with gunicorn |
|
39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini | |
40 |
|
40 | |||
41 | ; Module to use, this setting shouldn't be changed |
|
41 | ; Module to use, this setting shouldn't be changed | |
42 | use = egg:gunicorn#main |
|
42 | use = egg:gunicorn#main | |
@@ -249,15 +249,56 @@ labs_settings_active = true
 ; optional prefix to Add to email Subject
 #exception_tracker.email_prefix = [RHODECODE ERROR]

-; File store configuration. This is used to store and serve uploaded files
-file_store.enabled = true
+; NOTE: this setting IS DEPRECATED:
+; file_store backend is always enabled
+#file_store.enabled = true

+; NOTE: this setting IS DEPRECATED:
+; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
 ; Storage backend, available options are: local
-file_store.backend = local
+#file_store.backend = local

+; NOTE: this setting IS DEPRECATED:
+; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
 ; path to store the uploaded binaries and artifacts
-file_store.storage_path = /var/opt/rhodecode_data/file_store
+#file_store.storage_path = /var/opt/rhodecode_data/file_store
+
+; Artifacts file-store, is used to store comment attachments and artifacts uploads.
+; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
+; filesystem_v1 is backwards compat with pre 5.1 storage changes
+; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from
+; previous installations to keep the artifacts without a need of migration
+file_store.backend.type = filesystem_v1
+
+; filesystem options...
+file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store
+
+; filesystem_v2 options...
+file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store_2
+file_store.filesystem_v2.shards = 8

+; objectstore options...
+; url for s3 compatible storage that allows to upload artifacts
+; e.g. http://minio:9000
+#file_store.backend.type = objectstore
+file_store.objectstore.url = http://s3-minio:9000
+
+; a top-level bucket to put all other shards in
+; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
+file_store.objectstore.bucket = rhodecode-file-store-tests
+
+; number of sharded buckets to create to distribute archives across
+; default is 8 shards
+file_store.objectstore.bucket_shards = 8
+
+; key for s3 auth
+file_store.objectstore.key = s3admin
+
+; secret for s3 auth
+file_store.objectstore.secret = s3secret4
+
+; region for s3 storage
+file_store.objectstore.region = eu-central-1

 ; Redis url to acquire/check generation of archives locks
 archive_cache.locking.url = redis://redis:6379/1
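A quick way to sanity-check which backend a given ini selects (sketch: the [app:main] section name follows the usual paste-ini layout and is assumed here; interpolation is disabled because the ini uses %(here)s-style values elsewhere):

import configparser

cp = configparser.ConfigParser(interpolation=None)
cp.read('rhodecode.ini')
app = cp['app:main']

backend = app.get('file_store.backend.type', 'filesystem_v1')
if backend == 'objectstore':
    url = app['file_store.objectstore.url']
    shards = app.getint('file_store.objectstore.bucket_shards', fallback=8)
else:
    # filesystem_v1 / filesystem_v2 both carry a storage_path key
    path = app[f'file_store.{backend}.storage_path']
print(backend)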
@@ -593,6 +634,7 @@ vcs.scm_app_implementation = http
 ; Push/Pull operations hooks protocol, available options are:
 ; `http` - use http-rpc backend (default)
 ; `celery` - use celery based hooks
+#DEPRECATED:vcs.hooks.protocol = http
 vcs.hooks.protocol = http

 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
@@ -626,6 +668,10 @@ vcs.methods.cache = false
 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
 #vcs.svn.compatible_version = 1.8

+; Redis connection settings for svn integrations logic
+; This connection string needs to be the same on ce and vcsserver
+vcs.svn.redis_conn = redis://redis:6379/0
+
 ; Enable SVN proxy of requests over HTTP
 vcs.svn.proxy.enabled = true

@@ -681,7 +727,8 @@ ssh.authorized_keys_file_path = %(here)s
 ; RhodeCode installation directory.
 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
-ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
+#DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
+ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2

 ; Allow shell when executing the ssh-wrapper command
 ssh.wrapper_cmd_allow_shell = false
@@ -189,6 +189,7 @@ setup(
         'rc-upgrade-db=rhodecode.lib.rc_commands.upgrade_db:main',
         'rc-ishell=rhodecode.lib.rc_commands.ishell:main',
         'rc-add-artifact=rhodecode.lib.rc_commands.add_artifact:main',
+        'rc-migrate-artifact=rhodecode.lib.rc_commands.migrate_artifact:main',
         'rc-ssh-wrapper=rhodecode.apps.ssh_support.lib.ssh_wrapper_v1:main',
         'rc-ssh-wrapper-v2=rhodecode.apps.ssh_support.lib.ssh_wrapper_v2:main',
     ],
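Each console_scripts entry maps a command name to a module:function target; once installed, the new rc-migrate-artifact command is resolvable at runtime. A sketch using the stdlib selectable entry_points API (Python 3.10+):

from importlib.metadata import entry_points

for ep in entry_points(group='console_scripts'):
    if ep.name == 'rc-migrate-artifact':
        main = ep.load()  # imports rhodecode.lib.rc_commands.migrate_artifact and grabs main
        # calling main() would run the migration CLI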
NO CONTENT: file was removed