@@ -0,0 +1,29 b''
# Copyright (C) 2015-2024 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

from .fanout_cache import get_archival_cache_store
from .fanout_cache import get_archival_config

from .utils import archive_iterator
from .utils import ArchiveCacheLock


def includeme(config):
    # init our cache at start
    settings = config.get_settings()
    get_archival_cache_store(settings)
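For context, a minimal sketch of how a Pyramid application would activate this package; the dotted path below is an assumption for illustration, not taken from this diff:

# Hypothetical wiring sketch: config.include() resolves the dotted path and
# calls the includeme(config) hook above, which warms up the archive cache store.
from pyramid.config import Configurator

def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.include('rhodecode.lib.archive_cache')  # assumed dotted path
    return config.make_wsgi_app()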
@@ -0,0 +1,60 b''
# Copyright (C) 2015-2024 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import redis
from rhodecode.lib._vendor import redis_lock

from .utils import ArchiveCacheLock


class GenerationLock:
    """
    Locking mechanism that detects if a lock is already acquired:

    with GenerationLock(lock_key, url):
        compute_archive()
    """
    lock_timeout = 7200

    def __init__(self, lock_key, url):
        self.lock_key = lock_key
        self._create_client(url)
        self.lock = self.get_lock()

    def _create_client(self, url):
        connection_pool = redis.ConnectionPool.from_url(url)
        self.writer_client = redis.StrictRedis(
            connection_pool=connection_pool
        )
        self.reader_client = self.writer_client

    def get_lock(self):
        return redis_lock.Lock(
            redis_client=self.writer_client,
            name=self.lock_key,
            expire=self.lock_timeout,
            strict=True
        )

    def __enter__(self):
        acquired = self.lock.acquire(blocking=False)
        if not acquired:
            raise ArchiveCacheLock('Failed to create a lock')

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.lock.release()
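A minimal usage sketch follows; the lock key, module paths, and the compute_archive callable are illustrative assumptions, and the Redis URL simply mirrors the archive_cache.locking.url value set in the .ini files below.

from rhodecode.lib.archive_cache.lock import GenerationLock    # assumed module path
from rhodecode.lib.archive_cache.utils import ArchiveCacheLock  # assumed module path

def generate_archive_once(archive_key, compute_archive):
    """Build the archive only if no other worker holds the generation lock."""
    lock_url = 'redis://redis:6379/1'  # same value as archive_cache.locking.url below
    try:
        with GenerationLock(f'archive_gen_{archive_key}', lock_url):
            compute_archive()
    except ArchiveCacheLock:
        # another worker is already generating this archive; skip instead of blocking
        pass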
@@ -0,0 +1,30 b''
# Copyright (C) 2015-2024 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/


class ArchiveCacheLock(Exception):
    pass


def archive_iterator(_reader, block_size: int = 4096 * 512):
    # default block size: 4096 * 512 = 2MB
    while True:
        data = _reader.read(block_size)
        if not data:
            break
        yield data
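Below, a minimal usage sketch for archive_iterator; the import path and file path are illustrative assumptions, not taken from this diff.

from rhodecode.lib.archive_cache.utils import archive_iterator  # assumed module path

def stream_archive(path='/var/opt/rhodecode_data/archive_cache/example.tar.gz'):
    # Stream a cached archive in 2MB chunks instead of reading it into memory;
    # in a real view each chunk would be written to the HTTP response body.
    total_bytes = 0
    with open(path, 'rb') as reader:
        for chunk in archive_iterator(reader):
            total_bytes += len(chunk)
    return total_bytes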
@@ -1,803 +1,813 b''
|
1 | 1 | |
|
2 | 2 | ; ######################################### |
|
3 | 3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
4 | 4 | ; ######################################### |
|
5 | 5 | |
|
6 | 6 | [DEFAULT] |
|
7 | 7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
8 | 8 | debug = true |
|
9 | 9 | |
|
10 | 10 | ; ######################################################################## |
|
11 | 11 | ; EMAIL CONFIGURATION |
|
12 | 12 | ; These settings will be used by the RhodeCode mailing system |
|
13 | 13 | ; ######################################################################## |
|
14 | 14 | |
|
15 | 15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
16 | 16 | #email_prefix = [RhodeCode] |
|
17 | 17 | |
|
18 | 18 | ; email FROM address all mails will be sent |
|
19 | 19 | #app_email_from = rhodecode-noreply@localhost |
|
20 | 20 | |
|
21 | 21 | #smtp_server = mail.server.com |
|
22 | 22 | #smtp_username = |
|
23 | 23 | #smtp_password = |
|
24 | 24 | #smtp_port = |
|
25 | 25 | #smtp_use_tls = false |
|
26 | 26 | #smtp_use_ssl = true |
|
27 | 27 | |
|
28 | 28 | [server:main] |
|
29 | 29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, |
|
30 | 30 | ; Host port for gunicorn are controlled by gunicorn_conf.py |
|
31 | 31 | host = 127.0.0.1 |
|
32 | 32 | port = 10020 |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | ; ########################### |
|
36 | 36 | ; GUNICORN APPLICATION SERVER |
|
37 | 37 | ; ########################### |
|
38 | 38 | |
|
39 | 39 | ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py |
|
40 | 40 | |
|
41 | 41 | ; Module to use, this setting shouldn't be changed |
|
42 | 42 | use = egg:gunicorn#main |
|
43 | 43 | |
|
44 | 44 | ; Prefix middleware for RhodeCode. |
|
45 | 45 | ; recommended when using proxy setup. |
|
46 | 46 | ; allows to set RhodeCode under a prefix in server. |
|
47 | 47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
48 | 48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
49 | 49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
50 | 50 | ; to make your cookies only work on prefix url |
|
51 | 51 | [filter:proxy-prefix] |
|
52 | 52 | use = egg:PasteDeploy#prefix |
|
53 | 53 | prefix = / |
|
54 | 54 | |
|
55 | 55 | [app:main] |
|
56 | 56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
57 | 57 | ; of this file |
|
58 | 58 | ; Each option in the app:main section can be overridden by an environment variable
|
59 | 59 | ; |
|
60 | 60 | ;To override an option: |
|
61 | 61 | ; |
|
62 | 62 | ;RC_<KeyName> |
|
63 | 63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
64 | 64 | ;For example, if you have these configuration settings: |
|
65 | 65 | ;rc_cache.repo_object.backend = foo |
|
66 | 66 | ;can be overridden by |
|
67 | 67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
68 | 68 | |
|
69 | 69 | use = egg:rhodecode-enterprise-ce |
|
70 | 70 | |
|
71 | 71 | ; enable proxy prefix middleware, defined above |
|
72 | 72 | #filter-with = proxy-prefix |
|
73 | 73 | |
|
74 | 74 | ; ############# |
|
75 | 75 | ; DEBUG OPTIONS |
|
76 | 76 | ; ############# |
|
77 | 77 | |
|
78 | 78 | pyramid.reload_templates = true |
|
79 | 79 | |
|
80 | 80 | # During development the we want to have the debug toolbar enabled |
|
81 | 81 | pyramid.includes = |
|
82 | 82 | pyramid_debugtoolbar |
|
83 | 83 | |
|
84 | 84 | debugtoolbar.hosts = 0.0.0.0/0 |
|
85 | 85 | debugtoolbar.exclude_prefixes = |
|
86 | 86 | /css |
|
87 | 87 | /fonts |
|
88 | 88 | /images |
|
89 | 89 | /js |
|
90 | 90 | |
|
91 | 91 | ## RHODECODE PLUGINS ## |
|
92 | 92 | rhodecode.includes = |
|
93 | 93 | rhodecode.api |
|
94 | 94 | |
|
95 | 95 | |
|
96 | 96 | # api prefix url |
|
97 | 97 | rhodecode.api.url = /_admin/api |
|
98 | 98 | |
|
99 | 99 | ; enable debug style page |
|
100 | 100 | debug_style = true |
|
101 | 101 | |
|
102 | 102 | ; ################# |
|
103 | 103 | ; END DEBUG OPTIONS |
|
104 | 104 | ; ################# |
|
105 | 105 | |
|
106 | 106 | ; encryption key used to encrypt social plugin tokens, |
|
107 | 107 | ; remote_urls with credentials etc, if not set it defaults to |
|
108 | 108 | ; `beaker.session.secret` |
|
109 | 109 | #rhodecode.encrypted_values.secret = |
|
110 | 110 | |
|
111 | 111 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
112 | 112 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
113 | 113 | #rhodecode.encrypted_values.strict = false |
|
114 | 114 | |
|
115 | 115 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
116 | 116 | ; fernet is safer, and we strongly recommend switching to it. |
|
117 | 117 | ; Due to backward compatibility aes is used as default. |
|
118 | 118 | #rhodecode.encrypted_values.algorithm = fernet |
|
119 | 119 | |
|
120 | 120 | ; Return gzipped responses from RhodeCode (static files/application) |
|
121 | 121 | gzip_responses = false |
|
122 | 122 | |
|
123 | 123 | ; Auto-generate javascript routes file on startup |
|
124 | 124 | generate_js_files = false |
|
125 | 125 | |
|
126 | 126 | ; System global default language. |
|
127 | 127 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
128 | 128 | lang = en |
|
129 | 129 | |
|
130 | 130 | ; Perform a full repository scan and import on each server start. |
|
131 | 131 | ; Setting this to true could lead to very long startup time.
|
132 | 132 | startup.import_repos = false |
|
133 | 133 | |
|
134 | 134 | ; URL at which the application is running. This is used for Bootstrapping |
|
135 | 135 | ; requests in context when no web request is available. Used in ishell, or |
|
136 | 136 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
137 | 137 | app.base_url = http://rhodecode.local |
|
138 | 138 | |
|
139 | 139 | ; Host at which the Service API is running. |
|
140 | 140 | app.service_api.host = http://rhodecode.local:10020 |
|
141 | 141 | |
|
142 | 142 | ; Secret for Service API authentication. |
|
143 | 143 | app.service_api.token = |
|
144 | 144 | |
|
145 | 145 | ; Unique application ID. Should be a random unique string for security. |
|
146 | 146 | app_instance_uuid = rc-production |
|
147 | 147 | |
|
148 | 148 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
149 | 149 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
150 | 150 | ; partially. E.g 512000 == 512Kb |
|
151 | 151 | cut_off_limit_diff = 512000 |
|
152 | 152 | |
|
153 | 153 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
154 | 154 | ; file inside diff which exceeds this limit will be displayed partially. |
|
155 | 155 | ; E.g 128000 == 128Kb |
|
156 | 156 | cut_off_limit_file = 128000 |
|
157 | 157 | |
|
158 | 158 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
159 | 159 | vcs_full_cache = true |
|
160 | 160 | |
|
161 | 161 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
162 | 162 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
163 | 163 | force_https = false |
|
164 | 164 | |
|
165 | 165 | ; use Strict-Transport-Security headers |
|
166 | 166 | use_htsts = false |
|
167 | 167 | |
|
168 | 168 | ; Set to true if your repos are exposed using the dumb protocol |
|
169 | 169 | git_update_server_info = false |
|
170 | 170 | |
|
171 | 171 | ; RSS/ATOM feed options |
|
172 | 172 | rss_cut_off_limit = 256000 |
|
173 | 173 | rss_items_per_page = 10 |
|
174 | 174 | rss_include_diff = false |
|
175 | 175 | |
|
176 | 176 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
177 | 177 | ; url that does rewrites to _admin/gists/{gistid}. |
|
178 | 178 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
179 | 179 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
180 | 180 | gist_alias_url = |
|
181 | 181 | |
|
182 | 182 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
183 | 183 | ; used for access. |
|
184 | 184 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
185 | 185 | ; came from the logged-in user who owns this authentication token.
|
186 | 186 | ; Additionally @TOKEN syntax can be used to bind the view to a specific
|
187 | 187 | ; authentication token. Such view would be only accessible when used together |
|
188 | 188 | ; with this authentication token |
|
189 | 189 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
190 | 190 | ; The list should be "," separated and on a single line. |
|
191 | 191 | ; Most common views to enable: |
|
192 | 192 | |
|
193 | 193 | # RepoCommitsView:repo_commit_download |
|
194 | 194 | # RepoCommitsView:repo_commit_patch |
|
195 | 195 | # RepoCommitsView:repo_commit_raw |
|
196 | 196 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
197 | 197 | # RepoFilesView:repo_files_diff |
|
198 | 198 | # RepoFilesView:repo_archivefile |
|
199 | 199 | # RepoFilesView:repo_file_raw |
|
200 | 200 | # GistView:* |
|
201 | 201 | api_access_controllers_whitelist = |
|
202 | 202 | |
|
203 | 203 | ; Default encoding used to convert from and to unicode |
|
204 | 204 | ; can be also a comma separated list of encoding in case of mixed encodings |
|
205 | 205 | default_encoding = UTF-8 |
|
206 | 206 | |
|
207 | 207 | ; instance-id prefix |
|
208 | 208 | ; a prefix key for this instance used for cache invalidation when running |
|
209 | 209 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
210 | 210 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
211 | 211 | instance_id = |
|
212 | 212 | |
|
213 | 213 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
214 | 214 | ; of an authentication plugin also if it is disabled by it's settings. |
|
215 | 215 | ; This could be useful if you are unable to log in to the system due to broken |
|
216 | 216 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
217 | 217 | ; module to log in again and fix the settings. |
|
218 | 218 | ; Available builtin plugin IDs (hash is part of the ID): |
|
219 | 219 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
220 | 220 | ; egg:rhodecode-enterprise-ce#pam |
|
221 | 221 | ; egg:rhodecode-enterprise-ce#ldap |
|
222 | 222 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
223 | 223 | ; egg:rhodecode-enterprise-ce#headers |
|
224 | 224 | ; egg:rhodecode-enterprise-ce#crowd |
|
225 | 225 | |
|
226 | 226 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
227 | 227 | |
|
228 | 228 | ; Flag to control loading of legacy plugins in py:/path format |
|
229 | 229 | auth_plugin.import_legacy_plugins = true |
|
230 | 230 | |
|
231 | 231 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
232 | 232 | ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with |
|
233 | 233 | ; handling that causing a series of failed authentication calls. |
|
234 | 234 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
235 | 235 | ; This will be served instead of default 401 on bad authentication |
|
236 | 236 | auth_ret_code = |
|
237 | 237 | |
|
238 | 238 | ; use special detection method when serving auth_ret_code, instead of serving |
|
239 | 239 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
240 | 240 | ; and then serve auth_ret_code to clients |
|
241 | 241 | auth_ret_code_detection = false |
|
242 | 242 | |
|
243 | 243 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
244 | 244 | ; codes don't break the transactions while 4XX codes do |
|
245 | 245 | lock_ret_code = 423 |
|
246 | 246 | |
|
247 | 247 | ; Filesystem location where repositories should be stored
|
248 | 248 | repo_store.path = /var/opt/rhodecode_repo_store |
|
249 | 249 | |
|
250 | 250 | ; allows to setup custom hooks in settings page |
|
251 | 251 | allow_custom_hooks_settings = true |
|
252 | 252 | |
|
253 | 253 | ; Generated license token required for EE edition license. |
|
254 | 254 | ; New generated token value can be found in Admin > settings > license page. |
|
255 | 255 | license_token = |
|
256 | 256 | |
|
257 | 257 | ; This flag hides sensitive information on the license page such as token, and license data |
|
258 | 258 | license.hide_license_info = false |
|
259 | 259 | |
|
260 | 260 | ; supervisor connection uri, for managing supervisor and logs. |
|
261 | 261 | supervisor.uri = |
|
262 | 262 | |
|
263 | 263 | ; supervisord group name/id we only want this RC instance to handle |
|
264 | 264 | supervisor.group_id = dev |
|
265 | 265 | |
|
266 | 266 | ; Display extended labs settings |
|
267 | 267 | labs_settings_active = true |
|
268 | 268 | |
|
269 | 269 | ; Custom exception store path, defaults to TMPDIR |
|
270 | 270 | ; This is used to store exception from RhodeCode in shared directory |
|
271 | 271 | #exception_tracker.store_path = |
|
272 | 272 | |
|
273 | 273 | ; Send email with exception details when it happens |
|
274 | 274 | #exception_tracker.send_email = false |
|
275 | 275 | |
|
276 | 276 | ; Comma separated list of recipients for exception emails, |
|
277 | 277 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
278 | 278 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
279 | 279 | #exception_tracker.send_email_recipients = |
|
280 | 280 | |
|
281 | 281 | ; optional prefix to Add to email Subject |
|
282 | 282 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
283 | 283 | |
|
284 | 284 | ; File store configuration. This is used to store and serve uploaded files |
|
285 | 285 | file_store.enabled = true |
|
286 | 286 | |
|
287 | 287 | ; Storage backend, available options are: local |
|
288 | 288 | file_store.backend = local |
|
289 | 289 | |
|
290 | 290 | ; path to store the uploaded binaries and artifacts |
|
291 | 291 | file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
292 | 292 | |
|
293 | 293 | ; Uncomment and set this path to control settings for archive download cache. |
|
294 | 294 | ; Generated repo archives will be cached at this location |
|
295 | 295 | ; and served from the cache during subsequent requests for the same archive of |
|
296 | 296 | ; the repository. This path is important to be shared across filesystems and with |
|
297 | 297 | ; RhodeCode and vcsserver |
|
298 | 298 |
    | 299 | ; Redis url to acquire/check generation of archives locks
    | 300 | archive_cache.locking.url = redis://redis:6379/1
    | 301 |
    | 302 | ; Storage backend, only 'filesystem' is available now
    | 303 | archive_cache.backend.type = filesystem
    | 304 |
299 | 305 | ; Default is $cache_dir/archive_cache if not set
300 |     | archive_cache.store_dir = /var/opt/rhodecode_data/
    | 306 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
301 | 307 |
302 | 308 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
303 |     | archive_cache.cache_size_gb = 1
    | 309 | archive_cache.filesystem.cache_size_gb = 1
    | 310 |
    | 311 | ; Eviction policy used to clear out after cache_size_gb limit is reached
    | 312 | archive_cache.filesystem.eviction_policy = least-recently-stored
304 | 313 |
305 | 314 | ; By default cache uses sharding technique, this specifies how many shards are there
306 |     | archive_cache.cache_shards =
    | 315 | archive_cache.filesystem.cache_shards = 8
    | 316 |
307 | 317 |
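For reference, a hedged sketch of how these new archive_cache.* keys could be read out of the parsed .ini settings; the actual parsing lives in get_archival_config (not shown in this diff), so the function name and defaults here are assumptions apart from the keys themselves.

def read_archive_cache_settings(settings: dict) -> dict:
    # Illustrative only; defaults mirror the values and comments in this hunk.
    conf = {
        'locking_url': settings.get('archive_cache.locking.url', 'redis://redis:6379/1'),
        'backend': settings.get('archive_cache.backend.type', 'filesystem'),
        'store_dir': settings.get('archive_cache.filesystem.store_dir'),
        'cache_size_gb': float(settings.get('archive_cache.filesystem.cache_size_gb', 10)),
        'eviction_policy': settings.get('archive_cache.filesystem.eviction_policy', 'least-recently-stored'),
        'cache_shards': int(settings.get('archive_cache.filesystem.cache_shards', 8)),
    }
    # with sharding, each shard gets an equal slice of the total size budget
    conf['shard_size_gb'] = conf['cache_size_gb'] / conf['cache_shards']
    return conf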
|
308 | 318 | ; ############# |
|
309 | 319 | ; CELERY CONFIG |
|
310 | 320 | ; ############# |
|
311 | 321 | |
|
312 | 322 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
313 | 323 | |
|
314 | 324 | use_celery = true |
|
315 | 325 | |
|
316 | 326 | ; path to store schedule database |
|
317 | 327 | #celerybeat-schedule.path = |
|
318 | 328 | |
|
319 | 329 | ; connection url to the message broker (default redis) |
|
320 | 330 | celery.broker_url = redis://redis:6379/8 |
|
321 | 331 | |
|
322 | 332 | ; results backend to get results for (default redis) |
|
323 | 333 | celery.result_backend = redis://redis:6379/8 |
|
324 | 334 | |
|
325 | 335 | ; rabbitmq example |
|
326 | 336 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
327 | 337 | |
|
328 | 338 | ; maximum tasks to execute before worker restart |
|
329 | 339 | celery.max_tasks_per_child = 20 |
|
330 | 340 | |
|
331 | 341 | ; tasks will never be sent to the queue, but executed locally instead. |
|
332 | 342 | celery.task_always_eager = false |
|
333 | 343 | |
|
334 | 344 | ; ############# |
|
335 | 345 | ; DOGPILE CACHE |
|
336 | 346 | ; ############# |
|
337 | 347 | |
|
338 | 348 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
339 | 349 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space |
|
340 | 350 | cache_dir = /var/opt/rhodecode_data |
|
341 | 351 | |
|
342 | 352 | ; ********************************************* |
|
343 | 353 | ; `sql_cache_short` cache for heavy SQL queries |
|
344 | 354 | ; Only supported backend is `memory_lru` |
|
345 | 355 | ; ********************************************* |
|
346 | 356 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
347 | 357 | rc_cache.sql_cache_short.expiration_time = 30 |
|
348 | 358 | |
|
349 | 359 | |
|
350 | 360 | ; ***************************************************** |
|
351 | 361 | ; `cache_repo_longterm` cache for repo object instances |
|
352 | 362 | ; Only supported backend is `memory_lru` |
|
353 | 363 | ; ***************************************************** |
|
354 | 364 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
355 | 365 | ; by default we use 30 Days, cache is still invalidated on push |
|
356 | 366 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
357 | 367 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
358 | 368 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
359 | 369 | |
|
360 | 370 | |
|
361 | 371 | ; ********************************************* |
|
362 | 372 | ; `cache_general` cache for general purpose use |
|
363 | 373 | ; for simplicity use rc.file_namespace backend, |
|
364 | 374 | ; for performance and scale use rc.redis |
|
365 | 375 | ; ********************************************* |
|
366 | 376 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
367 | 377 | rc_cache.cache_general.expiration_time = 43200 |
|
368 | 378 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
369 | 379 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
370 | 380 | |
|
371 | 381 | ; alternative `cache_general` redis backend with distributed lock |
|
372 | 382 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
373 | 383 | #rc_cache.cache_general.expiration_time = 300 |
|
374 | 384 | |
|
375 | 385 | ; redis_expiration_time needs to be greater than expiration_time
|
376 | 386 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
377 | 387 | |
|
378 | 388 | #rc_cache.cache_general.arguments.host = localhost |
|
379 | 389 | #rc_cache.cache_general.arguments.port = 6379 |
|
380 | 390 | #rc_cache.cache_general.arguments.db = 0 |
|
381 | 391 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
382 | 392 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
383 | 393 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
384 | 394 | |
|
385 | 395 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
386 | 396 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
387 | 397 | |
|
388 | 398 | ; ************************************************* |
|
389 | 399 | ; `cache_perms` cache for permission tree, auth TTL |
|
390 | 400 | ; for simplicity use rc.file_namespace backend, |
|
391 | 401 | ; for performance and scale use rc.redis |
|
392 | 402 | ; ************************************************* |
|
393 | 403 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
394 | 404 | rc_cache.cache_perms.expiration_time = 3600 |
|
395 | 405 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
396 | 406 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
397 | 407 | |
|
398 | 408 | ; alternative `cache_perms` redis backend with distributed lock |
|
399 | 409 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
400 | 410 | #rc_cache.cache_perms.expiration_time = 300 |
|
401 | 411 | |
|
402 | 412 | ; redis_expiration_time needs to be greater than expiration_time
|
403 | 413 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
404 | 414 | |
|
405 | 415 | #rc_cache.cache_perms.arguments.host = localhost |
|
406 | 416 | #rc_cache.cache_perms.arguments.port = 6379 |
|
407 | 417 | #rc_cache.cache_perms.arguments.db = 0 |
|
408 | 418 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
409 | 419 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
410 | 420 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
411 | 421 | |
|
412 | 422 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
413 | 423 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
414 | 424 | |
|
415 | 425 | ; *************************************************** |
|
416 | 426 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
417 | 427 | ; for simplicity use rc.file_namespace backend, |
|
418 | 428 | ; for performance and scale use rc.redis |
|
419 | 429 | ; *************************************************** |
|
420 | 430 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
421 | 431 | rc_cache.cache_repo.expiration_time = 2592000 |
|
422 | 432 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
423 | 433 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
424 | 434 | |
|
425 | 435 | ; alternative `cache_repo` redis backend with distributed lock |
|
426 | 436 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
427 | 437 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
428 | 438 | |
|
429 | 439 | ; redis_expiration_time needs to be greater than expiration_time
|
430 | 440 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
431 | 441 | |
|
432 | 442 | #rc_cache.cache_repo.arguments.host = localhost |
|
433 | 443 | #rc_cache.cache_repo.arguments.port = 6379 |
|
434 | 444 | #rc_cache.cache_repo.arguments.db = 1 |
|
435 | 445 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
436 | 446 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
437 | 447 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
438 | 448 | |
|
439 | 449 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
440 | 450 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
441 | 451 | |
|
442 | 452 | ; ############## |
|
443 | 453 | ; BEAKER SESSION |
|
444 | 454 | ; ############## |
|
445 | 455 | |
|
446 | 456 | ; beaker.session.type is type of storage options for the logged users sessions. Current allowed |
|
447 | 457 | ; types are file, ext:redis, ext:database, ext:memcached |
|
448 | 458 | ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session |
|
449 | 459 | #beaker.session.type = file |
|
450 | 460 | #beaker.session.data_dir = %(here)s/data/sessions |
|
451 | 461 | |
|
452 | 462 | ; Redis based sessions |
|
453 | 463 | beaker.session.type = ext:redis |
|
454 | 464 | beaker.session.url = redis://redis:6379/2 |
|
455 | 465 | |
|
456 | 466 | ; DB based session, fast, and allows easy management over logged in users |
|
457 | 467 | #beaker.session.type = ext:database |
|
458 | 468 | #beaker.session.table_name = db_session |
|
459 | 469 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
460 | 470 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
461 | 471 | #beaker.session.sa.pool_recycle = 3600 |
|
462 | 472 | #beaker.session.sa.echo = false |
|
463 | 473 | |
|
464 | 474 | beaker.session.key = rhodecode |
|
465 | 475 | beaker.session.secret = develop-rc-uytcxaz |
|
466 | 476 | beaker.session.lock_dir = /data_ramdisk/lock |
|
467 | 477 | |
|
468 | 478 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
469 | 479 | ; you must disable beaker.session.secret to use this |
|
470 | 480 | #beaker.session.encrypt_key = key_for_encryption |
|
471 | 481 | #beaker.session.validate_key = validation_key |
|
472 | 482 | |
|
473 | 483 | ; Sets session as invalid (also logging out the user) if it has not been
|
474 | 484 | ; accessed for given amount of time in seconds |
|
475 | 485 | beaker.session.timeout = 2592000 |
|
476 | 486 | beaker.session.httponly = true |
|
477 | 487 | |
|
478 | 488 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
479 | 489 | #beaker.session.cookie_path = /custom_prefix |
|
480 | 490 | |
|
481 | 491 | ; Set https secure cookie |
|
482 | 492 | beaker.session.secure = false |
|
483 | 493 | |
|
484 | 494 | ; default cookie expiration time in seconds, set to `true` to set expire |
|
485 | 495 | ; at browser close |
|
486 | 496 | #beaker.session.cookie_expires = 3600 |
|
487 | 497 | |
|
488 | 498 | ; ############################# |
|
489 | 499 | ; SEARCH INDEXING CONFIGURATION |
|
490 | 500 | ; ############################# |
|
491 | 501 | |
|
492 | 502 | ; Full text search indexer is available in rhodecode-tools under |
|
493 | 503 | ; `rhodecode-tools index` command |
|
494 | 504 | |
|
495 | 505 | ; WHOOSH Backend, doesn't require additional services to run |
|
496 | 506 | ; it works well with a few dozen repos
|
497 | 507 | search.module = rhodecode.lib.index.whoosh |
|
498 | 508 | search.location = %(here)s/data/index |
|
499 | 509 | |
|
500 | 510 | ; #################### |
|
501 | 511 | ; CHANNELSTREAM CONFIG |
|
502 | 512 | ; #################### |
|
503 | 513 | |
|
504 | 514 | ; channelstream enables persistent connections and live notification |
|
505 | 515 | ; in the system. It's also used by the chat system |
|
506 | 516 | |
|
507 | 517 | channelstream.enabled = true |
|
508 | 518 | |
|
509 | 519 | ; server address for channelstream server on the backend |
|
510 | 520 | channelstream.server = channelstream:9800 |
|
511 | 521 | |
|
512 | 522 | ; location of the channelstream server from outside world |
|
513 | 523 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
514 | 524 | ; by external HTTP server such as Nginx or Apache |
|
515 | 525 | ; see Nginx/Apache configuration examples in our docs |
|
516 | 526 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
517 | 527 | channelstream.secret = ENV_GENERATED |
|
518 | 528 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history |
|
519 | 529 | |
|
520 | 530 | ; Internal application path that Javascript uses to connect into. |
|
521 | 531 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
522 | 532 | channelstream.proxy_path = /_channelstream |
|
523 | 533 | |
|
524 | 534 | |
|
525 | 535 | ; ############################## |
|
526 | 536 | ; MAIN RHODECODE DATABASE CONFIG |
|
527 | 537 | ; ############################## |
|
528 | 538 | |
|
529 | 539 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
530 | 540 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
531 | 541 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
532 | 542 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
533 | 543 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
534 | 544 | |
|
535 | 545 | sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
536 | 546 | |
|
537 | 547 | ; see sqlalchemy docs for other advanced settings |
|
538 | 548 | ; print the sql statements to output |
|
539 | 549 | sqlalchemy.db1.echo = false |
|
540 | 550 | |
|
541 | 551 | ; recycle the connections after this amount of seconds |
|
542 | 552 | sqlalchemy.db1.pool_recycle = 3600 |
|
543 | 553 | |
|
544 | 554 | ; the number of connections to keep open inside the connection pool. |
|
545 | 555 | ; 0 indicates no limit |
|
546 | 556 | ; the general calculus with gevent is: |
|
547 | 557 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
548 | 558 | ; then increase pool size + max overflow so that they add up to 500. |
|
549 | 559 | #sqlalchemy.db1.pool_size = 5 |
|
550 | 560 | |
|
551 | 561 | ; The number of connections to allow in connection pool "overflow", that is |
|
552 | 562 | ; connections that can be opened above and beyond the pool_size setting, |
|
553 | 563 | ; which defaults to five. |
|
554 | 564 | #sqlalchemy.db1.max_overflow = 10 |
|
555 | 565 | |
|
556 | 566 | ; Connection check ping, used to detect broken database connections |
|
557 | 567 | ; could be enabled to better handle cases if MySQL has gone away errors |
|
558 | 568 | #sqlalchemy.db1.ping_connection = true |
|
559 | 569 | |
|
560 | 570 | ; ########## |
|
561 | 571 | ; VCS CONFIG |
|
562 | 572 | ; ########## |
|
563 | 573 | vcs.server.enable = true |
|
564 | 574 | vcs.server = vcsserver:10010 |
|
565 | 575 | |
|
566 | 576 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
567 | 577 | ; Available protocols are: |
|
568 | 578 | ; `http` - use http-rpc backend (default) |
|
569 | 579 | vcs.server.protocol = http |
|
570 | 580 | |
|
571 | 581 | ; Push/Pull operations protocol, available options are: |
|
572 | 582 | ; `http` - use http-rpc backend (default) |
|
573 | 583 | vcs.scm_app_implementation = http |
|
574 | 584 | |
|
575 | 585 | ; Push/Pull operations hooks protocol, available options are: |
|
576 | 586 | ; `http` - use http-rpc backend (default) |
|
577 | 587 | ; `celery` - use celery based hooks |
|
578 | 588 | vcs.hooks.protocol = http |
|
579 | 589 | |
|
580 | 590 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
581 | 591 | ; accessible via network. |
|
582 | 592 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
583 | 593 | vcs.hooks.host = * |
|
584 | 594 | |
|
585 | 595 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
586 | 596 | vcs.start_server = false |
|
587 | 597 | |
|
588 | 598 | ; List of enabled VCS backends, available options are: |
|
589 | 599 | ; `hg` - mercurial |
|
590 | 600 | ; `git` - git |
|
591 | 601 | ; `svn` - subversion |
|
592 | 602 | vcs.backends = hg, git, svn |
|
593 | 603 | |
|
594 | 604 | ; Wait this number of seconds before killing connection to the vcsserver |
|
595 | 605 | vcs.connection_timeout = 3600 |
|
596 | 606 | |
|
597 | 607 | ; Cache flag to cache vcsserver remote calls locally |
|
598 | 608 | ; It uses cache_region `cache_repo` |
|
599 | 609 | vcs.methods.cache = true |
|
600 | 610 | |
|
601 | 611 | ; #################################################### |
|
602 | 612 | ; Subversion proxy support (mod_dav_svn) |
|
603 | 613 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
604 | 614 | ; #################################################### |
|
605 | 615 | |
|
606 | 616 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
607 | 617 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 |
|
608 | 618 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
609 | 619 | #vcs.svn.compatible_version = 1.8 |
|
610 | 620 | |
|
611 | 621 | ; Enable SVN proxy of requests over HTTP |
|
612 | 622 | vcs.svn.proxy.enabled = true |
|
613 | 623 | |
|
614 | 624 | ; host to connect to running SVN subsystem |
|
615 | 625 | vcs.svn.proxy.host = http://svn:8090 |
|
616 | 626 | |
|
617 | 627 | ; Enable or disable the config file generation. |
|
618 | 628 | svn.proxy.generate_config = true |
|
619 | 629 | |
|
620 | 630 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
621 | 631 | svn.proxy.list_parent_path = true |
|
622 | 632 | |
|
623 | 633 | ; Set location and file name of generated config file. |
|
624 | 634 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf |
|
625 | 635 | |
|
626 | 636 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
627 | 637 | ; Example template can be found in the source code: |
|
628 | 638 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
629 | 639 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
630 | 640 | |
|
631 | 641 | ; Used as a prefix to the `Location` block in the generated config file. |
|
632 | 642 | ; In most cases it should be set to `/`. |
|
633 | 643 | svn.proxy.location_root = / |
|
634 | 644 | |
|
635 | 645 | ; Command to reload the mod dav svn configuration on change. |
|
636 | 646 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
637 | 647 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
638 | 648 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
639 | 649 | |
|
640 | 650 | ; If the timeout expires before the reload command finishes, the command will |
|
641 | 651 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
642 | 652 | #svn.proxy.reload_timeout = 10 |
|
643 | 653 | |
|
644 | 654 | ; #################### |
|
645 | 655 | ; SSH Support Settings |
|
646 | 656 | ; #################### |
|
647 | 657 | |
|
648 | 658 | ; Defines if a custom authorized_keys file should be created and written on |
|
649 | 659 | ; any change user ssh keys. Setting this to false also disables possibility |
|
650 | 660 | ; of adding SSH keys by users from web interface. Super admins can still |
|
651 | 661 | ; manage SSH Keys. |
|
652 | 662 | ssh.generate_authorized_keyfile = true |
|
653 | 663 | |
|
654 | 664 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
655 | 665 | # ssh.authorized_keys_ssh_opts = |
|
656 | 666 | |
|
657 | 667 | ; Path to the authorized_keys file where the generate entries are placed. |
|
658 | 668 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
659 | 669 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
660 | 670 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode |
|
661 | 671 | |
|
662 | 672 | ; Command to execute the SSH wrapper. The binary is available in the |
|
663 | 673 | ; RhodeCode installation directory. |
|
664 | 674 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
665 | 675 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
666 | 676 | ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
667 | 677 | |
|
668 | 678 | ; Allow shell when executing the ssh-wrapper command |
|
669 | 679 | ssh.wrapper_cmd_allow_shell = false |
|
670 | 680 | |
|
671 | 681 | ; Enables logging, and detailed output send back to the client during SSH |
|
672 | 682 | ; operations. Useful for debugging, shouldn't be used in production. |
|
673 | 683 | ssh.enable_debug_logging = true |
|
674 | 684 | |
|
675 | 685 | ; Paths to binary executable, by default they are the names, but we can |
|
676 | 686 | ; override them if we want to use a custom one |
|
677 | 687 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
678 | 688 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
679 | 689 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
680 | 690 | |
|
681 | 691 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
682 | 692 | ; to add their own keys. |
|
683 | 693 | ssh.enable_ui_key_generator = true |
|
684 | 694 | |
|
685 | 695 | ; Statsd client config, this is used to send metrics to statsd |
|
686 | 696 | ; We recommend setting statsd_exported and scrape them using Prometheus |
|
687 | 697 | #statsd.enabled = false |
|
688 | 698 | #statsd.statsd_host = 0.0.0.0 |
|
689 | 699 | #statsd.statsd_port = 8125 |
|
690 | 700 | #statsd.statsd_prefix = |
|
691 | 701 | #statsd.statsd_ipv6 = false |
|
692 | 702 | |
|
693 | 703 | ; configure logging automatically at server startup set to false |
|
694 | 704 | ; to use the below custom logging config. |
|
695 | 705 | ; RC_LOGGING_FORMATTER |
|
696 | 706 | ; RC_LOGGING_LEVEL |
|
697 | 707 | ; env variables can control the settings for logging in case of autoconfigure |
|
698 | 708 | |
|
699 | 709 | #logging.autoconfigure = true |
|
700 | 710 | |
|
701 | 711 | ; specify your own custom logging config file to configure logging |
|
702 | 712 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
703 | 713 | |
|
704 | 714 | ; Dummy marker to add new entries after. |
|
705 | 715 | ; Add any custom entries below. Please don't remove this marker. |
|
706 | 716 | custom.conf = 1 |
|
707 | 717 | |
|
708 | 718 | |
|
709 | 719 | ; ##################### |
|
710 | 720 | ; LOGGING CONFIGURATION |
|
711 | 721 | ; ##################### |
|
712 | 722 | |
|
713 | 723 | [loggers] |
|
714 | 724 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
715 | 725 | |
|
716 | 726 | [handlers] |
|
717 | 727 | keys = console, console_sql |
|
718 | 728 | |
|
719 | 729 | [formatters] |
|
720 | 730 | keys = generic, json, color_formatter, color_formatter_sql |
|
721 | 731 | |
|
722 | 732 | ; ####### |
|
723 | 733 | ; LOGGERS |
|
724 | 734 | ; ####### |
|
725 | 735 | [logger_root] |
|
726 | 736 | level = NOTSET |
|
727 | 737 | handlers = console |
|
728 | 738 | |
|
729 | 739 | [logger_sqlalchemy] |
|
730 | 740 | level = INFO |
|
731 | 741 | handlers = console_sql |
|
732 | 742 | qualname = sqlalchemy.engine |
|
733 | 743 | propagate = 0 |
|
734 | 744 | |
|
735 | 745 | [logger_beaker] |
|
736 | 746 | level = DEBUG |
|
737 | 747 | handlers = |
|
738 | 748 | qualname = beaker.container |
|
739 | 749 | propagate = 1 |
|
740 | 750 | |
|
741 | 751 | [logger_rhodecode] |
|
742 | 752 | level = DEBUG |
|
743 | 753 | handlers = |
|
744 | 754 | qualname = rhodecode |
|
745 | 755 | propagate = 1 |
|
746 | 756 | |
|
747 | 757 | [logger_ssh_wrapper] |
|
748 | 758 | level = DEBUG |
|
749 | 759 | handlers = |
|
750 | 760 | qualname = ssh_wrapper |
|
751 | 761 | propagate = 1 |
|
752 | 762 | |
|
753 | 763 | [logger_celery] |
|
754 | 764 | level = DEBUG |
|
755 | 765 | handlers = |
|
756 | 766 | qualname = celery |
|
757 | 767 | |
|
758 | 768 | |
|
759 | 769 | ; ######## |
|
760 | 770 | ; HANDLERS |
|
761 | 771 | ; ######## |
|
762 | 772 | |
|
763 | 773 | [handler_console] |
|
764 | 774 | class = StreamHandler |
|
765 | 775 | args = (sys.stderr, ) |
|
766 | 776 | level = DEBUG |
|
767 | 777 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
768 | 778 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
769 | 779 | formatter = color_formatter |
|
770 | 780 | |
|
771 | 781 | [handler_console_sql] |
|
772 | 782 | ; "level = DEBUG" logs SQL queries and results. |
|
773 | 783 | ; "level = INFO" logs SQL queries. |
|
774 | 784 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
775 | 785 | class = StreamHandler |
|
776 | 786 | args = (sys.stderr, ) |
|
777 | 787 | level = WARN |
|
778 | 788 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
779 | 789 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
780 | 790 | formatter = color_formatter_sql |
|
781 | 791 | |
|
782 | 792 | ; ########## |
|
783 | 793 | ; FORMATTERS |
|
784 | 794 | ; ########## |
|
785 | 795 | |
|
786 | 796 | [formatter_generic] |
|
787 | 797 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
788 | 798 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
789 | 799 | datefmt = %Y-%m-%d %H:%M:%S |
|
790 | 800 | |
|
791 | 801 | [formatter_color_formatter] |
|
792 | 802 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
793 | 803 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
794 | 804 | datefmt = %Y-%m-%d %H:%M:%S |
|
795 | 805 | |
|
796 | 806 | [formatter_color_formatter_sql] |
|
797 | 807 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
798 | 808 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
799 | 809 | datefmt = %Y-%m-%d %H:%M:%S |
|
800 | 810 | |
|
801 | 811 | [formatter_json] |
|
802 | 812 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
803 | 813 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
@@ -1,771 +1,781 b''
|
1 | 1 | |
|
2 | 2 | ; ######################################### |
|
3 | 3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
4 | 4 | ; ######################################### |
|
5 | 5 | |
|
6 | 6 | [DEFAULT] |
|
7 | 7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
8 | 8 | debug = false |
|
9 | 9 | |
|
10 | 10 | ; ######################################################################## |
|
11 | 11 | ; EMAIL CONFIGURATION |
|
12 | 12 | ; These settings will be used by the RhodeCode mailing system |
|
13 | 13 | ; ######################################################################## |
|
14 | 14 | |
|
15 | 15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
16 | 16 | #email_prefix = [RhodeCode] |
|
17 | 17 | |
|
18 | 18 | ; email FROM address all mails will be sent |
|
19 | 19 | #app_email_from = rhodecode-noreply@localhost |
|
20 | 20 | |
|
21 | 21 | #smtp_server = mail.server.com |
|
22 | 22 | #smtp_username = |
|
23 | 23 | #smtp_password = |
|
24 | 24 | #smtp_port = |
|
25 | 25 | #smtp_use_tls = false |
|
26 | 26 | #smtp_use_ssl = true |
|
27 | 27 | |
|
28 | 28 | [server:main] |
|
29 | 29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, |
|
30 | 30 | ; Host port for gunicorn are controlled by gunicorn_conf.py |
|
31 | 31 | host = 127.0.0.1 |
|
32 | 32 | port = 10020 |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | ; ########################### |
|
36 | 36 | ; GUNICORN APPLICATION SERVER |
|
37 | 37 | ; ########################### |
|
38 | 38 | |
|
39 | 39 | ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py |
|
40 | 40 | |
|
41 | 41 | ; Module to use, this setting shouldn't be changed |
|
42 | 42 | use = egg:gunicorn#main |
|
43 | 43 | |
|
44 | 44 | ; Prefix middleware for RhodeCode. |
|
45 | 45 | ; recommended when using proxy setup. |
|
46 | 46 | ; allows to set RhodeCode under a prefix in server. |
|
47 | 47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
48 | 48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
49 | 49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
50 | 50 | ; to make your cookies only work on prefix url |
|
51 | 51 | [filter:proxy-prefix] |
|
52 | 52 | use = egg:PasteDeploy#prefix |
|
53 | 53 | prefix = / |
|
54 | 54 | |
|
55 | 55 | [app:main] |
|
56 | 56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
57 | 57 | ; of this file |
|
58 | 58 | ; Each option in the app:main can be override by an environmental variable |
|
59 | 59 | ; |
|
60 | 60 | ;To override an option: |
|
61 | 61 | ; |
|
62 | 62 | ;RC_<KeyName> |
|
63 | 63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
64 | 64 | ;For example, if you have these configuration settings: |
|
65 | 65 | ;rc_cache.repo_object.backend = foo |
|
66 | 66 | ;can be overridden by |
|
67 | 67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
68 | 68 | |
|
69 | 69 | use = egg:rhodecode-enterprise-ce |
|
70 | 70 | |
|
71 | 71 | ; enable proxy prefix middleware, defined above |
|
72 | 72 | #filter-with = proxy-prefix |
|
73 | 73 | |
|
74 | 74 | ; encryption key used to encrypt social plugin tokens, |
|
75 | 75 | ; remote_urls with credentials etc, if not set it defaults to |
|
76 | 76 | ; `beaker.session.secret` |
|
77 | 77 | #rhodecode.encrypted_values.secret = |
|
78 | 78 | |
|
79 | 79 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
80 | 80 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
81 | 81 | #rhodecode.encrypted_values.strict = false |
|
82 | 82 | |
|
83 | 83 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
84 | 84 | ; fernet is safer, and we strongly recommend switching to it. |
|
85 | 85 | ; Due to backward compatibility aes is used as default. |
|
86 | 86 | #rhodecode.encrypted_values.algorithm = fernet |
|
87 | 87 | |
|
88 | 88 | ; Return gzipped responses from RhodeCode (static files/application) |
|
89 | 89 | gzip_responses = false |
|
90 | 90 | |
|
91 | 91 | ; Auto-generate javascript routes file on startup |
|
92 | 92 | generate_js_files = false |
|
93 | 93 | |
|
94 | 94 | ; System global default language. |
|
95 | 95 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
96 | 96 | lang = en |
|
97 | 97 | |
|
98 | 98 | ; Perform a full repository scan and import on each server start. |
|
99 | 99 | ; Setting this to true could lead to very long startup time.
|
100 | 100 | startup.import_repos = false |
|
101 | 101 | |
|
102 | 102 | ; URL at which the application is running. This is used for Bootstrapping |
|
103 | 103 | ; requests in context when no web request is available. Used in ishell, or |
|
104 | 104 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
105 | 105 | app.base_url = http://rhodecode.local |
|
106 | 106 | |
|
107 | 107 | ; Host at which the Service API is running. |
|
108 | 108 | app.service_api.host = http://rhodecode.local:10020 |
|
109 | 109 | |
|
110 | 110 | ; Secret for Service API authentication. |
|
111 | 111 | app.service_api.token = |
|
112 | 112 | |
|
113 | 113 | ; Unique application ID. Should be a random unique string for security. |
|
114 | 114 | app_instance_uuid = rc-production |
|
115 | 115 | |
|
116 | 116 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
117 | 117 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
118 | 118 | ; partially. E.g 512000 == 512Kb |
|
119 | 119 | cut_off_limit_diff = 512000 |
|
120 | 120 | |
|
121 | 121 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
122 | 122 | ; file inside diff which exceeds this limit will be displayed partially. |
|
123 | 123 | ; E.g 128000 == 128Kb |
|
124 | 124 | cut_off_limit_file = 128000 |
|
125 | 125 | |
|
126 | 126 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
127 | 127 | vcs_full_cache = true |
|
128 | 128 | |
|
129 | 129 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
130 | 130 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
131 | 131 | force_https = false |
|
132 | 132 | |
|
133 | 133 | ; use Strict-Transport-Security headers |
|
134 | 134 | use_htsts = false |
|
135 | 135 | |
|
136 | 136 | ; Set to true if your repos are exposed using the dumb protocol |
|
137 | 137 | git_update_server_info = false |
|
138 | 138 | |
|
139 | 139 | ; RSS/ATOM feed options |
|
140 | 140 | rss_cut_off_limit = 256000 |
|
141 | 141 | rss_items_per_page = 10 |
|
142 | 142 | rss_include_diff = false |
|
143 | 143 | |
|
144 | 144 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
145 | 145 | ; url that does rewrites to _admin/gists/{gistid}. |
|
146 | 146 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
147 | 147 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
148 | 148 | gist_alias_url = |
|
149 | 149 | |
|
150 | 150 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
151 | 151 | ; used for access. |
|
152 | 152 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
153 | 153 | ; came from the logged-in user who owns this authentication token.
|
154 | 154 | ; Additionally @TOKEN syntax can be used to bind the view to a specific
|
155 | 155 | ; authentication token. Such view would be only accessible when used together |
|
156 | 156 | ; with this authentication token |
|
157 | 157 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
158 | 158 | ; The list should be "," separated and on a single line. |
|
159 | 159 | ; Most common views to enable: |
|
160 | 160 | |
|
161 | 161 | # RepoCommitsView:repo_commit_download |
|
162 | 162 | # RepoCommitsView:repo_commit_patch |
|
163 | 163 | # RepoCommitsView:repo_commit_raw |
|
164 | 164 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
165 | 165 | # RepoFilesView:repo_files_diff |
|
166 | 166 | # RepoFilesView:repo_archivefile |
|
167 | 167 | # RepoFilesView:repo_file_raw |
|
168 | 168 | # GistView:* |
|
169 | 169 | api_access_controllers_whitelist = |
|
170 | 170 | |
|
171 | 171 | ; Default encoding used to convert from and to unicode |
|
172 | 172 | ; can be also a comma separated list of encoding in case of mixed encodings |
|
173 | 173 | default_encoding = UTF-8 |
|
174 | 174 | |
|
175 | 175 | ; instance-id prefix |
|
176 | 176 | ; a prefix key for this instance used for cache invalidation when running |
|
177 | 177 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
178 | 178 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
179 | 179 | instance_id = |
|
180 | 180 | |
|
181 | 181 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
182 | 182 | ; of an authentication plugin also if it is disabled by it's settings. |
|
183 | 183 | ; This could be useful if you are unable to log in to the system due to broken |
|
184 | 184 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
185 | 185 | ; module to log in again and fix the settings. |
|
186 | 186 | ; Available builtin plugin IDs (hash is part of the ID): |
|
187 | 187 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
188 | 188 | ; egg:rhodecode-enterprise-ce#pam |
|
189 | 189 | ; egg:rhodecode-enterprise-ce#ldap |
|
190 | 190 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
191 | 191 | ; egg:rhodecode-enterprise-ce#headers |
|
192 | 192 | ; egg:rhodecode-enterprise-ce#crowd |
|
193 | 193 | |
|
194 | 194 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
195 | 195 | |
|
196 | 196 | ; Flag to control loading of legacy plugins in py:/path format |
|
197 | 197 | auth_plugin.import_legacy_plugins = true |
|
198 | 198 | |
|
199 | 199 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
200 | 200 | ; response is 401 HTTPUnauthorized. Currently HG clients have trouble |

201 | 201 | ; handling that, causing a series of failed authentication calls. |
|
202 | 202 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
203 | 203 | ; This will be served instead of default 401 on bad authentication |
|
204 | 204 | auth_ret_code = |
|
205 | 205 | |
|
206 | 206 | ; use special detection method when serving auth_ret_code, instead of serving |
|
207 | 207 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
208 | 208 | ; and then serve auth_ret_code to clients |
|
209 | 209 | auth_ret_code_detection = false |
|
210 | 210 | |
|
211 | 211 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
212 | 212 | ; codes don't break the transactions while 4XX codes do |
|
213 | 213 | lock_ret_code = 423 |
|
214 | 214 | |
|
215 | 215 | ; Filesystem location where repositories should be stored |
|
216 | 216 | repo_store.path = /var/opt/rhodecode_repo_store |
|
217 | 217 | |
|
218 | 218 | ; allows setting up custom hooks on the settings page |
|
219 | 219 | allow_custom_hooks_settings = true |
|
220 | 220 | |
|
221 | 221 | ; Generated license token required for EE edition license. |
|
222 | 222 | ; New generated token value can be found in Admin > settings > license page. |
|
223 | 223 | license_token = |
|
224 | 224 | |
|
225 | 225 | ; This flag hides sensitive information on the license page such as token, and license data |
|
226 | 226 | license.hide_license_info = false |
|
227 | 227 | |
|
228 | 228 | ; supervisor connection uri, for managing supervisor and logs. |
|
229 | 229 | supervisor.uri = |
|
230 | 230 | |
|
231 | 231 | ; supervisord group name/id we only want this RC instance to handle |
|
232 | 232 | supervisor.group_id = prod |
|
233 | 233 | |
|
234 | 234 | ; Display extended labs settings |
|
235 | 235 | labs_settings_active = true |
|
236 | 236 | |
|
237 | 237 | ; Custom exception store path, defaults to TMPDIR |
|
238 | 238 | ; This is used to store exception from RhodeCode in shared directory |
|
239 | 239 | #exception_tracker.store_path = |
|
240 | 240 | |
|
241 | 241 | ; Send email with exception details when it happens |
|
242 | 242 | #exception_tracker.send_email = false |
|
243 | 243 | |
|
244 | 244 | ; Comma separated list of recipients for exception emails, |
|
245 | 245 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
246 | 246 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
247 | 247 | #exception_tracker.send_email_recipients = |
|
248 | 248 | |
|
249 | 249 | ; optional prefix to Add to email Subject |
|
250 | 250 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
251 | 251 | |
|
252 | 252 | ; File store configuration. This is used to store and serve uploaded files |
|
253 | 253 | file_store.enabled = true |
|
254 | 254 | |
|
255 | 255 | ; Storage backend, available options are: local |
|
256 | 256 | file_store.backend = local |
|
257 | 257 | |
|
258 | 258 | ; path to store the uploaded binaries and artifacts |
|
259 | 259 | file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
260 | 260 | |
|
261 | 261 | ; Uncomment and set this path to control settings for archive download cache. |
|
262 | 262 | ; Generated repo archives will be cached at this location |
|
263 | 263 | ; and served from the cache during subsequent requests for the same archive of |
|
264 | 264 | ; the repository. It is important that this path is shared across filesystems and with |
|
265 | 265 | ; RhodeCode and vcsserver |
|
266 | 266 | |
|
267 | ; Redis URL used to acquire/check locks for archive generation | 
|
268 | archive_cache.locking.url = redis://redis:6379/1 | |
|
269 | ||
|
270 | ; Storage backend, only 'filesystem' is available now | |
|
271 | archive_cache.backend.type = filesystem | |
|
272 | ||
|
267 | 273 | ; Default is $cache_dir/archive_cache if not set |
|
268 | archive_cache.store_dir = /var/opt/rhodecode_data/ |
|
|
274 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache | |
|
269 | 275 | |
|
270 | 276 | ; The limit in GB sets how much data we cache before recycling the least recently used entries, defaults to 10 GB |
|
271 | archive_cache.cache_size_gb = 40 | |
|
277 | archive_cache.filesystem.cache_size_gb = 40 | |
|
278 | ||
|
279 | ; Eviction policy used to clear out entries after the cache_size_gb limit is reached | 
|
280 | archive_cache.filesystem.eviction_policy = least-recently-stored | |
|
272 | 281 | |
|
273 | 282 | ; By default the cache uses a sharding technique; this specifies how many shards there are |
|
274 | archive_cache.cache_shards = |
|
|
283 | archive_cache.filesystem.cache_shards = 8 | |
|
284 | ||
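To make the sharding idea above concrete, here is a minimal sketch (not the actual RhodeCode implementation) of how an archive key could be mapped onto one of the configured shards by hashing; the helper name is hypothetical:

    import hashlib

    def pick_shard(archive_key: str, cache_shards: int = 8) -> str:
        # hash the key and spread entries evenly across the shard directories
        digest = hashlib.sha1(archive_key.encode('utf8')).hexdigest()
        return f'shard_{int(digest, 16) % cache_shards:02d}'

    # pick_shard('repo-id-1234-sub-0-deadbeef-all.zip') -> e.g. 'shard_05' (depends on the hash)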
|
275 | 285 | |
|
276 | 286 | ; ############# |
|
277 | 287 | ; CELERY CONFIG |
|
278 | 288 | ; ############# |
|
279 | 289 | |
|
280 | 290 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
281 | 291 | |
|
282 | 292 | use_celery = true |
|
283 | 293 | |
|
284 | 294 | ; path to store schedule database |
|
285 | 295 | #celerybeat-schedule.path = |
|
286 | 296 | |
|
287 | 297 | ; connection url to the message broker (default redis) |
|
288 | 298 | celery.broker_url = redis://redis:6379/8 |
|
289 | 299 | |
|
290 | 300 | ; results backend to get results for (default redis) |
|
291 | 301 | celery.result_backend = redis://redis:6379/8 |
|
292 | 302 | |
|
293 | 303 | ; rabbitmq example |
|
294 | 304 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
295 | 305 | |
|
296 | 306 | ; maximum tasks to execute before worker restart |
|
297 | 307 | celery.max_tasks_per_child = 20 |
|
298 | 308 | |
|
299 | 309 | ; tasks will never be sent to the queue, but executed locally instead. |
|
300 | 310 | celery.task_always_eager = false |
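For orientation, a hedged sketch of a standalone Celery app wired to the broker/result backend configured above; the app and task names are hypothetical, and RhodeCode itself bootstraps Celery through rhodecode.lib.celerylib.loader as noted in the comment above:

    from celery import Celery

    app = Celery('rc_example',
                 broker='redis://redis:6379/8',
                 backend='redis://redis:6379/8')
    # mirror the ini options above
    app.conf.task_always_eager = False
    app.conf.worker_max_tasks_per_child = 20

    @app.task
    def ping():
        return 'pong'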
|
301 | 311 | |
|
302 | 312 | ; ############# |
|
303 | 313 | ; DOGPILE CACHE |
|
304 | 314 | ; ############# |
|
305 | 315 | |
|
306 | 316 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
307 | 317 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space |
|
308 | 318 | cache_dir = /var/opt/rhodecode_data |
|
309 | 319 | |
|
310 | 320 | ; ********************************************* |
|
311 | 321 | ; `sql_cache_short` cache for heavy SQL queries |
|
312 | 322 | ; Only supported backend is `memory_lru` |
|
313 | 323 | ; ********************************************* |
|
314 | 324 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
315 | 325 | rc_cache.sql_cache_short.expiration_time = 30 |
|
316 | 326 | |
|
317 | 327 | |
|
318 | 328 | ; ***************************************************** |
|
319 | 329 | ; `cache_repo_longterm` cache for repo object instances |
|
320 | 330 | ; Only supported backend is `memory_lru` |
|
321 | 331 | ; ***************************************************** |
|
322 | 332 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
323 | 333 | ; by default we use 30 Days, cache is still invalidated on push |
|
324 | 334 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
325 | 335 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
326 | 336 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
327 | 337 | |
|
328 | 338 | |
|
329 | 339 | ; ********************************************* |
|
330 | 340 | ; `cache_general` cache for general purpose use |
|
331 | 341 | ; for simplicity use rc.file_namespace backend, |
|
332 | 342 | ; for performance and scale use rc.redis |
|
333 | 343 | ; ********************************************* |
|
334 | 344 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
335 | 345 | rc_cache.cache_general.expiration_time = 43200 |
|
336 | 346 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
337 | 347 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
338 | 348 | |
|
339 | 349 | ; alternative `cache_general` redis backend with distributed lock |
|
340 | 350 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
341 | 351 | #rc_cache.cache_general.expiration_time = 300 |
|
342 | 352 | |
|
343 | 353 | ; redis_expiration_time needs to be greater than expiration_time |
|
344 | 354 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
345 | 355 | |
|
346 | 356 | #rc_cache.cache_general.arguments.host = localhost |
|
347 | 357 | #rc_cache.cache_general.arguments.port = 6379 |
|
348 | 358 | #rc_cache.cache_general.arguments.db = 0 |
|
349 | 359 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
350 | 360 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
351 | 361 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
352 | 362 | |
|
353 | 363 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
354 | 364 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
355 | 365 | |
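For orientation, a minimal dogpile.cache sketch of what the `cache_general` redis settings above roughly correspond to; RhodeCode wires this up through its own rc_cache helpers, so the names below are illustrative only:

    from dogpile.cache import make_region

    region = make_region().configure(
        'dogpile.cache.redis',
        expiration_time=300,
        arguments={'host': 'localhost', 'port': 6379, 'db': 0,
                   'redis_expiration_time': 7200, 'distributed_lock': True},
    )

    @region.cache_on_arguments()
    def heavy_lookup(key):
        return expensive_computation(key)  # hypothetical expensive call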
|
356 | 366 | ; ************************************************* |
|
357 | 367 | ; `cache_perms` cache for permission tree, auth TTL |
|
358 | 368 | ; for simplicity use rc.file_namespace backend, |
|
359 | 369 | ; for performance and scale use rc.redis |
|
360 | 370 | ; ************************************************* |
|
361 | 371 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
362 | 372 | rc_cache.cache_perms.expiration_time = 3600 |
|
363 | 373 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
364 | 374 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
365 | 375 | |
|
366 | 376 | ; alternative `cache_perms` redis backend with distributed lock |
|
367 | 377 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
368 | 378 | #rc_cache.cache_perms.expiration_time = 300 |
|
369 | 379 | |
|
370 | 380 | ; redis_expiration_time needs to be greater than expiration_time |
|
371 | 381 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
372 | 382 | |
|
373 | 383 | #rc_cache.cache_perms.arguments.host = localhost |
|
374 | 384 | #rc_cache.cache_perms.arguments.port = 6379 |
|
375 | 385 | #rc_cache.cache_perms.arguments.db = 0 |
|
376 | 386 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
377 | 387 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
378 | 388 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
379 | 389 | |
|
380 | 390 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
381 | 391 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
382 | 392 | |
|
383 | 393 | ; *************************************************** |
|
384 | 394 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
385 | 395 | ; for simplicity use rc.file_namespace backend, |
|
386 | 396 | ; for performance and scale use rc.redis |
|
387 | 397 | ; *************************************************** |
|
388 | 398 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
389 | 399 | rc_cache.cache_repo.expiration_time = 2592000 |
|
390 | 400 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
391 | 401 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
392 | 402 | |
|
393 | 403 | ; alternative `cache_repo` redis backend with distributed lock |
|
394 | 404 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
395 | 405 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
396 | 406 | |
|
397 | 407 | ; redis_expiration_time needs to be greater than expiration_time |
|
398 | 408 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
399 | 409 | |
|
400 | 410 | #rc_cache.cache_repo.arguments.host = localhost |
|
401 | 411 | #rc_cache.cache_repo.arguments.port = 6379 |
|
402 | 412 | #rc_cache.cache_repo.arguments.db = 1 |
|
403 | 413 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
404 | 414 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
405 | 415 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
406 | 416 | |
|
407 | 417 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
408 | 418 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
409 | 419 | |
|
410 | 420 | ; ############## |
|
411 | 421 | ; BEAKER SESSION |
|
412 | 422 | ; ############## |
|
413 | 423 | |
|
414 | 424 | ; beaker.session.type is type of storage options for the logged users sessions. Current allowed |
|
415 | 425 | ; types are file, ext:redis, ext:database, ext:memcached |
|
416 | 426 | ; The fastest ones are ext:redis and ext:database; DO NOT use the memory type for sessions |
|
417 | 427 | #beaker.session.type = file |
|
418 | 428 | #beaker.session.data_dir = %(here)s/data/sessions |
|
419 | 429 | |
|
420 | 430 | ; Redis based sessions |
|
421 | 431 | beaker.session.type = ext:redis |
|
422 | 432 | beaker.session.url = redis://redis:6379/2 |
|
423 | 433 | |
|
424 | 434 | ; DB based session, fast, and allows easy management over logged in users |
|
425 | 435 | #beaker.session.type = ext:database |
|
426 | 436 | #beaker.session.table_name = db_session |
|
427 | 437 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
428 | 438 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
429 | 439 | #beaker.session.sa.pool_recycle = 3600 |
|
430 | 440 | #beaker.session.sa.echo = false |
|
431 | 441 | |
|
432 | 442 | beaker.session.key = rhodecode |
|
433 | 443 | beaker.session.secret = production-rc-uytcxaz |
|
434 | 444 | beaker.session.lock_dir = /data_ramdisk/lock |
|
435 | 445 | |
|
436 | 446 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
437 | 447 | ; you must disable beaker.session.secret to use this |
|
438 | 448 | #beaker.session.encrypt_key = key_for_encryption |
|
439 | 449 | #beaker.session.validate_key = validation_key |
|
440 | 450 | |
|
441 | 451 | ; Sets the session as invalid (also logging out the user) if it has not been |

442 | 452 | ; accessed for a given amount of time in seconds |
|
443 | 453 | beaker.session.timeout = 2592000 |
|
444 | 454 | beaker.session.httponly = true |
|
445 | 455 | |
|
446 | 456 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
447 | 457 | #beaker.session.cookie_path = /custom_prefix |
|
448 | 458 | |
|
449 | 459 | ; Set https secure cookie |
|
450 | 460 | beaker.session.secure = false |
|
451 | 461 | |
|
452 | 462 | ; default cookie expiration time in seconds, set to `true` to set expire |
|
453 | 463 | ; at browser close |
|
454 | 464 | #beaker.session.cookie_expires = 3600 |
|
455 | 465 | |
|
456 | 466 | ; ############################# |
|
457 | 467 | ; SEARCH INDEXING CONFIGURATION |
|
458 | 468 | ; ############################# |
|
459 | 469 | |
|
460 | 470 | ; Full text search indexer is available in rhodecode-tools under |
|
461 | 471 | ; `rhodecode-tools index` command |
|
462 | 472 | |
|
463 | 473 | ; WHOOSH Backend, doesn't require additional services to run |
|
464 | 474 | ; it works well with a few dozen repos |
|
465 | 475 | search.module = rhodecode.lib.index.whoosh |
|
466 | 476 | search.location = %(here)s/data/index |
|
467 | 477 | |
|
468 | 478 | ; #################### |
|
469 | 479 | ; CHANNELSTREAM CONFIG |
|
470 | 480 | ; #################### |
|
471 | 481 | |
|
472 | 482 | ; channelstream enables persistent connections and live notification |
|
473 | 483 | ; in the system. It's also used by the chat system |
|
474 | 484 | |
|
475 | 485 | channelstream.enabled = true |
|
476 | 486 | |
|
477 | 487 | ; server address for channelstream server on the backend |
|
478 | 488 | channelstream.server = channelstream:9800 |
|
479 | 489 | |
|
480 | 490 | ; location of the channelstream server from outside world |
|
481 | 491 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
482 | 492 | ; by external HTTP server such as Nginx or Apache |
|
483 | 493 | ; see Nginx/Apache configuration examples in our docs |
|
484 | 494 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
485 | 495 | channelstream.secret = ENV_GENERATED |
|
486 | 496 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history |
|
487 | 497 | |
|
488 | 498 | ; Internal application path that Javascript uses to connect into. |
|
489 | 499 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
490 | 500 | channelstream.proxy_path = /_channelstream |
|
491 | 501 | |
|
492 | 502 | |
|
493 | 503 | ; ############################## |
|
494 | 504 | ; MAIN RHODECODE DATABASE CONFIG |
|
495 | 505 | ; ############################## |
|
496 | 506 | |
|
497 | 507 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
498 | 508 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
499 | 509 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
500 | 510 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
501 | 511 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
502 | 512 | |
|
503 | 513 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
504 | 514 | |
|
505 | 515 | ; see sqlalchemy docs for other advanced settings |
|
506 | 516 | ; print the sql statements to output |
|
507 | 517 | sqlalchemy.db1.echo = false |
|
508 | 518 | |
|
509 | 519 | ; recycle the connections after this amount of seconds |
|
510 | 520 | sqlalchemy.db1.pool_recycle = 3600 |
|
511 | 521 | |
|
512 | 522 | ; the number of connections to keep open inside the connection pool. |
|
513 | 523 | ; 0 indicates no limit |
|
514 | 524 | ; the general calculus with gevent is: |
|
515 | 525 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
516 | 526 | ; then increase pool size + max overflow so that they add up to 500. |
|
517 | 527 | #sqlalchemy.db1.pool_size = 5 |
|
518 | 528 | |
|
519 | 529 | ; The number of connections to allow in connection pool "overflow", that is |
|
520 | 530 | ; connections that can be opened above and beyond the pool_size setting, |
|
521 | 531 | ; which defaults to five. |
|
522 | 532 | #sqlalchemy.db1.max_overflow = 10 |
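As a worked example of the sizing rule above (numbers are illustrative only):

    # if the system allows 500 concurrent greenlets that all do database work,
    # size the pool so pool_size + max_overflow adds up to 500, for example:
    max_greenlets = 500
    pool_size = 400
    max_overflow = max_greenlets - pool_size   # 100, so 400 + 100 = 500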
|
523 | 533 | |
|
524 | 534 | ; Connection check ping, used to detect broken database connections |
|
525 | 535 | ; could be enabled to better handle cases if MySQL has gone away errors |
|
526 | 536 | #sqlalchemy.db1.ping_connection = true |
|
527 | 537 | |
|
528 | 538 | ; ########## |
|
529 | 539 | ; VCS CONFIG |
|
530 | 540 | ; ########## |
|
531 | 541 | vcs.server.enable = true |
|
532 | 542 | vcs.server = vcsserver:10010 |
|
533 | 543 | |
|
534 | 544 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
535 | 545 | ; Available protocols are: |
|
536 | 546 | ; `http` - use http-rpc backend (default) |
|
537 | 547 | vcs.server.protocol = http |
|
538 | 548 | |
|
539 | 549 | ; Push/Pull operations protocol, available options are: |
|
540 | 550 | ; `http` - use http-rpc backend (default) |
|
541 | 551 | vcs.scm_app_implementation = http |
|
542 | 552 | |
|
543 | 553 | ; Push/Pull operations hooks protocol, available options are: |
|
544 | 554 | ; `http` - use http-rpc backend (default) |
|
545 | 555 | ; `celery` - use celery based hooks |
|
546 | 556 | vcs.hooks.protocol = http |
|
547 | 557 | |
|
548 | 558 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
549 | 559 | ; accessible via network. |
|
550 | 560 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
551 | 561 | vcs.hooks.host = * |
|
552 | 562 | |
|
553 | 563 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
554 | 564 | vcs.start_server = false |
|
555 | 565 | |
|
556 | 566 | ; List of enabled VCS backends, available options are: |
|
557 | 567 | ; `hg` - mercurial |
|
558 | 568 | ; `git` - git |
|
559 | 569 | ; `svn` - subversion |
|
560 | 570 | vcs.backends = hg, git, svn |
|
561 | 571 | |
|
562 | 572 | ; Wait this number of seconds before killing connection to the vcsserver |
|
563 | 573 | vcs.connection_timeout = 3600 |
|
564 | 574 | |
|
565 | 575 | ; Cache flag to cache vcsserver remote calls locally |
|
566 | 576 | ; It uses cache_region `cache_repo` |
|
567 | 577 | vcs.methods.cache = true |
|
568 | 578 | |
|
569 | 579 | ; #################################################### |
|
570 | 580 | ; Subversion proxy support (mod_dav_svn) |
|
571 | 581 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
572 | 582 | ; #################################################### |
|
573 | 583 | |
|
574 | 584 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
575 | 585 | ; Set a numeric version for your current SVN, e.g. 1.8 or 1.12 |
|
576 | 586 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
577 | 587 | #vcs.svn.compatible_version = 1.8 |
|
578 | 588 | |
|
579 | 589 | ; Enable SVN proxy of requests over HTTP |
|
580 | 590 | vcs.svn.proxy.enabled = true |
|
581 | 591 | |
|
582 | 592 | ; host to connect to running SVN subsystem |
|
583 | 593 | vcs.svn.proxy.host = http://svn:8090 |
|
584 | 594 | |
|
585 | 595 | ; Enable or disable the config file generation. |
|
586 | 596 | svn.proxy.generate_config = true |
|
587 | 597 | |
|
588 | 598 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
589 | 599 | svn.proxy.list_parent_path = true |
|
590 | 600 | |
|
591 | 601 | ; Set location and file name of generated config file. |
|
592 | 602 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf |
|
593 | 603 | |
|
594 | 604 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
595 | 605 | ; Example template can be found in the source code: |
|
596 | 606 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
597 | 607 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
598 | 608 | |
|
599 | 609 | ; Used as a prefix to the `Location` block in the generated config file. |
|
600 | 610 | ; In most cases it should be set to `/`. |
|
601 | 611 | svn.proxy.location_root = / |
|
602 | 612 | |
|
603 | 613 | ; Command to reload the mod dav svn configuration on change. |
|
604 | 614 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
605 | 615 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
606 | 616 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
607 | 617 | |
|
608 | 618 | ; If the timeout expires before the reload command finishes, the command will |
|
609 | 619 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
610 | 620 | #svn.proxy.reload_timeout = 10 |
|
611 | 621 | |
|
612 | 622 | ; #################### |
|
613 | 623 | ; SSH Support Settings |
|
614 | 624 | ; #################### |
|
615 | 625 | |
|
616 | 626 | ; Defines if a custom authorized_keys file should be created and written on |
|
617 | 627 | ; any change of user ssh keys. Setting this to false also disables the possibility |

618 | 628 | ; of users adding SSH keys from the web interface. Super admins can still |
|
619 | 629 | ; manage SSH Keys. |
|
620 | 630 | ssh.generate_authorized_keyfile = true |
|
621 | 631 | |
|
622 | 632 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
623 | 633 | # ssh.authorized_keys_ssh_opts = |
|
624 | 634 | |
|
625 | 635 | ; Path to the authorized_keys file where the generated entries are placed. |
|
626 | 636 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
627 | 637 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
628 | 638 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode |
|
629 | 639 | |
|
630 | 640 | ; Command to execute the SSH wrapper. The binary is available in the |
|
631 | 641 | ; RhodeCode installation directory. |
|
632 | 642 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
633 | 643 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
634 | 644 | ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
635 | 645 | |
|
636 | 646 | ; Allow shell when executing the ssh-wrapper command |
|
637 | 647 | ssh.wrapper_cmd_allow_shell = false |
|
638 | 648 | |
|
639 | 649 | ; Enables logging, and detailed output sent back to the client during SSH |
|
640 | 650 | ; operations. Useful for debugging, shouldn't be used in production. |
|
641 | 651 | ssh.enable_debug_logging = false |
|
642 | 652 | |
|
643 | 653 | ; Paths to binary executables, by default they are just the names, but we can |
|
644 | 654 | ; override them if we want to use a custom one |
|
645 | 655 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
646 | 656 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
647 | 657 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
648 | 658 | |
|
649 | 659 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
650 | 660 | ; to add their own keys. |
|
651 | 661 | ssh.enable_ui_key_generator = true |
|
652 | 662 | |
|
653 | 663 | ; Statsd client config, this is used to send metrics to statsd |
|
654 | 664 | ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus |
|
655 | 665 | #statsd.enabled = false |
|
656 | 666 | #statsd.statsd_host = 0.0.0.0 |
|
657 | 667 | #statsd.statsd_port = 8125 |
|
658 | 668 | #statsd.statsd_prefix = |
|
659 | 669 | #statsd.statsd_ipv6 = false |
|
660 | 670 | |
|
661 | 671 | ; configure logging automatically at server startup; set to false |

662 | 672 | ; to use the custom logging config below. |
|
663 | 673 | ; RC_LOGGING_FORMATTER |
|
664 | 674 | ; RC_LOGGING_LEVEL |
|
665 | 675 | ; these env variables can control the logging settings when autoconfigure is used |
|
666 | 676 | |
|
667 | 677 | #logging.autoconfigure = true |
|
668 | 678 | |
|
669 | 679 | ; specify your own custom logging config file to configure logging |
|
670 | 680 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
671 | 681 | |
|
672 | 682 | ; Dummy marker to add new entries after. |
|
673 | 683 | ; Add any custom entries below. Please don't remove this marker. |
|
674 | 684 | custom.conf = 1 |
|
675 | 685 | |
|
676 | 686 | |
|
677 | 687 | ; ##################### |
|
678 | 688 | ; LOGGING CONFIGURATION |
|
679 | 689 | ; ##################### |
|
680 | 690 | |
|
681 | 691 | [loggers] |
|
682 | 692 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
683 | 693 | |
|
684 | 694 | [handlers] |
|
685 | 695 | keys = console, console_sql |
|
686 | 696 | |
|
687 | 697 | [formatters] |
|
688 | 698 | keys = generic, json, color_formatter, color_formatter_sql |
|
689 | 699 | |
|
690 | 700 | ; ####### |
|
691 | 701 | ; LOGGERS |
|
692 | 702 | ; ####### |
|
693 | 703 | [logger_root] |
|
694 | 704 | level = NOTSET |
|
695 | 705 | handlers = console |
|
696 | 706 | |
|
697 | 707 | [logger_sqlalchemy] |
|
698 | 708 | level = INFO |
|
699 | 709 | handlers = console_sql |
|
700 | 710 | qualname = sqlalchemy.engine |
|
701 | 711 | propagate = 0 |
|
702 | 712 | |
|
703 | 713 | [logger_beaker] |
|
704 | 714 | level = DEBUG |
|
705 | 715 | handlers = |
|
706 | 716 | qualname = beaker.container |
|
707 | 717 | propagate = 1 |
|
708 | 718 | |
|
709 | 719 | [logger_rhodecode] |
|
710 | 720 | level = DEBUG |
|
711 | 721 | handlers = |
|
712 | 722 | qualname = rhodecode |
|
713 | 723 | propagate = 1 |
|
714 | 724 | |
|
715 | 725 | [logger_ssh_wrapper] |
|
716 | 726 | level = DEBUG |
|
717 | 727 | handlers = |
|
718 | 728 | qualname = ssh_wrapper |
|
719 | 729 | propagate = 1 |
|
720 | 730 | |
|
721 | 731 | [logger_celery] |
|
722 | 732 | level = DEBUG |
|
723 | 733 | handlers = |
|
724 | 734 | qualname = celery |
|
725 | 735 | |
|
726 | 736 | |
|
727 | 737 | ; ######## |
|
728 | 738 | ; HANDLERS |
|
729 | 739 | ; ######## |
|
730 | 740 | |
|
731 | 741 | [handler_console] |
|
732 | 742 | class = StreamHandler |
|
733 | 743 | args = (sys.stderr, ) |
|
734 | 744 | level = INFO |
|
735 | 745 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
736 | 746 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
737 | 747 | formatter = generic |
|
738 | 748 | |
|
739 | 749 | [handler_console_sql] |
|
740 | 750 | ; "level = DEBUG" logs SQL queries and results. |
|
741 | 751 | ; "level = INFO" logs SQL queries. |
|
742 | 752 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
743 | 753 | class = StreamHandler |
|
744 | 754 | args = (sys.stderr, ) |
|
745 | 755 | level = WARN |
|
746 | 756 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
747 | 757 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
748 | 758 | formatter = generic |
|
749 | 759 | |
|
750 | 760 | ; ########## |
|
751 | 761 | ; FORMATTERS |
|
752 | 762 | ; ########## |
|
753 | 763 | |
|
754 | 764 | [formatter_generic] |
|
755 | 765 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
756 | 766 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
757 | 767 | datefmt = %Y-%m-%d %H:%M:%S |
|
758 | 768 | |
|
759 | 769 | [formatter_color_formatter] |
|
760 | 770 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
761 | 771 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
762 | 772 | datefmt = %Y-%m-%d %H:%M:%S |
|
763 | 773 | |
|
764 | 774 | [formatter_color_formatter_sql] |
|
765 | 775 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
766 | 776 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
767 | 777 | datefmt = %Y-%m-%d %H:%M:%S |
|
768 | 778 | |
|
769 | 779 | [formatter_json] |
|
770 | 780 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
771 | 781 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
@@ -1,287 +1,286 b'' | |||
|
1 | 1 | # deps, generated via pipdeptree --exclude setuptools,wheel,pipdeptree,pip -f | tr '[:upper:]' '[:lower:]' |
|
2 | 2 | |
|
3 | 3 | alembic==1.13.1 |
|
4 | 4 | mako==1.2.4 |
|
5 | 5 | markupsafe==2.1.2 |
|
6 | 6 | sqlalchemy==1.4.52 |
|
7 | 7 | greenlet==3.0.3 |
|
8 | 8 | typing_extensions==4.9.0 |
|
9 | 9 | async-timeout==4.0.3 |
|
10 | 10 | babel==2.12.1 |
|
11 | 11 | beaker==1.12.1 |
|
12 | 12 | celery==5.3.6 |
|
13 | 13 | billiard==4.2.0 |
|
14 | 14 | click==8.1.3 |
|
15 | 15 | click-didyoumean==0.3.0 |
|
16 | 16 | click==8.1.3 |
|
17 | 17 | click-plugins==1.1.1 |
|
18 | 18 | click==8.1.3 |
|
19 | 19 | click-repl==0.2.0 |
|
20 | 20 | click==8.1.3 |
|
21 | 21 | prompt-toolkit==3.0.38 |
|
22 | 22 | wcwidth==0.2.6 |
|
23 | 23 | six==1.16.0 |
|
24 | 24 | kombu==5.3.5 |
|
25 | 25 | amqp==5.2.0 |
|
26 | 26 | vine==5.1.0 |
|
27 | 27 | vine==5.1.0 |
|
28 | 28 | python-dateutil==2.8.2 |
|
29 | 29 | six==1.16.0 |
|
30 | 30 | tzdata==2024.1 |
|
31 | 31 | vine==5.1.0 |
|
32 | 32 | channelstream==0.7.1 |
|
33 | 33 | gevent==24.2.1 |
|
34 | 34 | greenlet==3.0.3 |
|
35 | 35 | zope.event==5.0.0 |
|
36 | 36 | zope.interface==6.3.0 |
|
37 | 37 | itsdangerous==1.1.0 |
|
38 | 38 | marshmallow==2.18.0 |
|
39 | 39 | pyramid==2.0.2 |
|
40 | 40 | hupper==1.12 |
|
41 | 41 | plaster==1.1.2 |
|
42 | 42 | plaster-pastedeploy==1.0.1 |
|
43 | 43 | pastedeploy==3.1.0 |
|
44 | 44 | plaster==1.1.2 |
|
45 | 45 | translationstring==1.4 |
|
46 | 46 | venusian==3.0.0 |
|
47 | 47 | webob==1.8.7 |
|
48 | 48 | zope.deprecation==5.0.0 |
|
49 | 49 | zope.interface==6.3.0 |
|
50 | 50 | pyramid-apispec==0.3.3 |
|
51 | 51 | apispec==1.3.3 |
|
52 | 52 | pyramid-jinja2==2.10 |
|
53 | 53 | jinja2==3.1.2 |
|
54 | 54 | markupsafe==2.1.2 |
|
55 | 55 | markupsafe==2.1.2 |
|
56 | 56 | pyramid==2.0.2 |
|
57 | 57 | hupper==1.12 |
|
58 | 58 | plaster==1.1.2 |
|
59 | 59 | plaster-pastedeploy==1.0.1 |
|
60 | 60 | pastedeploy==3.1.0 |
|
61 | 61 | plaster==1.1.2 |
|
62 | 62 | translationstring==1.4 |
|
63 | 63 | venusian==3.0.0 |
|
64 | 64 | webob==1.8.7 |
|
65 | 65 | zope.deprecation==5.0.0 |
|
66 | 66 | zope.interface==6.3.0 |
|
67 | 67 | zope.deprecation==5.0.0 |
|
68 | 68 | python-dateutil==2.8.2 |
|
69 | 69 | six==1.16.0 |
|
70 | 70 | requests==2.28.2 |
|
71 | 71 | certifi==2022.12.7 |
|
72 | 72 | charset-normalizer==3.1.0 |
|
73 | 73 | idna==3.4 |
|
74 | 74 | urllib3==1.26.14 |
|
75 | 75 | ws4py==0.5.1 |
|
76 | 76 | deform==2.0.15 |
|
77 | 77 | chameleon==3.10.2 |
|
78 | 78 | colander==2.0 |
|
79 | 79 | iso8601==1.1.0 |
|
80 | 80 | translationstring==1.4 |
|
81 | 81 | iso8601==1.1.0 |
|
82 | 82 | peppercorn==0.6 |
|
83 | 83 | translationstring==1.4 |
|
84 | 84 | zope.deprecation==5.0.0 |
|
85 | diskcache==5.6.3 | |
|
86 | 85 | docutils==0.19 |
|
87 | 86 | dogpile.cache==1.3.3 |
|
88 | 87 | decorator==5.1.1 |
|
89 | 88 | stevedore==5.1.0 |
|
90 | 89 | pbr==5.11.1 |
|
91 | 90 | formencode==2.1.0 |
|
92 | 91 | six==1.16.0 |
|
93 | 92 | gunicorn==21.2.0 |
|
94 | 93 | packaging==24.0 |
|
95 | 94 | gevent==24.2.1 |
|
96 | 95 | greenlet==3.0.3 |
|
97 | 96 | zope.event==5.0.0 |
|
98 | 97 | zope.interface==6.3.0 |
|
99 | 98 | ipython==8.14.0 |
|
100 | 99 | backcall==0.2.0 |
|
101 | 100 | decorator==5.1.1 |
|
102 | 101 | jedi==0.19.0 |
|
103 | 102 | parso==0.8.3 |
|
104 | 103 | matplotlib-inline==0.1.6 |
|
105 | 104 | traitlets==5.9.0 |
|
106 | 105 | pexpect==4.8.0 |
|
107 | 106 | ptyprocess==0.7.0 |
|
108 | 107 | pickleshare==0.7.5 |
|
109 | 108 | prompt-toolkit==3.0.38 |
|
110 | 109 | wcwidth==0.2.6 |
|
111 | 110 | pygments==2.15.1 |
|
112 | 111 | stack-data==0.6.2 |
|
113 | 112 | asttokens==2.2.1 |
|
114 | 113 | six==1.16.0 |
|
115 | 114 | executing==1.2.0 |
|
116 | 115 | pure-eval==0.2.2 |
|
117 | 116 | traitlets==5.9.0 |
|
118 | 117 | markdown==3.4.3 |
|
119 | 118 | msgpack==1.0.8 |
|
120 | 119 | mysqlclient==2.1.1 |
|
121 | 120 | nbconvert==7.7.3 |
|
122 | 121 | beautifulsoup4==4.12.3 |
|
123 | 122 | soupsieve==2.5 |
|
124 | 123 | bleach==6.1.0 |
|
125 | 124 | six==1.16.0 |
|
126 | 125 | webencodings==0.5.1 |
|
127 | 126 | defusedxml==0.7.1 |
|
128 | 127 | jinja2==3.1.2 |
|
129 | 128 | markupsafe==2.1.2 |
|
130 | 129 | jupyter_core==5.3.1 |
|
131 | 130 | platformdirs==3.10.0 |
|
132 | 131 | traitlets==5.9.0 |
|
133 | 132 | jupyterlab-pygments==0.2.2 |
|
134 | 133 | markupsafe==2.1.2 |
|
135 | 134 | mistune==2.0.5 |
|
136 | 135 | nbclient==0.8.0 |
|
137 | 136 | jupyter_client==8.3.0 |
|
138 | 137 | jupyter_core==5.3.1 |
|
139 | 138 | platformdirs==3.10.0 |
|
140 | 139 | traitlets==5.9.0 |
|
141 | 140 | python-dateutil==2.8.2 |
|
142 | 141 | six==1.16.0 |
|
143 | 142 | pyzmq==25.0.0 |
|
144 | 143 | tornado==6.2 |
|
145 | 144 | traitlets==5.9.0 |
|
146 | 145 | jupyter_core==5.3.1 |
|
147 | 146 | platformdirs==3.10.0 |
|
148 | 147 | traitlets==5.9.0 |
|
149 | 148 | nbformat==5.9.2 |
|
150 | 149 | fastjsonschema==2.18.0 |
|
151 | 150 | jsonschema==4.18.6 |
|
152 | 151 | attrs==22.2.0 |
|
153 | 152 | pyrsistent==0.19.3 |
|
154 | 153 | jupyter_core==5.3.1 |
|
155 | 154 | platformdirs==3.10.0 |
|
156 | 155 | traitlets==5.9.0 |
|
157 | 156 | traitlets==5.9.0 |
|
158 | 157 | traitlets==5.9.0 |
|
159 | 158 | nbformat==5.9.2 |
|
160 | 159 | fastjsonschema==2.18.0 |
|
161 | 160 | jsonschema==4.18.6 |
|
162 | 161 | attrs==22.2.0 |
|
163 | 162 | pyrsistent==0.19.3 |
|
164 | 163 | jupyter_core==5.3.1 |
|
165 | 164 | platformdirs==3.10.0 |
|
166 | 165 | traitlets==5.9.0 |
|
167 | 166 | traitlets==5.9.0 |
|
168 | 167 | pandocfilters==1.5.0 |
|
169 | 168 | pygments==2.15.1 |
|
170 | 169 | tinycss2==1.2.1 |
|
171 | 170 | webencodings==0.5.1 |
|
172 | 171 | traitlets==5.9.0 |
|
173 | 172 | orjson==3.10.3 |
|
174 | 173 | paste==3.10.1 |
|
175 | 174 | premailer==3.10.0 |
|
176 | 175 | cachetools==5.3.3 |
|
177 | 176 | cssselect==1.2.0 |
|
178 | 177 | cssutils==2.6.0 |
|
179 | 178 | lxml==4.9.3 |
|
180 | 179 | requests==2.28.2 |
|
181 | 180 | certifi==2022.12.7 |
|
182 | 181 | charset-normalizer==3.1.0 |
|
183 | 182 | idna==3.4 |
|
184 | 183 | urllib3==1.26.14 |
|
185 | 184 | psutil==5.9.8 |
|
186 | 185 | psycopg2==2.9.9 |
|
187 | 186 | py-bcrypt==0.4 |
|
188 | 187 | pycmarkgfm==1.2.0 |
|
189 | 188 | cffi==1.16.0 |
|
190 | 189 | pycparser==2.21 |
|
191 | 190 | pycryptodome==3.17 |
|
192 | 191 | pycurl==7.45.3 |
|
193 | 192 | pymysql==1.0.3 |
|
194 | 193 | pyotp==2.8.0 |
|
195 | 194 | pyparsing==3.1.1 |
|
196 | 195 | pyramid-debugtoolbar==4.12.1 |
|
197 | 196 | pygments==2.15.1 |
|
198 | 197 | pyramid==2.0.2 |
|
199 | 198 | hupper==1.12 |
|
200 | 199 | plaster==1.1.2 |
|
201 | 200 | plaster-pastedeploy==1.0.1 |
|
202 | 201 | pastedeploy==3.1.0 |
|
203 | 202 | plaster==1.1.2 |
|
204 | 203 | translationstring==1.4 |
|
205 | 204 | venusian==3.0.0 |
|
206 | 205 | webob==1.8.7 |
|
207 | 206 | zope.deprecation==5.0.0 |
|
208 | 207 | zope.interface==6.3.0 |
|
209 | 208 | pyramid-mako==1.1.0 |
|
210 | 209 | mako==1.2.4 |
|
211 | 210 | markupsafe==2.1.2 |
|
212 | 211 | pyramid==2.0.2 |
|
213 | 212 | hupper==1.12 |
|
214 | 213 | plaster==1.1.2 |
|
215 | 214 | plaster-pastedeploy==1.0.1 |
|
216 | 215 | pastedeploy==3.1.0 |
|
217 | 216 | plaster==1.1.2 |
|
218 | 217 | translationstring==1.4 |
|
219 | 218 | venusian==3.0.0 |
|
220 | 219 | webob==1.8.7 |
|
221 | 220 | zope.deprecation==5.0.0 |
|
222 | 221 | zope.interface==6.3.0 |
|
223 | 222 | pyramid-mailer==0.15.1 |
|
224 | 223 | pyramid==2.0.2 |
|
225 | 224 | hupper==1.12 |
|
226 | 225 | plaster==1.1.2 |
|
227 | 226 | plaster-pastedeploy==1.0.1 |
|
228 | 227 | pastedeploy==3.1.0 |
|
229 | 228 | plaster==1.1.2 |
|
230 | 229 | translationstring==1.4 |
|
231 | 230 | venusian==3.0.0 |
|
232 | 231 | webob==1.8.7 |
|
233 | 232 | zope.deprecation==5.0.0 |
|
234 | 233 | zope.interface==6.3.0 |
|
235 | 234 | repoze.sendmail==4.4.1 |
|
236 | 235 | transaction==3.1.0 |
|
237 | 236 | zope.interface==6.3.0 |
|
238 | 237 | zope.interface==6.3.0 |
|
239 | 238 | transaction==3.1.0 |
|
240 | 239 | zope.interface==6.3.0 |
|
241 | 240 | python-ldap==3.4.3 |
|
242 | 241 | pyasn1==0.4.8 |
|
243 | 242 | pyasn1-modules==0.2.8 |
|
244 | 243 | pyasn1==0.4.8 |
|
245 | 244 | python-memcached==1.59 |
|
246 | 245 | six==1.16.0 |
|
247 | 246 | python-pam==2.0.2 |
|
248 | 247 | python3-saml==1.15.0 |
|
249 | 248 | isodate==0.6.1 |
|
250 | 249 | six==1.16.0 |
|
251 | 250 | lxml==4.9.3 |
|
252 | 251 | xmlsec==1.3.13 |
|
253 | 252 | lxml==4.9.3 |
|
254 | 253 | pyyaml==6.0.1 |
|
255 | 254 | redis==5.0.4 |
|
256 | 255 | async-timeout==4.0.3 |
|
257 | 256 | regex==2022.10.31 |
|
258 | 257 | routes==2.5.1 |
|
259 | 258 | repoze.lru==0.7 |
|
260 | 259 | six==1.16.0 |
|
261 | 260 | simplejson==3.19.2 |
|
262 | 261 | sshpubkeys==3.3.1 |
|
263 | 262 | cryptography==40.0.2 |
|
264 | 263 | cffi==1.16.0 |
|
265 | 264 | pycparser==2.21 |
|
266 | 265 | ecdsa==0.18.0 |
|
267 | 266 | six==1.16.0 |
|
268 | 267 | sqlalchemy==1.4.52 |
|
269 | 268 | greenlet==3.0.3 |
|
270 | 269 | typing_extensions==4.9.0 |
|
271 | 270 | supervisor==4.2.5 |
|
272 | 271 | tzlocal==4.3 |
|
273 | 272 | pytz-deprecation-shim==0.1.0.post0 |
|
274 | 273 | tzdata==2024.1 |
|
275 | 274 | tempita==0.5.2 |
|
276 | 275 | unidecode==1.3.6 |
|
277 | 276 | urlobject==2.4.3 |
|
278 | 277 | waitress==3.0.0 |
|
279 | 278 | webhelpers2==2.1 |
|
280 | 279 | markupsafe==2.1.2 |
|
281 | 280 | six==1.16.0 |
|
282 | 281 | whoosh==2.7.4 |
|
283 | 282 | zope.cachedescriptors==5.0.0 |
|
284 | 283 | qrcode==7.4.2 |
|
285 | 284 | |
|
286 | 285 | ## uncomment to add the debug libraries |
|
287 | 286 | #-r requirements_debug.txt |
@@ -1,1708 +1,1715 b'' | |||
|
1 | 1 | # Copyright (C) 2011-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import itertools |
|
20 | 20 | import logging |
|
21 | 21 | import os |
|
22 | 22 | import collections |
|
23 | 23 | import urllib.request |
|
24 | 24 | import urllib.parse |
|
25 | 25 | import urllib.error |
|
26 | 26 | import pathlib |
|
27 | import time | |
|
28 | import random | |
|
27 | 29 | |
|
28 | 30 | from pyramid.httpexceptions import HTTPNotFound, HTTPBadRequest, HTTPFound |
|
29 | 31 | |
|
30 | 32 | from pyramid.renderers import render |
|
31 | 33 | from pyramid.response import Response |
|
32 | 34 | |
|
33 | 35 | import rhodecode |
|
34 | 36 | from rhodecode.apps._base import RepoAppView |
|
35 | 37 | |
|
36 | 38 | |
|
37 | 39 | from rhodecode.lib import diffs, helpers as h, rc_cache |
|
38 | 40 | from rhodecode.lib import audit_logger |
|
39 | 41 | from rhodecode.lib.hash_utils import sha1_safe |
|
40 | from rhodecode.lib.rc_cache.archive_cache import |
|
|
42 | from rhodecode.lib.rc_cache.archive_cache import ( | |
|
43 | get_archival_cache_store, get_archival_config, ArchiveCacheLock, archive_iterator) | |
|
41 | 44 | from rhodecode.lib.str_utils import safe_bytes, convert_special_chars |
|
42 | 45 | from rhodecode.lib.view_utils import parse_path_ref |
|
43 | 46 | from rhodecode.lib.exceptions import NonRelativePathError |
|
44 | 47 | from rhodecode.lib.codeblocks import ( |
|
45 | 48 | filenode_as_lines_tokens, filenode_as_annotated_lines_tokens) |
|
46 | 49 | from rhodecode.lib.utils2 import convert_line_endings, detect_mode |
|
47 | 50 | from rhodecode.lib.type_utils import str2bool |
|
48 | 51 | from rhodecode.lib.str_utils import safe_str, safe_int |
|
49 | 52 | from rhodecode.lib.auth import ( |
|
50 | 53 | LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired) |
|
51 | 54 | from rhodecode.lib.vcs import path as vcspath |
|
52 | 55 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
53 | 56 | from rhodecode.lib.vcs.conf import settings |
|
54 | 57 | from rhodecode.lib.vcs.nodes import FileNode |
|
55 | 58 | from rhodecode.lib.vcs.exceptions import ( |
|
56 | 59 | RepositoryError, CommitDoesNotExistError, EmptyRepositoryError, |
|
57 | 60 | ImproperArchiveTypeError, VCSError, NodeAlreadyExistsError, |
|
58 | 61 | NodeDoesNotExistError, CommitError, NodeError) |
|
59 | 62 | |
|
60 | 63 | from rhodecode.model.scm import ScmModel |
|
61 | 64 | from rhodecode.model.db import Repository |
|
62 | 65 | |
|
63 | 66 | log = logging.getLogger(__name__) |
|
64 | 67 | |
|
65 | 68 | |
|
66 | 69 | def get_archive_name(db_repo_id, db_repo_name, commit_sha, ext, subrepos=False, path_sha='', with_hash=True): |
|
67 | 70 | # original backward compat name of archive |
|
68 | 71 | clean_name = safe_str(convert_special_chars(db_repo_name).replace('/', '_')) |
|
69 | 72 | |
|
70 | 73 | # e.g vcsserver-id-abcd-sub-1-abcfdef-archive-all.zip |
|
71 | 74 | # vcsserver-id-abcd-sub-0-abcfdef-COMMIT_SHA-PATH_SHA.zip |
|
72 | 75 | id_sha = sha1_safe(str(db_repo_id))[:4] |
|
73 | 76 | sub_repo = 'sub-1' if subrepos else 'sub-0' |
|
74 | 77 | commit = commit_sha if with_hash else 'archive' |
|
75 | 78 | path_marker = (path_sha if with_hash else '') or 'all' |
|
76 | 79 | archive_name = f'{clean_name}-id-{id_sha}-{sub_repo}-{commit}-{path_marker}{ext}' |
|
77 | 80 | |
|
78 | 81 | return archive_name |
|
79 | 82 | |
|
80 | 83 | |
|
81 | 84 | def get_path_sha(at_path): |
|
82 | 85 | return safe_str(sha1_safe(at_path)[:8]) |
|
83 | 86 | |
|
84 | 87 | |
|
85 | 88 | def _get_archive_spec(fname): |
|
86 | 89 | log.debug('Detecting archive spec for: `%s`', fname) |
|
87 | 90 | |
|
88 | 91 | fileformat = None |
|
89 | 92 | ext = None |
|
90 | 93 | content_type = None |
|
91 | 94 | for a_type, content_type, extension in settings.ARCHIVE_SPECS: |
|
92 | 95 | |
|
93 | 96 | if fname.endswith(extension): |
|
94 | 97 | fileformat = a_type |
|
95 | 98 | log.debug('archive is of type: %s', fileformat) |
|
96 | 99 | ext = extension |
|
97 | 100 | break |
|
98 | 101 | |
|
99 | 102 | if not fileformat: |
|
100 | 103 | raise ValueError() |
|
101 | 104 | |
|
102 | 105 | # left over part of whole fname is the commit |
|
103 | 106 | commit_id = fname[:-len(ext)] |
|
104 | 107 | |
|
105 | 108 | return commit_id, ext, fileformat, content_type |
|
106 | 109 | |
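# Illustrative behaviour (the extension list comes from settings.ARCHIVE_SPECS):
#   _get_archive_spec('deadbeef1234.tar.gz')
#   -> ('deadbeef1234', '.tar.gz', <matching fileformat>, <matching content_type>)
# an unrecognised extension raises ValueError, which the caller reports
# as an unknown archive type.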
|
107 | 110 | |
|
108 | 111 | class RepoFilesView(RepoAppView): |
|
109 | 112 | |
|
110 | 113 | @staticmethod |
|
111 | 114 | def adjust_file_path_for_svn(f_path, repo): |
|
112 | 115 | """ |
|
113 | 116 | Computes the relative path of `f_path`. |
|
114 | 117 | |
|
115 | 118 | This is mainly based on prefix matching of the recognized tags and |
|
116 | 119 | branches in the underlying repository. |
|
117 | 120 | """ |
|
118 | 121 | tags_and_branches = itertools.chain( |
|
119 | 122 | repo.branches.keys(), |
|
120 | 123 | repo.tags.keys()) |
|
121 | 124 | tags_and_branches = sorted(tags_and_branches, key=len, reverse=True) |
|
122 | 125 | |
|
123 | 126 | for name in tags_and_branches: |
|
124 | 127 | if f_path.startswith(f'{name}/'): |
|
125 | 128 | f_path = vcspath.relpath(f_path, name) |
|
126 | 129 | break |
|
127 | 130 | return f_path |
|
128 | 131 | |
|
129 | 132 | def load_default_context(self): |
|
130 | 133 | c = self._get_local_tmpl_context(include_app_defaults=True) |
|
131 | 134 | c.rhodecode_repo = self.rhodecode_vcs_repo |
|
132 | 135 | c.enable_downloads = self.db_repo.enable_downloads |
|
133 | 136 | return c |
|
134 | 137 | |
|
135 | 138 | def _ensure_not_locked(self, commit_id='tip'): |
|
136 | 139 | _ = self.request.translate |
|
137 | 140 | |
|
138 | 141 | repo = self.db_repo |
|
139 | 142 | if repo.enable_locking and repo.locked[0]: |
|
140 | 143 | h.flash(_('This repository has been locked by %s on %s') |
|
141 | 144 | % (h.person_by_id(repo.locked[0]), |
|
142 | 145 | h.format_date(h.time_to_datetime(repo.locked[1]))), |
|
143 | 146 | 'warning') |
|
144 | 147 | files_url = h.route_path( |
|
145 | 148 | 'repo_files:default_path', |
|
146 | 149 | repo_name=self.db_repo_name, commit_id=commit_id) |
|
147 | 150 | raise HTTPFound(files_url) |
|
148 | 151 | |
|
149 | 152 | def forbid_non_head(self, is_head, f_path, commit_id='tip', json_mode=False): |
|
150 | 153 | _ = self.request.translate |
|
151 | 154 | |
|
152 | 155 | if not is_head: |
|
153 | 156 | message = _('Cannot modify file. ' |
|
154 | 157 | 'Given commit `{}` is not head of a branch.').format(commit_id) |
|
155 | 158 | h.flash(message, category='warning') |
|
156 | 159 | |
|
157 | 160 | if json_mode: |
|
158 | 161 | return message |
|
159 | 162 | |
|
160 | 163 | files_url = h.route_path( |
|
161 | 164 | 'repo_files', repo_name=self.db_repo_name, commit_id=commit_id, |
|
162 | 165 | f_path=f_path) |
|
163 | 166 | raise HTTPFound(files_url) |
|
164 | 167 | |
|
165 | 168 | def check_branch_permission(self, branch_name, commit_id='tip', json_mode=False): |
|
166 | 169 | _ = self.request.translate |
|
167 | 170 | |
|
168 | 171 | rule, branch_perm = self._rhodecode_user.get_rule_and_branch_permission( |
|
169 | 172 | self.db_repo_name, branch_name) |
|
170 | 173 | if branch_perm and branch_perm not in ['branch.push', 'branch.push_force']: |
|
171 | 174 | message = _('Branch `{}` changes forbidden by rule {}.').format( |
|
172 | 175 | h.escape(branch_name), h.escape(rule)) |
|
173 | 176 | h.flash(message, 'warning') |
|
174 | 177 | |
|
175 | 178 | if json_mode: |
|
176 | 179 | return message |
|
177 | 180 | |
|
178 | 181 | files_url = h.route_path( |
|
179 | 182 | 'repo_files:default_path', repo_name=self.db_repo_name, commit_id=commit_id) |
|
180 | 183 | |
|
181 | 184 | raise HTTPFound(files_url) |
|
182 | 185 | |
|
183 | 186 | def _get_commit_and_path(self): |
|
184 | 187 | default_commit_id = self.db_repo.landing_ref_name |
|
185 | 188 | default_f_path = '/' |
|
186 | 189 | |
|
187 | 190 | commit_id = self.request.matchdict.get( |
|
188 | 191 | 'commit_id', default_commit_id) |
|
189 | 192 | f_path = self._get_f_path(self.request.matchdict, default_f_path) |
|
190 | 193 | return commit_id, f_path |
|
191 | 194 | |
|
192 | 195 | def _get_default_encoding(self, c): |
|
193 | 196 | enc_list = getattr(c, 'default_encodings', []) |
|
194 | 197 | return enc_list[0] if enc_list else 'UTF-8' |
|
195 | 198 | |
|
196 | 199 | def _get_commit_or_redirect(self, commit_id, redirect_after=True): |
|
197 | 200 | """ |
|
198 | 201 | This is a safe way to get a commit. If an error occurs it redirects to |

199 | 202 | tip with a proper message |
|
200 | 203 | |
|
201 | 204 | :param commit_id: id of commit to fetch |
|
202 | 205 | :param redirect_after: toggle redirection |
|
203 | 206 | """ |
|
204 | 207 | _ = self.request.translate |
|
205 | 208 | |
|
206 | 209 | try: |
|
207 | 210 | return self.rhodecode_vcs_repo.get_commit(commit_id) |
|
208 | 211 | except EmptyRepositoryError: |
|
209 | 212 | if not redirect_after: |
|
210 | 213 | return None |
|
211 | 214 | |
|
212 | 215 | add_new = upload_new = "" |
|
213 | 216 | if h.HasRepoPermissionAny( |
|
214 | 217 | 'repository.write', 'repository.admin')(self.db_repo_name): |
|
215 | 218 | _url = h.route_path( |
|
216 | 219 | 'repo_files_add_file', |
|
217 | 220 | repo_name=self.db_repo_name, commit_id=0, f_path='') |
|
218 | 221 | add_new = h.link_to( |
|
219 | 222 | _('add a new file'), _url, class_="alert-link") |
|
220 | 223 | |
|
221 | 224 | _url_upld = h.route_path( |
|
222 | 225 | 'repo_files_upload_file', |
|
223 | 226 | repo_name=self.db_repo_name, commit_id=0, f_path='') |
|
224 | 227 | upload_new = h.link_to( |
|
225 | 228 | _('upload a new file'), _url_upld, class_="alert-link") |
|
226 | 229 | |
|
227 | 230 | h.flash(h.literal( |
|
228 | 231 | _('There are no files yet. Click here to %s or %s.') % (add_new, upload_new)), category='warning') |
|
229 | 232 | raise HTTPFound( |
|
230 | 233 | h.route_path('repo_summary', repo_name=self.db_repo_name)) |
|
231 | 234 | |
|
232 | 235 | except (CommitDoesNotExistError, LookupError) as e: |
|
233 | 236 | msg = _('No such commit exists for this repository. Commit: {}').format(commit_id) |
|
234 | 237 | h.flash(msg, category='error') |
|
235 | 238 | raise HTTPNotFound() |
|
236 | 239 | except RepositoryError as e: |
|
237 | 240 | h.flash(h.escape(safe_str(e)), category='error') |
|
238 | 241 | raise HTTPNotFound() |
|
239 | 242 | |
|
240 | 243 | def _get_filenode_or_redirect(self, commit_obj, path, pre_load=None): |
|
241 | 244 | """ |
|
242 | 245 | Returns file_node. If an error occurs or the given path is a directory, |

243 | 246 | it'll redirect to the top level path |
|
244 | 247 | """ |
|
245 | 248 | _ = self.request.translate |
|
246 | 249 | |
|
247 | 250 | try: |
|
248 | 251 | file_node = commit_obj.get_node(path, pre_load=pre_load) |
|
249 | 252 | if file_node.is_dir(): |
|
250 | 253 | raise RepositoryError('The given path is a directory') |
|
251 | 254 | except CommitDoesNotExistError: |
|
252 | 255 | log.exception('No such commit exists for this repository') |
|
253 | 256 | h.flash(_('No such commit exists for this repository'), category='error') |
|
254 | 257 | raise HTTPNotFound() |
|
255 | 258 | except RepositoryError as e: |
|
256 | 259 | log.warning('Repository error while fetching filenode `%s`. Err:%s', path, e) |
|
257 | 260 | h.flash(h.escape(safe_str(e)), category='error') |
|
258 | 261 | raise HTTPNotFound() |
|
259 | 262 | |
|
260 | 263 | return file_node |
|
261 | 264 | |
|
262 | 265 | def _is_valid_head(self, commit_id, repo, landing_ref): |
|
263 | 266 | branch_name = sha_commit_id = '' |
|
264 | 267 | is_head = False |
|
265 | 268 | log.debug('Checking if commit_id `%s` is a head for %s.', commit_id, repo) |
|
266 | 269 | |
|
267 | 270 | for _branch_name, branch_commit_id in repo.branches.items(): |
|
268 | 271 | # simple case we pass in branch name, it's a HEAD |
|
269 | 272 | if commit_id == _branch_name: |
|
270 | 273 | is_head = True |
|
271 | 274 | branch_name = _branch_name |
|
272 | 275 | sha_commit_id = branch_commit_id |
|
273 | 276 | break |
|
274 | 277 | # case when we pass in full sha commit_id, which is a head |
|
275 | 278 | elif commit_id == branch_commit_id: |
|
276 | 279 | is_head = True |
|
277 | 280 | branch_name = _branch_name |
|
278 | 281 | sha_commit_id = branch_commit_id |
|
279 | 282 | break |
|
280 | 283 | |
|
281 | 284 | if h.is_svn(repo) and not repo.is_empty(): |
|
282 | 285 | # Note: Subversion only has one head. |
|
283 | 286 | if commit_id == repo.get_commit(commit_idx=-1).raw_id: |
|
284 | 287 | is_head = True |
|
285 | 288 | return branch_name, sha_commit_id, is_head |
|
286 | 289 | |
|
287 | 290 | # checked branches, means we only need to try to get the branch/commit_sha |
|
288 | 291 | if repo.is_empty(): |
|
289 | 292 | is_head = True |
|
290 | 293 | branch_name = landing_ref |
|
291 | 294 | sha_commit_id = EmptyCommit().raw_id |
|
292 | 295 | else: |
|
293 | 296 | commit = repo.get_commit(commit_id=commit_id) |
|
294 | 297 | if commit: |
|
295 | 298 | branch_name = commit.branch |
|
296 | 299 | sha_commit_id = commit.raw_id |
|
297 | 300 | |
|
298 | 301 | return branch_name, sha_commit_id, is_head |
|
299 | 302 | |
|
300 | 303 | def _get_tree_at_commit(self, c, commit_id, f_path, full_load=False, at_rev=None): |
|
301 | 304 | |
|
302 | 305 | repo_id = self.db_repo.repo_id |
|
303 | 306 | force_recache = self.get_recache_flag() |
|
304 | 307 | |
|
305 | 308 | cache_seconds = safe_int( |
|
306 | 309 | rhodecode.CONFIG.get('rc_cache.cache_repo.expiration_time')) |
|
307 | 310 | cache_on = not force_recache and cache_seconds > 0 |
|
308 | 311 | log.debug( |
|
309 | 312 | 'Computing FILE TREE for repo_id %s commit_id `%s` and path `%s`' |
|
310 | 313 | 'with caching: %s[TTL: %ss]' % ( |
|
311 | 314 | repo_id, commit_id, f_path, cache_on, cache_seconds or 0)) |
|
312 | 315 | |
|
313 | 316 | cache_namespace_uid = f'repo.{rc_cache.FILE_TREE_CACHE_VER}.{repo_id}' |
|
314 | 317 | region = rc_cache.get_or_create_region('cache_repo', cache_namespace_uid) |
|
315 | 318 | |
|
316 | 319 | @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=cache_on) |
|
317 | 320 | def compute_file_tree(_name_hash, _repo_id, _commit_id, _f_path, _full_load, _at_rev): |
|
318 | 321 | log.debug('Generating cached file tree at for repo_id: %s, %s, %s', |
|
319 | 322 | _repo_id, _commit_id, _f_path) |
|
320 | 323 | |
|
321 | 324 | c.full_load = _full_load |
|
322 | 325 | return render( |
|
323 | 326 | 'rhodecode:templates/files/files_browser_tree.mako', |
|
324 | 327 | self._get_template_context(c), self.request, _at_rev) |
|
325 | 328 | |
|
326 | 329 | return compute_file_tree( |
|
327 | 330 | self.db_repo.repo_name_hash, self.db_repo.repo_id, commit_id, f_path, full_load, at_rev) |
|
328 | 331 | |
|
329 | 332 | def create_pure_path(self, *parts): |
|
330 | 333 | # Split paths and sanitize them, removing any ../ etc |
|
331 | 334 | sanitized_path = [ |
|
332 | 335 | x for x in pathlib.PurePath(*parts).parts |
|
333 | 336 | if x not in ['.', '..']] |
|
334 | 337 | |
|
335 | 338 | pure_path = pathlib.PurePath(*sanitized_path) |
|
336 | 339 | return pure_path |
|
337 | 340 | |
|
338 | 341 | def _is_lf_enabled(self, target_repo): |
|
339 | 342 | lf_enabled = False |
|
340 | 343 | |
|
341 | 344 | lf_key_for_vcs_map = { |
|
342 | 345 | 'hg': 'extensions_largefiles', |
|
343 | 346 | 'git': 'vcs_git_lfs_enabled' |
|
344 | 347 | } |
|
345 | 348 | |
|
346 | 349 | lf_key_for_vcs = lf_key_for_vcs_map.get(target_repo.repo_type) |
|
347 | 350 | |
|
348 | 351 | if lf_key_for_vcs: |
|
349 | 352 | lf_enabled = self._get_repo_setting(target_repo, lf_key_for_vcs) |
|
350 | 353 | |
|
351 | 354 | return lf_enabled |
|
352 | 355 | |
|
353 | 356 | @LoginRequired() |
|
354 | 357 | @HasRepoPermissionAnyDecorator( |
|
355 | 358 | 'repository.read', 'repository.write', 'repository.admin') |
|
356 | 359 | def repo_archivefile(self): |
|
357 | 360 | # archive cache config |
|
358 | 361 | from rhodecode import CONFIG |
|
359 | 362 | _ = self.request.translate |
|
360 | 363 | self.load_default_context() |
|
361 | 364 | default_at_path = '/' |
|
362 | 365 | fname = self.request.matchdict['fname'] |
|
363 | 366 | subrepos = self.request.GET.get('subrepos') == 'true' |
|
364 | 367 | with_hash = str2bool(self.request.GET.get('with_hash', '1')) |
|
365 | 368 | at_path = self.request.GET.get('at_path') or default_at_path |
|
366 | 369 | |
|
367 | 370 | if not self.db_repo.enable_downloads: |
|
368 | 371 | return Response(_('Downloads disabled')) |
|
369 | 372 | |
|
370 | 373 | try: |
|
371 | 374 | commit_id, ext, fileformat, content_type = \ |
|
372 | 375 | _get_archive_spec(fname) |
|
373 | 376 | except ValueError: |
|
374 | 377 | return Response(_('Unknown archive type for: `{}`').format( |
|
375 | 378 | h.escape(fname))) |
|
376 | 379 | |
|
377 | 380 | try: |
|
378 | 381 | commit = self.rhodecode_vcs_repo.get_commit(commit_id) |
|
379 | 382 | except CommitDoesNotExistError: |
|
380 | 383 | return Response(_('Unknown commit_id {}').format( |
|
381 | 384 | h.escape(commit_id))) |
|
382 | 385 | except EmptyRepositoryError: |
|
383 | 386 | return Response(_('Empty repository')) |
|
384 | 387 | |
|
385 | 388 | # we used a ref or a shorter version; let's redirect the client to use the explicit hash 
|
386 | 389 | if commit_id != commit.raw_id: |
|
387 | 390 | fname = f'{commit.raw_id}{ext}' 
|
388 | 391 | raise HTTPFound(self.request.current_route_path(fname=fname)) |
|
389 | 392 | |
|
390 | 393 | try: |
|
391 | 394 | at_path = commit.get_node(at_path).path or default_at_path |
|
392 | 395 | except Exception: |
|
393 | 396 | return Response(_('No node at path {} for this repository').format(h.escape(at_path))) |
|
394 | 397 | |
|
395 | 398 | path_sha = get_path_sha(at_path) |
|
396 | 399 | |
|
397 | 400 | # used for cache etc, consistent unique archive name |
|
398 | 401 | archive_name_key = get_archive_name( |
|
399 | 402 | self.db_repo.repo_id, self.db_repo_name, commit_sha=commit.short_id, ext=ext, subrepos=subrepos, |
|
400 | 403 | path_sha=path_sha, with_hash=True) |
|
401 | 404 | |
|
402 | 405 | if not with_hash: |
|
403 | 406 | path_sha = '' |
|
404 | 407 | |
|
405 | 408 | # what the end client gets served 
|
406 | 409 | response_archive_name = get_archive_name( |
|
407 | 410 | self.db_repo.repo_id, self.db_repo_name, commit_sha=commit.short_id, ext=ext, subrepos=subrepos, |
|
408 | 411 | path_sha=path_sha, with_hash=with_hash) |
|
409 | 412 | |
|
410 | 413 | # remove extension from our archive directory name |
|
411 | 414 | archive_dir_name = response_archive_name[:-len(ext)] |
|
412 | 415 | |
|
413 | 416 | archive_cache_disable = self.request.GET.get('no_cache') |
|
414 | 417 | |
|
415 | 418 | d_cache = get_archival_cache_store(config=CONFIG) |
|
416 | 419 | |
|
417 | 420 | # NOTE: we get the config to pass to a call to lazy-init the SAME type of cache on vcsserver |
|
418 | 421 | d_cache_conf = get_archival_config(config=CONFIG) |
|
419 | 422 | |
|
423 | # This is also a cache key, and lock key | 

420 | 424 | reentrant_lock_key = archive_name_key + '.lock' 

421 | with ReentrantLock(d_cache, reentrant_lock_key): | 

422 | # This is also a cache key | 

425 | | 

423 | 426 | use_cached_archive = False 

424 | 427 | if not archive_cache_disable and archive_name_key in d_cache: 

425 | reader, tag = d_cache.get(archive_name_key, read=True, tag=True, retry=True) | 

428 | reader, metadata = d_cache.fetch(archive_name_key) | 

429 | | 

426 | 430 | use_cached_archive = True 

427 | 431 | log.debug('Found cached archive as key=%s tag=%s, serving archive from cache reader=%s', 

428 | archive_name_key, tag, reader.name) | 

432 | archive_name_key, metadata, reader.name) | 

429 | 433 | else: 

430 | 434 | reader = None 

431 | 435 | log.debug('Archive with key=%s is not yet cached, creating one now...', archive_name_key) 

432 | 436 | 

437 | if not reader: | 

433 | 438 | # generate new archive, as previous was not found in the cache 

434 | 

435 | | 
|
439 | try: | |
|
440 | with d_cache.get_lock(reentrant_lock_key): | |
|
436 | 441 | try: |
|
437 | 442 | commit.archive_repo(archive_name_key, archive_dir_name=archive_dir_name, |
|
438 | 443 | kind=fileformat, subrepos=subrepos, |
|
439 | 444 | archive_at_path=at_path, cache_config=d_cache_conf) |
|
440 | 445 | except ImproperArchiveTypeError: |
|
441 | 446 | return _('Unknown archive type') |
|
442 | ||
|
443 | reader, tag = d_cache.get(archive_name_key, read=True, tag=True, retry=True) | |
|
444 | ||
|
445 | if not reader: | |
|
446 | raise ValueError('archive cache reader is empty, failed to fetch file from distributed archive cache') | |
|
447 | except ArchiveCacheLock: | |
|
448 | retry_after = round(random.uniform(0.3, 3.0), 1) | |
|
449 | time.sleep(retry_after) | |
|
447 | 450 | |
|
448 | def archive_iterator(_reader, block_size: int = 4096*512): | |
|
449 | # 4096 * 64 = 64KB | |
|
450 | while 1: | |
|
451 | data = _reader.read(block_size) | |
|
452 | if not data: | |
|
453 | break | |
|
454 | yield data | |
|
451 | location = self.request.url | |
|
452 | response = Response( | |
|
453 | f"archive {archive_name_key} generation in progress, Retry-After={retry_after}, Location={location}" | |
|
454 | ) | |
|
455 | response.headers["Retry-After"] = str(retry_after) | |
|
456 | response.status_code = 307 # temporary redirect | |
|
457 | ||
|
458 | response.location = location | |
|
459 | return response | |
|
460 | ||
|
461 | reader, metadata = d_cache.fetch(archive_name_key) | |
|
455 | 462 | |
|
456 | 463 | response = Response(app_iter=archive_iterator(reader)) |
|
457 | 464 | response.content_disposition = f'attachment; filename={response_archive_name}' |
|
458 | 465 | response.content_type = str(content_type) |
|
459 | 466 | |
|
460 | 467 | try: |
|
461 | 468 | return response |
|
462 | 469 | finally: |
|
463 | 470 | # store download action |
|
464 | 471 | audit_logger.store_web( |
|
465 | 472 | 'repo.archive.download', action_data={ |
|
466 | 473 | 'user_agent': self.request.user_agent, |
|
467 | 474 | 'archive_name': archive_name_key, |
|
468 | 475 | 'archive_spec': fname, |
|
469 | 476 | 'archive_cached': use_cached_archive}, |
|
470 | 477 | user=self._rhodecode_user, |
|
471 | 478 | repo=self.db_repo, |
|
472 | 479 | commit=True |
|
473 | 480 | ) |
|
474 | 481 | |
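
When another request already holds the archive generation lock, the view above answers with HTTP 307 plus a Retry-After header instead of blocking. A hedged sketch of how a caller could consume that contract; the URL and the requests-based polling loop are illustrative assumptions, not an official API client.

import time
import requests

def download_archive(url, dest, max_attempts=10):
    for _ in range(max_attempts):
        # do not follow the 307 automatically; we want to honour Retry-After
        resp = requests.get(url, stream=True, allow_redirects=False)
        if resp.status_code == 307:
            time.sleep(float(resp.headers.get('Retry-After', 1)))
            continue
        resp.raise_for_status()
        with open(dest, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=64 * 1024):
                f.write(chunk)
        return dest
    raise TimeoutError(f'archive at {url} still generating after {max_attempts} attempts')
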
|
475 | 482 | def _get_file_node(self, commit_id, f_path): |
|
476 | 483 | if commit_id not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
477 | 484 | commit = self.rhodecode_vcs_repo.get_commit(commit_id=commit_id) |
|
478 | 485 | try: |
|
479 | 486 | node = commit.get_node(f_path) |
|
480 | 487 | if node.is_dir(): |
|
481 | 488 | raise NodeError(f'{node} path is a {type(node)} not a file') |
|
482 | 489 | except NodeDoesNotExistError: |
|
483 | 490 | commit = EmptyCommit( |
|
484 | 491 | commit_id=commit_id, |
|
485 | 492 | idx=commit.idx, |
|
486 | 493 | repo=commit.repository, |
|
487 | 494 | alias=commit.repository.alias, |
|
488 | 495 | message=commit.message, |
|
489 | 496 | author=commit.author, |
|
490 | 497 | date=commit.date) |
|
491 | 498 | node = FileNode(safe_bytes(f_path), b'', commit=commit) |
|
492 | 499 | else: |
|
493 | 500 | commit = EmptyCommit( |
|
494 | 501 | repo=self.rhodecode_vcs_repo, |
|
495 | 502 | alias=self.rhodecode_vcs_repo.alias) |
|
496 | 503 | node = FileNode(safe_bytes(f_path), b'', commit=commit) |
|
497 | 504 | return node |
|
498 | 505 | |
|
499 | 506 | @LoginRequired() |
|
500 | 507 | @HasRepoPermissionAnyDecorator( |
|
501 | 508 | 'repository.read', 'repository.write', 'repository.admin') |
|
502 | 509 | def repo_files_diff(self): |
|
503 | 510 | c = self.load_default_context() |
|
504 | 511 | f_path = self._get_f_path(self.request.matchdict) |
|
505 | 512 | diff1 = self.request.GET.get('diff1', '') |
|
506 | 513 | diff2 = self.request.GET.get('diff2', '') |
|
507 | 514 | |
|
508 | 515 | path1, diff1 = parse_path_ref(diff1, default_path=f_path) |
|
509 | 516 | |
|
510 | 517 | ignore_whitespace = str2bool(self.request.GET.get('ignorews')) |
|
511 | 518 | line_context = self.request.GET.get('context', 3) |
|
512 | 519 | |
|
513 | 520 | if not any((diff1, diff2)): |
|
514 | 521 | h.flash( |
|
515 | 522 | 'Need query parameter "diff1" or "diff2" to generate a diff.', |
|
516 | 523 | category='error') |
|
517 | 524 | raise HTTPBadRequest() |
|
518 | 525 | |
|
519 | 526 | c.action = self.request.GET.get('diff') |
|
520 | 527 | if c.action not in ['download', 'raw']: |
|
521 | 528 | compare_url = h.route_path( |
|
522 | 529 | 'repo_compare', |
|
523 | 530 | repo_name=self.db_repo_name, |
|
524 | 531 | source_ref_type='rev', |
|
525 | 532 | source_ref=diff1, |
|
526 | 533 | target_repo=self.db_repo_name, |
|
527 | 534 | target_ref_type='rev', |
|
528 | 535 | target_ref=diff2, |
|
529 | 536 | _query=dict(f_path=f_path)) |
|
530 | 537 | # redirect to new view if we render diff |
|
531 | 538 | raise HTTPFound(compare_url) |
|
532 | 539 | |
|
533 | 540 | try: |
|
534 | 541 | node1 = self._get_file_node(diff1, path1) |
|
535 | 542 | node2 = self._get_file_node(diff2, f_path) |
|
536 | 543 | except (RepositoryError, NodeError): |
|
537 | 544 | log.exception("Exception while trying to get node from repository") |
|
538 | 545 | raise HTTPFound( |
|
539 | 546 | h.route_path('repo_files', repo_name=self.db_repo_name, |
|
540 | 547 | commit_id='tip', f_path=f_path)) |
|
541 | 548 | |
|
542 | 549 | if all(isinstance(node.commit, EmptyCommit) |
|
543 | 550 | for node in (node1, node2)): |
|
544 | 551 | raise HTTPNotFound() |
|
545 | 552 | |
|
546 | 553 | c.commit_1 = node1.commit |
|
547 | 554 | c.commit_2 = node2.commit |
|
548 | 555 | |
|
549 | 556 | if c.action == 'download': |
|
550 | 557 | _diff = diffs.get_gitdiff(node1, node2, |
|
551 | 558 | ignore_whitespace=ignore_whitespace, |
|
552 | 559 | context=line_context) |
|
553 | 560 | # NOTE: this was using diff_format='gitdiff' |
|
554 | 561 | diff = diffs.DiffProcessor(_diff, diff_format='newdiff') |
|
555 | 562 | |
|
556 | 563 | response = Response(self.path_filter.get_raw_patch(diff)) |
|
557 | 564 | response.content_type = 'text/plain' |
|
558 | 565 | response.content_disposition = ( |
|
559 | 566 | f'attachment; filename={f_path}_{diff1}_vs_{diff2}.diff' |
|
560 | 567 | ) |
|
561 | 568 | charset = self._get_default_encoding(c) |
|
562 | 569 | if charset: |
|
563 | 570 | response.charset = charset |
|
564 | 571 | return response |
|
565 | 572 | |
|
566 | 573 | elif c.action == 'raw': |
|
567 | 574 | _diff = diffs.get_gitdiff(node1, node2, |
|
568 | 575 | ignore_whitespace=ignore_whitespace, |
|
569 | 576 | context=line_context) |
|
570 | 577 | # NOTE: this was using diff_format='gitdiff' |
|
571 | 578 | diff = diffs.DiffProcessor(_diff, diff_format='newdiff') |
|
572 | 579 | |
|
573 | 580 | response = Response(self.path_filter.get_raw_patch(diff)) |
|
574 | 581 | response.content_type = 'text/plain' |
|
575 | 582 | charset = self._get_default_encoding(c) |
|
576 | 583 | if charset: |
|
577 | 584 | response.charset = charset |
|
578 | 585 | return response |
|
579 | 586 | |
|
580 | 587 | # in case we ever end up here |
|
581 | 588 | raise HTTPNotFound() |
|
582 | 589 | |
|
583 | 590 | @LoginRequired() |
|
584 | 591 | @HasRepoPermissionAnyDecorator( |
|
585 | 592 | 'repository.read', 'repository.write', 'repository.admin') |
|
586 | 593 | def repo_files_diff_2way_redirect(self): |
|
587 | 594 | """ |
|
588 | 595 | Kept only to make OLD links work |
|
589 | 596 | """ |
|
590 | 597 | f_path = self._get_f_path_unchecked(self.request.matchdict) |
|
591 | 598 | diff1 = self.request.GET.get('diff1', '') |
|
592 | 599 | diff2 = self.request.GET.get('diff2', '') |
|
593 | 600 | |
|
594 | 601 | if not any((diff1, diff2)): |
|
595 | 602 | h.flash( |
|
596 | 603 | 'Need query parameter "diff1" or "diff2" to generate a diff.', |
|
597 | 604 | category='error') |
|
598 | 605 | raise HTTPBadRequest() |
|
599 | 606 | |
|
600 | 607 | compare_url = h.route_path( |
|
601 | 608 | 'repo_compare', |
|
602 | 609 | repo_name=self.db_repo_name, |
|
603 | 610 | source_ref_type='rev', |
|
604 | 611 | source_ref=diff1, |
|
605 | 612 | target_ref_type='rev', |
|
606 | 613 | target_ref=diff2, |
|
607 | 614 | _query=dict(f_path=f_path, diffmode='sideside', |
|
608 | 615 | target_repo=self.db_repo_name,)) |
|
609 | 616 | raise HTTPFound(compare_url) |
|
610 | 617 | |
|
611 | 618 | @LoginRequired() |
|
612 | 619 | def repo_files_default_commit_redirect(self): |
|
613 | 620 | """ |
|
614 | 621 | Special page that redirects to the landing page of files based on the default |
|
615 | 622 | commit of the repository 
|
616 | 623 | """ |
|
617 | 624 | c = self.load_default_context() |
|
618 | 625 | ref_name = c.rhodecode_db_repo.landing_ref_name |
|
619 | 626 | landing_url = h.repo_files_by_ref_url( |
|
620 | 627 | c.rhodecode_db_repo.repo_name, |
|
621 | 628 | c.rhodecode_db_repo.repo_type, |
|
622 | 629 | f_path='', |
|
623 | 630 | ref_name=ref_name, |
|
624 | 631 | commit_id='tip', |
|
625 | 632 | query=dict(at=ref_name) |
|
626 | 633 | ) |
|
627 | 634 | |
|
628 | 635 | raise HTTPFound(landing_url) |
|
629 | 636 | |
|
630 | 637 | @LoginRequired() |
|
631 | 638 | @HasRepoPermissionAnyDecorator( |
|
632 | 639 | 'repository.read', 'repository.write', 'repository.admin') |
|
633 | 640 | def repo_files(self): |
|
634 | 641 | c = self.load_default_context() |
|
635 | 642 | |
|
636 | 643 | view_name = getattr(self.request.matched_route, 'name', None) |
|
637 | 644 | |
|
638 | 645 | c.annotate = view_name == 'repo_files:annotated' |
|
639 | 646 | # default is false, but .rst/.md files later are auto rendered, we can |
|
640 | 647 | # overwrite auto rendering by setting this GET flag |
|
641 | 648 | c.renderer = view_name == 'repo_files:rendered' or not self.request.GET.get('no-render', False) |
|
642 | 649 | |
|
643 | 650 | commit_id, f_path = self._get_commit_and_path() |
|
644 | 651 | |
|
645 | 652 | c.commit = self._get_commit_or_redirect(commit_id) |
|
646 | 653 | c.branch = self.request.GET.get('branch', None) |
|
647 | 654 | c.f_path = f_path |
|
648 | 655 | at_rev = self.request.GET.get('at') |
|
649 | 656 | |
|
650 | 657 | # files or dirs |
|
651 | 658 | try: |
|
652 | 659 | c.file = c.commit.get_node(f_path, pre_load=['is_binary', 'size', 'data']) |
|
653 | 660 | |
|
654 | 661 | c.file_author = True |
|
655 | 662 | c.file_tree = '' |
|
656 | 663 | |
|
657 | 664 | # prev link |
|
658 | 665 | try: |
|
659 | 666 | prev_commit = c.commit.prev(c.branch) |
|
660 | 667 | c.prev_commit = prev_commit |
|
661 | 668 | c.url_prev = h.route_path( |
|
662 | 669 | 'repo_files', repo_name=self.db_repo_name, |
|
663 | 670 | commit_id=prev_commit.raw_id, f_path=f_path) |
|
664 | 671 | if c.branch: |
|
665 | 672 | c.url_prev += '?branch=%s' % c.branch |
|
666 | 673 | except (CommitDoesNotExistError, VCSError): |
|
667 | 674 | c.url_prev = '#' |
|
668 | 675 | c.prev_commit = EmptyCommit() |
|
669 | 676 | |
|
670 | 677 | # next link |
|
671 | 678 | try: |
|
672 | 679 | next_commit = c.commit.next(c.branch) |
|
673 | 680 | c.next_commit = next_commit |
|
674 | 681 | c.url_next = h.route_path( |
|
675 | 682 | 'repo_files', repo_name=self.db_repo_name, |
|
676 | 683 | commit_id=next_commit.raw_id, f_path=f_path) |
|
677 | 684 | if c.branch: |
|
678 | 685 | c.url_next += '?branch=%s' % c.branch |
|
679 | 686 | except (CommitDoesNotExistError, VCSError): |
|
680 | 687 | c.url_next = '#' |
|
681 | 688 | c.next_commit = EmptyCommit() |
|
682 | 689 | |
|
683 | 690 | # load file content |
|
684 | 691 | if c.file.is_file(): |
|
685 | 692 | |
|
686 | 693 | c.lf_node = {} |
|
687 | 694 | |
|
688 | 695 | has_lf_enabled = self._is_lf_enabled(self.db_repo) |
|
689 | 696 | if has_lf_enabled: |
|
690 | 697 | c.lf_node = c.file.get_largefile_node() |
|
691 | 698 | |
|
692 | 699 | c.file_source_page = 'true' |
|
693 | 700 | c.file_last_commit = c.file.last_commit |
|
694 | 701 | |
|
695 | 702 | c.file_size_too_big = c.file.size > c.visual.cut_off_limit_file |
|
696 | 703 | |
|
697 | 704 | if not (c.file_size_too_big or c.file.is_binary): |
|
698 | 705 | if c.annotate: # annotation has precedence over renderer |
|
699 | 706 | c.annotated_lines = filenode_as_annotated_lines_tokens( |
|
700 | 707 | c.file |
|
701 | 708 | ) |
|
702 | 709 | else: |
|
703 | 710 | c.renderer = ( |
|
704 | 711 | c.renderer and h.renderer_from_filename(c.file.path) |
|
705 | 712 | ) |
|
706 | 713 | if not c.renderer: |
|
707 | 714 | c.lines = filenode_as_lines_tokens(c.file) |
|
708 | 715 | |
|
709 | 716 | _branch_name, _sha_commit_id, is_head = \ |
|
710 | 717 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
711 | 718 | landing_ref=self.db_repo.landing_ref_name) |
|
712 | 719 | c.on_branch_head = is_head |
|
713 | 720 | |
|
714 | 721 | branch = c.commit.branch if ( |
|
715 | 722 | c.commit.branch and '/' not in c.commit.branch) else None |
|
716 | 723 | c.branch_or_raw_id = branch or c.commit.raw_id |
|
717 | 724 | c.branch_name = c.commit.branch or h.short_id(c.commit.raw_id) |
|
718 | 725 | |
|
719 | 726 | author = c.file_last_commit.author |
|
720 | 727 | c.authors = [[ |
|
721 | 728 | h.email(author), |
|
722 | 729 | h.person(author, 'username_or_name_or_email'), |
|
723 | 730 | 1 |
|
724 | 731 | ]] |
|
725 | 732 | |
|
726 | 733 | else: # load tree content at path |
|
727 | 734 | c.file_source_page = 'false' |
|
728 | 735 | c.authors = [] |
|
729 | 736 | # this loads a simple tree without metadata to speed things up |
|
730 | 737 | # later, via ajax, we call repo_nodetree_full and fetch the full tree 
|
731 | 738 | c.file_tree = self._get_tree_at_commit(c, c.commit.raw_id, f_path, at_rev=at_rev) |
|
732 | 739 | |
|
733 | 740 | c.readme_data, c.readme_file = \ |
|
734 | 741 | self._get_readme_data(self.db_repo, c.visual.default_renderer, |
|
735 | 742 | c.commit.raw_id, f_path) |
|
736 | 743 | |
|
737 | 744 | except RepositoryError as e: |
|
738 | 745 | h.flash(h.escape(safe_str(e)), category='error') |
|
739 | 746 | raise HTTPNotFound() |
|
740 | 747 | |
|
741 | 748 | if self.request.environ.get('HTTP_X_PJAX'): |
|
742 | 749 | html = render('rhodecode:templates/files/files_pjax.mako', |
|
743 | 750 | self._get_template_context(c), self.request) |
|
744 | 751 | else: |
|
745 | 752 | html = render('rhodecode:templates/files/files.mako', |
|
746 | 753 | self._get_template_context(c), self.request) |
|
747 | 754 | return Response(html) |
|
748 | 755 | |
|
749 | 756 | @HasRepoPermissionAnyDecorator( |
|
750 | 757 | 'repository.read', 'repository.write', 'repository.admin') |
|
751 | 758 | def repo_files_annotated_previous(self): |
|
752 | 759 | self.load_default_context() |
|
753 | 760 | |
|
754 | 761 | commit_id, f_path = self._get_commit_and_path() |
|
755 | 762 | commit = self._get_commit_or_redirect(commit_id) |
|
756 | 763 | prev_commit_id = commit.raw_id |
|
757 | 764 | line_anchor = self.request.GET.get('line_anchor') |
|
758 | 765 | is_file = False |
|
759 | 766 | try: |
|
760 | 767 | _file = commit.get_node(f_path) |
|
761 | 768 | is_file = _file.is_file() |
|
762 | 769 | except (NodeDoesNotExistError, CommitDoesNotExistError, VCSError): |
|
763 | 770 | pass |
|
764 | 771 | |
|
765 | 772 | if is_file: |
|
766 | 773 | history = commit.get_path_history(f_path) |
|
767 | 774 | prev_commit_id = history[1].raw_id \ |
|
768 | 775 | if len(history) > 1 else prev_commit_id |
|
769 | 776 | prev_url = h.route_path( |
|
770 | 777 | 'repo_files:annotated', repo_name=self.db_repo_name, |
|
771 | 778 | commit_id=prev_commit_id, f_path=f_path, |
|
772 | 779 | _anchor=f'L{line_anchor}') |
|
773 | 780 | |
|
774 | 781 | raise HTTPFound(prev_url) |
|
775 | 782 | |
|
776 | 783 | @LoginRequired() |
|
777 | 784 | @HasRepoPermissionAnyDecorator( |
|
778 | 785 | 'repository.read', 'repository.write', 'repository.admin') |
|
779 | 786 | def repo_nodetree_full(self): |
|
780 | 787 | """ |
|
781 | 788 | Returns rendered html of file tree that contains commit date, |
|
782 | 789 | author, commit_id for the specified combination of |
|
783 | 790 | repo, commit_id and file path |
|
784 | 791 | """ |
|
785 | 792 | c = self.load_default_context() |
|
786 | 793 | |
|
787 | 794 | commit_id, f_path = self._get_commit_and_path() |
|
788 | 795 | commit = self._get_commit_or_redirect(commit_id) |
|
789 | 796 | try: |
|
790 | 797 | dir_node = commit.get_node(f_path) |
|
791 | 798 | except RepositoryError as e: |
|
792 | 799 | return Response(f'error: {h.escape(safe_str(e))}') |
|
793 | 800 | |
|
794 | 801 | if dir_node.is_file(): |
|
795 | 802 | return Response('') |
|
796 | 803 | |
|
797 | 804 | c.file = dir_node |
|
798 | 805 | c.commit = commit |
|
799 | 806 | at_rev = self.request.GET.get('at') |
|
800 | 807 | |
|
801 | 808 | html = self._get_tree_at_commit( |
|
802 | 809 | c, commit.raw_id, dir_node.path, full_load=True, at_rev=at_rev) |
|
803 | 810 | |
|
804 | 811 | return Response(html) |
|
805 | 812 | |
|
806 | 813 | def _get_attachement_headers(self, f_path): |
|
807 | 814 | f_name = safe_str(f_path.split(Repository.NAME_SEP)[-1]) |
|
808 | 815 | safe_path = f_name.replace('"', '\\"') |
|
809 | 816 | encoded_path = urllib.parse.quote(f_name) |
|
810 | 817 | |
|
811 | 818 | headers = "attachment; " \ |
|
812 | 819 | "filename=\"{}\"; " \ |
|
813 | 820 | "filename*=UTF-8\'\'{}".format(safe_path, encoded_path) |
|
814 | 821 | |
|
815 | 822 | return safe_bytes(headers).decode('latin-1', errors='replace') |
|
816 | 823 | |
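
The header built above pairs a quoted `filename` fallback with an RFC 5987 `filename*=UTF-8''...` form so non-ASCII names survive the download. A small standalone illustration; the sample filename is made up.

import urllib.parse

def attachment_headers(f_name: str) -> str:
    safe_name = f_name.replace('"', '\\"')
    encoded = urllib.parse.quote(f_name)
    return f'attachment; filename="{safe_name}"; filename*=UTF-8\'\'{encoded}'

print(attachment_headers('raport może.pdf'))
# attachment; filename="raport może.pdf"; filename*=UTF-8''raport%20mo%C5%BCe.pdf
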
|
817 | 824 | @LoginRequired() |
|
818 | 825 | @HasRepoPermissionAnyDecorator( |
|
819 | 826 | 'repository.read', 'repository.write', 'repository.admin') |
|
820 | 827 | def repo_file_raw(self): |
|
821 | 828 | """ |
|
822 | 829 | Action for "show as raw"; some mimetypes are rendered inline, 

823 | 830 | such as images and icons. 
|
824 | 831 | """ |
|
825 | 832 | c = self.load_default_context() |
|
826 | 833 | |
|
827 | 834 | commit_id, f_path = self._get_commit_and_path() |
|
828 | 835 | commit = self._get_commit_or_redirect(commit_id) |
|
829 | 836 | file_node = self._get_filenode_or_redirect(commit, f_path) |
|
830 | 837 | |
|
831 | 838 | raw_mimetype_mapping = { |
|
832 | 839 | # map original mimetype to a mimetype used for "show as raw" |
|
833 | 840 | # you can also provide a content-disposition to override the |
|
834 | 841 | # default "attachment" disposition. |
|
835 | 842 | # orig_type: (new_type, new_dispo) |
|
836 | 843 | |
|
837 | 844 | # show images inline: |
|
838 | 845 | # Do not re-add SVG: it is unsafe and permits XSS attacks. One can |
|
839 | 846 | # for example render an SVG with javascript inside or even render |
|
840 | 847 | # HTML. |
|
841 | 848 | 'image/x-icon': ('image/x-icon', 'inline'), |
|
842 | 849 | 'image/png': ('image/png', 'inline'), |
|
843 | 850 | 'image/gif': ('image/gif', 'inline'), |
|
844 | 851 | 'image/jpeg': ('image/jpeg', 'inline'), |
|
845 | 852 | 'application/pdf': ('application/pdf', 'inline'), |
|
846 | 853 | } |
|
847 | 854 | |
|
848 | 855 | mimetype = file_node.mimetype |
|
849 | 856 | try: |
|
850 | 857 | mimetype, disposition = raw_mimetype_mapping[mimetype] |
|
851 | 858 | except KeyError: |
|
852 | 859 | # we don't know anything special about this, handle it safely |
|
853 | 860 | if file_node.is_binary: |
|
854 | 861 | # do same as download raw for binary files |
|
855 | 862 | mimetype, disposition = 'application/octet-stream', 'attachment' |
|
856 | 863 | else: |
|
857 | 864 | # do not just use the original mimetype, but force text/plain, |
|
858 | 865 | # otherwise it would serve text/html and that might be unsafe. |
|
859 | 866 | # Note: underlying vcs library fakes text/plain mimetype if the |
|
860 | 867 | # mimetype can not be determined and it thinks it is not |
|
861 | 868 | # binary.This might lead to erroneous text display in some |
|
862 | 869 | # cases, but helps in other cases, like with text files |
|
863 | 870 | # without extension. |
|
864 | 871 | mimetype, disposition = 'text/plain', 'inline' |
|
865 | 872 | |
|
866 | 873 | if disposition == 'attachment': |
|
867 | 874 | disposition = self._get_attachement_headers(f_path) |
|
868 | 875 | |
|
869 | 876 | stream_content = file_node.stream_bytes() |
|
870 | 877 | |
|
871 | 878 | response = Response(app_iter=stream_content) |
|
872 | 879 | response.content_disposition = disposition |
|
873 | 880 | response.content_type = mimetype |
|
874 | 881 | |
|
875 | 882 | charset = self._get_default_encoding(c) |
|
876 | 883 | if charset: |
|
877 | 884 | response.charset = charset |
|
878 | 885 | |
|
879 | 886 | return response |
|
880 | 887 | |
|
881 | 888 | @LoginRequired() |
|
882 | 889 | @HasRepoPermissionAnyDecorator( |
|
883 | 890 | 'repository.read', 'repository.write', 'repository.admin') |
|
884 | 891 | def repo_file_download(self): |
|
885 | 892 | c = self.load_default_context() |
|
886 | 893 | |
|
887 | 894 | commit_id, f_path = self._get_commit_and_path() |
|
888 | 895 | commit = self._get_commit_or_redirect(commit_id) |
|
889 | 896 | file_node = self._get_filenode_or_redirect(commit, f_path) |
|
890 | 897 | |
|
891 | 898 | if self.request.GET.get('lf'): |
|
892 | 899 | # only if lf get flag is passed, we download this file |
|
893 | 900 | # as LFS/Largefile |
|
894 | 901 | lf_node = file_node.get_largefile_node() |
|
895 | 902 | if lf_node: |
|
896 | 903 | # overwrite our pointer with the REAL large-file |
|
897 | 904 | file_node = lf_node |
|
898 | 905 | |
|
899 | 906 | disposition = self._get_attachement_headers(f_path) |
|
900 | 907 | |
|
901 | 908 | stream_content = file_node.stream_bytes() |
|
902 | 909 | |
|
903 | 910 | response = Response(app_iter=stream_content) |
|
904 | 911 | response.content_disposition = disposition |
|
905 | 912 | response.content_type = file_node.mimetype |
|
906 | 913 | |
|
907 | 914 | charset = self._get_default_encoding(c) |
|
908 | 915 | if charset: |
|
909 | 916 | response.charset = charset |
|
910 | 917 | |
|
911 | 918 | return response |
|
912 | 919 | |
|
913 | 920 | def _get_nodelist_at_commit(self, repo_name, repo_id, commit_id, f_path): |
|
914 | 921 | |
|
915 | 922 | cache_seconds = safe_int( |
|
916 | 923 | rhodecode.CONFIG.get('rc_cache.cache_repo.expiration_time')) |
|
917 | 924 | cache_on = cache_seconds > 0 |
|
918 | 925 | log.debug( |
|
919 | 926 | 'Computing FILE SEARCH for repo_id %s commit_id `%s` and path `%s`' |
|
920 | 927 | 'with caching: %s[TTL: %ss]' % ( |
|
921 | 928 | repo_id, commit_id, f_path, cache_on, cache_seconds or 0)) |
|
922 | 929 | |
|
923 | 930 | cache_namespace_uid = f'repo.{repo_id}' |
|
924 | 931 | region = rc_cache.get_or_create_region('cache_repo', cache_namespace_uid) |
|
925 | 932 | |
|
926 | 933 | @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=cache_on) |
|
927 | 934 | def compute_file_search(_name_hash, _repo_id, _commit_id, _f_path): |
|
928 | 935 | log.debug('Generating cached nodelist for repo_id:%s, %s, %s', |
|
929 | 936 | _repo_id, commit_id, f_path) |
|
930 | 937 | try: |
|
931 | 938 | _d, _f = ScmModel().get_quick_filter_nodes(repo_name, _commit_id, _f_path) |
|
932 | 939 | except (RepositoryError, CommitDoesNotExistError, Exception) as e: |
|
933 | 940 | log.exception(safe_str(e)) |
|
934 | 941 | h.flash(h.escape(safe_str(e)), category='error') |
|
935 | 942 | raise HTTPFound(h.route_path( |
|
936 | 943 | 'repo_files', repo_name=self.db_repo_name, |
|
937 | 944 | commit_id='tip', f_path='/')) |
|
938 | 945 | |
|
939 | 946 | return _d + _f |
|
940 | 947 | |
|
941 | 948 | result = compute_file_search(self.db_repo.repo_name_hash, self.db_repo.repo_id, |
|
942 | 949 | commit_id, f_path) |
|
943 | 950 | return filter(lambda n: self.path_filter.path_access_allowed(n['name']), result) |
|
944 | 951 | |
|
945 | 952 | @LoginRequired() |
|
946 | 953 | @HasRepoPermissionAnyDecorator( |
|
947 | 954 | 'repository.read', 'repository.write', 'repository.admin') |
|
948 | 955 | def repo_nodelist(self): |
|
949 | 956 | self.load_default_context() |
|
950 | 957 | |
|
951 | 958 | commit_id, f_path = self._get_commit_and_path() |
|
952 | 959 | commit = self._get_commit_or_redirect(commit_id) |
|
953 | 960 | |
|
954 | 961 | metadata = self._get_nodelist_at_commit( |
|
955 | 962 | self.db_repo_name, self.db_repo.repo_id, commit.raw_id, f_path) |
|
956 | 963 | return {'nodes': [x for x in metadata]} |
|
957 | 964 | |
|
958 | 965 | def _create_references(self, branches_or_tags, symbolic_reference, f_path, ref_type): |
|
959 | 966 | items = [] |
|
960 | 967 | for name, commit_id in branches_or_tags.items(): |
|
961 | 968 | sym_ref = symbolic_reference(commit_id, name, f_path, ref_type) |
|
962 | 969 | items.append((sym_ref, name, ref_type)) |
|
963 | 970 | return items |
|
964 | 971 | |
|
965 | 972 | def _symbolic_reference(self, commit_id, name, f_path, ref_type): |
|
966 | 973 | return commit_id |
|
967 | 974 | |
|
968 | 975 | def _symbolic_reference_svn(self, commit_id, name, f_path, ref_type): |
|
969 | 976 | return commit_id |
|
970 | 977 | |
|
971 | 978 | # NOTE(dan): old code we used in "diff" mode compare |
|
972 | 979 | new_f_path = vcspath.join(name, f_path) |
|
973 | 980 | return f'{new_f_path}@{commit_id}' |
|
974 | 981 | |
|
975 | 982 | def _get_node_history(self, commit_obj, f_path, commits=None): |
|
976 | 983 | """ |
|
977 | 984 | get commit history for given node |
|
978 | 985 | |
|
979 | 986 | :param commit_obj: commit to calculate history |
|
980 | 987 | :param f_path: path for node to calculate history for |
|
981 | 988 | :param commits: if passed don't calculate history and take |
|
982 | 989 | commits defined in this list |
|
983 | 990 | """ |
|
984 | 991 | _ = self.request.translate |
|
985 | 992 | |
|
986 | 993 | # calculate history based on tip |
|
987 | 994 | tip = self.rhodecode_vcs_repo.get_commit() |
|
988 | 995 | if commits is None: |
|
989 | 996 | pre_load = ["author", "branch"] |
|
990 | 997 | try: |
|
991 | 998 | commits = tip.get_path_history(f_path, pre_load=pre_load) |
|
992 | 999 | except (NodeDoesNotExistError, CommitError): |
|
993 | 1000 | # this node is not present at tip! |
|
994 | 1001 | commits = commit_obj.get_path_history(f_path, pre_load=pre_load) |
|
995 | 1002 | |
|
996 | 1003 | history = [] |
|
997 | 1004 | commits_group = ([], _("Changesets")) |
|
998 | 1005 | for commit in commits: |
|
999 | 1006 | branch = ' (%s)' % commit.branch if commit.branch else '' |
|
1000 | 1007 | n_desc = f'r{commit.idx}:{commit.short_id}{branch}' |
|
1001 | 1008 | commits_group[0].append((commit.raw_id, n_desc, 'sha')) |
|
1002 | 1009 | history.append(commits_group) |
|
1003 | 1010 | |
|
1004 | 1011 | symbolic_reference = self._symbolic_reference |
|
1005 | 1012 | |
|
1006 | 1013 | if self.rhodecode_vcs_repo.alias == 'svn': |
|
1007 | 1014 | adjusted_f_path = RepoFilesView.adjust_file_path_for_svn( |
|
1008 | 1015 | f_path, self.rhodecode_vcs_repo) |
|
1009 | 1016 | if adjusted_f_path != f_path: |
|
1010 | 1017 | log.debug( |
|
1011 | 1018 | 'Recognized svn tag or branch in file "%s", using svn ' |
|
1012 | 1019 | 'specific symbolic references', f_path) |
|
1013 | 1020 | f_path = adjusted_f_path |
|
1014 | 1021 | symbolic_reference = self._symbolic_reference_svn |
|
1015 | 1022 | |
|
1016 | 1023 | branches = self._create_references( |
|
1017 | 1024 | self.rhodecode_vcs_repo.branches, symbolic_reference, f_path, 'branch') |
|
1018 | 1025 | branches_group = (branches, _("Branches")) |
|
1019 | 1026 | |
|
1020 | 1027 | tags = self._create_references( |
|
1021 | 1028 | self.rhodecode_vcs_repo.tags, symbolic_reference, f_path, 'tag') |
|
1022 | 1029 | tags_group = (tags, _("Tags")) |
|
1023 | 1030 | |
|
1024 | 1031 | history.append(branches_group) |
|
1025 | 1032 | history.append(tags_group) |
|
1026 | 1033 | |
|
1027 | 1034 | return history, commits |
|
1028 | 1035 | |
|
1029 | 1036 | @LoginRequired() |
|
1030 | 1037 | @HasRepoPermissionAnyDecorator( |
|
1031 | 1038 | 'repository.read', 'repository.write', 'repository.admin') |
|
1032 | 1039 | def repo_file_history(self): |
|
1033 | 1040 | self.load_default_context() |
|
1034 | 1041 | |
|
1035 | 1042 | commit_id, f_path = self._get_commit_and_path() |
|
1036 | 1043 | commit = self._get_commit_or_redirect(commit_id) |
|
1037 | 1044 | file_node = self._get_filenode_or_redirect(commit, f_path) |
|
1038 | 1045 | |
|
1039 | 1046 | if file_node.is_file(): |
|
1040 | 1047 | file_history, _hist = self._get_node_history(commit, f_path) |
|
1041 | 1048 | |
|
1042 | 1049 | res = [] |
|
1043 | 1050 | for section_items, section in file_history: |
|
1044 | 1051 | items = [] |
|
1045 | 1052 | for obj_id, obj_text, obj_type in section_items: |
|
1046 | 1053 | at_rev = '' |
|
1047 | 1054 | if obj_type in ['branch', 'bookmark', 'tag']: |
|
1048 | 1055 | at_rev = obj_text |
|
1049 | 1056 | entry = { |
|
1050 | 1057 | 'id': obj_id, |
|
1051 | 1058 | 'text': obj_text, |
|
1052 | 1059 | 'type': obj_type, |
|
1053 | 1060 | 'at_rev': at_rev |
|
1054 | 1061 | } |
|
1055 | 1062 | |
|
1056 | 1063 | items.append(entry) |
|
1057 | 1064 | |
|
1058 | 1065 | res.append({ |
|
1059 | 1066 | 'text': section, |
|
1060 | 1067 | 'children': items |
|
1061 | 1068 | }) |
|
1062 | 1069 | |
|
1063 | 1070 | data = { |
|
1064 | 1071 | 'more': False, |
|
1065 | 1072 | 'results': res |
|
1066 | 1073 | } |
|
1067 | 1074 | return data |
|
1068 | 1075 | |
|
1069 | 1076 | log.warning('Cannot fetch history for directory') |
|
1070 | 1077 | raise HTTPBadRequest() |
|
1071 | 1078 | |
|
1072 | 1079 | @LoginRequired() |
|
1073 | 1080 | @HasRepoPermissionAnyDecorator( |
|
1074 | 1081 | 'repository.read', 'repository.write', 'repository.admin') |
|
1075 | 1082 | def repo_file_authors(self): |
|
1076 | 1083 | c = self.load_default_context() |
|
1077 | 1084 | |
|
1078 | 1085 | commit_id, f_path = self._get_commit_and_path() |
|
1079 | 1086 | commit = self._get_commit_or_redirect(commit_id) |
|
1080 | 1087 | file_node = self._get_filenode_or_redirect(commit, f_path) |
|
1081 | 1088 | |
|
1082 | 1089 | if not file_node.is_file(): |
|
1083 | 1090 | raise HTTPBadRequest() |
|
1084 | 1091 | |
|
1085 | 1092 | c.file_last_commit = file_node.last_commit |
|
1086 | 1093 | if self.request.GET.get('annotate') == '1': |
|
1087 | 1094 | # use _hist from annotation if annotation mode is on |
|
1088 | 1095 | commit_ids = {x[1] for x in file_node.annotate} |
|
1089 | 1096 | _hist = ( |
|
1090 | 1097 | self.rhodecode_vcs_repo.get_commit(commit_id) |
|
1091 | 1098 | for commit_id in commit_ids) |
|
1092 | 1099 | else: |
|
1093 | 1100 | _f_history, _hist = self._get_node_history(commit, f_path) |
|
1094 | 1101 | c.file_author = False |
|
1095 | 1102 | |
|
1096 | 1103 | unique = collections.OrderedDict() |
|
1097 | 1104 | for commit in _hist: |
|
1098 | 1105 | author = commit.author |
|
1099 | 1106 | if author not in unique: |
|
1100 | 1107 | unique[commit.author] = [ |
|
1101 | 1108 | h.email(author), |
|
1102 | 1109 | h.person(author, 'username_or_name_or_email'), |
|
1103 | 1110 | 1 # counter |
|
1104 | 1111 | ] |
|
1105 | 1112 | |
|
1106 | 1113 | else: |
|
1107 | 1114 | # increase counter |
|
1108 | 1115 | unique[commit.author][2] += 1 |
|
1109 | 1116 | |
|
1110 | 1117 | c.authors = [val for val in unique.values()] |
|
1111 | 1118 | |
|
1112 | 1119 | return self._get_template_context(c) |
|
1113 | 1120 | |
|
1114 | 1121 | @LoginRequired() |
|
1115 | 1122 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1116 | 1123 | def repo_files_check_head(self): |
|
1117 | 1124 | self.load_default_context() |
|
1118 | 1125 | |
|
1119 | 1126 | commit_id, f_path = self._get_commit_and_path() |
|
1120 | 1127 | _branch_name, _sha_commit_id, is_head = \ |
|
1121 | 1128 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1122 | 1129 | landing_ref=self.db_repo.landing_ref_name) |
|
1123 | 1130 | |
|
1124 | 1131 | new_path = self.request.POST.get('path') |
|
1125 | 1132 | operation = self.request.POST.get('operation') |
|
1126 | 1133 | path_exist = '' |
|
1127 | 1134 | |
|
1128 | 1135 | if new_path and operation in ['create', 'upload']: |
|
1129 | 1136 | new_f_path = os.path.join(f_path.lstrip('/'), new_path) |
|
1130 | 1137 | try: |
|
1131 | 1138 | commit_obj = self.rhodecode_vcs_repo.get_commit(commit_id) |
|
1132 | 1139 | # NOTE(dan): construct whole path without leading / |
|
1133 | 1140 | file_node = commit_obj.get_node(new_f_path) |
|
1134 | 1141 | if file_node is not None: |
|
1135 | 1142 | path_exist = new_f_path |
|
1136 | 1143 | except EmptyRepositoryError: |
|
1137 | 1144 | pass |
|
1138 | 1145 | except Exception: |
|
1139 | 1146 | pass |
|
1140 | 1147 | |
|
1141 | 1148 | return { |
|
1142 | 1149 | 'branch': _branch_name, |
|
1143 | 1150 | 'sha': _sha_commit_id, |
|
1144 | 1151 | 'is_head': is_head, |
|
1145 | 1152 | 'path_exists': path_exist |
|
1146 | 1153 | } |
|
1147 | 1154 | |
|
1148 | 1155 | @LoginRequired() |
|
1149 | 1156 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1150 | 1157 | def repo_files_remove_file(self): |
|
1151 | 1158 | _ = self.request.translate |
|
1152 | 1159 | c = self.load_default_context() |
|
1153 | 1160 | commit_id, f_path = self._get_commit_and_path() |
|
1154 | 1161 | |
|
1155 | 1162 | self._ensure_not_locked() |
|
1156 | 1163 | _branch_name, _sha_commit_id, is_head = \ |
|
1157 | 1164 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1158 | 1165 | landing_ref=self.db_repo.landing_ref_name) |
|
1159 | 1166 | |
|
1160 | 1167 | self.forbid_non_head(is_head, f_path) |
|
1161 | 1168 | self.check_branch_permission(_branch_name) |
|
1162 | 1169 | |
|
1163 | 1170 | c.commit = self._get_commit_or_redirect(commit_id) |
|
1164 | 1171 | c.file = self._get_filenode_or_redirect(c.commit, f_path) |
|
1165 | 1172 | |
|
1166 | 1173 | c.default_message = _( |
|
1167 | 1174 | 'Deleted file {} via RhodeCode Enterprise').format(f_path) |
|
1168 | 1175 | c.f_path = f_path |
|
1169 | 1176 | |
|
1170 | 1177 | return self._get_template_context(c) |
|
1171 | 1178 | |
|
1172 | 1179 | @LoginRequired() |
|
1173 | 1180 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1174 | 1181 | @CSRFRequired() |
|
1175 | 1182 | def repo_files_delete_file(self): |
|
1176 | 1183 | _ = self.request.translate |
|
1177 | 1184 | |
|
1178 | 1185 | c = self.load_default_context() |
|
1179 | 1186 | commit_id, f_path = self._get_commit_and_path() |
|
1180 | 1187 | |
|
1181 | 1188 | self._ensure_not_locked() |
|
1182 | 1189 | _branch_name, _sha_commit_id, is_head = \ |
|
1183 | 1190 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1184 | 1191 | landing_ref=self.db_repo.landing_ref_name) |
|
1185 | 1192 | |
|
1186 | 1193 | self.forbid_non_head(is_head, f_path) |
|
1187 | 1194 | self.check_branch_permission(_branch_name) |
|
1188 | 1195 | |
|
1189 | 1196 | c.commit = self._get_commit_or_redirect(commit_id) |
|
1190 | 1197 | c.file = self._get_filenode_or_redirect(c.commit, f_path) |
|
1191 | 1198 | |
|
1192 | 1199 | c.default_message = _( |
|
1193 | 1200 | 'Deleted file {} via RhodeCode Enterprise').format(f_path) |
|
1194 | 1201 | c.f_path = f_path |
|
1195 | 1202 | node_path = f_path |
|
1196 | 1203 | author = self._rhodecode_db_user.full_contact |
|
1197 | 1204 | message = self.request.POST.get('message') or c.default_message |
|
1198 | 1205 | try: |
|
1199 | 1206 | nodes = { |
|
1200 | 1207 | safe_bytes(node_path): { |
|
1201 | 1208 | 'content': b'' |
|
1202 | 1209 | } |
|
1203 | 1210 | } |
|
1204 | 1211 | ScmModel().delete_nodes( |
|
1205 | 1212 | user=self._rhodecode_db_user.user_id, repo=self.db_repo, |
|
1206 | 1213 | message=message, |
|
1207 | 1214 | nodes=nodes, |
|
1208 | 1215 | parent_commit=c.commit, |
|
1209 | 1216 | author=author, |
|
1210 | 1217 | ) |
|
1211 | 1218 | |
|
1212 | 1219 | h.flash( |
|
1213 | 1220 | _('Successfully deleted file `{}`').format( |
|
1214 | 1221 | h.escape(f_path)), category='success') |
|
1215 | 1222 | except Exception: |
|
1216 | 1223 | log.exception('Error during commit operation') |
|
1217 | 1224 | h.flash(_('Error occurred during commit'), category='error') |
|
1218 | 1225 | raise HTTPFound( |
|
1219 | 1226 | h.route_path('repo_commit', repo_name=self.db_repo_name, |
|
1220 | 1227 | commit_id='tip')) |
|
1221 | 1228 | |
|
1222 | 1229 | @LoginRequired() |
|
1223 | 1230 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1224 | 1231 | def repo_files_edit_file(self): |
|
1225 | 1232 | _ = self.request.translate |
|
1226 | 1233 | c = self.load_default_context() |
|
1227 | 1234 | commit_id, f_path = self._get_commit_and_path() |
|
1228 | 1235 | |
|
1229 | 1236 | self._ensure_not_locked() |
|
1230 | 1237 | _branch_name, _sha_commit_id, is_head = \ |
|
1231 | 1238 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1232 | 1239 | landing_ref=self.db_repo.landing_ref_name) |
|
1233 | 1240 | |
|
1234 | 1241 | self.forbid_non_head(is_head, f_path, commit_id=commit_id) |
|
1235 | 1242 | self.check_branch_permission(_branch_name, commit_id=commit_id) |
|
1236 | 1243 | |
|
1237 | 1244 | c.commit = self._get_commit_or_redirect(commit_id) |
|
1238 | 1245 | c.file = self._get_filenode_or_redirect(c.commit, f_path) |
|
1239 | 1246 | |
|
1240 | 1247 | if c.file.is_binary: |
|
1241 | 1248 | files_url = h.route_path( |
|
1242 | 1249 | 'repo_files', |
|
1243 | 1250 | repo_name=self.db_repo_name, |
|
1244 | 1251 | commit_id=c.commit.raw_id, f_path=f_path) |
|
1245 | 1252 | raise HTTPFound(files_url) |
|
1246 | 1253 | |
|
1247 | 1254 | c.default_message = _('Edited file {} via RhodeCode Enterprise').format(f_path) |
|
1248 | 1255 | c.f_path = f_path |
|
1249 | 1256 | |
|
1250 | 1257 | return self._get_template_context(c) |
|
1251 | 1258 | |
|
1252 | 1259 | @LoginRequired() |
|
1253 | 1260 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1254 | 1261 | @CSRFRequired() |
|
1255 | 1262 | def repo_files_update_file(self): |
|
1256 | 1263 | _ = self.request.translate |
|
1257 | 1264 | c = self.load_default_context() |
|
1258 | 1265 | commit_id, f_path = self._get_commit_and_path() |
|
1259 | 1266 | |
|
1260 | 1267 | self._ensure_not_locked() |
|
1261 | 1268 | |
|
1262 | 1269 | c.commit = self._get_commit_or_redirect(commit_id) |
|
1263 | 1270 | c.file = self._get_filenode_or_redirect(c.commit, f_path) |
|
1264 | 1271 | |
|
1265 | 1272 | if c.file.is_binary: |
|
1266 | 1273 | raise HTTPFound(h.route_path('repo_files', repo_name=self.db_repo_name, |
|
1267 | 1274 | commit_id=c.commit.raw_id, f_path=f_path)) |
|
1268 | 1275 | |
|
1269 | 1276 | _branch_name, _sha_commit_id, is_head = \ |
|
1270 | 1277 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1271 | 1278 | landing_ref=self.db_repo.landing_ref_name) |
|
1272 | 1279 | |
|
1273 | 1280 | self.forbid_non_head(is_head, f_path, commit_id=commit_id) |
|
1274 | 1281 | self.check_branch_permission(_branch_name, commit_id=commit_id) |
|
1275 | 1282 | |
|
1276 | 1283 | c.default_message = _('Edited file {} via RhodeCode Enterprise').format(f_path) |
|
1277 | 1284 | c.f_path = f_path |
|
1278 | 1285 | |
|
1279 | 1286 | old_content = c.file.str_content |
|
1280 | 1287 | sl = old_content.splitlines(1) |
|
1281 | 1288 | first_line = sl[0] if sl else '' |
|
1282 | 1289 | |
|
1283 | 1290 | r_post = self.request.POST |
|
1284 | 1291 | # line endings: 0 - Unix, 1 - Mac, 2 - DOS |
|
1285 | 1292 | line_ending_mode = detect_mode(first_line, 0) |
|
1286 | 1293 | content = convert_line_endings(r_post.get('content', ''), line_ending_mode) |
|
1287 | 1294 | |
|
1288 | 1295 | message = r_post.get('message') or c.default_message |
|
1289 | 1296 | |
|
1290 | 1297 | org_node_path = c.file.str_path |
|
1291 | 1298 | filename = r_post['filename'] |
|
1292 | 1299 | |
|
1293 | 1300 | root_path = c.file.dir_path |
|
1294 | 1301 | pure_path = self.create_pure_path(root_path, filename) |
|
1295 | 1302 | node_path = pure_path.as_posix() |
|
1296 | 1303 | |
|
1297 | 1304 | default_redirect_url = h.route_path('repo_commit', repo_name=self.db_repo_name, |
|
1298 | 1305 | commit_id=commit_id) |
|
1299 | 1306 | if content == old_content and node_path == org_node_path: |
|
1300 | 1307 | h.flash(_('No changes detected on {}').format(h.escape(org_node_path)), |
|
1301 | 1308 | category='warning') |
|
1302 | 1309 | raise HTTPFound(default_redirect_url) |
|
1303 | 1310 | |
|
1304 | 1311 | try: |
|
1305 | 1312 | mapping = { |
|
1306 | 1313 | c.file.bytes_path: { |
|
1307 | 1314 | 'org_filename': org_node_path, |
|
1308 | 1315 | 'filename': safe_bytes(node_path), |
|
1309 | 1316 | 'content': safe_bytes(content), |
|
1310 | 1317 | 'lexer': '', |
|
1311 | 1318 | 'op': 'mod', |
|
1312 | 1319 | 'mode': c.file.mode |
|
1313 | 1320 | } |
|
1314 | 1321 | } |
|
1315 | 1322 | |
|
1316 | 1323 | commit = ScmModel().update_nodes( |
|
1317 | 1324 | user=self._rhodecode_db_user.user_id, |
|
1318 | 1325 | repo=self.db_repo, |
|
1319 | 1326 | message=message, |
|
1320 | 1327 | nodes=mapping, |
|
1321 | 1328 | parent_commit=c.commit, |
|
1322 | 1329 | ) |
|
1323 | 1330 | |
|
1324 | 1331 | h.flash(_('Successfully committed changes to file `{}`').format( |
|
1325 | 1332 | h.escape(f_path)), category='success') |
|
1326 | 1333 | default_redirect_url = h.route_path( |
|
1327 | 1334 | 'repo_commit', repo_name=self.db_repo_name, commit_id=commit.raw_id) |
|
1328 | 1335 | |
|
1329 | 1336 | except Exception: |
|
1330 | 1337 | log.exception('Error occurred during commit') |
|
1331 | 1338 | h.flash(_('Error occurred during commit'), category='error') |
|
1332 | 1339 | |
|
1333 | 1340 | raise HTTPFound(default_redirect_url) |
|
1334 | 1341 | |
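
A hedged sketch of the line-ending handling used in repo_files_update_file above: detect the style of the first line of the existing file, then normalise the submitted content to it. These helpers only mirror how detect_mode and convert_line_endings are called here; they are illustrative re-implementations, not the rhodecode.lib originals.

def detect_mode(line: str, default: int) -> int:
    # 0 - Unix (\n), 1 - Mac (\r), 2 - DOS (\r\n), same numbering as the comment above
    if line.endswith('\r\n'):
        return 2
    if line.endswith('\n'):
        return 0
    if line.endswith('\r'):
        return 1
    return default

def convert_line_endings(text: str, mode: int) -> str:
    ending = {0: '\n', 1: '\r', 2: '\r\n'}[mode]
    return ending.join(text.splitlines()) + (ending if text.endswith(('\n', '\r')) else '')

assert detect_mode('first line\r\n', 0) == 2
assert convert_line_endings('a\nb\n', 2) == 'a\r\nb\r\n'
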
|
1335 | 1342 | @LoginRequired() |
|
1336 | 1343 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1337 | 1344 | def repo_files_add_file(self): |
|
1338 | 1345 | _ = self.request.translate |
|
1339 | 1346 | c = self.load_default_context() |
|
1340 | 1347 | commit_id, f_path = self._get_commit_and_path() |
|
1341 | 1348 | |
|
1342 | 1349 | self._ensure_not_locked() |
|
1343 | 1350 | |
|
1344 | 1351 | # Check if we need to use this page to upload binary |
|
1345 | 1352 | upload_binary = str2bool(self.request.params.get('upload_binary', False)) |
|
1346 | 1353 | |
|
1347 | 1354 | c.commit = self._get_commit_or_redirect(commit_id, redirect_after=False) |
|
1348 | 1355 | if c.commit is None: |
|
1349 | 1356 | c.commit = EmptyCommit(alias=self.rhodecode_vcs_repo.alias) |
|
1350 | 1357 | |
|
1351 | 1358 | if self.rhodecode_vcs_repo.is_empty(): |
|
1352 | 1359 | # for empty repository we cannot check for current branch, we rely on |
|
1353 | 1360 | # c.commit.branch instead |
|
1354 | 1361 | _branch_name, _sha_commit_id, is_head = c.commit.branch, '', True |
|
1355 | 1362 | else: |
|
1356 | 1363 | _branch_name, _sha_commit_id, is_head = \ |
|
1357 | 1364 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1358 | 1365 | landing_ref=self.db_repo.landing_ref_name) |
|
1359 | 1366 | |
|
1360 | 1367 | self.forbid_non_head(is_head, f_path, commit_id=commit_id) |
|
1361 | 1368 | self.check_branch_permission(_branch_name, commit_id=commit_id) |
|
1362 | 1369 | |
|
1363 | 1370 | c.default_message = (_('Added file via RhodeCode Enterprise')) \ |
|
1364 | 1371 | if not upload_binary else (_('Edited file {} via RhodeCode Enterprise').format(f_path)) |
|
1365 | 1372 | c.f_path = f_path.lstrip('/')  # ensure the path is not absolute 
|
1366 | 1373 | c.replace_binary = upload_binary |
|
1367 | 1374 | |
|
1368 | 1375 | return self._get_template_context(c) |
|
1369 | 1376 | |
|
1370 | 1377 | @LoginRequired() |
|
1371 | 1378 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1372 | 1379 | @CSRFRequired() |
|
1373 | 1380 | def repo_files_create_file(self): |
|
1374 | 1381 | _ = self.request.translate |
|
1375 | 1382 | c = self.load_default_context() |
|
1376 | 1383 | commit_id, f_path = self._get_commit_and_path() |
|
1377 | 1384 | |
|
1378 | 1385 | self._ensure_not_locked() |
|
1379 | 1386 | |
|
1380 | 1387 | c.commit = self._get_commit_or_redirect(commit_id, redirect_after=False) |
|
1381 | 1388 | if c.commit is None: |
|
1382 | 1389 | c.commit = EmptyCommit(alias=self.rhodecode_vcs_repo.alias) |
|
1383 | 1390 | |
|
1384 | 1391 | # calculate redirect URL |
|
1385 | 1392 | if self.rhodecode_vcs_repo.is_empty(): |
|
1386 | 1393 | default_redirect_url = h.route_path( |
|
1387 | 1394 | 'repo_summary', repo_name=self.db_repo_name) |
|
1388 | 1395 | else: |
|
1389 | 1396 | default_redirect_url = h.route_path( |
|
1390 | 1397 | 'repo_commit', repo_name=self.db_repo_name, commit_id='tip') |
|
1391 | 1398 | |
|
1392 | 1399 | if self.rhodecode_vcs_repo.is_empty(): |
|
1393 | 1400 | # for empty repository we cannot check for current branch, we rely on |
|
1394 | 1401 | # c.commit.branch instead |
|
1395 | 1402 | _branch_name, _sha_commit_id, is_head = c.commit.branch, '', True |
|
1396 | 1403 | else: |
|
1397 | 1404 | _branch_name, _sha_commit_id, is_head = \ |
|
1398 | 1405 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1399 | 1406 | landing_ref=self.db_repo.landing_ref_name) |
|
1400 | 1407 | |
|
1401 | 1408 | self.forbid_non_head(is_head, f_path, commit_id=commit_id) |
|
1402 | 1409 | self.check_branch_permission(_branch_name, commit_id=commit_id) |
|
1403 | 1410 | |
|
1404 | 1411 | c.default_message = (_('Added file via RhodeCode Enterprise')) |
|
1405 | 1412 | c.f_path = f_path |
|
1406 | 1413 | |
|
1407 | 1414 | r_post = self.request.POST |
|
1408 | 1415 | message = r_post.get('message') or c.default_message |
|
1409 | 1416 | filename = r_post.get('filename') |
|
1410 | 1417 | unix_mode = 0 |
|
1411 | 1418 | |
|
1412 | 1419 | if not filename: |
|
1413 | 1420 | # If there's no commit, redirect to repo summary |
|
1414 | 1421 | if type(c.commit) is EmptyCommit: |
|
1415 | 1422 | redirect_url = h.route_path( |
|
1416 | 1423 | 'repo_summary', repo_name=self.db_repo_name) |
|
1417 | 1424 | else: |
|
1418 | 1425 | redirect_url = default_redirect_url |
|
1419 | 1426 | h.flash(_('No filename specified'), category='warning') |
|
1420 | 1427 | raise HTTPFound(redirect_url) |
|
1421 | 1428 | |
|
1422 | 1429 | root_path = f_path |
|
1423 | 1430 | pure_path = self.create_pure_path(root_path, filename) |
|
1424 | 1431 | node_path = pure_path.as_posix().lstrip('/') |
|
1425 | 1432 | |
|
1426 | 1433 | author = self._rhodecode_db_user.full_contact |
|
1427 | 1434 | content = convert_line_endings(r_post.get('content', ''), unix_mode) |
|
1428 | 1435 | nodes = { |
|
1429 | 1436 | safe_bytes(node_path): { |
|
1430 | 1437 | 'content': safe_bytes(content) |
|
1431 | 1438 | } |
|
1432 | 1439 | } |
|
1433 | 1440 | |
|
1434 | 1441 | try: |
|
1435 | 1442 | |
|
1436 | 1443 | commit = ScmModel().create_nodes( |
|
1437 | 1444 | user=self._rhodecode_db_user.user_id, |
|
1438 | 1445 | repo=self.db_repo, |
|
1439 | 1446 | message=message, |
|
1440 | 1447 | nodes=nodes, |
|
1441 | 1448 | parent_commit=c.commit, |
|
1442 | 1449 | author=author, |
|
1443 | 1450 | ) |
|
1444 | 1451 | |
|
1445 | 1452 | h.flash(_('Successfully committed new file `{}`').format( |
|
1446 | 1453 | h.escape(node_path)), category='success') |
|
1447 | 1454 | |
|
1448 | 1455 | default_redirect_url = h.route_path( |
|
1449 | 1456 | 'repo_commit', repo_name=self.db_repo_name, commit_id=commit.raw_id) |
|
1450 | 1457 | |
|
1451 | 1458 | except NonRelativePathError: |
|
1452 | 1459 | log.exception('Non Relative path found') |
|
1453 | 1460 | h.flash(_('The location specified must be a relative path and must not ' |
|
1454 | 1461 | 'contain .. in the path'), category='warning') |
|
1455 | 1462 | raise HTTPFound(default_redirect_url) |
|
1456 | 1463 | except (NodeError, NodeAlreadyExistsError) as e: |
|
1457 | 1464 | h.flash(h.escape(safe_str(e)), category='error') |
|
1458 | 1465 | except Exception: |
|
1459 | 1466 | log.exception('Error occurred during commit') |
|
1460 | 1467 | h.flash(_('Error occurred during commit'), category='error') |
|
1461 | 1468 | |
|
1462 | 1469 | raise HTTPFound(default_redirect_url) |
|
1463 | 1470 | |
|
1464 | 1471 | @LoginRequired() |
|
1465 | 1472 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1466 | 1473 | @CSRFRequired() |
|
1467 | 1474 | def repo_files_upload_file(self): |
|
1468 | 1475 | _ = self.request.translate |
|
1469 | 1476 | c = self.load_default_context() |
|
1470 | 1477 | commit_id, f_path = self._get_commit_and_path() |
|
1471 | 1478 | |
|
1472 | 1479 | self._ensure_not_locked() |
|
1473 | 1480 | |
|
1474 | 1481 | c.commit = self._get_commit_or_redirect(commit_id, redirect_after=False) |
|
1475 | 1482 | if c.commit is None: |
|
1476 | 1483 | c.commit = EmptyCommit(alias=self.rhodecode_vcs_repo.alias) |
|
1477 | 1484 | |
|
1478 | 1485 | # calculate redirect URL |
|
1479 | 1486 | if self.rhodecode_vcs_repo.is_empty(): |
|
1480 | 1487 | default_redirect_url = h.route_path( |
|
1481 | 1488 | 'repo_summary', repo_name=self.db_repo_name) |
|
1482 | 1489 | else: |
|
1483 | 1490 | default_redirect_url = h.route_path( |
|
1484 | 1491 | 'repo_commit', repo_name=self.db_repo_name, commit_id='tip') |
|
1485 | 1492 | |
|
1486 | 1493 | if self.rhodecode_vcs_repo.is_empty(): |
|
1487 | 1494 | # for empty repository we cannot check for current branch, we rely on |
|
1488 | 1495 | # c.commit.branch instead |
|
1489 | 1496 | _branch_name, _sha_commit_id, is_head = c.commit.branch, '', True |
|
1490 | 1497 | else: |
|
1491 | 1498 | _branch_name, _sha_commit_id, is_head = \ |
|
1492 | 1499 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1493 | 1500 | landing_ref=self.db_repo.landing_ref_name) |
|
1494 | 1501 | |
|
1495 | 1502 | error = self.forbid_non_head(is_head, f_path, json_mode=True) |
|
1496 | 1503 | if error: |
|
1497 | 1504 | return { |
|
1498 | 1505 | 'error': error, |
|
1499 | 1506 | 'redirect_url': default_redirect_url |
|
1500 | 1507 | } |
|
1501 | 1508 | error = self.check_branch_permission(_branch_name, json_mode=True) |
|
1502 | 1509 | if error: |
|
1503 | 1510 | return { |
|
1504 | 1511 | 'error': error, |
|
1505 | 1512 | 'redirect_url': default_redirect_url |
|
1506 | 1513 | } |
|
1507 | 1514 | |
|
1508 | 1515 | c.default_message = (_('Added file via RhodeCode Enterprise')) |
|
1509 | 1516 | c.f_path = f_path |
|
1510 | 1517 | |
|
1511 | 1518 | r_post = self.request.POST |
|
1512 | 1519 | |
|
1513 | 1520 | message = c.default_message |
|
1514 | 1521 | user_message = r_post.getall('message') |
|
1515 | 1522 | if isinstance(user_message, list) and user_message: |
|
1516 | 1523 | # we take the first from duplicated results if it's not empty |
|
1517 | 1524 | message = user_message[0] if user_message[0] else message |
|
1518 | 1525 | |
|
1519 | 1526 | nodes = {} |
|
1520 | 1527 | |
|
1521 | 1528 | for file_obj in r_post.getall('files_upload') or []: |
|
1522 | 1529 | content = file_obj.file |
|
1523 | 1530 | filename = file_obj.filename |
|
1524 | 1531 | |
|
1525 | 1532 | root_path = f_path |
|
1526 | 1533 | pure_path = self.create_pure_path(root_path, filename) |
|
1527 | 1534 | node_path = pure_path.as_posix().lstrip('/') |
|
1528 | 1535 | |
|
1529 | 1536 | nodes[safe_bytes(node_path)] = { |
|
1530 | 1537 | 'content': content |
|
1531 | 1538 | } |
|
1532 | 1539 | |
|
1533 | 1540 | if not nodes: |
|
1534 | 1541 | error = 'missing files' |
|
1535 | 1542 | return { |
|
1536 | 1543 | 'error': error, |
|
1537 | 1544 | 'redirect_url': default_redirect_url |
|
1538 | 1545 | } |
|
1539 | 1546 | |
|
1540 | 1547 | author = self._rhodecode_db_user.full_contact |
|
1541 | 1548 | |
|
1542 | 1549 | try: |
|
1543 | 1550 | commit = ScmModel().create_nodes( |
|
1544 | 1551 | user=self._rhodecode_db_user.user_id, |
|
1545 | 1552 | repo=self.db_repo, |
|
1546 | 1553 | message=message, |
|
1547 | 1554 | nodes=nodes, |
|
1548 | 1555 | parent_commit=c.commit, |
|
1549 | 1556 | author=author, |
|
1550 | 1557 | ) |
|
1551 | 1558 | if len(nodes) == 1: 

1552 | 1559 | flash_message = _('Successfully committed 1 new file') 

1553 | 1560 | else: 

1554 | 1561 | flash_message = _('Successfully committed {} new files').format(len(nodes)) 
|
1555 | 1562 | |
|
1556 | 1563 | h.flash(flash_message, category='success') |
|
1557 | 1564 | |
|
1558 | 1565 | default_redirect_url = h.route_path( |
|
1559 | 1566 | 'repo_commit', repo_name=self.db_repo_name, commit_id=commit.raw_id) |
|
1560 | 1567 | |
|
1561 | 1568 | except NonRelativePathError: |
|
1562 | 1569 | log.exception('Non Relative path found') |
|
1563 | 1570 | error = _('The location specified must be a relative path and must not ' |
|
1564 | 1571 | 'contain .. in the path') |
|
1565 | 1572 | h.flash(error, category='warning') |
|
1566 | 1573 | |
|
1567 | 1574 | return { |
|
1568 | 1575 | 'error': error, |
|
1569 | 1576 | 'redirect_url': default_redirect_url |
|
1570 | 1577 | } |
|
1571 | 1578 | except (NodeError, NodeAlreadyExistsError) as e: |
|
1572 | 1579 | error = h.escape(e) |
|
1573 | 1580 | h.flash(error, category='error') |
|
1574 | 1581 | |
|
1575 | 1582 | return { |
|
1576 | 1583 | 'error': error, |
|
1577 | 1584 | 'redirect_url': default_redirect_url |
|
1578 | 1585 | } |
|
1579 | 1586 | except Exception: |
|
1580 | 1587 | log.exception('Error occurred during commit') |
|
1581 | 1588 | error = _('Error occurred during commit') |
|
1582 | 1589 | h.flash(error, category='error') |
|
1583 | 1590 | return { |
|
1584 | 1591 | 'error': error, |
|
1585 | 1592 | 'redirect_url': default_redirect_url |
|
1586 | 1593 | } |
|
1587 | 1594 | |
|
1588 | 1595 | return { |
|
1589 | 1596 | 'error': None, |
|
1590 | 1597 | 'redirect_url': default_redirect_url |
|
1591 | 1598 | } |
|
1592 | 1599 | |
|
1593 | 1600 | @LoginRequired() |
|
1594 | 1601 | @HasRepoPermissionAnyDecorator('repository.write', 'repository.admin') |
|
1595 | 1602 | @CSRFRequired() |
|
1596 | 1603 | def repo_files_replace_file(self): |
|
1597 | 1604 | _ = self.request.translate |
|
1598 | 1605 | c = self.load_default_context() |
|
1599 | 1606 | commit_id, f_path = self._get_commit_and_path() |
|
1600 | 1607 | |
|
1601 | 1608 | self._ensure_not_locked() |
|
1602 | 1609 | |
|
1603 | 1610 | c.commit = self._get_commit_or_redirect(commit_id, redirect_after=False) |
|
1604 | 1611 | if c.commit is None: |
|
1605 | 1612 | c.commit = EmptyCommit(alias=self.rhodecode_vcs_repo.alias) |
|
1606 | 1613 | |
|
1607 | 1614 | if self.rhodecode_vcs_repo.is_empty(): |
|
1608 | 1615 | default_redirect_url = h.route_path( |
|
1609 | 1616 | 'repo_summary', repo_name=self.db_repo_name) |
|
1610 | 1617 | else: |
|
1611 | 1618 | default_redirect_url = h.route_path( |
|
1612 | 1619 | 'repo_commit', repo_name=self.db_repo_name, commit_id='tip') |
|
1613 | 1620 | |
|
1614 | 1621 | if self.rhodecode_vcs_repo.is_empty(): |
|
1615 | 1622 | # for empty repository we cannot check for current branch, we rely on |
|
1616 | 1623 | # c.commit.branch instead |
|
1617 | 1624 | _branch_name, _sha_commit_id, is_head = c.commit.branch, '', True |
|
1618 | 1625 | else: |
|
1619 | 1626 | _branch_name, _sha_commit_id, is_head = \ |
|
1620 | 1627 | self._is_valid_head(commit_id, self.rhodecode_vcs_repo, |
|
1621 | 1628 | landing_ref=self.db_repo.landing_ref_name) |
|
1622 | 1629 | |
|
1623 | 1630 | error = self.forbid_non_head(is_head, f_path, json_mode=True) |
|
1624 | 1631 | if error: |
|
1625 | 1632 | return { |
|
1626 | 1633 | 'error': error, |
|
1627 | 1634 | 'redirect_url': default_redirect_url |
|
1628 | 1635 | } |
|
1629 | 1636 | error = self.check_branch_permission(_branch_name, json_mode=True) |
|
1630 | 1637 | if error: |
|
1631 | 1638 | return { |
|
1632 | 1639 | 'error': error, |
|
1633 | 1640 | 'redirect_url': default_redirect_url |
|
1634 | 1641 | } |
|
1635 | 1642 | |
|
1636 | 1643 | c.default_message = (_('Edited file {} via RhodeCode Enterprise').format(f_path)) |
|
1637 | 1644 | c.f_path = f_path |
|
1638 | 1645 | |
|
1639 | 1646 | r_post = self.request.POST |
|
1640 | 1647 | |
|
1641 | 1648 | message = c.default_message |
|
1642 | 1649 | user_message = r_post.getall('message') |
|
1643 | 1650 | if isinstance(user_message, list) and user_message: |
|
1644 | 1651 | # we take the first from duplicated results if it's not empty |
|
1645 | 1652 | message = user_message[0] if user_message[0] else message |
|
1646 | 1653 | |
|
1647 | 1654 | data_for_replacement = r_post.getall('files_upload') or [] |
|
1648 | 1655 | if (objects_count := len(data_for_replacement)) > 1: |
|
1649 | 1656 | return { |
|
1650 | 1657 | 'error': 'too many files for replacement', |
|
1651 | 1658 | 'redirect_url': default_redirect_url |
|
1652 | 1659 | } |
|
1653 | 1660 | elif not objects_count: |
|
1654 | 1661 | return { |
|
1655 | 1662 | 'error': 'missing files', |
|
1656 | 1663 | 'redirect_url': default_redirect_url |
|
1657 | 1664 | } |
|
1658 | 1665 | |
|
1659 | 1666 | content = data_for_replacement[0].file |
|
1660 | 1667 | retrieved_filename = data_for_replacement[0].filename |
|
1661 | 1668 | |
|
1662 | 1669 | if retrieved_filename.split('.')[-1] != f_path.split('.')[-1]: |
|
1663 | 1670 | return { |
|
1664 | 1671 | 'error': 'file extension of the uploaded file doesn\'t match the original file\'s extension', |
|
1665 | 1672 | 'redirect_url': default_redirect_url |
|
1666 | 1673 | } |
|
1667 | 1674 | |
|
1668 | 1675 | author = self._rhodecode_db_user.full_contact |
|
1669 | 1676 | |
|
1670 | 1677 | try: |
|
1671 | 1678 | commit = ScmModel().update_binary_node( |
|
1672 | 1679 | user=self._rhodecode_db_user.user_id, |
|
1673 | 1680 | repo=self.db_repo, |
|
1674 | 1681 | message=message, |
|
1675 | 1682 | node={ |
|
1676 | 1683 | 'content': content, |
|
1677 | 1684 | 'file_path': f_path.encode(), |
|
1678 | 1685 | }, |
|
1679 | 1686 | parent_commit=c.commit, |
|
1680 | 1687 | author=author, |
|
1681 | 1688 | ) |
|
1682 | 1689 | |
|
1683 | 1690 | h.flash(_('Successfully committed 1 new file'), category='success') |
|
1684 | 1691 | |
|
1685 | 1692 | default_redirect_url = h.route_path( |
|
1686 | 1693 | 'repo_commit', repo_name=self.db_repo_name, commit_id=commit.raw_id) |
|
1687 | 1694 | |
|
1688 | 1695 | except (NodeError, NodeAlreadyExistsError) as e: |
|
1689 | 1696 | error = h.escape(e) |
|
1690 | 1697 | h.flash(error, category='error') |
|
1691 | 1698 | |
|
1692 | 1699 | return { |
|
1693 | 1700 | 'error': error, |
|
1694 | 1701 | 'redirect_url': default_redirect_url |
|
1695 | 1702 | } |
|
1696 | 1703 | except Exception: |
|
1697 | 1704 | log.exception('Error occurred during commit') |
|
1698 | 1705 | error = _('Error occurred during commit') |
|
1699 | 1706 | h.flash(error, category='error') |
|
1700 | 1707 | return { |
|
1701 | 1708 | 'error': error, |
|
1702 | 1709 | 'redirect_url': default_redirect_url |
|
1703 | 1710 | } |
|
1704 | 1711 | |
|
1705 | 1712 | return { |
|
1706 | 1713 | 'error': None, |
|
1707 | 1714 | 'redirect_url': default_redirect_url |
|
1708 | 1715 | } |
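
Both file-upload endpoints above (add and replace) answer with the same JSON contract: every exit path returns a dict whose 'error' key is a message string or None, plus a 'redirect_url' the frontend should follow. A minimal sketch of that shape; the helper name is hypothetical and not part of the codebase:

    # Hypothetical helper illustrating the response contract used by the
    # upload/replace views above: 'error' is a message string or None on
    # success, 'redirect_url' tells the UI where to navigate next.
    def files_json_response(redirect_url, error=None):
        return {
            'error': error,
            'redirect_url': redirect_url,
        }

    # e.g. files_json_response(default_redirect_url, error='missing files')
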
@@ -1,201 +1,205 b'' | |||
|
1 | 1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import os |
|
20 | 20 | import tempfile |
|
21 | 21 | import logging |
|
22 | 22 | |
|
23 | 23 | from pyramid.settings import asbool |
|
24 | 24 | |
|
25 | 25 | from rhodecode.config.settings_maker import SettingsMaker |
|
26 | 26 | from rhodecode.config import utils as config_utils |
|
27 | 27 | |
|
28 | 28 | log = logging.getLogger(__name__) |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | def sanitize_settings_and_apply_defaults(global_config, settings): |
|
32 | 32 | """ |
|
33 | 33 | Applies settings defaults and does all type conversion. |
|
34 | 34 | |
|
35 | 35 | We would move all settings parsing and preparation into this place, so that |
|
36 | 36 | we have only one place left which deals with this part. The remaining parts |
|
37 | 37 | of the application would start to rely fully on well-prepared settings. |
|
38 | 38 | |
|
39 | 39 | This piece would later be split up per topic to avoid a big fat monster |
|
40 | 40 | function. |
|
41 | 41 | """ |
|
42 | 42 | jn = os.path.join |
|
43 | 43 | |
|
44 | 44 | global_settings_maker = SettingsMaker(global_config) |
|
45 | 45 | global_settings_maker.make_setting('debug', default=False, parser='bool') |
|
46 | 46 | debug_enabled = asbool(global_config.get('debug')) |
|
47 | 47 | |
|
48 | 48 | settings_maker = SettingsMaker(settings) |
|
49 | 49 | |
|
50 | 50 | settings_maker.make_setting( |
|
51 | 51 | 'logging.autoconfigure', |
|
52 | 52 | default=False, |
|
53 | 53 | parser='bool') |
|
54 | 54 | |
|
55 | 55 | logging_conf = jn(os.path.dirname(global_config.get('__file__')), 'logging.ini') |
|
56 | 56 | settings_maker.enable_logging(logging_conf, level='DEBUG' if debug_enabled else 'INFO') |
|
57 | 57 | |
|
58 | 58 | # Default includes, possible to change as a user |
|
59 | 59 | pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline') |
|
60 | 60 | log.debug( |
|
61 | 61 | "Using the following pyramid.includes: %s", |
|
62 | 62 | pyramid_includes) |
|
63 | 63 | |
|
64 | 64 | settings_maker.make_setting('rhodecode.edition', 'Community Edition') |
|
65 | 65 | settings_maker.make_setting('rhodecode.edition_id', 'CE') |
|
66 | 66 | |
|
67 | 67 | if 'mako.default_filters' not in settings: |
|
68 | 68 | # set custom default filters if we don't have it defined |
|
69 | 69 | settings['mako.imports'] = 'from rhodecode.lib.base import h_filter' |
|
70 | 70 | settings['mako.default_filters'] = 'h_filter' |
|
71 | 71 | |
|
72 | 72 | if 'mako.directories' not in settings: |
|
73 | 73 | mako_directories = settings.setdefault('mako.directories', [ |
|
74 | 74 | # Base templates of the original application |
|
75 | 75 | 'rhodecode:templates', |
|
76 | 76 | ]) |
|
77 | 77 | log.debug( |
|
78 | 78 | "Using the following Mako template directories: %s", |
|
79 | 79 | mako_directories) |
|
80 | 80 | |
|
81 | 81 | # NOTE(marcink): fix redis requirement for schema of connection since 3.X |
|
82 | 82 | if 'beaker.session.type' in settings and settings['beaker.session.type'] == 'ext:redis': |
|
83 | 83 | raw_url = settings['beaker.session.url'] |
|
84 | 84 | if not raw_url.startswith(('redis://', 'rediss://', 'unix://')): |
|
85 | 85 | settings['beaker.session.url'] = 'redis://' + raw_url |
|
86 | 86 | |
|
87 | 87 | settings_maker.make_setting('__file__', global_config.get('__file__')) |
|
88 | 88 | |
|
89 | 89 | # TODO: johbo: Re-think this, usually the call to config.include |
|
90 | 90 | # should allow to pass in a prefix. |
|
91 | 91 | settings_maker.make_setting('rhodecode.api.url', '/_admin/api') |
|
92 | 92 | |
|
93 | 93 | # Sanitize generic settings. |
|
94 | 94 | settings_maker.make_setting('default_encoding', 'UTF-8', parser='list') |
|
95 | 95 | settings_maker.make_setting('gzip_responses', False, parser='bool') |
|
96 | 96 | settings_maker.make_setting('startup.import_repos', 'false', parser='bool') |
|
97 | 97 | |
|
98 | 98 | # statsd |
|
99 | 99 | settings_maker.make_setting('statsd.enabled', False, parser='bool') |
|
100 | 100 | settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string') |
|
101 | 101 | settings_maker.make_setting('statsd.statsd_port', 9125, parser='int') |
|
102 | 102 | settings_maker.make_setting('statsd.statsd_prefix', '') |
|
103 | 103 | settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool') |
|
104 | 104 | |
|
105 | 105 | settings_maker.make_setting('vcs.svn.compatible_version', '') |
|
106 | 106 | settings_maker.make_setting('vcs.svn.proxy.enabled', True, parser='bool') |
|
107 | 107 | settings_maker.make_setting('vcs.svn.proxy.host', 'http://svn:8090', parser='string') |
|
108 | 108 | settings_maker.make_setting('vcs.hooks.protocol', 'http') |
|
109 | 109 | settings_maker.make_setting('vcs.hooks.host', '*') |
|
110 | 110 | settings_maker.make_setting('vcs.scm_app_implementation', 'http') |
|
111 | 111 | settings_maker.make_setting('vcs.server', '') |
|
112 | 112 | settings_maker.make_setting('vcs.server.protocol', 'http') |
|
113 | 113 | settings_maker.make_setting('vcs.server.enable', 'true', parser='bool') |
|
114 | 114 | settings_maker.make_setting('vcs.hooks.direct_calls', 'false', parser='bool') |
|
115 | 115 | settings_maker.make_setting('vcs.start_server', 'false', parser='bool') |
|
116 | 116 | settings_maker.make_setting('vcs.backends', 'hg, git, svn', parser='list') |
|
117 | 117 | settings_maker.make_setting('vcs.connection_timeout', 3600, parser='int') |
|
118 | 118 | |
|
119 | 119 | settings_maker.make_setting('vcs.methods.cache', True, parser='bool') |
|
120 | 120 | |
|
121 | 121 | # repo_store path |
|
122 | 122 | settings_maker.make_setting('repo_store.path', '/var/opt/rhodecode_repo_store') |
|
123 | 123 | # Support legacy values of vcs.scm_app_implementation. Legacy |
|
124 | 124 | # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http', or |
|
125 | 125 | # disabled since 4.13 'vcsserver.scm_app' which is now mapped to 'http'. |
|
126 | 126 | scm_app_impl = settings['vcs.scm_app_implementation'] |
|
127 | 127 | if scm_app_impl in ['rhodecode.lib.middleware.utils.scm_app_http', 'vcsserver.scm_app']: |
|
128 | 128 | settings['vcs.scm_app_implementation'] = 'http' |
|
129 | 129 | |
|
130 | 130 | settings_maker.make_setting('appenlight', False, parser='bool') |
|
131 | 131 | |
|
132 | 132 | temp_store = tempfile.gettempdir() |
|
133 | 133 | tmp_cache_dir = jn(temp_store, 'rc_cache') |
|
134 | 134 | |
|
135 | 135 | # save default, cache dir, and use it for all backends later. |
|
136 | 136 | default_cache_dir = settings_maker.make_setting( |
|
137 | 137 | 'cache_dir', |
|
138 | 138 | default=tmp_cache_dir, default_when_empty=True, |
|
139 | 139 | parser='dir:ensured') |
|
140 | 140 | |
|
141 | 141 | # exception store cache |
|
142 | 142 | settings_maker.make_setting( |
|
143 | 143 | 'exception_tracker.store_path', |
|
144 | 144 | default=jn(default_cache_dir, 'exc_store'), default_when_empty=True, |
|
145 | 145 | parser='dir:ensured' |
|
146 | 146 | ) |
|
147 | 147 | |
|
148 | 148 | settings_maker.make_setting( |
|
149 | 149 | 'celerybeat-schedule.path', |
|
150 | 150 | default=jn(default_cache_dir, 'celerybeat_schedule', 'celerybeat-schedule.db'), default_when_empty=True, |
|
151 | 151 | parser='file:ensured' |
|
152 | 152 | ) |
|
153 | 153 | |
|
154 | 154 | settings_maker.make_setting('exception_tracker.send_email', False, parser='bool') |
|
155 | 155 | settings_maker.make_setting('exception_tracker.email_prefix', '[RHODECODE ERROR]', default_when_empty=True) |
|
156 | 156 | |
|
157 | 157 | # sessions, ensure file since no-value is memory |
|
158 | 158 | settings_maker.make_setting('beaker.session.type', 'file') |
|
159 | 159 | settings_maker.make_setting('beaker.session.data_dir', jn(default_cache_dir, 'session_data')) |
|
160 | 160 | |
|
161 | 161 | # cache_general |
|
162 | 162 | settings_maker.make_setting('rc_cache.cache_general.backend', 'dogpile.cache.rc.file_namespace') |
|
163 | 163 | settings_maker.make_setting('rc_cache.cache_general.expiration_time', 60 * 60 * 12, parser='int') |
|
164 | 164 | settings_maker.make_setting('rc_cache.cache_general.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_general.db')) |
|
165 | 165 | |
|
166 | 166 | # cache_perms |
|
167 | 167 | settings_maker.make_setting('rc_cache.cache_perms.backend', 'dogpile.cache.rc.file_namespace') |
|
168 | 168 | settings_maker.make_setting('rc_cache.cache_perms.expiration_time', 60 * 60, parser='int') |
|
169 | 169 | settings_maker.make_setting('rc_cache.cache_perms.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_perms_db')) |
|
170 | 170 | |
|
171 | 171 | # cache_repo |
|
172 | 172 | settings_maker.make_setting('rc_cache.cache_repo.backend', 'dogpile.cache.rc.file_namespace') |
|
173 | 173 | settings_maker.make_setting('rc_cache.cache_repo.expiration_time', 60 * 60 * 24 * 30, parser='int') |
|
174 | 174 | settings_maker.make_setting('rc_cache.cache_repo.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_repo_db')) |
|
175 | 175 | |
|
176 | 176 | # cache_license |
|
177 | 177 | settings_maker.make_setting('rc_cache.cache_license.backend', 'dogpile.cache.rc.file_namespace') |
|
178 | 178 | settings_maker.make_setting('rc_cache.cache_license.expiration_time', 60 * 5, parser='int') |
|
179 | 179 | settings_maker.make_setting('rc_cache.cache_license.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_license_db')) |
|
180 | 180 | |
|
181 | 181 | # cache_repo_longterm memory, 96H |
|
182 | 182 | settings_maker.make_setting('rc_cache.cache_repo_longterm.backend', 'dogpile.cache.rc.memory_lru') |
|
183 | 183 | settings_maker.make_setting('rc_cache.cache_repo_longterm.expiration_time', 345600, parser='int') |
|
184 | 184 | settings_maker.make_setting('rc_cache.cache_repo_longterm.max_size', 10000, parser='int') |
|
185 | 185 | |
|
186 | 186 | # sql_cache_short |
|
187 | 187 | settings_maker.make_setting('rc_cache.sql_cache_short.backend', 'dogpile.cache.rc.memory_lru') |
|
188 | 188 | settings_maker.make_setting('rc_cache.sql_cache_short.expiration_time', 30, parser='int') |
|
189 | 189 | settings_maker.make_setting('rc_cache.sql_cache_short.max_size', 10000, parser='int') |
|
190 | 190 | |
|
191 | 191 | # archive_cache |
|
192 | settings_maker.make_setting('archive_cache.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,) | |
|
193 | settings_maker.make_setting('archive_cache.cache_size_gb', 10, parser='int') | |

194 | settings_maker.make_setting('archive_cache.cache_shards', 10, parser='int') | |
|
192 | settings_maker.make_setting('archive_cache.locking.url', 'redis://redis:6379/1') | |
|
193 | settings_maker.make_setting('archive_cache.backend.type', 'filesystem') | |
|
194 | ||
|
195 | settings_maker.make_setting('archive_cache.filesystem.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,) | |
|
196 | settings_maker.make_setting('archive_cache.filesystem.cache_size_gb', 10, parser='float') | |
|
197 | settings_maker.make_setting('archive_cache.filesystem.cache_shards', 8, parser='int') | |
|
198 | settings_maker.make_setting('archive_cache.filesystem.eviction_policy', 'least-recently-stored') | |
|
195 | 199 | |
|
196 | 200 | settings_maker.env_expand() |
|
197 | 201 | |
|
198 | 202 | # configure instance id |
|
199 | 203 | config_utils.set_instance_id(settings) |
|
200 | 204 | |
|
201 | 205 | return settings |
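
The hunk above replaces the flat archive_cache.* options with a backend-namespaced layout. As a rough sketch (not authoritative), the defaults registered above amount to the following settings when nothing is overridden in the .ini file; '<cache_dir>' stands in for the resolved cache_dir value computed at runtime:

    # Sketch of the archive_cache defaults registered above (values as parsed).
    archive_cache_defaults = {
        'archive_cache.locking.url': 'redis://redis:6379/1',
        'archive_cache.backend.type': 'filesystem',
        'archive_cache.filesystem.store_dir': '<cache_dir>/archive_cache',
        'archive_cache.filesystem.cache_size_gb': 10.0,   # parser='float'
        'archive_cache.filesystem.cache_shards': 8,       # parser='int'
        'archive_cache.filesystem.eviction_policy': 'least-recently-stored',
    }
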
@@ -1,88 +1,264 b'' | |||
|
1 | # Copyright (C) 2015-2023 RhodeCode GmbH | |

1 | # Copyright (C) 2015-2024 RhodeCode GmbH | |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | import logging | |
|
19 | import codecs | |
|
20 | import contextlib | |
|
21 | import functools | |
|
20 | 22 | import os |
|
21 | import diskcache | |
|
22 | from diskcache import RLock | |
|
23 | import logging | |
|
24 | import time | |
|
25 | import typing | |
|
26 | import zlib | |
|
27 | ||
|
28 | from rhodecode.lib.ext_json import json | |
|
29 | from .lock import GenerationLock | |
|
23 | 30 | |
|
24 | 31 | log = logging.getLogger(__name__) |
|
25 | 32 | |
|
26 | 33 | cache_meta = None |
|
27 | 34 | |
|
35 | UNKNOWN = -241 | |
|
36 | NO_VAL = -917 | |
|
28 | 37 | |
|
29 | class ReentrantLock(RLock): | |
|
30 | def __enter__(self): | |
|
31 | reentrant_lock_key = self._key | |
|
38 | MODE_BINARY = 'BINARY' | |
|
39 | ||
|
40 | ||
|
41 | class FileSystemCache: | |
|
42 | ||
|
43 | def __init__(self, index, directory, **settings): | |
|
44 | self._index = index | |
|
45 | self._directory = directory | |
|
46 | ||
|
47 | def _write_file(self, full_path, iterator, mode, encoding=None): | |
|
48 | full_dir, _ = os.path.split(full_path) | |
|
49 | ||
|
50 | for count in range(1, 11): | |
|
51 | with contextlib.suppress(OSError): | |
|
52 | os.makedirs(full_dir) | |
|
53 | ||
|
54 | try: | |
|
55 | # Another cache may have deleted the directory before | |
|
56 | # the file could be opened. | |
|
57 | writer = open(full_path, mode, encoding=encoding) | |
|
58 | except OSError: | |
|
59 | if count == 10: | |
|
60 | # Give up after 10 tries to open the file. | |
|
61 | raise | |
|
62 | continue | |
|
63 | ||
|
64 | with writer: | |
|
65 | size = 0 | |
|
66 | for chunk in iterator: | |
|
67 | size += len(chunk) | |
|
68 | writer.write(chunk) | |
|
69 | return size | |
|
70 | ||
|
71 | def _get_keyfile(self, key): | |
|
72 | return os.path.join(self._directory, f'{key}.key') | |
|
73 | ||
|
74 | def store(self, key, value_reader, metadata): | |
|
75 | filename, full_path = self.random_filename() | |
|
76 | key_file = self._get_keyfile(key) | |
|
77 | ||
|
78 | # STORE METADATA | |
|
79 | _metadata = { | |
|
80 | "version": "v1", | |
|
81 | "timestamp": time.time(), | |
|
82 | "filename": filename, | |
|
83 | "full_path": full_path, | |
|
84 | "key_file": key_file, | |
|
85 | } | |
|
86 | if metadata: | |
|
87 | _metadata.update(metadata) | |
|
88 | ||
|
89 | reader = functools.partial(value_reader.read, 2**22) | |
|
90 | ||
|
91 | iterator = iter(reader, b'') | |
|
92 | size = self._write_file(full_path, iterator, 'xb') | |
|
93 | ||
|
94 | # after archive is finished, we create a key to save the presence of the binary file | |
|
95 | with open(key_file, 'wb') as f: | |
|
96 | f.write(json.dumps(_metadata)) | |
|
97 | ||
|
98 | return key, size, MODE_BINARY, filename, _metadata | |
|
99 | ||
|
100 | def fetch(self, key) -> tuple[typing.BinaryIO, dict]: | |
|
101 | if key not in self: | |
|
102 | raise KeyError(key) | |
|
103 | ||
|
104 | key_file = self._get_keyfile(key) | |
|
105 | with open(key_file, 'rb') as f: | |
|
106 | metadata = json.loads(f.read()) | |
|
107 | ||
|
108 | filename = metadata['filename'] | |
|
109 | ||
|
110 | return open(os.path.join(self._directory, filename), 'rb'), metadata | |
|
111 | ||
|
112 | def random_filename(self): | |
|
113 | """Return filename and full-path tuple for file storage. | |
|
114 | ||
|
115 | Filename will be a randomly generated 28 character hexadecimal string | |
|
116 | with ".archive_cache" suffixed. Two levels of sub-directories will be used to | |
|
117 | reduce the size of directories. On older filesystems, lookups in | |
|
118 | directories with many files may be slow. | |
|
119 | """ | |
|
32 | 120 | |
|
33 | log.debug('Acquire ReentrantLock(key=%s) for archive cache generation...', reentrant_lock_key) | |
|
34 | #self.acquire() | |
|
35 | log.debug('Lock for key=%s acquired', reentrant_lock_key) | |
|
121 | hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8') | |
|
122 | sub_dir = os.path.join(hex_name[:2], hex_name[2:4]) | |
|
123 | name = hex_name[4:] + '.archive_cache' | |
|
124 | filename = os.path.join(sub_dir, name) | |
|
125 | full_path = os.path.join(self._directory, filename) | |
|
126 | return filename, full_path | |
|
127 | ||
|
128 | def hash(self, key): | |
|
129 | """Compute portable hash for `key`. | |
|
130 | ||
|
131 | :param key: key to hash | |
|
132 | :return: hash value | |
|
133 | ||
|
134 | """ | |
|
135 | mask = 0xFFFFFFFF | |
|
136 | return zlib.adler32(key.encode('utf-8')) & mask # noqa | |
|
137 | ||
|
138 | def __contains__(self, key): | |
|
139 | """Return `True` if `key` matching item is found in cache. | |
|
140 | ||
|
141 | :param key: key matching item | |
|
142 | :return: True if key matching item | |
|
143 | ||
|
144 | """ | |
|
145 | key_file = self._get_keyfile(key) | |
|
146 | return os.path.exists(key_file) | |
|
147 | ||
|
148 | ||
|
149 | class FanoutCache: | |
|
150 | """Cache that shards keys and values.""" | |
|
151 | ||
|
152 | def __init__( | |
|
153 | self, directory=None, **settings | |
|
154 | ): | |
|
155 | """Initialize cache instance. | |
|
156 | ||
|
157 | :param str directory: cache directory | |
|
158 | :param settings: settings dict | |
|
159 | ||
|
160 | """ | |
|
161 | if directory is None: | |
|
162 | raise ValueError('directory cannot be None') | |
|
163 | ||
|
164 | directory = str(directory) | |
|
165 | directory = os.path.expanduser(directory) | |
|
166 | directory = os.path.expandvars(directory) | |
|
167 | self._directory = directory | |
|
36 | 168 | |
|
37 | def __exit__(self, *exc_info): | |
|
38 | #self.release() | |
|
39 | pass | |
|
169 | self._count = settings.pop('cache_shards') | |
|
170 | self._locking_url = settings.pop('locking_url') | |
|
171 | ||
|
172 | self._shards = tuple( | |
|
173 | FileSystemCache( | |
|
174 | index=num, | |
|
175 | directory=os.path.join(directory, 'shard_%03d' % num), | |
|
176 | **settings, | |
|
177 | ) | |
|
178 | for num in range(self._count) | |
|
179 | ) | |
|
180 | self._hash = self._shards[0].hash | |
|
181 | ||
|
182 | def get_lock(self, lock_key): | |
|
183 | return GenerationLock(lock_key, self._locking_url) | |
|
184 | ||
|
185 | def _get_shard(self, key) -> FileSystemCache: | |
|
186 | index = self._hash(key) % self._count | |
|
187 | shard = self._shards[index] | |
|
188 | return shard | |
|
189 | ||
|
190 | def store(self, key, value_reader, metadata=None): | |
|
191 | shard = self._get_shard(key) | |
|
192 | return shard.store(key, value_reader, metadata) | |
|
193 | ||
|
194 | def fetch(self, key): | |
|
195 | """Return file handle corresponding to `key` from cache. | |
|
196 | """ | |
|
197 | shard = self._get_shard(key) | |
|
198 | return shard.fetch(key) | |
|
199 | ||
|
200 | def has_key(self, key): | |
|
201 | """Return `True` if `key` matching item is found in cache. | |
|
202 | ||
|
203 | :param key: key for item | |
|
204 | :return: True if key is found | |
|
205 | ||
|
206 | """ | |
|
207 | shard = self._get_shard(key) | |
|
208 | return key in shard | |
|
209 | ||
|
210 | def __contains__(self, item): | |
|
211 | return self.has_key(item) | |
|
212 | ||
|
213 | def evict(self): | |
|
214 | """Remove old items based on the conditions""" | |
|
215 | # TODO: Implement this... | |
|
216 | return | |
|
40 | 217 | |
|
41 | 218 | |
|
42 | 219 | def get_archival_config(config): |
|
43 | 220 | |
|
44 | 221 | final_config = { |
|
45 | 'archive_cache.eviction_policy': 'least-frequently-used' | |
|
222 | ||
|
46 | 223 | } |
|
47 | 224 | |
|
48 | 225 | for k, v in config.items(): |
|
49 | 226 | if k.startswith('archive_cache'): |
|
50 | 227 | final_config[k] = v |
|
51 | 228 | |
|
52 | 229 | return final_config |
|
53 | 230 | |
|
54 | 231 | |
|
55 | 232 | def get_archival_cache_store(config): |
|
56 | 233 | |
|
57 | 234 | global cache_meta |
|
58 | 235 | if cache_meta is not None: |
|
59 | 236 | return cache_meta |
|
60 | 237 | |
|
61 | 238 | config = get_archival_config(config) |
|
239 | backend = config['archive_cache.backend.type'] | |
|
240 | if backend != 'filesystem': | |
|
241 | raise ValueError('archive_cache.backend.type only supports "filesystem"') | |
|
62 | 242 | |
|
63 | archive_cache_dir = config['archive_cache.store_dir'] | |

64 | archive_cache_size_gb = config['archive_cache.cache_size_gb'] | |

65 | archive_cache_shards = config['archive_cache.cache_shards'] | |

66 | archive_cache_eviction_policy = config['archive_cache.eviction_policy'] | |

243 | archive_cache_locking_url = config['archive_cache.locking.url'] | |
|
244 | archive_cache_dir = config['archive_cache.filesystem.store_dir'] | |
|
245 | archive_cache_size_gb = config['archive_cache.filesystem.cache_size_gb'] | |
|
246 | archive_cache_shards = config['archive_cache.filesystem.cache_shards'] | |
|
247 | archive_cache_eviction_policy = config['archive_cache.filesystem.eviction_policy'] | |
|
67 | 248 | |
|
68 | 249 | log.debug('Initializing archival cache instance under %s', archive_cache_dir) |
|
69 | 250 | |
|
70 | 251 | # check if it's ok to write, and re-create the archive cache |
|
71 | 252 | if not os.path.isdir(archive_cache_dir): |
|
72 | 253 | os.makedirs(archive_cache_dir, exist_ok=True) |
|
73 | 254 | |
|
74 | d_cache = diskcache.FanoutCache( | |

75 | archive_cache_dir, shards=archive_cache_shards, | |

76 | cull_limit=0, # manual eviction required | |
|
77 | size_limit=archive_cache_size_gb * 1024 * 1024 * 1024, | |
|
78 | eviction_policy=archive_cache_eviction_policy, | |
|
79 | timeout=30 | |
|
255 | d_cache = FanoutCache( | |
|
256 | archive_cache_dir, | |
|
257 | locking_url=archive_cache_locking_url, | |
|
258 | cache_shards=archive_cache_shards, | |
|
259 | cache_size_limit=archive_cache_size_gb * 1024 * 1024 * 1024, | |
|
260 | cache_eviction_policy=archive_cache_eviction_policy | |
|
80 | 261 | ) |
|
81 | 262 | cache_meta = d_cache |
|
82 | 263 | return cache_meta |
|
83 | 264 | |
|
84 | ||
|
85 | def includeme(config): | |
|
86 | # init our cache at start | |
|
87 | settings = config.get_settings() | |
|
88 | get_archival_cache_store(settings) |
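
The new FanoutCache replaces diskcache with a plain file-per-archive layout: a key is hashed with adler32 and routed to one of N shard directories, where the payload gets a random hex name ending in '.archive_cache' and a small '<key>.key' JSON file records its metadata. A minimal sketch of the shard routing, with an example key; it mirrors FileSystemCache.hash and FanoutCache._get_shard above:

    # Sketch of how a key is routed to a shard directory (same scheme as above).
    import zlib

    def shard_for(key: str, shard_count: int = 8) -> str:
        index = (zlib.adler32(key.encode('utf-8')) & 0xFFFFFFFF) % shard_count
        return 'shard_%03d' % index

    # Example: the archive for one key always lands in the same shard directory;
    # fetch() later reads '<key>.key' from that shard to locate the binary file.
    print(shard_for('some-repo-commit-abcdef.zip'))
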
@@ -1,844 +1,851 b'' | |||
|
1 | 1 | # Copyright (C) 2017-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | import os |
|
21 | 21 | import sys |
|
22 | 22 | import time |
|
23 | 23 | import platform |
|
24 | 24 | import collections |
|
25 | 25 | import psutil |
|
26 | 26 | from functools import wraps |
|
27 | 27 | |
|
28 | 28 | import pkg_resources |
|
29 | 29 | import logging |
|
30 | 30 | import resource |
|
31 | 31 | |
|
32 | 32 | import configparser |
|
33 | 33 | |
|
34 | 34 | from rc_license.models import LicenseModel |
|
35 | 35 | from rhodecode.lib.str_utils import safe_str |
|
36 | 36 | |
|
37 | 37 | log = logging.getLogger(__name__) |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | _NA = 'NOT AVAILABLE' |
|
41 | 41 | _NA_FLOAT = 0.0 |
|
42 | 42 | |
|
43 | 43 | STATE_OK = 'ok' |
|
44 | 44 | STATE_ERR = 'error' |
|
45 | 45 | STATE_WARN = 'warning' |
|
46 | 46 | |
|
47 | 47 | STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK} |
|
48 | 48 | |
|
49 | 49 | |
|
50 | 50 | registered_helpers = {} |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def register_sysinfo(func): |
|
54 | 54 | """ |
|
55 | 55 | @register_sysinfo |
|
56 | 56 | def db_check(): |
|
57 | 57 | pass |
|
58 | 58 | |
|
59 | 59 | db_check == registered_helpers['db_check'] |
|
60 | 60 | """ |
|
61 | 61 | global registered_helpers |
|
62 | 62 | registered_helpers[func.__name__] = func |
|
63 | 63 | |
|
64 | 64 | @wraps(func) |
|
65 | 65 | def _wrapper(*args, **kwargs): |
|
66 | 66 | return func(*args, **kwargs) |
|
67 | 67 | return _wrapper |
|
68 | 68 | |
|
69 | 69 | |
|
70 | 70 | # HELPERS |
|
71 | 71 | def percentage(part: (int, float), whole: (int, float)): |
|
72 | 72 | whole = float(whole) |
|
73 | 73 | if whole > 0: |
|
74 | 74 | return round(100 * float(part) / whole, 1) |
|
75 | 75 | return 0.0 |
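
A quick worked example of the helper above, with illustrative values:

    # percentage() from above: 100 * 512 / 2048 = 25.0, rounded to one decimal.
    assert percentage(512, 2048) == 25.0
    assert percentage(5, 0) == 0.0   # zero 'whole' is guarded and yields 0.0
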
|
76 | 76 | |
|
77 | 77 | |
|
78 | 78 | def get_storage_size(storage_path): |
|
79 | 79 | sizes = [] |
|
80 | 80 | for file_ in os.listdir(storage_path): |
|
81 | 81 | storage_file = os.path.join(storage_path, file_) |
|
82 | 82 | if os.path.isfile(storage_file): |
|
83 | 83 | try: |
|
84 | 84 | sizes.append(os.path.getsize(storage_file)) |
|
85 | 85 | except OSError: |
|
86 | 86 | log.exception('Failed to get size of storage file %s', storage_file) |
|
87 | 87 | pass |
|
88 | 88 | |
|
89 | 89 | return sum(sizes) |
|
90 | 90 | |
|
91 | 91 | |
|
92 | 92 | def get_resource(resource_type): |
|
93 | 93 | try: |
|
94 | 94 | return resource.getrlimit(resource_type) |
|
95 | 95 | except Exception: |
|
96 | 96 | return 'NOT_SUPPORTED' |
|
97 | 97 | |
|
98 | 98 | |
|
99 | 99 | def get_cert_path(ini_path): |
|
100 | 100 | default = '/etc/ssl/certs/ca-certificates.crt' |
|
101 | 101 | control_ca_bundle = os.path.join( |
|
102 | 102 | os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ini_path)))), |
|
103 | 103 | '.rccontrol-profile/etc/ca-bundle.crt') |
|
104 | 104 | if os.path.isfile(control_ca_bundle): |
|
105 | 105 | default = control_ca_bundle |
|
106 | 106 | |
|
107 | 107 | return default |
|
108 | 108 | |
|
109 | 109 | |
|
110 | 110 | class SysInfoRes(object): |
|
111 | 111 | def __init__(self, value, state=None, human_value=None): |
|
112 | 112 | self.value = value |
|
113 | 113 | self.state = state or STATE_OK_DEFAULT |
|
114 | 114 | self.human_value = human_value or value |
|
115 | 115 | |
|
116 | 116 | def __json__(self): |
|
117 | 117 | return { |
|
118 | 118 | 'value': self.value, |
|
119 | 119 | 'state': self.state, |
|
120 | 120 | 'human_value': self.human_value, |
|
121 | 121 | } |
|
122 | 122 | |
|
123 | 123 | def get_value(self): |
|
124 | 124 | return self.__json__() |
|
125 | 125 | |
|
126 | 126 | def __str__(self): |
|
127 | 127 | return f'<SysInfoRes({self.__json__()})>' |
|
128 | 128 | |
|
129 | 129 | |
|
130 | 130 | class SysInfo(object): |
|
131 | 131 | |
|
132 | 132 | def __init__(self, func_name, **kwargs): |
|
133 | 133 | self.function_name = func_name |
|
134 | 134 | self.value = _NA |
|
135 | 135 | self.state = None |
|
136 | 136 | self.kwargs = kwargs or {} |
|
137 | 137 | |
|
138 | 138 | def __call__(self): |
|
139 | 139 | computed = self.compute(**self.kwargs) |
|
140 | 140 | if not isinstance(computed, SysInfoRes): |
|
141 | 141 | raise ValueError( |
|
142 | 142 | 'computed value for {} is not instance of ' |
|
143 | 143 | '{}, got {} instead'.format( |
|
144 | 144 | self.function_name, SysInfoRes, type(computed))) |
|
145 | 145 | return computed.__json__() |
|
146 | 146 | |
|
147 | 147 | def __str__(self): |
|
148 | 148 | return f'<SysInfo({self.function_name})>' |
|
149 | 149 | |
|
150 | 150 | def compute(self, **kwargs): |
|
151 | 151 | return self.function_name(**kwargs) |
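
SysInfo wraps one of the registered helper functions, enforces that it returns a SysInfoRes, and hands back the plain dict from SysInfoRes.__json__(). A small usage sketch based on the classes above; the helper chosen is just an example of any function in registered_helpers:

    # Calling the wrapper validates the result type and returns the JSON-ready
    # dict with 'value', 'state' and 'human_value' keys.
    result = SysInfo(machine_load)()
    print(result['state'], result['human_value'])
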
|
152 | 152 | |
|
153 | 153 | |
|
154 | 154 | # SysInfo functions |
|
155 | 155 | @register_sysinfo |
|
156 | 156 | def python_info(): |
|
157 | 157 | value = dict(version=f'{platform.python_version()}:{platform.python_implementation()}', |
|
158 | 158 | executable=sys.executable) |
|
159 | 159 | return SysInfoRes(value=value) |
|
160 | 160 | |
|
161 | 161 | |
|
162 | 162 | @register_sysinfo |
|
163 | 163 | def py_modules(): |
|
164 | 164 | mods = dict([(p.project_name, {'version': p.version, 'location': p.location}) |
|
165 | 165 | for p in pkg_resources.working_set]) |
|
166 | 166 | |
|
167 | 167 | value = sorted(mods.items(), key=lambda k: k[0].lower()) |
|
168 | 168 | return SysInfoRes(value=value) |
|
169 | 169 | |
|
170 | 170 | |
|
171 | 171 | @register_sysinfo |
|
172 | 172 | def platform_type(): |
|
173 | 173 | from rhodecode.lib.utils import generate_platform_uuid |
|
174 | 174 | |
|
175 | 175 | value = dict( |
|
176 | 176 | name=safe_str(platform.platform()), |
|
177 | 177 | uuid=generate_platform_uuid() |
|
178 | 178 | ) |
|
179 | 179 | return SysInfoRes(value=value) |
|
180 | 180 | |
|
181 | 181 | |
|
182 | 182 | @register_sysinfo |
|
183 | 183 | def locale_info(): |
|
184 | 184 | import locale |
|
185 | 185 | |
|
186 | 186 | def safe_get_locale(locale_name): |
|
187 | 187 | try: |
|
188 | 188 | return locale.getlocale(locale_name) |
|
189 | 189 | except TypeError: |
|
190 | 190 | return f'FAILED_LOCALE_GET:{locale_name}' |
|
191 | 191 | |
|
192 | 192 | value = dict( |
|
193 | 193 | locale_default=locale.getlocale(), |
|
194 | 194 | locale_lc_all=safe_get_locale(locale.LC_ALL), |
|
195 | 195 | locale_lc_ctype=safe_get_locale(locale.LC_CTYPE), |
|
196 | 196 | lang_env=os.environ.get('LANG'), |
|
197 | 197 | lc_all_env=os.environ.get('LC_ALL'), |
|
198 | 198 | local_archive_env=os.environ.get('LOCALE_ARCHIVE'), |
|
199 | 199 | ) |
|
200 | 200 | human_value = \ |
|
201 | 201 | f"LANG: {value['lang_env']}, \ |
|
202 | 202 | locale LC_ALL: {value['locale_lc_all']}, \ |
|
203 | 203 | locale LC_CTYPE: {value['locale_lc_ctype']}, \ |
|
204 | 204 | Default locales: {value['locale_default']}" |
|
205 | 205 | |
|
206 | 206 | return SysInfoRes(value=value, human_value=human_value) |
|
207 | 207 | |
|
208 | 208 | |
|
209 | 209 | @register_sysinfo |
|
210 | 210 | def ulimit_info(): |
|
211 | 211 | data = collections.OrderedDict([ |
|
212 | 212 | ('cpu time (seconds)', get_resource(resource.RLIMIT_CPU)), |
|
213 | 213 | ('file size', get_resource(resource.RLIMIT_FSIZE)), |
|
214 | 214 | ('stack size', get_resource(resource.RLIMIT_STACK)), |
|
215 | 215 | ('core file size', get_resource(resource.RLIMIT_CORE)), |
|
216 | 216 | ('address space size', get_resource(resource.RLIMIT_AS)), |
|
217 | 217 | ('locked in mem size', get_resource(resource.RLIMIT_MEMLOCK)), |
|
218 | 218 | ('heap size', get_resource(resource.RLIMIT_DATA)), |
|
219 | 219 | ('rss size', get_resource(resource.RLIMIT_RSS)), |
|
220 | 220 | ('number of processes', get_resource(resource.RLIMIT_NPROC)), |
|
221 | 221 | ('open files', get_resource(resource.RLIMIT_NOFILE)), |
|
222 | 222 | ]) |
|
223 | 223 | |
|
224 | 224 | text = ', '.join(f'{k}:{v}' for k, v in data.items()) |
|
225 | 225 | |
|
226 | 226 | value = { |
|
227 | 227 | 'limits': data, |
|
228 | 228 | 'text': text, |
|
229 | 229 | } |
|
230 | 230 | return SysInfoRes(value=value) |
|
231 | 231 | |
|
232 | 232 | |
|
233 | 233 | @register_sysinfo |
|
234 | 234 | def uptime(): |
|
235 | 235 | from rhodecode.lib.helpers import age, time_to_datetime |
|
236 | 236 | from rhodecode.translation import TranslationString |
|
237 | 237 | |
|
238 | 238 | value = dict(boot_time=0, uptime=0, text='') |
|
239 | 239 | state = STATE_OK_DEFAULT |
|
240 | 240 | |
|
241 | 241 | boot_time = psutil.boot_time() |
|
242 | 242 | value['boot_time'] = boot_time |
|
243 | 243 | value['uptime'] = time.time() - boot_time |
|
244 | 244 | |
|
245 | 245 | date_or_age = age(time_to_datetime(boot_time)) |
|
246 | 246 | if isinstance(date_or_age, TranslationString): |
|
247 | 247 | date_or_age = date_or_age.interpolate() |
|
248 | 248 | |
|
249 | 249 | human_value = value.copy() |
|
250 | 250 | human_value['boot_time'] = time_to_datetime(boot_time) |
|
251 | 251 | human_value['uptime'] = age(time_to_datetime(boot_time), show_suffix=False) |
|
252 | 252 | |
|
253 | 253 | human_value['text'] = f'Server started {date_or_age}' |
|
254 | 254 | return SysInfoRes(value=value, human_value=human_value) |
|
255 | 255 | |
|
256 | 256 | |
|
257 | 257 | @register_sysinfo |
|
258 | 258 | def memory(): |
|
259 | 259 | from rhodecode.lib.helpers import format_byte_size_binary |
|
260 | 260 | value = dict(available=0, used=0, used_real=0, cached=0, percent=0, |
|
261 | 261 | percent_used=0, free=0, inactive=0, active=0, shared=0, |
|
262 | 262 | total=0, buffers=0, text='') |
|
263 | 263 | |
|
264 | 264 | state = STATE_OK_DEFAULT |
|
265 | 265 | |
|
266 | 266 | value.update(dict(psutil.virtual_memory()._asdict())) |
|
267 | 267 | value['used_real'] = value['total'] - value['available'] |
|
268 | 268 | value['percent_used'] = psutil._common.usage_percent(value['used_real'], value['total'], 1) |
|
269 | 269 | |
|
270 | 270 | human_value = value.copy() |
|
271 | 271 | human_value['text'] = '{}/{}, {}% used'.format( |
|
272 | 272 | format_byte_size_binary(value['used_real']), |
|
273 | 273 | format_byte_size_binary(value['total']), |
|
274 | 274 | value['percent_used']) |
|
275 | 275 | |
|
276 | 276 | keys = list(value.keys())[::] |
|
277 | 277 | keys.pop(keys.index('percent')) |
|
278 | 278 | keys.pop(keys.index('percent_used')) |
|
279 | 279 | keys.pop(keys.index('text')) |
|
280 | 280 | for k in keys: |
|
281 | 281 | human_value[k] = format_byte_size_binary(value[k]) |
|
282 | 282 | |
|
283 | 283 | if state['type'] == STATE_OK and value['percent_used'] > 90: |
|
284 | 284 | msg = 'Critical: your available RAM memory is very low.' |
|
285 | 285 | state = {'message': msg, 'type': STATE_ERR} |
|
286 | 286 | |
|
287 | 287 | elif state['type'] == STATE_OK and value['percent_used'] > 70: |
|
288 | 288 | msg = 'Warning: your available RAM memory is running low.' |
|
289 | 289 | state = {'message': msg, 'type': STATE_WARN} |
|
290 | 290 | |
|
291 | 291 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
292 | 292 | |
|
293 | 293 | |
|
294 | 294 | @register_sysinfo |
|
295 | 295 | def machine_load(): |
|
296 | 296 | value = {'1_min': _NA_FLOAT, '5_min': _NA_FLOAT, '15_min': _NA_FLOAT, 'text': ''} |
|
297 | 297 | state = STATE_OK_DEFAULT |
|
298 | 298 | |
|
299 | 299 | # load averages |
|
300 | 300 | if hasattr(psutil.os, 'getloadavg'): |
|
301 | 301 | value.update(dict( |
|
302 | 302 | list(zip(['1_min', '5_min', '15_min'], psutil.os.getloadavg())) |
|
303 | 303 | )) |
|
304 | 304 | |
|
305 | 305 | human_value = value.copy() |
|
306 | 306 | human_value['text'] = '1min: {}, 5min: {}, 15min: {}'.format( |
|
307 | 307 | value['1_min'], value['5_min'], value['15_min']) |
|
308 | 308 | |
|
309 | 309 | if state['type'] == STATE_OK and value['15_min'] > 5.0: |
|
310 | 310 | msg = 'Warning: your machine load is very high.' |
|
311 | 311 | state = {'message': msg, 'type': STATE_WARN} |
|
312 | 312 | |
|
313 | 313 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
314 | 314 | |
|
315 | 315 | |
|
316 | 316 | @register_sysinfo |
|
317 | 317 | def cpu(): |
|
318 | 318 | value = {'cpu': 0, 'cpu_count': 0, 'cpu_usage': []} |
|
319 | 319 | state = STATE_OK_DEFAULT |
|
320 | 320 | |
|
321 | 321 | value['cpu'] = psutil.cpu_percent(0.5) |
|
322 | 322 | value['cpu_usage'] = psutil.cpu_percent(0.5, percpu=True) |
|
323 | 323 | value['cpu_count'] = psutil.cpu_count() |
|
324 | 324 | |
|
325 | 325 | human_value = value.copy() |
|
326 | 326 | human_value['text'] = '{} cores at {} %'.format(value['cpu_count'], value['cpu']) |
|
327 | 327 | |
|
328 | 328 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
329 | 329 | |
|
330 | 330 | |
|
331 | 331 | @register_sysinfo |
|
332 | 332 | def storage(): |
|
333 | 333 | from rhodecode.lib.helpers import format_byte_size_binary |
|
334 | 334 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
335 | 335 | path = get_rhodecode_repo_store_path() |
|
336 | 336 | |
|
337 | 337 | value = dict(percent=0, used=0, total=0, path=path, text='') |
|
338 | 338 | state = STATE_OK_DEFAULT |
|
339 | 339 | |
|
340 | 340 | try: |
|
341 | 341 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
342 | 342 | except Exception as e: |
|
343 | 343 | log.exception('Failed to fetch disk info') |
|
344 | 344 | state = {'message': str(e), 'type': STATE_ERR} |
|
345 | 345 | |
|
346 | 346 | human_value = value.copy() |
|
347 | 347 | human_value['used'] = format_byte_size_binary(value['used']) |
|
348 | 348 | human_value['total'] = format_byte_size_binary(value['total']) |
|
349 | 349 | human_value['text'] = "{}/{}, {}% used".format( |
|
350 | 350 | format_byte_size_binary(value['used']), |
|
351 | 351 | format_byte_size_binary(value['total']), |
|
352 | 352 | value['percent']) |
|
353 | 353 | |
|
354 | 354 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
355 | 355 | msg = 'Critical: your disk space is very low.' |
|
356 | 356 | state = {'message': msg, 'type': STATE_ERR} |
|
357 | 357 | |
|
358 | 358 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
359 | 359 | msg = 'Warning: your disk space is running low.' |
|
360 | 360 | state = {'message': msg, 'type': STATE_WARN} |
|
361 | 361 | |
|
362 | 362 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
363 | 363 | |
|
364 | 364 | |
|
365 | 365 | @register_sysinfo |
|
366 | 366 | def storage_inodes(): |
|
367 | 367 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
368 | 368 | path = get_rhodecode_repo_store_path() |
|
369 | 369 | |
|
370 | 370 | value = dict(percent=0.0, free=0, used=0, total=0, path=path, text='') |
|
371 | 371 | state = STATE_OK_DEFAULT |
|
372 | 372 | |
|
373 | 373 | try: |
|
374 | 374 | i_stat = os.statvfs(path) |
|
375 | 375 | value['free'] = i_stat.f_ffree |
|
376 | 376 | value['used'] = i_stat.f_files-i_stat.f_favail |
|
377 | 377 | value['total'] = i_stat.f_files |
|
378 | 378 | value['percent'] = percentage(value['used'], value['total']) |
|
379 | 379 | except Exception as e: |
|
380 | 380 | log.exception('Failed to fetch disk inodes info') |
|
381 | 381 | state = {'message': str(e), 'type': STATE_ERR} |
|
382 | 382 | |
|
383 | 383 | human_value = value.copy() |
|
384 | 384 | human_value['text'] = "{}/{}, {}% used".format( |
|
385 | 385 | value['used'], value['total'], value['percent']) |
|
386 | 386 | |
|
387 | 387 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
388 | 388 | msg = 'Critical: your disk free inodes are very low.' |
|
389 | 389 | state = {'message': msg, 'type': STATE_ERR} |
|
390 | 390 | |
|
391 | 391 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
392 | 392 | msg = 'Warning: your disk free inodes are running low.' |
|
393 | 393 | state = {'message': msg, 'type': STATE_WARN} |
|
394 | 394 | |
|
395 | 395 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
396 | 396 | |
|
397 | 397 | |
|
398 | 398 | @register_sysinfo |
|
399 | 399 | def storage_archives(): |
|
400 | 400 | import rhodecode |
|
401 | 401 | from rhodecode.lib.utils import safe_str |
|
402 | 402 | from rhodecode.lib.helpers import format_byte_size_binary |
|
403 | 403 | |
|
404 | msg = 'Archive cache storage is controlled by ' \ | |
|
405 | 'archive_cache.store_dir=/path/to/cache option in the .ini file' | |
|
406 | path = safe_str(rhodecode.CONFIG.get('archive_cache.store_dir', msg)) | |
|
404 | storage_type = rhodecode.ConfigGet().get_str('archive_cache.backend.type') | |
|
405 | storage_key = 'archive_cache.filesystem.store_dir' | |
|
406 | ||
|
407 | default_msg = 'Archive cache storage is controlled by '\ | |
|
408 | f'{storage_key}=/path/to/cache option in the .ini file' | |
|
409 | path = rhodecode.ConfigGet().get_str(storage_key, missing=default_msg) | |
|
407 | 410 | |
|
408 | 411 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
409 | 412 | state = STATE_OK_DEFAULT |
|
410 | 413 | try: |
|
414 | if storage_type != 'filesystem': | |
|
415 | # raise Exc to stop reporting on different type | |
|
416 | raise ValueError('Storage type must be "filesystem"') | |
|
417 | ||
|
411 | 418 | items_count = 0 |
|
412 | 419 | used = 0 |
|
413 | 420 | for root, dirs, files in os.walk(path): |
|
414 | 421 | if root == path: |
|
415 | 422 | items_count = len(dirs) |
|
416 | 423 | |
|
417 | 424 | for f in files: |
|
418 | 425 | try: |
|
419 | 426 | used += os.path.getsize(os.path.join(root, f)) |
|
420 | 427 | except OSError: |
|
421 | 428 | pass |
|
422 | 429 | value.update({ |
|
423 | 430 | 'percent': 100, |
|
424 | 431 | 'used': used, |
|
425 | 432 | 'total': used, |
|
426 | 433 | 'items': items_count |
|
427 | 434 | }) |
|
428 | 435 | |
|
429 | 436 | except Exception as e: |
|
430 | 437 | log.exception('failed to fetch archive cache storage') |
|
431 | 438 | state = {'message': str(e), 'type': STATE_ERR} |
|
432 | 439 | |
|
433 | 440 | human_value = value.copy() |
|
434 | 441 | human_value['used'] = format_byte_size_binary(value['used']) |
|
435 | 442 | human_value['total'] = format_byte_size_binary(value['total']) |
|
436 | 443 | human_value['text'] = "{} ({} items)".format( |
|
437 | 444 | human_value['used'], value['items']) |
|
438 | 445 | |
|
439 | 446 | return SysInfoRes(value=value, state=state, human_value=human_value) |
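
storage_archives above (and storage_gist below) size a cache directory the same way: walk the tree, count the top-level entries as 'items', and sum file sizes while ignoring files that disappear mid-walk. A standalone sketch of that shared pattern:

    # Sketch of the walk-and-sum pattern used by storage_archives/storage_gist.
    import os

    def dir_usage(path):
        items_count, used = 0, 0
        for root, dirs, files in os.walk(path):
            if root == path:
                items_count = len(dirs)          # top-level entries only
            for f in files:
                try:
                    used += os.path.getsize(os.path.join(root, f))
                except OSError:                  # file removed while walking
                    pass
        return items_count, used
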
|
440 | 447 | |
|
441 | 448 | |
|
442 | 449 | @register_sysinfo |
|
443 | 450 | def storage_gist(): |
|
444 | 451 | from rhodecode.model.gist import GIST_STORE_LOC |
|
445 | 452 | from rhodecode.lib.utils import safe_str, get_rhodecode_repo_store_path |
|
446 | 453 | from rhodecode.lib.helpers import format_byte_size_binary |
|
447 | 454 | path = safe_str(os.path.join( |
|
448 | 455 | get_rhodecode_repo_store_path(), GIST_STORE_LOC)) |
|
449 | 456 | |
|
450 | 457 | # gist storage |
|
451 | 458 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
452 | 459 | state = STATE_OK_DEFAULT |
|
453 | 460 | |
|
454 | 461 | try: |
|
455 | 462 | items_count = 0 |
|
456 | 463 | used = 0 |
|
457 | 464 | for root, dirs, files in os.walk(path): |
|
458 | 465 | if root == path: |
|
459 | 466 | items_count = len(dirs) |
|
460 | 467 | |
|
461 | 468 | for f in files: |
|
462 | 469 | try: |
|
463 | 470 | used += os.path.getsize(os.path.join(root, f)) |
|
464 | 471 | except OSError: |
|
465 | 472 | pass |
|
466 | 473 | value.update({ |
|
467 | 474 | 'percent': 100, |
|
468 | 475 | 'used': used, |
|
469 | 476 | 'total': used, |
|
470 | 477 | 'items': items_count |
|
471 | 478 | }) |
|
472 | 479 | except Exception as e: |
|
473 | 480 | log.exception('failed to fetch gist storage items') |
|
474 | 481 | state = {'message': str(e), 'type': STATE_ERR} |
|
475 | 482 | |
|
476 | 483 | human_value = value.copy() |
|
477 | 484 | human_value['used'] = format_byte_size_binary(value['used']) |
|
478 | 485 | human_value['total'] = format_byte_size_binary(value['total']) |
|
479 | 486 | human_value['text'] = "{} ({} items)".format( |
|
480 | 487 | human_value['used'], value['items']) |
|
481 | 488 | |
|
482 | 489 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
483 | 490 | |
|
484 | 491 | |
|
485 | 492 | @register_sysinfo |
|
486 | 493 | def storage_temp(): |
|
487 | 494 | import tempfile |
|
488 | 495 | from rhodecode.lib.helpers import format_byte_size_binary |
|
489 | 496 | |
|
490 | 497 | path = tempfile.gettempdir() |
|
491 | 498 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
492 | 499 | state = STATE_OK_DEFAULT |
|
493 | 500 | |
|
494 | 501 | if not psutil: |
|
495 | 502 | return SysInfoRes(value=value, state=state) |
|
496 | 503 | |
|
497 | 504 | try: |
|
498 | 505 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
499 | 506 | except Exception as e: |
|
500 | 507 | log.exception('Failed to fetch temp dir info') |
|
501 | 508 | state = {'message': str(e), 'type': STATE_ERR} |
|
502 | 509 | |
|
503 | 510 | human_value = value.copy() |
|
504 | 511 | human_value['used'] = format_byte_size_binary(value['used']) |
|
505 | 512 | human_value['total'] = format_byte_size_binary(value['total']) |
|
506 | 513 | human_value['text'] = "{}/{}, {}% used".format( |
|
507 | 514 | format_byte_size_binary(value['used']), |
|
508 | 515 | format_byte_size_binary(value['total']), |
|
509 | 516 | value['percent']) |
|
510 | 517 | |
|
511 | 518 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
512 | 519 | |
|
513 | 520 | |
|
514 | 521 | @register_sysinfo |
|
515 | 522 | def search_info(): |
|
516 | 523 | import rhodecode |
|
517 | 524 | from rhodecode.lib.index import searcher_from_config |
|
518 | 525 | |
|
519 | 526 | backend = rhodecode.CONFIG.get('search.module', '') |
|
520 | 527 | location = rhodecode.CONFIG.get('search.location', '') |
|
521 | 528 | |
|
522 | 529 | try: |
|
523 | 530 | searcher = searcher_from_config(rhodecode.CONFIG) |
|
524 | 531 | searcher = searcher.__class__.__name__ |
|
525 | 532 | except Exception: |
|
526 | 533 | searcher = None |
|
527 | 534 | |
|
528 | 535 | value = dict( |
|
529 | 536 | backend=backend, searcher=searcher, location=location, text='') |
|
530 | 537 | state = STATE_OK_DEFAULT |
|
531 | 538 | |
|
532 | 539 | human_value = value.copy() |
|
533 | 540 | human_value['text'] = "backend:`{}`".format(human_value['backend']) |
|
534 | 541 | |
|
535 | 542 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
536 | 543 | |
|
537 | 544 | |
|
538 | 545 | @register_sysinfo |
|
539 | 546 | def git_info(): |
|
540 | 547 | from rhodecode.lib.vcs.backends import git |
|
541 | 548 | state = STATE_OK_DEFAULT |
|
542 | 549 | value = human_value = '' |
|
543 | 550 | try: |
|
544 | 551 | value = git.discover_git_version(raise_on_exc=True) |
|
545 | 552 | human_value = f'version reported from VCSServer: {value}' |
|
546 | 553 | except Exception as e: |
|
547 | 554 | state = {'message': str(e), 'type': STATE_ERR} |
|
548 | 555 | |
|
549 | 556 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
550 | 557 | |
|
551 | 558 | |
|
552 | 559 | @register_sysinfo |
|
553 | 560 | def hg_info(): |
|
554 | 561 | from rhodecode.lib.vcs.backends import hg |
|
555 | 562 | state = STATE_OK_DEFAULT |
|
556 | 563 | value = human_value = '' |
|
557 | 564 | try: |
|
558 | 565 | value = hg.discover_hg_version(raise_on_exc=True) |
|
559 | 566 | human_value = f'version reported from VCSServer: {value}' |
|
560 | 567 | except Exception as e: |
|
561 | 568 | state = {'message': str(e), 'type': STATE_ERR} |
|
562 | 569 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
563 | 570 | |
|
564 | 571 | |
|
565 | 572 | @register_sysinfo |
|
566 | 573 | def svn_info(): |
|
567 | 574 | from rhodecode.lib.vcs.backends import svn |
|
568 | 575 | state = STATE_OK_DEFAULT |
|
569 | 576 | value = human_value = '' |
|
570 | 577 | try: |
|
571 | 578 | value = svn.discover_svn_version(raise_on_exc=True) |
|
572 | 579 | human_value = f'version reported from VCSServer: {value}' |
|
573 | 580 | except Exception as e: |
|
574 | 581 | state = {'message': str(e), 'type': STATE_ERR} |
|
575 | 582 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
576 | 583 | |
|
577 | 584 | |
|
578 | 585 | @register_sysinfo |
|
579 | 586 | def vcs_backends(): |
|
580 | 587 | import rhodecode |
|
581 | 588 | value = rhodecode.CONFIG.get('vcs.backends') |
|
582 | 589 | human_value = 'Enabled backends in order: {}'.format(','.join(value)) |
|
583 | 590 | return SysInfoRes(value=value, human_value=human_value) |
|
584 | 591 | |
|
585 | 592 | |
|
586 | 593 | @register_sysinfo |
|
587 | 594 | def vcs_server(): |
|
588 | 595 | import rhodecode |
|
589 | 596 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
590 | 597 | |
|
591 | 598 | server_url = rhodecode.CONFIG.get('vcs.server') |
|
592 | 599 | enabled = rhodecode.CONFIG.get('vcs.server.enable') |
|
593 | 600 | protocol = rhodecode.CONFIG.get('vcs.server.protocol') or 'http' |
|
594 | 601 | state = STATE_OK_DEFAULT |
|
595 | 602 | version = None |
|
596 | 603 | workers = 0 |
|
597 | 604 | |
|
598 | 605 | try: |
|
599 | 606 | data = get_vcsserver_service_data() |
|
600 | 607 | if data and 'version' in data: |
|
601 | 608 | version = data['version'] |
|
602 | 609 | |
|
603 | 610 | if data and 'config' in data: |
|
604 | 611 | conf = data['config'] |
|
605 | 612 | workers = conf.get('workers', 'NOT AVAILABLE') |
|
606 | 613 | |
|
607 | 614 | connection = 'connected' |
|
608 | 615 | except Exception as e: |
|
609 | 616 | connection = 'failed' |
|
610 | 617 | state = {'message': str(e), 'type': STATE_ERR} |
|
611 | 618 | |
|
612 | 619 | value = dict( |
|
613 | 620 | url=server_url, |
|
614 | 621 | enabled=enabled, |
|
615 | 622 | protocol=protocol, |
|
616 | 623 | connection=connection, |
|
617 | 624 | version=version, |
|
618 | 625 | text='', |
|
619 | 626 | ) |
|
620 | 627 | |
|
621 | 628 | human_value = value.copy() |
|
622 | 629 | human_value['text'] = \ |
|
623 | 630 | '{url}@ver:{ver} via {mode} mode[workers:{workers}], connection:{conn}'.format( |
|
624 | 631 | url=server_url, ver=version, workers=workers, mode=protocol, |
|
625 | 632 | conn=connection) |
|
626 | 633 | |
|
627 | 634 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
628 | 635 | |
|
629 | 636 | |
|
630 | 637 | @register_sysinfo |
|
631 | 638 | def vcs_server_config(): |
|
632 | 639 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
633 | 640 | state = STATE_OK_DEFAULT |
|
634 | 641 | |
|
635 | 642 | value = {} |
|
636 | 643 | try: |
|
637 | 644 | data = get_vcsserver_service_data() |
|
638 | 645 | value = data['app_config'] |
|
639 | 646 | except Exception as e: |
|
640 | 647 | state = {'message': str(e), 'type': STATE_ERR} |
|
641 | 648 | |
|
642 | 649 | human_value = value.copy() |
|
643 | 650 | human_value['text'] = 'VCS Server config' |
|
644 | 651 | |
|
645 | 652 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
646 | 653 | |
|
647 | 654 | |
|
648 | 655 | @register_sysinfo |
|
649 | 656 | def rhodecode_app_info(): |
|
650 | 657 | import rhodecode |
|
651 | 658 | edition = rhodecode.CONFIG.get('rhodecode.edition') |
|
652 | 659 | |
|
653 | 660 | value = dict( |
|
654 | 661 | rhodecode_version=rhodecode.__version__, |
|
655 | 662 | rhodecode_lib_path=os.path.abspath(rhodecode.__file__), |
|
656 | 663 | text='' |
|
657 | 664 | ) |
|
658 | 665 | human_value = value.copy() |
|
659 | 666 | human_value['text'] = 'RhodeCode {edition}, version {ver}'.format( |
|
660 | 667 | edition=edition, ver=value['rhodecode_version'] |
|
661 | 668 | ) |
|
662 | 669 | return SysInfoRes(value=value, human_value=human_value) |
|
663 | 670 | |
|
664 | 671 | |
|
665 | 672 | @register_sysinfo |
|
666 | 673 | def rhodecode_config(): |
|
667 | 674 | import rhodecode |
|
668 | 675 | path = rhodecode.CONFIG.get('__file__') |
|
669 | 676 | rhodecode_ini_safe = rhodecode.CONFIG.copy() |
|
670 | 677 | cert_path = get_cert_path(path) |
|
671 | 678 | |
|
672 | 679 | try: |
|
673 | 680 | config = configparser.ConfigParser() |
|
674 | 681 | config.read(path) |
|
675 | 682 | parsed_ini = config |
|
676 | 683 | if parsed_ini.has_section('server:main'): |
|
677 | 684 | parsed_ini = dict(parsed_ini.items('server:main')) |
|
678 | 685 | except Exception: |
|
679 | 686 | log.exception('Failed to read .ini file for display') |
|
680 | 687 | parsed_ini = {} |
|
681 | 688 | |
|
682 | 689 | rhodecode_ini_safe['server:main'] = parsed_ini |
|
683 | 690 | |
|
684 | 691 | blacklist = [ |
|
685 | 692 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', |
|
686 | 693 | 'routes.map', |
|
687 | 694 | 'sqlalchemy.db1.url', |
|
688 | 695 | 'channelstream.secret', |
|
689 | 696 | 'beaker.session.secret', |
|
690 | 697 | 'rhodecode.encrypted_values.secret', |
|
691 | 698 | 'rhodecode_auth_github_consumer_key', |
|
692 | 699 | 'rhodecode_auth_github_consumer_secret', |
|
693 | 700 | 'rhodecode_auth_google_consumer_key', |
|
694 | 701 | 'rhodecode_auth_google_consumer_secret', |
|
695 | 702 | 'rhodecode_auth_bitbucket_consumer_secret', |
|
696 | 703 | 'rhodecode_auth_bitbucket_consumer_key', |
|
697 | 704 | 'rhodecode_auth_twitter_consumer_secret', |
|
698 | 705 | 'rhodecode_auth_twitter_consumer_key', |
|
699 | 706 | |
|
700 | 707 | 'rhodecode_auth_twitter_secret', |
|
701 | 708 | 'rhodecode_auth_github_secret', |
|
702 | 709 | 'rhodecode_auth_google_secret', |
|
703 | 710 | 'rhodecode_auth_bitbucket_secret', |
|
704 | 711 | |
|
705 | 712 | 'appenlight.api_key', |
|
706 | 713 | ('app_conf', 'sqlalchemy.db1.url') |
|
707 | 714 | ] |
|
708 | 715 | for k in blacklist: |
|
709 | 716 | if isinstance(k, tuple): |
|
710 | 717 | section, key = k |
|
711 | 718 | if section in rhodecode_ini_safe: |
|
712 | 719 | rhodecode_ini_safe[section] = '**OBFUSCATED**' |
|
713 | 720 | else: |
|
714 | 721 | rhodecode_ini_safe.pop(k, None) |
|
715 | 722 | |
|
716 | 723 | # TODO: maybe put some CONFIG checks here ? |
|
717 | 724 | return SysInfoRes(value={'config': rhodecode_ini_safe, |
|
718 | 725 | 'path': path, 'cert_path': cert_path}) |
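
A side note on the blacklist loop above: plain string keys are popped from the copied config, while a tuple entry masks the entire named section (the key inside the tuple is not used to drill down further). A minimal, self-contained sketch with made-up data:

# Illustration of the obfuscation loop above, using made-up config values.
cfg = {
    'beaker.session.secret': 'super-secret',
    'app_conf': {'sqlalchemy.db1.url': 'postgresql://user:pass@db/rhodecode'},
}
for k in ['beaker.session.secret', ('app_conf', 'sqlalchemy.db1.url')]:
    if isinstance(k, tuple):
        section, _key = k
        if section in cfg:
            cfg[section] = '**OBFUSCATED**'  # whole section is replaced, not just the key
    else:
        cfg.pop(k, None)                     # plain keys are dropped outright
# cfg is now {'app_conf': '**OBFUSCATED**'}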
|
719 | 726 | |
|
720 | 727 | |
|
721 | 728 | @register_sysinfo |
|
722 | 729 | def database_info(): |
|
723 | 730 | import rhodecode |
|
724 | 731 | from sqlalchemy.engine import url as engine_url |
|
725 | 732 | from rhodecode.model import meta |
|
726 | 733 | from rhodecode.model.meta import Session |
|
727 | 734 | from rhodecode.model.db import DbMigrateVersion |
|
728 | 735 | |
|
729 | 736 | state = STATE_OK_DEFAULT |
|
730 | 737 | |
|
731 | 738 | db_migrate = DbMigrateVersion.query().filter( |
|
732 | 739 | DbMigrateVersion.repository_id == 'rhodecode_db_migrations').one() |
|
733 | 740 | |
|
734 | 741 | db_url_obj = engine_url.make_url(rhodecode.CONFIG['sqlalchemy.db1.url']) |
|
735 | 742 | |
|
736 | 743 | try: |
|
737 | 744 | engine = meta.get_engine() |
|
738 | 745 | db_server_info = engine.dialect._get_server_version_info( |
|
739 | 746 | Session.connection(bind=engine)) |
|
740 | 747 | db_version = '.'.join(map(str, db_server_info)) |
|
741 | 748 | except Exception: |
|
742 | 749 | log.exception('failed to fetch db version') |
|
743 | 750 | db_version = 'UNKNOWN' |
|
744 | 751 | |
|
745 | 752 | db_info = dict( |
|
746 | 753 | migrate_version=db_migrate.version, |
|
747 | 754 | type=db_url_obj.get_backend_name(), |
|
748 | 755 | version=db_version, |
|
749 | 756 | url=repr(db_url_obj) |
|
750 | 757 | ) |
|
751 | 758 | current_version = db_migrate.version |
|
752 | 759 | expected_version = rhodecode.__dbversion__ |
|
753 | 760 | if state['type'] == STATE_OK and current_version != expected_version: |
|
754 | 761 | msg = 'Critical: database schema mismatch, ' \ |
|
755 | 762 | 'expected version {}, got {}. ' \ |
|
756 | 763 | 'Please run migrations on your database.'.format( |
|
757 | 764 | expected_version, current_version) |
|
758 | 765 | state = {'message': msg, 'type': STATE_ERR} |
|
759 | 766 | |
|
760 | 767 | human_value = db_info.copy() |
|
761 | 768 | human_value['url'] = "{} @ migration version: {}".format( |
|
762 | 769 | db_info['url'], db_info['migrate_version']) |
|
763 | 770 | human_value['version'] = "{} {}".format(db_info['type'], db_info['version']) |
|
764 | 771 | return SysInfoRes(value=db_info, state=state, human_value=human_value) |
|
765 | 772 | |
|
766 | 773 | |
|
767 | 774 | @register_sysinfo |
|
768 | 775 | def server_info(environ): |
|
769 | 776 | import rhodecode |
|
770 | 777 | from rhodecode.lib.base import get_server_ip_addr, get_server_port |
|
771 | 778 | |
|
772 | 779 | value = { |
|
773 | 780 | 'server_ip': '{}:{}'.format( |
|
774 | 781 | get_server_ip_addr(environ, log_errors=False), |
|
775 | 782 | get_server_port(environ) |
|
776 | 783 | ), |
|
777 | 784 | 'server_id': rhodecode.CONFIG.get('instance_id'), |
|
778 | 785 | } |
|
779 | 786 | return SysInfoRes(value=value) |
|
780 | 787 | |
|
781 | 788 | |
|
782 | 789 | @register_sysinfo |
|
783 | 790 | def usage_info(): |
|
784 | 791 | from rhodecode.model.db import User, Repository, true |
|
785 | 792 | value = { |
|
786 | 793 | 'users': User.query().count(), |
|
787 | 794 | 'users_active': User.query().filter(User.active == true()).count(), |
|
788 | 795 | 'repositories': Repository.query().count(), |
|
789 | 796 | 'repository_types': { |
|
790 | 797 | 'hg': Repository.query().filter( |
|
791 | 798 | Repository.repo_type == 'hg').count(), |
|
792 | 799 | 'git': Repository.query().filter( |
|
793 | 800 | Repository.repo_type == 'git').count(), |
|
794 | 801 | 'svn': Repository.query().filter( |
|
795 | 802 | Repository.repo_type == 'svn').count(), |
|
796 | 803 | }, |
|
797 | 804 | } |
|
798 | 805 | return SysInfoRes(value=value) |
|
799 | 806 | |
|
800 | 807 | |
|
801 | 808 | def get_system_info(environ): |
|
802 | 809 | environ = environ or {} |
|
803 | 810 | return { |
|
804 | 811 | 'rhodecode_app': SysInfo(rhodecode_app_info)(), |
|
805 | 812 | 'rhodecode_config': SysInfo(rhodecode_config)(), |
|
806 | 813 | 'rhodecode_usage': SysInfo(usage_info)(), |
|
807 | 814 | 'python': SysInfo(python_info)(), |
|
808 | 815 | 'py_modules': SysInfo(py_modules)(), |
|
809 | 816 | |
|
810 | 817 | 'platform': SysInfo(platform_type)(), |
|
811 | 818 | 'locale': SysInfo(locale_info)(), |
|
812 | 819 | 'server': SysInfo(server_info, environ=environ)(), |
|
813 | 820 | 'database': SysInfo(database_info)(), |
|
814 | 821 | 'ulimit': SysInfo(ulimit_info)(), |
|
815 | 822 | 'storage': SysInfo(storage)(), |
|
816 | 823 | 'storage_inodes': SysInfo(storage_inodes)(), |
|
817 | 824 | 'storage_archive': SysInfo(storage_archives)(), |
|
818 | 825 | 'storage_gist': SysInfo(storage_gist)(), |
|
819 | 826 | 'storage_temp': SysInfo(storage_temp)(), |
|
820 | 827 | |
|
821 | 828 | 'search': SysInfo(search_info)(), |
|
822 | 829 | |
|
823 | 830 | 'uptime': SysInfo(uptime)(), |
|
824 | 831 | 'load': SysInfo(machine_load)(), |
|
825 | 832 | 'cpu': SysInfo(cpu)(), |
|
826 | 833 | 'memory': SysInfo(memory)(), |
|
827 | 834 | |
|
828 | 835 | 'vcs_backends': SysInfo(vcs_backends)(), |
|
829 | 836 | 'vcs_server': SysInfo(vcs_server)(), |
|
830 | 837 | |
|
831 | 838 | 'vcs_server_config': SysInfo(vcs_server_config)(), |
|
832 | 839 | |
|
833 | 840 | 'git': SysInfo(git_info)(), |
|
834 | 841 | 'hg': SysInfo(hg_info)(), |
|
835 | 842 | 'svn': SysInfo(svn_info)(), |
|
836 | 843 | } |
|
837 | 844 | |
|
838 | 845 | |
|
839 | 846 | def load_system_info(key): |
|
840 | 847 | """ |
|
841 | 848 | load_system_info('vcs_server')

842 | 849 | load_system_info('database')
|
843 | 850 | """ |
|
844 | 851 | return SysInfo(registered_helpers[key])() |
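
For context, a minimal usage sketch of the two entry points defined above. The import path is an assumption (this file is presumed to be importable as rhodecode.lib.system_info), and the exact shape of the returned values depends on SysInfo/SysInfoRes defined earlier in this file:

# Hypothetical consumer of the helpers above; adjust the import to the actual module path.
from rhodecode.lib.system_info import get_system_info, load_system_info

vcs_state = load_system_info('vcs_server')  # run a single registered helper by key, as in the docstring above
full_snapshot = get_system_info(environ={})  # run all helpers and collect their results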