@@ -0,0 +1,40 @@

1 | |RCE| 5.2.1 |RNS|
2 | -----------------
3 |
4 | Release Date
5 | ^^^^^^^^^^^^
6 |
7 | - 2024-09-16
8 |
9 |
10 | New Features
11 | ^^^^^^^^^^^^
12 |
13 |
14 |
15 | General
16 | ^^^^^^^
17 |
18 |
19 |
20 | Security
21 | ^^^^^^^^
22 |
23 |
24 |
25 | Performance
26 | ^^^^^^^^^^^
27 |
28 |
29 |
30 |
31 | Fixes
32 | ^^^^^
33 |
34 | - Fixed problems with incorrect user agent errors
35 |
36 |
37 | Upgrade notes
38 | ^^^^^^^^^^^^^
39 |
40 | - RhodeCode 5.2.1 is an unscheduled bugfix release to address some build issues with the 5.2 images
@@ -0,0 +1,45 @@

1 | |RCE| 5.3.0 |RNS|
2 | -----------------
3 |
4 | Release Date
5 | ^^^^^^^^^^^^
6 |
7 | - 2024-09-17
8 |
9 |
10 | New Features
11 | ^^^^^^^^^^^^
12 |
13 | - System-info: expose the RhodeCode config for better visibility of the settings applied to a RhodeCode system.
14 |
15 |
16 | General
17 | ^^^^^^^
18 |
19 |
20 |
21 | Security
22 | ^^^^^^^^
23 |
24 | - Permissions: fixed a security problem where the apply-to-children functionality of a repo group broke
25 |   permissions for private repositories, exposing them despite the repositories being private.
26 | - Git-lfs: fixed a security problem that allowed off-chain attacks to replace OID data without validating the hash of already present OIDs.
27 |   This allowed replacing the content of an LFS OID with a malicious request tailored to an open RhodeCode server.
28 |
29 |
30 | Performance
31 | ^^^^^^^^^^^
32 |
33 |
34 |
35 |
36 | Fixes
37 | ^^^^^
38 |
39 | - Fixed problems with incorrect user agent errors
40 |
41 |
42 | Upgrade notes
43 | ^^^^^^^^^^^^^
44 |
45 | - RhodeCode 5.3.0 is an unscheduled security release to address some build issues with the 5.X images
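The Git-lfs fix above is about validating content hashes before accepting an upload. For illustration only (a hypothetical sketch, not RhodeCode's actual code): in Git LFS an OID is the SHA-256 hex digest of the object content, so a server can recompute the digest, reject any upload whose content does not match the claimed OID, and refuse to overwrite an OID that is already present:

    import hashlib

    def store_lfs_object(oid: str, data: bytes, store: dict) -> None:
        # Hypothetical sketch of server-side OID validation.
        # In Git LFS, the OID is the SHA-256 hex digest of the content.
        digest = hashlib.sha256(data).hexdigest()
        if digest != oid:
            raise ValueError(f"OID mismatch: expected {oid}, got {digest}")
        if oid in store:
            return  # never silently replace already-present OID data
        store[oid] = data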
@@ -1,5 +1,5 @@

1 | 1 | [bumpversion]
2 |   | current_version = 5.2.1
  | 2 | current_version = 5.3.0
3 | 3 | message = release: Bump version {current_version} to {new_version}
4 | 4 |
5 | 5 | [bumpversion:file:rhodecode/VERSION]
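This hunk is the release bump itself: with the `[bumpversion]` section above, bumping 5.2.1 to 5.3.0 rewrites `rhodecode/VERSION` and, when committing is enabled, records the commit message `release: Bump version 5.2.1 to 5.3.0` from the `message` template.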
@@ -1,912 +1,915 @@
|
1 | 1 | |
|
2 | 2 | ; ######################################### |
|
3 | 3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
4 | 4 | ; ######################################### |
|
5 | 5 | |
|
6 | 6 | [DEFAULT] |
|
7 | 7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
8 | 8 | debug = true |
|
9 | 9 | |
|
10 | 10 | ; ######################################################################## |
|
11 | 11 | ; EMAIL CONFIGURATION |
|
12 | 12 | ; These settings will be used by the RhodeCode mailing system |
|
13 | 13 | ; ######################################################################## |
|
14 | 14 | |
|
15 | 15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
16 | 16 | #email_prefix = [RhodeCode] |
|
17 | 17 | |
|
18 | 18 | ; email FROM address all mails will be sent |
|
19 | 19 | #app_email_from = rhodecode-noreply@localhost |
|
20 | 20 | |
|
21 | 21 | #smtp_server = mail.server.com |
|
22 | 22 | #smtp_username = |
|
23 | 23 | #smtp_password = |
|
24 | 24 | #smtp_port = |
|
25 | 25 | #smtp_use_tls = false |
|
26 | 26 | #smtp_use_ssl = true |
|
27 | 27 | |
|
28 | 28 | [server:main] |
|
29 | 29 | ; COMMON HOST/IP CONFIG. This applies mostly to the develop setup,
|
30 | 30 | ; Host and port for gunicorn are controlled by gunicorn_conf.py
|
31 | 31 | host = 127.0.0.1 |
|
32 | 32 | port = 10020 |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | ; ########################### |
|
36 | 36 | ; GUNICORN APPLICATION SERVER |
|
37 | 37 | ; ########################### |
|
38 | 38 | |
|
39 | 39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini |
|
40 | 40 | |
|
41 | 41 | ; Module to use, this setting shouldn't be changed |
|
42 | 42 | use = egg:gunicorn#main |
|
43 | 43 | |
|
44 | 44 | ; Prefix middleware for RhodeCode. |
|
45 | 45 | ; recommended when using proxy setup. |
|
46 | 46 | ; allows to set RhodeCode under a prefix in server. |
|
47 | 47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
48 | 48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
49 | 49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
50 | 50 | ; to make your cookies only work on prefix url |
|
51 | 51 | [filter:proxy-prefix] |
|
52 | 52 | use = egg:PasteDeploy#prefix |
|
53 | 53 | prefix = / |
|
54 | 54 | |
|
55 | 55 | [app:main] |
|
56 | 56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
57 | 57 | ; of this file |
|
58 | 58 | ; Each option in the app:main can be overridden by an environmental variable
|
59 | 59 | ; |
|
60 | 60 | ;To override an option: |
|
61 | 61 | ; |
|
62 | 62 | ;RC_<KeyName> |
|
63 | 63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
64 | 64 | ;For example, if you have these configuration settings: |
|
65 | 65 | ;rc_cache.repo_object.backend = foo |
|
66 | 66 | ;can be overridden by |
|
67 | 67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
68 | 68 | |
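The override naming rule described above is mechanical; here is a minimal sketch of it (the non-doubling of the `RC_` prefix for keys that already start with `rc_` is an assumption that matches the documented example):

    def ini_key_to_env_var(key: str) -> str:
        # Uppercase everything, replace '.' and '-' with '_', prefix with 'RC_'.
        name = key.upper().replace(".", "_").replace("-", "_")
        return name if name.startswith("RC_") else "RC_" + name

    # Reproduces the example from the comments above:
    print(ini_key_to_env_var("rc_cache.repo_object.backend"))  # RC_CACHE_REPO_OBJECT_BACKEND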
|
69 | 69 | use = egg:rhodecode-enterprise-ce |
|
70 | 70 | |
|
71 | 71 | ; enable proxy prefix middleware, defined above |
|
72 | 72 | #filter-with = proxy-prefix |
|
73 | 73 | |
|
74 | ; control if environmental variables are expanded into the .ini settings
|
75 | #rhodecode.env_expand = true | |
|
76 | ||
|
74 | 77 | ; ############# |
|
75 | 78 | ; DEBUG OPTIONS |
|
76 | 79 | ; ############# |
|
77 | 80 | |
|
78 | 81 | pyramid.reload_templates = true |
|
79 | 82 | |
|
80 | 83 | # During development we want to have the debug toolbar enabled
|
81 | 84 | pyramid.includes = |
|
82 | 85 | pyramid_debugtoolbar |
|
83 | 86 | |
|
84 | 87 | debugtoolbar.hosts = 0.0.0.0/0 |
|
85 | 88 | debugtoolbar.exclude_prefixes = |
|
86 | 89 | /css |
|
87 | 90 | /fonts |
|
88 | 91 | /images |
|
89 | 92 | /js |
|
90 | 93 | |
|
91 | 94 | ## RHODECODE PLUGINS ## |
|
92 | 95 | rhodecode.includes = |
|
93 | 96 | rhodecode.api |
|
94 | 97 | |
|
95 | 98 | |
|
96 | 99 | # api prefix url |
|
97 | 100 | rhodecode.api.url = /_admin/api |
|
98 | 101 | |
|
99 | 102 | ; enable debug style page |
|
100 | 103 | debug_style = true |
|
101 | 104 | |
|
102 | 105 | ; ################# |
|
103 | 106 | ; END DEBUG OPTIONS |
|
104 | 107 | ; ################# |
|
105 | 108 | |
|
106 | 109 | ; encryption key used to encrypt social plugin tokens, |
|
107 | 110 | ; remote_urls with credentials etc, if not set it defaults to |
|
108 | 111 | ; `beaker.session.secret` |
|
109 | 112 | #rhodecode.encrypted_values.secret = |
|
110 | 113 | |
|
111 | 114 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
112 | 115 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
113 | 116 | #rhodecode.encrypted_values.strict = false |
|
114 | 117 | |
|
115 | 118 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
116 | 119 | ; fernet is safer, and we strongly recommend switching to it. |
|
117 | 120 | ; Due to backward compatibility aes is used as default. |
|
118 | 121 | #rhodecode.encrypted_values.algorithm = fernet |
|
119 | 122 | |
|
120 | 123 | ; Return gzipped responses from RhodeCode (static files/application) |
|
121 | 124 | gzip_responses = false |
|
122 | 125 | |
|
123 | 126 | ; Auto-generate javascript routes file on startup |
|
124 | 127 | generate_js_files = false |
|
125 | 128 | |
|
126 | 129 | ; System global default language. |
|
127 | 130 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
128 | 131 | lang = en |
|
129 | 132 | |
|
130 | 133 | ; Perform a full repository scan and import on each server start. |
|
131 | 134 | ; Setting this to true could lead to very long startup time.
|
132 | 135 | startup.import_repos = false |
|
133 | 136 | |
|
134 | 137 | ; URL at which the application is running. This is used for Bootstrapping |
|
135 | 138 | ; requests in context when no web request is available. Used in ishell, or |
|
136 | 139 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
137 | 140 | app.base_url = http://rhodecode.local |
|
138 | 141 | |
|
139 | 142 | ; Host at which the Service API is running. |
|
140 | 143 | app.service_api.host = http://rhodecode.local:10020 |
|
141 | 144 | |
|
142 | 145 | ; Secret for Service API authentication. |
|
143 | 146 | app.service_api.token = |
|
144 | 147 | |
|
145 | 148 | ; Unique application ID. Should be a random unique string for security. |
|
146 | 149 | app_instance_uuid = rc-production |
|
147 | 150 | |
|
148 | 151 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
149 | 152 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
150 | 153 | ; partially. E.g 512000 == 512Kb |
|
151 | 154 | cut_off_limit_diff = 512000 |
|
152 | 155 | |
|
153 | 156 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
154 | 157 | ; file inside diff which exceeds this limit will be displayed partially. |
|
155 | 158 | ; E.g 128000 == 128Kb |
|
156 | 159 | cut_off_limit_file = 128000 |
|
157 | 160 | |
|
158 | 161 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
159 | 162 | vcs_full_cache = true |
|
160 | 163 | |
|
161 | 164 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
162 | 165 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
163 | 166 | force_https = false |
|
164 | 167 | |
|
165 | 168 | ; use Strict-Transport-Security headers |
|
166 | 169 | use_htsts = false |
|
167 | 170 | |
|
168 | 171 | ; Set to true if your repos are exposed using the dumb protocol |
|
169 | 172 | git_update_server_info = false |
|
170 | 173 | |
|
171 | 174 | ; RSS/ATOM feed options |
|
172 | 175 | rss_cut_off_limit = 256000 |
|
173 | 176 | rss_items_per_page = 10 |
|
174 | 177 | rss_include_diff = false |
|
175 | 178 | |
|
176 | 179 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
177 | 180 | ; url that does rewrites to _admin/gists/{gistid}. |
|
178 | 181 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
179 | 182 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
180 | 183 | gist_alias_url = |
|
181 | 184 | |
|
182 | 185 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
183 | 186 | ; used to access.
|
184 | 187 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
185 | 188 | ; came from the logged-in user who owns this authentication token.
|
186 | 189 | ; Additionally @TOKEN syntax can be used to bind the view to a specific
187 | 190 | ; authentication token. Such a view would only be accessible when used together
|
188 | 191 | ; with this authentication token |
|
189 | 192 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
190 | 193 | ; The list should be "," separated and on a single line. |
|
191 | 194 | ; Most common views to enable: |
|
192 | 195 | |
|
193 | 196 | # RepoCommitsView:repo_commit_download |
|
194 | 197 | # RepoCommitsView:repo_commit_patch |
|
195 | 198 | # RepoCommitsView:repo_commit_raw |
|
196 | 199 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
197 | 200 | # RepoFilesView:repo_files_diff |
|
198 | 201 | # RepoFilesView:repo_archivefile |
|
199 | 202 | # RepoFilesView:repo_file_raw |
|
200 | 203 | # GistView:* |
|
201 | 204 | api_access_controllers_whitelist = |
|
202 | 205 | |
|
203 | 206 | ; Default encoding used to convert from and to unicode |
|
204 | 207 | ; can be also a comma separated list of encoding in case of mixed encodings |
|
205 | 208 | default_encoding = UTF-8 |
|
206 | 209 | |
|
207 | 210 | ; instance-id prefix |
|
208 | 211 | ; a prefix key for this instance used for cache invalidation when running |
|
209 | 212 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
210 | 213 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
211 | 214 | instance_id = |
|
212 | 215 | |
|
213 | 216 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
214 | 217 | ; of an authentication plugin even if it is disabled by its settings.
|
215 | 218 | ; This could be useful if you are unable to log in to the system due to broken |
|
216 | 219 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
217 | 220 | ; module to log in again and fix the settings. |
|
218 | 221 | ; Available builtin plugin IDs (hash is part of the ID): |
|
219 | 222 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
220 | 223 | ; egg:rhodecode-enterprise-ce#pam |
|
221 | 224 | ; egg:rhodecode-enterprise-ce#ldap |
|
222 | 225 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
223 | 226 | ; egg:rhodecode-enterprise-ce#headers |
|
224 | 227 | ; egg:rhodecode-enterprise-ce#crowd |
|
225 | 228 | |
|
226 | 229 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
227 | 230 | |
|
228 | 231 | ; Flag to control loading of legacy plugins in py:/path format |
|
229 | 232 | auth_plugin.import_legacy_plugins = true |
|
230 | 233 | |
|
231 | 234 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
232 | 235 | ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with |
|
233 | 236 | ; handling that causing a series of failed authentication calls. |
|
234 | 237 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
235 | 238 | ; This will be served instead of default 401 on bad authentication |
|
236 | 239 | auth_ret_code = |
|
237 | 240 | |
|
238 | 241 | ; use special detection method when serving auth_ret_code, instead of serving |
|
239 | 242 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
240 | 243 | ; and then serve auth_ret_code to clients |
|
241 | 244 | auth_ret_code_detection = false |
|
242 | 245 | |
|
243 | 246 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
244 | 247 | ; codes don't break the transactions while 4XX codes do |
|
245 | 248 | lock_ret_code = 423 |
|
246 | 249 | |
|
247 | 250 | ; Filesystem location where repositories should be stored
|
248 | 251 | repo_store.path = /var/opt/rhodecode_repo_store |
|
249 | 252 | |
|
250 | 253 | ; allows to setup custom hooks in settings page |
|
251 | 254 | allow_custom_hooks_settings = true |
|
252 | 255 | |
|
253 | 256 | ; Generated license token required for EE edition license. |
|
254 | 257 | ; New generated token value can be found in Admin > settings > license page. |
|
255 | 258 | license_token = |
|
256 | 259 | |
|
257 | 260 | ; This flag hides sensitive information on the license page such as token, and license data |
|
258 | 261 | license.hide_license_info = false |
|
259 | 262 | |
|
260 | 263 | ; Import EE license from this license path |
|
261 | 264 | #license.import_path = %(here)s/rhodecode_enterprise.license |
|
262 | 265 | |
|
263 | 266 | ; import license 'if-missing' or 'force' (always override) |
|
264 | 267 | ; if-missing means apply license if it doesn't exist. 'force' option always overrides it |
|
265 | 268 | license.import_path_mode = if-missing |
|
266 | 269 | |
|
267 | 270 | ; supervisor connection uri, for managing supervisor and logs. |
|
268 | 271 | supervisor.uri = |
|
269 | 272 | |
|
270 | 273 | ; supervisord group name/id we only want this RC instance to handle |
|
271 | 274 | supervisor.group_id = dev |
|
272 | 275 | |
|
273 | 276 | ; Display extended labs settings |
|
274 | 277 | labs_settings_active = true |
|
275 | 278 | |
|
276 | 279 | ; Custom exception store path, defaults to TMPDIR |
|
277 | 280 | ; This is used to store exception from RhodeCode in shared directory |
|
278 | 281 | #exception_tracker.store_path = |
|
279 | 282 | |
|
280 | 283 | ; Send email with exception details when it happens |
|
281 | 284 | #exception_tracker.send_email = false |
|
282 | 285 | |
|
283 | 286 | ; Comma separated list of recipients for exception emails, |
|
284 | 287 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
285 | 288 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
286 | 289 | #exception_tracker.send_email_recipients = |
|
287 | 290 | |
|
288 | 291 | ; optional prefix to Add to email Subject |
|
289 | 292 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
290 | 293 | |
|
291 | 294 | ; NOTE: this setting IS DEPRECATED: |
|
292 | 295 | ; file_store backend is always enabled |
|
293 | 296 | #file_store.enabled = true |
|
294 | 297 | |
|
295 | 298 | ; NOTE: this setting IS DEPRECATED: |
|
296 | 299 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
297 | 300 | ; Storage backend, available options are: local |
|
298 | 301 | #file_store.backend = local |
|
299 | 302 | |
|
300 | 303 | ; NOTE: this setting IS DEPRECATED: |
|
301 | 304 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
302 | 305 | ; path to store the uploaded binaries and artifacts |
|
303 | 306 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
304 | 307 | |
|
305 | 308 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. |
|
306 | 309 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options |
|
307 | 310 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes |
|
308 | 311 | ; new installations should choose filesystem_v2 or objectstore (s3-based); pick filesystem_v1 when migrating from
|
309 | 312 | ; previous installations to keep the artifacts without a need of migration |
|
310 | 313 | #file_store.backend.type = filesystem_v2 |
|
311 | 314 | |
|
312 | 315 | ; filesystem options... |
|
313 | 316 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
314 | 317 | |
|
315 | 318 | ; filesystem_v2 options... |
|
316 | 319 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
317 | 320 | #file_store.filesystem_v2.shards = 8 |
|
318 | 321 | |
|
319 | 322 | ; objectstore options... |
|
320 | 323 | ; url for s3 compatible storage that allows to upload artifacts |
|
321 | 324 | ; e.g http://minio:9000 |
|
322 | 325 | #file_store.backend.type = objectstore |
|
323 | 326 | #file_store.objectstore.url = http://s3-minio:9000 |
|
324 | 327 | |
|
325 | 328 | ; a top-level bucket to put all other shards in |
|
326 | 329 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
327 | 330 | #file_store.objectstore.bucket = rhodecode-file-store |
|
328 | 331 | |
|
329 | 332 | ; number of sharded buckets to create to distribute archives across |
|
330 | 333 | ; default is 8 shards |
|
331 | 334 | #file_store.objectstore.bucket_shards = 8 |
|
332 | 335 | |
|
333 | 336 | ; key for s3 auth |
|
334 | 337 | #file_store.objectstore.key = s3admin |
|
335 | 338 | |
|
336 | 339 | ; secret for s3 auth |
|
337 | 340 | #file_store.objectstore.secret = s3secret4 |
|
338 | 341 | |
|
339 | 342 | ;region for s3 storage |
|
340 | 343 | #file_store.objectstore.region = eu-central-1 |
|
341 | 344 | |
|
342 | 345 | ; Redis url to acquire/check generation of archives locks |
|
343 | 346 | archive_cache.locking.url = redis://redis:6379/1 |
|
344 | 347 | |
|
345 | 348 | ; Storage backend, only 'filesystem' and 'objectstore' are available now |
|
346 | 349 | archive_cache.backend.type = filesystem |
|
347 | 350 | |
|
348 | 351 | ; url for s3 compatible storage that allows to upload artifacts |
|
349 | 352 | ; e.g http://minio:9000 |
|
350 | 353 | archive_cache.objectstore.url = http://s3-minio:9000 |
|
351 | 354 | |
|
352 | 355 | ; key for s3 auth |
|
353 | 356 | archive_cache.objectstore.key = key |
|
354 | 357 | |
|
355 | 358 | ; secret for s3 auth |
|
356 | 359 | archive_cache.objectstore.secret = secret |
|
357 | 360 | |
|
358 | 361 | ;region for s3 storage |
|
359 | 362 | archive_cache.objectstore.region = eu-central-1 |
|
360 | 363 | |
|
361 | 364 | ; number of sharded buckets to create to distribute archives across |
|
362 | 365 | ; default is 8 shards |
|
363 | 366 | archive_cache.objectstore.bucket_shards = 8 |
|
364 | 367 | |
|
365 | 368 | ; a top-level bucket to put all other shards in |
|
366 | 369 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number |
|
367 | 370 | archive_cache.objectstore.bucket = rhodecode-archive-cache |
|
368 | 371 | |
|
369 | 372 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
|
370 | 373 | archive_cache.objectstore.retry = false |
|
371 | 374 | |
|
372 | 375 | ; number of seconds to wait before the next retry attempt
|
373 | 376 | archive_cache.objectstore.retry_backoff = 1 |
|
374 | 377 | |
|
375 | 378 | ; how many times to retry a fetch from this backend
|
376 | 379 | archive_cache.objectstore.retry_attempts = 10 |
|
377 | 380 | |
|
378 | 381 | ; Default is $cache_dir/archive_cache if not set |
|
379 | 382 | ; Generated repo archives will be cached at this location |
|
380 | 383 | ; and served from the cache during subsequent requests for the same archive of |
|
381 | 384 | ; the repository. This path is important to be shared across filesystems and with |
|
382 | 385 | ; RhodeCode and vcsserver |
|
383 | 386 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache |
|
384 | 387 | |
|
385 | 388 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb |
|
386 | 389 | archive_cache.filesystem.cache_size_gb = 1 |
|
387 | 390 | |
|
388 | 391 | ; Eviction policy used to clear out after cache_size_gb limit is reached |
|
389 | 392 | archive_cache.filesystem.eviction_policy = least-recently-stored |
|
390 | 393 | |
|
391 | 394 | ; By default cache uses sharding technique, this specifies how many shards are there |
|
392 | 395 | ; default is 8 shards |
|
393 | 396 | archive_cache.filesystem.cache_shards = 8 |
|
394 | 397 | |
|
395 | 398 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
|
396 | 399 | archive_cache.filesystem.retry = false |
|
397 | 400 | |
|
398 | 401 | ; number of seconds to wait before the next retry attempt
|
399 | 402 | archive_cache.filesystem.retry_backoff = 1 |
|
400 | 403 | |
|
401 | 404 | ; how many times to retry a fetch from this backend
|
402 | 405 | archive_cache.filesystem.retry_attempts = 10 |
|
403 | 406 | |
|
404 | 407 | |
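The filesystem archive cache above combines a size cap (`cache_size_gb`) with a `least-recently-stored` eviction policy. A hypothetical sketch of such a policy, assuming file creation time marks when an entry was stored (illustration only, not RhodeCode's implementation):

    import os

    def evict_least_recently_stored(store_dir: str, cache_size_bytes: int) -> None:
        # Collect (stored_time, size, path) for each cached archive.
        entries = []
        for name in os.listdir(store_dir):
            path = os.path.join(store_dir, name)
            if os.path.isfile(path):
                entries.append((os.path.getctime(path), os.path.getsize(path), path))
        total = sum(size for _, size, _ in entries)
        # Remove oldest-stored entries until the cache fits under the cap.
        for _, size, path in sorted(entries):
            if total <= cache_size_bytes:
                break
            os.remove(path)
            total -= size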
|
405 | 408 | ; ############# |
|
406 | 409 | ; CELERY CONFIG |
|
407 | 410 | ; ############# |
|
408 | 411 | |
|
409 | 412 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
410 | 413 | |
|
411 | 414 | use_celery = true |
|
412 | 415 | |
|
413 | 416 | ; path to store schedule database |
|
414 | 417 | #celerybeat-schedule.path = |
|
415 | 418 | |
|
416 | 419 | ; connection url to the message broker (default redis) |
|
417 | 420 | celery.broker_url = redis://redis:6379/8 |
|
418 | 421 | |
|
419 | 422 | ; results backend to get results for (default redis) |
|
420 | 423 | celery.result_backend = redis://redis:6379/8 |
|
421 | 424 | |
|
422 | 425 | ; rabbitmq example |
|
423 | 426 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
424 | 427 | |
|
425 | 428 | ; maximum tasks to execute before worker restart |
|
426 | 429 | celery.max_tasks_per_child = 20 |
|
427 | 430 | |
|
428 | 431 | ; tasks will never be sent to the queue, but executed locally instead. |
|
429 | 432 | celery.task_always_eager = false |
|
430 | 433 | |
|
431 | 434 | ; ############# |
|
432 | 435 | ; DOGPILE CACHE |
|
433 | 436 | ; ############# |
|
434 | 437 | |
|
435 | 438 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
436 | 439 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space |
|
437 | 440 | cache_dir = /var/opt/rhodecode_data |
|
438 | 441 | |
|
439 | 442 | ; ********************************************* |
|
440 | 443 | ; `sql_cache_short` cache for heavy SQL queries |
|
441 | 444 | ; Only supported backend is `memory_lru` |
|
442 | 445 | ; ********************************************* |
|
443 | 446 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
444 | 447 | rc_cache.sql_cache_short.expiration_time = 30 |
|
445 | 448 | |
|
446 | 449 | |
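The `rc_cache.*` options in this file configure dogpile.cache regions. A minimal sketch of an analogous region using the stock dogpile.cache memory backend (the `dogpile.cache.rc.*` backends are RhodeCode-specific wrappers, so the backend name below is a stand-in):

    from dogpile.cache import make_region

    # Analogue of `sql_cache_short`: 30-second expiry, in-memory backend.
    region = make_region().configure(
        "dogpile.cache.memory",
        expiration_time=30,
    )

    @region.cache_on_arguments()
    def heavy_sql_query(repo_id: int) -> list:
        return [repo_id]  # placeholder for an expensive SQL query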
|
447 | 450 | ; ***************************************************** |
|
448 | 451 | ; `cache_repo_longterm` cache for repo object instances |
|
449 | 452 | ; Only supported backend is `memory_lru` |
|
450 | 453 | ; ***************************************************** |
|
451 | 454 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
452 | 455 | ; by default we use 30 Days, cache is still invalidated on push |
|
453 | 456 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
454 | 457 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
455 | 458 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
456 | 459 | |
|
457 | 460 | |
|
458 | 461 | ; ********************************************* |
|
459 | 462 | ; `cache_general` cache for general purpose use |
|
460 | 463 | ; for simplicity use rc.file_namespace backend, |
|
461 | 464 | ; for performance and scale use rc.redis |
|
462 | 465 | ; ********************************************* |
|
463 | 466 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
464 | 467 | rc_cache.cache_general.expiration_time = 43200 |
|
465 | 468 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
466 | 469 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
467 | 470 | |
|
468 | 471 | ; alternative `cache_general` redis backend with distributed lock |
|
469 | 472 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
470 | 473 | #rc_cache.cache_general.expiration_time = 300 |
|
471 | 474 | |
|
472 | 475 | ; redis_expiration_time needs to be greater than expiration_time
|
473 | 476 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
474 | 477 | |
|
475 | 478 | #rc_cache.cache_general.arguments.host = localhost |
|
476 | 479 | #rc_cache.cache_general.arguments.port = 6379 |
|
477 | 480 | #rc_cache.cache_general.arguments.db = 0 |
|
478 | 481 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
479 | 482 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
480 | 483 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
481 | 484 | |
|
482 | 485 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
483 | 486 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
484 | 487 | |
|
485 | 488 | ; ************************************************* |
|
486 | 489 | ; `cache_perms` cache for permission tree, auth TTL |
|
487 | 490 | ; for simplicity use rc.file_namespace backend, |
|
488 | 491 | ; for performance and scale use rc.redis |
|
489 | 492 | ; ************************************************* |
|
490 | 493 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
491 | 494 | rc_cache.cache_perms.expiration_time = 3600 |
|
492 | 495 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
493 | 496 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
494 | 497 | |
|
495 | 498 | ; alternative `cache_perms` redis backend with distributed lock |
|
496 | 499 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
497 | 500 | #rc_cache.cache_perms.expiration_time = 300 |
|
498 | 501 | |
|
499 | 502 | ; redis_expiration_time needs to be greater than expiration_time
|
500 | 503 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
501 | 504 | |
|
502 | 505 | #rc_cache.cache_perms.arguments.host = localhost |
|
503 | 506 | #rc_cache.cache_perms.arguments.port = 6379 |
|
504 | 507 | #rc_cache.cache_perms.arguments.db = 0 |
|
505 | 508 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
506 | 509 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
507 | 510 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
508 | 511 | |
|
509 | 512 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
510 | 513 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
511 | 514 | |
|
512 | 515 | ; *************************************************** |
|
513 | 516 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
514 | 517 | ; for simplicity use rc.file_namespace backend, |
|
515 | 518 | ; for performance and scale use rc.redis |
|
516 | 519 | ; *************************************************** |
|
517 | 520 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
518 | 521 | rc_cache.cache_repo.expiration_time = 2592000 |
|
519 | 522 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
520 | 523 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
521 | 524 | |
|
522 | 525 | ; alternative `cache_repo` redis backend with distributed lock |
|
523 | 526 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
524 | 527 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
525 | 528 | |
|
526 | 529 | ; redis_expiration_time needs to be greater than expiration_time
|
527 | 530 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
528 | 531 | |
|
529 | 532 | #rc_cache.cache_repo.arguments.host = localhost |
|
530 | 533 | #rc_cache.cache_repo.arguments.port = 6379 |
|
531 | 534 | #rc_cache.cache_repo.arguments.db = 1 |
|
532 | 535 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
533 | 536 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
534 | 537 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
535 | 538 | |
|
536 | 539 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
537 | 540 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
538 | 541 | |
|
539 | 542 | ; ############## |
|
540 | 543 | ; BEAKER SESSION |
|
541 | 544 | ; ############## |
|
542 | 545 | |
|
543 | 546 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed
|
544 | 547 | ; types are file, ext:redis, ext:database, ext:memcached |
|
545 | 548 | ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session |
|
546 | 549 | #beaker.session.type = file |
|
547 | 550 | #beaker.session.data_dir = %(here)s/data/sessions |
|
548 | 551 | |
|
549 | 552 | ; Redis based sessions |
|
550 | 553 | beaker.session.type = ext:redis |
|
551 | 554 | beaker.session.url = redis://redis:6379/2 |
|
552 | 555 | |
|
553 | 556 | ; DB based session, fast, and allows easy management over logged in users |
|
554 | 557 | #beaker.session.type = ext:database |
|
555 | 558 | #beaker.session.table_name = db_session |
|
556 | 559 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
557 | 560 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
558 | 561 | #beaker.session.sa.pool_recycle = 3600 |
|
559 | 562 | #beaker.session.sa.echo = false |
|
560 | 563 | |
|
561 | 564 | beaker.session.key = rhodecode |
|
562 | 565 | beaker.session.secret = develop-rc-uytcxaz |
|
563 | 566 | beaker.session.lock_dir = /data_ramdisk/lock |
|
564 | 567 | |
|
565 | 568 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
566 | 569 | ; you must disable beaker.session.secret to use this |
|
567 | 570 | #beaker.session.encrypt_key = key_for_encryption |
|
568 | 571 | #beaker.session.validate_key = validation_key |
|
569 | 572 | |
|
570 | 573 | ; Sets session as invalid (also logging out the user) if it has not been
|
571 | 574 | ; accessed for given amount of time in seconds |
|
572 | 575 | beaker.session.timeout = 2592000 |
|
573 | 576 | beaker.session.httponly = true |
|
574 | 577 | |
|
575 | 578 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
576 | 579 | #beaker.session.cookie_path = /custom_prefix |
|
577 | 580 | |
|
578 | 581 | ; Set https secure cookie |
|
579 | 582 | beaker.session.secure = false |
|
580 | 583 | |
|
581 | 584 | ; default cookie expiration time in seconds, set to `true` to set expire |
|
582 | 585 | ; at browser close |
|
583 | 586 | #beaker.session.cookie_expires = 3600 |
|
584 | 587 | |
|
585 | 588 | ; ############################# |
|
586 | 589 | ; SEARCH INDEXING CONFIGURATION |
|
587 | 590 | ; ############################# |
|
588 | 591 | |
|
589 | 592 | ; Full text search indexer is available in rhodecode-tools under |
|
590 | 593 | ; `rhodecode-tools index` command |
|
591 | 594 | |
|
592 | 595 | ; WHOOSH Backend, doesn't require additional services to run |
|
593 | 596 | ; it works well with a few dozen repos
|
594 | 597 | search.module = rhodecode.lib.index.whoosh |
|
595 | 598 | search.location = %(here)s/data/index |
|
596 | 599 | |
|
597 | 600 | ; #################### |
|
598 | 601 | ; CHANNELSTREAM CONFIG |
|
599 | 602 | ; #################### |
|
600 | 603 | |
|
601 | 604 | ; channelstream enables persistent connections and live notification |
|
602 | 605 | ; in the system. It's also used by the chat system |
|
603 | 606 | |
|
604 | 607 | channelstream.enabled = true |
|
605 | 608 | |
|
606 | 609 | ; server address for channelstream server on the backend |
|
607 | 610 | channelstream.server = channelstream:9800 |
|
608 | 611 | |
|
609 | 612 | ; location of the channelstream server from outside world |
|
610 | 613 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
611 | 614 | ; by external HTTP server such as Nginx or Apache |
|
612 | 615 | ; see Nginx/Apache configuration examples in our docs |
|
613 | 616 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
614 | 617 | channelstream.secret = ENV_GENERATED |
|
615 | 618 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history |
|
616 | 619 | |
|
617 | 620 | ; Internal application path that Javascript uses to connect into. |
|
618 | 621 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
619 | 622 | channelstream.proxy_path = /_channelstream |
|
620 | 623 | |
|
621 | 624 | |
|
622 | 625 | ; ############################## |
|
623 | 626 | ; MAIN RHODECODE DATABASE CONFIG |
|
624 | 627 | ; ############################## |
|
625 | 628 | |
|
626 | 629 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
627 | 630 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
628 | 631 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
629 | 632 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
630 | 633 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
631 | 634 | |
|
632 | 635 | sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
633 | 636 | |
|
634 | 637 | ; see sqlalchemy docs for other advanced settings |
|
635 | 638 | ; print the sql statements to output |
|
636 | 639 | sqlalchemy.db1.echo = false |
|
637 | 640 | |
|
638 | 641 | ; recycle the connections after this amount of seconds |
|
639 | 642 | sqlalchemy.db1.pool_recycle = 3600 |
|
640 | 643 | |
|
641 | 644 | ; the number of connections to keep open inside the connection pool. |
|
642 | 645 | ; 0 indicates no limit |
|
643 | 646 | ; the general calculus with gevent is: |
|
644 | 647 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
645 | 648 | ; then increase pool size + max overflow so that they add up to 500. |
|
646 | 649 | #sqlalchemy.db1.pool_size = 5 |
|
647 | 650 | |
|
648 | 651 | ; The number of connections to allow in connection pool "overflow", that is |
|
649 | 652 | ; connections that can be opened above and beyond the pool_size setting, |
|
650 | 653 | ; which defaults to five. |
|
651 | 654 | #sqlalchemy.db1.max_overflow = 10 |
|
652 | 655 | |
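As a worked example of the sizing rule above: a server tuned for 500 concurrent greenlets that all touch the database could set `sqlalchemy.db1.pool_size = 100` and `sqlalchemy.db1.max_overflow = 400`, so that 100 + 400 = 500.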
|
653 | 656 | ; Connection check ping, used to detect broken database connections |
|
654 | 657 | ; could be enabled to better handle cases if MySQL has gone away errors |
|
655 | 658 | #sqlalchemy.db1.ping_connection = true |
|
656 | 659 | |
|
657 | 660 | ; ########## |
|
658 | 661 | ; VCS CONFIG |
|
659 | 662 | ; ########## |
|
660 | 663 | vcs.server.enable = true |
|
661 | 664 | vcs.server = vcsserver:10010 |
|
662 | 665 | |
|
663 | 666 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
664 | 667 | ; Available protocols are: |
|
665 | 668 | ; `http` - use http-rpc backend (default) |
|
666 | 669 | vcs.server.protocol = http |
|
667 | 670 | |
|
668 | 671 | ; Push/Pull operations protocol, available options are: |
|
669 | 672 | ; `http` - use http-rpc backend (default) |
|
670 | 673 | vcs.scm_app_implementation = http |
|
671 | 674 | |
|
672 | 675 | ; Push/Pull operations hooks protocol, available options are: |
|
673 | 676 | ; `http` - use http-rpc backend (default) |
|
674 | 677 | ; `celery` - use celery based hooks |
|
675 | 678 | #DEPRECATED:vcs.hooks.protocol = http |
|
676 | 679 | vcs.hooks.protocol.v2 = celery |
|
677 | 680 | |
|
678 | 681 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
679 | 682 | ; accessible via network. |
|
680 | 683 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
681 | 684 | vcs.hooks.host = * |
|
682 | 685 | |
|
683 | 686 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
684 | 687 | vcs.start_server = false |
|
685 | 688 | |
|
686 | 689 | ; List of enabled VCS backends, available options are: |
|
687 | 690 | ; `hg` - mercurial |
|
688 | 691 | ; `git` - git |
|
689 | 692 | ; `svn` - subversion |
|
690 | 693 | vcs.backends = hg, git, svn |
|
691 | 694 | |
|
692 | 695 | ; Wait this number of seconds before killing connection to the vcsserver |
|
693 | 696 | vcs.connection_timeout = 3600 |
|
694 | 697 | |
|
695 | 698 | ; Cache flag to cache vcsserver remote calls locally |
|
696 | 699 | ; It uses cache_region `cache_repo` |
|
697 | 700 | vcs.methods.cache = true |
|
698 | 701 | |
|
699 | 702 | ; Filesystem location where Git lfs objects should be stored |
|
700 | 703 | vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store |
|
701 | 704 | |
|
702 | 705 | ; Filesystem location where Mercurial largefile objects should be stored |
|
703 | 706 | vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store |
|
704 | 707 | |
|
705 | 708 | ; #################################################### |
|
706 | 709 | ; Subversion proxy support (mod_dav_svn) |
|
707 | 710 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
708 | 711 | ; #################################################### |
|
709 | 712 | |
|
710 | 713 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
711 | 714 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 |
|
712 | 715 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
713 | 716 | #vcs.svn.compatible_version = 1.8 |
|
714 | 717 | |
|
715 | 718 | ; Redis connection settings for svn integrations logic |
|
716 | 719 | ; This connection string needs to be the same on ce and vcsserver |
|
717 | 720 | vcs.svn.redis_conn = redis://redis:6379/0 |
|
718 | 721 | |
|
719 | 722 | ; Enable SVN proxy of requests over HTTP |
|
720 | 723 | vcs.svn.proxy.enabled = true |
|
721 | 724 | |
|
722 | 725 | ; host to connect to running SVN subsystem |
|
723 | 726 | vcs.svn.proxy.host = http://svn:8090 |
|
724 | 727 | |
|
725 | 728 | ; Enable or disable the config file generation. |
|
726 | 729 | svn.proxy.generate_config = true |
|
727 | 730 | |
|
728 | 731 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
729 | 732 | svn.proxy.list_parent_path = true |
|
730 | 733 | |
|
731 | 734 | ; Set location and file name of generated config file. |
|
732 | 735 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf |
|
733 | 736 | |
|
734 | 737 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
735 | 738 | ; Example template can be found in the source code: |
|
736 | 739 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
737 | 740 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
738 | 741 | |
|
739 | 742 | ; Used as a prefix to the `Location` block in the generated config file. |
|
740 | 743 | ; In most cases it should be set to `/`. |
|
741 | 744 | svn.proxy.location_root = / |
|
742 | 745 | |
|
743 | 746 | ; Command to reload the mod dav svn configuration on change. |
|
744 | 747 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
745 | 748 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
746 | 749 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
747 | 750 | |
|
748 | 751 | ; If the timeout expires before the reload command finishes, the command will |
|
749 | 752 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
750 | 753 | #svn.proxy.reload_timeout = 10 |
|
751 | 754 | |
|
752 | 755 | ; #################### |
|
753 | 756 | ; SSH Support Settings |
|
754 | 757 | ; #################### |
|
755 | 758 | |
|
756 | 759 | ; Defines if a custom authorized_keys file should be created and written on |
|
757 | 760 | ; any change of user ssh keys. Setting this to false also disables the possibility
|
758 | 761 | ; of adding SSH keys by users from web interface. Super admins can still |
|
759 | 762 | ; manage SSH Keys. |
|
760 | 763 | ssh.generate_authorized_keyfile = true |
|
761 | 764 | |
|
762 | 765 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
763 | 766 | # ssh.authorized_keys_ssh_opts = |
|
764 | 767 | |
|
765 | 768 | ; Path to the authorized_keys file where the generate entries are placed. |
|
766 | 769 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
767 | 770 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
768 | 771 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode |
|
769 | 772 | |
|
770 | 773 | ; Command to execute the SSH wrapper. The binary is available in the |
|
771 | 774 | ; RhodeCode installation directory. |
|
772 | 775 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
773 | 776 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
774 | 777 | #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
775 | 778 | ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
776 | 779 | |
|
777 | 780 | ; Allow shell when executing the ssh-wrapper command |
|
778 | 781 | ssh.wrapper_cmd_allow_shell = false |
|
779 | 782 | |
|
780 | 783 | ; Enables logging, and detailed output sent back to the client during SSH
|
781 | 784 | ; operations. Useful for debugging, shouldn't be used in production. |
|
782 | 785 | ssh.enable_debug_logging = true |
|
783 | 786 | |
|
784 | 787 | ; Paths to binary executable, by default they are the names, but we can |
|
785 | 788 | ; override them if we want to use a custom one |
|
786 | 789 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
787 | 790 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
788 | 791 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
789 | 792 | |
|
790 | 793 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
791 | 794 | ; to add their own keys. |
|
792 | 795 | ssh.enable_ui_key_generator = true |
|
793 | 796 | |
|
794 | 797 | ; Statsd client config, this is used to send metrics to statsd |
|
795 | 798 | ; We recommend setting statsd_exported and scraping it using Prometheus
|
796 | 799 | #statsd.enabled = false |
|
797 | 800 | #statsd.statsd_host = 0.0.0.0 |
|
798 | 801 | #statsd.statsd_port = 8125 |
|
799 | 802 | #statsd.statsd_prefix = |
|
800 | 803 | #statsd.statsd_ipv6 = false |
|
801 | 804 | |
|
802 | 805 | ; configure logging automatically at server startup; set to false
|
803 | 806 | ; to use the below custom logging config. |
|
804 | 807 | ; RC_LOGGING_FORMATTER |
|
805 | 808 | ; RC_LOGGING_LEVEL |
|
806 | 809 | ; env variables can control the settings for logging in case of autoconfigure |
|
807 | 810 | |
|
808 | 811 | #logging.autoconfigure = true |
|
809 | 812 | |
|
810 | 813 | ; specify your own custom logging config file to configure logging |
|
811 | 814 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
812 | 815 | |
|
813 | 816 | ; Dummy marker to add new entries after. |
|
814 | 817 | ; Add any custom entries below. Please don't remove this marker. |
|
815 | 818 | custom.conf = 1 |
|
816 | 819 | |
|
817 | 820 | |
|
818 | 821 | ; ##################### |
|
819 | 822 | ; LOGGING CONFIGURATION |
|
820 | 823 | ; ##################### |
|
821 | 824 | |
|
822 | 825 | [loggers] |
|
823 | 826 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
824 | 827 | |
|
825 | 828 | [handlers] |
|
826 | 829 | keys = console, console_sql |
|
827 | 830 | |
|
828 | 831 | [formatters] |
|
829 | 832 | keys = generic, json, color_formatter, color_formatter_sql |
|
830 | 833 | |
|
831 | 834 | ; ####### |
|
832 | 835 | ; LOGGERS |
|
833 | 836 | ; ####### |
|
834 | 837 | [logger_root] |
|
835 | 838 | level = NOTSET |
|
836 | 839 | handlers = console |
|
837 | 840 | |
|
838 | 841 | [logger_sqlalchemy] |
|
839 | 842 | level = INFO |
|
840 | 843 | handlers = console_sql |
|
841 | 844 | qualname = sqlalchemy.engine |
|
842 | 845 | propagate = 0 |
|
843 | 846 | |
|
844 | 847 | [logger_beaker] |
|
845 | 848 | level = DEBUG |
|
846 | 849 | handlers = |
|
847 | 850 | qualname = beaker.container |
|
848 | 851 | propagate = 1 |
|
849 | 852 | |
|
850 | 853 | [logger_rhodecode] |
|
851 | 854 | level = DEBUG |
|
852 | 855 | handlers = |
|
853 | 856 | qualname = rhodecode |
|
854 | 857 | propagate = 1 |
|
855 | 858 | |
|
856 | 859 | [logger_ssh_wrapper] |
|
857 | 860 | level = DEBUG |
|
858 | 861 | handlers = |
|
859 | 862 | qualname = ssh_wrapper |
|
860 | 863 | propagate = 1 |
|
861 | 864 | |
|
862 | 865 | [logger_celery] |
|
863 | 866 | level = DEBUG |
|
864 | 867 | handlers = |
|
865 | 868 | qualname = celery |
|
866 | 869 | |
|
867 | 870 | |
|
868 | 871 | ; ######## |
|
869 | 872 | ; HANDLERS |
|
870 | 873 | ; ######## |
|
871 | 874 | |
|
872 | 875 | [handler_console] |
|
873 | 876 | class = StreamHandler |
|
874 | 877 | args = (sys.stderr, ) |
|
875 | 878 | level = DEBUG |
|
876 | 879 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
877 | 880 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
878 | 881 | formatter = color_formatter |
|
879 | 882 | |
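As the comment above notes, switching this handler to JSON output is a one-line change, `formatter = json`, which selects the `[formatter_json]` section defined at the end of this file.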
|
880 | 883 | [handler_console_sql] |
|
881 | 884 | ; "level = DEBUG" logs SQL queries and results. |
|
882 | 885 | ; "level = INFO" logs SQL queries. |
|
883 | 886 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
884 | 887 | class = StreamHandler |
|
885 | 888 | args = (sys.stderr, ) |
|
886 | 889 | level = WARN |
|
887 | 890 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
888 | 891 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
889 | 892 | formatter = color_formatter_sql |
|
890 | 893 | |
|
891 | 894 | ; ########## |
|
892 | 895 | ; FORMATTERS |
|
893 | 896 | ; ########## |
|
894 | 897 | |
|
895 | 898 | [formatter_generic] |
|
896 | 899 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
897 | 900 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
898 | 901 | datefmt = %Y-%m-%d %H:%M:%S |
|
899 | 902 | |
|
900 | 903 | [formatter_color_formatter] |
|
901 | 904 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
902 | 905 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
903 | 906 | datefmt = %Y-%m-%d %H:%M:%S |
|
904 | 907 | |
|
905 | 908 | [formatter_color_formatter_sql] |
|
906 | 909 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
907 | 910 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
908 | 911 | datefmt = %Y-%m-%d %H:%M:%S |
|
909 | 912 | |
|
910 | 913 | [formatter_json] |
|
911 | 914 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
912 | 915 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
@@ -1,880 +1,883 @@
|
1 | 1 | |
|
2 | 2 | ; ######################################### |
|
3 | 3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
4 | 4 | ; ######################################### |
|
5 | 5 | |
|
6 | 6 | [DEFAULT] |
|
7 | 7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
8 | 8 | debug = false |
|
9 | 9 | |
|
10 | 10 | ; ######################################################################## |
|
11 | 11 | ; EMAIL CONFIGURATION |
|
12 | 12 | ; These settings will be used by the RhodeCode mailing system |
|
13 | 13 | ; ######################################################################## |
|
14 | 14 | |
|
15 | 15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
16 | 16 | #email_prefix = [RhodeCode] |
|
17 | 17 | |
|
18 | 18 | ; email FROM address all mails will be sent |
|
19 | 19 | #app_email_from = rhodecode-noreply@localhost |
|
20 | 20 | |
|
21 | 21 | #smtp_server = mail.server.com |
|
22 | 22 | #smtp_username = |
|
23 | 23 | #smtp_password = |
|
24 | 24 | #smtp_port = |
|
25 | 25 | #smtp_use_tls = false |
|
26 | 26 | #smtp_use_ssl = true |
|
27 | 27 | |
|
28 | 28 | [server:main] |
|
29 | 29 | ; COMMON HOST/IP CONFIG. This applies mostly to the develop setup,
|
30 | 30 | ; Host and port for gunicorn are controlled by gunicorn_conf.py
|
31 | 31 | host = 127.0.0.1 |
|
32 | 32 | port = 10020 |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | ; ########################### |
|
36 | 36 | ; GUNICORN APPLICATION SERVER |
|
37 | 37 | ; ########################### |
|
38 | 38 | |
|
39 | 39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini |
|
40 | 40 | |
|
41 | 41 | ; Module to use, this setting shouldn't be changed |
|
42 | 42 | use = egg:gunicorn#main |
|
43 | 43 | |
|
44 | 44 | ; Prefix middleware for RhodeCode. |
|
45 | 45 | ; recommended when using proxy setup. |
|
46 | 46 | ; allows to set RhodeCode under a prefix in server. |
|
47 | 47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
48 | 48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
49 | 49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
50 | 50 | ; to make your cookies only work on prefix url |
|
51 | 51 | [filter:proxy-prefix] |
|
52 | 52 | use = egg:PasteDeploy#prefix |
|
53 | 53 | prefix = / |
|
54 | 54 | |
|
55 | 55 | [app:main] |
|
56 | 56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
57 | 57 | ; of this file |
|
58 | 58 | ; Each option in the app:main can be override by an environmental variable |
|
59 | 59 | ; |
|
60 | 60 | ;To override an option: |
|
61 | 61 | ; |
|
62 | 62 | ;RC_<KeyName> |
|
63 | 63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
64 | 64 | ;For example, if you have these configuration settings: |
|
65 | 65 | ;rc_cache.repo_object.backend = foo |
|
66 | 66 | ;can be overridden by |
|
67 | 67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
68 | 68 | |
|
69 | 69 | use = egg:rhodecode-enterprise-ce |
|
70 | 70 | |
|
71 | 71 | ; enable proxy prefix middleware, defined above |
|
72 | 72 | #filter-with = proxy-prefix |
|
73 | 73 | |
|
74 | ; control if environmental variables are expanded into the .ini settings
|
75 | #rhodecode.env_expand = true | |
|
76 | ||
|
74 | 77 | ; encryption key used to encrypt social plugin tokens, |
|
75 | 78 | ; remote_urls with credentials etc, if not set it defaults to |
|
76 | 79 | ; `beaker.session.secret` |
|
77 | 80 | #rhodecode.encrypted_values.secret = |
|
78 | 81 | |
|
79 | 82 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
80 | 83 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
81 | 84 | #rhodecode.encrypted_values.strict = false |
|
82 | 85 | |
|
83 | 86 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
84 | 87 | ; fernet is safer, and we strongly recommend switching to it. |
|
85 | 88 | ; Due to backward compatibility aes is used as default. |
|
86 | 89 | #rhodecode.encrypted_values.algorithm = fernet |
|
87 | 90 | |
|
88 | 91 | ; Return gzipped responses from RhodeCode (static files/application) |
|
89 | 92 | gzip_responses = false |
|
90 | 93 | |
|
91 | 94 | ; Auto-generate javascript routes file on startup |
|
92 | 95 | generate_js_files = false |
|
93 | 96 | |
|
94 | 97 | ; System global default language. |
|
95 | 98 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
96 | 99 | lang = en |
|
97 | 100 | |
|
98 | 101 | ; Perform a full repository scan and import on each server start. |
|
99 | 102 | ; Settings this to true could lead to very long startup time. |
|
100 | 103 | startup.import_repos = false |
|
101 | 104 | |
|
102 | 105 | ; URL at which the application is running. This is used for Bootstrapping |
|
103 | 106 | ; requests in context when no web request is available. Used in ishell, or |
|
104 | 107 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
105 | 108 | app.base_url = http://rhodecode.local |
|
106 | 109 | |
|
107 | 110 | ; Host at which the Service API is running. |
|
108 | 111 | app.service_api.host = http://rhodecode.local:10020 |
|
109 | 112 | |
|
110 | 113 | ; Secret for Service API authentication. |
|
111 | 114 | app.service_api.token = |
|
112 | 115 | |
|
113 | 116 | ; Unique application ID. Should be a random unique string for security. |
|
114 | 117 | app_instance_uuid = rc-production |
|
115 | 118 | |
|
116 | 119 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
117 | 120 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
118 | 121 | ; partially. E.g 512000 == 512Kb |
|
119 | 122 | cut_off_limit_diff = 512000 |
|
120 | 123 | |
|
121 | 124 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
122 | 125 | ; file inside diff which exceeds this limit will be displayed partially. |
|
123 | 126 | ; E.g 128000 == 128Kb |
|
124 | 127 | cut_off_limit_file = 128000 |
|
125 | 128 | |
|
126 | 129 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
127 | 130 | vcs_full_cache = true |
|
128 | 131 | |
|
129 | 132 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
130 | 133 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
131 | 134 | force_https = false |
|
132 | 135 | |
|
133 | 136 | ; use Strict-Transport-Security headers |
|
134 | 137 | use_htsts = false |
|
135 | 138 | |
|
136 | 139 | ; Set to true if your repos are exposed using the dumb protocol |
|
137 | 140 | git_update_server_info = false |
|
138 | 141 | |
|
139 | 142 | ; RSS/ATOM feed options |
|
140 | 143 | rss_cut_off_limit = 256000 |
|
141 | 144 | rss_items_per_page = 10 |
|
142 | 145 | rss_include_diff = false |
|
143 | 146 | |
|
144 | 147 | ; gist URL alias, used to create nicer URLs for gists. This should be a

145 | 148 | ; URL that rewrites to _admin/gists/{gistid}.

146 | 149 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal

147 | 150 | ; RhodeCode URL, i.e. http[s]://rhodecode.server/_admin/gists/{gistid}
|
148 | 151 | gist_alias_url = |
|
149 | 152 | |
|
150 | 153 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
151 | 154 | ; used for access. |
|
152 | 155 | ; Adding ?auth_token=TOKEN_HASH to the URL authenticates this request as if it

153 | 156 | ; came from the logged-in user who owns this authentication token.

154 | 157 | ; Additionally, the @TOKEN syntax can be used to bind the view to a specific

155 | 158 | ; authentication token. Such a view would only be accessible when used together

156 | 159 | ; with this authentication token
|
157 | 160 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
158 | 161 | ; The list should be "," separated and on a single line. |
|
159 | 162 | ; Most common views to enable: |
|
160 | 163 | |
|
161 | 164 | # RepoCommitsView:repo_commit_download |
|
162 | 165 | # RepoCommitsView:repo_commit_patch |
|
163 | 166 | # RepoCommitsView:repo_commit_raw |
|
164 | 167 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
165 | 168 | # RepoFilesView:repo_files_diff |
|
166 | 169 | # RepoFilesView:repo_archivefile |
|
167 | 170 | # RepoFilesView:repo_file_raw |
|
168 | 171 | # GistView:* |
|
169 | 172 | api_access_controllers_whitelist = |
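As an illustration of the token syntax above, a whitelisted raw-file view could then be fetched non-interactively. A hedged sketch; the host, repo path, exact raw-URL layout, and TOKEN_HASH are hypothetical placeholders:

    # Sketch: authenticate a single request with ?auth_token=...
    # Assumes RepoFilesView:repo_file_raw is in the whitelist above.
    import urllib.parse
    import urllib.request

    base = "https://rhodecode.example.com/myrepo/raw/tip/README.rst"
    url = base + "?" + urllib.parse.urlencode({"auth_token": "TOKEN_HASH"})
    with urllib.request.urlopen(url) as resp:
        data = resp.read()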
|
170 | 173 | |
|
171 | 174 | ; Default encoding used to convert from and to unicode |
|
172 | 175 | ; can also be a comma-separated list of encodings in case of mixed encodings
|
173 | 176 | default_encoding = UTF-8 |
|
174 | 177 | |
|
175 | 178 | ; instance-id prefix |
|
176 | 179 | ; a prefix key for this instance used for cache invalidation when running |
|
177 | 180 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
178 | 181 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
179 | 182 | instance_id = |
|
180 | 183 | |
|
181 | 184 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
182 | 185 | ; of an authentication plugin even if it is disabled by its settings.
|
183 | 186 | ; This could be useful if you are unable to log in to the system due to broken |
|
184 | 187 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
185 | 188 | ; module to log in again and fix the settings. |
|
186 | 189 | ; Available builtin plugin IDs (hash is part of the ID): |
|
187 | 190 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
188 | 191 | ; egg:rhodecode-enterprise-ce#pam |
|
189 | 192 | ; egg:rhodecode-enterprise-ce#ldap |
|
190 | 193 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
191 | 194 | ; egg:rhodecode-enterprise-ce#headers |
|
192 | 195 | ; egg:rhodecode-enterprise-ce#crowd |
|
193 | 196 | |
|
194 | 197 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
195 | 198 | |
|
196 | 199 | ; Flag to control loading of legacy plugins in py:/path format |
|
197 | 200 | auth_plugin.import_legacy_plugins = true |
|
198 | 201 | |
|
199 | 202 | ; alternative HTTP return code for failed authentication. Default HTTP

200 | 203 | ; response is 401 HTTPUnauthorized. Currently HG clients have trouble

201 | 204 | ; handling that, causing a series of failed authentication calls.
|
202 | 205 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
203 | 206 | ; This will be served instead of default 401 on bad authentication |
|
204 | 207 | auth_ret_code = |
|
205 | 208 | |
|
206 | 209 | ; use special detection method when serving auth_ret_code, instead of serving |
|
207 | 210 | ; ret_code directly, use 401 initially (which triggers a credentials prompt)
|
208 | 211 | ; and then serve auth_ret_code to clients |
|
209 | 212 | auth_ret_code_detection = false |
|
210 | 213 | |
|
211 | 214 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
212 | 215 | ; codes don't break the transactions while 4XX codes do |
|
213 | 216 | lock_ret_code = 423 |
|
214 | 217 | |
|
215 | 218 | ; Filesystem location where repositories should be stored
|
216 | 219 | repo_store.path = /var/opt/rhodecode_repo_store |
|
217 | 220 | |
|
218 | 221 | ; allows setting up custom hooks on the settings page
|
219 | 222 | allow_custom_hooks_settings = true |
|
220 | 223 | |
|
221 | 224 | ; Generated license token required for EE edition license. |
|
222 | 225 | ; A newly generated token value can be found on the Admin > settings > license page.
|
223 | 226 | license_token = |
|
224 | 227 | |
|
225 | 228 | ; This flag hides sensitive information on the license page such as token, and license data |
|
226 | 229 | license.hide_license_info = false |
|
227 | 230 | |
|
228 | 231 | ; Import EE license from this license path |
|
229 | 232 | #license.import_path = %(here)s/rhodecode_enterprise.license |
|
230 | 233 | |
|
231 | 234 | ; import license 'if-missing' or 'force' (always override) |
|
232 | 235 | ; if-missing means apply license if it doesn't exist. 'force' option always overrides it |
|
233 | 236 | license.import_path_mode = if-missing |
|
234 | 237 | |
|
235 | 238 | ; supervisor connection uri, for managing supervisor and logs. |
|
236 | 239 | supervisor.uri = |
|
237 | 240 | |
|
238 | 241 | ; supervisord group name/id that this RC instance should handle
|
239 | 242 | supervisor.group_id = prod |
|
240 | 243 | |
|
241 | 244 | ; Display extended labs settings |
|
242 | 245 | labs_settings_active = true |
|
243 | 246 | |
|
244 | 247 | ; Custom exception store path, defaults to TMPDIR |
|
245 | 248 | ; This is used to store exceptions from RhodeCode in a shared directory
|
246 | 249 | #exception_tracker.store_path = |
|
247 | 250 | |
|
248 | 251 | ; Send email with exception details when it happens |
|
249 | 252 | #exception_tracker.send_email = false |
|
250 | 253 | |
|
251 | 254 | ; Comma separated list of recipients for exception emails, |
|
252 | 255 | ; e.g. admin@rhodecode.com,devops@rhodecode.com
|
253 | 256 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
254 | 257 | #exception_tracker.send_email_recipients = |
|
255 | 258 | |
|
256 | 259 | ; optional prefix to add to the email subject
|
257 | 260 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
258 | 261 | |
|
259 | 262 | ; NOTE: this setting IS DEPRECATED: |
|
260 | 263 | ; file_store backend is always enabled |
|
261 | 264 | #file_store.enabled = true |
|
262 | 265 | |
|
263 | 266 | ; NOTE: this setting IS DEPRECATED: |
|
264 | 267 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
265 | 268 | ; Storage backend, available options are: local |
|
266 | 269 | #file_store.backend = local |
|
267 | 270 | |
|
268 | 271 | ; NOTE: this setting IS DEPRECATED: |
|
269 | 272 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
270 | 273 | ; path to store the uploaded binaries and artifacts |
|
271 | 274 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
272 | 275 | |
|
273 | 276 | ; Artifacts file-store is used to store comment attachments and artifact uploads.

274 | 277 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options

275 | 278 | ; filesystem_v1 is backwards compatible with pre-5.1 storage changes

276 | 279 | ; new installations should choose filesystem_v2 or objectstore (s3-based); pick filesystem_v1 when migrating from

277 | 280 | ; previous installations to keep the artifacts without a need for migration
|
278 | 281 | #file_store.backend.type = filesystem_v2 |
|
279 | 282 | |
|
280 | 283 | ; filesystem options... |
|
281 | 284 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
282 | 285 | |
|
283 | 286 | ; filesystem_v2 options... |
|
284 | 287 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
285 | 288 | #file_store.filesystem_v2.shards = 8 |
|
286 | 289 | |
|
287 | 290 | ; objectstore options... |
|
288 | 291 | ; URL for s3-compatible storage that allows uploading artifacts

289 | 292 | ; e.g. http://minio:9000
|
290 | 293 | #file_store.backend.type = objectstore |
|
291 | 294 | #file_store.objectstore.url = http://s3-minio:9000 |
|
292 | 295 | |
|
293 | 296 | ; a top-level bucket to put all other shards in |
|
294 | 297 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
295 | 298 | #file_store.objectstore.bucket = rhodecode-file-store |
|
296 | 299 | |
|
297 | 300 | ; number of sharded buckets to create to distribute archives across |
|
298 | 301 | ; default is 8 shards |
|
299 | 302 | #file_store.objectstore.bucket_shards = 8 |
|
300 | 303 | |
|
301 | 304 | ; key for s3 auth |
|
302 | 305 | #file_store.objectstore.key = s3admin |
|
303 | 306 | |
|
304 | 307 | ; secret for s3 auth |
|
305 | 308 | #file_store.objectstore.secret = s3secret4 |
|
306 | 309 | |
|
307 | 310 | ; region for s3 storage
|
308 | 311 | #file_store.objectstore.region = eu-central-1 |
|
309 | 312 | |
|
310 | 313 | ; Redis url to acquire/check generation of archives locks |
|
311 | 314 | archive_cache.locking.url = redis://redis:6379/1 |
|
312 | 315 | |
|
313 | 316 | ; Storage backend, only 'filesystem' and 'objectstore' are available now |
|
314 | 317 | archive_cache.backend.type = filesystem |
|
315 | 318 | |
|
316 | 319 | ; URL for s3-compatible storage that allows uploading artifacts

317 | 320 | ; e.g. http://minio:9000
|
318 | 321 | archive_cache.objectstore.url = http://s3-minio:9000 |
|
319 | 322 | |
|
320 | 323 | ; key for s3 auth |
|
321 | 324 | archive_cache.objectstore.key = key |
|
322 | 325 | |
|
323 | 326 | ; secret for s3 auth |
|
324 | 327 | archive_cache.objectstore.secret = secret |
|
325 | 328 | |
|
326 | 329 | ; region for s3 storage
|
327 | 330 | archive_cache.objectstore.region = eu-central-1 |
|
328 | 331 | |
|
329 | 332 | ; number of sharded buckets to create to distribute archives across |
|
330 | 333 | ; default is 8 shards |
|
331 | 334 | archive_cache.objectstore.bucket_shards = 8 |
|
332 | 335 | |
|
333 | 336 | ; a top-level bucket to put all other shards in |
|
334 | 337 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number |
|
335 | 338 | archive_cache.objectstore.bucket = rhodecode-archive-cache |
|
336 | 339 | |
|
337 | 340 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
|
338 | 341 | archive_cache.objectstore.retry = false |
|
339 | 342 | |
|
340 | 343 | ; number of seconds to wait before the next try when retrying
|
341 | 344 | archive_cache.objectstore.retry_backoff = 1 |
|
342 | 345 | |
|
343 | 346 | ; how many times to retry a fetch from this backend
|
344 | 347 | archive_cache.objectstore.retry_attempts = 10 |
|
345 | 348 | |
|
346 | 349 | ; Default is $cache_dir/archive_cache if not set |
|
347 | 350 | ; Generated repo archives will be cached at this location |
|
348 | 351 | ; and served from the cache during subsequent requests for the same archive of |
|
349 | 352 | ; the repository. This path is important to be shared across filesystems and with |
|
350 | 353 | ; RhodeCode and vcsserver |
|
351 | 354 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache |
|
352 | 355 | |
|
353 | 356 | ; The limit in GB sets how much data we cache before recycling least-recently-used entries; defaults to 10 GB
|
354 | 357 | archive_cache.filesystem.cache_size_gb = 40 |
|
355 | 358 | |
|
356 | 359 | ; Eviction policy used to clear out after cache_size_gb limit is reached |
|
357 | 360 | archive_cache.filesystem.eviction_policy = least-recently-stored |
|
358 | 361 | |
|
359 | 362 | ; By default the cache uses a sharding technique; this specifies how many shards there are
|
360 | 363 | ; default is 8 shards |
|
361 | 364 | archive_cache.filesystem.cache_shards = 8 |
|
362 | 365 | |
|
363 | 366 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
|
364 | 367 | archive_cache.filesystem.retry = false |
|
365 | 368 | |
|
366 | 369 | ; number of seconds to wait before the next try when retrying
|
367 | 370 | archive_cache.filesystem.retry_backoff = 1 |
|
368 | 371 | |
|
369 | 372 | ; how many times to retry a fetch from this backend
|
370 | 373 | archive_cache.filesystem.retry_attempts = 10 |
|
371 | 374 | |
|
372 | 375 | |
|
373 | 376 | ; ############# |
|
374 | 377 | ; CELERY CONFIG |
|
375 | 378 | ; ############# |
|
376 | 379 | |
|
377 | 380 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
378 | 381 | |
|
379 | 382 | use_celery = true |
|
380 | 383 | |
|
381 | 384 | ; path to store schedule database |
|
382 | 385 | #celerybeat-schedule.path = |
|
383 | 386 | |
|
384 | 387 | ; connection url to the message broker (default redis) |
|
385 | 388 | celery.broker_url = redis://redis:6379/8 |
|
386 | 389 | |
|
387 | 390 | ; results backend to get results for (default redis) |
|
388 | 391 | celery.result_backend = redis://redis:6379/8 |
|
389 | 392 | |
|
390 | 393 | ; rabbitmq example |
|
391 | 394 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
392 | 395 | |
|
393 | 396 | ; maximum tasks to execute before worker restart |
|
394 | 397 | celery.max_tasks_per_child = 20 |
|
395 | 398 | |
|
396 | 399 | ; tasks will never be sent to the queue, but executed locally instead. |
|
397 | 400 | celery.task_always_eager = false |
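Since the broker and result backend are plain Celery settings, the same URLs work in a standalone Celery app. A sketch for verifying broker connectivity; illustrative only, as RhodeCode builds its real app via rhodecode.lib.celerylib.loader:

    # Bare Celery app using the broker/result URLs from above.
    from celery import Celery

    app = Celery(
        "rc-sketch",
        broker="redis://redis:6379/8",
        backend="redis://redis:6379/8",
    )
    app.conf.worker_max_tasks_per_child = 20  # mirrors celery.max_tasks_per_child
    app.conf.task_always_eager = False        # mirrors celery.task_always_eager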
|
398 | 401 | |
|
399 | 402 | ; ############# |
|
400 | 403 | ; DOGPILE CACHE |
|
401 | 404 | ; ############# |
|
402 | 405 | |
|
403 | 406 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
404 | 407 | ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
|
405 | 408 | cache_dir = /var/opt/rhodecode_data |
|
406 | 409 | |
|
407 | 410 | ; ********************************************* |
|
408 | 411 | ; `sql_cache_short` cache for heavy SQL queries |
|
409 | 412 | ; Only supported backend is `memory_lru` |
|
410 | 413 | ; ********************************************* |
|
411 | 414 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
412 | 415 | rc_cache.sql_cache_short.expiration_time = 30 |
|
413 | 416 | |
|
414 | 417 | |
|
415 | 418 | ; ***************************************************** |
|
416 | 419 | ; `cache_repo_longterm` cache for repo object instances |
|
417 | 420 | ; Only supported backend is `memory_lru` |
|
418 | 421 | ; ***************************************************** |
|
419 | 422 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
420 | 423 | ; by default we use 30 days; the cache is still invalidated on push
|
421 | 424 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
422 | 425 | ; max items in the LRU cache; set to a smaller number to save memory and expire the least recently used entries
|
423 | 426 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
424 | 427 | |
|
425 | 428 | |
|
426 | 429 | ; ********************************************* |
|
427 | 430 | ; `cache_general` cache for general purpose use |
|
428 | 431 | ; for simplicity use rc.file_namespace backend, |
|
429 | 432 | ; for performance and scale use rc.redis |
|
430 | 433 | ; ********************************************* |
|
431 | 434 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
432 | 435 | rc_cache.cache_general.expiration_time = 43200 |
|
433 | 436 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
434 | 437 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
435 | 438 | |
|
436 | 439 | ; alternative `cache_general` redis backend with distributed lock |
|
437 | 440 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
438 | 441 | #rc_cache.cache_general.expiration_time = 300 |
|
439 | 442 | |
|
440 | 443 | ; redis_expiration_time needs to be greater than expiration_time
|
441 | 444 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
442 | 445 | |
|
443 | 446 | #rc_cache.cache_general.arguments.host = localhost |
|
444 | 447 | #rc_cache.cache_general.arguments.port = 6379 |
|
445 | 448 | #rc_cache.cache_general.arguments.db = 0 |
|
446 | 449 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
447 | 450 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
448 | 451 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
449 | 452 | |
|
450 | 453 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
451 | 454 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
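The arguments above map onto a standard dogpile.cache Redis region. A sketch with the stock `dogpile.cache.redis` backend; `dogpile.cache.rc.redis` is assumed to be RhodeCode's wrapper accepting the same arguments:

    # Stock dogpile.cache region configured like the commented block above.
    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=300,
        arguments={
            "host": "localhost",
            "port": 6379,
            "db": 0,
            "socket_timeout": 30,
            "redis_expiration_time": 7200,  # must be greater than expiration_time
            "distributed_lock": True,
        },
    )

    @region.cache_on_arguments()
    def heavy_lookup(key):
        return compute(key)  # `compute` is a hypothetical expensive function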
|
452 | 455 | |
|
453 | 456 | ; ************************************************* |
|
454 | 457 | ; `cache_perms` cache for permission tree, auth TTL |
|
455 | 458 | ; for simplicity use rc.file_namespace backend, |
|
456 | 459 | ; for performance and scale use rc.redis |
|
457 | 460 | ; ************************************************* |
|
458 | 461 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
459 | 462 | rc_cache.cache_perms.expiration_time = 3600 |
|
460 | 463 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
461 | 464 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
462 | 465 | |
|
463 | 466 | ; alternative `cache_perms` redis backend with distributed lock |
|
464 | 467 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
465 | 468 | #rc_cache.cache_perms.expiration_time = 300 |
|
466 | 469 | |
|
467 | 470 | ; redis_expiration_time needs to be greater than expiration_time
|
468 | 471 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
469 | 472 | |
|
470 | 473 | #rc_cache.cache_perms.arguments.host = localhost |
|
471 | 474 | #rc_cache.cache_perms.arguments.port = 6379 |
|
472 | 475 | #rc_cache.cache_perms.arguments.db = 0 |
|
473 | 476 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
474 | 477 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
475 | 478 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
476 | 479 | |
|
477 | 480 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
478 | 481 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
479 | 482 | |
|
480 | 483 | ; *************************************************** |
|
481 | 484 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
482 | 485 | ; for simplicity use rc.file_namespace backend, |
|
483 | 486 | ; for performance and scale use rc.redis |
|
484 | 487 | ; *************************************************** |
|
485 | 488 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
486 | 489 | rc_cache.cache_repo.expiration_time = 2592000 |
|
487 | 490 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
488 | 491 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
489 | 492 | |
|
490 | 493 | ; alternative `cache_repo` redis backend with distributed lock |
|
491 | 494 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
492 | 495 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
493 | 496 | |
|
494 | 497 | ; redis_expiration_time needs to be greater than expiration_time
|
495 | 498 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
496 | 499 | |
|
497 | 500 | #rc_cache.cache_repo.arguments.host = localhost |
|
498 | 501 | #rc_cache.cache_repo.arguments.port = 6379 |
|
499 | 502 | #rc_cache.cache_repo.arguments.db = 1 |
|
500 | 503 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
501 | 504 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
502 | 505 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
503 | 506 | |
|
504 | 507 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
505 | 508 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
506 | 509 | |
|
507 | 510 | ; ############## |
|
508 | 511 | ; BEAKER SESSION |
|
509 | 512 | ; ############## |
|
510 | 513 | |
|
511 | 514 | ; beaker.session.type is the storage type for logged-in users' sessions. Currently allowed

512 | 515 | ; types are file, ext:redis, ext:database, ext:memcached

513 | 516 | ; The fastest ones are ext:redis and ext:database; DO NOT use the memory type for sessions
|
514 | 517 | #beaker.session.type = file |
|
515 | 518 | #beaker.session.data_dir = %(here)s/data/sessions |
|
516 | 519 | |
|
517 | 520 | ; Redis based sessions |
|
518 | 521 | beaker.session.type = ext:redis |
|
519 | 522 | beaker.session.url = redis://redis:6379/2 |
|
520 | 523 | |
|
521 | 524 | ; DB-based session, fast, and allows easy management of logged-in users
|
522 | 525 | #beaker.session.type = ext:database |
|
523 | 526 | #beaker.session.table_name = db_session |
|
524 | 527 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
525 | 528 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
526 | 529 | #beaker.session.sa.pool_recycle = 3600 |
|
527 | 530 | #beaker.session.sa.echo = false |
|
528 | 531 | |
|
529 | 532 | beaker.session.key = rhodecode |
|
530 | 533 | beaker.session.secret = production-rc-uytcxaz |
|
531 | 534 | beaker.session.lock_dir = /data_ramdisk/lock |
|
532 | 535 | |
|
533 | 536 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
534 | 537 | ; you must disable beaker.session.secret to use this |
|
535 | 538 | #beaker.session.encrypt_key = key_for_encryption |
|
536 | 539 | #beaker.session.validate_key = validation_key |
|
537 | 540 | |
|
538 | 541 | ; Sets the session as invalid (also logging out the user) if it has not been

539 | 542 | ; accessed for the given amount of time in seconds
|
540 | 543 | beaker.session.timeout = 2592000 |
|
541 | 544 | beaker.session.httponly = true |
|
542 | 545 | |
|
543 | 546 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
544 | 547 | #beaker.session.cookie_path = /custom_prefix |
|
545 | 548 | |
|
546 | 549 | ; Set https secure cookie |
|
547 | 550 | beaker.session.secure = false |
|
548 | 551 | |
|
549 | 552 | ; default cookie expiration time in seconds; set to `true` to expire
|
550 | 553 | ; at browser close |
|
551 | 554 | #beaker.session.cookie_expires = 3600 |
|
552 | 555 | |
|
553 | 556 | ; ############################# |
|
554 | 557 | ; SEARCH INDEXING CONFIGURATION |
|
555 | 558 | ; ############################# |
|
556 | 559 | |
|
557 | 560 | ; Full text search indexer is available in rhodecode-tools under |
|
558 | 561 | ; `rhodecode-tools index` command |
|
559 | 562 | |
|
560 | 563 | ; WHOOSH Backend, doesn't require additional services to run |
|
561 | 564 | ; it works well with a few dozen repos
|
562 | 565 | search.module = rhodecode.lib.index.whoosh |
|
563 | 566 | search.location = %(here)s/data/index |
|
564 | 567 | |
|
565 | 568 | ; #################### |
|
566 | 569 | ; CHANNELSTREAM CONFIG |
|
567 | 570 | ; #################### |
|
568 | 571 | |
|
569 | 572 | ; channelstream enables persistent connections and live notifications
|
570 | 573 | ; in the system. It's also used by the chat system |
|
571 | 574 | |
|
572 | 575 | channelstream.enabled = true |
|
573 | 576 | |
|
574 | 577 | ; server address for channelstream server on the backend |
|
575 | 578 | channelstream.server = channelstream:9800 |
|
576 | 579 | |
|
577 | 580 | ; location of the channelstream server from outside world |
|
578 | 581 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
579 | 582 | ; by external HTTP server such as Nginx or Apache |
|
580 | 583 | ; see Nginx/Apache configuration examples in our docs |
|
581 | 584 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
582 | 585 | channelstream.secret = ENV_GENERATED |
|
583 | 586 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history |
|
584 | 587 | |
|
585 | 588 | ; Internal application path that JavaScript uses to connect to.

586 | 589 | ; If you use a proxy-prefix, the prefix should be added before /_channelstream
|
587 | 590 | channelstream.proxy_path = /_channelstream |
|
588 | 591 | |
|
589 | 592 | |
|
590 | 593 | ; ############################## |
|
591 | 594 | ; MAIN RHODECODE DATABASE CONFIG |
|
592 | 595 | ; ############################## |
|
593 | 596 | |
|
594 | 597 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
595 | 598 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
596 | 599 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
597 | 600 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
598 | 601 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
599 | 602 | |
|
600 | 603 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
601 | 604 | |
|
602 | 605 | ; see sqlalchemy docs for other advanced settings |
|
603 | 606 | ; print the sql statements to output |
|
604 | 607 | sqlalchemy.db1.echo = false |
|
605 | 608 | |
|
606 | 609 | ; recycle the connections after this amount of seconds |
|
607 | 610 | sqlalchemy.db1.pool_recycle = 3600 |
|
608 | 611 | |
|
609 | 612 | ; the number of connections to keep open inside the connection pool. |
|
610 | 613 | ; 0 indicates no limit |
|
611 | 614 | ; the general calculation with gevent is (a worked example follows below):
|
612 | 615 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
613 | 616 | ; then increase pool size + max overflow so that they add up to 500. |
|
614 | 617 | #sqlalchemy.db1.pool_size = 5 |
|
615 | 618 | |
|
616 | 619 | ; The number of connections to allow in connection pool "overflow", that is |
|
617 | 620 | ; connections that can be opened above and beyond the pool_size setting, |
|
618 | 621 | ; which defaults to five. |
|
619 | 622 | #sqlalchemy.db1.max_overflow = 10 |
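A worked instance of that sizing rule, with illustrative numbers:

    # 500 concurrent greenlets doing DB access -> pool_size + max_overflow = 500
    max_greenlets = 500
    pool_size = 100                           # persistent pooled connections
    max_overflow = max_greenlets - pool_size  # burst connections: 400
    assert pool_size + max_overflow == max_greenlets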
|
620 | 623 | |
|
621 | 624 | ; Connection check ping, used to detect broken database connections |
|
622 | 625 | ; can be enabled to better handle "MySQL has gone away" errors
|
623 | 626 | #sqlalchemy.db1.ping_connection = true |
|
624 | 627 | |
|
625 | 628 | ; ########## |
|
626 | 629 | ; VCS CONFIG |
|
627 | 630 | ; ########## |
|
628 | 631 | vcs.server.enable = true |
|
629 | 632 | vcs.server = vcsserver:10010 |
|
630 | 633 | |
|
631 | 634 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
632 | 635 | ; Available protocols are: |
|
633 | 636 | ; `http` - use http-rpc backend (default) |
|
634 | 637 | vcs.server.protocol = http |
|
635 | 638 | |
|
636 | 639 | ; Push/Pull operations protocol, available options are: |
|
637 | 640 | ; `http` - use http-rpc backend (default) |
|
638 | 641 | vcs.scm_app_implementation = http |
|
639 | 642 | |
|
640 | 643 | ; Push/Pull operations hooks protocol, available options are: |
|
641 | 644 | ; `http` - use http-rpc backend (default) |
|
642 | 645 | ; `celery` - use celery based hooks |
|
643 | 646 | #DEPRECATED:vcs.hooks.protocol = http |
|
644 | 647 | vcs.hooks.protocol.v2 = celery |
|
645 | 648 | |
|
646 | 649 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
647 | 650 | ; accessible via the network.
|
648 | 651 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
649 | 652 | vcs.hooks.host = * |
|
650 | 653 | |
|
651 | 654 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
652 | 655 | vcs.start_server = false |
|
653 | 656 | |
|
654 | 657 | ; List of enabled VCS backends, available options are: |
|
655 | 658 | ; `hg` - mercurial |
|
656 | 659 | ; `git` - git |
|
657 | 660 | ; `svn` - subversion |
|
658 | 661 | vcs.backends = hg, git, svn |
|
659 | 662 | |
|
660 | 663 | ; Wait this number of seconds before killing connection to the vcsserver |
|
661 | 664 | vcs.connection_timeout = 3600 |
|
662 | 665 | |
|
663 | 666 | ; Cache flag to cache vcsserver remote calls locally |
|
664 | 667 | ; It uses cache_region `cache_repo` |
|
665 | 668 | vcs.methods.cache = true |
|
666 | 669 | |
|
667 | 670 | ; Filesystem location where Git lfs objects should be stored |
|
668 | 671 | vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store |
|
669 | 672 | |
|
670 | 673 | ; Filesystem location where Mercurial largefile objects should be stored |
|
671 | 674 | vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store |
|
672 | 675 | |
|
673 | 676 | ; #################################################### |
|
674 | 677 | ; Subversion proxy support (mod_dav_svn) |
|
675 | 678 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
676 | 679 | ; #################################################### |
|
677 | 680 | |
|
678 | 681 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
679 | 682 | ; Set a numeric version for your current SVN, e.g. 1.8 or 1.12
|
680 | 683 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
681 | 684 | #vcs.svn.compatible_version = 1.8 |
|
682 | 685 | |
|
683 | 686 | ; Redis connection settings for svn integrations logic |
|
684 | 687 | ; This connection string needs to be the same on CE and vcsserver
|
685 | 688 | vcs.svn.redis_conn = redis://redis:6379/0 |
|
686 | 689 | |
|
687 | 690 | ; Enable SVN proxy of requests over HTTP |
|
688 | 691 | vcs.svn.proxy.enabled = true |
|
689 | 692 | |
|
690 | 693 | ; host to connect to running SVN subsystem |
|
691 | 694 | vcs.svn.proxy.host = http://svn:8090 |
|
692 | 695 | |
|
693 | 696 | ; Enable or disable the config file generation. |
|
694 | 697 | svn.proxy.generate_config = true |
|
695 | 698 | |
|
696 | 699 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
697 | 700 | svn.proxy.list_parent_path = true |
|
698 | 701 | |
|
699 | 702 | ; Set location and file name of generated config file. |
|
700 | 703 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf |
|
701 | 704 | |
|
702 | 705 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
703 | 706 | ; Example template can be found in the source code: |
|
704 | 707 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
705 | 708 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
706 | 709 | |
|
707 | 710 | ; Used as a prefix to the `Location` block in the generated config file. |
|
708 | 711 | ; In most cases it should be set to `/`. |
|
709 | 712 | svn.proxy.location_root = / |
|
710 | 713 | |
|
711 | 714 | ; Command to reload the mod dav svn configuration on change. |
|
712 | 715 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
713 | 716 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
714 | 717 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
715 | 718 | |
|
716 | 719 | ; If the timeout expires before the reload command finishes, the command will |
|
717 | 720 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
718 | 721 | #svn.proxy.reload_timeout = 10 |
|
719 | 722 | |
|
720 | 723 | ; #################### |
|
721 | 724 | ; SSH Support Settings |
|
722 | 725 | ; #################### |
|
723 | 726 | |
|
724 | 727 | ; Defines if a custom authorized_keys file should be created and written on

725 | 728 | ; any change of user SSH keys. Setting this to false also disables the possibility

726 | 729 | ; of users adding SSH keys from the web interface. Super admins can still
|
727 | 730 | ; manage SSH Keys. |
|
728 | 731 | ssh.generate_authorized_keyfile = true |
|
729 | 732 | |
|
730 | 733 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
731 | 734 | # ssh.authorized_keys_ssh_opts = |
|
732 | 735 | |
|
733 | 736 | ; Path to the authorized_keys file where the generated entries are placed.
|
734 | 737 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
735 | 738 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
736 | 739 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode |
|
737 | 740 | |
|
738 | 741 | ; Command to execute the SSH wrapper. The binary is available in the |
|
739 | 742 | ; RhodeCode installation directory. |
|
740 | 743 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
741 | 744 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
742 | 745 | #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
743 | 746 | ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
744 | 747 | |
|
745 | 748 | ; Allow shell when executing the ssh-wrapper command |
|
746 | 749 | ssh.wrapper_cmd_allow_shell = false |
|
747 | 750 | |
|
748 | 751 | ; Enables logging and detailed output sent back to the client during SSH
|
749 | 752 | ; operations. Useful for debugging, shouldn't be used in production. |
|
750 | 753 | ssh.enable_debug_logging = false |
|
751 | 754 | |
|
752 | 755 | ; Paths to binary executables; by default these are just the names, but we can

753 | 756 | ; override them if we want to use custom ones
|
754 | 757 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
755 | 758 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
756 | 759 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
757 | 760 | |
|
758 | 761 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
759 | 762 | ; to add their own keys. |
|
760 | 763 | ssh.enable_ui_key_generator = true |
|
761 | 764 | |
|
762 | 765 | ; Statsd client config; this is used to send metrics to statsd

763 | 766 | ; We recommend enabling a statsd exporter and scraping the metrics with Prometheus
|
764 | 767 | #statsd.enabled = false |
|
765 | 768 | #statsd.statsd_host = 0.0.0.0 |
|
766 | 769 | #statsd.statsd_port = 8125 |
|
767 | 770 | #statsd.statsd_prefix = |
|
768 | 771 | #statsd.statsd_ipv6 = false |
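These settings mirror a standard statsd client. A sketch with the `statsd` PyPI package; RhodeCode's bundled client is assumed to speak the same UDP protocol to the configured host/port:

    # Emit a counter and a timer to a statsd daemon on the configured port.
    import statsd

    client = statsd.StatsClient("localhost", 8125, prefix="rhodecode")
    client.incr("requests")            # counter
    client.timing("response_ms", 42)   # timer, in milliseconds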
|
769 | 772 | |
|
770 | 773 | ; configure logging automatically at server startup; set to false

771 | 774 | ; to use the custom logging config below.

772 | 775 | ; RC_LOGGING_FORMATTER

773 | 776 | ; RC_LOGGING_LEVEL

774 | 777 | ; these env variables can control the logging settings when autoconfigure is used
|
775 | 778 | |
|
776 | 779 | #logging.autoconfigure = true |
|
777 | 780 | |
|
778 | 781 | ; specify your own custom logging config file to configure logging |
|
779 | 782 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
780 | 783 | |
|
781 | 784 | ; Dummy marker to add new entries after. |
|
782 | 785 | ; Add any custom entries below. Please don't remove this marker. |
|
783 | 786 | custom.conf = 1 |
|
784 | 787 | |
|
785 | 788 | |
|
786 | 789 | ; ##################### |
|
787 | 790 | ; LOGGING CONFIGURATION |
|
788 | 791 | ; ##################### |
|
789 | 792 | |
|
790 | 793 | [loggers] |
|
791 | 794 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
792 | 795 | |
|
793 | 796 | [handlers] |
|
794 | 797 | keys = console, console_sql |
|
795 | 798 | |
|
796 | 799 | [formatters] |
|
797 | 800 | keys = generic, json, color_formatter, color_formatter_sql |
|
798 | 801 | |
|
799 | 802 | ; ####### |
|
800 | 803 | ; LOGGERS |
|
801 | 804 | ; ####### |
|
802 | 805 | [logger_root] |
|
803 | 806 | level = NOTSET |
|
804 | 807 | handlers = console |
|
805 | 808 | |
|
806 | 809 | [logger_sqlalchemy] |
|
807 | 810 | level = INFO |
|
808 | 811 | handlers = console_sql |
|
809 | 812 | qualname = sqlalchemy.engine |
|
810 | 813 | propagate = 0 |
|
811 | 814 | |
|
812 | 815 | [logger_beaker] |
|
813 | 816 | level = DEBUG |
|
814 | 817 | handlers = |
|
815 | 818 | qualname = beaker.container |
|
816 | 819 | propagate = 1 |
|
817 | 820 | |
|
818 | 821 | [logger_rhodecode] |
|
819 | 822 | level = DEBUG |
|
820 | 823 | handlers = |
|
821 | 824 | qualname = rhodecode |
|
822 | 825 | propagate = 1 |
|
823 | 826 | |
|
824 | 827 | [logger_ssh_wrapper] |
|
825 | 828 | level = DEBUG |
|
826 | 829 | handlers = |
|
827 | 830 | qualname = ssh_wrapper |
|
828 | 831 | propagate = 1 |
|
829 | 832 | |
|
830 | 833 | [logger_celery] |
|
831 | 834 | level = DEBUG |
|
832 | 835 | handlers = |
|
833 | 836 | qualname = celery |
|
834 | 837 | |
|
835 | 838 | |
|
836 | 839 | ; ######## |
|
837 | 840 | ; HANDLERS |
|
838 | 841 | ; ######## |
|
839 | 842 | |
|
840 | 843 | [handler_console] |
|
841 | 844 | class = StreamHandler |
|
842 | 845 | args = (sys.stderr, ) |
|
843 | 846 | level = INFO |
|
844 | 847 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
845 | 848 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
846 | 849 | formatter = generic |
|
847 | 850 | |
|
848 | 851 | [handler_console_sql] |
|
849 | 852 | ; "level = DEBUG" logs SQL queries and results. |
|
850 | 853 | ; "level = INFO" logs SQL queries. |
|
851 | 854 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
852 | 855 | class = StreamHandler |
|
853 | 856 | args = (sys.stderr, ) |
|
854 | 857 | level = WARN |
|
855 | 858 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
856 | 859 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
857 | 860 | formatter = generic |
|
858 | 861 | |
|
859 | 862 | ; ########## |
|
860 | 863 | ; FORMATTERS |
|
861 | 864 | ; ########## |
|
862 | 865 | |
|
863 | 866 | [formatter_generic] |
|
864 | 867 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
865 | 868 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
866 | 869 | datefmt = %Y-%m-%d %H:%M:%S |
|
867 | 870 | |
|
868 | 871 | [formatter_color_formatter] |
|
869 | 872 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
870 | 873 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
871 | 874 | datefmt = %Y-%m-%d %H:%M:%S |
|
872 | 875 | |
|
873 | 876 | [formatter_color_formatter_sql] |
|
874 | 877 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
875 | 878 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
876 | 879 | datefmt = %Y-%m-%d %H:%M:%S |
|
877 | 880 | |
|
878 | 881 | [formatter_json] |
|
879 | 882 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
880 | 883 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
@@ -1,175 +1,177 b'' | |||
|
1 | 1 | .. _rhodecode-release-notes-ref: |
|
2 | 2 | |
|
3 | 3 | Release Notes |
|
4 | 4 | ============= |
|
5 | 5 | |
|
6 | 6 | |RCE| 5.x Versions |
|
7 | 7 | ------------------ |
|
8 | 8 | |
|
9 | 9 | .. toctree:: |
|
10 | 10 | :maxdepth: 1 |
|
11 | 11 | |
|
12 | release-notes-5.3.0.rst | |
|
13 | release-notes-5.2.1.rst | |
|
12 | 14 | release-notes-5.2.0.rst |
|
13 | 15 | release-notes-5.1.2.rst |
|
14 | 16 | release-notes-5.1.1.rst |
|
15 | 17 | release-notes-5.1.0.rst |
|
16 | 18 | release-notes-5.0.3.rst |
|
17 | 19 | release-notes-5.0.2.rst |
|
18 | 20 | release-notes-5.0.1.rst |
|
19 | 21 | release-notes-5.0.0.rst |
|
20 | 22 | |
|
21 | 23 | |
|
22 | 24 | |RCE| 4.x Versions |
|
23 | 25 | ------------------ |
|
24 | 26 | |
|
25 | 27 | .. toctree:: |
|
26 | 28 | :maxdepth: 1 |
|
27 | 29 | |
|
28 | 30 | release-notes-4.27.1.rst |
|
29 | 31 | release-notes-4.27.0.rst |
|
30 | 32 | release-notes-4.26.0.rst |
|
31 | 33 | release-notes-4.25.2.rst |
|
32 | 34 | release-notes-4.25.1.rst |
|
33 | 35 | release-notes-4.25.0.rst |
|
34 | 36 | release-notes-4.24.1.rst |
|
35 | 37 | release-notes-4.24.0.rst |
|
36 | 38 | release-notes-4.23.2.rst |
|
37 | 39 | release-notes-4.23.1.rst |
|
38 | 40 | release-notes-4.23.0.rst |
|
39 | 41 | release-notes-4.22.0.rst |
|
40 | 42 | release-notes-4.21.0.rst |
|
41 | 43 | release-notes-4.20.1.rst |
|
42 | 44 | release-notes-4.20.0.rst |
|
43 | 45 | release-notes-4.19.3.rst |
|
44 | 46 | release-notes-4.19.2.rst |
|
45 | 47 | release-notes-4.19.1.rst |
|
46 | 48 | release-notes-4.19.0.rst |
|
47 | 49 | release-notes-4.18.3.rst |
|
48 | 50 | release-notes-4.18.2.rst |
|
49 | 51 | release-notes-4.18.1.rst |
|
50 | 52 | release-notes-4.18.0.rst |
|
51 | 53 | release-notes-4.17.4.rst |
|
52 | 54 | release-notes-4.17.3.rst |
|
53 | 55 | release-notes-4.17.2.rst |
|
54 | 56 | release-notes-4.17.1.rst |
|
55 | 57 | release-notes-4.17.0.rst |
|
56 | 58 | release-notes-4.16.2.rst |
|
57 | 59 | release-notes-4.16.1.rst |
|
58 | 60 | release-notes-4.16.0.rst |
|
59 | 61 | release-notes-4.15.2.rst |
|
60 | 62 | release-notes-4.15.1.rst |
|
61 | 63 | release-notes-4.15.0.rst |
|
62 | 64 | release-notes-4.14.1.rst |
|
63 | 65 | release-notes-4.14.0.rst |
|
64 | 66 | release-notes-4.13.3.rst |
|
65 | 67 | release-notes-4.13.2.rst |
|
66 | 68 | release-notes-4.13.1.rst |
|
67 | 69 | release-notes-4.13.0.rst |
|
68 | 70 | release-notes-4.12.4.rst |
|
69 | 71 | release-notes-4.12.3.rst |
|
70 | 72 | release-notes-4.12.2.rst |
|
71 | 73 | release-notes-4.12.1.rst |
|
72 | 74 | release-notes-4.12.0.rst |
|
73 | 75 | release-notes-4.11.6.rst |
|
74 | 76 | release-notes-4.11.5.rst |
|
75 | 77 | release-notes-4.11.4.rst |
|
76 | 78 | release-notes-4.11.3.rst |
|
77 | 79 | release-notes-4.11.2.rst |
|
78 | 80 | release-notes-4.11.1.rst |
|
79 | 81 | release-notes-4.11.0.rst |
|
80 | 82 | release-notes-4.10.6.rst |
|
81 | 83 | release-notes-4.10.5.rst |
|
82 | 84 | release-notes-4.10.4.rst |
|
83 | 85 | release-notes-4.10.3.rst |
|
84 | 86 | release-notes-4.10.2.rst |
|
85 | 87 | release-notes-4.10.1.rst |
|
86 | 88 | release-notes-4.10.0.rst |
|
87 | 89 | release-notes-4.9.1.rst |
|
88 | 90 | release-notes-4.9.0.rst |
|
89 | 91 | release-notes-4.8.0.rst |
|
90 | 92 | release-notes-4.7.2.rst |
|
91 | 93 | release-notes-4.7.1.rst |
|
92 | 94 | release-notes-4.7.0.rst |
|
93 | 95 | release-notes-4.6.1.rst |
|
94 | 96 | release-notes-4.6.0.rst |
|
95 | 97 | release-notes-4.5.2.rst |
|
96 | 98 | release-notes-4.5.1.rst |
|
97 | 99 | release-notes-4.5.0.rst |
|
98 | 100 | release-notes-4.4.2.rst |
|
99 | 101 | release-notes-4.4.1.rst |
|
100 | 102 | release-notes-4.4.0.rst |
|
101 | 103 | release-notes-4.3.1.rst |
|
102 | 104 | release-notes-4.3.0.rst |
|
103 | 105 | release-notes-4.2.1.rst |
|
104 | 106 | release-notes-4.2.0.rst |
|
105 | 107 | release-notes-4.1.2.rst |
|
106 | 108 | release-notes-4.1.1.rst |
|
107 | 109 | release-notes-4.1.0.rst |
|
108 | 110 | release-notes-4.0.1.rst |
|
109 | 111 | release-notes-4.0.0.rst |
|
110 | 112 | |
|
111 | 113 | |RCE| 3.x Versions |
|
112 | 114 | ------------------ |
|
113 | 115 | |
|
114 | 116 | .. toctree:: |
|
115 | 117 | :maxdepth: 1 |
|
116 | 118 | |
|
117 | 119 | release-notes-3.8.4.rst |
|
118 | 120 | release-notes-3.8.3.rst |
|
119 | 121 | release-notes-3.8.2.rst |
|
120 | 122 | release-notes-3.8.1.rst |
|
121 | 123 | release-notes-3.8.0.rst |
|
122 | 124 | release-notes-3.7.1.rst |
|
123 | 125 | release-notes-3.7.0.rst |
|
124 | 126 | release-notes-3.6.1.rst |
|
125 | 127 | release-notes-3.6.0.rst |
|
126 | 128 | release-notes-3.5.2.rst |
|
127 | 129 | release-notes-3.5.1.rst |
|
128 | 130 | release-notes-3.5.0.rst |
|
129 | 131 | release-notes-3.4.1.rst |
|
130 | 132 | release-notes-3.4.0.rst |
|
131 | 133 | release-notes-3.3.4.rst |
|
132 | 134 | release-notes-3.3.3.rst |
|
133 | 135 | release-notes-3.3.2.rst |
|
134 | 136 | release-notes-3.3.1.rst |
|
135 | 137 | release-notes-3.3.0.rst |
|
136 | 138 | release-notes-3.2.3.rst |
|
137 | 139 | release-notes-3.2.2.rst |
|
138 | 140 | release-notes-3.2.1.rst |
|
139 | 141 | release-notes-3.2.0.rst |
|
140 | 142 | release-notes-3.1.1.rst |
|
141 | 143 | release-notes-3.1.0.rst |
|
142 | 144 | release-notes-3.0.2.rst |
|
143 | 145 | release-notes-3.0.1.rst |
|
144 | 146 | release-notes-3.0.0.rst |
|
145 | 147 | |
|
146 | 148 | |RCE| 2.x Versions |
|
147 | 149 | ------------------ |
|
148 | 150 | |
|
149 | 151 | .. toctree:: |
|
150 | 152 | :maxdepth: 1 |
|
151 | 153 | |
|
152 | 154 | release-notes-2.2.8.rst |
|
153 | 155 | release-notes-2.2.7.rst |
|
154 | 156 | release-notes-2.2.6.rst |
|
155 | 157 | release-notes-2.2.5.rst |
|
156 | 158 | release-notes-2.2.4.rst |
|
157 | 159 | release-notes-2.2.3.rst |
|
158 | 160 | release-notes-2.2.2.rst |
|
159 | 161 | release-notes-2.2.1.rst |
|
160 | 162 | release-notes-2.2.0.rst |
|
161 | 163 | release-notes-2.1.0.rst |
|
162 | 164 | release-notes-2.0.2.rst |
|
163 | 165 | release-notes-2.0.1.rst |
|
164 | 166 | release-notes-2.0.0.rst |
|
165 | 167 | |
|
166 | 168 | |RCE| 1.x Versions |
|
167 | 169 | ------------------ |
|
168 | 170 | |
|
169 | 171 | .. toctree:: |
|
170 | 172 | :maxdepth: 1 |
|
171 | 173 | |
|
172 | 174 | release-notes-1.7.2.rst |
|
173 | 175 | release-notes-1.7.1.rst |
|
174 | 176 | release-notes-1.7.0.rst |
|
175 | 177 | release-notes-1.6.0.rst |
@@ -1,249 +1,253 b'' | |||
|
1 | 1 | |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2023 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import logging |
|
22 | 22 | import urllib.request |
|
23 | 23 | import urllib.error |
|
24 | 24 | import urllib.parse |
|
25 | 25 | import os |
|
26 | 26 | |
|
27 | 27 | import rhodecode |
|
28 | 28 | from rhodecode.apps._base import BaseAppView |
|
29 | 29 | from rhodecode.apps._base.navigation import navigation_list |
|
30 | 30 | from rhodecode.lib import helpers as h |
|
31 | 31 | from rhodecode.lib.auth import (LoginRequired, HasPermissionAllDecorator) |
|
32 | 32 | from rhodecode.lib.utils2 import str2bool |
|
33 | 33 | from rhodecode.lib import system_info |
|
34 | 34 | from rhodecode.model.update import UpdateModel |
|
35 | 35 | |
|
36 | 36 | log = logging.getLogger(__name__) |
|
37 | 37 | |
|
38 | 38 | |
|
39 | 39 | class AdminSystemInfoSettingsView(BaseAppView): |
|
40 | 40 | def load_default_context(self): |
|
41 | 41 | c = self._get_local_tmpl_context() |
|
42 | 42 | return c |
|
43 | 43 | |
|
44 | 44 | def get_env_data(self): |
|
45 | 45 | black_list = [ |
|
46 | 46 | 'NIX_LDFLAGS', |
|
47 | 47 | 'NIX_CFLAGS_COMPILE', |
|
48 | 48 | 'propagatedBuildInputs', |
|
49 | 49 | 'propagatedNativeBuildInputs', |
|
50 | 50 | 'postInstall', |
|
51 | 51 | 'buildInputs', |
|
52 | 52 | 'buildPhase', |
|
53 | 53 | 'preShellHook',
|
55 | 55 | 'preCheck', |
|
56 | 56 | 'preBuild', |
|
57 | 57 | 'postShellHook', |
|
58 | 58 | 'postFixup', |
|
59 | 59 | 'postCheck', |
|
60 | 60 | 'nativeBuildInputs', |
|
61 | 61 | 'installPhase', |
|
62 | 62 | 'installCheckPhase', |
|
63 | 63 | 'checkPhase', |
|
64 | 64 | 'configurePhase', |
|
65 | 65 | 'shellHook' |
|
66 | 66 | ] |
|
67 | 67 | secret_list = [ |
|
68 | 68 | 'RHODECODE_USER_PASS' |
|
69 | 69 | ] |
|
70 | 70 | |
|
71 | 71 | for k, v in sorted(os.environ.items()): |
|
72 | 72 | if k in black_list: |
|
73 | 73 | continue |
|
74 | 74 | if k in secret_list: |
|
75 | 75 | v = '*****' |
|
76 | 76 | yield k, v |
|
77 | 77 | |
|
78 | 78 | @LoginRequired() |
|
79 | 79 | @HasPermissionAllDecorator('hg.admin') |
|
80 | 80 | def settings_system_info(self): |
|
81 | 81 | _ = self.request.translate |
|
82 | 82 | c = self.load_default_context() |
|
83 | 83 | |
|
84 | 84 | c.active = 'system' |
|
85 | 85 | c.navlist = navigation_list(self.request) |
|
86 | 86 | |
|
87 | 87 | # TODO(marcink), figure out how to allow only selected users to do this |
|
88 | 88 | c.allowed_to_snapshot = self._rhodecode_user.admin |
|
89 | 89 | |
|
90 | 90 | snapshot = str2bool(self.request.params.get('snapshot')) |
|
91 | 91 | |
|
92 | 92 | c.rhodecode_update_url = UpdateModel().get_update_url() |
|
93 | 93 | c.env_data = self.get_env_data() |
|
94 | 94 | server_info = system_info.get_system_info(self.request.environ) |
|
95 | 95 | |
|
96 | 96 | for key, val in server_info.items(): |
|
97 | 97 | setattr(c, key, val) |
|
98 | 98 | |
|
99 | 99 | def val(name, subkey='human_value'): |
|
100 | 100 | return server_info[name][subkey] |
|
101 | 101 | |
|
102 | 102 | def state(name): |
|
103 | 103 | return server_info[name]['state'] |
|
104 | 104 | |
|
105 | 105 | def val2(name): |
|
106 | 106 | val = server_info[name]['human_value'] |
|
107 | 107 | state = server_info[name]['state'] |
|
108 | 108 | return val, state |
|
109 | 109 | |
|
110 | 110 | update_info_msg = _('Note: please make sure this server can ' |
|
111 | 111 | 'access `${url}` for the update link to work', |
|
112 | 112 | mapping=dict(url=c.rhodecode_update_url)) |
|
113 | 113 | version = UpdateModel().get_stored_version() |
|
114 | 114 | is_outdated = UpdateModel().is_outdated( |
|
115 | 115 | rhodecode.__version__, version) |
|
116 | 116 | update_state = { |
|
117 | 117 | 'type': 'warning', |
|
118 | 118 | 'message': 'New version available: {}'.format(version) |
|
119 | 119 | } \ |
|
120 | 120 | if is_outdated else {} |
|
121 | 121 | c.data_items = [ |
|
122 | 122 | # update info |
|
123 | 123 | (_('Update info'), h.literal( |
|
124 | 124 | '<span class="link" id="check_for_update" >%s.</span>' % ( |
|
125 | 125 | _('Check for updates')) + |
|
126 | 126 | '<br/> <span >%s.</span>' % (update_info_msg) |
|
127 | 127 | ), ''), |
|
128 | 128 | |
|
129 | 129 | # RhodeCode specific |
|
130 | 130 | (_('RhodeCode Version'), val('rhodecode_app')['text'], state('rhodecode_app')), |
|
131 | 131 | (_('Latest version'), version, update_state), |
|
132 | 132 | (_('RhodeCode Base URL'), val('rhodecode_config')['config'].get('app.base_url'), state('rhodecode_config')), |
|
133 | 133 | (_('RhodeCode Server IP'), val('server')['server_ip'], state('server')), |
|
134 | 134 | (_('RhodeCode Server ID'), val('server')['server_id'], state('server')), |
|
135 | 135 | (_('RhodeCode Configuration'), val('rhodecode_config')['path'], state('rhodecode_config')), |
|
136 | 136 | (_('RhodeCode Certificate'), val('rhodecode_config')['cert_path'], state('rhodecode_config')), |
|
137 | 137 | (_('Workers'), val('rhodecode_config')['config']['server:main'].get('workers', '?'), state('rhodecode_config')), |
|
138 | 138 | (_('Worker Type'), val('rhodecode_config')['config']['server:main'].get('worker_class', 'sync'), state('rhodecode_config')), |
|
139 | 139 | ('', '', ''), # spacer |
|
140 | 140 | |
|
141 | 141 | # Database |
|
142 | 142 | (_('Database'), val('database')['url'], state('database')), |
|
143 | 143 | (_('Database version'), val('database')['version'], state('database')), |
|
144 | 144 | ('', '', ''), # spacer |
|
145 | 145 | |
|
146 | 146 | # Platform/Python |
|
147 | 147 | (_('Platform'), val('platform')['name'], state('platform')), |
|
148 | 148 | (_('Platform UUID'), val('platform')['uuid'], state('platform')), |
|
149 | 149 | (_('Lang'), val('locale'), state('locale')), |
|
150 | 150 | (_('Python version'), val('python')['version'], state('python')), |
|
151 | 151 | (_('Python path'), val('python')['executable'], state('python')), |
|
152 | 152 | ('', '', ''), # spacer |
|
153 | 153 | |
|
154 | 154 | # Systems stats |
|
155 | 155 | (_('CPU'), val('cpu')['text'], state('cpu')), |
|
156 | 156 | (_('Load'), val('load')['text'], state('load')), |
|
157 | 157 | (_('Memory'), val('memory')['text'], state('memory')), |
|
158 | 158 | (_('Uptime'), val('uptime')['text'], state('uptime')), |
|
159 | 159 | ('', '', ''), # spacer |
|
160 | 160 | |
|
161 | 161 | # ulimit |
|
162 | 162 | (_('Ulimit'), val('ulimit')['text'], state('ulimit')), |
|
163 | 163 | |
|
164 | 164 | # Repo storage |
|
165 | 165 | (_('Storage location'), val('storage')['path'], state('storage')), |
|
166 | 166 | (_('Storage info'), val('storage')['text'], state('storage')), |
|
167 | 167 | (_('Storage inodes'), val('storage_inodes')['text'], state('storage_inodes')), |
|
168 | 168 | ('', '', ''), # spacer |
|
169 | 169 | |
|
170 | 170 | (_('Gist storage location'), val('storage_gist')['path'], state('storage_gist')), |
|
171 | 171 | (_('Gist storage info'), val('storage_gist')['text'], state('storage_gist')), |
|
172 | 172 | ('', '', ''), # spacer |
|
173 | 173 | |
|
174 | 174 | (_('Artifacts storage backend'), val('storage_artifacts')['type'], state('storage_artifacts')), |
|
175 | 175 | (_('Artifacts storage location'), val('storage_artifacts')['path'], state('storage_artifacts')), |
|
176 | 176 | (_('Artifacts info'), val('storage_artifacts')['text'], state('storage_artifacts')), |
|
177 | 177 | ('', '', ''), # spacer |
|
178 | 178 | |
|
179 | 179 | (_('Archive cache storage backend'), val('storage_archive')['type'], state('storage_archive')), |
|
180 | 180 | (_('Archive cache storage location'), val('storage_archive')['path'], state('storage_archive')), |
|
181 | 181 | (_('Archive cache info'), val('storage_archive')['text'], state('storage_archive')), |
|
182 | 182 | ('', '', ''), # spacer |
|
183 | 183 | |
|
184 | 184 | |
|
185 | 185 | (_('Temp storage location'), val('storage_temp')['path'], state('storage_temp')), |
|
186 | 186 | (_('Temp storage info'), val('storage_temp')['text'], state('storage_temp')), |
|
187 | 187 | ('', '', ''), # spacer |
|
188 | 188 | |
|
189 | 189 | (_('Search info'), val('search')['text'], state('search')), |
|
190 | 190 | (_('Search location'), val('search')['location'], state('search')), |
|
191 | 191 | ('', '', ''), # spacer |
|
192 | 192 | |
|
193 | 193 | # VCS specific |
|
194 | 194 | (_('VCS Backends'), val('vcs_backends'), state('vcs_backends')), |
|
195 | 195 | (_('VCS Server'), val('vcs_server')['text'], state('vcs_server')), |
|
196 | 196 | (_('GIT'), val('git'), state('git')), |
|
197 | 197 | (_('HG'), val('hg'), state('hg')), |
|
198 | 198 | (_('SVN'), val('svn'), state('svn')), |
|
199 | 199 | |
|
200 | 200 | ] |
|
201 | 201 | |
|
202 | c.rhodecode_data_items = [ | |
|
203 | (k, v) for k, v in sorted((val('rhodecode_server_config') or {}).items(), key=lambda x: x[0].lower()) | |
|
204 | ] | |
|
205 | ||
|
202 | 206 | c.vcsserver_data_items = [ |
|
203 | (k, v) for k, v in (val('vcs_server_config') or {}).items() | |
|
207 | (k, v) for k, v in sorted((val('vcs_server_config') or {}).items(), key=lambda x: x[0].lower()) | |
|
204 | 208 | ] |
|
205 | 209 | |
|
206 | 210 | if snapshot: |
|
207 | 211 | if c.allowed_to_snapshot: |
|
208 | 212 | c.data_items.pop(0) # remove server info |
|
209 | 213 | self.request.override_renderer = 'admin/settings/settings_system_snapshot.mako' |
|
210 | 214 | else: |
|
211 | 215 | h.flash('You are not allowed to do this', category='warning') |
|
212 | 216 | return self._get_template_context(c) |
|
213 | 217 | |
|
214 | 218 | @LoginRequired() |
|
215 | 219 | @HasPermissionAllDecorator('hg.admin') |
|
216 | 220 | def settings_system_info_check_update(self): |
|
217 | 221 | _ = self.request.translate |
|
218 | 222 | c = self.load_default_context() |
|
219 | 223 | |
|
220 | 224 | update_url = UpdateModel().get_update_url() |
|
221 | 225 | |
|
222 | 226 | def _err(s): |
|
223 | 227 | return f'<div style="color:#ff8888; padding:4px 0px">{s}</div>' |
|
224 | 228 | |
|
225 | 229 | try: |
|
226 | 230 | data = UpdateModel().get_update_data(update_url) |
|
227 | 231 | except urllib.error.URLError as e: |
|
228 | 232 | log.exception("Exception contacting upgrade server") |
|
229 | 233 | self.request.override_renderer = 'string' |
|
230 | 234 | return _err('Failed to contact upgrade server: %r' % e) |
|
231 | 235 | except ValueError as e: |
|
232 | 236 | log.exception("Bad data sent from update server") |
|
233 | 237 | self.request.override_renderer = 'string' |
|
234 | 238 | return _err('Bad data sent from update server') |
|
235 | 239 | |
|
236 | 240 | latest = data['versions'][0] |
|
237 | 241 | |
|
238 | 242 | c.update_url = update_url |
|
239 | 243 | c.latest_data = latest |
|
240 | 244 | c.latest_ver = (latest['version'] or '').strip() |
|
241 | 245 | c.cur_ver = self.request.GET.get('ver') or rhodecode.__version__ |
|
242 | 246 | c.should_upgrade = False |
|
243 | 247 | |
|
244 | 248 | is_outdated = UpdateModel().is_outdated(c.cur_ver, c.latest_ver) |
|
245 | 249 | if is_outdated: |
|
246 | 250 | c.should_upgrade = True |
|
247 | 251 | c.important_notices = latest['general'] |
|
248 | 252 | UpdateModel().store_version(latest['version']) |
|
249 | 253 | return self._get_template_context(c) |
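
For context, a hedged sketch of the update feed this view consumes; only the fields the handler actually reads ('versions', and each entry's 'version' and 'general') are assumed here, not the upgrade server's full schema:

    # Illustrative payload shape, matching only the fields read above.
    data = {
        'versions': [
            {'version': '5.3.0', 'general': 'Security fixes'},
            {'version': '5.2.1', 'general': 'Bugfix release'},
        ]
    }
    latest = data['versions'][0]  # newest release comes first
    # is_outdated() is what flips c.should_upgrade
    outdated = UpdateModel().is_outdated('5.2.1', latest['version'])
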
@@ -1,128 +1,122 b'' | |||
|
1 | 1 | # Copyright (C) 2011-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import logging |
|
20 | 20 | |
|
21 | 21 | from pyramid.httpexceptions import HTTPFound |
|
22 | 22 | |
|
23 | 23 | from rhodecode.apps._base import RepoAppView |
|
24 | 24 | from rhodecode.lib import helpers as h |
|
25 | 25 | from rhodecode.lib import audit_logger |
|
26 | 26 | from rhodecode.lib.auth import ( |
|
27 | 27 | LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired) |
|
28 | 28 | from rhodecode.lib.utils2 import str2bool |
|
29 | 29 | from rhodecode.model.db import User |
|
30 | 30 | from rhodecode.model.forms import RepoPermsForm |
|
31 | 31 | from rhodecode.model.meta import Session |
|
32 | 32 | from rhodecode.model.permission import PermissionModel |
|
33 | 33 | from rhodecode.model.repo import RepoModel |
|
34 | 34 | |
|
35 | 35 | log = logging.getLogger(__name__) |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | class RepoSettingsPermissionsView(RepoAppView): |
|
39 | 39 | |
|
40 | 40 | def load_default_context(self): |
|
41 | 41 | c = self._get_local_tmpl_context() |
|
42 | 42 | return c |
|
43 | 43 | |
|
44 | 44 | @LoginRequired() |
|
45 | 45 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
46 | 46 | def edit_permissions(self): |
|
47 | 47 | _ = self.request.translate |
|
48 | 48 | c = self.load_default_context() |
|
49 | 49 | c.active = 'permissions' |
|
50 | 50 | if self.request.GET.get('branch_permissions'): |
|
51 | 51 | h.flash(_('Explicitly add user or user group with write or higher ' |
|
52 | 52 | 'permission to modify their branch permissions.'), |
|
53 | 53 | category='notice') |
|
54 | 54 | return self._get_template_context(c) |
|
55 | 55 | |
|
56 | 56 | @LoginRequired() |
|
57 | 57 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
58 | 58 | @CSRFRequired() |
|
59 | 59 | def edit_permissions_update(self): |
|
60 | 60 | _ = self.request.translate |
|
61 | 61 | c = self.load_default_context() |
|
62 | 62 | c.active = 'permissions' |
|
63 | 63 | data = self.request.POST |
|
64 | 64 | # store private flag outside of HTML to verify if we can modify |
|
65 | 65 | # default user permissions, prevents submission of FAKE post data |
|
66 | 66 | # into the form for private repos |
|
67 | 67 | data['repo_private'] = self.db_repo.private |
|
68 | 68 | form = RepoPermsForm(self.request.translate)().to_python(data) |
|
69 | 69 | changes = RepoModel().update_permissions( |
|
70 | 70 | self.db_repo_name, form['perm_additions'], form['perm_updates'], |
|
71 | 71 | form['perm_deletions']) |
|
72 | 72 | |
|
73 | 73 | action_data = { |
|
74 | 74 | 'added': changes['added'], |
|
75 | 75 | 'updated': changes['updated'], |
|
76 | 76 | 'deleted': changes['deleted'], |
|
77 | 77 | } |
|
78 | 78 | audit_logger.store_web( |
|
79 | 79 | 'repo.edit.permissions', action_data=action_data, |
|
80 | 80 | user=self._rhodecode_user, repo=self.db_repo) |
|
81 | 81 | |
|
82 | 82 | Session().commit() |
|
83 | 83 | h.flash(_('Repository access permissions updated'), category='success') |
|
84 | 84 | |
|
85 | 85 | affected_user_ids = None |
|
86 | 86 | if changes.get('default_user_changed', False): |
|
87 | 87 | # if we change the default user, we need to flush everyone permissions |
|
88 | 88 | affected_user_ids = User.get_all_user_ids() |
|
89 | 89 | PermissionModel().flush_user_permission_caches( |
|
90 | 90 | changes, affected_user_ids=affected_user_ids) |
|
91 | 91 | |
|
92 | 92 | raise HTTPFound( |
|
93 | 93 | h.route_path('edit_repo_perms', repo_name=self.db_repo_name)) |
|
94 | 94 | |
|
95 | 95 | @LoginRequired() |
|
96 | 96 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
97 | 97 | @CSRFRequired() |
|
98 | 98 | def edit_permissions_set_private_repo(self): |
|
99 | 99 | _ = self.request.translate |
|
100 | 100 | self.load_default_context() |
|
101 | 101 | |
|
102 | 102 | private_flag = str2bool(self.request.POST.get('private')) |
|
103 | ||
|
103 | changes = { | |
|
104 | 'repo_private': private_flag | |
|
105 | } | |
|
104 | 106 | try: |
|
105 | 107 | repo = RepoModel().get(self.db_repo.repo_id) |
|
106 | repo.private = private_flag | |
|
107 | Session().add(repo) | |
|
108 | RepoModel().grant_user_permission( | |
|
109 | repo=self.db_repo, user=User.DEFAULT_USER, perm='repository.none' | |
|
110 | ) | |
|
111 | ||
|
108 | RepoModel().update(repo, **changes) | |
|
112 | 109 | Session().commit() |
|
113 | 110 | |
|
114 | 111 | h.flash(_('Repository `{}` private mode set successfully').format(self.db_repo_name), |
|
115 | 112 | category='success') |
|
116 | # NOTE(dan): we change repo private mode we need to notify all USERS | |
|
117 | affected_user_ids = User.get_all_user_ids() | |
|
118 | PermissionModel().trigger_permission_flush(affected_user_ids) | |
|
119 | 113 | |
|
120 | 114 | except Exception: |
|
121 | 115 | log.exception("Exception during update of repository") |
|
122 | 116 | h.flash(_('Error occurred during update of repository {}').format( |
|
123 | 117 | self.db_repo_name), category='error') |
|
124 | 118 | |
|
125 | 119 | return { |
|
126 | 120 | 'redirect_url': h.route_path('edit_repo_perms', repo_name=self.db_repo_name), |
|
127 | 121 | 'private': private_flag |
|
128 | 122 | } |
@@ -1,184 +1,187 b'' | |||
|
1 | 1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import os |
|
20 | 20 | import textwrap |
|
21 | 21 | import string |
|
22 | 22 | import functools |
|
23 | 23 | import logging |
|
24 | 24 | import tempfile |
|
25 | 25 | import logging.config |
|
26 | 26 | |
|
27 | 27 | from rhodecode.lib.type_utils import str2bool, aslist |
|
28 | 28 | |
|
29 | 29 | log = logging.getLogger(__name__) |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | # skip keys, that are set here, so we don't double process those |
|
33 | 33 | set_keys = { |
|
34 | 34 | '__file__': '' |
|
35 | 35 | } |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | class SettingsMaker: |
|
39 | 39 | |
|
40 | 40 | def __init__(self, app_settings): |
|
41 | 41 | self.settings = app_settings |
|
42 | 42 | |
|
43 | 43 | @classmethod |
|
44 | 44 | def _bool_func(cls, input_val): |
|
45 | 45 | if isinstance(input_val, bytes): |
|
46 | 46 | # decode to str |
|
47 | 47 | input_val = input_val.decode('utf8') |
|
48 | 48 | return str2bool(input_val) |
|
49 | 49 | |
|
50 | 50 | @classmethod |
|
51 | 51 | def _int_func(cls, input_val): |
|
52 | 52 | return int(input_val) |
|
53 | 53 | |
|
54 | 54 | @classmethod |
|
55 | 55 | def _float_func(cls, input_val): |
|
56 | 56 | return float(input_val) |
|
57 | 57 | |
|
58 | 58 | @classmethod |
|
59 | 59 | def _list_func(cls, input_val, sep=','): |
|
60 | 60 | return aslist(input_val, sep=sep) |
|
61 | 61 | |
|
62 | 62 | @classmethod |
|
63 | 63 | def _string_func(cls, input_val, lower=True): |
|
64 | 64 | if lower: |
|
65 | 65 | input_val = input_val.lower() |
|
66 | 66 | return input_val |
|
67 | 67 | |
|
68 | 68 | @classmethod |
|
69 | 69 | def _string_no_quote_func(cls, input_val, lower=True): |
|
70 | 70 | """ |
|
71 | 71 | Special-case string function that detects if the value is set to an empty quoted string,
|
72 | 72 | e.g. |
|
73 | 73 | |
|
74 | 74 | core.binary_dir = ""
|
75 | 75 | """ |
|
76 | 76 | |
|
77 | 77 | input_val = cls._string_func(input_val, lower=lower) |
|
78 | 78 | if input_val in ['""', "''"]:

79 | 79 | return ''

80 | 80 | return input_val
|
81 | 81 | @classmethod |
|
82 | 82 | def _dir_func(cls, input_val, ensure_dir=False, mode=0o755): |
|
83 | 83 | |
|
84 | 84 | # ensure we have our dir created |
|
85 | 85 | if not os.path.isdir(input_val) and ensure_dir: |
|
86 | 86 | os.makedirs(input_val, mode=mode, exist_ok=True) |
|
87 | 87 | |
|
88 | 88 | if not os.path.isdir(input_val): |
|
89 | 89 | raise Exception(f'Dir at {input_val} does not exist') |
|
90 | 90 | return input_val |
|
91 | 91 | |
|
92 | 92 | @classmethod |
|
93 | 93 | def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755): |
|
94 | 94 | dirname = os.path.dirname(input_val) |
|
95 | 95 | cls._dir_func(dirname, ensure_dir=ensure_dir) |
|
96 | 96 | return input_val |
|
97 | 97 | |
|
98 | 98 | @classmethod |
|
99 | 99 | def _key_transformator(cls, key): |
|
100 | 100 | return "RC_{}".format(key.upper().replace('.', '_').replace('-', '_'))
|
101 | 101 | |
|
102 | 102 | def maybe_env_key(self, key): |
|
103 | 103 | # now maybe we have this KEY in env, search and use the value with higher priority. |
|
104 | 104 | transformed_key = self._key_transformator(key) |
|
105 | 105 | envvar_value = os.environ.get(transformed_key) |
|
106 | 106 | if envvar_value: |
|
107 | 107 | log.debug('using `%s` key instead of `%s` key for config', transformed_key, key) |
|
108 | 108 | |
|
109 | 109 | return envvar_value |
|
110 | 110 | |
|
111 | 111 | def env_expand(self): |
|
112 | if self.settings.get('rhodecode.env_expand') == 'false': | |
|
113 | return | |
|
114 | ||
|
112 | 115 | replaced = {} |
|
113 | 116 | for k, v in self.settings.items(): |
|
114 | 117 | if k not in set_keys: |
|
115 | 118 | envvar_value = self.maybe_env_key(k) |
|
116 | 119 | if envvar_value: |
|
117 | 120 | replaced[k] = envvar_value |
|
118 | 121 | set_keys[k] = envvar_value |
|
119 | 122 | |
|
120 | 123 | # replace ALL keys updated |
|
121 | 124 | self.settings.update(replaced) |
|
122 | 125 | |
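
    # Illustration (not part of the diff): _key_transformator maps a config
    # key to its environment override, e.g. 'vcs.server' -> 'RC_VCS_SERVER';
    # setting 'rhodecode.env_expand = false' in the ini (compared against the
    # literal string 'false') skips env_expand() entirely.
    import os
    os.environ['RC_VCS_SERVER'] = 'localhost:10010'
    maker = SettingsMaker({'vcs.server': 'localhost:9900'})
    maker.env_expand()
    assert maker.settings['vcs.server'] == 'localhost:10010'
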
|
123 | 126 | def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'): |
|
124 | 127 | """ |
|
125 | 128 | Helper to enable debug on running instance |
|
126 | 129 | :return: |
|
127 | 130 | """ |
|
128 | 131 | |
|
129 | 132 | if not str2bool(self.settings.get('logging.autoconfigure')): |
|
130 | 133 | log.info('logging configuration based on main .ini file') |
|
131 | 134 | return |
|
132 | 135 | |
|
133 | 136 | if logging_conf is None: |
|
134 | 137 | logging_conf = self.settings.get('logging.logging_conf_file') or '' |
|
135 | 138 | |
|
136 | 139 | if not os.path.isfile(logging_conf): |
|
137 | 140 | log.error('Unable to set up logging based on %s: '

138 | 141 | 'file does not exist. Specify a path using the logging.logging_conf_file config setting.', logging_conf)
|
139 | 142 | return |
|
140 | 143 | |
|
141 | 144 | with open(logging_conf, 'rt') as f: |
|
142 | 145 | ini_template = textwrap.dedent(f.read()) |
|
143 | 146 | ini_template = string.Template(ini_template).safe_substitute( |
|
144 | 147 | RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level, |
|
145 | 148 | RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter |
|
146 | 149 | ) |
|
147 | 150 | |
|
148 | 151 | with tempfile.NamedTemporaryFile(prefix='rc_logging_', suffix='.ini', delete=False) as f: |
|
149 | 152 | log.info('Saved Temporary LOGGING config at %s', f.name) |
|
150 | 153 | f.write(ini_template) |
|
151 | 154 | |
|
152 | 155 | logging.config.fileConfig(f.name) |
|
153 | 156 | os.remove(f.name) |
|
154 | 157 | |
|
155 | 158 | def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None): |
|
156 | 159 | input_val = self.settings.get(key, default) |
|
157 | 160 | |
|
158 | 161 | if default_when_empty and not input_val: |
|
159 | 162 | # use default value when value is set in the config but it is empty |
|
160 | 163 | input_val = default |
|
161 | 164 | |
|
162 | 165 | parser_func = { |
|
163 | 166 | 'bool': self._bool_func, |
|
164 | 167 | 'int': self._int_func, |
|
165 | 168 | 'float': self._float_func, |
|
166 | 169 | 'list': self._list_func, |
|
167 | 170 | 'list:newline': functools.partial(self._list_func, sep='\n'),
|
168 | 171 | 'list:spacesep': functools.partial(self._list_func, sep=' '), |
|
169 | 172 | 'string': functools.partial(self._string_func, lower=lower), |
|
170 | 173 | 'string:noquote': functools.partial(self._string_no_quote_func, lower=lower), |
|
171 | 174 | 'dir': self._dir_func, |
|
172 | 175 | 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True), |
|
173 | 176 | 'file': self._file_path_func, |
|
174 | 177 | 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True), |
|
175 | 178 | None: lambda i: i |
|
176 | 179 | }[parser] |
|
177 | 180 | |
|
178 | 181 | envvar_value = self.maybe_env_key(key) |
|
179 | 182 | if envvar_value: |
|
180 | 183 | input_val = envvar_value |
|
181 | 184 | set_keys[key] = input_val |
|
182 | 185 | |
|
183 | 186 | self.settings[key] = parser_func(input_val) |
|
184 | 187 | return self.settings[key] |
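
For reference, a minimal usage sketch of make_setting() with a few of the parsers registered above; the keys and defaults are illustrative rather than taken from a shipped ini:

    settings_maker = SettingsMaker({'vcs.server.enable': 'true', 'startup.import_repos': ''})
    settings_maker.make_setting('vcs.server.enable', 'false', parser='bool')  # -> True
    settings_maker.make_setting('startup.import_repos', 'false',
                                default_when_empty=True, parser='bool')  # empty value falls back to the default -> False
    settings_maker.make_setting('vcs.backends', 'hg,git,svn', parser='list')  # -> ['hg', 'git', 'svn']
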
@@ -1,193 +1,194 b'' | |||
|
1 | 1 | |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Implementation of the scm_app interface using raw HTTP communication. |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import base64 |
|
26 | 26 | import logging |
|
27 | 27 | import urllib.parse |
|
28 | 28 | import wsgiref.util |
|
29 | 29 | |
|
30 | 30 | import msgpack |
|
31 | 31 | import requests |
|
32 | 32 | import webob.request |
|
33 | 33 | |
|
34 | 34 | import rhodecode |
|
35 | 35 | from rhodecode.lib.middleware.utils import get_path_info |
|
36 | 36 | |
|
37 | 37 | log = logging.getLogger(__name__) |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | def create_git_wsgi_app(repo_path, repo_name, config): |
|
41 | 41 | url = _vcs_streaming_url() + 'git/' |
|
42 | 42 | return VcsHttpProxy(url, repo_path, repo_name, config) |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | def create_hg_wsgi_app(repo_path, repo_name, config): |
|
46 | 46 | url = _vcs_streaming_url() + 'hg/' |
|
47 | 47 | return VcsHttpProxy(url, repo_path, repo_name, config) |
|
48 | 48 | |
|
49 | 49 | |
|
50 | 50 | def _vcs_streaming_url(): |
|
51 | 51 | template = 'http://{}/stream/' |
|
52 | 52 | return template.format(rhodecode.CONFIG['vcs.server']) |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | # TODO: johbo: Avoid the global. |
|
56 | 56 | session = requests.Session() |
|
57 | 57 | # Requests speedup, avoid reading .netrc and similar |
|
58 | 58 | session.trust_env = False |
|
59 | 59 | |
|
60 | 60 | # prevent urllib3 spawning our logs. |
|
61 | 61 | logging.getLogger("requests.packages.urllib3.connectionpool").setLevel( |
|
62 | 62 | logging.WARNING) |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | class VcsHttpProxy(object): |
|
66 | 66 | """ |
|
67 | 67 | A WSGI application which proxies vcs requests. |
|
68 | 68 | |
|
69 | 69 | The goal is to shuffle the data around without touching it. The only |
|
70 | 70 | exception is the extra data from the config object which we send to the |
|
71 | 71 | server as well. |
|
72 | 72 | """ |
|
73 | 73 | |
|
74 | 74 | def __init__(self, url, repo_path, repo_name, config): |
|
75 | 75 | """ |
|
76 | 76 | :param str url: The URL of the VCSServer to call. |
|
77 | 77 | """ |
|
78 | 78 | self._url = url |
|
79 | 79 | self._repo_name = repo_name |
|
80 | 80 | self._repo_path = repo_path |
|
81 | 81 | self._config = config |
|
82 | 82 | self.rc_extras = {} |
|
83 | 83 | log.debug( |
|
84 | 84 | "Creating VcsHttpProxy for repo %s, url %s", |
|
85 | 85 | repo_name, url) |
|
86 | 86 | |
|
87 | 87 | def __call__(self, environ, start_response): |
|
88 | 88 | config = self._config |
|
89 | 89 | request = webob.request.Request(environ) |
|
90 | 90 | request_headers = request.headers |
|
91 | 91 | |
|
92 | 92 | call_context = { |
|
93 | 93 | # TODO: johbo: Remove this, rely on URL path only |
|
94 | 94 | 'repo_name': self._repo_name, |
|
95 | 95 | 'repo_path': self._repo_path, |
|
96 | 96 | 'path_info': get_path_info(environ), |
|
97 | 97 | |
|
98 | 98 | 'repo_store': self.rc_extras.get('repo_store'), |
|
99 | 99 | 'server_config_file': self.rc_extras.get('config'), |
|
100 | 100 | |
|
101 | 101 | 'auth_user': self.rc_extras.get('username'), |
|
102 | 102 | 'auth_user_id': str(self.rc_extras.get('user_id')), |
|
103 | 103 | 'auth_user_ip': self.rc_extras.get('ip'), |
|
104 | 104 | |
|
105 | 105 | 'repo_config': config, |
|
106 | 106 | 'locked_status_code': rhodecode.CONFIG.get('lock_ret_code'), |
|
107 | 107 | } |
|
108 | 108 | |
|
109 | 109 | request_headers.update({ |
|
110 | 110 | # TODO: johbo: Avoid encoding and put this into payload? |
|
111 | 111 | 'X_RC_VCS_STREAM_CALL_CONTEXT': base64.b64encode(msgpack.packb(call_context)) |
|
112 | 112 | }) |
|
113 | 113 | |
|
114 | 114 | method = environ['REQUEST_METHOD'] |
|
115 | 115 | |
|
116 | 116 | # Preserve the query string |
|
117 | 117 | url = self._url |
|
118 | 118 | url = urllib.parse.urljoin(url, self._repo_name) |
|
119 | 119 | if environ.get('QUERY_STRING'): |
|
120 | 120 | url += '?' + environ['QUERY_STRING'] |
|
121 | 121 | |
|
122 | 122 | log.debug('http-app: preparing request to: %s', url) |
|
123 | 123 | response = session.request( |
|
124 | 124 | method, |
|
125 | 125 | url, |
|
126 | 126 | data=_maybe_stream_request(environ), |
|
127 | 127 | headers=request_headers, |
|
128 | 128 | stream=True) |
|
129 | 129 | |
|
130 | 130 | log.debug('http-app: got vcsserver response: %s', response) |
|
131 | 131 | if response.status_code >= 500: |
|
132 | 132 | log.error('Exception returned by vcsserver at: %s %s, %s', |
|
133 | 133 | url, response.status_code, response.content) |
|
134 | 134 | |
|
135 | 135 | # Preserve the headers of the response, except hop_by_hop ones |
|
136 | 136 | response_headers = [ |
|
137 | 137 | (h, v) for h, v in response.headers.items() |
|
138 | 138 | if not wsgiref.util.is_hop_by_hop(h) |
|
139 | 139 | ] |
|
140 | 140 | |
|
141 | 141 | # Build status argument for start_response callable. |
|
142 | 142 | status = '{status_code} {reason_phrase}'.format( |
|
143 | 143 | status_code=response.status_code, |
|
144 | 144 | reason_phrase=response.reason) |
|
145 | 145 | |
|
146 | 146 | start_response(status, response_headers) |
|
147 | 147 | return _maybe_stream_response(response) |
|
148 | 148 | |
|
149 | 149 | |
|
150 | 150 | def read_in_chunks(stream_obj, block_size=1024, chunks=-1): |
|
151 | 151 | """ |
|
152 | 152 | Read Stream in chunks, default chunk size: 1k. |
|
153 | 153 | """ |
|
154 | 154 | while chunks: |
|
155 | 155 | data = stream_obj.read(block_size) |
|
156 | 156 | if not data: |
|
157 | 157 | break |
|
158 | 158 | yield data |
|
159 | 159 | chunks -= 1 |
|
160 | 160 | |
|
161 | 161 | |
|
162 | 162 | def _is_request_chunked(environ): |
|
163 | 163 | stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked' |
|
164 | 164 | return stream |
|
165 | 165 | |
|
166 | 166 | |
|
167 | 167 | def _maybe_stream_request(environ): |
|
168 | 168 | path = get_path_info(environ) |
|
169 | 169 | stream = _is_request_chunked(environ) |
|
170 | log.debug('handling request `%s` with stream support: %s', path, stream) | |
|
170 | req_method = environ['REQUEST_METHOD'] | |
|
171 | log.debug('handling scm request: %s `%s` with stream support: %s', req_method, path, stream) | |
|
171 | 172 | |
|
172 | 173 | if stream: |
|
173 | 174 | # set stream by 256k |
|
174 | 175 | return read_in_chunks(environ['wsgi.input'], block_size=1024 * 256) |
|
175 | 176 | else: |
|
176 | 177 | return environ['wsgi.input'].read() |
|
177 | 178 | |
|
178 | 179 | |
|
179 | 180 | def _maybe_stream_response(response): |
|
180 | 181 | """ |
|
181 | 182 | Try to generate chunks from the response if it is chunked. |
|
182 | 183 | """ |
|
183 | 184 | stream = _is_chunked(response) |
|
184 | 185 | log.debug('returning response with stream: %s', stream) |
|
185 | 186 | if stream: |
|
186 | 187 | # read in 256k Chunks |
|
187 | 188 | return response.raw.read_chunked(amt=1024 * 256) |
|
188 | 189 | else: |
|
189 | 190 | return [response.content] |
|
190 | 191 | |
|
191 | 192 | |
|
192 | 193 | def _is_chunked(response): |
|
193 | 194 | return response.headers.get('Transfer-Encoding', '') == 'chunked' |
@@ -1,866 +1,893 b'' | |||
|
1 | 1 | # Copyright (C) 2017-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | import os |
|
21 | 21 | import sys |
|
22 | 22 | import time |
|
23 | 23 | import platform |
|
24 | 24 | import collections |
|
25 | 25 | import psutil |
|
26 | 26 | from functools import wraps |
|
27 | 27 | |
|
28 | 28 | import pkg_resources |
|
29 | 29 | import logging |
|
30 | 30 | import resource |
|
31 | 31 | |
|
32 | 32 | import configparser |
|
33 | 33 | |
|
34 | 34 | from rc_license.models import LicenseModel |
|
35 | 35 | from rhodecode.lib.str_utils import safe_str |
|
36 | 36 | |
|
37 | 37 | log = logging.getLogger(__name__) |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | _NA = 'NOT AVAILABLE' |
|
41 | 41 | _NA_FLOAT = 0.0 |
|
42 | 42 | |
|
43 | 43 | STATE_OK = 'ok' |
|
44 | 44 | STATE_ERR = 'error' |
|
45 | 45 | STATE_WARN = 'warning' |
|
46 | 46 | |
|
47 | 47 | STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK} |
|
48 | 48 | |
|
49 | 49 | |
|
50 | 50 | registered_helpers = {} |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def register_sysinfo(func): |
|
54 | 54 | """ |
|
55 | 55 | @register_helper |
|
56 | 56 | def db_check(): |
|
57 | 57 | pass |
|
58 | 58 | |
|
59 | 59 | db_check == registered_helpers['db_check'] |
|
60 | 60 | """ |
|
61 | 61 | global registered_helpers |
|
62 | 62 | registered_helpers[func.__name__] = func |
|
63 | 63 | |
|
64 | 64 | @wraps(func) |
|
65 | 65 | def _wrapper(*args, **kwargs): |
|
66 | 66 | return func(*args, **kwargs) |
|
67 | 67 | return _wrapper |
|
68 | 68 | |
|
69 | 69 | |
|
70 | 70 | # HELPERS |
|
71 | 71 | def percentage(part: int | float, whole: int | float):
|
72 | 72 | whole = float(whole) |
|
73 | 73 | if whole > 0: |
|
74 | 74 | return round(100 * float(part) / whole, 1) |
|
75 | 75 | return 0.0 |
|
76 | 76 | |
|
77 | 77 | |
|
78 | 78 | def get_storage_size(storage_path): |
|
79 | 79 | sizes = [] |
|
80 | 80 | for file_ in os.listdir(storage_path): |
|
81 | 81 | storage_file = os.path.join(storage_path, file_) |
|
82 | 82 | if os.path.isfile(storage_file): |
|
83 | 83 | try: |
|
84 | 84 | sizes.append(os.path.getsize(storage_file)) |
|
85 | 85 | except OSError: |
|
86 | 86 | log.exception('Failed to get size of storage file %s', storage_file) |
|
87 | 87 | pass |
|
88 | 88 | |
|
89 | 89 | return sum(sizes) |
|
90 | 90 | |
|
91 | 91 | |
|
92 | 92 | def get_resource(resource_type): |
|
93 | 93 | try: |
|
94 | 94 | return resource.getrlimit(resource_type) |
|
95 | 95 | except Exception: |
|
96 | 96 | return 'NOT_SUPPORTED' |
|
97 | 97 | |
|
98 | 98 | |
|
99 | 99 | def get_cert_path(ini_path): |
|
100 | 100 | default = '/etc/ssl/certs/ca-certificates.crt' |
|
101 | 101 | control_ca_bundle = os.path.join( |
|
102 | 102 | os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ini_path)))), |
|
103 | 103 | '/etc/ssl/certs/ca-certificates.crt') |
|
104 | 104 | if os.path.isfile(control_ca_bundle): |
|
105 | 105 | default = control_ca_bundle |
|
106 | 106 | |
|
107 | 107 | return default |
|
108 | 108 | |
|
109 | 109 | |
|
110 | 110 | class SysInfoRes(object): |
|
111 | 111 | def __init__(self, value, state=None, human_value=None): |
|
112 | 112 | self.value = value |
|
113 | 113 | self.state = state or STATE_OK_DEFAULT |
|
114 | 114 | self.human_value = human_value or value |
|
115 | 115 | |
|
116 | 116 | def __json__(self): |
|
117 | 117 | return { |
|
118 | 118 | 'value': self.value, |
|
119 | 119 | 'state': self.state, |
|
120 | 120 | 'human_value': self.human_value, |
|
121 | 121 | } |
|
122 | 122 | |
|
123 | 123 | def get_value(self): |
|
124 | 124 | return self.__json__() |
|
125 | 125 | |
|
126 | 126 | def __str__(self): |
|
127 | 127 | return f'<SysInfoRes({self.__json__()})>' |
|
128 | 128 | |
|
129 | 129 | |
|
130 | 130 | class SysInfo(object): |
|
131 | 131 | |
|
132 | 132 | def __init__(self, func_name, **kwargs): |
|
133 | 133 | self.function_name = func_name |
|
134 | 134 | self.value = _NA |
|
135 | 135 | self.state = None |
|
136 | 136 | self.kwargs = kwargs or {} |
|
137 | 137 | |
|
138 | 138 | def __call__(self): |
|
139 | 139 | computed = self.compute(**self.kwargs) |
|
140 | 140 | if not isinstance(computed, SysInfoRes): |
|
141 | 141 | raise ValueError( |
|
142 | 142 | 'computed value for {} is not instance of ' |
|
143 | 143 | '{}, got {} instead'.format( |
|
144 | 144 | self.function_name, SysInfoRes, type(computed))) |
|
145 | 145 | return computed.__json__() |
|
146 | 146 | |
|
147 | 147 | def __str__(self): |
|
148 | 148 | return f'<SysInfo({self.function_name})>' |
|
149 | 149 | |
|
150 | 150 | def compute(self, **kwargs): |
|
151 | 151 | return self.function_name(**kwargs) |
|
152 | 152 | |
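
    # Illustration: every helper below returns a SysInfoRes; wrapping it in
    # SysInfo and calling it yields the plain dict form, exactly what
    # get_system_info() assembles at the bottom of this module:
    #   SysInfo(python_info)()
    #   -> {'value': {...}, 'state': {'message': '', 'type': 'ok'}, 'human_value': {...}}
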
|
153 | 153 | |
|
154 | 154 | # SysInfo functions |
|
155 | 155 | @register_sysinfo |
|
156 | 156 | def python_info(): |
|
157 | 157 | value = dict(version=f'{platform.python_version()}:{platform.python_implementation()}', |
|
158 | 158 | executable=sys.executable) |
|
159 | 159 | return SysInfoRes(value=value) |
|
160 | 160 | |
|
161 | 161 | |
|
162 | 162 | @register_sysinfo |
|
163 | 163 | def py_modules(): |
|
164 | 164 | mods = {p.project_name: {'version': p.version, 'location': p.location}

165 | 165 | for p in pkg_resources.working_set}
|
166 | 166 | |
|
167 | 167 | value = sorted(mods.items(), key=lambda k: k[0].lower()) |
|
168 | 168 | return SysInfoRes(value=value) |
|
169 | 169 | |
|
170 | 170 | |
|
171 | 171 | @register_sysinfo |
|
172 | 172 | def platform_type(): |
|
173 | 173 | from rhodecode.lib.utils import generate_platform_uuid |
|
174 | 174 | |
|
175 | 175 | value = dict( |
|
176 | 176 | name=safe_str(platform.platform()), |
|
177 | 177 | uuid=generate_platform_uuid() |
|
178 | 178 | ) |
|
179 | 179 | return SysInfoRes(value=value) |
|
180 | 180 | |
|
181 | 181 | |
|
182 | 182 | @register_sysinfo |
|
183 | 183 | def locale_info(): |
|
184 | 184 | import locale |
|
185 | 185 | |
|
186 | 186 | def safe_get_locale(locale_name): |
|
187 | 187 | try: |
|
188 | 188 | return locale.getlocale(locale_name)
|
189 | 189 | except TypeError: |
|
190 | 190 | return f'FAILED_LOCALE_GET:{locale_name}' |
|
191 | 191 | |
|
192 | 192 | value = dict( |
|
193 | 193 | locale_default=locale.getlocale(), |
|
194 | 194 | locale_lc_all=safe_get_locale(locale.LC_ALL), |
|
195 | 195 | locale_lc_ctype=safe_get_locale(locale.LC_CTYPE), |
|
196 | 196 | lang_env=os.environ.get('LANG'), |
|
197 | 197 | lc_all_env=os.environ.get('LC_ALL'), |
|
198 | 198 | local_archive_env=os.environ.get('LOCALE_ARCHIVE'), |
|
199 | 199 | ) |
|
200 | 200 | human_value = \ |
|
201 | 201 | f"LANG: {value['lang_env']}, \ |
|
202 | 202 | locale LC_ALL: {value['locale_lc_all']}, \ |
|
203 | 203 | locale LC_CTYPE: {value['locale_lc_ctype']}, \ |
|
204 | 204 | Default locales: {value['locale_default']}" |
|
205 | 205 | |
|
206 | 206 | return SysInfoRes(value=value, human_value=human_value) |
|
207 | 207 | |
|
208 | 208 | |
|
209 | 209 | @register_sysinfo |
|
210 | 210 | def ulimit_info(): |
|
211 | 211 | data = collections.OrderedDict([ |
|
212 | 212 | ('cpu time (seconds)', get_resource(resource.RLIMIT_CPU)), |
|
213 | 213 | ('file size', get_resource(resource.RLIMIT_FSIZE)), |
|
214 | 214 | ('stack size', get_resource(resource.RLIMIT_STACK)), |
|
215 | 215 | ('core file size', get_resource(resource.RLIMIT_CORE)), |
|
216 | 216 | ('address space size', get_resource(resource.RLIMIT_AS)), |
|
217 | 217 | ('locked in mem size', get_resource(resource.RLIMIT_MEMLOCK)), |
|
218 | 218 | ('heap size', get_resource(resource.RLIMIT_DATA)), |
|
219 | 219 | ('rss size', get_resource(resource.RLIMIT_RSS)), |
|
220 | 220 | ('number of processes', get_resource(resource.RLIMIT_NPROC)), |
|
221 | 221 | ('open files', get_resource(resource.RLIMIT_NOFILE)), |
|
222 | 222 | ]) |
|
223 | 223 | |
|
224 | 224 | text = ', '.join(f'{k}:{v}' for k, v in data.items()) |
|
225 | 225 | |
|
226 | 226 | value = { |
|
227 | 227 | 'limits': data, |
|
228 | 228 | 'text': text, |
|
229 | 229 | } |
|
230 | 230 | return SysInfoRes(value=value) |
|
231 | 231 | |
|
232 | 232 | |
|
233 | 233 | @register_sysinfo |
|
234 | 234 | def uptime(): |
|
235 | 235 | from rhodecode.lib.helpers import age, time_to_datetime |
|
236 | 236 | from rhodecode.translation import TranslationString |
|
237 | 237 | |
|
238 | 238 | value = dict(boot_time=0, uptime=0, text='') |
|
239 | 239 | state = STATE_OK_DEFAULT |
|
240 | 240 | |
|
241 | 241 | boot_time = psutil.boot_time() |
|
242 | 242 | value['boot_time'] = boot_time |
|
243 | 243 | value['uptime'] = time.time() - boot_time |
|
244 | 244 | |
|
245 | 245 | date_or_age = age(time_to_datetime(boot_time)) |
|
246 | 246 | if isinstance(date_or_age, TranslationString): |
|
247 | 247 | date_or_age = date_or_age.interpolate() |
|
248 | 248 | |
|
249 | 249 | human_value = value.copy() |
|
250 | 250 | human_value['boot_time'] = time_to_datetime(boot_time) |
|
251 | 251 | human_value['uptime'] = age(time_to_datetime(boot_time), show_suffix=False) |
|
252 | 252 | |
|
253 | 253 | human_value['text'] = f'Server started {date_or_age}' |
|
254 | 254 | return SysInfoRes(value=value, human_value=human_value) |
|
255 | 255 | |
|
256 | 256 | |
|
257 | 257 | @register_sysinfo |
|
258 | 258 | def memory(): |
|
259 | 259 | from rhodecode.lib.helpers import format_byte_size_binary |
|
260 | 260 | value = dict(available=0, used=0, used_real=0, cached=0, percent=0, |
|
261 | 261 | percent_used=0, free=0, inactive=0, active=0, shared=0, |
|
262 | 262 | total=0, buffers=0, text='') |
|
263 | 263 | |
|
264 | 264 | state = STATE_OK_DEFAULT |
|
265 | 265 | |
|
266 | 266 | value.update(dict(psutil.virtual_memory()._asdict())) |
|
267 | 267 | value['used_real'] = value['total'] - value['available'] |
|
268 | 268 | value['percent_used'] = psutil._common.usage_percent(value['used_real'], value['total'], 1) |
|
269 | 269 | |
|
270 | 270 | human_value = value.copy() |
|
271 | 271 | human_value['text'] = '{}/{}, {}% used'.format( |
|
272 | 272 | format_byte_size_binary(value['used_real']), |
|
273 | 273 | format_byte_size_binary(value['total']), |
|
274 | 274 | value['percent_used']) |
|
275 | 275 | |
|
276 | 276 | keys = list(value.keys())[::] |
|
277 | 277 | keys.pop(keys.index('percent')) |
|
278 | 278 | keys.pop(keys.index('percent_used')) |
|
279 | 279 | keys.pop(keys.index('text')) |
|
280 | 280 | for k in keys: |
|
281 | 281 | human_value[k] = format_byte_size_binary(value[k]) |
|
282 | 282 | |
|
283 | 283 | if state['type'] == STATE_OK and value['percent_used'] > 90: |
|
284 | 284 | msg = 'Critical: your available RAM is very low.'
|
285 | 285 | state = {'message': msg, 'type': STATE_ERR} |
|
286 | 286 | |
|
287 | 287 | elif state['type'] == STATE_OK and value['percent_used'] > 70: |
|
288 | 288 | msg = 'Warning: your available RAM is running low.'
|
289 | 289 | state = {'message': msg, 'type': STATE_WARN} |
|
290 | 290 | |
|
291 | 291 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
292 | 292 | |
|
293 | 293 | |
|
294 | 294 | @register_sysinfo |
|
295 | 295 | def machine_load(): |
|
296 | 296 | value = {'1_min': _NA_FLOAT, '5_min': _NA_FLOAT, '15_min': _NA_FLOAT, 'text': ''} |
|
297 | 297 | state = STATE_OK_DEFAULT |
|
298 | 298 | |
|
299 | 299 | # load averages |
|
300 | 300 | if hasattr(psutil.os, 'getloadavg'): |
|
301 | 301 | value.update(dict( |
|
302 | 302 | list(zip(['1_min', '5_min', '15_min'], psutil.os.getloadavg())) |
|
303 | 303 | )) |
|
304 | 304 | |
|
305 | 305 | human_value = value.copy() |
|
306 | 306 | human_value['text'] = '1min: {}, 5min: {}, 15min: {}'.format( |
|
307 | 307 | value['1_min'], value['5_min'], value['15_min']) |
|
308 | 308 | |
|
309 | 309 | if state['type'] == STATE_OK and value['15_min'] > 5.0: |
|
310 | 310 | msg = 'Warning: your machine load is very high.' |
|
311 | 311 | state = {'message': msg, 'type': STATE_WARN} |
|
312 | 312 | |
|
313 | 313 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
314 | 314 | |
|
315 | 315 | |
|
316 | 316 | @register_sysinfo |
|
317 | 317 | def cpu(): |
|
318 | 318 | value = {'cpu': 0, 'cpu_count': 0, 'cpu_usage': []} |
|
319 | 319 | state = STATE_OK_DEFAULT |
|
320 | 320 | |
|
321 | 321 | value['cpu'] = psutil.cpu_percent(0.5) |
|
322 | 322 | value['cpu_usage'] = psutil.cpu_percent(0.5, percpu=True) |
|
323 | 323 | value['cpu_count'] = psutil.cpu_count() |
|
324 | 324 | |
|
325 | 325 | human_value = value.copy() |
|
326 | 326 | human_value['text'] = f'{value["cpu_count"]} cores at {value["cpu"]} %' |
|
327 | 327 | |
|
328 | 328 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
329 | 329 | |
|
330 | 330 | |
|
331 | 331 | @register_sysinfo |
|
332 | 332 | def storage(): |
|
333 | 333 | from rhodecode.lib.helpers import format_byte_size_binary |
|
334 | 334 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
335 | 335 | path = get_rhodecode_repo_store_path() |
|
336 | 336 | |
|
337 | 337 | value = dict(percent=0, used=0, total=0, path=path, text='') |
|
338 | 338 | state = STATE_OK_DEFAULT |
|
339 | 339 | |
|
340 | 340 | try: |
|
341 | 341 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
342 | 342 | except Exception as e: |
|
343 | 343 | log.exception('Failed to fetch disk info') |
|
344 | 344 | state = {'message': str(e), 'type': STATE_ERR} |
|
345 | 345 | |
|
346 | 346 | human_value = value.copy() |
|
347 | 347 | human_value['used'] = format_byte_size_binary(value['used']) |
|
348 | 348 | human_value['total'] = format_byte_size_binary(value['total']) |
|
349 | 349 | human_value['text'] = "{}/{}, {}% used".format( |
|
350 | 350 | format_byte_size_binary(value['used']), |
|
351 | 351 | format_byte_size_binary(value['total']), |
|
352 | 352 | value['percent']) |
|
353 | 353 | |
|
354 | 354 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
355 | 355 | msg = 'Critical: your disk space is very low.' |
|
356 | 356 | state = {'message': msg, 'type': STATE_ERR} |
|
357 | 357 | |
|
358 | 358 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
359 | 359 | msg = 'Warning: your disk space is running low.' |
|
360 | 360 | state = {'message': msg, 'type': STATE_WARN} |
|
361 | 361 | |
|
362 | 362 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
363 | 363 | |
|
364 | 364 | |
|
365 | 365 | @register_sysinfo |
|
366 | 366 | def storage_inodes(): |
|
367 | 367 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
368 | 368 | path = get_rhodecode_repo_store_path() |
|
369 | 369 | |
|
370 | 370 | value = dict(percent=0.0, free=0, used=0, total=0, path=path, text='') |
|
371 | 371 | state = STATE_OK_DEFAULT |
|
372 | 372 | |
|
373 | 373 | try: |
|
374 | 374 | i_stat = os.statvfs(path) |
|
375 | 375 | value['free'] = i_stat.f_ffree |
|
376 | 376 | value['used'] = i_stat.f_files-i_stat.f_favail |
|
377 | 377 | value['total'] = i_stat.f_files |
|
378 | 378 | value['percent'] = percentage(value['used'], value['total']) |
|
379 | 379 | except Exception as e: |
|
380 | 380 | log.exception('Failed to fetch disk inodes info') |
|
381 | 381 | state = {'message': str(e), 'type': STATE_ERR} |
|
382 | 382 | |
|
383 | 383 | human_value = value.copy() |
|
384 | 384 | human_value['text'] = "{}/{}, {}% used".format( |
|
385 | 385 | value['used'], value['total'], value['percent']) |
|
386 | 386 | |
|
387 | 387 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
388 | 388 | msg = 'Critical: your disk free inodes are very low.' |
|
389 | 389 | state = {'message': msg, 'type': STATE_ERR} |
|
390 | 390 | |
|
391 | 391 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
392 | 392 | msg = 'Warning: your disk free inodes are running low.' |
|
393 | 393 | state = {'message': msg, 'type': STATE_WARN} |
|
394 | 394 | |
|
395 | 395 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
396 | 396 | |
|
397 | 397 | |
|
398 | 398 | @register_sysinfo |
|
399 | 399 | def storage_artifacts(): |
|
400 | 400 | import rhodecode |
|
401 | 401 | from rhodecode.lib.helpers import format_byte_size_binary |
|
402 | 402 | from rhodecode.lib.archive_cache import get_archival_cache_store |
|
403 | 403 | |
|
404 | 404 | backend_type = rhodecode.ConfigGet().get_str('archive_cache.backend.type') |
|
405 | 405 | |
|
406 | 406 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) |
|
407 | 407 | state = STATE_OK_DEFAULT |
|
408 | 408 | try: |
|
409 | 409 | d_cache = get_archival_cache_store(config=rhodecode.CONFIG) |
|
410 | 410 | backend_type = str(d_cache) |
|
411 | 411 | |
|
412 | 412 | total_files, total_size, _directory_stats = d_cache.get_statistics() |
|
413 | 413 | |
|
414 | 414 | value.update({ |
|
415 | 415 | 'percent': 100, |
|
416 | 416 | 'used': total_size, |
|
417 | 417 | 'total': total_size, |
|
418 | 418 | 'items': total_files, |
|
419 | 419 | 'path': d_cache.storage_path, |
|
420 | 420 | 'type': backend_type |
|
421 | 421 | }) |
|
422 | 422 | |
|
423 | 423 | except Exception as e: |
|
424 | 424 | log.exception('failed to fetch archive cache storage') |
|
425 | 425 | state = {'message': str(e), 'type': STATE_ERR} |
|
426 | 426 | |
|
427 | 427 | human_value = value.copy() |
|
428 | 428 | human_value['used'] = format_byte_size_binary(value['used']) |
|
429 | 429 | human_value['total'] = format_byte_size_binary(value['total']) |
|
430 | 430 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" |
|
431 | 431 | |
|
432 | 432 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
433 | 433 | |
|
434 | 434 | |
|
435 | 435 | @register_sysinfo |
|
436 | 436 | def storage_archives(): |
|
437 | 437 | import rhodecode |
|
438 | 438 | from rhodecode.lib.helpers import format_byte_size_binary |
|
439 | 439 | import rhodecode.apps.file_store.utils as store_utils |
|
440 | 440 | from rhodecode import CONFIG |
|
441 | 441 | |
|
442 | 442 | backend_type = rhodecode.ConfigGet().get_str(store_utils.config_keys.backend_type) |
|
443 | 443 | |
|
444 | 444 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) |
|
445 | 445 | state = STATE_OK_DEFAULT |
|
446 | 446 | try: |
|
447 | 447 | f_store = store_utils.get_filestore_backend(config=CONFIG) |
|
448 | 448 | backend_type = str(f_store) |
|
449 | 449 | total_files, total_size, _directory_stats = f_store.get_statistics() |
|
450 | 450 | |
|
451 | 451 | value.update({ |
|
452 | 452 | 'percent': 100, |
|
453 | 453 | 'used': total_size, |
|
454 | 454 | 'total': total_size, |
|
455 | 455 | 'items': total_files, |
|
456 | 456 | 'path': f_store.storage_path, |
|
457 | 457 | 'type': backend_type |
|
458 | 458 | }) |
|
459 | 459 | |
|
460 | 460 | except Exception as e: |
|
461 | 461 | log.exception('failed to fetch archive cache storage') |
|
462 | 462 | state = {'message': str(e), 'type': STATE_ERR} |
|
463 | 463 | |
|
464 | 464 | human_value = value.copy() |
|
465 | 465 | human_value['used'] = format_byte_size_binary(value['used']) |
|
466 | 466 | human_value['total'] = format_byte_size_binary(value['total']) |
|
467 | 467 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" |
|
468 | 468 | |
|
469 | 469 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
470 | 470 | |
|
471 | 471 | |
|
472 | 472 | @register_sysinfo |
|
473 | 473 | def storage_gist(): |
|
474 | 474 | from rhodecode.model.gist import GIST_STORE_LOC |
|
475 | 475 | from rhodecode.lib.utils import safe_str, get_rhodecode_repo_store_path |
|
476 | 476 | from rhodecode.lib.helpers import format_byte_size_binary, get_directory_statistics |
|
477 | 477 | |
|
478 | 478 | path = safe_str(os.path.join( |
|
479 | 479 | get_rhodecode_repo_store_path(), GIST_STORE_LOC)) |
|
480 | 480 | |
|
481 | 481 | # gist storage |
|
482 | 482 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
483 | 483 | state = STATE_OK_DEFAULT |
|
484 | 484 | |
|
485 | 485 | try: |
|
486 | 486 | total_files, total_size, _directory_stats = get_directory_statistics(path) |
|
487 | 487 | value.update({ |
|
488 | 488 | 'percent': 100, |
|
489 | 489 | 'used': total_size, |
|
490 | 490 | 'total': total_size, |
|
491 | 491 | 'items': total_files |
|
492 | 492 | }) |
|
493 | 493 | except Exception as e: |
|
494 | 494 | log.exception('failed to fetch gist storage items') |
|
495 | 495 | state = {'message': str(e), 'type': STATE_ERR} |
|
496 | 496 | |
|
497 | 497 | human_value = value.copy() |
|
498 | 498 | human_value['used'] = format_byte_size_binary(value['used']) |
|
499 | 499 | human_value['total'] = format_byte_size_binary(value['total']) |
|
500 | 500 | human_value['text'] = "{} ({} items)".format( |
|
501 | 501 | human_value['used'], value['items']) |
|
502 | 502 | |
|
503 | 503 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
504 | 504 | |
|
505 | 505 | |
|
506 | 506 | @register_sysinfo |
|
507 | 507 | def storage_temp(): |
|
508 | 508 | import tempfile |
|
509 | 509 | from rhodecode.lib.helpers import format_byte_size_binary |
|
510 | 510 | |
|
511 | 511 | path = tempfile.gettempdir() |
|
512 | 512 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
513 | 513 | state = STATE_OK_DEFAULT |
|
514 | 514 | |
|
515 | 515 | if not psutil: |
|
516 | 516 | return SysInfoRes(value=value, state=state) |
|
517 | 517 | |
|
518 | 518 | try: |
|
519 | 519 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
520 | 520 | except Exception as e: |
|
521 | 521 | log.exception('Failed to fetch temp dir info') |
|
522 | 522 | state = {'message': str(e), 'type': STATE_ERR} |
|
523 | 523 | |
|
524 | 524 | human_value = value.copy() |
|
525 | 525 | human_value['used'] = format_byte_size_binary(value['used']) |
|
526 | 526 | human_value['total'] = format_byte_size_binary(value['total']) |
|
527 | 527 | human_value['text'] = "{}/{}, {}% used".format( |
|
528 | 528 | format_byte_size_binary(value['used']), |
|
529 | 529 | format_byte_size_binary(value['total']), |
|
530 | 530 | value['percent']) |
|
531 | 531 | |
|
532 | 532 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
533 | 533 | |
|
534 | 534 | |
|
535 | 535 | @register_sysinfo |
|
536 | 536 | def search_info(): |
|
537 | 537 | import rhodecode |
|
538 | 538 | from rhodecode.lib.index import searcher_from_config |
|
539 | 539 | |
|
540 | 540 | backend = rhodecode.CONFIG.get('search.module', '') |
|
541 | 541 | location = rhodecode.CONFIG.get('search.location', '') |
|
542 | 542 | |
|
543 | 543 | try: |
|
544 | 544 | searcher = searcher_from_config(rhodecode.CONFIG) |
|
545 | 545 | searcher = searcher.__class__.__name__ |
|
546 | 546 | except Exception: |
|
547 | 547 | searcher = None |
|
548 | 548 | |
|
549 | 549 | value = dict( |
|
550 | 550 | backend=backend, searcher=searcher, location=location, text='') |
|
551 | 551 | state = STATE_OK_DEFAULT |
|
552 | 552 | |
|
553 | 553 | human_value = value.copy() |
|
554 | 554 | human_value['text'] = "backend:`{}`".format(human_value['backend']) |
|
555 | 555 | |
|
556 | 556 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
557 | 557 | |
|
558 | 558 | |
|
559 | 559 | @register_sysinfo |
|
560 | 560 | def git_info(): |
|
561 | 561 | from rhodecode.lib.vcs.backends import git |
|
562 | 562 | state = STATE_OK_DEFAULT |
|
563 | 563 | value = human_value = '' |
|
564 | 564 | try: |
|
565 | 565 | value = git.discover_git_version(raise_on_exc=True) |
|
566 | 566 | human_value = f'version reported from VCSServer: {value}' |
|
567 | 567 | except Exception as e: |
|
568 | 568 | state = {'message': str(e), 'type': STATE_ERR} |
|
569 | 569 | |
|
570 | 570 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
571 | 571 | |
|
572 | 572 | |
|
573 | 573 | @register_sysinfo |
|
574 | 574 | def hg_info(): |
|
575 | 575 | from rhodecode.lib.vcs.backends import hg |
|
576 | 576 | state = STATE_OK_DEFAULT |
|
577 | 577 | value = human_value = '' |
|
578 | 578 | try: |
|
579 | 579 | value = hg.discover_hg_version(raise_on_exc=True) |
|
580 | 580 | human_value = f'version reported from VCSServer: {value}' |
|
581 | 581 | except Exception as e: |
|
582 | 582 | state = {'message': str(e), 'type': STATE_ERR} |
|
583 | 583 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
584 | 584 | |
|
585 | 585 | |
|
586 | 586 | @register_sysinfo |
|
587 | 587 | def svn_info(): |
|
588 | 588 | from rhodecode.lib.vcs.backends import svn |
|
589 | 589 | state = STATE_OK_DEFAULT |
|
590 | 590 | value = human_value = '' |
|
591 | 591 | try: |
|
592 | 592 | value = svn.discover_svn_version(raise_on_exc=True) |
|
593 | 593 | human_value = f'version reported from VCSServer: {value}' |
|
594 | 594 | except Exception as e: |
|
595 | 595 | state = {'message': str(e), 'type': STATE_ERR} |
|
596 | 596 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
597 | 597 | |
|
598 | 598 | |
|
599 | 599 | @register_sysinfo |
|
600 | 600 | def vcs_backends(): |
|
601 | 601 | import rhodecode |
|
602 | 602 | value = rhodecode.CONFIG.get('vcs.backends') |
|
603 | 603 | human_value = 'Enabled backends in order: {}'.format(','.join(value)) |
|
604 | 604 | return SysInfoRes(value=value, human_value=human_value) |
|
605 | 605 | |
|
606 | 606 | |
|
607 | 607 | @register_sysinfo |
|
608 | 608 | def vcs_server(): |
|
609 | 609 | import rhodecode |
|
610 | 610 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
611 | 611 | |
|
612 | 612 | server_url = rhodecode.CONFIG.get('vcs.server') |
|
613 | 613 | enabled = rhodecode.CONFIG.get('vcs.server.enable') |
|
614 | 614 | protocol = rhodecode.CONFIG.get('vcs.server.protocol') or 'http' |
|
615 | 615 | state = STATE_OK_DEFAULT |
|
616 | 616 | version = None |
|
617 | 617 | workers = 0 |
|
618 | 618 | |
|
619 | 619 | try: |
|
620 | 620 | data = get_vcsserver_service_data() |
|
621 | 621 | if data and 'version' in data: |
|
622 | 622 | version = data['version'] |
|
623 | 623 | |
|
624 | 624 | if data and 'config' in data: |
|
625 | 625 | conf = data['config'] |
|
626 | 626 | workers = conf.get('workers', 'NOT AVAILABLE') |
|
627 | 627 | |
|
628 | 628 | connection = 'connected' |
|
629 | 629 | except Exception as e: |
|
630 | 630 | connection = 'failed' |
|
631 | 631 | state = {'message': str(e), 'type': STATE_ERR} |
|
632 | 632 | |
|
633 | 633 | value = dict( |
|
634 | 634 | url=server_url, |
|
635 | 635 | enabled=enabled, |
|
636 | 636 | protocol=protocol, |
|
637 | 637 | connection=connection, |
|
638 | 638 | version=version, |
|
639 | 639 | text='', |
|
640 | 640 | ) |
|
641 | 641 | |
|
642 | 642 | human_value = value.copy() |
|
643 | 643 | human_value['text'] = \ |
|
644 | 644 | '{url}@ver:{ver} via {mode} mode[workers:{workers}], connection:{conn}'.format( |
|
645 | 645 | url=server_url, ver=version, workers=workers, mode=protocol, |
|
646 | 646 | conn=connection) |
|
647 | 647 | |
|
648 | 648 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
649 | 649 | |
|
650 | 650 | |
|
651 | 651 | @register_sysinfo |
|
652 | 652 | def vcs_server_config(): |
|
653 | 653 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
654 | 654 | state = STATE_OK_DEFAULT |
|
655 | 655 | |
|
656 | 656 | value = {} |
|
657 | 657 | try: |
|
658 | 658 | data = get_vcsserver_service_data() |
|
659 | 659 | value = data['app_config'] |
|
660 | 660 | except Exception as e: |
|
661 | 661 | state = {'message': str(e), 'type': STATE_ERR} |
|
662 | 662 | |
|
663 | 663 | human_value = value.copy() |
|
664 | 664 | human_value['text'] = 'VCS Server config' |
|
665 | 665 | |
|
666 | 666 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
667 | 667 | |
|
668 | @register_sysinfo | |
|
669 | def rhodecode_server_config(): | |
|
670 | import rhodecode | |
|
671 | ||
|
672 | state = STATE_OK_DEFAULT | |
|
673 | config = rhodecode.CONFIG.copy() | |
|
674 | ||
|
675 | secrets_list = [ | |
|
676 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', | |
|
677 | 'sqlalchemy.db1.url', | |
|
678 | 'channelstream.secret', | |
|
679 | 'beaker.session.secret', | |
|
680 | 'rhodecode.encrypted_values.secret', | |
|
681 | 'appenlight.api_key', | |
|
682 | 'smtp_password', | |
|
683 | 'file_store.objectstore.secret', | |
|
684 | 'archive_cache.objectstore.secret', | |
|
685 | 'app.service_api.token', | |
|
686 | ] | |
|
687 | for k in secrets_list: | |
|
688 | if k in config: | |
|
689 | config[k] = '**OBFUSCATED**' | |
|
690 | ||
|
691 | value = human_value = config | |
|
692 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
|
693 | ||
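
    # Quick illustrative check (assumes a configured instance) that secrets
    # never leak into the system-info page: any key from secrets_list that is
    # present in rhodecode.CONFIG comes back masked.
    res = rhodecode_server_config()
    assert res.value.get('app.service_api.token', '**OBFUSCATED**') == '**OBFUSCATED**'
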
|
668 | 694 | |
|
669 | 695 | @register_sysinfo |
|
670 | 696 | def rhodecode_app_info(): |
|
671 | 697 | import rhodecode |
|
672 | 698 | edition = rhodecode.CONFIG.get('rhodecode.edition') |
|
673 | 699 | |
|
674 | 700 | value = dict( |
|
675 | 701 | rhodecode_version=rhodecode.__version__, |
|
676 | 702 | rhodecode_lib_path=os.path.abspath(rhodecode.__file__), |
|
677 | 703 | text='' |
|
678 | 704 | ) |
|
679 | 705 | human_value = value.copy() |
|
680 | 706 | human_value['text'] = 'RhodeCode {edition}, version {ver}'.format( |
|
681 | 707 | edition=edition, ver=value['rhodecode_version'] |
|
682 | 708 | ) |
|
683 | 709 | return SysInfoRes(value=value, human_value=human_value) |
|
684 | 710 | |
|
685 | 711 | |
|
686 | 712 | @register_sysinfo |
|
687 | 713 | def rhodecode_config(): |
|
688 | 714 | import rhodecode |
|
689 | 715 | path = rhodecode.CONFIG.get('__file__') |
|
690 | 716 | rhodecode_ini_safe = rhodecode.CONFIG.copy() |
|
691 | 717 | cert_path = get_cert_path(path) |
|
692 | 718 | |
|
693 | 719 | try: |
|
694 | 720 | config = configparser.ConfigParser() |
|
695 | 721 | config.read(path) |
|
696 | 722 | parsed_ini = config |
|
697 | 723 | if parsed_ini.has_section('server:main'): |
|
698 | 724 | parsed_ini = dict(parsed_ini.items('server:main')) |
|
699 | 725 | except Exception: |
|
700 | 726 | log.exception('Failed to read .ini file for display') |
|
701 | 727 | parsed_ini = {} |
|
702 | 728 | |
|
703 | 729 | rhodecode_ini_safe['server:main'] = parsed_ini |
|
704 | 730 | |
|
705 | 731 | blacklist = [ |
|
706 | 732 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', |
|
707 | 733 | 'routes.map', |
|
708 | 734 | 'sqlalchemy.db1.url', |
|
709 | 735 | 'channelstream.secret', |
|
710 | 736 | 'beaker.session.secret', |
|
711 | 737 | 'rhodecode.encrypted_values.secret', |
|
712 | 738 | 'rhodecode_auth_github_consumer_key', |
|
713 | 739 | 'rhodecode_auth_github_consumer_secret', |
|
714 | 740 | 'rhodecode_auth_google_consumer_key', |
|
715 | 741 | 'rhodecode_auth_google_consumer_secret', |
|
716 | 742 | 'rhodecode_auth_bitbucket_consumer_secret', |
|
717 | 743 | 'rhodecode_auth_bitbucket_consumer_key', |
|
718 | 744 | 'rhodecode_auth_twitter_consumer_secret', |
|
719 | 745 | 'rhodecode_auth_twitter_consumer_key', |
|
720 | 746 | |
|
721 | 747 | 'rhodecode_auth_twitter_secret', |
|
722 | 748 | 'rhodecode_auth_github_secret', |
|
723 | 749 | 'rhodecode_auth_google_secret', |
|
724 | 750 | 'rhodecode_auth_bitbucket_secret', |
|
725 | 751 | |
|
726 | 752 | 'appenlight.api_key', |
|
727 | 753 | ('app_conf', 'sqlalchemy.db1.url') |
|
728 | 754 | ] |
|
729 | 755 | for k in blacklist: |
|
730 | 756 | if isinstance(k, tuple): |
|
731 | 757 | section, key = k |
|
732 | 758 | if section in rhodecode_ini_safe: |
|
733 | 759 | rhodecode_ini_safe[section] = '**OBFUSCATED**' |
|
734 | 760 | else: |
|
735 | 761 | rhodecode_ini_safe.pop(k, None) |
|
736 | 762 | |
|
737 | 763 | # TODO: maybe put some CONFIG checks here ? |
|
738 | 764 | return SysInfoRes(value={'config': rhodecode_ini_safe, |
|
739 | 765 | 'path': path, 'cert_path': cert_path}) |
|
740 | 766 | |
|
741 | 767 | |
|
742 | 768 | @register_sysinfo |
|
743 | 769 | def database_info(): |
|
744 | 770 | import rhodecode |
|
745 | 771 | from sqlalchemy.engine import url as engine_url |
|
746 | 772 | from rhodecode.model import meta |
|
747 | 773 | from rhodecode.model.meta import Session |
|
748 | 774 | from rhodecode.model.db import DbMigrateVersion |
|
749 | 775 | |
|
750 | 776 | state = STATE_OK_DEFAULT |
|
751 | 777 | |
|
752 | 778 | db_migrate = DbMigrateVersion.query().filter( |
|
753 | 779 | DbMigrateVersion.repository_id == 'rhodecode_db_migrations').one() |
|
754 | 780 | |
|
755 | 781 | db_url_obj = engine_url.make_url(rhodecode.CONFIG['sqlalchemy.db1.url']) |
|
756 | 782 | |
|
757 | 783 | try: |
|
758 | 784 | engine = meta.get_engine() |
|
759 | 785 | db_server_info = engine.dialect._get_server_version_info( |
|
760 | 786 | Session.connection(bind=engine)) |
|
761 | 787 | db_version = '.'.join(map(str, db_server_info)) |
|
762 | 788 | except Exception: |
|
763 | 789 | log.exception('failed to fetch db version') |
|
764 | 790 | db_version = 'UNKNOWN' |
|
765 | 791 | |
|
766 | 792 | db_info = dict( |
|
767 | 793 | migrate_version=db_migrate.version, |
|
768 | 794 | type=db_url_obj.get_backend_name(), |
|
769 | 795 | version=db_version, |
|
770 | 796 | url=repr(db_url_obj) |
|
771 | 797 | ) |
|
772 | 798 | current_version = db_migrate.version |
|
773 | 799 | expected_version = rhodecode.__dbversion__ |
|
774 | 800 | if state['type'] == STATE_OK and current_version != expected_version: |
|
775 | 801 | msg = 'Critical: database schema mismatch, ' \ |
|
776 | 802 | 'expected version {}, got {}. ' \ |
|
777 | 803 | 'Please run migrations on your database.'.format( |
|
778 | 804 | expected_version, current_version) |
|
779 | 805 | state = {'message': msg, 'type': STATE_ERR} |
|
780 | 806 | |
|
781 | 807 | human_value = db_info.copy() |
|
782 | 808 | human_value['url'] = "{} @ migration version: {}".format( |
|
783 | 809 | db_info['url'], db_info['migrate_version']) |
|
784 | 810 | human_value['version'] = "{} {}".format(db_info['type'], db_info['version']) |
|
785 | 811 | return SysInfoRes(value=db_info, state=state, human_value=human_value) |
|
786 | 812 | |
|
787 | 813 | |
|
788 | 814 | @register_sysinfo |
|
789 | 815 | def server_info(environ): |
|
790 | 816 | import rhodecode |
|
791 | 817 | from rhodecode.lib.base import get_server_ip_addr, get_server_port |
|
792 | 818 | |
|
793 | 819 | value = { |
|
794 | 820 | 'server_ip': '{}:{}'.format( |
|
795 | 821 | get_server_ip_addr(environ, log_errors=False), |
|
796 | 822 | get_server_port(environ) |
|
797 | 823 | ), |
|
798 | 824 | 'server_id': rhodecode.CONFIG.get('instance_id'), |
|
799 | 825 | } |
|
800 | 826 | return SysInfoRes(value=value) |
|
801 | 827 | |
|
802 | 828 | |
|
803 | 829 | @register_sysinfo |
|
804 | 830 | def usage_info(): |
|
805 | 831 | from rhodecode.model.db import User, Repository, true |
|
806 | 832 | value = { |
|
807 | 833 | 'users': User.query().count(), |
|
808 | 834 | 'users_active': User.query().filter(User.active == true()).count(), |
|
809 | 835 | 'repositories': Repository.query().count(), |
|
810 | 836 | 'repository_types': { |
|
811 | 837 | 'hg': Repository.query().filter( |
|
812 | 838 | Repository.repo_type == 'hg').count(), |
|
813 | 839 | 'git': Repository.query().filter( |
|
814 | 840 | Repository.repo_type == 'git').count(), |
|
815 | 841 | 'svn': Repository.query().filter( |
|
816 | 842 | Repository.repo_type == 'svn').count(), |
|
817 | 843 | }, |
|
818 | 844 | } |
|
819 | 845 | return SysInfoRes(value=value) |
|
820 | 846 | |
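# --- Editor's note (illustrative, not part of this changeset) ---
# Example of the value shape returned by usage_info(), with made-up counts:
#
#     {'users': 10, 'users_active': 8, 'repositories': 25,
#      'repository_types': {'hg': 5, 'git': 18, 'svn': 2}}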
|
821 | 847 | |
|
822 | 848 | def get_system_info(environ): |
|
823 | 849 | environ = environ or {} |
|
824 | 850 | return { |
|
825 | 851 | 'rhodecode_app': SysInfo(rhodecode_app_info)(), |
|
826 | 852 | 'rhodecode_config': SysInfo(rhodecode_config)(), |
|
827 | 853 | 'rhodecode_usage': SysInfo(usage_info)(), |
|
828 | 854 | 'python': SysInfo(python_info)(), |
|
829 | 855 | 'py_modules': SysInfo(py_modules)(), |
|
830 | 856 | |
|
831 | 857 | 'platform': SysInfo(platform_type)(), |
|
832 | 858 | 'locale': SysInfo(locale_info)(), |
|
833 | 859 | 'server': SysInfo(server_info, environ=environ)(), |
|
834 | 860 | 'database': SysInfo(database_info)(), |
|
835 | 861 | 'ulimit': SysInfo(ulimit_info)(), |
|
836 | 862 | 'storage': SysInfo(storage)(), |
|
837 | 863 | 'storage_inodes': SysInfo(storage_inodes)(), |
|
838 | 864 | 'storage_archive': SysInfo(storage_archives)(), |
|
839 | 865 | 'storage_artifacts': SysInfo(storage_artifacts)(), |
|
840 | 866 | 'storage_gist': SysInfo(storage_gist)(), |
|
841 | 867 | 'storage_temp': SysInfo(storage_temp)(), |
|
842 | 868 | |
|
843 | 869 | 'search': SysInfo(search_info)(), |
|
844 | 870 | |
|
845 | 871 | 'uptime': SysInfo(uptime)(), |
|
846 | 872 | 'load': SysInfo(machine_load)(), |
|
847 | 873 | 'cpu': SysInfo(cpu)(), |
|
848 | 874 | 'memory': SysInfo(memory)(), |
|
849 | 875 | |
|
850 | 876 | 'vcs_backends': SysInfo(vcs_backends)(), |
|
851 | 877 | 'vcs_server': SysInfo(vcs_server)(), |
|
852 | 878 | |
|
853 | 879 | 'vcs_server_config': SysInfo(vcs_server_config)(), |
|
880 | 'rhodecode_server_config': SysInfo(rhodecode_server_config)(), | |
|
854 | 881 | |
|
855 | 882 | 'git': SysInfo(git_info)(), |
|
856 | 883 | 'hg': SysInfo(hg_info)(), |
|
857 | 884 | 'svn': SysInfo(svn_info)(), |
|
858 | 885 | } |
|
859 | 886 | |
|
860 | 887 | |
|
861 | 888 | def load_system_info(key): |
|
862 | 889 | """ |
|
863 | 890 | get_sys_info('vcs_server') |
|
864 | 891 | get_sys_info('database') |
|
865 | 892 | """ |
|
866 | 893 | return SysInfo(registered_helpers[key])() |
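# --- Editor's note (illustrative, not part of this changeset) ---
# Hedged usage sketch of the registry above: every key listed in
# get_system_info() can also be loaded individually, e.g.
#
#     db_info = load_system_info('database')
#     vcs_info = load_system_info('vcs_server')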
@@ -1,1203 +1,1212 b'' | |||
|
1 | 1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import os |
|
20 | 20 | import re |
|
21 | 21 | import shutil |
|
22 | 22 | import time |
|
23 | 23 | import logging |
|
24 | 24 | import traceback |
|
25 | 25 | import datetime |
|
26 | 26 | |
|
27 | 27 | from pyramid.threadlocal import get_current_request |
|
28 | 28 | from sqlalchemy.orm import aliased |
|
29 | 29 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
30 | 30 | |
|
31 | 31 | from rhodecode import events |
|
32 | 32 | from rhodecode.lib.auth import HasUserGroupPermissionAny |
|
33 | 33 | from rhodecode.lib.caching_query import FromCache |
|
34 | 34 | from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError, AttachedArtifactsError |
|
35 | 35 | from rhodecode.lib import hooks_base |
|
36 | 36 | from rhodecode.lib.user_log_filter import user_log_filter |
|
37 | 37 | from rhodecode.lib.utils import make_db_config |
|
38 | 38 | from rhodecode.lib.utils2 import ( |
|
39 | 39 | safe_str, remove_prefix, obfuscate_url_pw, |
|
40 | 40 | get_current_rhodecode_user, safe_int, action_logger_generic) |
|
41 | 41 | from rhodecode.lib.vcs.backends import get_backend |
|
42 | 42 | from rhodecode.lib.vcs.nodes import NodeKind |
|
43 | 43 | from rhodecode.model import BaseModel |
|
44 | 44 | from rhodecode.model.db import ( |
|
45 | 45 | _hash_key, func, case, joinedload, or_, in_filter_generator, |
|
46 | 46 | Session, Repository, UserRepoToPerm, UserGroupRepoToPerm, |
|
47 | 47 | UserRepoGroupToPerm, UserGroupRepoGroupToPerm, User, Permission, |
|
48 | 48 | Statistics, UserGroup, RepoGroup, RepositoryField, UserLog) |
|
49 | 49 | from rhodecode.model.permission import PermissionModel |
|
50 | 50 | from rhodecode.model.settings import VcsSettingsModel |
|
51 | 51 | |
|
52 | 52 | log = logging.getLogger(__name__) |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | class RepoModel(BaseModel): |
|
56 | 56 | |
|
57 | 57 | cls = Repository |
|
58 | 58 | |
|
59 | 59 | def _get_user_group(self, users_group): |
|
60 | 60 | return self._get_instance(UserGroup, users_group, |
|
61 | 61 | callback=UserGroup.get_by_group_name) |
|
62 | 62 | |
|
63 | 63 | def _get_repo_group(self, repo_group): |
|
64 | 64 | return self._get_instance(RepoGroup, repo_group, |
|
65 | 65 | callback=RepoGroup.get_by_group_name) |
|
66 | 66 | |
|
67 | 67 | def _create_default_perms(self, repository, private): |
|
68 | 68 | # create default permission |
|
69 | 69 | default = 'repository.read' |
|
70 | 70 | def_user = User.get_default_user() |
|
71 | 71 | for p in def_user.user_perms: |
|
72 | 72 | if p.permission.permission_name.startswith('repository.'): |
|
73 | 73 | default = p.permission.permission_name |
|
74 | 74 | break |
|
75 | 75 | |
|
76 | 76 | default_perm = 'repository.none' if private else default |
|
77 | 77 | |
|
78 | 78 | repo_to_perm = UserRepoToPerm() |
|
79 | 79 | repo_to_perm.permission = Permission.get_by_key(default_perm) |
|
80 | 80 | |
|
81 | 81 | repo_to_perm.repository = repository |
|
82 | 82 | repo_to_perm.user = def_user |
|
83 | 83 | |
|
84 | 84 | return repo_to_perm |
|
85 | 85 | |
|
86 | 86 | def get(self, repo_id): |
|
87 | 87 | repo = self.sa.query(Repository) \ |
|
88 | 88 | .filter(Repository.repo_id == repo_id) |
|
89 | 89 | |
|
90 | 90 | return repo.scalar() |
|
91 | 91 | |
|
92 | 92 | def get_repo(self, repository): |
|
93 | 93 | return self._get_repo(repository) |
|
94 | 94 | |
|
95 | 95 | def get_by_repo_name(self, repo_name, cache=False): |
|
96 | 96 | repo = self.sa.query(Repository) \ |
|
97 | 97 | .filter(Repository.repo_name == repo_name) |
|
98 | 98 | |
|
99 | 99 | if cache: |
|
100 | 100 | name_key = _hash_key(repo_name) |
|
101 | 101 | repo = repo.options( |
|
102 | 102 | FromCache("sql_cache_short", f"get_repo_{name_key}")) |
|
103 | 103 | return repo.scalar() |
|
104 | 104 | |
|
105 | 105 | def _extract_id_from_repo_name(self, repo_name): |
|
106 | 106 | if repo_name.startswith('/'): |
|
107 | 107 | repo_name = repo_name.lstrip('/') |
|
108 | 108 | by_id_match = re.match(r'^_(\d+)', repo_name) |
|
109 | 109 | if by_id_match: |
|
110 | 110 | return by_id_match.groups()[0] |
|
111 | 111 | |
|
112 | 112 | def get_repo_by_id(self, repo_name): |
|
113 | 113 | """ |
|
114 | 114 |         Extracts the repository by id from special URLs.
115 | 115 |         An example URL is _11/repo_name
|
116 | 116 | |
|
117 | 117 | :param repo_name: |
|
118 | 118 | :return: repo object if matched else None |
|
119 | 119 | """ |
|
120 | 120 | _repo_id = None |
|
121 | 121 | try: |
|
122 | 122 | _repo_id = self._extract_id_from_repo_name(repo_name) |
|
123 | 123 | if _repo_id: |
|
124 | 124 | return self.get(_repo_id) |
|
125 | 125 | except Exception: |
|
126 | 126 | log.exception('Failed to extract repo_name from URL') |
|
127 | 127 | if _repo_id: |
|
128 | 128 | Session().rollback() |
|
129 | 129 | |
|
130 | 130 | return None |
|
131 | 131 | |
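# --- Editor's sketch (illustrative, not part of this changeset) ---
# The by-id URL parsing above, as a self-contained function using the same regex:
import re

def _extract_repo_id_sketch(repo_name):
    repo_name = repo_name.lstrip('/')
    match = re.match(r'^_(\d+)', repo_name)
    return match.groups()[0] if match else None

assert _extract_repo_id_sketch('_11/repo_name') == '11'
assert _extract_repo_id_sketch('group/repo') is None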
|
132 | 132 | def get_repos_for_root(self, root, traverse=False): |
|
133 | 133 | if traverse: |
|
134 | 134 | like_expression = u'{}%'.format(safe_str(root)) |
|
135 | 135 | repos = Repository.query().filter( |
|
136 | 136 | Repository.repo_name.like(like_expression)).all() |
|
137 | 137 | else: |
|
138 | 138 | if root and not isinstance(root, RepoGroup): |
|
139 | 139 | raise ValueError( |
|
140 | 140 | 'Root must be an instance ' |
|
141 | 141 | 'of RepoGroup, got:{} instead'.format(type(root))) |
|
142 | 142 | repos = Repository.query().filter(Repository.group == root).all() |
|
143 | 143 | return repos |
|
144 | 144 | |
|
145 | 145 | def get_url(self, repo, request=None, permalink=False): |
|
146 | 146 | if not request: |
|
147 | 147 | request = get_current_request() |
|
148 | 148 | |
|
149 | 149 | if not request: |
|
150 | 150 | return |
|
151 | 151 | |
|
152 | 152 | if permalink: |
|
153 | 153 | return request.route_url( |
|
154 | 154 | 'repo_summary', repo_name='_{}'.format(safe_str(repo.repo_id))) |
|
155 | 155 | else: |
|
156 | 156 | return request.route_url( |
|
157 | 157 | 'repo_summary', repo_name=safe_str(repo.repo_name)) |
|
158 | 158 | |
|
159 | 159 | def get_commit_url(self, repo, commit_id, request=None, permalink=False): |
|
160 | 160 | if not request: |
|
161 | 161 | request = get_current_request() |
|
162 | 162 | |
|
163 | 163 | if not request: |
|
164 | 164 | return |
|
165 | 165 | |
|
166 | 166 | if permalink: |
|
167 | 167 | return request.route_url( |
|
168 | 168 | 'repo_commit', repo_name=safe_str(repo.repo_id), |
|
169 | 169 | commit_id=commit_id) |
|
170 | 170 | |
|
171 | 171 | else: |
|
172 | 172 | return request.route_url( |
|
173 | 173 | 'repo_commit', repo_name=safe_str(repo.repo_name), |
|
174 | 174 | commit_id=commit_id) |
|
175 | 175 | |
|
176 | 176 | def get_repo_log(self, repo, filter_term): |
|
177 | 177 | repo_log = UserLog.query()\ |
|
178 | 178 | .filter(or_(UserLog.repository_id == repo.repo_id, |
|
179 | 179 | UserLog.repository_name == repo.repo_name))\ |
|
180 | 180 | .options(joinedload(UserLog.user))\ |
|
181 | 181 | .options(joinedload(UserLog.repository))\ |
|
182 | 182 | .order_by(UserLog.action_date.desc()) |
|
183 | 183 | |
|
184 | 184 | repo_log = user_log_filter(repo_log, filter_term) |
|
185 | 185 | return repo_log |
|
186 | 186 | |
|
187 | 187 | @classmethod |
|
188 | 188 | def update_commit_cache(cls, repositories=None): |
|
189 | 189 | if not repositories: |
|
190 | 190 | repositories = Repository.getAll() |
|
191 | 191 | for repo in repositories: |
|
192 | 192 | repo.update_commit_cache() |
|
193 | 193 | |
|
194 | 194 | def get_repos_as_dict(self, repo_list=None, admin=False, |
|
195 | 195 | super_user_actions=False, short_name=None): |
|
196 | 196 | |
|
197 | 197 | _render = get_current_request().get_partial_renderer( |
|
198 | 198 | 'rhodecode:templates/data_table/_dt_elements.mako') |
|
199 | 199 | c = _render.get_call_context() |
|
200 | 200 | h = _render.get_helpers() |
|
201 | 201 | |
|
202 | 202 | def quick_menu(repo_name): |
|
203 | 203 | return _render('quick_menu', repo_name) |
|
204 | 204 | |
|
205 | 205 | def repo_lnk(name, rtype, rstate, private, archived, fork_repo_name): |
|
206 | 206 | if short_name is not None: |
|
207 | 207 | short_name_var = short_name |
|
208 | 208 | else: |
|
209 | 209 | short_name_var = not admin |
|
210 | 210 | return _render('repo_name', name, rtype, rstate, private, archived, fork_repo_name, |
|
211 | 211 | short_name=short_name_var, admin=False) |
|
212 | 212 | |
|
213 | 213 | def last_change(last_change): |
|
214 | 214 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: |
|
215 | 215 | ts = time.time() |
|
216 | 216 | utc_offset = (datetime.datetime.fromtimestamp(ts) |
|
217 | 217 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() |
|
218 | 218 | last_change = last_change + datetime.timedelta(seconds=utc_offset) |
|
219 | 219 | |
|
220 | 220 | return _render("last_change", last_change) |
|
221 | 221 | |
|
222 | 222 | def rss_lnk(repo_name): |
|
223 | 223 | return _render("rss", repo_name) |
|
224 | 224 | |
|
225 | 225 | def atom_lnk(repo_name): |
|
226 | 226 | return _render("atom", repo_name) |
|
227 | 227 | |
|
228 | 228 | def last_rev(repo_name, cs_cache): |
|
229 | 229 | return _render('revision', repo_name, cs_cache.get('revision'), |
|
230 | 230 | cs_cache.get('raw_id'), cs_cache.get('author'), |
|
231 | 231 | cs_cache.get('message'), cs_cache.get('date')) |
|
232 | 232 | |
|
233 | 233 | def desc(desc): |
|
234 | 234 | return _render('repo_desc', desc, c.visual.stylify_metatags) |
|
235 | 235 | |
|
236 | 236 | def state(repo_state): |
|
237 | 237 | return _render("repo_state", repo_state) |
|
238 | 238 | |
|
239 | 239 | def repo_actions(repo_name): |
|
240 | 240 | return _render('repo_actions', repo_name, super_user_actions) |
|
241 | 241 | |
|
242 | 242 | def user_profile(username): |
|
243 | 243 | return _render('user_profile', username) |
|
244 | 244 | |
|
245 | 245 | repos_data = [] |
|
246 | 246 | for repo in repo_list: |
|
247 | 247 |             # NOTE(marcink): because we use only the raw column, we need to load it like this
|
248 | 248 | changeset_cache = Repository._load_changeset_cache( |
|
249 | 249 | repo.repo_id, repo._changeset_cache) |
|
250 | 250 | |
|
251 | 251 | row = { |
|
252 | 252 | "menu": quick_menu(repo.repo_name), |
|
253 | 253 | |
|
254 | 254 | "name": repo_lnk(repo.repo_name, repo.repo_type, repo.repo_state, |
|
255 | 255 | repo.private, repo.archived, repo.fork_repo_name), |
|
256 | 256 | |
|
257 | 257 | "desc": desc(h.escape(repo.description)), |
|
258 | 258 | |
|
259 | 259 | "last_change": last_change(repo.updated_on), |
|
260 | 260 | |
|
261 | 261 | "last_changeset": last_rev(repo.repo_name, changeset_cache), |
|
262 | 262 | "last_changeset_raw": changeset_cache.get('revision'), |
|
263 | 263 | |
|
264 | 264 | "owner": user_profile(repo.owner_username), |
|
265 | 265 | |
|
266 | 266 | "state": state(repo.repo_state), |
|
267 | 267 | "rss": rss_lnk(repo.repo_name), |
|
268 | 268 | "atom": atom_lnk(repo.repo_name), |
|
269 | 269 | } |
|
270 | 270 | if admin: |
|
271 | 271 | row.update({ |
|
272 | 272 | "action": repo_actions(repo.repo_name), |
|
273 | 273 | }) |
|
274 | 274 | repos_data.append(row) |
|
275 | 275 | |
|
276 | 276 | return repos_data |
|
277 | 277 | |
|
278 | 278 | def get_repos_data_table( |
|
279 | 279 | self, draw, start, limit, |
|
280 | 280 | search_q, order_by, order_dir, |
|
281 | 281 | auth_user, repo_group_id): |
|
282 | 282 | from rhodecode.model.scm import RepoList |
|
283 | 283 | |
|
284 | 284 | _perms = ['repository.read', 'repository.write', 'repository.admin'] |
|
285 | 285 | |
|
286 | 286 | repos = Repository.query() \ |
|
287 | 287 | .filter(Repository.group_id == repo_group_id) \ |
|
288 | 288 | .all() |
|
289 | 289 | auth_repo_list = RepoList( |
|
290 | 290 | repos, perm_set=_perms, |
|
291 | 291 | extra_kwargs=dict(user=auth_user)) |
|
292 | 292 | |
|
293 | 293 | allowed_ids = [-1] |
|
294 | 294 | for repo in auth_repo_list: |
|
295 | 295 | allowed_ids.append(repo.repo_id) |
|
296 | 296 | |
|
297 | 297 | repos_data_total_count = Repository.query() \ |
|
298 | 298 | .filter(Repository.group_id == repo_group_id) \ |
|
299 | 299 | .filter(or_( |
|
300 | 300 |                 # generate multiple IN clauses to work around SQL parameter-count limits
|
301 | 301 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
302 | 302 | ) \ |
|
303 | 303 | .count() |
|
304 | 304 | |
|
305 | 305 | RepoFork = aliased(Repository) |
|
306 | 306 | OwnerUser = aliased(User) |
|
307 | 307 | base_q = Session.query( |
|
308 | 308 | Repository.repo_id, |
|
309 | 309 | Repository.repo_name, |
|
310 | 310 | Repository.description, |
|
311 | 311 | Repository.repo_type, |
|
312 | 312 | Repository.repo_state, |
|
313 | 313 | Repository.private, |
|
314 | 314 | Repository.archived, |
|
315 | 315 | Repository.updated_on, |
|
316 | 316 | Repository._changeset_cache, |
|
317 | 317 | RepoFork.repo_name.label('fork_repo_name'), |
|
318 | 318 | OwnerUser.username.label('owner_username'), |
|
319 | 319 | ) \ |
|
320 | 320 | .filter(Repository.group_id == repo_group_id) \ |
|
321 | 321 | .filter(or_( |
|
322 | 322 |                 # generate multiple IN clauses to work around SQL parameter-count limits
|
323 | 323 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
324 | 324 | ) \ |
|
325 | 325 | .outerjoin(RepoFork, Repository.fork_id == RepoFork.repo_id) \ |
|
326 | 326 | .join(OwnerUser, Repository.user_id == OwnerUser.user_id) |
|
327 | 327 | |
|
328 | 328 | repos_data_total_filtered_count = base_q.count() |
|
329 | 329 | |
|
330 | 330 | sort_defined = False |
|
331 | 331 | if order_by == 'repo_name': |
|
332 | 332 | sort_col = func.lower(Repository.repo_name) |
|
333 | 333 | sort_defined = True |
|
334 | 334 | elif order_by == 'user_username': |
|
335 | 335 | sort_col = User.username |
|
336 | 336 | else: |
|
337 | 337 | sort_col = getattr(Repository, order_by, None) |
|
338 | 338 | |
|
339 | 339 | if sort_defined or sort_col: |
|
340 | 340 | if order_dir == 'asc': |
|
341 | 341 | sort_col = sort_col.asc() |
|
342 | 342 | else: |
|
343 | 343 | sort_col = sort_col.desc() |
|
344 | 344 | |
|
345 | 345 | base_q = base_q.order_by(sort_col) |
|
346 | 346 | base_q = base_q.offset(start).limit(limit) |
|
347 | 347 | |
|
348 | 348 | repos_list = base_q.all() |
|
349 | 349 | |
|
350 | 350 | repos_data = RepoModel().get_repos_as_dict( |
|
351 | 351 | repo_list=repos_list, admin=False) |
|
352 | 352 | |
|
353 | 353 | data = ({ |
|
354 | 354 | 'draw': draw, |
|
355 | 355 | 'data': repos_data, |
|
356 | 356 | 'recordsTotal': repos_data_total_count, |
|
357 | 357 | 'recordsFiltered': repos_data_total_filtered_count, |
|
358 | 358 | }) |
|
359 | 359 | return data |
|
360 | 360 | |
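# --- Editor's sketch (assumption: the real in_filter_generator lives in
# rhodecode.model.db and may differ in detail) ---
# A plausible shape of the helper used above: split a large id list into
# chunks so each IN(...) clause stays under database parameter limits, then
# combine the yielded clauses with or_(*...), as the queries above do.
def _in_filter_chunks_sketch(column, ids, chunk_size=1000):
    for i in range(0, len(ids), chunk_size):
        yield column.in_(ids[i:i + chunk_size])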
|
361 | 361 | def _get_defaults(self, repo_name): |
|
362 | 362 | """ |
|
363 | 363 |         Gets information about a repository and returns a dict for
|
364 | 364 | usage in forms |
|
365 | 365 | |
|
366 | 366 | :param repo_name: |
|
367 | 367 | """ |
|
368 | 368 | |
|
369 | 369 | repo_info = Repository.get_by_repo_name(repo_name) |
|
370 | 370 | |
|
371 | 371 | if repo_info is None: |
|
372 | 372 | return None |
|
373 | 373 | |
|
374 | 374 | defaults = repo_info.get_dict() |
|
375 | 375 | defaults['repo_name'] = repo_info.just_name |
|
376 | 376 | |
|
377 | 377 | groups = repo_info.groups_with_parents |
|
378 | 378 | parent_group = groups[-1] if groups else None |
|
379 | 379 | |
|
380 | 380 |         # we use -1 because that is how an empty group is marked in the HTML form
|
381 | 381 | defaults['repo_group'] = getattr(parent_group, 'group_id', -1) |
|
382 | 382 | |
|
383 | 383 | keys_to_process = ( |
|
384 | 384 | {'k': 'repo_type', 'strip': False}, |
|
385 | 385 | {'k': 'repo_enable_downloads', 'strip': True}, |
|
386 | 386 | {'k': 'repo_description', 'strip': True}, |
|
387 | 387 | {'k': 'repo_enable_locking', 'strip': True}, |
|
388 | 388 | {'k': 'repo_landing_rev', 'strip': True}, |
|
389 | 389 | {'k': 'clone_uri', 'strip': False}, |
|
390 | 390 | {'k': 'push_uri', 'strip': False}, |
|
391 | 391 | {'k': 'repo_private', 'strip': True}, |
|
392 | 392 | {'k': 'repo_enable_statistics', 'strip': True} |
|
393 | 393 | ) |
|
394 | 394 | |
|
395 | 395 | for item in keys_to_process: |
|
396 | 396 | attr = item['k'] |
|
397 | 397 | if item['strip']: |
|
398 | 398 | attr = remove_prefix(item['k'], 'repo_') |
|
399 | 399 | |
|
400 | 400 | val = defaults[attr] |
|
401 | 401 | if item['k'] == 'repo_landing_rev': |
|
402 | 402 | val = ':'.join(defaults[attr]) |
|
403 | 403 | defaults[item['k']] = val |
|
404 | 404 | if item['k'] == 'clone_uri': |
|
405 | 405 | defaults['clone_uri_hidden'] = repo_info.clone_uri_hidden |
|
406 | 406 | if item['k'] == 'push_uri': |
|
407 | 407 | defaults['push_uri_hidden'] = repo_info.push_uri_hidden |
|
408 | 408 | |
|
409 | 409 | # fill owner |
|
410 | 410 | if repo_info.user: |
|
411 | 411 | defaults.update({'user': repo_info.user.username}) |
|
412 | 412 | else: |
|
413 | 413 | replacement_user = User.get_first_super_admin().username |
|
414 | 414 | defaults.update({'user': replacement_user}) |
|
415 | 415 | |
|
416 | 416 | return defaults |
|
417 | 417 | |
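# --- Editor's note (illustrative, not part of this changeset) ---
# remove_prefix (imported from rhodecode.lib.utils2 above) behaves roughly like:
#
#     def remove_prefix(s, prefix):
#         return s[len(prefix):] if s.startswith(prefix) else s
#
#     remove_prefix('repo_description', 'repo_')  # -> 'description'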
|
418 | 418 | def update(self, repo, **kwargs): |
|
419 | 419 | try: |
|
420 | 420 | cur_repo = self._get_repo(repo) |
|
421 | 421 | source_repo_name = cur_repo.repo_name |
|
422 | 422 | |
|
423 | 423 | affected_user_ids = [] |
|
424 | 424 | if 'user' in kwargs: |
|
425 | 425 | old_owner_id = cur_repo.user.user_id |
|
426 | 426 | new_owner = User.get_by_username(kwargs['user']) |
|
427 | 427 | cur_repo.user = new_owner |
|
428 | 428 | |
|
429 | 429 | if old_owner_id != new_owner.user_id: |
|
430 | 430 | affected_user_ids = [new_owner.user_id, old_owner_id] |
|
431 | 431 | |
|
432 | 432 | if 'repo_group' in kwargs: |
|
433 | 433 | cur_repo.group = RepoGroup.get(kwargs['repo_group']) |
|
434 | 434 | log.debug('Updating repo %s with params:%s', cur_repo, kwargs) |
|
435 | 435 | |
|
436 | 436 | update_keys = [ |
|
437 | 437 | (1, 'repo_description'), |
|
438 | 438 | (1, 'repo_landing_rev'), |
|
439 | 439 | (1, 'repo_private'), |
|
440 | 440 | (1, 'repo_enable_downloads'), |
|
441 | 441 | (1, 'repo_enable_locking'), |
|
442 | 442 | (1, 'repo_enable_statistics'), |
|
443 | 443 | (0, 'clone_uri'), |
|
444 | 444 | (0, 'push_uri'), |
|
445 | 445 | (0, 'fork_id') |
|
446 | 446 | ] |
|
447 | 447 | for strip, k in update_keys: |
|
448 | 448 | if k in kwargs: |
|
449 | 449 | val = kwargs[k] |
|
450 | 450 | if strip: |
|
451 | 451 | k = remove_prefix(k, 'repo_') |
|
452 | 452 | |
|
453 | 453 | setattr(cur_repo, k, val) |
|
454 | 454 | |
|
455 | new_name = source_repo_name | |
|
456 | if 'repo_name' in kwargs: | |
|
455 | 457 | new_name = cur_repo.get_new_name(kwargs['repo_name']) |
|
456 | 458 | cur_repo.repo_name = new_name |
|
457 | 459 | |
|
458 | # if private flag is set, reset default permission to NONE | |
|
459 | if kwargs.get('repo_private'): | |
|
460 | if 'repo_private' in kwargs: | |
|
461 | # if private flag is set to True, reset default permission to NONE | |
|
462 | set_private_to = kwargs.get('repo_private') | |
|
463 | if set_private_to: | |
|
460 | 464 | EMPTY_PERM = 'repository.none' |
|
461 | 465 | RepoModel().grant_user_permission( |
|
462 | 466 | repo=cur_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM |
|
463 | 467 | ) |
|
468 | if set_private_to != cur_repo.private: | |
|
469 |                     # NOTE(dan): when we change the repo private mode we need to notify all USERS;

470 |                     # this is detected simply by the value being set to something different than before
|
471 | affected_user_ids = User.get_all_user_ids() | |
|
472 | ||
|
464 | 473 | if kwargs.get('repo_landing_rev'): |
|
465 | 474 | landing_rev_val = kwargs['repo_landing_rev'] |
|
466 | 475 | RepoModel().set_landing_rev(cur_repo, landing_rev_val) |
|
467 | 476 | |
|
468 | 477 | # handle extra fields |
|
469 | 478 | for field in filter(lambda k: k.startswith(RepositoryField.PREFIX), kwargs): |
|
470 | 479 | k = RepositoryField.un_prefix_key(field) |
|
471 | 480 | ex_field = RepositoryField.get_by_key_name( |
|
472 | 481 | key=k, repo=cur_repo) |
|
473 | 482 | if ex_field: |
|
474 | 483 | ex_field.field_value = kwargs[field] |
|
475 | 484 | self.sa.add(ex_field) |
|
476 | 485 | |
|
477 | 486 | self.sa.add(cur_repo) |
|
478 | 487 | |
|
479 | 488 | if source_repo_name != new_name: |
|
480 | 489 | # rename repository |
|
481 | 490 | self._rename_filesystem_repo( |
|
482 | 491 | old=source_repo_name, new=new_name) |
|
483 | 492 | |
|
484 | 493 | if affected_user_ids: |
|
485 | 494 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
486 | 495 | |
|
487 | 496 | return cur_repo |
|
488 | 497 | except Exception: |
|
489 | 498 | log.error(traceback.format_exc()) |
|
490 | 499 | raise |
|
491 | 500 | |
|
492 | 501 | def _create_repo(self, repo_name, repo_type, description, owner, |
|
493 | 502 | private=False, clone_uri=None, repo_group=None, |
|
494 | 503 | landing_rev=None, fork_of=None, |
|
495 | 504 | copy_fork_permissions=False, enable_statistics=False, |
|
496 | 505 | enable_locking=False, enable_downloads=False, |
|
497 | 506 | copy_group_permissions=False, |
|
498 | 507 | state=Repository.STATE_PENDING): |
|
499 | 508 | """ |
|
500 | 509 |         Create a repository inside the database with PENDING state; this should
501 | 510 |         only be executed by the create() method, with the exception of importing
502 | 511 |         existing repos
|
503 | 512 | """ |
|
504 | 513 | from rhodecode.model.scm import ScmModel |
|
505 | 514 | |
|
506 | 515 | owner = self._get_user(owner) |
|
507 | 516 | fork_of = self._get_repo(fork_of) |
|
508 | 517 | repo_group = self._get_repo_group(safe_int(repo_group)) |
|
509 | 518 | default_landing_ref, _lbl = ScmModel.backend_landing_ref(repo_type) |
|
510 | 519 | landing_rev = landing_rev or default_landing_ref |
|
511 | 520 | |
|
512 | 521 | try: |
|
513 | 522 | repo_name = safe_str(repo_name) |
|
514 | 523 | description = safe_str(description) |
|
515 | 524 | # repo name is just a name of repository |
|
516 | 525 | # while repo_name_full is a full qualified name that is combined |
|
517 | 526 | # with name and path of group |
|
518 | 527 | repo_name_full = repo_name |
|
519 | 528 | repo_name = repo_name.split(Repository.NAME_SEP)[-1] |
|
520 | 529 | |
|
521 | 530 | new_repo = Repository() |
|
522 | 531 | new_repo.repo_state = state |
|
523 | 532 | new_repo.enable_statistics = False |
|
524 | 533 | new_repo.repo_name = repo_name_full |
|
525 | 534 | new_repo.repo_type = repo_type |
|
526 | 535 | new_repo.user = owner |
|
527 | 536 | new_repo.group = repo_group |
|
528 | 537 | new_repo.description = description or repo_name |
|
529 | 538 | new_repo.private = private |
|
530 | 539 | new_repo.archived = False |
|
531 | 540 | new_repo.clone_uri = clone_uri |
|
532 | 541 | new_repo.landing_rev = landing_rev |
|
533 | 542 | |
|
534 | 543 | new_repo.enable_statistics = enable_statistics |
|
535 | 544 | new_repo.enable_locking = enable_locking |
|
536 | 545 | new_repo.enable_downloads = enable_downloads |
|
537 | 546 | |
|
538 | 547 | if repo_group: |
|
539 | 548 | new_repo.enable_locking = repo_group.enable_locking |
|
540 | 549 | |
|
541 | 550 | if fork_of: |
|
542 | 551 | parent_repo = fork_of |
|
543 | 552 | new_repo.fork = parent_repo |
|
544 | 553 | |
|
545 | 554 | events.trigger(events.RepoPreCreateEvent(new_repo)) |
|
546 | 555 | |
|
547 | 556 | self.sa.add(new_repo) |
|
548 | 557 | |
|
549 | 558 | EMPTY_PERM = 'repository.none' |
|
550 | 559 | if fork_of and copy_fork_permissions: |
|
551 | 560 | repo = fork_of |
|
552 | 561 | user_perms = UserRepoToPerm.query() \ |
|
553 | 562 | .filter(UserRepoToPerm.repository == repo).all() |
|
554 | 563 | group_perms = UserGroupRepoToPerm.query() \ |
|
555 | 564 | .filter(UserGroupRepoToPerm.repository == repo).all() |
|
556 | 565 | |
|
557 | 566 | for perm in user_perms: |
|
558 | 567 | UserRepoToPerm.create( |
|
559 | 568 | perm.user, new_repo, perm.permission) |
|
560 | 569 | |
|
561 | 570 | for perm in group_perms: |
|
562 | 571 | UserGroupRepoToPerm.create( |
|
563 | 572 | perm.users_group, new_repo, perm.permission) |
|
564 | 573 | # in case we copy permissions and also set this repo to private |
|
565 | 574 | # override the default user permission to make it a private repo |
|
566 | 575 | if private: |
|
567 | 576 | RepoModel(self.sa).grant_user_permission( |
|
568 | 577 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
569 | 578 | |
|
570 | 579 | elif repo_group and copy_group_permissions: |
|
571 | 580 | user_perms = UserRepoGroupToPerm.query() \ |
|
572 | 581 | .filter(UserRepoGroupToPerm.group == repo_group).all() |
|
573 | 582 | |
|
574 | 583 | group_perms = UserGroupRepoGroupToPerm.query() \ |
|
575 | 584 | .filter(UserGroupRepoGroupToPerm.group == repo_group).all() |
|
576 | 585 | |
|
577 | 586 | for perm in user_perms: |
|
578 | 587 | perm_name = perm.permission.permission_name.replace( |
|
579 | 588 | 'group.', 'repository.') |
|
580 | 589 | perm_obj = Permission.get_by_key(perm_name) |
|
581 | 590 | UserRepoToPerm.create(perm.user, new_repo, perm_obj) |
|
582 | 591 | |
|
583 | 592 | for perm in group_perms: |
|
584 | 593 | perm_name = perm.permission.permission_name.replace( |
|
585 | 594 | 'group.', 'repository.') |
|
586 | 595 | perm_obj = Permission.get_by_key(perm_name) |
|
587 | 596 | UserGroupRepoToPerm.create(perm.users_group, new_repo, perm_obj) |
|
588 | 597 | |
|
589 | 598 | if private: |
|
590 | 599 | RepoModel(self.sa).grant_user_permission( |
|
591 | 600 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
592 | 601 | |
|
593 | 602 | else: |
|
594 | 603 | perm_obj = self._create_default_perms(new_repo, private) |
|
595 | 604 | self.sa.add(perm_obj) |
|
596 | 605 | |
|
597 | 606 | # now automatically start following this repository as owner |
|
598 | 607 | ScmModel(self.sa).toggle_following_repo(new_repo.repo_id, owner.user_id) |
|
599 | 608 | |
|
600 | 609 | # we need to flush here, in order to check if database won't |
|
601 | 610 | # throw any exceptions, create filesystem dirs at the very end |
|
602 | 611 | self.sa.flush() |
|
603 | 612 | events.trigger(events.RepoCreateEvent(new_repo, actor=owner)) |
|
604 | 613 | return new_repo |
|
605 | 614 | |
|
606 | 615 | except Exception: |
|
607 | 616 | log.error(traceback.format_exc()) |
|
608 | 617 | raise |
|
609 | 618 | |
|
610 | 619 | def create(self, form_data, cur_user): |
|
611 | 620 | """ |
|
612 | 621 | Create repository using celery tasks |
|
613 | 622 | |
|
614 | 623 | :param form_data: |
|
615 | 624 | :param cur_user: |
|
616 | 625 | """ |
|
617 | 626 | from rhodecode.lib.celerylib import tasks, run_task |
|
618 | 627 | return run_task(tasks.create_repo, form_data, cur_user) |
|
619 | 628 | |
|
620 | 629 | def update_permissions(self, repo, perm_additions=None, perm_updates=None, |
|
621 | 630 | perm_deletions=None, check_perms=True, |
|
622 | 631 | cur_user=None): |
|
623 | 632 | if not perm_additions: |
|
624 | 633 | perm_additions = [] |
|
625 | 634 | if not perm_updates: |
|
626 | 635 | perm_updates = [] |
|
627 | 636 | if not perm_deletions: |
|
628 | 637 | perm_deletions = [] |
|
629 | 638 | |
|
630 | 639 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') |
|
631 | 640 | |
|
632 | 641 | changes = { |
|
633 | 642 | 'added': [], |
|
634 | 643 | 'updated': [], |
|
635 | 644 | 'deleted': [], |
|
636 | 645 | 'default_user_changed': None |
|
637 | 646 | } |
|
638 | 647 | |
|
639 | 648 | repo = self._get_repo(repo) |
|
640 | 649 | |
|
641 | 650 | # update permissions |
|
642 | 651 | for member_id, perm, member_type in perm_updates: |
|
643 | 652 | member_id = int(member_id) |
|
644 | 653 | if member_type == 'user': |
|
645 | 654 | member_name = User.get(member_id).username |
|
646 | 655 | if member_name == User.DEFAULT_USER: |
|
647 | 656 | # NOTE(dan): detect if we changed permissions for default user |
|
648 | 657 | perm_obj = self.sa.query(UserRepoToPerm) \ |
|
649 | 658 | .filter(UserRepoToPerm.user_id == member_id) \ |
|
650 | 659 | .filter(UserRepoToPerm.repository == repo) \ |
|
651 | 660 | .scalar() |
|
652 | 661 | if perm_obj and perm_obj.permission.permission_name != perm: |
|
653 | 662 | changes['default_user_changed'] = True |
|
654 | 663 | |
|
655 | 664 | # this updates also current one if found |
|
656 | 665 | self.grant_user_permission( |
|
657 | 666 | repo=repo, user=member_id, perm=perm) |
|
658 | 667 | elif member_type == 'user_group': |
|
659 | 668 | # check if we have permissions to alter this usergroup |
|
660 | 669 | member_name = UserGroup.get(member_id).users_group_name |
|
661 | 670 | if not check_perms or HasUserGroupPermissionAny( |
|
662 | 671 | *req_perms)(member_name, user=cur_user): |
|
663 | 672 | self.grant_user_group_permission( |
|
664 | 673 | repo=repo, group_name=member_id, perm=perm) |
|
665 | 674 | else: |
|
666 | 675 |                 raise ValueError("member_type must be 'user' or 'user_group', "
|
667 | 676 | "got {} instead".format(member_type)) |
|
668 | 677 | changes['updated'].append({'type': member_type, 'id': member_id, |
|
669 | 678 | 'name': member_name, 'new_perm': perm}) |
|
670 | 679 | |
|
671 | 680 | # set new permissions |
|
672 | 681 | for member_id, perm, member_type in perm_additions: |
|
673 | 682 | member_id = int(member_id) |
|
674 | 683 | if member_type == 'user': |
|
675 | 684 | member_name = User.get(member_id).username |
|
676 | 685 | self.grant_user_permission( |
|
677 | 686 | repo=repo, user=member_id, perm=perm) |
|
678 | 687 | elif member_type == 'user_group': |
|
679 | 688 | # check if we have permissions to alter this usergroup |
|
680 | 689 | member_name = UserGroup.get(member_id).users_group_name |
|
681 | 690 | if not check_perms or HasUserGroupPermissionAny( |
|
682 | 691 | *req_perms)(member_name, user=cur_user): |
|
683 | 692 | self.grant_user_group_permission( |
|
684 | 693 | repo=repo, group_name=member_id, perm=perm) |
|
685 | 694 | else: |
|
686 | 695 |                 raise ValueError("member_type must be 'user' or 'user_group', "
|
687 | 696 | "got {} instead".format(member_type)) |
|
688 | 697 | |
|
689 | 698 | changes['added'].append({'type': member_type, 'id': member_id, |
|
690 | 699 | 'name': member_name, 'new_perm': perm}) |
|
691 | 700 | # delete permissions |
|
692 | 701 | for member_id, perm, member_type in perm_deletions: |
|
693 | 702 | member_id = int(member_id) |
|
694 | 703 | if member_type == 'user': |
|
695 | 704 | member_name = User.get(member_id).username |
|
696 | 705 | self.revoke_user_permission(repo=repo, user=member_id) |
|
697 | 706 | elif member_type == 'user_group': |
|
698 | 707 | # check if we have permissions to alter this usergroup |
|
699 | 708 | member_name = UserGroup.get(member_id).users_group_name |
|
700 | 709 | if not check_perms or HasUserGroupPermissionAny( |
|
701 | 710 | *req_perms)(member_name, user=cur_user): |
|
702 | 711 | self.revoke_user_group_permission( |
|
703 | 712 | repo=repo, group_name=member_id) |
|
704 | 713 | else: |
|
705 | 714 |                 raise ValueError("member_type must be 'user' or 'user_group', "
|
706 | 715 | "got {} instead".format(member_type)) |
|
707 | 716 | |
|
708 | 717 | changes['deleted'].append({'type': member_type, 'id': member_id, |
|
709 | 718 | 'name': member_name, 'new_perm': perm}) |
|
710 | 719 | return changes |
|
711 | 720 | |
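# --- Editor's sketch (illustrative call shape; names are hypothetical) ---
# Each permission entry is a (member_id, permission_name, member_type) tuple:
#
#     changes = RepoModel().update_permissions(
#         repo='some/repo',
#         perm_additions=[(42, 'repository.write', 'user')],
#         perm_deletions=[(7, 'repository.read', 'user_group')],
#         cur_user=admin_user,  # hypothetical User instance
#     )
#     # changes -> {'added': [...], 'updated': [], 'deleted': [...],
#     #             'default_user_changed': None}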
|
712 | 721 | def create_fork(self, form_data, cur_user): |
|
713 | 722 | """ |
|
714 | 723 |         Simple wrapper that executes the celery task for fork creation
|
715 | 724 | |
|
716 | 725 | :param form_data: |
|
717 | 726 | :param cur_user: |
|
718 | 727 | """ |
|
719 | 728 | from rhodecode.lib.celerylib import tasks, run_task |
|
720 | 729 | return run_task(tasks.create_repo_fork, form_data, cur_user) |
|
721 | 730 | |
|
722 | 731 | def archive(self, repo): |
|
723 | 732 | """ |
|
724 | 733 | Archive given repository. Set archive flag. |
|
725 | 734 | |
|
726 | 735 | :param repo: |
|
727 | 736 | """ |
|
728 | 737 | repo = self._get_repo(repo) |
|
729 | 738 | if repo: |
|
730 | 739 | |
|
731 | 740 | try: |
|
732 | 741 | repo.archived = True |
|
733 | 742 | self.sa.add(repo) |
|
734 | 743 | self.sa.commit() |
|
735 | 744 | except Exception: |
|
736 | 745 | log.error(traceback.format_exc()) |
|
737 | 746 | raise |
|
738 | 747 | |
|
739 | 748 | def delete(self, repo, forks=None, pull_requests=None, artifacts=None, fs_remove=True, cur_user=None): |
|
740 | 749 | """ |
|
741 | 750 |         Delete given repository; the forks parameter defines what to do with
|
742 | 751 | attached forks. Throws AttachedForksError if deleted repo has attached |
|
743 | 752 | forks |
|
744 | 753 | |
|
745 | 754 | :param repo: |
|
746 | 755 | :param forks: str 'delete' or 'detach' |
|
747 | 756 | :param pull_requests: str 'delete' or None |
|
748 | 757 | :param artifacts: str 'delete' or None |
|
749 | 758 | :param fs_remove: remove(archive) repo from filesystem |
|
750 | 759 | """ |
|
751 | 760 | if not cur_user: |
|
752 | 761 | cur_user = getattr(get_current_rhodecode_user(), 'username', None) |
|
753 | 762 | repo = self._get_repo(repo) |
|
754 | 763 | if repo: |
|
755 | 764 | if forks == 'detach': |
|
756 | 765 | for r in repo.forks: |
|
757 | 766 | r.fork = None |
|
758 | 767 | self.sa.add(r) |
|
759 | 768 | elif forks == 'delete': |
|
760 | 769 | for r in repo.forks: |
|
761 | 770 | self.delete(r, forks='delete') |
|
762 | 771 | elif [f for f in repo.forks]: |
|
763 | 772 | raise AttachedForksError() |
|
764 | 773 | |
|
765 | 774 | # check for pull requests |
|
766 | 775 | pr_sources = repo.pull_requests_source |
|
767 | 776 | pr_targets = repo.pull_requests_target |
|
768 | 777 | if pull_requests != 'delete' and (pr_sources or pr_targets): |
|
769 | 778 | raise AttachedPullRequestsError() |
|
770 | 779 | |
|
771 | 780 | artifacts_objs = repo.artifacts |
|
772 | 781 | if artifacts == 'delete': |
|
773 | 782 | for a in artifacts_objs: |
|
774 | 783 | self.sa.delete(a) |
|
775 | 784 | elif [a for a in artifacts_objs]: |
|
776 | 785 | raise AttachedArtifactsError() |
|
777 | 786 | |
|
778 | 787 | old_repo_dict = repo.get_dict() |
|
779 | 788 | events.trigger(events.RepoPreDeleteEvent(repo)) |
|
780 | 789 | try: |
|
781 | 790 | self.sa.delete(repo) |
|
782 | 791 | if fs_remove: |
|
783 | 792 | self._delete_filesystem_repo(repo) |
|
784 | 793 | else: |
|
785 | 794 | log.debug('skipping removal from filesystem') |
|
786 | 795 | old_repo_dict.update({ |
|
787 | 796 | 'deleted_by': cur_user, |
|
788 | 797 | 'deleted_on': time.time(), |
|
789 | 798 | }) |
|
790 | 799 | hooks_base.delete_repository(**old_repo_dict) |
|
791 | 800 | events.trigger(events.RepoDeleteEvent(repo)) |
|
792 | 801 | except Exception: |
|
793 | 802 | log.error(traceback.format_exc()) |
|
794 | 803 | raise |
|
795 | 804 | |
|
796 | 805 | def grant_user_permission(self, repo, user, perm): |
|
797 | 806 | """ |
|
798 | 807 | Grant permission for user on given repository, or update existing one |
|
799 | 808 | if found |
|
800 | 809 | |
|
801 | 810 | :param repo: Instance of Repository, repository_id, or repository name |
|
802 | 811 | :param user: Instance of User, user_id or username |
|
803 | 812 | :param perm: Instance of Permission, or permission_name |
|
804 | 813 | """ |
|
805 | 814 | user = self._get_user(user) |
|
806 | 815 | repo = self._get_repo(repo) |
|
807 | 816 | permission = self._get_perm(perm) |
|
808 | 817 | |
|
809 | 818 | # check if we have that permission already |
|
810 | 819 | obj = self.sa.query(UserRepoToPerm) \ |
|
811 | 820 | .filter(UserRepoToPerm.user == user) \ |
|
812 | 821 | .filter(UserRepoToPerm.repository == repo) \ |
|
813 | 822 | .scalar() |
|
814 | 823 | if obj is None: |
|
815 | 824 | # create new ! |
|
816 | 825 | obj = UserRepoToPerm() |
|
817 | 826 | obj.repository = repo |
|
818 | 827 | obj.user = user |
|
819 | 828 | obj.permission = permission |
|
820 | 829 | self.sa.add(obj) |
|
821 | 830 | log.debug('Granted perm %s to %s on %s', perm, user, repo) |
|
822 | 831 | action_logger_generic( |
|
823 | 832 | 'granted permission: {} to user: {} on repo: {}'.format( |
|
824 | 833 | perm, user, repo), namespace='security.repo') |
|
825 | 834 | return obj |
|
826 | 835 | |
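# --- Editor's note (illustrative; 'some/repo' and 'bob' are hypothetical) ---
# Per the docstring, repo/user/perm each accept an instance, an id, or a name:
#
#     RepoModel().grant_user_permission(
#         repo='some/repo', user='bob', perm='repository.write')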
|
827 | 836 | def revoke_user_permission(self, repo, user): |
|
828 | 837 | """ |
|
829 | 838 | Revoke permission for user on given repository |
|
830 | 839 | |
|
831 | 840 | :param repo: Instance of Repository, repository_id, or repository name |
|
832 | 841 | :param user: Instance of User, user_id or username |
|
833 | 842 | """ |
|
834 | 843 | |
|
835 | 844 | user = self._get_user(user) |
|
836 | 845 | repo = self._get_repo(repo) |
|
837 | 846 | |
|
838 | 847 | obj = self.sa.query(UserRepoToPerm) \ |
|
839 | 848 | .filter(UserRepoToPerm.repository == repo) \ |
|
840 | 849 | .filter(UserRepoToPerm.user == user) \ |
|
841 | 850 | .scalar() |
|
842 | 851 | if obj: |
|
843 | 852 | self.sa.delete(obj) |
|
844 | 853 | log.debug('Revoked perm on %s on %s', repo, user) |
|
845 | 854 | action_logger_generic( |
|
846 | 855 | 'revoked permission from user: {} on repo: {}'.format( |
|
847 | 856 | user, repo), namespace='security.repo') |
|
848 | 857 | |
|
849 | 858 | def grant_user_group_permission(self, repo, group_name, perm): |
|
850 | 859 | """ |
|
851 | 860 | Grant permission for user group on given repository, or update |
|
852 | 861 | existing one if found |
|
853 | 862 | |
|
854 | 863 | :param repo: Instance of Repository, repository_id, or repository name |
|
855 | 864 | :param group_name: Instance of UserGroup, users_group_id, |
|
856 | 865 | or user group name |
|
857 | 866 | :param perm: Instance of Permission, or permission_name |
|
858 | 867 | """ |
|
859 | 868 | repo = self._get_repo(repo) |
|
860 | 869 | group_name = self._get_user_group(group_name) |
|
861 | 870 | permission = self._get_perm(perm) |
|
862 | 871 | |
|
863 | 872 | # check if we have that permission already |
|
864 | 873 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
865 | 874 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
866 | 875 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
867 | 876 | .scalar() |
|
868 | 877 | |
|
869 | 878 | if obj is None: |
|
870 | 879 | # create new |
|
871 | 880 | obj = UserGroupRepoToPerm() |
|
872 | 881 | |
|
873 | 882 | obj.repository = repo |
|
874 | 883 | obj.users_group = group_name |
|
875 | 884 | obj.permission = permission |
|
876 | 885 | self.sa.add(obj) |
|
877 | 886 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo) |
|
878 | 887 | action_logger_generic( |
|
879 | 888 | 'granted permission: {} to usergroup: {} on repo: {}'.format( |
|
880 | 889 | perm, group_name, repo), namespace='security.repo') |
|
881 | 890 | |
|
882 | 891 | return obj |
|
883 | 892 | |
|
884 | 893 | def revoke_user_group_permission(self, repo, group_name): |
|
885 | 894 | """ |
|
886 | 895 | Revoke permission for user group on given repository |
|
887 | 896 | |
|
888 | 897 | :param repo: Instance of Repository, repository_id, or repository name |
|
889 | 898 | :param group_name: Instance of UserGroup, users_group_id, |
|
890 | 899 | or user group name |
|
891 | 900 | """ |
|
892 | 901 | repo = self._get_repo(repo) |
|
893 | 902 | group_name = self._get_user_group(group_name) |
|
894 | 903 | |
|
895 | 904 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
896 | 905 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
897 | 906 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
898 | 907 | .scalar() |
|
899 | 908 | if obj: |
|
900 | 909 | self.sa.delete(obj) |
|
901 | 910 | log.debug('Revoked perm to %s on %s', repo, group_name) |
|
902 | 911 | action_logger_generic( |
|
903 | 912 | 'revoked permission from usergroup: {} on repo: {}'.format( |
|
904 | 913 | group_name, repo), namespace='security.repo') |
|
905 | 914 | |
|
906 | 915 | def delete_stats(self, repo_name): |
|
907 | 916 | """ |
|
908 | 917 | removes stats for given repo |
|
909 | 918 | |
|
910 | 919 | :param repo_name: |
|
911 | 920 | """ |
|
912 | 921 | repo = self._get_repo(repo_name) |
|
913 | 922 | try: |
|
914 | 923 | obj = self.sa.query(Statistics) \ |
|
915 | 924 | .filter(Statistics.repository == repo).scalar() |
|
916 | 925 | if obj: |
|
917 | 926 | self.sa.delete(obj) |
|
918 | 927 | except Exception: |
|
919 | 928 | log.error(traceback.format_exc()) |
|
920 | 929 | raise |
|
921 | 930 | |
|
922 | 931 | def add_repo_field(self, repo_name, field_key, field_label, field_value='', |
|
923 | 932 | field_type='str', field_desc=''): |
|
924 | 933 | |
|
925 | 934 | repo = self._get_repo(repo_name) |
|
926 | 935 | |
|
927 | 936 | new_field = RepositoryField() |
|
928 | 937 | new_field.repository = repo |
|
929 | 938 | new_field.field_key = field_key |
|
930 | 939 | new_field.field_type = field_type # python type |
|
931 | 940 | new_field.field_value = field_value |
|
932 | 941 | new_field.field_desc = field_desc |
|
933 | 942 | new_field.field_label = field_label |
|
934 | 943 | self.sa.add(new_field) |
|
935 | 944 | return new_field |
|
936 | 945 | |
|
937 | 946 | def delete_repo_field(self, repo_name, field_key): |
|
938 | 947 | repo = self._get_repo(repo_name) |
|
939 | 948 | field = RepositoryField.get_by_key_name(field_key, repo) |
|
940 | 949 | if field: |
|
941 | 950 | self.sa.delete(field) |
|
942 | 951 | |
|
943 | 952 | def set_landing_rev(self, repo, landing_rev_name): |
|
944 | 953 | if landing_rev_name.startswith('branch:'): |
|
945 | 954 | landing_rev_name = landing_rev_name.split('branch:')[-1] |
|
946 | 955 | scm_instance = repo.scm_instance() |
|
947 | 956 | if scm_instance: |
|
948 | 957 | return scm_instance._remote.set_head_ref(landing_rev_name) |
|
949 | 958 | |
|
950 | 959 | def _create_filesystem_repo(self, repo_name, repo_type, repo_group, |
|
951 | 960 | clone_uri=None, repo_store_location=None, |
|
952 | 961 | use_global_config=False, install_hooks=True): |
|
953 | 962 | """ |
|
954 | 963 |         makes repository on filesystem. It is group aware, meaning it will
955 | 964 |         create the repository within a group and alter the paths according
956 | 965 |         to the group location
|
957 | 966 | |
|
958 | 967 | :param repo_name: |
|
959 | 968 | :param alias: |
|
960 | 969 | :param parent: |
|
961 | 970 | :param clone_uri: |
|
962 | 971 | :param repo_store_location: |
|
963 | 972 | """ |
|
964 | 973 | from rhodecode.lib.utils import is_valid_repo, is_valid_repo_group |
|
965 | 974 | from rhodecode.model.scm import ScmModel |
|
966 | 975 | |
|
967 | 976 | if Repository.NAME_SEP in repo_name: |
|
968 | 977 | raise ValueError( |
|
969 | 978 |                 'repo_name must not contain groups, got `%s`' % repo_name
|
970 | 979 | |
|
971 | 980 | if isinstance(repo_group, RepoGroup): |
|
972 | 981 | new_parent_path = os.sep.join(repo_group.full_path_splitted) |
|
973 | 982 | else: |
|
974 | 983 | new_parent_path = repo_group or '' |
|
975 | 984 | |
|
976 | 985 | if repo_store_location: |
|
977 | 986 | _paths = [repo_store_location] |
|
978 | 987 | else: |
|
979 | 988 | _paths = [self.repos_path, new_parent_path, repo_name] |
|
980 | 989 | # we need to make it str for mercurial |
|
981 | 990 |         repo_path = os.path.join(*map(safe_str, _paths))
|
982 | 991 | |
|
983 | 992 | # check if this path is not a repository |
|
984 | 993 | if is_valid_repo(repo_path, self.repos_path): |
|
985 | 994 | raise Exception(f'This path {repo_path} is a valid repository') |
|
986 | 995 | |
|
987 | 996 | # check if this path is a group |
|
988 | 997 | if is_valid_repo_group(repo_path, self.repos_path): |
|
989 | 998 | raise Exception(f'This path {repo_path} is a valid group') |
|
990 | 999 | |
|
991 | 1000 | log.info('creating repo %s in %s from url: `%s`', |
|
992 | 1001 | repo_name, safe_str(repo_path), |
|
993 | 1002 | obfuscate_url_pw(clone_uri)) |
|
994 | 1003 | |
|
995 | 1004 | backend = get_backend(repo_type) |
|
996 | 1005 | |
|
997 | 1006 | config_repo = None if use_global_config else repo_name |
|
998 | 1007 | if config_repo and new_parent_path: |
|
999 | 1008 | config_repo = Repository.NAME_SEP.join( |
|
1000 | 1009 | (new_parent_path, config_repo)) |
|
1001 | 1010 | config = make_db_config(clear_session=False, repo=config_repo) |
|
1002 | 1011 | config.set('extensions', 'largefiles', '') |
|
1003 | 1012 | |
|
1004 | 1013 | # patch and reset hooks section of UI config to not run any |
|
1005 | 1014 | # hooks on creating remote repo |
|
1006 | 1015 | config.clear_section('hooks') |
|
1007 | 1016 | |
|
1008 | 1017 | # TODO: johbo: Unify this, hardcoded "bare=True" does not look nice |
|
1009 | 1018 | if repo_type == 'git': |
|
1010 | 1019 | repo = backend( |
|
1011 | 1020 | repo_path, config=config, create=True, src_url=clone_uri, bare=True, |
|
1012 | 1021 | with_wire={"cache": False}) |
|
1013 | 1022 | else: |
|
1014 | 1023 | repo = backend( |
|
1015 | 1024 | repo_path, config=config, create=True, src_url=clone_uri, |
|
1016 | 1025 | with_wire={"cache": False}) |
|
1017 | 1026 | |
|
1018 | 1027 | if install_hooks: |
|
1019 | 1028 | repo.install_hooks() |
|
1020 | 1029 | |
|
1021 | 1030 | log.debug('Created repo %s with %s backend', |
|
1022 | 1031 | safe_str(repo_name), safe_str(repo_type)) |
|
1023 | 1032 | return repo |
|
1024 | 1033 | |
|
1025 | 1034 | def _rename_filesystem_repo(self, old, new): |
|
1026 | 1035 | """ |
|
1027 | 1036 | renames repository on filesystem |
|
1028 | 1037 | |
|
1029 | 1038 | :param old: old name |
|
1030 | 1039 | :param new: new name |
|
1031 | 1040 | """ |
|
1032 | 1041 | log.info('renaming repo from %s to %s', old, new) |
|
1033 | 1042 | |
|
1034 | 1043 | old_path = os.path.join(self.repos_path, old) |
|
1035 | 1044 | new_path = os.path.join(self.repos_path, new) |
|
1036 | 1045 | if os.path.isdir(new_path): |
|
1037 | 1046 | raise Exception( |
|
1038 | 1047 | 'Was trying to rename to already existing dir %s' % new_path |
|
1039 | 1048 | ) |
|
1040 | 1049 | shutil.move(old_path, new_path) |
|
1041 | 1050 | |
|
1042 | 1051 | def _delete_filesystem_repo(self, repo): |
|
1043 | 1052 | """ |
|
1044 | 1053 |         removes repo from filesystem; the removal is actually done by
1045 | 1054 |         adding an rm__ prefix to the dir and renaming the internal .hg/.git dirs,
1046 | 1055 |         so this repository is no longer valid for rhodecode. It can be undeleted
1047 | 1056 |         later on by reverting the renames on this repository
|
1048 | 1057 | |
|
1049 | 1058 | :param repo: repo object |
|
1050 | 1059 | """ |
|
1051 | 1060 | rm_path = os.path.join(self.repos_path, repo.repo_name) |
|
1052 | 1061 | repo_group = repo.group |
|
1053 | 1062 | log.info("delete_filesystem_repo: removing repository %s", rm_path) |
|
1054 | 1063 |         # disable hg/git internals so that it doesn't get detected as a repo
|
1055 | 1064 | alias = repo.repo_type |
|
1056 | 1065 | |
|
1057 | 1066 | config = make_db_config(clear_session=False) |
|
1058 | 1067 | config.set('extensions', 'largefiles', '') |
|
1059 | 1068 | bare = getattr(repo.scm_instance(config=config), 'bare', False) |
|
1060 | 1069 | |
|
1061 | 1070 | # skip this for bare git repos |
|
1062 | 1071 | if not bare: |
|
1063 | 1072 | # disable VCS repo |
|
1064 | 1073 | vcs_path = os.path.join(rm_path, '.%s' % alias) |
|
1065 | 1074 | if os.path.exists(vcs_path): |
|
1066 | 1075 | shutil.move(vcs_path, os.path.join(rm_path, 'rm__.%s' % alias)) |
|
1067 | 1076 | |
|
1068 | 1077 | _now = datetime.datetime.now() |
|
1069 | 1078 | _ms = str(_now.microsecond).rjust(6, '0') |
|
1070 | 1079 | _d = 'rm__{}__{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), |
|
1071 | 1080 | repo.just_name) |
|
1072 | 1081 | if repo_group: |
|
1073 | 1082 | # if repository is in group, prefix the removal path with the group |
|
1074 | 1083 | args = repo_group.full_path_splitted + [_d] |
|
1075 | 1084 | _d = os.path.join(*args) |
|
1076 | 1085 | |
|
1077 | 1086 | if os.path.isdir(rm_path): |
|
1078 | 1087 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) |
|
1079 | 1088 | |
|
1080 | 1089 | # finally cleanup diff-cache if it exists |
|
1081 | 1090 | cached_diffs_dir = repo.cached_diffs_dir |
|
1082 | 1091 | if os.path.isdir(cached_diffs_dir): |
|
1083 | 1092 | shutil.rmtree(cached_diffs_dir) |
|
1084 | 1093 | |
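# --- Editor's sketch (illustrative, not part of this changeset) ---
# The removal-directory name generated above looks like this:
import datetime

_now = datetime.datetime.now()
_ms = str(_now.microsecond).rjust(6, '0')
print('rm__{}__{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), 'reponame'))
# -> e.g. rm__20240917_093011_000042__reponame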
|
1085 | 1094 | |
|
1086 | 1095 | class ReadmeFinder: |
|
1087 | 1096 | """ |
|
1088 | 1097 | Utility which knows how to find a readme for a specific commit. |
|
1089 | 1098 | |
|
1090 | 1099 | The main idea is that this is a configurable algorithm. When creating an |
|
1091 | 1100 | instance you can define parameters, currently only the `default_renderer`. |
|
1092 | 1101 | Based on this configuration the method :meth:`search` behaves slightly |
|
1093 | 1102 |     differently.
|
1094 | 1103 | """ |
|
1095 | 1104 | |
|
1096 | 1105 | readme_re = re.compile(r'^readme(\.[^\.]+)?$', re.IGNORECASE) |
|
1097 | 1106 | path_re = re.compile(r'^docs?', re.IGNORECASE) |
|
1098 | 1107 | |
|
1099 | 1108 | default_priorities = { |
|
1100 | 1109 | None: 0, |
|
1101 | 1110 | '.rst': 1, |
|
1102 | 1111 | '.md': 1, |
|
1103 | 1112 | '.rest': 2, |
|
1104 | 1113 | '.mkdn': 2, |
|
1105 | 1114 | '.text': 2, |
|
1106 | 1115 | '.txt': 3, |
|
1107 | 1116 | '.mdown': 3, |
|
1108 | 1117 | '.markdown': 4, |
|
1109 | 1118 | } |
|
1110 | 1119 | |
|
1111 | 1120 | path_priority = { |
|
1112 | 1121 | 'doc': 0, |
|
1113 | 1122 | 'docs': 1, |
|
1114 | 1123 | } |
|
1115 | 1124 | |
|
1116 | 1125 | FALLBACK_PRIORITY = 99 |
|
1117 | 1126 | |
|
1118 | 1127 | RENDERER_TO_EXTENSION = { |
|
1119 | 1128 | 'rst': ['.rst', '.rest'], |
|
1120 | 1129 |         'markdown': ['.md', '.mkdn', '.mdown', '.markdown'],
|
1121 | 1130 | } |
|
1122 | 1131 | |
|
1123 | 1132 | def __init__(self, default_renderer=None): |
|
1124 | 1133 | self._default_renderer = default_renderer |
|
1125 | 1134 | self._renderer_extensions = self.RENDERER_TO_EXTENSION.get( |
|
1126 | 1135 | default_renderer, []) |
|
1127 | 1136 | |
|
1128 | 1137 | def search(self, commit, path='/'): |
|
1129 | 1138 | """ |
|
1130 | 1139 | Find a readme in the given `commit`. |
|
1131 | 1140 | """ |
|
1132 | 1141 |         # first, check that the given PATH is actually a DIR
|
1133 | 1142 | if commit.get_node(path).kind != NodeKind.DIR: |
|
1134 | 1143 | return None |
|
1135 | 1144 | |
|
1136 | 1145 | nodes = commit.get_nodes(path) |
|
1137 | 1146 | matches = self._match_readmes(nodes) |
|
1138 | 1147 | matches = self._sort_according_to_priority(matches) |
|
1139 | 1148 | if matches: |
|
1140 | 1149 | return matches[0].node |
|
1141 | 1150 | |
|
1142 | 1151 | paths = self._match_paths(nodes) |
|
1143 | 1152 | paths = self._sort_paths_according_to_priority(paths) |
|
1144 | 1153 | for path in paths: |
|
1145 | 1154 | match = self.search(commit, path=path) |
|
1146 | 1155 | if match: |
|
1147 | 1156 | return match |
|
1148 | 1157 | |
|
1149 | 1158 | return None |
|
1150 | 1159 | |
|
1151 | 1160 | def _match_readmes(self, nodes): |
|
1152 | 1161 | for node in nodes: |
|
1153 | 1162 | if not node.is_file(): |
|
1154 | 1163 | continue |
|
1155 | 1164 | path = node.path.rsplit('/', 1)[-1] |
|
1156 | 1165 | match = self.readme_re.match(path) |
|
1157 | 1166 | if match: |
|
1158 | 1167 | extension = match.group(1) |
|
1159 | 1168 | yield ReadmeMatch(node, match, self._priority(extension)) |
|
1160 | 1169 | |
|
1161 | 1170 | def _match_paths(self, nodes): |
|
1162 | 1171 | for node in nodes: |
|
1163 | 1172 | if not node.is_dir(): |
|
1164 | 1173 | continue |
|
1165 | 1174 | match = self.path_re.match(node.path) |
|
1166 | 1175 | if match: |
|
1167 | 1176 | yield node.path |
|
1168 | 1177 | |
|
1169 | 1178 | def _priority(self, extension): |
|
1170 | 1179 | renderer_priority = ( |
|
1171 | 1180 | 0 if extension in self._renderer_extensions else 1) |
|
1172 | 1181 | extension_priority = self.default_priorities.get( |
|
1173 | 1182 | extension, self.FALLBACK_PRIORITY) |
|
1174 | 1183 | return (renderer_priority, extension_priority) |
|
1175 | 1184 | |
|
1176 | 1185 | def _sort_according_to_priority(self, matches): |
|
1177 | 1186 | |
|
1178 | 1187 | def priority_and_path(match): |
|
1179 | 1188 | return (match.priority, match.path) |
|
1180 | 1189 | |
|
1181 | 1190 | return sorted(matches, key=priority_and_path) |
|
1182 | 1191 | |
|
1183 | 1192 | def _sort_paths_according_to_priority(self, paths): |
|
1184 | 1193 | |
|
1185 | 1194 | def priority_and_path(path): |
|
1186 | 1195 | return (self.path_priority.get(path, self.FALLBACK_PRIORITY), path) |
|
1187 | 1196 | |
|
1188 | 1197 | return sorted(paths, key=priority_and_path) |
|
1189 | 1198 | |
|
1190 | 1199 | |
|
1191 | 1200 | class ReadmeMatch: |
|
1192 | 1201 | |
|
1193 | 1202 | def __init__(self, node, match, priority): |
|
1194 | 1203 | self.node = node |
|
1195 | 1204 | self._match = match |
|
1196 | 1205 | self.priority = priority |
|
1197 | 1206 | |
|
1198 | 1207 | @property |
|
1199 | 1208 | def path(self): |
|
1200 | 1209 | return self.node.path |
|
1201 | 1210 | |
|
1202 | 1211 | def __repr__(self): |
|
1203 | 1212 | return f'<ReadmeMatch {self.path} priority={self.priority}>'
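To make the ordering rules above concrete, here is a standalone sketch of the same two-level priority (renderer match first, extension rank second) applied to plain filenames; `pick_readme` is a hypothetical helper for illustration only and is not part of this module:

    import re

    README_RE = re.compile(r'^readme(\.[^.]+)?$', re.IGNORECASE)
    PRIORITIES = {None: 0, '.rst': 1, '.md': 1, '.rest': 2, '.mkdn': 2,
                  '.text': 2, '.txt': 3, '.mdown': 3, '.markdown': 4}
    RENDERER_EXTS = {'rst': ['.rst', '.rest'],
                     'markdown': ['.md', '.mkdn', '.mdown', '.markdown']}

    def pick_readme(filenames, default_renderer=None):
        renderer_exts = RENDERER_EXTS.get(default_renderer, [])
        candidates = []
        for name in filenames:
            match = README_RE.match(name)
            if not match:
                continue
            ext = match.group(1)  # '.md', '.rst', ... or None for bare 'readme'
            key = (0 if ext in renderer_exts else 1,  # renderer match wins first
                   PRIORITIES.get(ext, 99),           # then the extension rank
                   name)                              # then the name, for stable ties
            candidates.append((key, name))
        return min(candidates)[1] if candidates else None

    # with the markdown renderer configured, README.md outranks README.rst:
    assert pick_readme(['README.rst', 'README.md'], 'markdown') == 'README.md'
    assert pick_readme(['README.txt', 'readme']) == 'readme'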
@@ -1,886 +1,889 b'' | |||
|
1 | # Copyright (C) 2011-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2011-2024 RhodeCode GmbH | |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | """ |
|
21 | 21 | repo group model for RhodeCode |
|
22 | 22 | """ |
|
23 | 23 | |
|
24 | 24 | import os |
|
25 | 25 | import datetime |
|
26 | 26 | import itertools |
|
27 | 27 | import logging |
|
28 | 28 | import shutil |
|
29 | 29 | import time |
|
30 | 30 | import traceback |
|
31 | 31 | import string |
|
32 | 32 | |
|
33 | from zope.cachedescriptors.property import Lazy as LazyProperty | |
|
34 | 33 | |
|
35 | 34 | from rhodecode import events |
|
36 | 35 | from rhodecode.model import BaseModel |
|
37 | 36 | from rhodecode.model.db import (_hash_key, func, or_, in_filter_generator, |
|
38 | 37 | Session, RepoGroup, UserRepoGroupToPerm, User, Permission, UserGroupRepoGroupToPerm, |
|
39 | 38 | UserGroup, Repository) |
|
40 | 39 | from rhodecode.model.permission import PermissionModel |
|
41 | from rhodecode.model.settings import
|
40 | from rhodecode.model.settings import SettingsModel | |
|
42 | 41 | from rhodecode.lib.caching_query import FromCache |
|
43 | 42 | from rhodecode.lib.utils2 import action_logger_generic |
|
44 | 43 | |
|
45 | 44 | log = logging.getLogger(__name__) |
|
46 | 45 | |
|
47 | 46 | |
|
48 | 47 | class RepoGroupModel(BaseModel): |
|
49 | 48 | |
|
50 | 49 | cls = RepoGroup |
|
51 | 50 | PERSONAL_GROUP_DESC = 'personal repo group of user `%(username)s`' |
|
52 | 51 | PERSONAL_GROUP_PATTERN = '${username}' # default |
|
53 | 52 | |
|
54 | 53 | def _get_user_group(self, users_group): |
|
55 | 54 | return self._get_instance(UserGroup, users_group, |
|
56 | 55 | callback=UserGroup.get_by_group_name) |
|
57 | 56 | |
|
58 | 57 | def _get_repo_group(self, repo_group): |
|
59 | 58 | return self._get_instance(RepoGroup, repo_group, |
|
60 | 59 | callback=RepoGroup.get_by_group_name) |
|
61 | 60 | |
|
62 | 61 | def get_repo_group(self, repo_group): |
|
63 | 62 | return self._get_repo_group(repo_group) |
|
64 | 63 | |
|
65 | 64 | def get_by_group_name(self, repo_group_name, cache=None): |
|
66 | 65 | repo = self.sa.query(RepoGroup) \ |
|
67 | 66 | .filter(RepoGroup.group_name == repo_group_name) |
|
68 | 67 | |
|
69 | 68 | if cache: |
|
70 | 69 | name_key = _hash_key(repo_group_name) |
|
71 | 70 | repo = repo.options( |
|
72 | 71 | FromCache("sql_cache_short", f"get_repo_group_{name_key}")) |
|
73 | 72 | return repo.scalar() |
|
74 | 73 | |
|
75 | 74 | def get_default_create_personal_repo_group(self): |
|
76 | 75 | value = SettingsModel().get_setting_by_name( |
|
77 | 76 | 'create_personal_repo_group') |
|
78 | 77 | return value.app_settings_value if value else False
|
79 | 78 | |
|
80 | 79 | def get_personal_group_name_pattern(self): |
|
81 | 80 | value = SettingsModel().get_setting_by_name( |
|
82 | 81 | 'personal_repo_group_pattern') |
|
83 | 82 | val = value.app_settings_value if value else None |
|
84 | 83 | group_template = val or self.PERSONAL_GROUP_PATTERN |
|
85 | 84 | |
|
86 | 85 | group_template = group_template.lstrip('/') |
|
87 | 86 | return group_template |
|
88 | 87 | |
|
89 | 88 | def get_personal_group_name(self, user): |
|
90 | 89 | template = self.get_personal_group_name_pattern() |
|
91 | 90 | return string.Template(template).safe_substitute( |
|
92 | 91 | username=user.username, |
|
93 | 92 | user_id=user.user_id, |
|
94 | 93 | first_name=user.first_name, |
|
95 | 94 | last_name=user.last_name, |
|
96 | 95 | ) |
|
97 | 96 | |
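A quick illustration of how the pattern above expands; the user values are made up, and SimpleNamespace stands in for a real User row:

    import string
    from types import SimpleNamespace

    user = SimpleNamespace(username='jdoe', user_id=42,
                           first_name='John', last_name='Doe')
    template = '${username}'  # the PERSONAL_GROUP_PATTERN default
    name = string.Template(template).safe_substitute(
        username=user.username, user_id=user.user_id,
        first_name=user.first_name, last_name=user.last_name)
    assert name == 'jdoe'
    # safe_substitute leaves unknown placeholders intact instead of raising:
    assert string.Template('${team}/${username}').safe_substitute(
        username='jdoe') == '${team}/jdoe'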
|
98 | 97 | def create_personal_repo_group(self, user, commit_early=True): |
|
99 | 98 | desc = self.PERSONAL_GROUP_DESC % {'username': user.username} |
|
100 | 99 | personal_repo_group_name = self.get_personal_group_name(user) |
|
101 | 100 | |
|
102 | 101 | # create a new one |
|
103 | 102 | RepoGroupModel().create( |
|
104 | 103 | group_name=personal_repo_group_name, |
|
105 | 104 | group_description=desc, |
|
106 | 105 | owner=user.username, |
|
107 | 106 | personal=True, |
|
108 | 107 | commit_early=commit_early) |
|
109 | 108 | |
|
110 | 109 | def _create_default_perms(self, new_group): |
|
111 | 110 | # create default permission |
|
112 | 111 | default_perm = 'group.read' |
|
113 | 112 | def_user = User.get_default_user() |
|
114 | 113 | for p in def_user.user_perms: |
|
115 | 114 | if p.permission.permission_name.startswith('group.'): |
|
116 | 115 | default_perm = p.permission.permission_name |
|
117 | 116 | break |
|
118 | 117 | |
|
119 | 118 | repo_group_to_perm = UserRepoGroupToPerm() |
|
120 | 119 | repo_group_to_perm.permission = Permission.get_by_key(default_perm) |
|
121 | 120 | |
|
122 | 121 | repo_group_to_perm.group = new_group |
|
123 | 122 | repo_group_to_perm.user = def_user |
|
124 | 123 | return repo_group_to_perm |
|
125 | 124 | |
|
126 | 125 | def _get_group_name_and_parent(self, group_name_full, repo_in_path=False, |
|
127 | 126 | get_object=False): |
|
128 | 127 | """ |
|
129 | 128 | Gets the group name and a parent group name from the given group name.

130 | 129 | If repo_in_path is set to true, we assume the full path also includes

131 | 130 | the repo name; in that case we strip the last element.
|
132 | 131 | |
|
133 | 132 | :param group_name_full: |
|
134 | 133 | """ |
|
135 | 134 | split_paths = 1 |
|
136 | 135 | if repo_in_path: |
|
137 | 136 | split_paths = 2 |
|
138 | 137 | _parts = group_name_full.rsplit(RepoGroup.url_sep(), split_paths) |
|
139 | 138 | |
|
140 | 139 | if repo_in_path and len(_parts) > 1: |
|
141 | 140 | # in such a case the last element is the repo_name
|
142 | 141 | _parts.pop(-1) |
|
143 | 142 | group_name_cleaned = _parts[-1] # just the group name |
|
144 | 143 | parent_repo_group_name = None |
|
145 | 144 | |
|
146 | 145 | if len(_parts) > 1: |
|
147 | 146 | parent_repo_group_name = _parts[0] |
|
148 | 147 | |
|
149 | 148 | parent_group = None |
|
150 | 149 | if parent_repo_group_name: |
|
151 | 150 | parent_group = RepoGroup.get_by_group_name(parent_repo_group_name) |
|
152 | 151 | |
|
153 | 152 | if get_object: |
|
154 | 153 | return group_name_cleaned, parent_repo_group_name, parent_group |
|
155 | 154 | |
|
156 | 155 | return group_name_cleaned, parent_repo_group_name |
|
157 | 156 | |
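The splitting logic above can be illustrated standalone; plain strings stand in for the database objects, and '/' is assumed as RepoGroup.url_sep():

    def split_group_name(group_name_full, repo_in_path=False, sep='/'):
        split_paths = 2 if repo_in_path else 1
        parts = group_name_full.rsplit(sep, split_paths)
        if repo_in_path and len(parts) > 1:
            parts.pop(-1)  # the last element is the repo name, drop it
        group_name = parts[-1]
        parent_name = parts[0] if len(parts) > 1 else None
        return group_name, parent_name

    assert split_group_name('grp/subgrp') == ('subgrp', 'grp')
    assert split_group_name('grp/subgrp/repo', repo_in_path=True) == ('subgrp', 'grp')
    assert split_group_name('toplevel') == ('toplevel', None)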
|
158 | 157 | def check_exist_filesystem(self, group_name, exc_on_failure=True): |
|
159 | 158 | create_path = os.path.join(self.repos_path, group_name) |
|
160 | 159 | log.debug('creating new group in %s', create_path) |
|
161 | 160 | |
|
162 | 161 | if os.path.isdir(create_path): |
|
163 | 162 | if exc_on_failure: |
|
164 | 163 | abs_create_path = os.path.abspath(create_path) |
|
165 | 164 | raise Exception(f'Directory `{abs_create_path}` already exists !') |
|
166 | 165 | return False |
|
167 | 166 | return True |
|
168 | 167 | |
|
169 | 168 | def _create_group(self, group_name): |
|
170 | 169 | """ |
|
171 | 170 | makes a repository group on the filesystem
|
172 | 171 | |
|
173 | 172 | :param repo_name: |
|
174 | 173 | :param parent_id: |
|
175 | 174 | """ |
|
176 | 175 | |
|
177 | 176 | self.check_exist_filesystem(group_name) |
|
178 | 177 | create_path = os.path.join(self.repos_path, group_name) |
|
179 | 178 | log.debug('creating new group in %s', create_path) |
|
180 | 179 | os.makedirs(create_path, mode=0o755) |
|
181 | 180 | log.debug('created group in %s', create_path) |
|
182 | 181 | |
|
183 | 182 | def _rename_group(self, old, new): |
|
184 | 183 | """ |
|
185 | 184 | Renames a group on filesystem |
|
186 | 185 | |
|
187 | 186 | :param group_name: |
|
188 | 187 | """ |
|
189 | 188 | |
|
190 | 189 | if old == new: |
|
191 | 190 | log.debug('skipping group rename') |
|
192 | 191 | return |
|
193 | 192 | |
|
194 | 193 | log.debug('renaming repository group from %s to %s', old, new) |
|
195 | 194 | |
|
196 | 195 | old_path = os.path.join(self.repos_path, old) |
|
197 | 196 | new_path = os.path.join(self.repos_path, new) |
|
198 | 197 | |
|
199 | 198 | log.debug('renaming repos paths from %s to %s', old_path, new_path) |
|
200 | 199 | |
|
201 | 200 | if os.path.isdir(new_path): |
|
202 | 201 | raise Exception('Was trying to rename to an already '
|
203 | 202 | 'existing dir %s' % new_path) |
|
204 | 203 | shutil.move(old_path, new_path) |
|
205 | 204 | |
|
206 | 205 | def _delete_filesystem_group(self, group, force_delete=False): |
|
207 | 206 | """ |
|
208 | 207 | Deletes a group from a filesystem |
|
209 | 208 | |
|
210 | 209 | :param group: instance of group from database |
|
211 | 210 | :param force_delete: use shutil rmtree to remove all objects |
|
212 | 211 | """ |
|
213 | 212 | paths = group.full_path.split(RepoGroup.url_sep()) |
|
214 | 213 | paths = os.sep.join(paths) |
|
215 | 214 | |
|
216 | 215 | rm_path = os.path.join(self.repos_path, paths) |
|
217 | 216 | log.info("Removing group %s", rm_path) |
|
218 | 217 | # delete only if that path really exists |
|
219 | 218 | if os.path.isdir(rm_path): |
|
220 | 219 | if force_delete: |
|
221 | 220 | shutil.rmtree(rm_path) |
|
222 | 221 | else: |
|
223 | 222 | # archive that group
|
224 | 223 | _now = datetime.datetime.now() |
|
225 | 224 | _ms = str(_now.microsecond).rjust(6, '0') |
|
226 | 225 | _d = 'rm__{}_GROUP_{}'.format( |
|
227 | 226 | _now.strftime('%Y%m%d_%H%M%S_' + _ms), group.name) |
|
228 | 227 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) |
|
229 | 228 | |
|
230 | 229 | def create(self, group_name, group_description, owner, just_db=False, |
|
231 | 230 | copy_permissions=False, personal=None, commit_early=True): |
|
232 | 231 | |
|
233 | 232 | (group_name_cleaned, |
|
234 | 233 | parent_group_name) = RepoGroupModel()._get_group_name_and_parent(group_name) |
|
235 | 234 | |
|
236 | 235 | parent_group = None |
|
237 | 236 | if parent_group_name: |
|
238 | 237 | parent_group = self._get_repo_group(parent_group_name) |
|
239 | 238 | if not parent_group: |
|
240 | 239 | # we tried to create a nested group, but the parent is not |
|
241 | 240 | # existing |
|
242 | 241 | raise ValueError( |
|
243 | 242 | 'Parent group `%s` given in `%s` group name ' |
|
244 | 243 | 'does not exist yet.' % (parent_group_name, group_name))
|
245 | 244 | |
|
246 | 245 | # because we are doing a cleanup, we need to check if such a directory

247 | 246 | # already exists. If we don't do that we can accidentally delete an

248 | 247 | # existing directory via cleanup, which can cause data issues, since

249 | 248 | # delete only renames the folder to a special syntax that later cleanup

250 | 249 | # functions can delete
|
251 | 250 | cleanup_group = self.check_exist_filesystem(group_name, |
|
252 | 251 | exc_on_failure=False) |
|
253 | 252 | user = self._get_user(owner) |
|
254 | 253 | if not user: |
|
255 | 254 | raise ValueError('Owner %s not found as rhodecode user' % owner)
|
256 | 255 | |
|
257 | 256 | try: |
|
258 | 257 | new_repo_group = RepoGroup() |
|
259 | 258 | new_repo_group.user = user |
|
260 | 259 | new_repo_group.group_description = group_description or group_name |
|
261 | 260 | new_repo_group.parent_group = parent_group |
|
262 | 261 | new_repo_group.group_name = group_name |
|
263 | 262 | new_repo_group.personal = personal |
|
264 | 263 | |
|
265 | 264 | self.sa.add(new_repo_group) |
|
266 | 265 | |
|
267 | 266 | # create an ADMIN permission for the owner, except if we're a super admin;

268 | 267 | # later the owner should go into the owner field of groups
|
269 | 268 | if not user.is_admin: |
|
270 | 269 | self.grant_user_permission(repo_group=new_repo_group, |
|
271 | 270 | user=owner, perm='group.admin') |
|
272 | 271 | |
|
273 | 272 | if parent_group and copy_permissions: |
|
274 | 273 | # copy permissions from parent |
|
275 | 274 | user_perms = UserRepoGroupToPerm.query() \ |
|
276 | 275 | .filter(UserRepoGroupToPerm.group == parent_group).all() |
|
277 | 276 | |
|
278 | 277 | group_perms = UserGroupRepoGroupToPerm.query() \ |
|
279 | 278 | .filter(UserGroupRepoGroupToPerm.group == parent_group).all() |
|
280 | 279 | |
|
281 | 280 | for perm in user_perms: |
|
282 | 281 | # don't copy over the permission for the user who is creating

283 | 282 | # this group; if they are not a super admin they get the admin

284 | 283 | # permission set above
|
285 | 284 | if perm.user != user or user.is_admin: |
|
286 | 285 | UserRepoGroupToPerm.create( |
|
287 | 286 | perm.user, new_repo_group, perm.permission) |
|
288 | 287 | |
|
289 | 288 | for perm in group_perms: |
|
290 | 289 | UserGroupRepoGroupToPerm.create( |
|
291 | 290 | perm.users_group, new_repo_group, perm.permission) |
|
292 | 291 | else: |
|
293 | 292 | perm_obj = self._create_default_perms(new_repo_group) |
|
294 | 293 | self.sa.add(perm_obj) |
|
295 | 294 | |
|
296 | 295 | # now commit the changes, earlier so we are sure everything is in |
|
297 | 296 | # the database. |
|
298 | 297 | if commit_early: |
|
299 | 298 | self.sa.commit() |
|
300 | 299 | if not just_db: |
|
301 | 300 | self._create_group(new_repo_group.group_name) |
|
302 | 301 | |
|
303 | 302 | # trigger the post hook |
|
304 | 303 | from rhodecode.lib import hooks_base |
|
305 | 304 | repo_group = RepoGroup.get_by_group_name(group_name) |
|
306 | 305 | |
|
307 | 306 | # update repo group commit caches initially |
|
308 | 307 | repo_group.update_commit_cache() |
|
309 | 308 | |
|
310 | 309 | hooks_base.create_repository_group( |
|
311 | 310 | created_by=user.username, **repo_group.get_dict()) |
|
312 | 311 | |
|
313 | 312 | # Trigger create event. |
|
314 | 313 | events.trigger(events.RepoGroupCreateEvent(repo_group)) |
|
315 | 314 | |
|
316 | 315 | return new_repo_group |
|
317 | 316 | except Exception: |
|
318 | 317 | self.sa.rollback() |
|
319 | 318 | log.exception('Exception occurred when creating repository group, ' |
|
320 | 319 | 'doing cleanup...') |
|
321 | 320 | # rollback things manually ! |
|
322 | 321 | repo_group = RepoGroup.get_by_group_name(group_name) |
|
323 | 322 | if repo_group: |
|
324 | 323 | RepoGroup.delete(repo_group.group_id) |
|
325 | 324 | self.sa.commit() |
|
326 | 325 | if cleanup_group: |
|
327 | 326 | RepoGroupModel()._delete_filesystem_group(repo_group) |
|
328 | 327 | raise |
|
329 | 328 | |
|
330 | 329 | def update_permissions( |
|
331 | 330 | self, repo_group, perm_additions=None, perm_updates=None, |
|
332 | 331 | perm_deletions=None, recursive=None, check_perms=True, |
|
333 | 332 | cur_user=None): |
|
334 | 333 | from rhodecode.model.repo import RepoModel |
|
335 | 334 | from rhodecode.lib.auth import HasUserGroupPermissionAny |
|
336 | 335 | |
|
337 | 336 | if not perm_additions: |
|
338 | 337 | perm_additions = [] |
|
339 | 338 | if not perm_updates: |
|
340 | 339 | perm_updates = [] |
|
341 | 340 | if not perm_deletions: |
|
342 | 341 | perm_deletions = [] |
|
343 | 342 | |
|
344 | 343 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') |
|
345 | 344 | |
|
346 | 345 | changes = { |
|
347 | 346 | 'added': [], |
|
348 | 347 | 'updated': [], |
|
349 | 348 | 'deleted': [], |
|
350 | 349 | 'default_user_changed': None |
|
351 | 350 | } |
|
352 | 351 | |
|
353 | def _set_perm_user(obj, user, perm): | |
|
354 | if isinstance(obj, RepoGroup): | |
|
355 | self.grant_user_permission( | |
|
356 | repo_group=obj, user=user, perm=perm) |
|
357 | elif isinstance(obj, Repository): | |
|
352 | def _set_perm_user(_obj: RepoGroup | Repository, _user_obj: User, _perm): | |
|
353 | ||
|
354 | if isinstance(_obj, RepoGroup): | |
|
355 | self.grant_user_permission(repo_group=_obj, user=_user_obj, perm=_perm) | |
|
356 | elif isinstance(_obj, Repository): | |
|
358 | 357 | # private repos will not allow changing the default

359 | 358 | # permissions using recursive mode
|
360 | if obj.private and user == User.DEFAULT_USER: | |
|
359 | if _obj.private and _user_obj.username == User.DEFAULT_USER: | |
|
360 | log.debug('Skipping private repo %s for user %s', _obj, _user_obj) | |
|
361 | 361 | return |
|
362 | 362 | |
|
363 | # we set group permission but we have to switch to repo |
|
364 | # permission | |
|
365 | perm = perm.replace('group.', 'repository.') | |
|
366 | RepoModel().grant_user_permission( | |
|
367 | repo=obj, user=user, perm=perm) | |
|
363 | # we set a group permission; we have to switch to the repo permission definition
|
364 | new_perm = _perm.replace('group.', 'repository.') | |
|
365 | RepoModel().grant_user_permission(repo=_obj, user=_user_obj, perm=new_perm) | |
|
366 | ||
|
367 | def _set_perm_group(_obj: RepoGroup | Repository, users_group: UserGroup, _perm): | |
|
368 | if isinstance(_obj, RepoGroup): | |
|
369 | self.grant_user_group_permission(repo_group=_obj, group_name=users_group, perm=_perm) | |
|
370 | elif isinstance(_obj, Repository): | |
|
371 | # we set a group permission; we have to switch to the repo permission definition
|
372 | new_perm = _perm.replace('group.', 'repository.') | |
|
373 | RepoModel().grant_user_group_permission(repo=_obj, group_name=users_group, perm=new_perm) | |
|
368 | 374 | |
|
369 | def _set_perm_group(obj, users_group, perm): | |
|
370 | if isinstance(obj, RepoGroup): | |
|
371 | self.grant_user_group_permission( |
|
372 | repo_group=obj, group_name=users_group, perm=perm) | |
|
373 | elif isinstance(obj, Repository): | |
|
374 | # we set group permission but we have to switch to repo | |
|
375 | # permission | |
|
376 | perm = perm.replace('group.', 'repository.') | |
|
377 | RepoModel().grant_user_group_permission( | |
|
378 | repo=obj, group_name=users_group, perm=perm) | |
|
375 | def _revoke_perm_user(_obj: RepoGroup | Repository, _user_obj: User): | |
|
376 | if isinstance(_obj, RepoGroup): | |
|
377 | self.revoke_user_permission(repo_group=_obj, user=_user_obj) | |
|
378 | elif isinstance(_obj, Repository): | |
|
379 | # private repos will not allow changing the default

380 | # permissions using recursive mode; also there's no revocation for the default user, just an update
|
381 | if _user_obj.username == User.DEFAULT_USER: | |
|
382 | log.debug('Skipping private repo %s for user %s', _obj, _user_obj) | |
|
383 | return | |
|
384 | RepoModel().revoke_user_permission(repo=_obj, user=_user_obj) | |
|
379 | 385 | |
|
380 | def _revoke_perm_user(obj, user): |
|
381 | if isinstance(obj, RepoGroup): | |
|
382 | self.revoke_user_permission(repo_group=obj, user=user) |
|
383 | elif isinstance(obj, Repository): | |
|
384 | RepoModel().revoke_user_permission(repo=obj, user=user) |
|
385 | ||
|
386 | def _revoke_perm_group(obj, user_group): | |
|
387 | if isinstance(obj, RepoGroup): | |
|
388 | self.revoke_user_group_permission( | |
|
389 | repo_group=obj, group_name=user_group) | |
|
390 | elif isinstance(obj, Repository): | |
|
391 | RepoModel().revoke_user_group_permission( | |
|
392 | repo=obj, group_name=user_group) | |
|
386 | def _revoke_perm_group(_obj: RepoGroup | Repository, user_group: UserGroup): | |
|
387 | if isinstance(_obj, RepoGroup): | |
|
388 | self.revoke_user_group_permission(repo_group=_obj, group_name=user_group) | |
|
389 | elif isinstance(_obj, Repository): | |
|
390 | RepoModel().revoke_user_group_permission(repo=_obj, group_name=user_group) | |
|
393 | 391 | |
|
394 | 392 | # start updates |
|
395 | 393 | log.debug('Now updating permissions for %s in recursive mode:%s', |
|
396 | 394 | repo_group, recursive) |
|
397 | 395 | |
|
398 | 396 | # initialize check function, we'll call that multiple times |
|
399 | 397 | has_group_perm = HasUserGroupPermissionAny(*req_perms) |
|
400 | 398 | |
|
401 | 399 | for obj in repo_group.recursive_groups_and_repos(): |
|
402 | 400 | # iterated obj is an instance of a repos group or repository in |
|
403 | 401 | # that group, recursive option can be: none, repos, groups, all |
|
404 | 402 | if recursive == 'all': |
|
405 | 403 | obj = obj |
|
406 | 404 | elif recursive == 'repos': |
|
407 | 405 | # skip groups, other than this one |
|
408 | 406 | if isinstance(obj, RepoGroup) and not obj == repo_group: |
|
409 | 407 | continue |
|
410 | 408 | elif recursive == 'groups': |
|
411 | 409 | # skip repos |
|
412 | 410 | if isinstance(obj, Repository): |
|
413 | 411 | continue |
|
414 | 412 | else: # recursive == 'none': |
|
415 | 413 | # DEFAULT option - don't apply to iterated objects |
|
416 | 414 | # also we do a break at the end of this loop. if we are not |
|
417 | 415 | # in recursive mode |
|
418 | 416 | obj = repo_group |
|
419 | 417 | |
|
420 | 418 | change_obj = obj.get_api_data() |
|
421 | 419 | |
|
422 | 420 | # update permissions |
|
423 | 421 | for member_id, perm, member_type in perm_updates: |
|
424 | 422 | member_id = int(member_id) |
|
425 | 423 | if member_type == 'user': |
|
426 | member_name = User.get(member_id).username |
|
424 | member_obj = User.get(member_id) | |
|
425 | member_name = member_obj.username | |
|
427 | 426 | if isinstance(obj, RepoGroup) and obj == repo_group and member_name == User.DEFAULT_USER: |
|
428 | 427 | # NOTE(dan): detect if we changed permissions for default user |
|
429 | 428 | perm_obj = self.sa.query(UserRepoGroupToPerm) \ |
|
430 | 429 | .filter(UserRepoGroupToPerm.user_id == member_id) \ |
|
431 | 430 | .filter(UserRepoGroupToPerm.group == repo_group) \ |
|
432 | 431 | .scalar() |
|
433 | 432 | if perm_obj and perm_obj.permission.permission_name != perm: |
|
434 | 433 | changes['default_user_changed'] = True |
|
435 | 434 | |
|
436 | 435 | # this updates also current one if found |
|
437 | _set_perm_user(obj, user=member_id, perm=perm) |
|
436 | _set_perm_user(obj, member_obj, perm) | |
|
438 | 437 | elif member_type == 'user_group': |
|
439 | member_name = UserGroup.get(member_id).users_group_name |
|
440 | if not check_perms or has_group_perm(member_name, | |
|
441 | user=cur_user): | |
|
442 | _set_perm_group(obj, users_group=member_id, perm=perm) |
|
438 | member_obj = UserGroup.get(member_id) | |
|
439 | member_name = member_obj.users_group_name | |
|
440 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
|
441 | _set_perm_group(obj, member_obj, perm) | |
|
443 | 442 | else: |
|
444 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
445 | "got {} instead".format(member_type)) | |
|
443 | raise ValueError( | |
|
444 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
|
445 | ) | |
|
446 | 446 | |
|
447 | 447 | changes['updated'].append( |
|
448 | 448 | {'change_obj': change_obj, 'type': member_type, |
|
449 | 449 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
450 | 450 | |
|
451 | 451 | # set new permissions |
|
452 | 452 | for member_id, perm, member_type in perm_additions: |
|
453 | 453 | member_id = int(member_id) |
|
454 | 454 | if member_type == 'user': |
|
455 | member_name = User.get(member_id).username |
|
456 | _set_perm_user(obj, user=member_id, perm=perm) | |
|
455 | member_obj = User.get(member_id) | |
|
456 | member_name = member_obj.username | |
|
457 | _set_perm_user(obj, member_obj, perm) | |
|
457 | 458 | elif member_type == 'user_group': |
|
458 | 459 | # check if we have permissions to alter this usergroup |
|
459 | member_name = UserGroup.get(member_id).users_group_name |
|
460 | if not check_perms or has_group_perm(member_name, | |
|
461 | user=cur_user): | |
|
462 | _set_perm_group(obj, users_group=member_id, perm=perm) |
|
460 | member_obj = UserGroup.get(member_id) | |
|
461 | member_name = member_obj.users_group_name | |
|
462 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
|
463 | _set_perm_group(obj, member_obj, perm) | |
|
463 | 464 | else: |
|
464 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
465 | "got {} instead".format(member_type)) | |
|
465 | raise ValueError( | |
|
466 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
|
467 | ) | |
|
466 | 468 | |
|
467 | 469 | changes['added'].append( |
|
468 | 470 | {'change_obj': change_obj, 'type': member_type, |
|
469 | 471 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
470 | 472 | |
|
471 | 473 | # delete permissions |
|
472 | 474 | for member_id, perm, member_type in perm_deletions: |
|
473 | 475 | member_id = int(member_id) |
|
474 | 476 | if member_type == 'user': |
|
475 | member_name = User.get(member_id).username |
|
476 | _revoke_perm_user(obj, user=member_id) | |
|
477 | member_obj = User.get(member_id) | |
|
478 | member_name = member_obj.username | |
|
479 | _revoke_perm_user(obj, member_obj) | |
|
477 | 480 | elif member_type == 'user_group': |
|
478 | 481 | # check if we have permissions to alter this usergroup |
|
479 | member_name = UserGroup.get(member_id).users_group_name |
|
480 | if not check_perms or has_group_perm(member_name, | |
|
481 | user=cur_user): | |
|
482 | _revoke_perm_group(obj, user_group=member_id) |
|
482 | member_obj = UserGroup.get(member_id) | |
|
483 | member_name = member_obj.users_group_name | |
|
484 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
|
485 | _revoke_perm_group(obj, member_obj) | |
|
483 | 486 | else: |
|
484 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
485 | "got {} instead".format(member_type)) | |
|
486 | ||
|
487 | raise ValueError( | |
|
488 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
|
489 | ) | |
|
487 | 490 | changes['deleted'].append( |
|
488 | 491 | {'change_obj': change_obj, 'type': member_type, |
|
489 | 492 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
490 | 493 | |
|
491 | 494 | # if it's not recursive call for all,repos,groups |
|
492 | 495 | # break the loop and don't proceed with other changes |
|
493 | 496 | if recursive not in ['all', 'repos', 'groups']: |
|
494 | 497 | break |
|
495 | 498 | |
|
496 | 499 | return changes |
|
497 | 500 | |
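A hedged sketch of how update_permissions() above is driven; the group name, member ids and cur_user are made-up stand-ins. Each entry is a (member_id, permission_name, member_type) tuple, and recursive is one of 'none', 'repos', 'groups' or 'all':

    changes = RepoGroupModel().update_permissions(
        repo_group='some-group',                          # hypothetical group name
        perm_additions=[(3, 'group.write', 'user')],      # grant write to user id 3
        perm_updates=[(1, 'group.read', 'user')],         # keep default user at read
        perm_deletions=[(7, 'group.read', 'user_group')],
        recursive='all',      # apply to every child repo and repo group
        cur_user=cur_user)    # acting user, checked for user-group permissions
    # `changes` summarizes the result: changes['added'], changes['updated'],
    # changes['deleted'] and changes['default_user_changed'].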
|
498 | 501 | def update(self, repo_group, form_data): |
|
499 | 502 | try: |
|
500 | 503 | repo_group = self._get_repo_group(repo_group) |
|
501 | 504 | old_path = repo_group.full_path |
|
502 | 505 | |
|
503 | 506 | # change properties |
|
504 | 507 | if 'group_description' in form_data: |
|
505 | 508 | repo_group.group_description = form_data['group_description'] |
|
506 | 509 | |
|
507 | 510 | if 'enable_locking' in form_data: |
|
508 | 511 | repo_group.enable_locking = form_data['enable_locking'] |
|
509 | 512 | |
|
510 | 513 | if 'group_parent_id' in form_data: |
|
511 | 514 | parent_group = ( |
|
512 | 515 | self._get_repo_group(form_data['group_parent_id'])) |
|
513 | 516 | repo_group.group_parent_id = ( |
|
514 | 517 | parent_group.group_id if parent_group else None) |
|
515 | 518 | repo_group.parent_group = parent_group |
|
516 | 519 | |
|
517 | 520 | # mikhail: to update the full_path, we have to explicitly |
|
518 | 521 | # update group_name |
|
519 | 522 | group_name = form_data.get('group_name', repo_group.name) |
|
520 | 523 | repo_group.group_name = repo_group.get_new_name(group_name) |
|
521 | 524 | |
|
522 | 525 | new_path = repo_group.full_path |
|
523 | 526 | |
|
524 | 527 | affected_user_ids = [] |
|
525 | 528 | if 'user' in form_data: |
|
526 | 529 | old_owner_id = repo_group.user.user_id |
|
527 | 530 | new_owner = User.get_by_username(form_data['user']) |
|
528 | 531 | repo_group.user = new_owner |
|
529 | 532 | |
|
530 | 533 | if old_owner_id != new_owner.user_id: |
|
531 | 534 | affected_user_ids = [new_owner.user_id, old_owner_id] |
|
532 | 535 | |
|
533 | 536 | self.sa.add(repo_group) |
|
534 | 537 | |
|
535 | 538 | # iterate over all members of this group and do fixes
|
536 | 539 | # set locking if given |
|
537 | 540 | # if obj is a repoGroup also fix the name of the group according |
|
538 | 541 | # to the parent |
|
539 | 542 | # if obj is a Repo fix its name
|
540 | 543 | # this can be potentially heavy operation |
|
541 | 544 | for obj in repo_group.recursive_groups_and_repos(): |
|
542 | 545 | # set the value from its parent
|
543 | 546 | obj.enable_locking = repo_group.enable_locking |
|
544 | 547 | if isinstance(obj, RepoGroup): |
|
545 | 548 | new_name = obj.get_new_name(obj.name) |
|
546 | 549 | log.debug('Fixing group %s to new name %s', |
|
547 | 550 | obj.group_name, new_name) |
|
548 | 551 | obj.group_name = new_name |
|
549 | 552 | |
|
550 | 553 | elif isinstance(obj, Repository): |
|
551 | 554 | # we need to get all repositories from this new group and |
|
552 | 555 | # rename them accordingly to new group path |
|
553 | 556 | new_name = obj.get_new_name(obj.just_name) |
|
554 | 557 | log.debug('Fixing repo %s to new name %s', |
|
555 | 558 | obj.repo_name, new_name) |
|
556 | 559 | obj.repo_name = new_name |
|
557 | 560 | |
|
558 | 561 | self.sa.add(obj) |
|
559 | 562 | |
|
560 | 563 | self._rename_group(old_path, new_path) |
|
561 | 564 | |
|
562 | 565 | # Trigger update event. |
|
563 | 566 | events.trigger(events.RepoGroupUpdateEvent(repo_group)) |
|
564 | 567 | |
|
565 | 568 | if affected_user_ids: |
|
566 | 569 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
567 | 570 | |
|
568 | 571 | return repo_group |
|
569 | 572 | except Exception: |
|
570 | 573 | log.error(traceback.format_exc()) |
|
571 | 574 | raise |
|
572 | 575 | |
|
573 | 576 | def delete(self, repo_group, force_delete=False, fs_remove=True): |
|
574 | 577 | repo_group = self._get_repo_group(repo_group) |
|
575 | 578 | if not repo_group: |
|
576 | 579 | return False |
|
577 | 580 | try: |
|
578 | 581 | self.sa.delete(repo_group) |
|
579 | 582 | if fs_remove: |
|
580 | 583 | self._delete_filesystem_group(repo_group, force_delete) |
|
581 | 584 | else: |
|
582 | 585 | log.debug('skipping removal from filesystem') |
|
583 | 586 | |
|
584 | 587 | # Trigger delete event. |
|
585 | 588 | events.trigger(events.RepoGroupDeleteEvent(repo_group)) |
|
586 | 589 | return True |
|
587 | 590 | |
|
588 | 591 | except Exception: |
|
589 | 592 | log.error('Error removing repo_group %s', repo_group) |
|
590 | 593 | raise |
|
591 | 594 | |
|
592 | 595 | def grant_user_permission(self, repo_group, user, perm): |
|
593 | 596 | """ |
|
594 | 597 | Grant permission for user on given repository group, or update |
|
595 | 598 | existing one if found |
|
596 | 599 | |
|
597 | 600 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
598 | 601 | or repositories_group name |
|
599 | 602 | :param user: Instance of User, user_id or username |
|
600 | 603 | :param perm: Instance of Permission, or permission_name |
|
601 | 604 | """ |
|
602 | 605 | |
|
603 | 606 | repo_group = self._get_repo_group(repo_group) |
|
604 | 607 | user = self._get_user(user) |
|
605 | 608 | permission = self._get_perm(perm) |
|
606 | 609 | |
|
607 | 610 | # check if we have that permission already |
|
608 | 611 | obj = self.sa.query(UserRepoGroupToPerm)\ |
|
609 | 612 | .filter(UserRepoGroupToPerm.user == user)\ |
|
610 | 613 | .filter(UserRepoGroupToPerm.group == repo_group)\ |
|
611 | 614 | .scalar() |
|
612 | 615 | if obj is None: |
|
613 | 616 | # create new ! |
|
614 | 617 | obj = UserRepoGroupToPerm() |
|
615 | 618 | obj.group = repo_group |
|
616 | 619 | obj.user = user |
|
617 | 620 | obj.permission = permission |
|
618 | 621 | self.sa.add(obj) |
|
619 | 622 | log.debug('Granted perm %s to %s on %s', perm, user, repo_group) |
|
620 | 623 | action_logger_generic( |
|
621 | 624 | 'granted permission: {} to user: {} on repogroup: {}'.format( |
|
622 | 625 | perm, user, repo_group), namespace='security.repogroup') |
|
623 | 626 | return obj |
|
624 | 627 | |
|
625 | 628 | def revoke_user_permission(self, repo_group, user): |
|
626 | 629 | """ |
|
627 | 630 | Revoke permission for user on given repository group |
|
628 | 631 | |
|
629 | 632 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
630 | 633 | or repositories_group name |
|
631 | 634 | :param user: Instance of User, user_id or username |
|
632 | 635 | """ |
|
633 | 636 | |
|
634 | 637 | repo_group = self._get_repo_group(repo_group) |
|
635 | 638 | user = self._get_user(user) |
|
636 | 639 | |
|
637 | 640 | obj = self.sa.query(UserRepoGroupToPerm)\ |
|
638 | 641 | .filter(UserRepoGroupToPerm.user == user)\ |
|
639 | 642 | .filter(UserRepoGroupToPerm.group == repo_group)\ |
|
640 | 643 | .scalar() |
|
641 | 644 | if obj: |
|
642 | 645 | self.sa.delete(obj) |
|
643 | 646 | log.debug('Revoked perm on %s on %s', repo_group, user) |
|
644 | 647 | action_logger_generic( |
|
645 | 648 | 'revoked permission from user: {} on repogroup: {}'.format( |
|
646 | 649 | user, repo_group), namespace='security.repogroup') |
|
647 | 650 | |
|
648 | 651 | def grant_user_group_permission(self, repo_group, group_name, perm): |
|
649 | 652 | """ |
|
650 | 653 | Grant permission for user group on given repository group, or update |
|
651 | 654 | existing one if found |
|
652 | 655 | |
|
653 | 656 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
654 | 657 | or repositories_group name |
|
655 | 658 | :param group_name: Instance of UserGroup, users_group_id, |
|
656 | 659 | or user group name |
|
657 | 660 | :param perm: Instance of Permission, or permission_name |
|
658 | 661 | """ |
|
659 | 662 | repo_group = self._get_repo_group(repo_group) |
|
660 | 663 | group_name = self._get_user_group(group_name) |
|
661 | 664 | permission = self._get_perm(perm) |
|
662 | 665 | |
|
663 | 666 | # check if we have that permission already |
|
664 | 667 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ |
|
665 | 668 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ |
|
666 | 669 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ |
|
667 | 670 | .scalar() |
|
668 | 671 | |
|
669 | 672 | if obj is None: |
|
670 | 673 | # create new |
|
671 | 674 | obj = UserGroupRepoGroupToPerm() |
|
672 | 675 | |
|
673 | 676 | obj.group = repo_group |
|
674 | 677 | obj.users_group = group_name |
|
675 | 678 | obj.permission = permission |
|
676 | 679 | self.sa.add(obj) |
|
677 | 680 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo_group) |
|
678 | 681 | action_logger_generic( |
|
679 | 682 | 'granted permission: {} to usergroup: {} on repogroup: {}'.format( |
|
680 | 683 | perm, group_name, repo_group), namespace='security.repogroup') |
|
681 | 684 | return obj |
|
682 | 685 | |
|
683 | 686 | def revoke_user_group_permission(self, repo_group, group_name): |
|
684 | 687 | """ |
|
685 | 688 | Revoke permission for user group on given repository group |
|
686 | 689 | |
|
687 | 690 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
688 | 691 | or repositories_group name |
|
689 | 692 | :param group_name: Instance of UserGroup, users_group_id, |
|
690 | 693 | or user group name |
|
691 | 694 | """ |
|
692 | 695 | repo_group = self._get_repo_group(repo_group) |
|
693 | 696 | group_name = self._get_user_group(group_name) |
|
694 | 697 | |
|
695 | 698 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ |
|
696 | 699 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ |
|
697 | 700 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ |
|
698 | 701 | .scalar() |
|
699 | 702 | if obj: |
|
700 | 703 | self.sa.delete(obj) |
|
701 | 704 | log.debug('Revoked perm to %s on %s', repo_group, group_name) |
|
702 | 705 | action_logger_generic( |
|
703 | 706 | 'revoked permission from usergroup: {} on repogroup: {}'.format( |
|
704 | 707 | group_name, repo_group), namespace='security.repogroup') |
|
705 | 708 | |
|
706 | 709 | @classmethod |
|
707 | 710 | def update_commit_cache(cls, repo_groups=None): |
|
708 | 711 | if not repo_groups: |
|
709 | 712 | repo_groups = RepoGroup.getAll() |
|
710 | 713 | for repo_group in repo_groups: |
|
711 | 714 | repo_group.update_commit_cache() |
|
712 | 715 | |
|
713 | 716 | def get_repo_groups_as_dict(self, repo_group_list=None, admin=False, |
|
714 | 717 | super_user_actions=False): |
|
715 | 718 | |
|
716 | 719 | from pyramid.threadlocal import get_current_request |
|
717 | 720 | _render = get_current_request().get_partial_renderer( |
|
718 | 721 | 'rhodecode:templates/data_table/_dt_elements.mako') |
|
719 | 722 | c = _render.get_call_context() |
|
720 | 723 | h = _render.get_helpers() |
|
721 | 724 | |
|
722 | 725 | def quick_menu(repo_group_name): |
|
723 | 726 | return _render('quick_repo_group_menu', repo_group_name) |
|
724 | 727 | |
|
725 | 728 | def repo_group_lnk(repo_group_name): |
|
726 | 729 | return _render('repo_group_name', repo_group_name) |
|
727 | 730 | |
|
728 | 731 | def last_change(last_change): |
|
729 | 732 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: |
|
730 | 733 | ts = time.time() |
|
731 | 734 | utc_offset = (datetime.datetime.fromtimestamp(ts) |
|
732 | 735 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() |
|
733 | 736 | last_change = last_change + datetime.timedelta(seconds=utc_offset) |
|
734 | 737 | return _render("last_change", last_change) |
|
735 | 738 | |
|
736 | 739 | def desc(desc, personal): |
|
737 | 740 | return _render( |
|
738 | 741 | 'repo_group_desc', desc, personal, c.visual.stylify_metatags) |
|
739 | 742 | |
|
740 | 743 | def repo_group_actions(repo_group_id, repo_group_name, gr_count): |
|
741 | 744 | return _render( |
|
742 | 745 | 'repo_group_actions', repo_group_id, repo_group_name, gr_count) |
|
743 | 746 | |
|
744 | 747 | def repo_group_name(repo_group_name, children_groups): |
|
745 | 748 | return _render("repo_group_name", repo_group_name, children_groups) |
|
746 | 749 | |
|
747 | 750 | def user_profile(username): |
|
748 | 751 | return _render('user_profile', username) |
|
749 | 752 | |
|
750 | 753 | repo_group_data = [] |
|
751 | 754 | for group in repo_group_list: |
|
752 | 755 | # NOTE(marcink): because we use only the raw column we need to load it like that
|
753 | 756 | changeset_cache = RepoGroup._load_changeset_cache( |
|
754 | 757 | '', group._changeset_cache) |
|
755 | 758 | last_commit_change = RepoGroup._load_commit_change(changeset_cache) |
|
756 | 759 | row = { |
|
757 | 760 | "menu": quick_menu(group.group_name), |
|
758 | 761 | "name": repo_group_lnk(group.group_name), |
|
759 | 762 | "name_raw": group.group_name, |
|
760 | 763 | |
|
761 | 764 | "last_change": last_change(last_commit_change), |
|
762 | 765 | |
|
763 | 766 | "last_changeset": "", |
|
764 | 767 | "last_changeset_raw": "", |
|
765 | 768 | |
|
766 | 769 | "desc": desc(h.escape(group.group_description), group.personal), |
|
767 | 770 | "top_level_repos": 0, |
|
768 | 771 | "owner": user_profile(group.User.username) |
|
769 | 772 | } |
|
770 | 773 | if admin: |
|
771 | 774 | repo_count = group.repositories.count() |
|
772 | 775 | children_groups = list(map( |
|
773 | 776 | h.safe_str, |
|
774 | 777 | itertools.chain((g.name for g in group.parents), |
|
775 | 778 | (x.name for x in [group])))) |
|
776 | 779 | row.update({ |
|
777 | 780 | "action": repo_group_actions( |
|
778 | 781 | group.group_id, group.group_name, repo_count), |
|
779 | 782 | "top_level_repos": repo_count, |
|
780 | 783 | "name": repo_group_name(group.group_name, children_groups), |
|
781 | 784 | |
|
782 | 785 | }) |
|
783 | 786 | repo_group_data.append(row) |
|
784 | 787 | |
|
785 | 788 | return repo_group_data |
|
786 | 789 | |
|
787 | 790 | def get_repo_groups_data_table( |
|
788 | 791 | self, draw, start, limit, |
|
789 | 792 | search_q, order_by, order_dir, |
|
790 | 793 | auth_user, repo_group_id): |
|
791 | 794 | from rhodecode.model.scm import RepoGroupList |
|
792 | 795 | |
|
793 | 796 | _perms = ['group.read', 'group.write', 'group.admin'] |
|
794 | 797 | repo_groups = RepoGroup.query() \ |
|
795 | 798 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
796 | 799 | .all() |
|
797 | 800 | auth_repo_group_list = RepoGroupList( |
|
798 | 801 | repo_groups, perm_set=_perms, |
|
799 | 802 | extra_kwargs=dict(user=auth_user)) |
|
800 | 803 | |
|
801 | 804 | allowed_ids = [-1] |
|
802 | 805 | for repo_group in auth_repo_group_list: |
|
803 | 806 | allowed_ids.append(repo_group.group_id) |
|
804 | 807 | |
|
805 | 808 | repo_groups_data_total_count = RepoGroup.query() \ |
|
806 | 809 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
807 | 810 | .filter(or_( |
|
808 | 811 | # generate multiple INs to work around IN-clause size limits
|
809 | 812 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) |
|
810 | 813 | ) \ |
|
811 | 814 | .count() |
|
812 | 815 | |
|
813 | 816 | base_q = Session.query( |
|
814 | 817 | RepoGroup.group_name, |
|
815 | 818 | RepoGroup.group_name_hash, |
|
816 | 819 | RepoGroup.group_description, |
|
817 | 820 | RepoGroup.group_id, |
|
818 | 821 | RepoGroup.personal, |
|
819 | 822 | RepoGroup.updated_on, |
|
820 | 823 | RepoGroup._changeset_cache, |
|
821 | 824 | User, |
|
822 | 825 | ) \ |
|
823 | 826 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
824 | 827 | .filter(or_( |
|
825 | 828 | # generate multiple INs to work around IN-clause size limits
|
826 | 829 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) |
|
827 | 830 | ) \ |
|
828 | 831 | .join(User, User.user_id == RepoGroup.user_id) \ |
|
829 | 832 | .group_by(RepoGroup, User) |
|
830 | 833 | |
|
831 | 834 | repo_groups_data_total_filtered_count = base_q.count() |
|
832 | 835 | |
|
833 | 836 | sort_defined = False |
|
834 | 837 | |
|
835 | 838 | if order_by == 'group_name': |
|
836 | 839 | sort_col = func.lower(RepoGroup.group_name) |
|
837 | 840 | sort_defined = True |
|
838 | 841 | elif order_by == 'user_username': |
|
839 | 842 | sort_col = User.username |
|
840 | 843 | else: |
|
841 | 844 | sort_col = getattr(RepoGroup, order_by, None) |
|
842 | 845 | |
|
843 | 846 | if sort_defined or sort_col: |
|
844 | 847 | if order_dir == 'asc': |
|
845 | 848 | sort_col = sort_col.asc() |
|
846 | 849 | else: |
|
847 | 850 | sort_col = sort_col.desc() |
|
848 | 851 | |
|
849 | 852 | base_q = base_q.order_by(sort_col) |
|
850 | 853 | base_q = base_q.offset(start).limit(limit) |
|
851 | 854 | |
|
852 | 855 | repo_group_list = base_q.all() |
|
853 | 856 | |
|
854 | 857 | repo_groups_data = RepoGroupModel().get_repo_groups_as_dict( |
|
855 | 858 | repo_group_list=repo_group_list, admin=False) |
|
856 | 859 | |
|
857 | 860 | data = ({ |
|
858 | 861 | 'draw': draw, |
|
859 | 862 | 'data': repo_groups_data, |
|
860 | 863 | 'recordsTotal': repo_groups_data_total_count, |
|
861 | 864 | 'recordsFiltered': repo_groups_data_total_filtered_count, |
|
862 | 865 | }) |
|
863 | 866 | return data |
|
864 | 867 | |
|
865 | 868 | def _get_defaults(self, repo_group_name): |
|
866 | 869 | repo_group = RepoGroup.get_by_group_name(repo_group_name) |
|
867 | 870 | |
|
868 | 871 | if repo_group is None: |
|
869 | 872 | return None |
|
870 | 873 | |
|
871 | 874 | defaults = repo_group.get_dict() |
|
872 | 875 | defaults['repo_group_name'] = repo_group.name |
|
873 | 876 | defaults['repo_group_description'] = repo_group.group_description |
|
874 | 877 | defaults['repo_group_enable_locking'] = repo_group.enable_locking |
|
875 | 878 | |
|
876 | 879 | # we use -1 as this is how we mark an empty group in HTML
|
877 | 880 | defaults['repo_group'] = defaults['group_parent_id'] or -1 |
|
878 | 881 | |
|
879 | 882 | # fill owner |
|
880 | 883 | if repo_group.user: |
|
881 | 884 | defaults.update({'user': repo_group.user.username}) |
|
882 | 885 | else: |
|
883 | 886 | replacement_user = User.get_first_super_admin().username |
|
884 | 887 | defaults.update({'user': replacement_user}) |
|
885 | 888 | |
|
886 | 889 | return defaults |
@@ -1,83 +1,83 b'' | |||
|
1 | 1 | # Copyright (C) 2013-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | import logging |
|
20 | 20 | import urllib.request |
|
21 | 21 | import urllib.error |
|
22 | 22 | import urllib.parse |
|
23 | 23 | from packaging.version import Version |
|
24 | 24 | |
|
25 | 25 | import rhodecode |
|
26 | 26 | from rhodecode.lib.ext_json import json |
|
27 | 27 | from rhodecode.model import BaseModel |
|
28 | 28 | from rhodecode.model.meta import Session |
|
29 | 29 | from rhodecode.model.settings import SettingsModel |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | log = logging.getLogger(__name__) |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | class UpdateModel(BaseModel): |
|
36 | 36 | UPDATE_SETTINGS_KEY = 'update_version' |
|
37 | 37 | UPDATE_URL_SETTINGS_KEY = 'rhodecode_update_url' |
|
38 | 38 | |
|
39 | 39 | @staticmethod |
|
40 | 40 | def get_update_data(update_url): |
|
41 | 41 | """Return the JSON update data.""" |
|
42 | 42 | ver = rhodecode.__version__ |
|
43 | 43 | log.debug('Checking for upgrade on `%s` server', update_url) |
|
44 | 44 | opener = urllib.request.build_opener() |
|
45 | opener.addheaders = [('User-agent', 'RhodeCode-SCM/
|
45 | opener.addheaders = [('User-agent', f'RhodeCode-SCM/{ver.strip()}')] | |
|
46 | 46 | response = opener.open(update_url) |
|
47 | 47 | response_data = response.read() |
|
48 | 48 | data = json.loads(response_data) |
|
49 | 49 | log.debug('update server returned data') |
|
50 | 50 | return data |
|
51 | 51 | |
|
52 | 52 | def get_update_url(self): |
|
53 | 53 | settings = SettingsModel().get_all_settings() |
|
54 | 54 | return settings.get(self.UPDATE_URL_SETTINGS_KEY) |
|
55 | 55 | |
|
56 | 56 | def store_version(self, version): |
|
57 | 57 | log.debug('Storing version %s into settings', version) |
|
58 | 58 | setting = SettingsModel().create_or_update_setting( |
|
59 | 59 | self.UPDATE_SETTINGS_KEY, version) |
|
60 | 60 | Session().add(setting) |
|
61 | 61 | Session().commit() |
|
62 | 62 | |
|
63 | 63 | def get_stored_version(self, fallback=None): |
|
64 | 64 | obj = SettingsModel().get_setting_by_name(self.UPDATE_SETTINGS_KEY) |
|
65 | 65 | if obj: |
|
66 | 66 | return obj.app_settings_value |
|
67 | 67 | return fallback or '0.0.0' |
|
68 | 68 | |
|
69 | 69 | def _sanitize_version(self, version): |
|
70 | 70 | """ |
|
71 | 71 | Clean up our custom version string,

72 | 72 | e.g. 4.11.0_20171204_204825_CE_default_EE_default to 4.11.0
|
73 | 73 | """ |
|
74 | 74 | return version.split('_')[0] |
|
75 | 75 | |
|
76 | 76 | def is_outdated(self, cur_version, latest_version=None): |
|
77 | 77 | latest_version = latest_version or self.get_stored_version() |
|
78 | 78 | try: |
|
79 | 79 | cur_version = self._sanitize_version(cur_version) |
|
80 | 80 | return Version(latest_version) > Version(cur_version) |
|
81 | 81 | except Exception: |
|
82 | 82 | # could be invalid version, etc |
|
83 | 83 | return False |
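A standalone sketch of the comparison performed by is_outdated(), using the same packaging.version semantics and the suffix cleanup shown above (the version strings are examples):

    from packaging.version import Version

    def sanitize(version):
        # '4.11.0_20171204_204825_CE_default_EE_default' -> '4.11.0'
        return version.split('_')[0]

    cur = sanitize('5.2.1_20240916_000000_CE_default')
    assert Version('5.3.0') > Version(cur)        # newer release -> outdated
    assert not Version('5.2.1') > Version(cur)    # same release -> up to date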
@@ -1,89 +1,103 b'' | |||
|
1 | 1 | |
|
2 | 2 | <div id="update_notice" style="display: none; margin: 0px 0px 30px 0px"> |
|
3 | 3 | <div>${_('Checking for updates...')}</div> |
|
4 | 4 | </div> |
|
5 | 5 | |
|
6 | 6 | |
|
7 | 7 | <div class="panel panel-default"> |
|
8 | 8 | <div class="panel-heading"> |
|
9 | 9 | <h3 class="panel-title">${_('System Info')}</h3> |
|
10 | 10 | % if c.allowed_to_snapshot: |
|
11 | 11 | <a href="${h.route_path('admin_settings_system', _query={'snapshot':1})}" class="panel-edit">${_('create summary snapshot')}</a> |
|
12 | 12 | % endif |
|
13 | 13 | </div> |
|
14 | 14 | <div class="panel-body"> |
|
15 | 15 | <dl class="dl-horizontal settings dt-400"> |
|
16 | 16 | % for dt, dd, warn in c.data_items: |
|
17 | 17 | <dt>${dt}${':' if dt else '---'}</dt> |
|
18 | 18 | <dd>${dd}${'' if dt else '---'} |
|
19 | 19 | % if warn and warn['message']: |
|
20 | 20 | <div class="alert-${warn['type']}"> |
|
21 | 21 | <strong>${warn['message']}</strong> |
|
22 | 22 | </div> |
|
23 | 23 | % endif |
|
24 | 24 | </dd> |
|
25 | 25 | % endfor |
|
26 | 26 | </dl> |
|
27 | 27 | </div> |
|
28 | 28 | </div> |
|
29 | 29 | |
|
30 | 30 | <div class="panel panel-default"> |
|
31 | 31 | <div class="panel-heading"> |
|
32 | <h3 class="panel-title">${_('VCS Server Config')}</h3> |
|
32 | <h3 class="panel-title">${_('RhodeCode Server Config')}</h3> | |
|
33 | </div> | |
|
34 | <div class="panel-body"> | |
|
35 | <dl class="dl-horizontal settings dt-400"> | |
|
36 | % for dt, dd in c.rhodecode_data_items: | |
|
37 | <dt>${dt}${':' if dt else '---'}</dt> | |
|
38 | <dd>${dd}${'' if dt else '---'}</dd> | |
|
39 | % endfor | |
|
40 | </dl> | |
|
41 | </div> | |
|
42 | </div> | |
|
43 | ||
|
44 | <div class="panel panel-default"> | |
|
45 | <div class="panel-heading"> | |
|
46 | <h3 class="panel-title">${_('VCS Server Config')}</h3> | |
|
33 | 47 | </div> |
|
34 | 48 | <div class="panel-body"> |
|
35 | 49 | <dl class="dl-horizontal settings dt-400"> |
|
36 | 50 | % for dt, dd in c.vcsserver_data_items: |
|
37 | 51 | <dt>${dt}${':' if dt else '---'}</dt> |
|
38 | 52 | <dd>${dd}${'' if dt else '---'}</dd> |
|
39 | 53 | % endfor |
|
40 | 54 | </dl> |
|
41 | 55 | </div> |
|
42 | 56 | </div> |
|
43 | 57 | |
|
44 | 58 | <div class="panel panel-default"> |
|
45 | 59 | <div class="panel-heading"> |
|
46 | 60 | <h3 class="panel-title">${_('Python Packages')}</h3> |
|
47 | 61 | </div> |
|
48 | 62 | <div class="panel-body"> |
|
49 | 63 | <table> |
|
50 | 64 | <th></th> |
|
51 | 65 | <th></th> |
|
52 | 66 | <th></th> |
|
53 | 67 | % for name, package_data in c.py_modules['human_value']: |
|
54 | 68 | <tr> |
|
55 | 69 | <td>${name.lower()}</td> |
|
56 | 70 | <td>${package_data['version']}</td> |
|
57 | 71 | <td>(${package_data['location']})</td> |
|
58 | 72 | </tr> |
|
59 | 73 | % endfor |
|
60 | 74 | </table> |
|
61 | 75 | |
|
62 | 76 | </div> |
|
63 | 77 | </div> |
|
64 | 78 | |
|
65 | 79 | <div class="panel panel-default"> |
|
66 | 80 | <div class="panel-heading"> |
|
67 | 81 | <h3 class="panel-title">${_('Env Variables')}</h3> |
|
68 | 82 | </div> |
|
69 | 83 | <div class="panel-body"> |
|
70 | 84 | <table> |
|
71 | 85 | <th></th> |
|
72 | 86 | <th></th> |
|
73 | 87 | % for env_key, env_val in c.env_data: |
|
74 | 88 | <tr> |
|
75 | 89 | <td style="vertical-align: top">${env_key}</td> |
|
76 | 90 | <td>${env_val}</td> |
|
77 | 91 | </tr> |
|
78 | 92 | % endfor |
|
79 | 93 | </table> |
|
80 | 94 | |
|
81 | 95 | </div> |
|
82 | 96 | </div> |
|
83 | 97 | |
|
84 | 98 | <script> |
|
85 | 99 | $('#check_for_update').click(function(e){ |
|
86 | 100 | $('#update_notice').show(); |
|
87 | 101 | $('#update_notice').load("${h.route_path('admin_settings_system_update', _query={'ver': request.GET.get('ver')})}"); |
|
88 | 102 | }) |
|
89 | 103 | </script> |
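For context, each panel above iterates plain (label, value) pairs; a hedged sketch of the shape the view would hand over as c.rhodecode_data_items (the keys and values are illustrative only, the real set comes from the exposed rhodecode config):

    # hypothetical data, mirroring the (dt, dd) pairs the template loops over
    rhodecode_data_items = [
        ('config_file', '/etc/rhodecode/rhodecode.ini'),
        ('app.base_url', 'https://code.example.com'),
        ('', ''),  # an empty label renders as a '---' separator row
    ]
    for dt, dd in rhodecode_data_items:
        print(f"{dt or '---'}: {dd or '---'}")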
@@ -1,743 +1,811 b'' | |||
|
1 | 1 | |
|
2 | 2 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
3 | 3 | # |
|
4 | 4 | # This program is free software: you can redistribute it and/or modify |
|
5 | 5 | # it under the terms of the GNU Affero General Public License, version 3 |
|
6 | 6 | # (only), as published by the Free Software Foundation. |
|
7 | 7 | # |
|
8 | 8 | # This program is distributed in the hope that it will be useful, |
|
9 | 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | 11 | # GNU General Public License for more details. |
|
12 | 12 | # |
|
13 | 13 | # You should have received a copy of the GNU Affero General Public License |
|
14 | 14 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
15 | 15 | # |
|
16 | 16 | # This program is dual-licensed. If you wish to learn more about the |
|
17 | 17 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
18 | 18 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
19 | 19 | |
|
20 | 20 | import pytest |
|
21 | 21 | |
|
22 | 22 | from rhodecode.lib.auth import AuthUser |
|
23 | 23 | from rhodecode.model.db import ( |
|
24 | 24 | RepoGroup, User, UserGroupRepoGroupToPerm, Permission, UserToPerm, |
|
25 | 25 | UserGroupToPerm) |
|
26 | 26 | from rhodecode.model.meta import Session |
|
27 | 27 | from rhodecode.model.permission import PermissionModel |
|
28 | 28 | from rhodecode.model.repo import RepoModel |
|
29 | 29 | from rhodecode.model.repo_group import RepoGroupModel |
|
30 | 30 | from rhodecode.model.user import UserModel |
|
31 | 31 | from rhodecode.model.user_group import UserGroupModel |
|
32 | 32 | from rhodecode.tests.fixture import Fixture |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | fixture = Fixture() |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | @pytest.fixture() |
|
39 | 39 | def repo_name(backend_hg): |
|
40 | 40 | return backend_hg.repo_name |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | class TestPermissions(object): |
|
44 | 44 | |
|
45 | 45 | @pytest.fixture(scope='class', autouse=True) |
|
46 | 46 | def default_permissions(self, request, baseapp): |
|
47 | 47 | # recreate default user to get a clean start |
|
48 | 48 | PermissionModel().create_default_user_permissions( |
|
49 | 49 | user=User.DEFAULT_USER, force=True) |
|
50 | 50 | Session().commit() |
|
51 | 51 | |
|
52 | 52 | @pytest.fixture(autouse=True) |
|
53 | 53 | def prepare_users(self, request): |
|
54 | 54 | # TODO: User creation is a duplicate of test_notifications, check
|
55 | 55 | # if that can be unified |
|
56 | 56 | self.u1 = UserModel().create_or_update( |
|
57 | 57 | username=u'u1', password=u'qweqwe', |
|
58 | 58 | email=u'u1@rhodecode.org', firstname=u'u1', lastname=u'u1' |
|
59 | 59 | ) |
|
60 | 60 | self.u2 = UserModel().create_or_update( |
|
61 | 61 | username=u'u2', password=u'qweqwe', |
|
62 | 62 | email=u'u2@rhodecode.org', firstname=u'u2', lastname=u'u2' |
|
63 | 63 | ) |
|
64 | 64 | self.u3 = UserModel().create_or_update( |
|
65 | 65 | username=u'u3', password=u'qweqwe', |
|
66 | 66 | email=u'u3@rhodecode.org', firstname=u'u3', lastname=u'u3' |
|
67 | 67 | ) |
|
68 | 68 | self.anon = User.get_default_user() |
|
69 | 69 | self.a1 = UserModel().create_or_update( |
|
70 | 70 | username=u'a1', password=u'qweqwe', |
|
71 | 71 | email=u'a1@rhodecode.org', firstname=u'a1', lastname=u'a1', |
|
72 | 72 | admin=True |
|
73 | 73 | ) |
|
74 | 74 | Session().commit() |
|
75 | 75 | |
|
76 | 76 | request.addfinalizer(self.cleanup) |
|
77 | 77 | |
|
78 | 78 | def cleanup(self): |
|
79 | 79 | if hasattr(self, 'test_repo'): |
|
80 | 80 | RepoModel().delete(repo=self.test_repo) |
|
81 | 81 | Session().commit() |
|
82 | 82 | |
|
83 | 83 | if hasattr(self, 'g1'): |
|
84 | 84 | RepoGroupModel().delete(self.g1.group_id) |
|
85 | 85 | if hasattr(self, 'g2'): |
|
86 | 86 | RepoGroupModel().delete(self.g2.group_id) |
|
87 | 87 | Session().commit() |
|
88 | 88 | |
|
89 | 89 | UserModel().delete(self.u1, handle_repos='delete', handle_repo_groups='delete') |
|
90 | 90 | UserModel().delete(self.u2, handle_repos='delete', handle_repo_groups='delete') |
|
91 | 91 | UserModel().delete(self.u3, handle_repos='delete', handle_repo_groups='delete') |
|
92 | 92 | UserModel().delete(self.a1, handle_repos='delete', handle_repo_groups='delete') |
|
93 | 93 | Session().commit() |
|
94 | 94 | |
|
95 | 95 | if hasattr(self, 'ug1'): |
|
96 | 96 | UserGroupModel().delete(self.ug1, force=True) |
|
97 | 97 | Session().commit() |
|
98 | 98 | |
|
99 | 99 | def test_default_perms_set(self, repo_name): |
|
100 | 100 | assert repo_perms(self.u1)[repo_name] == 'repository.read' |
|
101 | 101 | new_perm = 'repository.write' |
|
102 | 102 | RepoModel().grant_user_permission(repo=repo_name, user=self.u1, |
|
103 | 103 | perm=new_perm) |
|
104 | 104 | Session().commit() |
|
105 | 105 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
106 | 106 | |
|
107 | 107 | def test_default_admin_perms_set(self, repo_name): |
|
108 | 108 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
109 | 109 | RepoModel().grant_user_permission(repo=repo_name, user=self.a1, |
|
110 | 110 | perm='repository.write') |
|
111 | 111 | Session().commit() |
|
112 | 112 | # cannot really downgrade admin's permissions !? they still get set as

113 | 113 | # admin !
|
114 | 114 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
115 | 115 | |
|
116 | 116 | def test_default_group_perms(self, repo_name): |
|
117 | 117 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) |
|
118 | 118 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) |
|
119 | 119 | |
|
120 | 120 | assert repo_perms(self.u1)[repo_name] == 'repository.read' |
|
121 | 121 | assert group_perms(self.u1) == { |
|
122 | 122 | 'test1': 'group.read', 'test2': 'group.read'} |
|
123 | 123 | assert global_perms(self.u1) == set( |
|
124 | 124 | Permission.DEFAULT_USER_PERMISSIONS) |
|
125 | 125 | |
|
126 | 126 | def test_default_admin_group_perms(self, repo_name): |
|
127 | 127 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) |
|
128 | 128 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) |
|
129 | 129 | |
|
130 | 130 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
131 | 131 | assert group_perms(self.a1) == { |
|
132 | 132 | 'test1': 'group.admin', 'test2': 'group.admin'} |
|
133 | 133 | |
|
134 | 134 | def test_default_owner_repo_perms(self, backend, user_util, test_repo): |
|
135 | 135 | user = user_util.create_user() |
|
136 | 136 | repo = test_repo('minimal', backend.alias) |
|
137 | 137 | org_owner = repo.user |
|
138 | 138 | assert repo_perms(user)[repo.repo_name] == 'repository.read' |
|
139 | 139 | |
|
140 | 140 | repo.user = user |
|
141 | 141 | assert repo_perms(user)[repo.repo_name] == 'repository.admin' |
|
142 | 142 | repo.user = org_owner |
|
143 | 143 | |
|
144 | 144 | def test_default_owner_branch_perms(self, user_util, test_user_group): |
|
145 | 145 | user = user_util.create_user() |
|
146 | 146 | assert branch_perms(user) == {} |
|
147 | 147 | |
|
148 | 148 | def test_default_owner_repo_group_perms(self, user_util, test_repo_group): |
|
149 | 149 | user = user_util.create_user() |
|
150 | 150 | org_owner = test_repo_group.user |
|
151 | 151 | |
|
152 | 152 | assert group_perms(user)[test_repo_group.group_name] == 'group.read' |
|
153 | 153 | |
|
154 | 154 | test_repo_group.user = user |
|
155 | 155 | assert group_perms(user)[test_repo_group.group_name] == 'group.admin' |
|
156 | 156 | test_repo_group.user = org_owner |
|
157 | 157 | |
|
158 | 158 | def test_default_owner_user_group_perms(self, user_util, test_user_group): |
|
159 | 159 | user = user_util.create_user() |
|
160 | 160 | org_owner = test_user_group.user |
|
161 | 161 | |
|
162 | 162 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.read' |
|
163 | 163 | |
|
164 | 164 | test_user_group.user = user |
|
165 | 165 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.admin' |
|
166 | 166 | |
|
167 | 167 | test_user_group.user = org_owner |
|
168 | 168 | |
|
169 | def test_propagated_permissions_from_repo_group_to_private_repo(self, repo_name): | |
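# Regression guard: recursive permission updates applied from a parent repo
# group must not re-expose a repository that was marked private; the default
# (anonymous) user has to stay at 'repository.none' for it.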
|
170 | # make group | |
|
171 | self.g1 = fixture.create_repo_group('TOP_LEVEL', skip_if_exists=True) | |
|
172 | # both perms should be read ! | |
|
173 | assert group_perms(self.anon) == { | |
|
174 | 'TOP_LEVEL': 'group.read' | |
|
175 | } | |
|
176 | ||
|
177 | # Create repo inside the TOP_LEVEL | |
|
178 | repo_name_in_group = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm_on_private_repo']) | |
|
179 | self.test_repo = fixture.create_repo(name=repo_name_in_group, | |
|
180 | repo_type='hg', | |
|
181 | repo_group=self.g1, | |
|
182 | cur_user=self.u1,) | |
|
183 | assert repo_perms(self.anon) == { | |
|
184 | repo_name_in_group: 'repository.read', | |
|
185 | 'vcs_test_git': 'repository.read', | |
|
186 | 'vcs_test_hg': 'repository.read', | |
|
187 | 'vcs_test_svn': 'repository.read', | |
|
188 | } | |
|
189 | # Now change default user permissions | |
|
190 | new_perm = 'repository.write' | |
|
191 | perm_updates = [ | |
|
192 | [self.anon.user_id, new_perm, 'user'] | |
|
193 | ] | |
|
194 | RepoGroupModel().update_permissions( | |
|
195 | repo_group=self.g1, perm_updates=perm_updates, recursive='all') | |
|
196 | ||
|
197 | Session().commit() | |
|
198 | assert repo_perms(self.anon) == { | |
|
199 | repo_name_in_group: new_perm, | |
|
200 | 'vcs_test_git': 'repository.read', | |
|
201 | 'vcs_test_hg': 'repository.read', | |
|
202 | 'vcs_test_svn': 'repository.read', | |
|
203 | } | |
|
204 | ||
|
205 | # NOW MARK repo as private | |
|
206 | changes = { | |
|
207 | 'repo_private': True | |
|
208 | } | |
|
209 | repo = RepoModel().get_by_repo_name(repo_name_in_group) | |
|
210 | RepoModel().update(repo, **changes) | |
|
211 | Session().commit() | |
|
212 | ||
|
213 | # Private repo sets 'none' permission for default user | |
|
214 | assert repo_perms(self.anon) == { | |
|
215 | repo_name_in_group: 'repository.none', | |
|
216 | 'vcs_test_git': 'repository.read', | |
|
217 | 'vcs_test_hg': 'repository.read', | |
|
218 | 'vcs_test_svn': 'repository.read', | |
|
219 | } | |
|
220 | ||
|
221 | # apply the same recursive "update" logic, but now the anon permissions should not be impacted |
|
222 | new_perm = 'repository.write' | |
|
223 | perm_updates = [ | |
|
224 | [self.anon.user_id, new_perm, 'user'] | |
|
225 | ] | |
|
226 | RepoGroupModel().update_permissions( | |
|
227 | repo_group=self.g1, perm_updates=perm_updates, recursive='all') | |
|
228 | ||
|
229 | Session().commit() | |
|
230 | assert repo_perms(self.anon) == { | |
|
231 | repo_name_in_group: 'repository.none', | |
|
232 | 'vcs_test_git': 'repository.read', | |
|
233 | 'vcs_test_hg': 'repository.read', | |
|
234 | 'vcs_test_svn': 'repository.read', | |
|
235 | } | |
|
236 | ||
|
169 | 237 | def test_propagated_permission_from_users_group_by_explicit_perms_exist( |
|
170 | 238 | self, repo_name): |
|
171 | 239 | # make group |
|
172 | 240 | self.ug1 = fixture.create_user_group('G1') |
|
173 | 241 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
174 | 242 | |
|
175 | 243 | # set permission to lower |
|
176 | 244 | new_perm = 'repository.none' |
|
177 | 245 | RepoModel().grant_user_permission( |
|
178 | 246 | repo=repo_name, user=self.u1, perm=new_perm) |
|
179 | 247 | Session().commit() |
|
180 | 248 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
181 | 249 | |
|
182 | 250 | # grant perm for group; this should not override permission from user

183 | 251 | # since it was explicitly set
|
184 | 252 | new_perm_gr = 'repository.write' |
|
185 | 253 | RepoModel().grant_user_group_permission( |
|
186 | 254 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) |
|
187 | 255 | Session().commit() |
|
188 | 256 | |
|
189 | 257 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
190 | 258 | assert group_perms(self.u1) == {} |
|
191 | 259 | |
|
192 | 260 | def test_propagated_permission_from_users_group(self, repo_name): |
|
193 | 261 | # make group |
|
194 | 262 | self.ug1 = fixture.create_user_group('G1') |
|
195 | 263 | UserGroupModel().add_user_to_group(self.ug1, self.u3) |
|
196 | 264 | |
|
197 | 265 | # grant perm for group |
|
198 | 266 | # this should override default permission from user |
|
199 | 267 | new_perm_gr = 'repository.write' |
|
200 | 268 | RepoModel().grant_user_group_permission( |
|
201 | 269 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) |
|
202 | 270 | Session().commit() |
|
203 | 271 | |
|
204 | 272 | assert repo_perms(self.u3)[repo_name] == new_perm_gr |
|
205 | 273 | assert group_perms(self.u3) == {} |
|
206 | 274 | |
|
207 | 275 | def test_propagated_permission_from_users_group_lower_weight( |
|
208 | 276 | self, repo_name): |
|
209 | 277 | # make group with user |
|
210 | 278 | self.ug1 = fixture.create_user_group('G1') |
|
211 | 279 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
212 | 280 | |
|
213 | 281 | # set permission to lower |
|
214 | 282 | new_perm_h = 'repository.write' |
|
215 | 283 | RepoModel().grant_user_permission( |
|
216 | 284 | repo=repo_name, user=self.u1, perm=new_perm_h) |
|
217 | 285 | Session().commit() |
|
218 | 286 | |
|
219 | 287 | assert repo_perms(self.u1)[repo_name] == new_perm_h |
|
220 | 288 | |
|
221 | 289 | # grant perm for group; this should NOT override permission from user
|
222 | 290 | # since it's lower than granted |
|
223 | 291 | new_perm_l = 'repository.read' |
|
224 | 292 | RepoModel().grant_user_group_permission( |
|
225 | 293 | repo=repo_name, group_name=self.ug1, perm=new_perm_l) |
|
226 | 294 | Session().commit() |
|
227 | 295 | |
|
228 | 296 | assert repo_perms(self.u1)[repo_name] == new_perm_h |
|
229 | 297 | assert group_perms(self.u1) == {} |
|
230 | 298 | |
|
231 | 299 | def test_repo_in_group_permissions(self): |
|
232 | 300 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) |
|
233 | 301 | self.g2 = fixture.create_repo_group('group2', skip_if_exists=True) |
|
234 | 302 | # both perms should be read ! |
|
235 | 303 | assert group_perms(self.u1) == \ |
|
236 | 304 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
237 | 305 | |
|
238 | 306 | assert group_perms(self.anon) == \ |
|
239 | 307 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
240 | 308 | |
|
241 | 309 | # Change perms to none for both groups |
|
242 | 310 | RepoGroupModel().grant_user_permission( |
|
243 | 311 | repo_group=self.g1, user=self.anon, perm='group.none') |
|
244 | 312 | RepoGroupModel().grant_user_permission( |
|
245 | 313 | repo_group=self.g2, user=self.anon, perm='group.none') |
|
246 | 314 | |
|
247 | 315 | assert group_perms(self.u1) == \ |
|
248 | 316 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
249 | 317 | assert group_perms(self.anon) == \ |
|
250 | 318 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
251 | 319 | |
|
252 | 320 | # add repo to group |
|
253 | 321 | name = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm']) |
|
254 | 322 | self.test_repo = fixture.create_repo(name=name, |
|
255 | 323 | repo_type='hg', |
|
256 | 324 | repo_group=self.g1, |
|
257 | 325 | cur_user=self.u1,) |
|
258 | 326 | |
|
259 | 327 | assert group_perms(self.u1) == \ |
|
260 | 328 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
261 | 329 | assert group_perms(self.anon) == \ |
|
262 | 330 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
263 | 331 | |
|
264 | 332 | # grant permission for u2 ! |
|
265 | 333 | RepoGroupModel().grant_user_permission( |
|
266 | 334 | repo_group=self.g1, user=self.u2, perm='group.read') |
|
267 | 335 | RepoGroupModel().grant_user_permission( |
|
268 | 336 | repo_group=self.g2, user=self.u2, perm='group.read') |
|
269 | 337 | Session().commit() |
|
270 | 338 | assert self.u1 != self.u2 |
|
271 | 339 | |
|
272 | 340 | # u1 and anon should not have changed perms, while u2 should !
|
273 | 341 | assert group_perms(self.u1) == \ |
|
274 | 342 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
275 | 343 | assert group_perms(self.u2) == \ |
|
276 | 344 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
277 | 345 | assert group_perms(self.anon) == \ |
|
278 | 346 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
279 | 347 | |
|
280 | 348 | def test_repo_group_user_as_user_group_member(self): |
|
281 | 349 | # create Group1 |
|
282 | 350 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) |
|
283 | 351 | assert group_perms(self.anon) == {u'group1': u'group.read'} |
|
284 | 352 | |
|
285 | 353 | # set default permission to none |
|
286 | 354 | RepoGroupModel().grant_user_permission( |
|
287 | 355 | repo_group=self.g1, user=self.anon, perm='group.none') |
|
288 | 356 | Session().commit() |
|
289 | 357 | |
|
290 | 358 | # make group |
|
291 | 359 | self.ug1 = fixture.create_user_group('G1') |
|
292 | 360 | # add user to group |
|
293 | 361 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
294 | 362 | Session().commit() |
|
295 | 363 | |
|
296 | 364 | # check if user is in the group |
|
297 | 365 | ug1 = UserGroupModel().get(self.ug1.users_group_id) |
|
298 | 366 | members = [x.user_id for x in ug1.members] |
|
299 | 367 | assert members == [self.u1.user_id] |
|
300 | 368 | # (the user was already added to that group above)
|
301 | 369 | |
|
302 | 370 | # check his permissions |
|
303 | 371 | assert group_perms(self.anon) == {u'group1': u'group.none'} |
|
304 | 372 | assert group_perms(self.u1) == {u'group1': u'group.none'} |
|
305 | 373 | |
|
306 | 374 | # grant ug1 read permissions for g1
|
307 | 375 | RepoGroupModel().grant_user_group_permission( |
|
308 | 376 | repo_group=self.g1, group_name=self.ug1, perm='group.read') |
|
309 | 377 | Session().commit() |
|
310 | 378 | |
|
311 | 379 | # check if the permission was stored in the database
|
312 | 380 | obj = Session().query(UserGroupRepoGroupToPerm)\ |
|
313 | 381 | .filter(UserGroupRepoGroupToPerm.group == self.g1)\ |
|
314 | 382 | .filter(UserGroupRepoGroupToPerm.users_group == self.ug1)\ |
|
315 | 383 | .scalar() |
|
316 | 384 | assert obj.permission.permission_name == 'group.read' |
|
317 | 385 | |
|
318 | 386 | assert group_perms(self.anon) == {u'group1': u'group.none'} |
|
319 | 387 | assert group_perms(self.u1) == {u'group1': u'group.read'} |
|
320 | 388 | |
|
321 | 389 | def test_inherited_permissions_from_default_on_user_enabled(self): |
|
322 | 390 | # enable fork and create on default user |
|
323 | 391 | _form_result = { |
|
324 | 392 | 'default_repo_create': 'hg.create.repository', |
|
325 | 393 | 'default_fork_create': 'hg.fork.repository' |
|
326 | 394 | } |
|
327 | 395 | PermissionModel().set_new_user_perms( |
|
328 | 396 | User.get_default_user(), _form_result) |
|
329 | 397 | Session().commit() |
|
330 | 398 | |
|
331 | 399 | # make sure inherit flag is turned on |
|
332 | 400 | self.u1.inherit_default_permissions = True |
|
333 | 401 | Session().commit() |
|
334 | 402 | |
|
335 | 403 | # this user will have inherited permissions from default user |
|
336 | 404 | assert global_perms(self.u1) == default_perms() |
|
337 | 405 | |
|
338 | 406 | def test_inherited_permissions_from_default_on_user_disabled(self): |
|
339 | 407 | # disable fork and create on default user |
|
340 | 408 | _form_result = { |
|
341 | 409 | 'default_repo_create': 'hg.create.none', |
|
342 | 410 | 'default_fork_create': 'hg.fork.none' |
|
343 | 411 | } |
|
344 | 412 | PermissionModel().set_new_user_perms( |
|
345 | 413 | User.get_default_user(), _form_result) |
|
346 | 414 | Session().commit() |
|
347 | 415 | |
|
348 | 416 | # make sure inherit flag is turned on |
|
349 | 417 | self.u1.inherit_default_permissions = True |
|
350 | 418 | Session().commit() |
|
351 | 419 | |
|
352 | 420 | # this user will have inherited permissions from default user |
|
353 | 421 | expected_perms = default_perms( |
|
354 | 422 | added=['hg.create.none', 'hg.fork.none'], |
|
355 | 423 | removed=['hg.create.repository', 'hg.fork.repository']) |
|
356 | 424 | assert global_perms(self.u1) == expected_perms |
|
357 | 425 | |
|
358 | 426 | def test_non_inherited_permissions_from_default_on_user_enabled(self): |
|
359 | 427 | user_model = UserModel() |
|
360 | 428 | # enable fork and create on default user |
|
361 | 429 | usr = User.DEFAULT_USER |
|
362 | 430 | user_model.revoke_perm(usr, 'hg.create.none') |
|
363 | 431 | user_model.grant_perm(usr, 'hg.create.repository') |
|
364 | 432 | user_model.revoke_perm(usr, 'hg.fork.none') |
|
365 | 433 | user_model.grant_perm(usr, 'hg.fork.repository') |
|
366 | 434 | |
|
367 | 435 | # disable global perms on specific user |
|
368 | 436 | user_model.revoke_perm(self.u1, 'hg.create.repository') |
|
369 | 437 | user_model.grant_perm(self.u1, 'hg.create.none') |
|
370 | 438 | user_model.revoke_perm(self.u1, 'hg.fork.repository') |
|
371 | 439 | user_model.grant_perm(self.u1, 'hg.fork.none') |
|
372 | 440 | |
|
373 | 441 | # TODO(marcink): check branch permissions now ? |
|
374 | 442 | |
|
375 | 443 | # make sure inherit flag is turned off |
|
376 | 444 | self.u1.inherit_default_permissions = False |
|
377 | 445 | Session().commit() |
|
378 | 446 | |
|
379 | 447 | # this user will have non-inherited permissions from his

380 | 448 | # explicitly set permissions
|
381 | 449 | assert global_perms(self.u1) == { |
|
382 | 450 | 'hg.create.none', |
|
383 | 451 | 'hg.fork.none', |
|
384 | 452 | 'hg.register.manual_activate', |
|
385 | 453 | 'hg.password_reset.enabled', |
|
386 | 454 | 'hg.extern_activate.auto', |
|
387 | 455 | 'repository.read', |
|
388 | 456 | 'group.read', |
|
389 | 457 | 'usergroup.read', |
|
390 | 458 | 'branch.push_force', |
|
391 | 459 | } |
|
392 | 460 | |
|
393 | 461 | def test_non_inherited_permissions_from_default_on_user_disabled(self): |
|
394 | 462 | user_model = UserModel() |
|
395 | 463 | # disable fork and create on default user |
|
396 | 464 | usr = User.DEFAULT_USER |
|
397 | 465 | user_model.revoke_perm(usr, 'hg.create.repository') |
|
398 | 466 | user_model.grant_perm(usr, 'hg.create.none') |
|
399 | 467 | user_model.revoke_perm(usr, 'hg.fork.repository') |
|
400 | 468 | user_model.grant_perm(usr, 'hg.fork.none') |
|
401 | 469 | |
|
402 | 470 | # enable global perms on specific user |
|
403 | 471 | user_model.revoke_perm(self.u1, 'hg.create.none') |
|
404 | 472 | user_model.grant_perm(self.u1, 'hg.create.repository') |
|
405 | 473 | user_model.revoke_perm(self.u1, 'hg.fork.none') |
|
406 | 474 | user_model.grant_perm(self.u1, 'hg.fork.repository') |
|
407 | 475 | |
|
408 | 476 | # make sure inherit flag is turned off |
|
409 | 477 | self.u1.inherit_default_permissions = False |
|
410 | 478 | Session().commit() |
|
411 | 479 | |
|
412 | 480 | # TODO(marcink): check branch perms |
|
413 | 481 | |
|
414 | 482 | # this user will have non-inherited permissions from his

415 | 483 | # explicitly set permissions
|
416 | 484 | assert global_perms(self.u1) == { |
|
417 | 485 | 'hg.create.repository', |
|
418 | 486 | 'hg.fork.repository', |
|
419 | 487 | 'hg.register.manual_activate', |
|
420 | 488 | 'hg.password_reset.enabled', |
|
421 | 489 | 'hg.extern_activate.auto', |
|
422 | 490 | 'repository.read', |
|
423 | 491 | 'group.read', |
|
424 | 492 | 'usergroup.read', |
|
425 | 493 | 'branch.push_force', |
|
426 | 494 | } |
|
427 | 495 | |
|
428 | 496 | @pytest.mark.parametrize('perm, expected_perm', [ |
|
429 | 497 | ('hg.inherit_default_perms.false', 'repository.none', ), |
|
430 | 498 | ('hg.inherit_default_perms.true', 'repository.read', ), |
|
431 | 499 | ]) |
|
432 | 500 | def test_inherited_permissions_on_objects(self, perm, expected_perm): |
|
433 | 501 | _form_result = { |
|
434 | 502 | 'default_inherit_default_permissions': perm, |
|
435 | 503 | } |
|
436 | 504 | PermissionModel().set_new_user_perms( |
|
437 | 505 | User.get_default_user(), _form_result) |
|
438 | 506 | Session().commit() |
|
439 | 507 | |
|
440 | 508 | # make sure inherit flag is turned on |
|
441 | 509 | self.u1.inherit_default_permissions = True |
|
442 | 510 | Session().commit() |
|
443 | 511 | |
|
444 | 512 | # TODO(marcink): check branch perms |
|
445 | 513 | |
|
446 | 514 | # this user will have inherited permissions from default user |
|
447 | 515 | assert global_perms(self.u1) == { |
|
448 | 516 | 'hg.create.none', |
|
449 | 517 | 'hg.fork.none', |
|
450 | 518 | 'hg.register.manual_activate', |
|
451 | 519 | 'hg.password_reset.enabled', |
|
452 | 520 | 'hg.extern_activate.auto', |
|
453 | 521 | 'repository.read', |
|
454 | 522 | 'group.read', |
|
455 | 523 | 'usergroup.read', |
|
456 | 524 | 'branch.push_force', |
|
457 | 525 | 'hg.create.write_on_repogroup.true', |
|
458 | 526 | 'hg.usergroup.create.false', |
|
459 | 527 | 'hg.repogroup.create.false', |
|
460 | 528 | perm |
|
461 | 529 | } |
|
462 | 530 | |
|
463 | 531 | assert set(repo_perms(self.u1).values()) == set([expected_perm]) |
|
464 | 532 | |
|
465 | 533 | def test_repo_owner_permissions_not_overwritten_by_group(self): |
|
466 | 534 | # create repo as USER, |
|
467 | 535 | self.test_repo = fixture.create_repo(name='myownrepo', |
|
468 | 536 | repo_type='hg', |
|
469 | 537 | cur_user=self.u1) |
|
470 | 538 | |
|
471 | 539 | # he has permissions of admin as owner |
|
472 | 540 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
473 | 541 | |
|
474 | 542 | # set his permission as user group, he should still be admin |
|
475 | 543 | self.ug1 = fixture.create_user_group('G1') |
|
476 | 544 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
477 | 545 | RepoModel().grant_user_group_permission( |
|
478 | 546 | self.test_repo, |
|
479 | 547 | group_name=self.ug1, |
|
480 | 548 | perm='repository.none') |
|
481 | 549 | Session().commit() |
|
482 | 550 | |
|
483 | 551 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
484 | 552 | |
|
485 | 553 | def test_repo_owner_permissions_not_overwritten_by_others(self): |
|
486 | 554 | # create repo as USER, |
|
487 | 555 | self.test_repo = fixture.create_repo(name='myownrepo', |
|
488 | 556 | repo_type='hg', |
|
489 | 557 | cur_user=self.u1) |
|
490 | 558 | |
|
491 | 559 | # he has permissions of admin as owner |
|
492 | 560 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
493 | 561 | |
|
494 | 562 | # set his permission as user, he should still be admin |
|
495 | 563 | RepoModel().grant_user_permission( |
|
496 | 564 | self.test_repo, user=self.u1, perm='repository.none') |
|
497 | 565 | Session().commit() |
|
498 | 566 | |
|
499 | 567 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
500 | 568 | |
|
501 | 569 | def test_repo_group_owner_permissions_not_overwritten_by_group(self): |
|
502 | 570 | # "u1" shall be owner without any special permission assigned |
|
503 | 571 | self.g1 = fixture.create_repo_group('test1') |
|
504 | 572 | |
|
505 | 573 | # Make user group and grant a permission to user group |
|
506 | 574 | self.ug1 = fixture.create_user_group('G1') |
|
507 | 575 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
508 | 576 | RepoGroupModel().grant_user_group_permission( |
|
509 | 577 | repo_group=self.g1, group_name=self.ug1, perm='group.write') |
|
510 | 578 | Session().commit() |
|
511 | 579 | |
|
512 | 580 | # Verify that the user does not get any special permission if he is not

513 | 581 | # the owner
|
514 | 582 | assert group_perms(self.u1) == {'test1': 'group.write'} |
|
515 | 583 | |
|
516 | 584 | # Make him owner of the repo group |
|
517 | 585 | self.g1.user = self.u1 |
|
518 | 586 | assert group_perms(self.u1) == {'test1': 'group.admin'} |
|
519 | 587 | |
|
520 | 588 | def test_repo_group_owner_permissions_not_overwritten_by_others(self): |
|
521 | 589 | # "u1" shall be owner without any special permission assigned |
|
522 | 590 | self.g1 = fixture.create_repo_group('test1') |
|
523 | 591 | RepoGroupModel().grant_user_permission( |
|
524 | 592 | repo_group=self.g1, user=self.u1, perm='group.write') |
|
525 | 593 | Session().commit() |
|
526 | 594 | |
|
527 | 595 | # Verify that the user does not get any special permission if he is not

528 | 596 | # the owner
|
529 | 597 | assert group_perms(self.u1) == {'test1': 'group.write'} |
|
530 | 598 | |
|
531 | 599 | # Make him owner of the repo group |
|
532 | 600 | self.g1.user = self.u1 |
|
533 | 601 | assert group_perms(self.u1) == {u'test1': 'group.admin'} |
|
534 | 602 | |
|
535 | 603 | def assert_user_perm_equal( |
|
536 | 604 | self, user, change_factor=0, compare_keys=None): |
|
537 | 605 | perms = UserToPerm.query().filter(UserToPerm.user == user).all() |
|
538 | 606 | assert len(perms) == \ |
|
539 | 607 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor |
|
540 | 608 | if compare_keys: |
|
541 | 609 | assert set( |
|
542 | 610 | x.permissions.permission_name for x in perms) == compare_keys |
|
543 | 611 | |
|
544 | 612 | def assert_def_user_group_perm_equal( |
|
545 | 613 | self, user_group, change_factor=0, compare_keys=None): |
|
546 | 614 | perms = UserGroupToPerm.query().filter( |
|
547 | 615 | UserGroupToPerm.users_group == user_group).all() |
|
548 | 616 | assert len(perms) == \ |
|
549 | 617 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor |
|
550 | 618 | if compare_keys: |
|
551 | 619 | assert set( |
|
552 | 620 | x.permissions.permission_name for x in perms) == compare_keys |
|
553 | 621 | |
|
554 | 622 | def test_set_default_permissions(self): |
|
555 | 623 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
556 | 624 | self.assert_user_perm_equal(user=self.u1) |
|
557 | 625 | |
|
558 | 626 | def test_set_default_permissions_after_one_is_missing(self): |
|
559 | 627 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
560 | 628 | self.assert_user_perm_equal(user=self.u1) |
|
561 | 629 | # now we delete one, it should be re-created after another call |
|
562 | 630 | perms = UserToPerm.query().filter(UserToPerm.user == self.u1).all() |
|
563 | 631 | Session().delete(perms[0]) |
|
564 | 632 | Session().commit() |
|
565 | 633 | |
|
566 | 634 | self.assert_user_perm_equal(user=self.u1, change_factor=-1) |
|
567 | 635 | |
|
568 | 636 | # create missing one ! |
|
569 | 637 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
570 | 638 | self.assert_user_perm_equal(user=self.u1) |
|
571 | 639 | |
|
572 | 640 | @pytest.mark.parametrize("perm, modify_to", [ |
|
573 | 641 | ('repository.read', 'repository.none'), |
|
574 | 642 | ('group.read', 'group.none'), |
|
575 | 643 | ('usergroup.read', 'usergroup.none'), |
|
576 | 644 | ('hg.create.repository', 'hg.create.none'), |
|
577 | 645 | ('hg.fork.repository', 'hg.fork.none'), |
|
578 | 646 | ('hg.register.manual_activate', 'hg.register.auto_activate',) |
|
579 | 647 | ]) |
|
580 | 648 | def test_set_default_permissions_after_modification(self, perm, modify_to): |
|
581 | 649 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
582 | 650 | self.assert_user_perm_equal(user=self.u1) |
|
583 | 651 | |
|
584 | 652 | old = Permission.get_by_key(perm) |
|
585 | 653 | new = Permission.get_by_key(modify_to) |
|
586 | 654 | assert old is not None |
|
587 | 655 | assert new is not None |
|
588 | 656 | |
|
589 | 657 | # now modify permissions |
|
590 | 658 | p = UserToPerm.query().filter( |
|
591 | 659 | UserToPerm.user == self.u1).filter( |
|
592 | 660 | UserToPerm.permission == old).one() |
|
593 | 661 | p.permission = new |
|
594 | 662 | Session().add(p) |
|
595 | 663 | Session().commit() |
|
596 | 664 | |
|
597 | 665 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
598 | 666 | self.assert_user_perm_equal(user=self.u1) |
|
599 | 667 | |
|
600 | 668 | def test_clear_user_perms(self): |
|
601 | 669 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
602 | 670 | self.assert_user_perm_equal(user=self.u1) |
|
603 | 671 | |
|
604 | 672 | # now clear permissions |
|
605 | 673 | cleared = PermissionModel()._clear_user_perms(self.u1.user_id) |
|
606 | 674 | self.assert_user_perm_equal(user=self.u1, |
|
607 | 675 | change_factor=len(cleared)*-1) |
|
608 | 676 | |
|
609 | 677 | def test_clear_user_group_perms(self): |
|
610 | 678 | self.ug1 = fixture.create_user_group('G1') |
|
611 | 679 | PermissionModel().create_default_user_group_permissions( |
|
612 | 680 | user_group=self.ug1) |
|
613 | 681 | self.assert_def_user_group_perm_equal(user_group=self.ug1) |
|
614 | 682 | |
|
615 | 683 | # now clear permissions |
|
616 | 684 | cleared = PermissionModel()._clear_user_group_perms( |
|
617 | 685 | self.ug1.users_group_id) |
|
618 | 686 | self.assert_def_user_group_perm_equal(user_group=self.ug1, |
|
619 | 687 | change_factor=len(cleared)*-1) |
|
620 | 688 | |
|
621 | 689 | @pytest.mark.parametrize("form_result", [ |
|
622 | 690 | {}, |
|
623 | 691 | {'default_repo_create': 'hg.create.repository'}, |
|
624 | 692 | {'default_repo_create': 'hg.create.repository', |
|
625 | 693 | 'default_repo_perm': 'repository.read'}, |
|
626 | 694 | {'default_repo_create': 'hg.create.none', |
|
627 | 695 | 'default_repo_perm': 'repository.write', |
|
628 | 696 | 'default_fork_create': 'hg.fork.none'}, |
|
629 | 697 | ]) |
|
630 | 698 | def test_set_new_user_permissions(self, form_result): |
|
631 | 699 | _form_result = {} |
|
632 | 700 | _form_result.update(form_result) |
|
633 | 701 | PermissionModel().set_new_user_perms(self.u1, _form_result) |
|
634 | 702 | Session().commit() |
|
635 | 703 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) |
|
636 | 704 | - len(form_result.keys())) |
|
637 | 705 | self.assert_user_perm_equal( |
|
638 | 706 | self.u1, change_factor=change_factor) |
|
639 | 707 | |
|
640 | 708 | @pytest.mark.parametrize("form_result", [ |
|
641 | 709 | {}, |
|
642 | 710 | {'default_repo_create': 'hg.create.repository'}, |
|
643 | 711 | {'default_repo_create': 'hg.create.repository', |
|
644 | 712 | 'default_repo_perm': 'repository.read'}, |
|
645 | 713 | {'default_repo_create': 'hg.create.none', |
|
646 | 714 | 'default_repo_perm': 'repository.write', |
|
647 | 715 | 'default_fork_create': 'hg.fork.none'}, |
|
648 | 716 | ]) |
|
649 | 717 | def test_set_new_user_group_permissions(self, form_result): |
|
650 | 718 | _form_result = {} |
|
651 | 719 | _form_result.update(form_result) |
|
652 | 720 | self.ug1 = fixture.create_user_group('G1') |
|
653 | 721 | PermissionModel().set_new_user_group_perms(self.ug1, _form_result) |
|
654 | 722 | Session().commit() |
|
655 | 723 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) |
|
656 | 724 | - len(form_result.keys())) |
|
657 | 725 | self.assert_def_user_group_perm_equal( |
|
658 | 726 | self.ug1, change_factor=change_factor) |
|
659 | 727 | |
|
660 | 728 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
661 | 729 | (True, 'repository.admin'), |
|
662 | 730 | (False, 'repository.read'), |
|
663 | 731 | ]) |
|
664 | 732 | def test_get_default_repo_perms_from_user_group_with_active_group( |
|
665 | 733 | self, backend, user_util, group_active, expected_perm): |
|
666 | 734 | repo = backend.create_repo() |
|
667 | 735 | user = user_util.create_user() |
|
668 | 736 | user_group = user_util.create_user_group( |
|
669 | 737 | members=[user], users_group_active=group_active) |
|
670 | 738 | |
|
671 | 739 | user_util.grant_user_group_permission_to_repo( |
|
672 | 740 | repo, user_group, 'repository.admin') |
|
673 | 741 | permissions = repo_perms(user) |
|
674 | 742 | repo_permission = permissions.get(repo.repo_name) |
|
675 | 743 | assert repo_permission == expected_perm |
|
676 | 744 | |
|
677 | 745 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
678 | 746 | (True, 'group.admin'), |
|
679 | 747 | (False, 'group.read') |
|
680 | 748 | ]) |
|
681 | 749 | def test_get_default_group_perms_from_user_group_with_active_group( |
|
682 | 750 | self, user_util, group_active, expected_perm): |
|
683 | 751 | user = user_util.create_user() |
|
684 | 752 | repo_group = user_util.create_repo_group() |
|
685 | 753 | user_group = user_util.create_user_group( |
|
686 | 754 | members=[user], users_group_active=group_active) |
|
687 | 755 | |
|
688 | 756 | user_util.grant_user_group_permission_to_repo_group( |
|
689 | 757 | repo_group, user_group, 'group.admin') |
|
690 | 758 | permissions = group_perms(user) |
|
691 | 759 | group_permission = permissions.get(repo_group.name) |
|
692 | 760 | assert group_permission == expected_perm |
|
693 | 761 | |
|
694 | 762 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
695 | 763 | (True, 'usergroup.admin'), |
|
696 | 764 | (False, 'usergroup.read') |
|
697 | 765 | ]) |
|
698 | 766 | def test_get_default_user_group_perms_from_user_group_with_active_group( |
|
699 | 767 | self, user_util, group_active, expected_perm): |
|
700 | 768 | user = user_util.create_user() |
|
701 | 769 | user_group = user_util.create_user_group( |
|
702 | 770 | members=[user], users_group_active=group_active) |
|
703 | 771 | target_user_group = user_util.create_user_group() |
|
704 | 772 | |
|
705 | 773 | user_util.grant_user_group_permission_to_user_group( |
|
706 | 774 | target_user_group, user_group, 'usergroup.admin') |
|
707 | 775 | permissions = user_group_perms(user) |
|
708 | 776 | group_permission = permissions.get(target_user_group.users_group_name) |
|
709 | 777 | assert group_permission == expected_perm |
|
710 | 778 | |
|
711 | 779 | |
|
712 | 780 | def repo_perms(user): |
|
713 | 781 | auth_user = AuthUser(user_id=user.user_id) |
|
714 | 782 | return auth_user.permissions['repositories'] |
|
715 | 783 | |
|
716 | 784 | |
|
717 | 785 | def branch_perms(user): |
|
718 | 786 | auth_user = AuthUser(user_id=user.user_id) |
|
719 | 787 | return auth_user.permissions['repository_branches'] |
|
720 | 788 | |
|
721 | 789 | |
|
722 | 790 | def group_perms(user): |
|
723 | 791 | auth_user = AuthUser(user_id=user.user_id) |
|
724 | 792 | return auth_user.permissions['repositories_groups'] |
|
725 | 793 | |
|
726 | 794 | |
|
727 | 795 | def user_group_perms(user): |
|
728 | 796 | auth_user = AuthUser(user_id=user.user_id) |
|
729 | 797 | return auth_user.permissions['user_groups'] |
|
730 | 798 | |
|
731 | 799 | |
|
732 | 800 | def global_perms(user): |
|
733 | 801 | auth_user = AuthUser(user_id=user.user_id) |
|
734 | 802 | return auth_user.permissions['global'] |
|
735 | 803 | |
|
736 | 804 | |
|
737 | 805 | def default_perms(added=None, removed=None): |
|
738 | 806 | expected_perms = set(Permission.DEFAULT_USER_PERMISSIONS) |
|
739 | 807 | if removed: |
|
740 | 808 | expected_perms.difference_update(removed) |
|
741 | 809 | if added: |
|
742 | 810 | expected_perms.update(added) |
|
743 | 811 | return expected_perms |
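
The permission helpers above all read from `AuthUser(user_id=...).permissions`, and `default_perms` builds the expected global set from `Permission.DEFAULT_USER_PERMISSIONS`. A minimal usage sketch (illustrative only, mirroring `test_inherited_permissions_from_default_on_user_disabled` earlier in this file):

    # expected global permissions once repo/fork creation is disabled
    # for the default user
    expected = default_perms(
        added=['hg.create.none', 'hg.fork.none'],
        removed=['hg.create.repository', 'hg.fork.repository'])
    assert global_perms(self.u1) == expected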
@@ -1,881 +1,897 b'' | |||
|
1 | 1 | |
|
2 | 2 | ; ######################################### |
|
3 | 3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
4 | 4 | ; ######################################### |
|
5 | 5 | |
|
6 | 6 | [DEFAULT] |
|
7 | 7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
8 | 8 | debug = true |
|
9 | 9 | |
|
10 | 10 | ; ######################################################################## |
|
11 | 11 | ; EMAIL CONFIGURATION |
|
12 | 12 | ; These settings will be used by the RhodeCode mailing system |
|
13 | 13 | ; ######################################################################## |
|
14 | 14 | |
|
15 | 15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
16 | 16 | #email_prefix = [RhodeCode] |
|
17 | 17 | |
|
18 | 18 | ; email FROM address all mails will be sent |
|
19 | 19 | #app_email_from = rhodecode-noreply@localhost |
|
20 | 20 | |
|
21 | 21 | #smtp_server = mail.server.com |
|
22 | 22 | #smtp_username = |
|
23 | 23 | #smtp_password = |
|
24 | 24 | #smtp_port = |
|
25 | 25 | #smtp_use_tls = false |
|
26 | 26 | #smtp_use_ssl = true |
|
27 | 27 | |
|
28 | 28 | [server:main] |
|
29 | 29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, |
|
30 | 30 | ; Host port for gunicorn are controlled by gunicorn_conf.py |
|
31 | 31 | host = 127.0.0.1 |
|
32 | 32 | port = 10020 |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | ; ########################### |
|
36 | 36 | ; GUNICORN APPLICATION SERVER |
|
37 | 37 | ; ########################### |
|
38 | 38 | |
|
39 | 39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini |
|
40 | 40 | |
|
41 | 41 | ; Module to use, this setting shouldn't be changed |
|
42 | 42 | use = egg:gunicorn#main |
|
43 | 43 | |
|
44 | 44 | ; Prefix middleware for RhodeCode. |
|
45 | 45 | ; recommended when using proxy setup. |
|
46 | 46 | ; allows to set RhodeCode under a prefix in server. |
|
47 | 47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
48 | 48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
49 | 49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
50 | 50 | ; to make your cookies only work on prefix url |
|
51 | 51 | [filter:proxy-prefix] |
|
52 | 52 | use = egg:PasteDeploy#prefix |
|
53 | 53 | prefix = / |
|
54 | 54 | |
|
55 | 55 | [app:main] |
|
56 | 56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
57 | 57 | ; of this file |
|
58 | 58 | ; Each option in the app:main can be override by an environmental variable |
|
59 | 59 | ; |
|
60 | 60 | ;To override an option: |
|
61 | 61 | ; |
|
62 | 62 | ;RC_<KeyName> |
|
63 | 63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
64 | 64 | ;For example, if you have these configuration settings: |
|
65 | 65 | ;rc_cache.repo_object.backend = foo |
|
66 | 66 | ;can be overridden by |
|
67 | 67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
68 | 68 | |
|
69 | 69 | use = egg:rhodecode-enterprise-ce |
|
70 | 70 | |
|
71 | 71 | ; enable proxy prefix middleware, defined above |
|
72 | 72 | #filter-with = proxy-prefix |
|
73 | 73 | |
|
74 | ; control if environmental variables to be expanded into the .ini settings | |
|
75 | rhodecode.env_expand = false | |
|
76 | ||
|
74 | 77 | ; encryption key used to encrypt social plugin tokens, |
|
75 | 78 | ; remote_urls with credentials etc, if not set it defaults to |
|
76 | 79 | ; `beaker.session.secret` |
|
77 | 80 | #rhodecode.encrypted_values.secret = |
|
78 | 81 | |
|
79 | 82 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
80 | 83 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
81 | 84 | #rhodecode.encrypted_values.strict = false |
|
82 | 85 | |
|
83 | 86 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
84 | 87 | ; fernet is safer, and we strongly recommend switching to it. |
|
85 | 88 | ; Due to backward compatibility aes is used as default. |
|
86 | 89 | #rhodecode.encrypted_values.algorithm = fernet |
|
87 | 90 | |
|
88 | 91 | ; Return gzipped responses from RhodeCode (static files/application) |
|
89 | 92 | gzip_responses = false |
|
90 | 93 | |
|
91 | 94 | ; Auto-generate javascript routes file on startup |
|
92 | 95 | generate_js_files = false |
|
93 | 96 | |
|
94 | 97 | ; System global default language. |
|
95 | 98 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
96 | 99 | lang = en |
|
97 | 100 | |
|
98 | 101 | ; Perform a full repository scan and import on each server start. |
|
99 | 102 | ; Settings this to true could lead to very long startup time. |
|
100 | 103 | startup.import_repos = true |
|
101 | 104 | |
|
102 | 105 | ; URL at which the application is running. This is used for Bootstrapping |
|
103 | 106 | ; requests in context when no web request is available. Used in ishell, or |
|
104 | 107 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
105 | 108 | app.base_url = http://rhodecode.local |
|
106 | 109 | |
|
107 | 110 | ; Host at which the Service API is running. |
|
108 | 111 | app.service_api.host = http://rhodecode.local:10020 |
|
109 | 112 | |
|
110 | 113 | ; Secret for Service API authentication. |
|
111 | 114 | app.service_api.token = |
|
112 | 115 | |
|
113 | 116 | ; Unique application ID. Should be a random unique string for security. |
|
114 | 117 | app_instance_uuid = rc-production |
|
115 | 118 | |
|
116 | 119 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
117 | 120 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
118 | 121 | ; partially. E.g 512000 == 512Kb |
|
119 | 122 | cut_off_limit_diff = 1024000 |
|
120 | 123 | |
|
121 | 124 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
122 | 125 | ; file inside diff which exceeds this limit will be displayed partially. |
|
123 | 126 | ; E.g 128000 == 128Kb |
|
124 | 127 | cut_off_limit_file = 256000 |
|
125 | 128 | |
|
126 | 129 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
127 | 130 | vcs_full_cache = false |
|
128 | 131 | |
|
129 | 132 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
130 | 133 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
131 | 134 | force_https = false |
|
132 | 135 | |
|
133 | 136 | ; use Strict-Transport-Security headers |
|
134 | 137 | use_htsts = false |
|
135 | 138 | |
|
136 | 139 | ; Set to true if your repos are exposed using the dumb protocol |
|
137 | 140 | git_update_server_info = false |
|
138 | 141 | |
|
139 | 142 | ; RSS/ATOM feed options |
|
140 | 143 | rss_cut_off_limit = 256000 |
|
141 | 144 | rss_items_per_page = 10 |
|
142 | 145 | rss_include_diff = false |
|
143 | 146 | |
|
144 | 147 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
145 | 148 | ; url that does rewrites to _admin/gists/{gistid}. |
|
146 | 149 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
147 | 150 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
148 | 151 | gist_alias_url = |
|
149 | 152 | |
|
150 | 153 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
151 | 154 | ; used for access. |
|
152 | 155 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
153 | 156 | ; came from the the logged in user who own this authentication token. |
|
154 | 157 | ; Additionally @TOKEN syntax can be used to bound the view to specific |
|
155 | 158 | ; authentication token. Such view would be only accessible when used together |
|
156 | 159 | ; with this authentication token |
|
157 | 160 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
158 | 161 | ; The list should be "," separated and on a single line. |
|
159 | 162 | ; Most common views to enable: |
|
160 | 163 | |
|
161 | 164 | # RepoCommitsView:repo_commit_download |
|
162 | 165 | # RepoCommitsView:repo_commit_patch |
|
163 | 166 | # RepoCommitsView:repo_commit_raw |
|
164 | 167 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
165 | 168 | # RepoFilesView:repo_files_diff |
|
166 | 169 | # RepoFilesView:repo_archivefile |
|
167 | 170 | # RepoFilesView:repo_file_raw |
|
168 | 171 | # GistView:* |
|
169 | 172 | api_access_controllers_whitelist = |
|
170 | 173 | |
|
171 | 174 | ; Default encoding used to convert from and to unicode |
|
172 | 175 | ; can be also a comma separated list of encoding in case of mixed encodings |
|
173 | 176 | default_encoding = UTF-8 |
|
174 | 177 | |
|
175 | 178 | ; instance-id prefix |
|
176 | 179 | ; a prefix key for this instance used for cache invalidation when running |
|
177 | 180 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
178 | 181 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
179 | 182 | instance_id = |
|
180 | 183 | |
|
181 | 184 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
182 | 185 | ; of an authentication plugin also if it is disabled by it's settings. |
|
183 | 186 | ; This could be useful if you are unable to log in to the system due to broken |
|
184 | 187 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
185 | 188 | ; module to log in again and fix the settings. |
|
186 | 189 | ; Available builtin plugin IDs (hash is part of the ID): |
|
187 | 190 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
188 | 191 | ; egg:rhodecode-enterprise-ce#pam |
|
189 | 192 | ; egg:rhodecode-enterprise-ce#ldap |
|
190 | 193 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
191 | 194 | ; egg:rhodecode-enterprise-ce#headers |
|
192 | 195 | ; egg:rhodecode-enterprise-ce#crowd |
|
193 | 196 | |
|
194 | 197 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
195 | 198 | |
|
196 | 199 | ; Flag to control loading of legacy plugins in py:/path format |
|
197 | 200 | auth_plugin.import_legacy_plugins = true |
|
198 | 201 | |
|
199 | 202 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
200 | 203 | ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with |
|
201 | 204 | ; handling that causing a series of failed authentication calls. |
|
202 | 205 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
203 | 206 | ; This will be served instead of default 401 on bad authentication |
|
204 | 207 | auth_ret_code = |
|
205 | 208 | |
|
206 | 209 | ; use special detection method when serving auth_ret_code, instead of serving |
|
207 | 210 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
208 | 211 | ; and then serve auth_ret_code to clients |
|
209 | 212 | auth_ret_code_detection = false |
|
210 | 213 | |
|
211 | 214 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
212 | 215 | ; codes don't break the transactions while 4XX codes do |
|
213 | 216 | lock_ret_code = 423 |
|
214 | 217 | |
|
215 | 218 | ; Filesystem location were repositories should be stored |
|
216 | 219 | repo_store.path = /var/opt/rhodecode_repo_store |
|
217 | 220 | |
|
218 | 221 | ; allows to setup custom hooks in settings page |
|
219 | 222 | allow_custom_hooks_settings = true |
|
220 | 223 | |
|
221 | 224 | ; Generated license token required for EE edition license. |
|
222 | 225 | ; New generated token value can be found in Admin > settings > license page. |
|
223 | 226 | license_token = abra-cada-bra1-rce3 |
|
224 | 227 | |
|
225 | 228 | ; This flag hides sensitive information on the license page such as token, and license data |
|
226 | 229 | license.hide_license_info = false |
|
227 | 230 | |
|
231 | ; Import EE license from this license path | |
|
232 | #license.import_path = %(here)s/rhodecode_enterprise.license | |
|
233 | ||
|
234 | ; import license 'if-missing' or 'force' (always override) | |
|
235 | ; if-missing means apply license if it doesn't exist. 'force' option always overrides it | |
|
236 | license.import_path_mode = if-missing | |
|
237 | ||
|
228 | 238 | ; supervisor connection uri, for managing supervisor and logs. |
|
229 | 239 | supervisor.uri = |
|
230 | 240 | |
|
231 | 241 | ; supervisord group name/id we only want this RC instance to handle |
|
232 | 242 | supervisor.group_id = dev |
|
233 | 243 | |
|
234 | 244 | ; Display extended labs settings |
|
235 | 245 | labs_settings_active = true |
|
236 | 246 | |
|
237 | 247 | ; Custom exception store path, defaults to TMPDIR |
|
238 | 248 | ; This is used to store exception from RhodeCode in shared directory |
|
239 | 249 | #exception_tracker.store_path = |
|
240 | 250 | |
|
241 | 251 | ; Send email with exception details when it happens |
|
242 | 252 | #exception_tracker.send_email = false |
|
243 | 253 | |
|
244 | 254 | ; Comma separated list of recipients for exception emails, |
|
245 | 255 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
246 | 256 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
247 | 257 | #exception_tracker.send_email_recipients = |
|
248 | 258 | |
|
249 | 259 | ; optional prefix to Add to email Subject |
|
250 | 260 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
251 | 261 | |
|
252 | 262 | ; NOTE: this setting IS DEPRECATED: |
|
253 | 263 | ; file_store backend is always enabled |
|
254 | 264 | #file_store.enabled = true |
|
255 | 265 | |
|
256 | 266 | ; NOTE: this setting IS DEPRECATED: |
|
257 | 267 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
258 | 268 | ; Storage backend, available options are: local |
|
259 | 269 | #file_store.backend = local |
|
260 | 270 | |
|
261 | 271 | ; NOTE: this setting IS DEPRECATED: |
|
262 | 272 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
263 | 273 | ; path to store the uploaded binaries and artifacts |
|
264 | 274 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
265 | 275 | |
|
266 | 276 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. |
|
267 | 277 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options |
|
268 | 278 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes |
|
269 | 279 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from |
|
270 | 280 | ; previous installations to keep the artifacts without a need of migration |
|
271 | 281 | file_store.backend.type = filesystem_v1 |
|
272 | 282 | |
|
273 | 283 | ; filesystem options... |
|
274 | 284 | file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store |
|
275 | 285 | |
|
276 | 286 | ; filesystem_v2 options... |
|
277 | 287 | file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store_2 |
|
278 | 288 | file_store.filesystem_v2.shards = 8 |
|
279 | 289 | |
|
280 | 290 | ; objectstore options... |
|
281 | 291 | ; url for s3 compatible storage that allows to upload artifacts |
|
282 | 292 | ; e.g http://minio:9000 |
|
283 | 293 | #file_store.backend.type = objectstore |
|
284 | 294 | file_store.objectstore.url = http://s3-minio:9000 |
|
285 | 295 | |
|
286 | 296 | ; a top-level bucket to put all other shards in |
|
287 | 297 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
288 | 298 | file_store.objectstore.bucket = rhodecode-file-store-tests |
|
289 | 299 | |
|
290 | 300 | ; number of sharded buckets to create to distribute archives across |
|
291 | 301 | ; default is 8 shards |
|
292 | 302 | file_store.objectstore.bucket_shards = 8 |
|
293 | 303 | |
|
294 | 304 | ; key for s3 auth |
|
295 | 305 | file_store.objectstore.key = s3admin |
|
296 | 306 | |
|
297 | 307 | ; secret for s3 auth |
|
298 | 308 | file_store.objectstore.secret = s3secret4 |
|
299 | 309 | |
|
300 | 310 | ;region for s3 storage |
|
301 | 311 | file_store.objectstore.region = eu-central-1 |
|
302 | 312 | |
|
303 | 313 | ; Redis url to acquire/check generation of archives locks |
|
304 | 314 | archive_cache.locking.url = redis://redis:6379/1 |
|
305 | 315 | |
|
306 | 316 | ; Storage backend, only 'filesystem' and 'objectstore' are available now |
|
307 | 317 | archive_cache.backend.type = filesystem |
|
308 | 318 | |
|
309 | 319 | ; url for s3 compatible storage that allows to upload artifacts |
|
310 | 320 | ; e.g http://minio:9000 |
|
311 | 321 | archive_cache.objectstore.url = http://s3-minio:9000 |
|
312 | 322 | |
|
313 | 323 | ; key for s3 auth |
|
314 | 324 | archive_cache.objectstore.key = key |
|
315 | 325 | |
|
316 | 326 | ; secret for s3 auth |
|
317 | 327 | archive_cache.objectstore.secret = secret |
|
318 | 328 | |
|
319 | 329 | ;region for s3 storage |
|
320 | 330 | archive_cache.objectstore.region = eu-central-1 |
|
321 | 331 | |
|
322 | 332 | ; number of sharded buckets to create to distribute archives across |
|
323 | 333 | ; default is 8 shards |
|
324 | 334 | archive_cache.objectstore.bucket_shards = 8 |
|
325 | 335 | |
|
326 | 336 | ; a top-level bucket to put all other shards in |
|
327 | 337 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number |
|
328 | 338 | archive_cache.objectstore.bucket = rhodecode-archive-cache |
|
329 | 339 | |
|
330 | 340 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time |
|
331 | 341 | archive_cache.objectstore.retry = false |
|
332 | 342 | |
|
333 | 343 | ; number of seconds to wait for next try using retry |
|
334 | 344 | archive_cache.objectstore.retry_backoff = 1 |
|
335 | 345 | |
|
336 | 346 | ; how many tries do do a retry fetch from this backend |
|
337 | 347 | archive_cache.objectstore.retry_attempts = 10 |
|
338 | 348 | |
|
339 | 349 | ; Default is $cache_dir/archive_cache if not set |
|
340 | 350 | ; Generated repo archives will be cached at this location |
|
341 | 351 | ; and served from the cache during subsequent requests for the same archive of |
|
342 | 352 | ; the repository. This path is important to be shared across filesystems and with |
|
343 | 353 | ; RhodeCode and vcsserver |
|
344 | 354 | archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache |
|
345 | 355 | |
|
346 | 356 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb |
|
347 | 357 | archive_cache.filesystem.cache_size_gb = 2 |
|
348 | 358 | |
|
349 | 359 | ; Eviction policy used to clear out after cache_size_gb limit is reached |
|
350 | 360 | archive_cache.filesystem.eviction_policy = least-recently-stored |
|
351 | 361 | |
|
352 | 362 | ; By default cache uses sharding technique, this specifies how many shards are there |
|
353 | 363 | ; default is 8 shards |
|
354 | 364 | archive_cache.filesystem.cache_shards = 8 |
|
355 | 365 | |
|
356 | 366 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time |
|
357 | 367 | archive_cache.filesystem.retry = false |
|
358 | 368 | |
|
359 | 369 | ; number of seconds to wait for next try using retry |
|
360 | 370 | archive_cache.filesystem.retry_backoff = 1 |
|
361 | 371 | |
|
362 | 372 | ; how many tries do do a retry fetch from this backend |
|
363 | 373 | archive_cache.filesystem.retry_attempts = 10 |
|
364 | 374 | |
|
365 | 375 | |
|
366 | 376 | ; ############# |
|
367 | 377 | ; CELERY CONFIG |
|
368 | 378 | ; ############# |
|
369 | 379 | |
|
370 | 380 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
371 | 381 | |
|
372 | 382 | use_celery = false |
|
373 | 383 | |
|
374 | 384 | ; path to store schedule database |
|
375 | 385 | #celerybeat-schedule.path = |
|
376 | 386 | |
|
377 | 387 | ; connection url to the message broker (default redis) |
|
378 | 388 | celery.broker_url = redis://redis:6379/8 |
|
379 | 389 | |
|
380 | 390 | ; results backend to get results for (default redis) |
|
381 | 391 | celery.result_backend = redis://redis:6379/8 |
|
382 | 392 | |
|
383 | 393 | ; rabbitmq example |
|
384 | 394 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
385 | 395 | |
|
386 | 396 | ; maximum tasks to execute before worker restart |
|
387 | 397 | celery.max_tasks_per_child = 20 |
|
388 | 398 | |
|
389 | 399 | ; tasks will never be sent to the queue, but executed locally instead. |
|
390 | 400 | celery.task_always_eager = true |
|
391 | 401 | celery.task_store_eager_result = true |
|
392 | 402 | |
|
393 | 403 | ; ############# |
|
394 | 404 | ; DOGPILE CACHE |
|
395 | 405 | ; ############# |
|
396 | 406 | |
|
397 | 407 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
398 | 408 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space |
|
399 | 409 | cache_dir = %(here)s/rc-test-data |
|
400 | 410 | |
|
401 | 411 | ; ********************************************* |
|
402 | 412 | ; `sql_cache_short` cache for heavy SQL queries |
|
403 | 413 | ; Only supported backend is `memory_lru` |
|
404 | 414 | ; ********************************************* |
|
405 | 415 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
406 | 416 | rc_cache.sql_cache_short.expiration_time = 0 |
|
407 | 417 | |
|
408 | 418 | |
|
409 | 419 | ; ***************************************************** |
|
410 | 420 | ; `cache_repo_longterm` cache for repo object instances |
|
411 | 421 | ; Only supported backend is `memory_lru` |
|
412 | 422 | ; ***************************************************** |
|
413 | 423 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
414 | 424 | ; by default we use 30 days (2592000 = 30 * 24 * 3600 seconds); the cache is still invalidated on push |
|
415 | 425 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
416 | 426 | ; max items in the LRU cache; set to a smaller number to save memory and expire the least recently used entries |
|
417 | 427 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
418 | 428 | |
|
419 | 429 | |
|
420 | 430 | ; ********************************************* |
|
421 | 431 | ; `cache_general` cache for general purpose use |
|
422 | 432 | ; for simplicity use rc.file_namespace backend, |
|
423 | 433 | ; for performance and scale use rc.redis |
|
424 | 434 | ; ********************************************* |
|
425 | 435 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
426 | 436 | rc_cache.cache_general.expiration_time = 43200 |
|
427 | 437 | ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set |
|
428 | 438 | rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db |
|
429 | 439 | |
|
430 | 440 | ; alternative `cache_general` redis backend with distributed lock |
|
431 | 441 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
432 | 442 | #rc_cache.cache_general.expiration_time = 300 |
|
433 | 443 | |
|
434 | 444 | ; redis_expiration_time needs to be greater than expiration_time |
|
435 | 445 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
436 | 446 | |
|
437 | 447 | #rc_cache.cache_general.arguments.host = localhost |
|
438 | 448 | #rc_cache.cache_general.arguments.port = 6379 |
|
439 | 449 | #rc_cache.cache_general.arguments.db = 0 |
|
440 | 450 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
441 | 451 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
442 | 452 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
443 | 453 | |
|
444 | 454 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
445 | 455 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
446 | 456 | |
|
447 | 457 | ; ************************************************* |
|
448 | 458 | ; `cache_perms` cache for permission tree, auth TTL |
|
449 | 459 | ; for simplicity use rc.file_namespace backend, |
|
450 | 460 | ; for performance and scale use rc.redis |
|
451 | 461 | ; ************************************************* |
|
452 | 462 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
453 | 463 | rc_cache.cache_perms.expiration_time = 0 |
|
454 | 464 | ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set |
|
455 | 465 | rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db |
|
456 | 466 | |
|
457 | 467 | ; alternative `cache_perms` redis backend with distributed lock |
|
458 | 468 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
459 | 469 | #rc_cache.cache_perms.expiration_time = 300 |
|
460 | 470 | |
|
461 | 471 | ; redis_expiration_time needs to be greater than expiration_time |
|
462 | 472 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
463 | 473 | |
|
464 | 474 | #rc_cache.cache_perms.arguments.host = localhost |
|
465 | 475 | #rc_cache.cache_perms.arguments.port = 6379 |
|
466 | 476 | #rc_cache.cache_perms.arguments.db = 0 |
|
467 | 477 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
468 | 478 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
469 | 479 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
470 | 480 | |
|
471 | 481 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
472 | 482 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
473 | 483 | |
|
474 | 484 | ; *************************************************** |
|
475 | 485 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
476 | 486 | ; for simplicity use rc.file_namespace backend, |
|
477 | 487 | ; for performance and scale use rc.redis |
|
478 | 488 | ; *************************************************** |
|
479 | 489 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
480 | 490 | rc_cache.cache_repo.expiration_time = 2592000 |
|
481 | 491 | ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set |
|
482 | 492 | rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db |
|
483 | 493 | |
|
484 | 494 | ; alternative `cache_repo` redis backend with distributed lock |
|
485 | 495 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
486 | 496 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
487 | 497 | |
|
488 | 498 | ; redis_expiration_time needs to be greater than expiration_time |
|
489 | 499 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
490 | 500 | |
|
491 | 501 | #rc_cache.cache_repo.arguments.host = localhost |
|
492 | 502 | #rc_cache.cache_repo.arguments.port = 6379 |
|
493 | 503 | #rc_cache.cache_repo.arguments.db = 1 |
|
494 | 504 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
495 | 505 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
496 | 506 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
497 | 507 | |
|
498 | 508 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
499 | 509 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
500 | 510 | |
|
501 | 511 | ; ############## |
|
502 | 512 | ; BEAKER SESSION |
|
503 | 513 | ; ############## |
|
504 | 514 | |
|
505 | 515 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed |
|
506 | 516 | ; types are file, ext:redis, ext:database, ext:memcached |
|
507 | 517 | ; The fastest are ext:redis and ext:database; DO NOT use the memory type for sessions |
|
508 | 518 | beaker.session.type = file |
|
509 | 519 | beaker.session.data_dir = %(here)s/rc-tests/data/sessions |
|
510 | 520 | |
|
511 | 521 | ; Redis based sessions |
|
512 | 522 | #beaker.session.type = ext:redis |
|
513 | 523 | #beaker.session.url = redis://redis:6379/2 |
|
514 | 524 | |
|
515 | 525 | ; DB based sessions are fast and allow easy management of logged-in users |
|
516 | 526 | #beaker.session.type = ext:database |
|
517 | 527 | #beaker.session.table_name = db_session |
|
518 | 528 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
519 | 529 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
520 | 530 | #beaker.session.sa.pool_recycle = 3600 |
|
521 | 531 | #beaker.session.sa.echo = false |
|
522 | 532 | |
|
523 | 533 | beaker.session.key = rhodecode |
|
524 | 534 | beaker.session.secret = test-rc-uytcxaz |
|
525 | 535 | beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock |
|
526 | 536 | |
|
527 | 537 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
528 | 538 | ; you must disable beaker.session.secret to use this |
|
529 | 539 | #beaker.session.encrypt_key = key_for_encryption |
|
530 | 540 | #beaker.session.validate_key = validation_key |
|
531 | 541 | |
|
532 | 542 | ; Sets the session as invalid (also logging out the user) if it has not been |
|
533 | 543 | ; accessed for given amount of time in seconds |
|
534 | 544 | beaker.session.timeout = 2592000 |
|
535 | 545 | beaker.session.httponly = true |
|
536 | 546 | |
|
537 | 547 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
538 | 548 | #beaker.session.cookie_path = /custom_prefix |
|
539 | 549 | |
|
540 | 550 | ; Set https secure cookie |
|
541 | 551 | beaker.session.secure = false |
|
542 | 552 | |
|
543 | 553 | ; default cookie expiration time in seconds; set to `true` to expire |
|
544 | 554 | ; at browser close |
|
545 | 555 | #beaker.session.cookie_expires = 3600 |
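; Illustrative example (not part of the original file): an HTTPS-only
; deployment would typically mark the cookie secure and expire it at
; browser close, per the options documented above:
#beaker.session.secure = true
#beaker.session.cookie_expires = true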
|
546 | 556 | |
|
547 | 557 | ; ############################# |
|
548 | 558 | ; SEARCH INDEXING CONFIGURATION |
|
549 | 559 | ; ############################# |
|
550 | 560 | |
|
551 | 561 | ; Full text search indexer is available in rhodecode-tools under |
|
552 | 562 | ; `rhodecode-tools index` command |
|
553 | 563 | |
|
554 | 564 | ; WHOOSH Backend, doesn't require additional services to run |
|
555 | 565 | ; it works well with a few dozen repos |
|
556 | 566 | search.module = rhodecode.lib.index.whoosh |
|
557 | 567 | search.location = %(here)s/rc-tests/data/index |
|
558 | 568 | |
|
559 | 569 | ; #################### |
|
560 | 570 | ; CHANNELSTREAM CONFIG |
|
561 | 571 | ; #################### |
|
562 | 572 | |
|
563 | 573 | ; channelstream enables persistent connections and live notifications |
|
564 | 574 | ; in the system. It's also used by the chat system |
|
565 | 575 | |
|
566 | 576 | channelstream.enabled = false |
|
567 | 577 | |
|
568 | 578 | ; server address for channelstream server on the backend |
|
569 | 579 | channelstream.server = channelstream:9800 |
|
570 | 580 | |
|
571 | 581 | ; location of the channelstream server from outside world |
|
572 | 582 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
573 | 583 | ; by an external HTTP server such as Nginx or Apache |
|
574 | 584 | ; see Nginx/Apache configuration examples in our docs |
|
575 | 585 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
576 | 586 | channelstream.secret = ENV_GENERATED |
|
577 | 587 | channelstream.history.location = %(here)s/rc-tests/channelstream_history |
|
578 | 588 | |
|
579 | 589 | ; Internal application path that JavaScript uses to connect to. |
|
580 | 590 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
581 | 591 | channelstream.proxy_path = /_channelstream |
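; Illustrative example (not part of the original file): enabling live
; notifications behind an HTTPS proxy; the hostname is a placeholder.
#channelstream.enabled = true
#channelstream.ws_url = wss://rhodecode.example.com/_channelstream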
|
582 | 592 | |
|
583 | 593 | |
|
584 | 594 | ; ############################## |
|
585 | 595 | ; MAIN RHODECODE DATABASE CONFIG |
|
586 | 596 | ; ############################## |
|
587 | 597 | |
|
588 | 598 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
589 | 599 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
590 | 600 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
591 | 601 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
592 | 602 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
593 | 603 | |
|
594 | 604 | sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30 |
|
595 | 605 | |
|
596 | 606 | ; see sqlalchemy docs for other advanced settings |
|
597 | 607 | ; print the sql statements to output |
|
598 | 608 | sqlalchemy.db1.echo = false |
|
599 | 609 | |
|
600 | 610 | ; recycle the connections after this amount of seconds |
|
601 | 611 | sqlalchemy.db1.pool_recycle = 3600 |
|
602 | 612 | |
|
603 | 613 | ; the number of connections to keep open inside the connection pool. |
|
604 | 614 | ; 0 indicates no limit |
|
605 | 615 | ; the general calculation with gevent is (see the worked example below): |
|
606 | 616 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
607 | 617 | ; then increase pool size + max overflow so that they add up to 500. |
|
608 | 618 | #sqlalchemy.db1.pool_size = 5 |
|
609 | 619 | |
|
610 | 620 | ; The number of connections to allow in connection pool "overflow", that is |
|
611 | 621 | ; connections that can be opened above and beyond the pool_size setting, |
|
612 | 622 | ; which defaults to five. |
|
613 | 623 | #sqlalchemy.db1.max_overflow = 10 |
|
614 | 624 | |
|
615 | 625 | ; Connection check ping, used to detect broken database connections |
|
616 | 626 | ; can be enabled to better handle "MySQL server has gone away" errors |
|
617 | 627 | #sqlalchemy.db1.ping_connection = true |
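; Worked example (not part of the original file): following the gevent rule
; above, for 500 concurrent greenlets doing database access, pick values so
; that pool_size + max_overflow = 500:
#sqlalchemy.db1.pool_size = 100
#sqlalchemy.db1.max_overflow = 400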
|
618 | 628 | |
|
619 | 629 | ; ########## |
|
620 | 630 | ; VCS CONFIG |
|
621 | 631 | ; ########## |
|
622 | 632 | vcs.server.enable = true |
|
623 | 633 | vcs.server = vcsserver:10010 |
|
624 | 634 | |
|
625 | 635 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
626 | 636 | ; Available protocols are: |
|
627 | 637 | ; `http` - use http-rpc backend (default) |
|
628 | 638 | vcs.server.protocol = http |
|
629 | 639 | |
|
630 | 640 | ; Push/Pull operations protocol, available options are: |
|
631 | 641 | ; `http` - use http-rpc backend (default) |
|
632 | 642 | vcs.scm_app_implementation = http |
|
633 | 643 | |
|
634 | 644 | ; Push/Pull operations hooks protocol, available options are: |
|
635 | 645 | ; `http` - use http-rpc backend (default) |
|
636 | 646 | ; `celery` - use celery based hooks |
|
637 | 647 | #DEPRECATED:vcs.hooks.protocol = http |
|
638 | 648 | vcs.hooks.protocol = http |
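; Illustrative example (not part of the original file): hooks can instead be
; dispatched through the Celery queue; this assumes use_celery = true and
; running workers:
#vcs.hooks.protocol = celery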
|
639 | 649 | |
|
640 | 650 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
641 | 651 | ; accessible over the network. |
|
642 | 652 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
643 | 653 | vcs.hooks.host = * |
|
644 | 654 | |
|
645 | 655 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
646 | 656 | vcs.start_server = false |
|
647 | 657 | |
|
648 | 658 | ; List of enabled VCS backends, available options are: |
|
649 | 659 | ; `hg` - mercurial |
|
650 | 660 | ; `git` - git |
|
651 | 661 | ; `svn` - subversion |
|
652 | 662 | vcs.backends = hg, git, svn |
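; Illustrative example (not part of the original file): serve a single
; backend by trimming the list, e.g. Git-only:
#vcs.backends = git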
|
653 | 663 | |
|
654 | 664 | ; Wait this number of seconds before killing connection to the vcsserver |
|
655 | 665 | vcs.connection_timeout = 3600 |
|
656 | 666 | |
|
657 | 667 | ; Cache flag to cache vcsserver remote calls locally |
|
658 | 668 | ; It uses cache_region `cache_repo` |
|
659 | 669 | vcs.methods.cache = false |
|
660 | 670 | |
|
671 | ; Filesystem location where Git lfs objects should be stored | |
|
672 | vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store | |
|
673 | ||
|
674 | ; Filesystem location where Mercurial largefile objects should be stored | |
|
675 | vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store | |
|
676 | ||
|
661 | 677 | ; #################################################### |
|
662 | 678 | ; Subversion proxy support (mod_dav_svn) |
|
663 | 679 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
664 | 680 | ; #################################################### |
|
665 | 681 | |
|
666 | 682 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
667 | 683 | ; Set a numeric version matching your current SVN, e.g. 1.8 or 1.12 |
|
668 | 684 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
669 | 685 | #vcs.svn.compatible_version = 1.8 |
|
670 | 686 | |
|
671 | 687 | ; Redis connection settings for svn integrations logic |
|
672 | 688 | ; This connection string needs to be the same on ce and vcsserver |
|
673 | 689 | vcs.svn.redis_conn = redis://redis:6379/0 |
|
674 | 690 | |
|
675 | 691 | ; Enable SVN proxy of requests over HTTP |
|
676 | 692 | vcs.svn.proxy.enabled = true |
|
677 | 693 | |
|
678 | 694 | ; host to connect to running SVN subsystem |
|
679 | 695 | vcs.svn.proxy.host = http://svn:8090 |
|
680 | 696 | |
|
681 | 697 | ; Enable or disable the config file generation. |
|
682 | 698 | svn.proxy.generate_config = false |
|
683 | 699 | |
|
684 | 700 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
685 | 701 | svn.proxy.list_parent_path = true |
|
686 | 702 | |
|
687 | 703 | ; Set location and file name of generated config file. |
|
688 | 704 | svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf |
|
689 | 705 | |
|
690 | 706 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
691 | 707 | ; Example template can be found in the source code: |
|
692 | 708 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
693 | 709 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
694 | 710 | |
|
695 | 711 | ; Used as a prefix to the `Location` block in the generated config file. |
|
696 | 712 | ; In most cases it should be set to `/`. |
|
697 | 713 | svn.proxy.location_root = / |
|
698 | 714 | |
|
699 | 715 | ; Command to reload the mod dav svn configuration on change. |
|
700 | 716 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
701 | 717 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
702 | 718 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
703 | 719 | |
|
704 | 720 | ; If the timeout expires before the reload command finishes, the command will |
|
705 | 721 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
706 | 722 | #svn.proxy.reload_timeout = 10 |
|
707 | 723 | |
|
708 | 724 | ; #################### |
|
709 | 725 | ; SSH Support Settings |
|
710 | 726 | ; #################### |
|
711 | 727 | |
|
712 | 728 | ; Defines if a custom authorized_keys file should be created and written on |
|
713 | 729 | ; any change to user SSH keys. Setting this to false also disables the possibility |
|
714 | 730 | ; of users adding SSH keys from the web interface. Super admins can still |
|
715 | 731 | ; manage SSH Keys. |
|
716 | 732 | ssh.generate_authorized_keyfile = true |
|
717 | 733 | |
|
718 | 734 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
719 | 735 | # ssh.authorized_keys_ssh_opts = |
|
720 | 736 | |
|
721 | 737 | ; Path to the authorized_keys file where the generated entries are placed. |
|
722 | 738 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
723 | 739 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
724 | 740 | ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode |
|
725 | 741 | |
|
726 | 742 | ; Command to execute the SSH wrapper. The binary is available in the |
|
727 | 743 | ; RhodeCode installation directory. |
|
728 | 744 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
729 | 745 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
730 | 746 | #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
731 | 747 | ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
732 | 748 | |
|
733 | 749 | ; Allow shell when executing the ssh-wrapper command |
|
734 | 750 | ssh.wrapper_cmd_allow_shell = false |
|
735 | 751 | |
|
736 | 752 | ; Enables logging and detailed output sent back to the client during SSH |
|
737 | 753 | ; operations. Useful for debugging, shouldn't be used in production. |
|
738 | 754 | ssh.enable_debug_logging = true |
|
739 | 755 | |
|
740 | 756 | ; Paths to binary executables; by default these are just the names, but we can |
|
741 | 757 | ; override them if we want to use a custom one |
|
742 | 758 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
743 | 759 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
744 | 760 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
745 | 761 | |
|
746 | 762 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
747 | 763 | ; to add their own keys. |
|
748 | 764 | ssh.enable_ui_key_generator = true |
|
749 | 765 | |
|
750 | 766 | ; Statsd client config, this is used to send metrics to statsd |
|
751 | 767 | ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus |
|
752 | 768 | #statsd.enabled = false |
|
753 | 769 | #statsd.statsd_host = 0.0.0.0 |
|
754 | 770 | #statsd.statsd_port = 8125 |
|
755 | 771 | #statsd.statsd_prefix = |
|
756 | 772 | #statsd.statsd_ipv6 = false |
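; Illustrative example (not part of the original file): shipping metrics to a
; local statsd-compatible daemon; host and port are assumptions for a
; sidecar statsd_exporter.
#statsd.enabled = true
#statsd.statsd_host = 127.0.0.1
#statsd.statsd_port = 9125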
|
757 | 773 | |
|
758 | 774 | ; configure logging automatically at server startup; set to false |
|
759 | 775 | ; to use the custom logging config below. |
|
760 | 776 | ; RC_LOGGING_FORMATTER |
|
761 | 777 | ; RC_LOGGING_LEVEL |
|
762 | 778 | ; these env variables control the logging settings when autoconfigure is enabled |
|
763 | 779 | |
|
764 | 780 | logging.autoconfigure = false |
|
765 | 781 | |
|
766 | 782 | ; specify your own custom logging config file to configure logging |
|
767 | 783 | #logging.logging_conf_file = /path/to/custom_logging.ini |
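; Illustrative example (not part of the original file): with
; logging.autoconfigure = true, the env variables named above can tune
; logging without editing this file, e.g. exported in the service unit:
;   RC_LOGGING_LEVEL=DEBUG
;   RC_LOGGING_FORMATTER=json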
|
768 | 784 | |
|
769 | 785 | ; Dummy marker to add new entries after. |
|
770 | 786 | ; Add any custom entries below. Please don't remove this marker. |
|
771 | 787 | custom.conf = 1 |
|
772 | 788 | |
|
773 | 789 | |
|
774 | 790 | ; ##################### |
|
775 | 791 | ; LOGGING CONFIGURATION |
|
776 | 792 | ; ##################### |
|
777 | 793 | |
|
778 | 794 | [loggers] |
|
779 | 795 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile |
|
780 | 796 | |
|
781 | 797 | [handlers] |
|
782 | 798 | keys = console, console_sql |
|
783 | 799 | |
|
784 | 800 | [formatters] |
|
785 | 801 | keys = generic, json, color_formatter, color_formatter_sql |
|
786 | 802 | |
|
787 | 803 | ; ####### |
|
788 | 804 | ; LOGGERS |
|
789 | 805 | ; ####### |
|
790 | 806 | [logger_root] |
|
791 | 807 | level = NOTSET |
|
792 | 808 | handlers = console |
|
793 | 809 | |
|
794 | 810 | [logger_routes] |
|
795 | 811 | level = DEBUG |
|
796 | 812 | handlers = |
|
797 | 813 | qualname = routes.middleware |
|
798 | 814 | ## "level = DEBUG" logs the route matched and routing variables. |
|
799 | 815 | propagate = 1 |
|
800 | 816 | |
|
801 | 817 | [logger_sqlalchemy] |
|
802 | 818 | level = INFO |
|
803 | 819 | handlers = console_sql |
|
804 | 820 | qualname = sqlalchemy.engine |
|
805 | 821 | propagate = 0 |
|
806 | 822 | |
|
807 | 823 | [logger_beaker] |
|
808 | 824 | level = DEBUG |
|
809 | 825 | handlers = |
|
810 | 826 | qualname = beaker.container |
|
811 | 827 | propagate = 1 |
|
812 | 828 | |
|
813 | 829 | [logger_dogpile] |
|
814 | 830 | level = INFO |
|
815 | 831 | handlers = console |
|
816 | 832 | qualname = dogpile |
|
817 | 833 | propagate = 1 |
|
818 | 834 | |
|
819 | 835 | [logger_rhodecode] |
|
820 | 836 | level = DEBUG |
|
821 | 837 | handlers = |
|
822 | 838 | qualname = rhodecode |
|
823 | 839 | propagate = 1 |
|
824 | 840 | |
|
825 | 841 | [logger_ssh_wrapper] |
|
826 | 842 | level = DEBUG |
|
827 | 843 | handlers = |
|
828 | 844 | qualname = ssh_wrapper |
|
829 | 845 | propagate = 1 |
|
830 | 846 | |
|
831 | 847 | [logger_celery] |
|
832 | 848 | level = DEBUG |
|
833 | 849 | handlers = |
|
834 | 850 | qualname = celery |
|
835 | 851 | |
|
836 | 852 | |
|
837 | 853 | ; ######## |
|
838 | 854 | ; HANDLERS |
|
839 | 855 | ; ######## |
|
840 | 856 | |
|
841 | 857 | [handler_console] |
|
842 | 858 | class = StreamHandler |
|
843 | 859 | args = (sys.stderr, ) |
|
844 | 860 | level = DEBUG |
|
845 | 861 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
846 | 862 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
847 | 863 | formatter = generic |
|
848 | 864 | |
|
849 | 865 | [handler_console_sql] |
|
850 | 866 | ; "level = DEBUG" logs SQL queries and results. |
|
851 | 867 | ; "level = INFO" logs SQL queries. |
|
852 | 868 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
853 | 869 | class = StreamHandler |
|
854 | 870 | args = (sys.stderr, ) |
|
855 | 871 | level = WARN |
|
856 | 872 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
857 | 873 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
858 | 874 | formatter = generic |
|
859 | 875 | |
|
860 | 876 | ; ########## |
|
861 | 877 | ; FORMATTERS |
|
862 | 878 | ; ########## |
|
863 | 879 | |
|
864 | 880 | [formatter_generic] |
|
865 | 881 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
866 | 882 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
867 | 883 | datefmt = %Y-%m-%d %H:%M:%S |
|
868 | 884 | |
|
869 | 885 | [formatter_color_formatter] |
|
870 | 886 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
871 | 887 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
872 | 888 | datefmt = %Y-%m-%d %H:%M:%S |
|
873 | 889 | |
|
874 | 890 | [formatter_color_formatter_sql] |
|
875 | 891 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
876 | 892 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
877 | 893 | datefmt = %Y-%m-%d %H:%M:%S |
|
878 | 894 | |
|
879 | 895 | [formatter_json] |
|
880 | 896 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
881 | 897 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |