@@ -0,0 +1,40 @@
|RCE| 5.2.1 |RNS|
-----------------

Release Date
^^^^^^^^^^^^

- 2024-09-16


New Features
^^^^^^^^^^^^



General
^^^^^^^



Security
^^^^^^^^



Performance
^^^^^^^^^^^




Fixes
^^^^^

- Fixed problems with incorrect user agent errors


Upgrade notes
^^^^^^^^^^^^^

- RhodeCode 5.2.1 is an unscheduled bugfix release to address some build issues with 5.2 images
@@ -0,0 +1,45 @@
|RCE| 5.3.0 |RNS|
-----------------

Release Date
^^^^^^^^^^^^

- 2024-09-17


New Features
^^^^^^^^^^^^

- System-info: expose the rhodecode config for better visibility of the settings applied to a RhodeCode system.


General
^^^^^^^



Security
^^^^^^^^

- Permissions: fixed a security problem where the apply-to-children functionality of a repo group broke
  permissions for private repositories, exposing them despite the repository being private.
- Git-lfs: fixed a security problem that allowed off-chain attacks to replace OID data without validating the hash of already present OIDs.
  This allowed replacing LFS OID content with a malicious request tailored to an open RhodeCode server.


Performance
^^^^^^^^^^^




Fixes
^^^^^

- Fixed problems with incorrect user agent errors


Upgrade notes
^^^^^^^^^^^^^

- RhodeCode 5.3.0 is an unscheduled security release to address some build issues with 5.X images
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 5.
+current_version = 5.3.0
 message = release: Bump version {current_version} to {new_version}

 [bumpversion:file:rhodecode/VERSION]
@@ -1,912 +1,915 @@

; #########################################
; RHODECODE COMMUNITY EDITION CONFIGURATION
; #########################################

[DEFAULT]
; Debug flag sets all loggers to debug, and enables request tracking
debug = true

; ########################################################################
; EMAIL CONFIGURATION
; These settings will be used by the RhodeCode mailing system
; ########################################################################

; prefix all email subjects with the given prefix, helps filtering out emails
#email_prefix = [RhodeCode]

; email FROM address all mails will be sent from
#app_email_from = rhodecode-noreply@localhost

#smtp_server = mail.server.com
#smtp_username =
#smtp_password =
#smtp_port =
#smtp_use_tls = false
#smtp_use_ssl = true

[server:main]
; COMMON HOST/IP CONFIG, this applies mostly to the develop setup,
; host and port for gunicorn are controlled by gunicorn_conf.py
host = 127.0.0.1
port = 10020


; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
use = egg:gunicorn#main

; Prefix middleware for RhodeCode.
; recommended when using a proxy setup.
; allows setting RhodeCode under a prefix on the server,
; eg https://server.com/custom_prefix. Enable the `filter-with =` option below as well.
; And set your prefix like: `prefix = /custom_prefix`
; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
; to make your cookies only work on the prefix url
[filter:proxy-prefix]
use = egg:PasteDeploy#prefix
prefix = /

[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent
; directory of this file
; Each option in app:main can be overridden by an environment variable
;
;To override an option:
;
;RC_<KeyName>
;Everything should be uppercase, . and - should be replaced by _.
;For example, if you have these configuration settings:
;rc_cache.repo_object.backend = foo
;can be overridden by
;export RC_CACHE_REPO_OBJECT_BACKEND=foo

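; a further sketch of the same rule, reusing a key defined later in this file:
;celery.broker_url = redis://redis:6379/8
;could be overridden by
;export RC_CELERY_BROKER_URL=redis://redis:6379/8
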
use = egg:rhodecode-enterprise-ce

; enable proxy prefix middleware, defined above
#filter-with = proxy-prefix

; control if environment variables are to be expanded into the .ini settings
#rhodecode.env_expand = true

; #############
; DEBUG OPTIONS
; #############

pyramid.reload_templates = true

# During development we want to have the debug toolbar enabled
pyramid.includes =
    pyramid_debugtoolbar

debugtoolbar.hosts = 0.0.0.0/0
debugtoolbar.exclude_prefixes =
    /css
    /fonts
    /images
    /js

## RHODECODE PLUGINS ##
rhodecode.includes =
    rhodecode.api


# api prefix url
rhodecode.api.url = /_admin/api

; enable debug style page
debug_style = true

; #################
; END DEBUG OPTIONS
; #################

; encryption key used to encrypt social plugin tokens,
; remote_urls with credentials etc, if not set it defaults to
; `beaker.session.secret`
#rhodecode.encrypted_values.secret =

; decryption strict mode (enabled by default). It controls if decryption raises
; `SignatureVerificationError` in case of a wrong key or damaged encryption data.
#rhodecode.encrypted_values.strict = false

; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
; fernet is safer, and we strongly recommend switching to it.
; Due to backward compatibility aes is used as default.
#rhodecode.encrypted_values.algorithm = fernet

; Return gzipped responses from RhodeCode (static files/application)
gzip_responses = false

; Auto-generate javascript routes file on startup
generate_js_files = false

; System global default language.
; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
lang = en

; Perform a full repository scan and import on each server start.
; Setting this to true could lead to very long startup time.
startup.import_repos = false

; URL at which the application is running. This is used for bootstrapping
; requests in context when no web request is available. Used in ishell, or
; SSH calls. Set this for events to receive a proper url for SSH calls.
app.base_url = http://rhodecode.local

; Host at which the Service API is running.
app.service_api.host = http://rhodecode.local:10020

; Secret for Service API authentication.
app.service_api.token =

; Unique application ID. Should be a random unique string for security.
app_instance_uuid = rc-production

; Cut off limit for large diffs (size in bytes). If the overall diff size on a
; commit or pull request exceeds this limit, the diff will be displayed
; partially. E.g 512000 == 512Kb
cut_off_limit_diff = 512000

; Cut off limit for large files inside diffs (size in bytes). Each individual
; file inside a diff which exceeds this limit will be displayed partially.
; E.g 128000 == 128Kb
cut_off_limit_file = 128000

; Use cached version of vcs repositories everywhere. Recommended to be `true`
vcs_full_cache = true

; Force https in RhodeCode, fixes https redirects, assumes it's always https.
; Normally this is controlled by proper flags sent from an http server such as Nginx or Apache
force_https = false

; use Strict-Transport-Security headers
use_htsts = false

; Set to true if your repos are exposed using the dumb protocol
git_update_server_info = false

; RSS/ATOM feed options
rss_cut_off_limit = 256000
rss_items_per_page = 10
rss_include_diff = false

; gist URL alias, used to create nicer urls for gists. This should be a
; url that rewrites to _admin/gists/{gistid},
; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
gist_alias_url =

; List of views (using glob pattern syntax) that AUTH TOKENS could be
; used for access.
; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
; came from the logged-in user who owns this authentication token.
; Additionally the @TOKEN syntax can be used to bind a view to a specific
; authentication token. Such a view would only be accessible when used together
; with this authentication token
; the list of all views can be found under `/_admin/permissions/auth_token_access`
; The list should be "," separated and on a single line.
; Most common views to enable:

# RepoCommitsView:repo_commit_download
# RepoCommitsView:repo_commit_patch
# RepoCommitsView:repo_commit_raw
# RepoCommitsView:repo_commit_raw@TOKEN
# RepoFilesView:repo_files_diff
# RepoFilesView:repo_archivefile
# RepoFilesView:repo_file_raw
# GistView:*
api_access_controllers_whitelist =
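; a minimal sketch (hypothetical selection) of the single-line format described
; above, enabling raw file access and all gist views for auth-token access:
#api_access_controllers_whitelist = RepoFilesView:repo_file_raw, GistView:*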

; Default encoding used to convert from and to unicode
; can also be a comma separated list of encodings in case of mixed encodings
default_encoding = UTF-8

; instance-id prefix
; a prefix key for this instance used for cache invalidation when running
; multiple instances of RhodeCode, make sure it's globally unique for
; all running RhodeCode instances. Leave empty if you don't use it
instance_id =
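; a hypothetical example for a multi-instance setup, any globally unique string works:
#instance_id = rc-prod-01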

; Fallback authentication plugin. Set this to a plugin ID to force the usage
; of an authentication plugin even if it is disabled by its settings.
; This could be useful if you are unable to log in to the system due to broken
; authentication settings. Then you can enable e.g. the internal RhodeCode auth
; module to log in again and fix the settings.
; Available builtin plugin IDs (hash is part of the ID):
; egg:rhodecode-enterprise-ce#rhodecode
; egg:rhodecode-enterprise-ce#pam
; egg:rhodecode-enterprise-ce#ldap
; egg:rhodecode-enterprise-ce#jasig_cas
; egg:rhodecode-enterprise-ce#headers
; egg:rhodecode-enterprise-ce#crowd

#rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode

; Flag to control loading of legacy plugins in py:/path format
auth_plugin.import_legacy_plugins = true

; alternative return HTTP header for failed authentication. Default HTTP
; response is 401 HTTPUnauthorized. Currently HG clients have trouble
; handling that, causing a series of failed authentication calls.
; Set this variable to 403 to return HTTPForbidden, or any other HTTP code.
; This will be served instead of the default 401 on bad authentication
auth_ret_code =
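; for example, to serve HTTPForbidden on failed authentication as described above:
#auth_ret_code = 403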
237 |
|
240 | |||
238 | ; use special detection method when serving auth_ret_code, instead of serving |
|
241 | ; use special detection method when serving auth_ret_code, instead of serving | |
239 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
242 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) | |
240 | ; and then serve auth_ret_code to clients |
|
243 | ; and then serve auth_ret_code to clients | |
241 | auth_ret_code_detection = false |
|
244 | auth_ret_code_detection = false | |
242 |
|
245 | |||
243 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
246 | ; locking return code. When repository is locked return this HTTP code. 2XX | |
244 | ; codes don't break the transactions while 4XX codes do |
|
247 | ; codes don't break the transactions while 4XX codes do | |
245 | lock_ret_code = 423 |
|
248 | lock_ret_code = 423 | |
246 |
|
249 | |||
247 | ; Filesystem location were repositories should be stored |
|
250 | ; Filesystem location were repositories should be stored | |
248 | repo_store.path = /var/opt/rhodecode_repo_store |
|
251 | repo_store.path = /var/opt/rhodecode_repo_store | |
249 |
|
252 | |||
250 | ; allows to setup custom hooks in settings page |
|
253 | ; allows to setup custom hooks in settings page | |
251 | allow_custom_hooks_settings = true |
|
254 | allow_custom_hooks_settings = true | |
252 |
|
255 | |||
253 | ; Generated license token required for EE edition license. |
|
256 | ; Generated license token required for EE edition license. | |
254 | ; New generated token value can be found in Admin > settings > license page. |
|
257 | ; New generated token value can be found in Admin > settings > license page. | |
255 | license_token = |
|
258 | license_token = | |
256 |
|
259 | |||
257 | ; This flag hides sensitive information on the license page such as token, and license data |
|
260 | ; This flag hides sensitive information on the license page such as token, and license data | |
258 | license.hide_license_info = false |
|
261 | license.hide_license_info = false | |
259 |
|
262 | |||
260 | ; Import EE license from this license path |
|
263 | ; Import EE license from this license path | |
261 | #license.import_path = %(here)s/rhodecode_enterprise.license |
|
264 | #license.import_path = %(here)s/rhodecode_enterprise.license | |
262 |
|
265 | |||
263 | ; import license 'if-missing' or 'force' (always override) |
|
266 | ; import license 'if-missing' or 'force' (always override) | |
264 | ; if-missing means apply license if it doesn't exist. 'force' option always overrides it |
|
267 | ; if-missing means apply license if it doesn't exist. 'force' option always overrides it | |
265 | license.import_path_mode = if-missing |
|
268 | license.import_path_mode = if-missing | |
266 |
|
269 | |||
267 | ; supervisor connection uri, for managing supervisor and logs. |
|
270 | ; supervisor connection uri, for managing supervisor and logs. | |
268 | supervisor.uri = |
|
271 | supervisor.uri = | |
269 |
|
272 | |||
270 | ; supervisord group name/id we only want this RC instance to handle |
|
273 | ; supervisord group name/id we only want this RC instance to handle | |
271 | supervisor.group_id = dev |
|
274 | supervisor.group_id = dev | |
272 |
|
275 | |||
273 | ; Display extended labs settings |
|
276 | ; Display extended labs settings | |
274 | labs_settings_active = true |
|
277 | labs_settings_active = true | |
275 |
|
278 | |||
276 | ; Custom exception store path, defaults to TMPDIR |
|
279 | ; Custom exception store path, defaults to TMPDIR | |
277 | ; This is used to store exception from RhodeCode in shared directory |
|
280 | ; This is used to store exception from RhodeCode in shared directory | |
278 | #exception_tracker.store_path = |
|
281 | #exception_tracker.store_path = | |
279 |
|
282 | |||
280 | ; Send email with exception details when it happens |
|
283 | ; Send email with exception details when it happens | |
281 | #exception_tracker.send_email = false |
|
284 | #exception_tracker.send_email = false | |
282 |
|
285 | |||
283 | ; Comma separated list of recipients for exception emails, |
|
286 | ; Comma separated list of recipients for exception emails, | |
284 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
287 | ; e.g admin@rhodecode.com,devops@rhodecode.com | |
285 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
288 | ; Can be left empty, then emails will be sent to ALL super-admins | |
286 | #exception_tracker.send_email_recipients = |
|
289 | #exception_tracker.send_email_recipients = | |
287 |
|
290 | |||
288 | ; optional prefix to Add to email Subject |
|
291 | ; optional prefix to Add to email Subject | |
289 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
292 | #exception_tracker.email_prefix = [RHODECODE ERROR] | |
290 |
|
293 | |||
291 | ; NOTE: this setting IS DEPRECATED: |
|
294 | ; NOTE: this setting IS DEPRECATED: | |
292 | ; file_store backend is always enabled |
|
295 | ; file_store backend is always enabled | |
293 | #file_store.enabled = true |
|
296 | #file_store.enabled = true | |
294 |
|
297 | |||
295 | ; NOTE: this setting IS DEPRECATED: |
|
298 | ; NOTE: this setting IS DEPRECATED: | |
296 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
299 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead | |
297 | ; Storage backend, available options are: local |
|
300 | ; Storage backend, available options are: local | |
298 | #file_store.backend = local |
|
301 | #file_store.backend = local | |
299 |
|
302 | |||
300 | ; NOTE: this setting IS DEPRECATED: |
|
303 | ; NOTE: this setting IS DEPRECATED: | |
301 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
304 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead | |
302 | ; path to store the uploaded binaries and artifacts |
|
305 | ; path to store the uploaded binaries and artifacts | |
303 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
306 | #file_store.storage_path = /var/opt/rhodecode_data/file_store | |
304 |
|
307 | |||
305 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. |
|
308 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. | |
306 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options |
|
309 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options | |
307 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes |
|
310 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes | |
308 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from |
|
311 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from | |
309 | ; previous installations to keep the artifacts without a need of migration |
|
312 | ; previous installations to keep the artifacts without a need of migration | |
310 | #file_store.backend.type = filesystem_v2 |
|
313 | #file_store.backend.type = filesystem_v2 | |
311 |
|
314 | |||
312 | ; filesystem options... |
|
315 | ; filesystem options... | |
313 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
316 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store | |
314 |
|
317 | |||
315 | ; filesystem_v2 options... |
|
318 | ; filesystem_v2 options... | |
316 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
319 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store | |
317 | #file_store.filesystem_v2.shards = 8 |
|
320 | #file_store.filesystem_v2.shards = 8 | |
318 |
|
321 | |||
319 | ; objectstore options... |
|
322 | ; objectstore options... | |
320 | ; url for s3 compatible storage that allows to upload artifacts |
|
323 | ; url for s3 compatible storage that allows to upload artifacts | |
321 | ; e.g http://minio:9000 |
|
324 | ; e.g http://minio:9000 | |
322 | #file_store.backend.type = objectstore |
|
325 | #file_store.backend.type = objectstore | |
323 | #file_store.objectstore.url = http://s3-minio:9000 |
|
326 | #file_store.objectstore.url = http://s3-minio:9000 | |
324 |
|
327 | |||
325 | ; a top-level bucket to put all other shards in |
|
328 | ; a top-level bucket to put all other shards in | |
326 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
329 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number | |
327 | #file_store.objectstore.bucket = rhodecode-file-store |
|
330 | #file_store.objectstore.bucket = rhodecode-file-store | |
328 |
|
331 | |||
329 | ; number of sharded buckets to create to distribute archives across |
|
332 | ; number of sharded buckets to create to distribute archives across | |
330 | ; default is 8 shards |
|
333 | ; default is 8 shards | |
331 | #file_store.objectstore.bucket_shards = 8 |
|
334 | #file_store.objectstore.bucket_shards = 8 | |
332 |
|
335 | |||
333 | ; key for s3 auth |
|
336 | ; key for s3 auth | |
334 | #file_store.objectstore.key = s3admin |
|
337 | #file_store.objectstore.key = s3admin | |
335 |
|
338 | |||
336 | ; secret for s3 auth |
|
339 | ; secret for s3 auth | |
337 | #file_store.objectstore.secret = s3secret4 |
|
340 | #file_store.objectstore.secret = s3secret4 | |
338 |
|
341 | |||
339 | ;region for s3 storage |
|
342 | ;region for s3 storage | |
340 | #file_store.objectstore.region = eu-central-1 |
|
343 | #file_store.objectstore.region = eu-central-1 | |
341 |
|
344 | |||
342 | ; Redis url to acquire/check generation of archives locks |
|
345 | ; Redis url to acquire/check generation of archives locks | |
343 | archive_cache.locking.url = redis://redis:6379/1 |
|
346 | archive_cache.locking.url = redis://redis:6379/1 | |
344 |
|
347 | |||
345 | ; Storage backend, only 'filesystem' and 'objectstore' are available now |
|
348 | ; Storage backend, only 'filesystem' and 'objectstore' are available now | |
346 | archive_cache.backend.type = filesystem |
|
349 | archive_cache.backend.type = filesystem | |
347 |
|
350 | |||
348 | ; url for s3 compatible storage that allows to upload artifacts |
|
351 | ; url for s3 compatible storage that allows to upload artifacts | |
349 | ; e.g http://minio:9000 |
|
352 | ; e.g http://minio:9000 | |
350 | archive_cache.objectstore.url = http://s3-minio:9000 |
|
353 | archive_cache.objectstore.url = http://s3-minio:9000 | |
351 |
|
354 | |||
352 | ; key for s3 auth |
|
355 | ; key for s3 auth | |
353 | archive_cache.objectstore.key = key |
|
356 | archive_cache.objectstore.key = key | |
354 |
|
357 | |||
355 | ; secret for s3 auth |
|
358 | ; secret for s3 auth | |
356 | archive_cache.objectstore.secret = secret |
|
359 | archive_cache.objectstore.secret = secret | |
357 |
|
360 | |||
358 | ;region for s3 storage |
|
361 | ;region for s3 storage | |
359 | archive_cache.objectstore.region = eu-central-1 |
|
362 | archive_cache.objectstore.region = eu-central-1 | |
360 |
|
363 | |||
361 | ; number of sharded buckets to create to distribute archives across |
|
364 | ; number of sharded buckets to create to distribute archives across | |
362 | ; default is 8 shards |
|
365 | ; default is 8 shards | |
363 | archive_cache.objectstore.bucket_shards = 8 |
|
366 | archive_cache.objectstore.bucket_shards = 8 | |
364 |
|
367 | |||
365 | ; a top-level bucket to put all other shards in |
|
368 | ; a top-level bucket to put all other shards in | |
366 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number |
|
369 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number | |
367 | archive_cache.objectstore.bucket = rhodecode-archive-cache |
|
370 | archive_cache.objectstore.bucket = rhodecode-archive-cache | |
368 |
|
371 | |||
369 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time |
|
372 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time | |
370 | archive_cache.objectstore.retry = false |
|
373 | archive_cache.objectstore.retry = false | |
371 |
|
374 | |||
372 | ; number of seconds to wait for next try using retry |
|
375 | ; number of seconds to wait for next try using retry | |
373 | archive_cache.objectstore.retry_backoff = 1 |
|
376 | archive_cache.objectstore.retry_backoff = 1 | |
374 |
|
377 | |||
375 | ; how many tries do do a retry fetch from this backend |
|
378 | ; how many tries do do a retry fetch from this backend | |
376 | archive_cache.objectstore.retry_attempts = 10 |
|
379 | archive_cache.objectstore.retry_attempts = 10 | |
377 |
|
380 | |||
378 | ; Default is $cache_dir/archive_cache if not set |
|
381 | ; Default is $cache_dir/archive_cache if not set | |
379 | ; Generated repo archives will be cached at this location |
|
382 | ; Generated repo archives will be cached at this location | |
380 | ; and served from the cache during subsequent requests for the same archive of |
|
383 | ; and served from the cache during subsequent requests for the same archive of | |
381 | ; the repository. This path is important to be shared across filesystems and with |
|
384 | ; the repository. This path is important to be shared across filesystems and with | |
382 | ; RhodeCode and vcsserver |
|
385 | ; RhodeCode and vcsserver | |
383 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache |
|
386 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache | |
384 |
|
387 | |||
385 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb |
|
388 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb | |
386 | archive_cache.filesystem.cache_size_gb = 1 |
|
389 | archive_cache.filesystem.cache_size_gb = 1 | |
387 |
|
390 | |||
388 | ; Eviction policy used to clear out after cache_size_gb limit is reached |
|
391 | ; Eviction policy used to clear out after cache_size_gb limit is reached | |
389 | archive_cache.filesystem.eviction_policy = least-recently-stored |
|
392 | archive_cache.filesystem.eviction_policy = least-recently-stored | |
390 |
|
393 | |||
391 | ; By default cache uses sharding technique, this specifies how many shards are there |
|
394 | ; By default cache uses sharding technique, this specifies how many shards are there | |
392 | ; default is 8 shards |
|
395 | ; default is 8 shards | |
393 | archive_cache.filesystem.cache_shards = 8 |
|
396 | archive_cache.filesystem.cache_shards = 8 | |
394 |
|
397 | |||
395 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time |
|
398 | ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time | |
396 | archive_cache.filesystem.retry = false |
|
399 | archive_cache.filesystem.retry = false | |
397 |
|
400 | |||
398 | ; number of seconds to wait for next try using retry |
|
401 | ; number of seconds to wait for next try using retry | |
399 | archive_cache.filesystem.retry_backoff = 1 |
|
402 | archive_cache.filesystem.retry_backoff = 1 | |
400 |
|
403 | |||
401 | ; how many tries do do a retry fetch from this backend |
|
404 | ; how many tries do do a retry fetch from this backend | |
402 | archive_cache.filesystem.retry_attempts = 10 |
|
405 | archive_cache.filesystem.retry_attempts = 10 | |
403 |
|
406 | |||
404 |
|
407 | |||
405 | ; ############# |
|
408 | ; ############# | |
406 | ; CELERY CONFIG |
|
409 | ; CELERY CONFIG | |
407 | ; ############# |
|
410 | ; ############# | |
408 |
|
411 | |||
409 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
412 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini | |
410 |
|
413 | |||
411 | use_celery = true |
|
414 | use_celery = true | |
412 |
|
415 | |||
413 | ; path to store schedule database |
|
416 | ; path to store schedule database | |
414 | #celerybeat-schedule.path = |
|
417 | #celerybeat-schedule.path = | |
415 |
|
418 | |||
416 | ; connection url to the message broker (default redis) |
|
419 | ; connection url to the message broker (default redis) | |
417 | celery.broker_url = redis://redis:6379/8 |
|
420 | celery.broker_url = redis://redis:6379/8 | |
418 |
|
421 | |||
419 | ; results backend to get results for (default redis) |
|
422 | ; results backend to get results for (default redis) | |
420 | celery.result_backend = redis://redis:6379/8 |
|
423 | celery.result_backend = redis://redis:6379/8 | |
421 |
|
424 | |||
422 | ; rabbitmq example |
|
425 | ; rabbitmq example | |
423 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
426 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost | |
424 |
|
427 | |||
425 | ; maximum tasks to execute before worker restart |
|
428 | ; maximum tasks to execute before worker restart | |
426 | celery.max_tasks_per_child = 20 |
|
429 | celery.max_tasks_per_child = 20 | |
427 |
|
430 | |||
428 | ; tasks will never be sent to the queue, but executed locally instead. |
|
431 | ; tasks will never be sent to the queue, but executed locally instead. | |
429 | celery.task_always_eager = false |
|
432 | celery.task_always_eager = false | |
430 |
|
433 | |||
431 | ; ############# |
|
434 | ; ############# | |
432 | ; DOGPILE CACHE |
|
435 | ; DOGPILE CACHE | |
433 | ; ############# |
|
436 | ; ############# | |
434 |
|
437 | |||
435 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
438 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. | |
436 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space |
|
439 | ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space | |
437 | cache_dir = /var/opt/rhodecode_data |
|
440 | cache_dir = /var/opt/rhodecode_data | |
438 |
|
441 | |||
439 | ; ********************************************* |
|
442 | ; ********************************************* | |
440 | ; `sql_cache_short` cache for heavy SQL queries |
|
443 | ; `sql_cache_short` cache for heavy SQL queries | |
441 | ; Only supported backend is `memory_lru` |
|
444 | ; Only supported backend is `memory_lru` | |
442 | ; ********************************************* |
|
445 | ; ********************************************* | |
443 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
446 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru | |
444 | rc_cache.sql_cache_short.expiration_time = 30 |
|
447 | rc_cache.sql_cache_short.expiration_time = 30 | |
445 |
|
448 | |||
446 |
|
449 | |||
447 | ; ***************************************************** |
|
450 | ; ***************************************************** | |
448 | ; `cache_repo_longterm` cache for repo object instances |
|
451 | ; `cache_repo_longterm` cache for repo object instances | |
449 | ; Only supported backend is `memory_lru` |
|
452 | ; Only supported backend is `memory_lru` | |
450 | ; ***************************************************** |
|
453 | ; ***************************************************** | |
451 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
454 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru | |
452 | ; by default we use 30 Days, cache is still invalidated on push |
|
455 | ; by default we use 30 Days, cache is still invalidated on push | |
453 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
456 | rc_cache.cache_repo_longterm.expiration_time = 2592000 | |
454 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
457 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches | |
455 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
458 | rc_cache.cache_repo_longterm.max_size = 10000 | |
456 |
|
459 | |||
457 |
|
460 | |||
458 | ; ********************************************* |
|
461 | ; ********************************************* | |
459 | ; `cache_general` cache for general purpose use |
|
462 | ; `cache_general` cache for general purpose use | |
460 | ; for simplicity use rc.file_namespace backend, |
|
463 | ; for simplicity use rc.file_namespace backend, | |
461 | ; for performance and scale use rc.redis |
|
464 | ; for performance and scale use rc.redis | |
462 | ; ********************************************* |
|
465 | ; ********************************************* | |
463 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
466 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace | |
464 | rc_cache.cache_general.expiration_time = 43200 |
|
467 | rc_cache.cache_general.expiration_time = 43200 | |
465 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
468 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
466 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
469 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db | |
467 |
|
470 | |||
468 | ; alternative `cache_general` redis backend with distributed lock |
|
471 | ; alternative `cache_general` redis backend with distributed lock | |
469 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
472 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis | |
470 | #rc_cache.cache_general.expiration_time = 300 |
|
473 | #rc_cache.cache_general.expiration_time = 300 | |
471 |
|
474 | |||
472 | ; redis_expiration_time needs to be greater then expiration_time |
|
475 | ; redis_expiration_time needs to be greater then expiration_time | |
473 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
476 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 | |
474 |
|
477 | |||
475 | #rc_cache.cache_general.arguments.host = localhost |
|
478 | #rc_cache.cache_general.arguments.host = localhost | |
476 | #rc_cache.cache_general.arguments.port = 6379 |
|
479 | #rc_cache.cache_general.arguments.port = 6379 | |
477 | #rc_cache.cache_general.arguments.db = 0 |
|
480 | #rc_cache.cache_general.arguments.db = 0 | |
478 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
481 | #rc_cache.cache_general.arguments.socket_timeout = 30 | |
479 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
482 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
480 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
483 | #rc_cache.cache_general.arguments.distributed_lock = true | |
481 |
|
484 | |||
482 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
485 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
483 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
486 | #rc_cache.cache_general.arguments.lock_auto_renewal = true | |
484 |
|
487 | |||
485 | ; ************************************************* |
|
488 | ; ************************************************* | |
486 | ; `cache_perms` cache for permission tree, auth TTL |
|
489 | ; `cache_perms` cache for permission tree, auth TTL | |
487 | ; for simplicity use rc.file_namespace backend, |
|
490 | ; for simplicity use rc.file_namespace backend, | |
488 | ; for performance and scale use rc.redis |
|
491 | ; for performance and scale use rc.redis | |
489 | ; ************************************************* |
|
492 | ; ************************************************* | |
490 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
493 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace | |
491 | rc_cache.cache_perms.expiration_time = 3600 |
|
494 | rc_cache.cache_perms.expiration_time = 3600 | |
492 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
495 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
493 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
496 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db | |
494 |
|
497 | |||
495 | ; alternative `cache_perms` redis backend with distributed lock |
|
498 | ; alternative `cache_perms` redis backend with distributed lock | |
496 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
499 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis | |
497 | #rc_cache.cache_perms.expiration_time = 300 |
|
500 | #rc_cache.cache_perms.expiration_time = 300 | |
498 |
|
501 | |||
499 | ; redis_expiration_time needs to be greater then expiration_time |
|
502 | ; redis_expiration_time needs to be greater then expiration_time | |
500 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
503 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 | |
501 |
|
504 | |||
502 | #rc_cache.cache_perms.arguments.host = localhost |
|
505 | #rc_cache.cache_perms.arguments.host = localhost | |
503 | #rc_cache.cache_perms.arguments.port = 6379 |
|
506 | #rc_cache.cache_perms.arguments.port = 6379 | |
504 | #rc_cache.cache_perms.arguments.db = 0 |
|
507 | #rc_cache.cache_perms.arguments.db = 0 | |
505 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
508 | #rc_cache.cache_perms.arguments.socket_timeout = 30 | |
506 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
509 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
507 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
510 | #rc_cache.cache_perms.arguments.distributed_lock = true | |
508 |
|
511 | |||
509 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
512 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
510 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
513 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true | |
511 |
|
514 | |||
512 | ; *************************************************** |
|
515 | ; *************************************************** | |
513 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
516 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS | |
514 | ; for simplicity use rc.file_namespace backend, |
|
517 | ; for simplicity use rc.file_namespace backend, | |
515 | ; for performance and scale use rc.redis |
|
518 | ; for performance and scale use rc.redis | |
516 | ; *************************************************** |
|
519 | ; *************************************************** | |
517 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
520 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace | |
518 | rc_cache.cache_repo.expiration_time = 2592000 |
|
521 | rc_cache.cache_repo.expiration_time = 2592000 | |
519 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
522 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
520 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
523 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db | |
521 |
|
524 | |||
522 | ; alternative `cache_repo` redis backend with distributed lock |
|
525 | ; alternative `cache_repo` redis backend with distributed lock | |
523 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
526 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis | |
524 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
527 | #rc_cache.cache_repo.expiration_time = 2592000 | |
525 |
|
528 | |||
526 | ; redis_expiration_time needs to be greater then expiration_time |
|
529 | ; redis_expiration_time needs to be greater then expiration_time | |
527 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
530 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 | |
528 |
|
531 | |||
529 | #rc_cache.cache_repo.arguments.host = localhost |
|
532 | #rc_cache.cache_repo.arguments.host = localhost | |
530 | #rc_cache.cache_repo.arguments.port = 6379 |
|
533 | #rc_cache.cache_repo.arguments.port = 6379 | |
531 | #rc_cache.cache_repo.arguments.db = 1 |
|
534 | #rc_cache.cache_repo.arguments.db = 1 | |
532 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
535 | #rc_cache.cache_repo.arguments.socket_timeout = 30 | |
533 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
536 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
534 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
537 | #rc_cache.cache_repo.arguments.distributed_lock = true | |
535 |
|
538 | |||
536 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
539 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
537 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
540 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true | |
538 |
|
541 | |||
539 | ; ############## |
|
542 | ; ############## | |
540 | ; BEAKER SESSION |
|
543 | ; BEAKER SESSION | |
541 | ; ############## |
|
544 | ; ############## | |
542 |
|
545 | |||
543 | ; beaker.session.type is type of storage options for the logged users sessions. Current allowed |
; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed
; types are file, ext:redis, ext:database, ext:memcached
; Fastest ones are ext:redis and ext:database, DO NOT use memory type for sessions
#beaker.session.type = file
#beaker.session.data_dir = %(here)s/data/sessions

; Redis based sessions
beaker.session.type = ext:redis
beaker.session.url = redis://redis:6379/2

; DB based session, fast, and allows easy management over logged in users
#beaker.session.type = ext:database
#beaker.session.table_name = db_session
#beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
#beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
#beaker.session.sa.pool_recycle = 3600
#beaker.session.sa.echo = false

beaker.session.key = rhodecode
beaker.session.secret = develop-rc-uytcxaz
beaker.session.lock_dir = /data_ramdisk/lock

; Secure encrypted cookie. Requires AES and AES python libraries
; you must disable beaker.session.secret to use this
#beaker.session.encrypt_key = key_for_encryption
#beaker.session.validate_key = validation_key

; Sets the session as invalid (also logging the user out) if it has not been
; accessed for the given amount of time in seconds
beaker.session.timeout = 2592000
beaker.session.httponly = true

; Path to use for the cookie. Set to prefix if you use prefix middleware
#beaker.session.cookie_path = /custom_prefix

; Set https secure cookie
beaker.session.secure = false

; default cookie expiration time in seconds, set to `true` to set expire
; at browser close
#beaker.session.cookie_expires = 3600

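; For reference, the 2592000 second timeout above is 30 days. A hardened https
; deployment might look like the following sketch (the one-week values and the
; enabled secure flag are illustrative, not defaults of this file):
#beaker.session.timeout = 604800
#beaker.session.cookie_expires = 604800
#beaker.session.httponly = true
#beaker.session.secure = true
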
; #############################
; SEARCH INDEXING CONFIGURATION
; #############################

; Full text search indexer is available in rhodecode-tools under
; `rhodecode-tools index` command

; WHOOSH Backend, doesn't require additional services to run
; it works well with a few dozen repos
search.module = rhodecode.lib.index.whoosh
search.location = %(here)s/data/index

; ####################
; CHANNELSTREAM CONFIG
; ####################

; channelstream enables persistent connections and live notification
; in the system. It's also used by the chat system

channelstream.enabled = true

; server address for channelstream server on the backend
channelstream.server = channelstream:9800

; location of the channelstream server from the outside world
; use ws:// for http or wss:// for https. This address needs to be handled
; by an external HTTP server such as Nginx or Apache
; see Nginx/Apache configuration examples in our docs
channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
channelstream.secret = ENV_GENERATED
channelstream.history.location = /var/opt/rhodecode_data/channelstream_history

; Internal application path that the JavaScript client connects to.
; If you use proxy-prefix the prefix should be added before /_channelstream
channelstream.proxy_path = /_channelstream


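; Per the comment above, an https deployment would expose the same endpoint
; over wss; a sketch (the hostname is the same placeholder used above):
#channelstream.ws_url = wss://rhodecode.yourserver.com/_channelstream
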
; ##############################
; MAIN RHODECODE DATABASE CONFIG
; ##############################

#sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
#sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
#sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
; pymysql is an alternative driver for MySQL, use in case of problems with the default one
#sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode

sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30

; see sqlalchemy docs for other advanced settings
; print the sql statements to output
sqlalchemy.db1.echo = false

; recycle the connections after this amount of seconds
sqlalchemy.db1.pool_recycle = 3600

; the number of connections to keep open inside the connection pool.
; 0 indicates no limit
; the general rule with gevent is:
; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
; then increase pool size + max overflow so that they add up to 500.
#sqlalchemy.db1.pool_size = 5

; The number of connections to allow in connection pool "overflow", that is
; connections that can be opened above and beyond the pool_size setting,
; which defaults to five.
#sqlalchemy.db1.max_overflow = 10

; Connection check ping, used to detect broken database connections
; could be enabled to better handle cases of MySQL "has gone away" errors
#sqlalchemy.db1.ping_connection = true

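; Worked example of the gevent sizing rule above: for 500 concurrent greenlets
; that all touch the database, pick any split with pool_size + max_overflow = 500,
; e.g. (the 100/400 split is illustrative):
#sqlalchemy.db1.pool_size = 100
#sqlalchemy.db1.max_overflow = 400
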
; ##########
; VCS CONFIG
; ##########
vcs.server.enable = true
vcs.server = vcsserver:10010

; Web server connectivity protocol, responsible for web based VCS operations
; Available protocols are:
; `http` - use http-rpc backend (default)
vcs.server.protocol = http

; Push/Pull operations protocol, available options are:
; `http` - use http-rpc backend (default)
vcs.scm_app_implementation = http

; Push/Pull operations hooks protocol, available options are:
; `http` - use http-rpc backend (default)
; `celery` - use celery based hooks
#DEPRECATED:vcs.hooks.protocol = http
vcs.hooks.protocol.v2 = celery

; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
; accessible via network.
; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
vcs.hooks.host = *

; Start VCSServer with this instance as a subprocess, useful for development
vcs.start_server = false

; List of enabled VCS backends, available options are:
; `hg` - mercurial
; `git` - git
; `svn` - subversion
vcs.backends = hg, git, svn

; Wait this number of seconds before killing connection to the vcsserver
vcs.connection_timeout = 3600

; Cache flag to cache vcsserver remote calls locally
; It uses cache_region `cache_repo`
vcs.methods.cache = true

; Filesystem location where Git lfs objects should be stored
vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store

; Filesystem location where Mercurial largefile objects should be stored
vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store

; ####################################################
; Subversion proxy support (mod_dav_svn)
; Maps RhodeCode repo groups into SVN paths for Apache
; ####################################################

; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
; Set a numeric version for your current SVN e.g 1.8, or 1.12
; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
#vcs.svn.compatible_version = 1.8

; Redis connection settings for svn integrations logic
; This connection string needs to be the same on ce and vcsserver
vcs.svn.redis_conn = redis://redis:6379/0

; Enable SVN proxy of requests over HTTP
vcs.svn.proxy.enabled = true

; host to connect to the running SVN subsystem
vcs.svn.proxy.host = http://svn:8090

; Enable or disable the config file generation.
svn.proxy.generate_config = true

; Generate config file with `SVNListParentPath` set to `On`.
svn.proxy.list_parent_path = true

; Set location and file name of generated config file.
svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf

; alternative mod_dav config template. This needs to be a valid mako template
; Example template can be found in the source code:
; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
#svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako

; Used as a prefix to the `Location` block in the generated config file.
; In most cases it should be set to `/`.
svn.proxy.location_root = /

; Command to reload the mod dav svn configuration on change.
; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
; Make sure the user who runs the RhodeCode process is allowed to reload Apache
#svn.proxy.reload_cmd = /etc/init.d/apache2 reload

; If the timeout expires before the reload command finishes, the command will
; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
#svn.proxy.reload_timeout = 10

; ####################
; SSH Support Settings
; ####################

; Defines if a custom authorized_keys file should be created and written on
; any change of user SSH keys. Setting this to false also disables the
; possibility of users adding SSH keys from the web interface. Super admins
; can still manage SSH Keys.
ssh.generate_authorized_keyfile = true

; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
# ssh.authorized_keys_ssh_opts =

; Path to the authorized_keys file where the generated entries are placed.
; It is possible to have multiple key files specified in `sshd_config` e.g.
; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode

; Command to execute the SSH wrapper. The binary is available in the
; RhodeCode installation directory.
; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
#DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2

; Allow shell when executing the ssh-wrapper command
ssh.wrapper_cmd_allow_shell = false

; Enables logging, and detailed output sent back to the client during SSH
; operations. Useful for debugging, shouldn't be used in production.
ssh.enable_debug_logging = true

; Paths to binary executables, by default they are just the names, but we can
; override them if we want to use a custom one
ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve

; Enables SSH key generator web interface. Disabling this still allows users
; to add their own keys.
ssh.enable_ui_key_generator = true

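; A sketch of pinning ssh.authorized_keys_ssh_opts explicitly to the documented
; default listed above (normally this can stay commented out):
#ssh.authorized_keys_ssh_opts = no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding
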
; Statsd client config, this is used to send metrics to statsd
; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; Logging is configured automatically at server startup; set to false
; to use the custom logging config below.
; RC_LOGGING_FORMATTER
; RC_LOGGING_LEVEL
; these env variables can control the logging settings when autoconfigure is used

#logging.autoconfigure = true

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; Dummy marker to add new entries after.
; Add any custom entries below. Please don't remove this marker.
custom.conf = 1


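; A sketch of enabling the statsd client; the localhost address is an
; assumption for a locally running statsd, 8125 matches the default above:
#statsd.enabled = true
#statsd.statsd_host = 127.0.0.1
#statsd.statsd_port = 8125
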
; #####################
; LOGGING CONFIGURATION
; #####################

[loggers]
keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper

[handlers]
keys = console, console_sql

[formatters]
keys = generic, json, color_formatter, color_formatter_sql

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_sqlalchemy]
level = INFO
handlers = console_sql
qualname = sqlalchemy.engine
propagate = 0

[logger_beaker]
level = DEBUG
handlers =
qualname = beaker.container
propagate = 1

[logger_rhodecode]
level = DEBUG
handlers =
qualname = rhodecode
propagate = 1

[logger_ssh_wrapper]
level = DEBUG
handlers =
qualname = ssh_wrapper
propagate = 1

[logger_celery]
level = DEBUG
handlers =
qualname = celery


; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = DEBUG
; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = color_formatter

[handler_console_sql]
; "level = DEBUG" logs SQL queries and results.
; "level = INFO" logs SQL queries.
; "level = WARN" logs neither. (Recommended for production systems.)
class = StreamHandler
args = (sys.stderr, )
level = WARN
; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = color_formatter_sql

; ##########
; FORMATTERS
; ##########

[formatter_generic]
class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter]
class = rhodecode.lib.logging_formatter.ColorFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter_sql]
class = rhodecode.lib.logging_formatter.ColorFormatterSql
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_json]
format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
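
; As the handler comments above note, JSON logs only require swapping the
; formatter; a sketch for the console handler (shown disabled):
#[handler_console]
#class = StreamHandler
#args = (sys.stderr, )
#level = DEBUG
#formatter = json
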
@@ -1,880 +1,883 b''

; #########################################
; RHODECODE COMMUNITY EDITION CONFIGURATION
; #########################################

[DEFAULT]
; Debug flag sets all loggers to debug, and enables request tracking
debug = false

; ########################################################################
; EMAIL CONFIGURATION
; These settings will be used by the RhodeCode mailing system
; ########################################################################

; prefix all email subjects with the given prefix, helps filtering out emails
#email_prefix = [RhodeCode]

; email FROM address from which all mails will be sent
#app_email_from = rhodecode-noreply@localhost

#smtp_server = mail.server.com
#smtp_username =
#smtp_password =
#smtp_port =
#smtp_use_tls = false
#smtp_use_ssl = true

[server:main]
; COMMON HOST/IP CONFIG. This applies mostly to the develop setup;
; host and port for gunicorn are controlled by gunicorn_conf.py
host = 127.0.0.1
port = 10020


; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
use = egg:gunicorn#main

; Prefix middleware for RhodeCode.
; recommended when using a proxy setup.
; allows serving RhodeCode under a prefix on the server,
; eg https://server.com/custom_prefix. Enable the `filter-with =` option below as well.
; And set your prefix like: `prefix = /custom_prefix`
; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
; to make your cookies only work on the prefix url
[filter:proxy-prefix]
use = egg:PasteDeploy#prefix
prefix = /

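; A sketch of the prefixed deployment described above (the /custom_prefix
; value is illustrative): set `prefix = /custom_prefix` here, then in
; [app:main] enable `filter-with = proxy-prefix` and set
; `beaker.session.cookie_path = /custom_prefix`.
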
[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent
; directory of this file
; Each option in app:main can be overridden by an environment variable
;
;To override an option:
;
;RC_<KeyName>
;Everything should be uppercase, . and - should be replaced by _.
;For example, if you have these configuration settings:
;rc_cache.repo_object.backend = foo
;it can be overridden by
;export RC_CACHE_REPO_OBJECT_BACKEND=foo

use = egg:rhodecode-enterprise-ce

; enable proxy prefix middleware, defined above
#filter-with = proxy-prefix

; control if environment variables should be expanded into the .ini settings
#rhodecode.env_expand = true

; encryption key used to encrypt social plugin tokens,
; remote_urls with credentials etc, if not set it defaults to
; `beaker.session.secret`
#rhodecode.encrypted_values.secret =

; decryption strict mode (enabled by default). It controls if decryption raises
; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
#rhodecode.encrypted_values.strict = false

; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
; fernet is safer, and we strongly recommend switching to it.
; Due to backward compatibility aes is used as default.
#rhodecode.encrypted_values.algorithm = fernet

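; Worked example of the override rule above: `vcs.connection_timeout = 3600`
; maps to RC_VCS_CONNECTION_TIMEOUT, so running
; `export RC_VCS_CONNECTION_TIMEOUT=7200` before startup would override it
; (7200 is an illustrative value).
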
; Return gzipped responses from RhodeCode (static files/application)
gzip_responses = false

; Auto-generate javascript routes file on startup
generate_js_files = false

; System global default language.
; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
lang = en

; Perform a full repository scan and import on each server start.
; Setting this to true could lead to a very long startup time.
startup.import_repos = false

; URL at which the application is running. This is used for Bootstrapping
; requests in context when no web request is available. Used in ishell, or
; SSH calls. Set this so events receive the proper url for SSH calls.
app.base_url = http://rhodecode.local

; Host at which the Service API is running.
app.service_api.host = http://rhodecode.local:10020

; Secret for Service API authentication.
app.service_api.token =

; Unique application ID. Should be a random unique string for security.
app_instance_uuid = rc-production

; Cut off limit for large diffs (size in bytes). If overall diff size on
; commit, or pull request exceeds this limit this diff will be displayed
; partially. E.g 512000 == 512Kb
cut_off_limit_diff = 512000

; Cut off limit for large files inside diffs (size in bytes). Each individual
; file inside a diff which exceeds this limit will be displayed partially.
; E.g 128000 == 128Kb
cut_off_limit_file = 128000

; Use cached version of vcs repositories everywhere. Recommended to be `true`
vcs_full_cache = true

; Force https in RhodeCode, fixes https redirects, assumes it's always https.
; Normally this is controlled by proper flags sent from the http server such as Nginx or Apache
force_https = false

; use Strict-Transport-Security headers
use_htsts = false

; Set to true if your repos are exposed using the dumb protocol
git_update_server_info = false

; RSS/ATOM feed options
rss_cut_off_limit = 256000
rss_items_per_page = 10
rss_include_diff = false

; gist URL alias, used to create nicer urls for gist. This should be an
; url that does rewrites to _admin/gists/{gistid}.
; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
gist_alias_url =

; List of views (using glob pattern syntax) that AUTH TOKENS could be
; used for access.
; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
; came from the logged-in user who owns this authentication token.
; Additionally @TOKEN syntax can be used to bind the view to a specific
; authentication token. Such a view would only be accessible when used together
; with this authentication token
; list of all views can be found under `/_admin/permissions/auth_token_access`
; The list should be "," separated and on a single line.
; Most common views to enable:

# RepoCommitsView:repo_commit_download
# RepoCommitsView:repo_commit_patch
# RepoCommitsView:repo_commit_raw
# RepoCommitsView:repo_commit_raw@TOKEN
# RepoFilesView:repo_files_diff
# RepoFilesView:repo_archivefile
# RepoFilesView:repo_file_raw
# GistView:*
api_access_controllers_whitelist =

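; A sketch of a populated whitelist built from the views listed above
; (which views to expose is a deployment-specific choice):
#api_access_controllers_whitelist = RepoFilesView:repo_file_raw, GistView:*
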
; Default encoding used to convert from and to unicode
; can also be a comma separated list of encodings in case of mixed encodings
default_encoding = UTF-8

; instance-id prefix
; a prefix key for this instance used for cache invalidation when running
; multiple instances of RhodeCode, make sure it's globally unique for
; all running RhodeCode instances. Leave empty if you don't use it
instance_id =

; Fallback authentication plugin. Set this to a plugin ID to force the usage
; of an authentication plugin even if it is disabled by its settings.
; This could be useful if you are unable to log in to the system due to broken
; authentication settings. Then you can enable e.g. the internal RhodeCode auth
; module to log in again and fix the settings.
; Available builtin plugin IDs (hash is part of the ID):
; egg:rhodecode-enterprise-ce#rhodecode
; egg:rhodecode-enterprise-ce#pam
; egg:rhodecode-enterprise-ce#ldap
; egg:rhodecode-enterprise-ce#jasig_cas
; egg:rhodecode-enterprise-ce#headers
; egg:rhodecode-enterprise-ce#crowd

#rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode

; Flag to control loading of legacy plugins in py:/path format
auth_plugin.import_legacy_plugins = true

; alternative return HTTP header for failed authentication. Default HTTP
; response is 401 HTTPUnauthorized. Currently HG clients have trouble
; handling that, causing a series of failed authentication calls.
; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
; This will be served instead of default 401 on bad authentication
auth_ret_code =

; use special detection method when serving auth_ret_code, instead of serving
; ret_code directly, use 401 initially (which triggers credentials prompt)
; and then serve auth_ret_code to clients
auth_ret_code_detection = false

; locking return code. When repository is locked return this HTTP code. 2XX
; codes don't break the transactions while 4XX codes do
lock_ret_code = 423

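; A sketch of the 403 variant described above; whether detection should be
; enabled alongside it depends on the clients in use (shown disabled):
#auth_ret_code = 403
#auth_ret_code_detection = true
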
; Filesystem location where repositories should be stored
repo_store.path = /var/opt/rhodecode_repo_store

; allows setting up custom hooks in the settings page
allow_custom_hooks_settings = true

; Generated license token required for EE edition license.
; New generated token value can be found in Admin > settings > license page.
license_token =

; This flag hides sensitive information on the license page such as token, and license data
license.hide_license_info = false

; Import EE license from this license path
#license.import_path = %(here)s/rhodecode_enterprise.license

; import license 'if-missing' or 'force' (always override)
; if-missing means apply license if it doesn't exist. 'force' option always overrides it
license.import_path_mode = if-missing

; supervisor connection uri, for managing supervisor and logs.
supervisor.uri =

; supervisord group name/id we only want this RC instance to handle
supervisor.group_id = prod

; Display extended labs settings
labs_settings_active = true

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; Send email with exception details when it happens
#exception_tracker.send_email = false

; Comma separated list of recipients for exception emails,
; e.g admin@rhodecode.com,devops@rhodecode.com
; Can be left empty, then emails will be sent to ALL super-admins
#exception_tracker.send_email_recipients =

; optional prefix to add to the email subject
#exception_tracker.email_prefix = [RHODECODE ERROR]

259 | ; NOTE: this setting IS DEPRECATED: |
|
262 | ; NOTE: this setting IS DEPRECATED: | |
260 | ; file_store backend is always enabled |
|
263 | ; file_store backend is always enabled | |
261 | #file_store.enabled = true |
|
264 | #file_store.enabled = true | |
262 |
|
265 | |||
263 | ; NOTE: this setting IS DEPRECATED: |
|
266 | ; NOTE: this setting IS DEPRECATED: | |
264 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
267 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead | |
265 | ; Storage backend, available options are: local |
|
268 | ; Storage backend, available options are: local | |
266 | #file_store.backend = local |
|
269 | #file_store.backend = local | |
267 |
|
270 | |||
268 | ; NOTE: this setting IS DEPRECATED: |
|
271 | ; NOTE: this setting IS DEPRECATED: | |
269 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
272 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead | |
270 | ; path to store the uploaded binaries and artifacts |
|
273 | ; path to store the uploaded binaries and artifacts | |
271 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
274 | #file_store.storage_path = /var/opt/rhodecode_data/file_store | |
272 |
|
275 | |||
273 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. |
|
276 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. | |
274 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options |
|
277 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options | |
275 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes |
|
278 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes | |
276 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from |
|
279 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from | |
277 | ; previous installations to keep the artifacts without a need of migration |
|
280 | ; previous installations to keep the artifacts without a need of migration | |
278 | #file_store.backend.type = filesystem_v2 |
|
281 | #file_store.backend.type = filesystem_v2 | |
279 |
|
282 | |||
280 | ; filesystem options... |
|
283 | ; filesystem options... | |
281 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
284 | #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store | |
282 |
|
285 | |||
283 | ; filesystem_v2 options... |
|
286 | ; filesystem_v2 options... | |
284 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store |
|
287 | #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store | |
285 | #file_store.filesystem_v2.shards = 8 |
|
288 | #file_store.filesystem_v2.shards = 8 | |
286 |
|
289 | |||
287 | ; objectstore options... |
|
290 | ; objectstore options... | |
288 | ; url for s3 compatible storage that allows to upload artifacts |
|
291 | ; url for s3 compatible storage that allows to upload artifacts | |
289 | ; e.g http://minio:9000 |
|
292 | ; e.g http://minio:9000 | |
290 | #file_store.backend.type = objectstore |
|
293 | #file_store.backend.type = objectstore | |
291 | #file_store.objectstore.url = http://s3-minio:9000 |
|
294 | #file_store.objectstore.url = http://s3-minio:9000 | |
292 |
|
295 | |||
293 | ; a top-level bucket to put all other shards in |
|
296 | ; a top-level bucket to put all other shards in | |
294 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
297 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number | |
295 | #file_store.objectstore.bucket = rhodecode-file-store |
|
298 | #file_store.objectstore.bucket = rhodecode-file-store | |
296 |
|
299 | |||
297 | ; number of sharded buckets to create to distribute stored objects across |
|
300 | ; number of sharded buckets to create to distribute stored objects across | |
298 | ; default is 8 shards |
|
301 | ; default is 8 shards | |
299 | #file_store.objectstore.bucket_shards = 8 |
|
302 | #file_store.objectstore.bucket_shards = 8 | |
300 |
|
303 | |||
301 | ; key for s3 auth |
|
304 | ; key for s3 auth | |
302 | #file_store.objectstore.key = s3admin |
|
305 | #file_store.objectstore.key = s3admin | |
303 |
|
306 | |||
304 | ; secret for s3 auth |
|
307 | ; secret for s3 auth | |
305 | #file_store.objectstore.secret = s3secret4 |
|
308 | #file_store.objectstore.secret = s3secret4 | |
306 |
|
309 | |||
307 | ; region for s3 storage |
|
310 | ; region for s3 storage | |
308 | #file_store.objectstore.region = eu-central-1 |
|
311 | #file_store.objectstore.region = eu-central-1 | |
309 |
|
312 | |||
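; A minimal sketch of an objectstore-backed file_store, assuming the
; s3-minio endpoint and placeholder credentials shown above:
;
; file_store.backend.type = objectstore
; file_store.objectstore.url = http://s3-minio:9000
; file_store.objectstore.bucket = rhodecode-file-store
; file_store.objectstore.bucket_shards = 8
; file_store.objectstore.key = s3admin
; file_store.objectstore.secret = s3secret4
; file_store.objectstore.region = eu-central-1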
310 | ; Redis url to acquire/check generation of archives locks |
|
313 | ; Redis url to acquire/check generation of archives locks | |
311 | archive_cache.locking.url = redis://redis:6379/1 |
|
314 | archive_cache.locking.url = redis://redis:6379/1 | |
312 |
|
315 | |||
313 | ; Storage backend, only 'filesystem' and 'objectstore' are available now |
|
316 | ; Storage backend, only 'filesystem' and 'objectstore' are available now | |
314 | archive_cache.backend.type = filesystem |
|
317 | archive_cache.backend.type = filesystem | |
315 |
|
318 | |||
316 | ; url for s3 compatible storage that allows uploading artifacts |
|
319 | ; url for s3 compatible storage that allows uploading artifacts | |
317 | ; e.g. http://minio:9000 |
|
320 | ; e.g. http://minio:9000 | |
318 | archive_cache.objectstore.url = http://s3-minio:9000 |
|
321 | archive_cache.objectstore.url = http://s3-minio:9000 | |
319 |
|
322 | |||
320 | ; key for s3 auth |
|
323 | ; key for s3 auth | |
321 | archive_cache.objectstore.key = key |
|
324 | archive_cache.objectstore.key = key | |
322 |
|
325 | |||
323 | ; secret for s3 auth |
|
326 | ; secret for s3 auth | |
324 | archive_cache.objectstore.secret = secret |
|
327 | archive_cache.objectstore.secret = secret | |
325 |
|
328 | |||
326 | ; region for s3 storage |
|
329 | ; region for s3 storage | |
327 | archive_cache.objectstore.region = eu-central-1 |
|
330 | archive_cache.objectstore.region = eu-central-1 | |
328 |
|
331 | |||
329 | ; number of sharded buckets to create to distribute archives across |
|
332 | ; number of sharded buckets to create to distribute archives across | |
330 | ; default is 8 shards |
|
333 | ; default is 8 shards | |
331 | archive_cache.objectstore.bucket_shards = 8 |
|
334 | archive_cache.objectstore.bucket_shards = 8 | |
332 |
|
335 | |||
333 | ; a top-level bucket to put all other shards in |
|
336 | ; a top-level bucket to put all other shards in | |
334 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number |
|
337 | ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number | |
335 | archive_cache.objectstore.bucket = rhodecode-archive-cache |
|
338 | archive_cache.objectstore.bucket = rhodecode-archive-cache | |
336 |
|
339 | |||
337 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries |
|
340 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries | |
338 | archive_cache.objectstore.retry = false |
|
341 | archive_cache.objectstore.retry = false | |
339 |
|
342 | |||
340 | ; number of seconds to wait for next try using retry |
|
343 | ; number of seconds to wait for next try using retry | |
341 | archive_cache.objectstore.retry_backoff = 1 |
|
344 | archive_cache.objectstore.retry_backoff = 1 | |
342 |
|
345 | |||
343 | ; how many tries to do a retry fetch from this backend |
|
346 | ; how many tries to do a retry fetch from this backend | |
344 | archive_cache.objectstore.retry_attempts = 10 |
|
347 | archive_cache.objectstore.retry_attempts = 10 | |
345 |
|
348 | |||
346 | ; Default is $cache_dir/archive_cache if not set |
|
349 | ; Default is $cache_dir/archive_cache if not set | |
347 | ; Generated repo archives will be cached at this location |
|
350 | ; Generated repo archives will be cached at this location | |
348 | ; and served from the cache during subsequent requests for the same archive of |
|
351 | ; and served from the cache during subsequent requests for the same archive of | |
349 | ; the repository. It is important that this path is shared across filesystems and with |
|
352 | ; the repository. It is important that this path is shared across filesystems and with | |
350 | ; RhodeCode and vcsserver |
|
353 | ; RhodeCode and vcsserver | |
351 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache |
|
354 | archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache | |
352 |
|
355 | |||
353 | ; The limit in GB sets how much data we cache before recycling least recently used entries, defaults to 10 GB |
|
356 | ; The limit in GB sets how much data we cache before recycling least recently used entries, defaults to 10 GB | |
354 | archive_cache.filesystem.cache_size_gb = 40 |
|
357 | archive_cache.filesystem.cache_size_gb = 40 | |
355 |
|
358 | |||
356 | ; Eviction policy used to clear out after cache_size_gb limit is reached |
|
359 | ; Eviction policy used to clear out after cache_size_gb limit is reached | |
357 | archive_cache.filesystem.eviction_policy = least-recently-stored |
|
360 | archive_cache.filesystem.eviction_policy = least-recently-stored | |
358 |
|
361 | |||
359 | ; By default cache uses sharding technique, this specifies how many shards are there |
|
362 | ; By default cache uses sharding technique, this specifies how many shards are there | |
360 | ; default is 8 shards |
|
363 | ; default is 8 shards | |
361 | archive_cache.filesystem.cache_shards = 8 |
|
364 | archive_cache.filesystem.cache_shards = 8 | |
362 |
|
365 | |||
363 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries |
|
366 | ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries | |
364 | archive_cache.filesystem.retry = false |
|
367 | archive_cache.filesystem.retry = false | |
365 |
|
368 | |||
366 | ; number of seconds to wait for next try using retry |
|
369 | ; number of seconds to wait for next try using retry | |
367 | archive_cache.filesystem.retry_backoff = 1 |
|
370 | archive_cache.filesystem.retry_backoff = 1 | |
368 |
|
371 | |||
369 | ; how many tries to do a retry fetch from this backend |
|
372 | ; how many tries to do a retry fetch from this backend | |
370 | archive_cache.filesystem.retry_attempts = 10 |
|
373 | archive_cache.filesystem.retry_attempts = 10 | |
371 |
|
374 | |||
372 |
|
375 | |||
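; A minimal sketch of switching the archive cache to the objectstore
; backend, reusing the illustrative s3 endpoint and credentials above:
;
; archive_cache.backend.type = objectstore
; archive_cache.objectstore.url = http://s3-minio:9000
; archive_cache.objectstore.bucket = rhodecode-archive-cache
; archive_cache.objectstore.key = key
; archive_cache.objectstore.secret = secret
; archive_cache.objectstore.region = eu-central-1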
373 | ; ############# |
|
376 | ; ############# | |
374 | ; CELERY CONFIG |
|
377 | ; CELERY CONFIG | |
375 | ; ############# |
|
378 | ; ############# | |
376 |
|
379 | |||
377 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
380 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini | |
378 |
|
381 | |||
379 | use_celery = true |
|
382 | use_celery = true | |
380 |
|
383 | |||
381 | ; path to store schedule database |
|
384 | ; path to store schedule database | |
382 | #celerybeat-schedule.path = |
|
385 | #celerybeat-schedule.path = | |
383 |
|
386 | |||
384 | ; connection url to the message broker (default redis) |
|
387 | ; connection url to the message broker (default redis) | |
385 | celery.broker_url = redis://redis:6379/8 |
|
388 | celery.broker_url = redis://redis:6379/8 | |
386 |
|
389 | |||
387 | ; results backend to get results for (default redis) |
|
390 | ; results backend to get results for (default redis) | |
388 | celery.result_backend = redis://redis:6379/8 |
|
391 | celery.result_backend = redis://redis:6379/8 | |
389 |
|
392 | |||
390 | ; rabbitmq example |
|
393 | ; rabbitmq example | |
391 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
394 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost | |
392 |
|
395 | |||
393 | ; maximum tasks to execute before worker restart |
|
396 | ; maximum tasks to execute before worker restart | |
394 | celery.max_tasks_per_child = 20 |
|
397 | celery.max_tasks_per_child = 20 | |
395 |
|
398 | |||
396 | ; tasks will never be sent to the queue, but executed locally instead. |
|
399 | ; tasks will never be sent to the queue, but executed locally instead. | |
397 | celery.task_always_eager = false |
|
400 | celery.task_always_eager = false | |
398 |
|
401 | |||
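; A minimal sketch for local debugging without a broker: with eager mode
; tasks run inline in the web process instead of being queued (an
; illustrative setup, not recommended for production):
;
; use_celery = true
; celery.task_always_eager = true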
399 | ; ############# |
|
402 | ; ############# | |
400 | ; DOGPILE CACHE |
|
403 | ; DOGPILE CACHE | |
401 | ; ############# |
|
404 | ; ############# | |
402 |
|
405 | |||
403 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
406 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. | |
404 | ; e.g. /tmpfs/data_ramdisk, however this directory might require a large amount of space |
|
407 | ; e.g. /tmpfs/data_ramdisk, however this directory might require a large amount of space | |
405 | cache_dir = /var/opt/rhodecode_data |
|
408 | cache_dir = /var/opt/rhodecode_data | |
406 |
|
409 | |||
407 | ; ********************************************* |
|
410 | ; ********************************************* | |
408 | ; `sql_cache_short` cache for heavy SQL queries |
|
411 | ; `sql_cache_short` cache for heavy SQL queries | |
409 | ; Only supported backend is `memory_lru` |
|
412 | ; Only supported backend is `memory_lru` | |
410 | ; ********************************************* |
|
413 | ; ********************************************* | |
411 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
414 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru | |
412 | rc_cache.sql_cache_short.expiration_time = 30 |
|
415 | rc_cache.sql_cache_short.expiration_time = 30 | |
413 |
|
416 | |||
414 |
|
417 | |||
415 | ; ***************************************************** |
|
418 | ; ***************************************************** | |
416 | ; `cache_repo_longterm` cache for repo object instances |
|
419 | ; `cache_repo_longterm` cache for repo object instances | |
417 | ; Only supported backend is `memory_lru` |
|
420 | ; Only supported backend is `memory_lru` | |
418 | ; ***************************************************** |
|
421 | ; ***************************************************** | |
419 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
422 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru | |
420 | ; by default we use 30 days, the cache is still invalidated on push |
|
423 | ; by default we use 30 days, the cache is still invalidated on push | |
421 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
424 | rc_cache.cache_repo_longterm.expiration_time = 2592000 | |
422 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
425 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches | |
423 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
426 | rc_cache.cache_repo_longterm.max_size = 10000 | |
424 |
|
427 | |||
425 |
|
428 | |||
426 | ; ********************************************* |
|
429 | ; ********************************************* | |
427 | ; `cache_general` cache for general purpose use |
|
430 | ; `cache_general` cache for general purpose use | |
428 | ; for simplicity use rc.file_namespace backend, |
|
431 | ; for simplicity use rc.file_namespace backend, | |
429 | ; for performance and scale use rc.redis |
|
432 | ; for performance and scale use rc.redis | |
430 | ; ********************************************* |
|
433 | ; ********************************************* | |
431 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
434 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace | |
432 | rc_cache.cache_general.expiration_time = 43200 |
|
435 | rc_cache.cache_general.expiration_time = 43200 | |
433 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set |
|
436 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set | |
434 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
437 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db | |
435 |
|
438 | |||
436 | ; alternative `cache_general` redis backend with distributed lock |
|
439 | ; alternative `cache_general` redis backend with distributed lock | |
437 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
440 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis | |
438 | #rc_cache.cache_general.expiration_time = 300 |
|
441 | #rc_cache.cache_general.expiration_time = 300 | |
439 |
|
442 | |||
440 | ; redis_expiration_time needs to be greater than expiration_time |
|
443 | ; redis_expiration_time needs to be greater than expiration_time | |
441 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
444 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 | |
442 |
|
445 | |||
443 | #rc_cache.cache_general.arguments.host = localhost |
|
446 | #rc_cache.cache_general.arguments.host = localhost | |
444 | #rc_cache.cache_general.arguments.port = 6379 |
|
447 | #rc_cache.cache_general.arguments.port = 6379 | |
445 | #rc_cache.cache_general.arguments.db = 0 |
|
448 | #rc_cache.cache_general.arguments.db = 0 | |
446 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
449 | #rc_cache.cache_general.arguments.socket_timeout = 30 | |
447 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
450 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
448 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
451 | #rc_cache.cache_general.arguments.distributed_lock = true | |
449 |
|
452 | |||
450 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
453 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
451 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
454 | #rc_cache.cache_general.arguments.lock_auto_renewal = true | |
452 |
|
455 | |||
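; A consolidated sketch of the redis-backed `cache_general` variant above,
; assuming the same `redis` host used elsewhere in this file:
;
; rc_cache.cache_general.backend = dogpile.cache.rc.redis
; rc_cache.cache_general.expiration_time = 300
; rc_cache.cache_general.arguments.redis_expiration_time = 7200
; rc_cache.cache_general.arguments.host = redis
; rc_cache.cache_general.arguments.port = 6379
; rc_cache.cache_general.arguments.db = 0
; rc_cache.cache_general.arguments.distributed_lock = true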
453 | ; ************************************************* |
|
456 | ; ************************************************* | |
454 | ; `cache_perms` cache for permission tree, auth TTL |
|
457 | ; `cache_perms` cache for permission tree, auth TTL | |
455 | ; for simplicity use rc.file_namespace backend, |
|
458 | ; for simplicity use rc.file_namespace backend, | |
456 | ; for performance and scale use rc.redis |
|
459 | ; for performance and scale use rc.redis | |
457 | ; ************************************************* |
|
460 | ; ************************************************* | |
458 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
461 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace | |
459 | rc_cache.cache_perms.expiration_time = 3600 |
|
462 | rc_cache.cache_perms.expiration_time = 3600 | |
460 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set |
|
463 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set | |
461 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
464 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db | |
462 |
|
465 | |||
463 | ; alternative `cache_perms` redis backend with distributed lock |
|
466 | ; alternative `cache_perms` redis backend with distributed lock | |
464 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
467 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis | |
465 | #rc_cache.cache_perms.expiration_time = 300 |
|
468 | #rc_cache.cache_perms.expiration_time = 300 | |
466 |
|
469 | |||
467 | ; redis_expiration_time needs to be greater than expiration_time |
|
470 | ; redis_expiration_time needs to be greater than expiration_time | |
468 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
471 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 | |
469 |
|
472 | |||
470 | #rc_cache.cache_perms.arguments.host = localhost |
|
473 | #rc_cache.cache_perms.arguments.host = localhost | |
471 | #rc_cache.cache_perms.arguments.port = 6379 |
|
474 | #rc_cache.cache_perms.arguments.port = 6379 | |
472 | #rc_cache.cache_perms.arguments.db = 0 |
|
475 | #rc_cache.cache_perms.arguments.db = 0 | |
473 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
476 | #rc_cache.cache_perms.arguments.socket_timeout = 30 | |
474 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
477 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
475 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
478 | #rc_cache.cache_perms.arguments.distributed_lock = true | |
476 |
|
479 | |||
477 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
480 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
478 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
481 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true | |
479 |
|
482 | |||
480 | ; *************************************************** |
|
483 | ; *************************************************** | |
481 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
484 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS | |
482 | ; for simplicity use rc.file_namespace backend, |
|
485 | ; for simplicity use rc.file_namespace backend, | |
483 | ; for performance and scale use rc.redis |
|
486 | ; for performance and scale use rc.redis | |
484 | ; *************************************************** |
|
487 | ; *************************************************** | |
485 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
488 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace | |
486 | rc_cache.cache_repo.expiration_time = 2592000 |
|
489 | rc_cache.cache_repo.expiration_time = 2592000 | |
487 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set |
|
490 | ; file cache store path. Defaults to the `cache_dir =` value, or to tempdir if neither is set | |
488 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
491 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db | |
489 |
|
492 | |||
490 | ; alternative `cache_repo` redis backend with distributed lock |
|
493 | ; alternative `cache_repo` redis backend with distributed lock | |
491 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
494 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis | |
492 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
495 | #rc_cache.cache_repo.expiration_time = 2592000 | |
493 |
|
496 | |||
494 | ; redis_expiration_time needs to be greater than expiration_time |
|
497 | ; redis_expiration_time needs to be greater than expiration_time | |
495 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
498 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 | |
496 |
|
499 | |||
497 | #rc_cache.cache_repo.arguments.host = localhost |
|
500 | #rc_cache.cache_repo.arguments.host = localhost | |
498 | #rc_cache.cache_repo.arguments.port = 6379 |
|
501 | #rc_cache.cache_repo.arguments.port = 6379 | |
499 | #rc_cache.cache_repo.arguments.db = 1 |
|
502 | #rc_cache.cache_repo.arguments.db = 1 | |
500 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
503 | #rc_cache.cache_repo.arguments.socket_timeout = 30 | |
501 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
504 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
502 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
505 | #rc_cache.cache_repo.arguments.distributed_lock = true | |
503 |
|
506 | |||
504 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
507 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
505 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
508 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true | |
506 |
|
509 | |||
507 | ; ############## |
|
510 | ; ############## | |
508 | ; BEAKER SESSION |
|
511 | ; BEAKER SESSION | |
509 | ; ############## |
|
512 | ; ############## | |
510 |
|
513 | |||
511 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed |
|
514 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed | |
512 | ; types are file, ext:redis, ext:database, ext:memcached |
|
515 | ; types are file, ext:redis, ext:database, ext:memcached | |
513 | ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session |
|
516 | ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session | |
514 | #beaker.session.type = file |
|
517 | #beaker.session.type = file | |
515 | #beaker.session.data_dir = %(here)s/data/sessions |
|
518 | #beaker.session.data_dir = %(here)s/data/sessions | |
516 |
|
519 | |||
517 | ; Redis based sessions |
|
520 | ; Redis based sessions | |
518 | beaker.session.type = ext:redis |
|
521 | beaker.session.type = ext:redis | |
519 | beaker.session.url = redis://redis:6379/2 |
|
522 | beaker.session.url = redis://redis:6379/2 | |
520 |
|
523 | |||
521 | ; DB based session, fast, and allows easy management of logged-in users |
|
524 | ; DB based session, fast, and allows easy management of logged-in users | |
522 | #beaker.session.type = ext:database |
|
525 | #beaker.session.type = ext:database | |
523 | #beaker.session.table_name = db_session |
|
526 | #beaker.session.table_name = db_session | |
524 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
527 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode | |
525 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
528 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode | |
526 | #beaker.session.sa.pool_recycle = 3600 |
|
529 | #beaker.session.sa.pool_recycle = 3600 | |
527 | #beaker.session.sa.echo = false |
|
530 | #beaker.session.sa.echo = false | |
528 |
|
531 | |||
529 | beaker.session.key = rhodecode |
|
532 | beaker.session.key = rhodecode | |
530 | beaker.session.secret = production-rc-uytcxaz |
|
533 | beaker.session.secret = production-rc-uytcxaz | |
531 | beaker.session.lock_dir = /data_ramdisk/lock |
|
534 | beaker.session.lock_dir = /data_ramdisk/lock | |
532 |
|
535 | |||
533 | ; Secure encrypted cookie. Requires the AES python libraries |
|
536 | ; Secure encrypted cookie. Requires the AES python libraries | |
534 | ; you must disable beaker.session.secret to use this |
|
537 | ; you must disable beaker.session.secret to use this | |
535 | #beaker.session.encrypt_key = key_for_encryption |
|
538 | #beaker.session.encrypt_key = key_for_encryption | |
536 | #beaker.session.validate_key = validation_key |
|
539 | #beaker.session.validate_key = validation_key | |
537 |
|
540 | |||
538 | ; Sets the session as invalid (also logging out the user) if it has not been |
|
541 | ; Sets the session as invalid (also logging out the user) if it has not been | |
539 | ; accessed for a given amount of time in seconds |
|
542 | ; accessed for a given amount of time in seconds | |
540 | beaker.session.timeout = 2592000 |
|
543 | beaker.session.timeout = 2592000 | |
541 | beaker.session.httponly = true |
|
544 | beaker.session.httponly = true | |
542 |
|
545 | |||
543 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
546 | ; Path to use for the cookie. Set to prefix if you use prefix middleware | |
544 | #beaker.session.cookie_path = /custom_prefix |
|
547 | #beaker.session.cookie_path = /custom_prefix | |
545 |
|
548 | |||
546 | ; Set https secure cookie |
|
549 | ; Set https secure cookie | |
547 | beaker.session.secure = false |
|
550 | beaker.session.secure = false | |
548 |
|
551 | |||
549 | ; default cookie expiration time in seconds, set to `true` to expire |
|
552 | ; default cookie expiration time in seconds, set to `true` to expire | |
550 | ; at browser close |
|
553 | ; at browser close | |
551 | #beaker.session.cookie_expires = 3600 |
|
554 | #beaker.session.cookie_expires = 3600 | |
552 |
|
555 | |||
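; A minimal sketch of the encrypted-cookie variant described above; key
; values are placeholders and `beaker.session.secret` must be disabled:
;
; #beaker.session.secret = production-rc-uytcxaz
; beaker.session.encrypt_key = key_for_encryption
; beaker.session.validate_key = validation_key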
553 | ; ############################# |
|
556 | ; ############################# | |
554 | ; SEARCH INDEXING CONFIGURATION |
|
557 | ; SEARCH INDEXING CONFIGURATION | |
555 | ; ############################# |
|
558 | ; ############################# | |
556 |
|
559 | |||
557 | ; Full text search indexer is available in rhodecode-tools under |
|
560 | ; Full text search indexer is available in rhodecode-tools under | |
558 | ; `rhodecode-tools index` command |
|
561 | ; `rhodecode-tools index` command | |
559 |
|
562 | |||
560 | ; WHOOSH Backend, doesn't require additional services to run |
|
563 | ; WHOOSH Backend, doesn't require additional services to run | |
561 | ; it works well with a few dozen repos |
|
564 | ; it works well with a few dozen repos | |
562 | search.module = rhodecode.lib.index.whoosh |
|
565 | search.module = rhodecode.lib.index.whoosh | |
563 | search.location = %(here)s/data/index |
|
566 | search.location = %(here)s/data/index | |
564 |
|
567 | |||
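; A sketch relocating the Whoosh index next to the other persistent data;
; the path below is an illustrative assumption:
;
; search.location = /var/opt/rhodecode_data/index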
565 | ; #################### |
|
568 | ; #################### | |
566 | ; CHANNELSTREAM CONFIG |
|
569 | ; CHANNELSTREAM CONFIG | |
567 | ; #################### |
|
570 | ; #################### | |
568 |
|
571 | |||
569 | ; channelstream enables persistent connections and live notifications |
|
572 | ; channelstream enables persistent connections and live notifications | |
570 | ; in the system. It's also used by the chat system |
|
573 | ; in the system. It's also used by the chat system | |
571 |
|
574 | |||
572 | channelstream.enabled = true |
|
575 | channelstream.enabled = true | |
573 |
|
576 | |||
574 | ; server address for channelstream server on the backend |
|
577 | ; server address for channelstream server on the backend | |
575 | channelstream.server = channelstream:9800 |
|
578 | channelstream.server = channelstream:9800 | |
576 |
|
579 | |||
577 | ; location of the channelstream server from outside world |
|
580 | ; location of the channelstream server from outside world | |
578 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
581 | ; use ws:// for http or wss:// for https. This address needs to be handled | |
579 | ; by external HTTP server such as Nginx or Apache |
|
582 | ; by external HTTP server such as Nginx or Apache | |
580 | ; see Nginx/Apache configuration examples in our docs |
|
583 | ; see Nginx/Apache configuration examples in our docs | |
581 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
584 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream | |
582 | channelstream.secret = ENV_GENERATED |
|
585 | channelstream.secret = ENV_GENERATED | |
583 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history |
|
586 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history | |
584 |
|
587 | |||
585 | ; Internal application path that Javascript uses to connect to. |
|
588 | ; Internal application path that Javascript uses to connect to. | |
586 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
589 | ; If you use proxy-prefix the prefix should be added before /_channelstream | |
587 | channelstream.proxy_path = /_channelstream |
|
590 | channelstream.proxy_path = /_channelstream | |
588 |
|
591 | |||
589 |
|
592 | |||
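; A sketch of the TLS variant: with an https site behind Nginx/Apache the
; ws_url should use the wss:// scheme (hostname is a placeholder):
;
; channelstream.ws_url = wss://rhodecode.yourserver.com/_channelstream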
590 | ; ############################## |
|
593 | ; ############################## | |
591 | ; MAIN RHODECODE DATABASE CONFIG |
|
594 | ; MAIN RHODECODE DATABASE CONFIG | |
592 | ; ############################## |
|
595 | ; ############################## | |
593 |
|
596 | |||
594 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
597 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 | |
595 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
598 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode | |
596 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
599 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 | |
597 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
600 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one | |
598 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
601 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode | |
599 |
|
602 | |||
600 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
603 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode | |
601 |
|
604 | |||
602 | ; see sqlalchemy docs for other advanced settings |
|
605 | ; see sqlalchemy docs for other advanced settings | |
603 | ; print the sql statements to output |
|
606 | ; print the sql statements to output | |
604 | sqlalchemy.db1.echo = false |
|
607 | sqlalchemy.db1.echo = false | |
605 |
|
608 | |||
606 | ; recycle the connections after this amount of seconds |
|
609 | ; recycle the connections after this amount of seconds | |
607 | sqlalchemy.db1.pool_recycle = 3600 |
|
610 | sqlalchemy.db1.pool_recycle = 3600 | |
608 |
|
611 | |||
609 | ; the number of connections to keep open inside the connection pool. |
|
612 | ; the number of connections to keep open inside the connection pool. | |
610 | ; 0 indicates no limit |
|
613 | ; 0 indicates no limit | |
611 | ; the general calculation with gevent is: |
|
614 | ; the general calculation with gevent is: | |
612 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
615 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, | |
613 | ; then increase pool size + max overflow so that they add up to 500. |
|
616 | ; then increase pool size + max overflow so that they add up to 500. | |
614 | #sqlalchemy.db1.pool_size = 5 |
|
617 | #sqlalchemy.db1.pool_size = 5 | |
615 |
|
618 | |||
616 | ; The number of connections to allow in connection pool "overflow", that is |
|
619 | ; The number of connections to allow in connection pool "overflow", that is | |
617 | ; connections that can be opened above and beyond the pool_size setting, |
|
620 | ; connections that can be opened above and beyond the pool_size setting, | |
618 | ; which defaults to five. |
|
621 | ; which defaults to five. | |
619 | #sqlalchemy.db1.max_overflow = 10 |
|
622 | #sqlalchemy.db1.max_overflow = 10 | |
620 |
|
623 | |||
621 | ; Connection check ping, used to detect broken database connections |
|
624 | ; Connection check ping, used to detect broken database connections | |
622 | ; can be enabled to better handle MySQL 'server has gone away' errors |
|
625 | ; can be enabled to better handle MySQL 'server has gone away' errors | |
623 | #sqlalchemy.db1.ping_connection = true |
|
626 | #sqlalchemy.db1.ping_connection = true | |
624 |
|
627 | |||
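; Worked example of the sizing rule above: if 500 concurrent greenlets do
; database access, one possible split (an illustrative assumption) is
; pool_size + max_overflow = 100 + 400 = 500:
;
; sqlalchemy.db1.pool_size = 100
; sqlalchemy.db1.max_overflow = 400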
625 | ; ########## |
|
628 | ; ########## | |
626 | ; VCS CONFIG |
|
629 | ; VCS CONFIG | |
627 | ; ########## |
|
630 | ; ########## | |
628 | vcs.server.enable = true |
|
631 | vcs.server.enable = true | |
629 | vcs.server = vcsserver:10010 |
|
632 | vcs.server = vcsserver:10010 | |
630 |
|
633 | |||
631 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
634 | ; Web server connectivity protocol, responsible for web based VCS operations | |
632 | ; Available protocols are: |
|
635 | ; Available protocols are: | |
633 | ; `http` - use http-rpc backend (default) |
|
636 | ; `http` - use http-rpc backend (default) | |
634 | vcs.server.protocol = http |
|
637 | vcs.server.protocol = http | |
635 |
|
638 | |||
636 | ; Push/Pull operations protocol, available options are: |
|
639 | ; Push/Pull operations protocol, available options are: | |
637 | ; `http` - use http-rpc backend (default) |
|
640 | ; `http` - use http-rpc backend (default) | |
638 | vcs.scm_app_implementation = http |
|
641 | vcs.scm_app_implementation = http | |
639 |
|
642 | |||
640 | ; Push/Pull operations hooks protocol, available options are: |
|
643 | ; Push/Pull operations hooks protocol, available options are: | |
641 | ; `http` - use http-rpc backend (default) |
|
644 | ; `http` - use http-rpc backend (default) | |
642 | ; `celery` - use celery based hooks |
|
645 | ; `celery` - use celery based hooks | |
643 | #DEPRECATED:vcs.hooks.protocol = http |
|
646 | #DEPRECATED:vcs.hooks.protocol = http | |
644 | vcs.hooks.protocol.v2 = celery |
|
647 | vcs.hooks.protocol.v2 = celery | |
645 |
|
648 | |||
646 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
649 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be | |
647 | ; accessible over the network. |
|
650 | ; accessible over the network. | |
648 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
651 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) | |
649 | vcs.hooks.host = * |
|
652 | vcs.hooks.host = * | |
650 |
|
653 | |||
651 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
654 | ; Start VCSServer with this instance as a subprocess, useful for development | |
652 | vcs.start_server = false |
|
655 | vcs.start_server = false | |
653 |
|
656 | |||
654 | ; List of enabled VCS backends, available options are: |
|
657 | ; List of enabled VCS backends, available options are: | |
655 | ; `hg` - mercurial |
|
658 | ; `hg` - mercurial | |
656 | ; `git` - git |
|
659 | ; `git` - git | |
657 | ; `svn` - subversion |
|
660 | ; `svn` - subversion | |
658 | vcs.backends = hg, git, svn |
|
661 | vcs.backends = hg, git, svn | |
659 |
|
662 | |||
660 | ; Wait this number of seconds before killing connection to the vcsserver |
|
663 | ; Wait this number of seconds before killing connection to the vcsserver | |
661 | vcs.connection_timeout = 3600 |
|
664 | vcs.connection_timeout = 3600 | |
662 |
|
665 | |||
663 | ; Cache flag to cache vcsserver remote calls locally |
|
666 | ; Cache flag to cache vcsserver remote calls locally | |
664 | ; It uses cache_region `cache_repo` |
|
667 | ; It uses cache_region `cache_repo` | |
665 | vcs.methods.cache = true |
|
668 | vcs.methods.cache = true | |
666 |
|
669 | |||
667 | ; Filesystem location where Git lfs objects should be stored |
|
670 | ; Filesystem location where Git lfs objects should be stored | |
668 | vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store |
|
671 | vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store | |
669 |
|
672 | |||
670 | ; Filesystem location where Mercurial largefile objects should be stored |
|
673 | ; Filesystem location where Mercurial largefile objects should be stored | |
671 | vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store |
|
674 | vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store | |
672 |
|
675 | |||
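; A sketch for a single-host, non-Docker setup where vcsserver runs on the
; same machine; the loopback address is an illustrative assumption:
;
; vcs.hooks.host = 127.0.0.1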
673 | ; #################################################### |
|
676 | ; #################################################### | |
674 | ; Subversion proxy support (mod_dav_svn) |
|
677 | ; Subversion proxy support (mod_dav_svn) | |
675 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
678 | ; Maps RhodeCode repo groups into SVN paths for Apache | |
676 | ; #################################################### |
|
679 | ; #################################################### | |
677 |
|
680 | |||
678 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
681 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. | |
679 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 |
|
682 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 | |
680 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
683 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible | |
681 | #vcs.svn.compatible_version = 1.8 |
|
684 | #vcs.svn.compatible_version = 1.8 | |
682 |
|
685 | |||
683 | ; Redis connection settings for svn integrations logic |
|
686 | ; Redis connection settings for svn integrations logic | |
684 | ; This connection string needs to be the same on ce and vcsserver |
|
687 | ; This connection string needs to be the same on ce and vcsserver | |
685 | vcs.svn.redis_conn = redis://redis:6379/0 |
|
688 | vcs.svn.redis_conn = redis://redis:6379/0 | |
686 |
|
689 | |||
687 | ; Enable SVN proxy of requests over HTTP |
|
690 | ; Enable SVN proxy of requests over HTTP | |
688 | vcs.svn.proxy.enabled = true |
|
691 | vcs.svn.proxy.enabled = true | |
689 |
|
692 | |||
690 | ; host to connect to running SVN subsystem |
|
693 | ; host to connect to running SVN subsystem | |
691 | vcs.svn.proxy.host = http://svn:8090 |
|
694 | vcs.svn.proxy.host = http://svn:8090 | |
692 |
|
695 | |||
693 | ; Enable or disable the config file generation. |
|
696 | ; Enable or disable the config file generation. | |
694 | svn.proxy.generate_config = true |
|
697 | svn.proxy.generate_config = true | |
695 |
|
698 | |||
696 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
699 | ; Generate config file with `SVNListParentPath` set to `On`. | |
697 | svn.proxy.list_parent_path = true |
|
700 | svn.proxy.list_parent_path = true | |
698 |
|
701 | |||
699 | ; Set location and file name of generated config file. |
|
702 | ; Set location and file name of generated config file. | |
700 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf |
|
703 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf | |
701 |
|
704 | |||
702 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
705 | ; alternative mod_dav config template. This needs to be a valid mako template | |
703 | ; Example template can be found in the source code: |
|
706 | ; Example template can be found in the source code: | |
704 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
707 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako | |
705 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
708 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako | |
706 |
|
709 | |||
707 | ; Used as a prefix to the `Location` block in the generated config file. |
|
710 | ; Used as a prefix to the `Location` block in the generated config file. | |
708 | ; In most cases it should be set to `/`. |
|
711 | ; In most cases it should be set to `/`. | |
709 | svn.proxy.location_root = / |
|
712 | svn.proxy.location_root = / | |
710 |
|
713 | |||
711 | ; Command to reload the mod dav svn configuration on change. |
|
714 | ; Command to reload the mod dav svn configuration on change. | |
712 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
715 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh | |
713 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
716 | ; Make sure user who runs RhodeCode process is allowed to reload Apache | |
714 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
717 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload | |
715 |
|
718 | |||
716 | ; If the timeout expires before the reload command finishes, the command will |
|
719 | ; If the timeout expires before the reload command finishes, the command will | |
717 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
720 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. | |
718 | #svn.proxy.reload_timeout = 10 |
|
721 | #svn.proxy.reload_timeout = 10 | |
719 |
|
722 | |||
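; A minimal sketch enabling automatic reload with the example command from
; above; the 30s timeout is an illustrative override of the 10s default:
;
; svn.proxy.reload_cmd = /etc/init.d/apache2 reload
; svn.proxy.reload_timeout = 30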
720 | ; #################### |
|
723 | ; #################### | |
721 | ; SSH Support Settings |
|
724 | ; SSH Support Settings | |
722 | ; #################### |
|
725 | ; #################### | |
723 |
|
726 | |||
724 | ; Defines if a custom authorized_keys file should be created and written on |
|
727 | ; Defines if a custom authorized_keys file should be created and written on | |
725 | ; any change of user ssh keys. Setting this to false also disables the possibility |
|
728 | ; any change of user ssh keys. Setting this to false also disables the possibility | |
726 | ; of adding SSH keys by users from web interface. Super admins can still |
|
729 | ; of adding SSH keys by users from web interface. Super admins can still | |
727 | ; manage SSH Keys. |
|
730 | ; manage SSH Keys. | |
728 | ssh.generate_authorized_keyfile = true |
|
731 | ssh.generate_authorized_keyfile = true | |
729 |
|
732 | |||
730 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
733 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` | |
731 | # ssh.authorized_keys_ssh_opts = |
|
734 | # ssh.authorized_keys_ssh_opts = | |
732 |
|
735 | |||
733 | ; Path to the authorized_keys file where the generated entries are placed. |
|
736 | ; Path to the authorized_keys file where the generated entries are placed. | |
734 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
737 | ; It is possible to have multiple key files specified in `sshd_config` e.g. | |
735 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
738 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode | |
736 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode |
|
739 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode | |
737 |
|
740 | |||
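; A sketch of the matching sshd_config entry, assuming the default
; ssh.authorized_keys_file_path above is kept:
;
; AuthorizedKeysFile %h/.ssh/authorized_keys /etc/rhodecode/conf/ssh/authorized_keys_rhodecode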
738 | ; Command to execute the SSH wrapper. The binary is available in the |
|
741 | ; Command to execute the SSH wrapper. The binary is available in the | |
739 | ; RhodeCode installation directory. |
|
742 | ; RhodeCode installation directory. | |
740 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
743 | ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper | |
741 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
744 | ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 | |
742 | #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper |
|
745 | #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper | |
743 | ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 |
|
746 | ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2 | |
744 |
|
747 | |||
745 | ; Allow shell when executing the ssh-wrapper command |
|
748 | ; Allow shell when executing the ssh-wrapper command | |
746 | ssh.wrapper_cmd_allow_shell = false |
|
749 | ssh.wrapper_cmd_allow_shell = false | |
747 |
|
750 | |||
748 | ; Enables logging, and detailed output sent back to the client during SSH |
|
751 | ; Enables logging, and detailed output sent back to the client during SSH | |
749 | ; operations. Useful for debugging, shouldn't be used in production. |
|
752 | ; operations. Useful for debugging, shouldn't be used in production. | |
750 | ssh.enable_debug_logging = false |
|
753 | ssh.enable_debug_logging = false | |
751 |
|
754 | |||
752 | ; Paths to the binary executables; by default these are just the binary names, but we can |
|
755 | ; Paths to the binary executables; by default these are just the binary names, but we can | |
753 | ; override them if we want to use custom ones |
|
756 | ; override them if we want to use custom ones | |
754 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg |
|
757 | ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg | |
755 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git |
|
758 | ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git | |
756 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve |
|
759 | ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve | |
757 |
|
760 | |||
758 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
761 | ; Enables SSH key generator web interface. Disabling this still allows users | |
759 | ; to add their own keys. |
|
762 | ; to add their own keys. | |
760 | ssh.enable_ui_key_generator = true |
|
763 | ssh.enable_ui_key_generator = true | |
761 |
|
764 | |||
762 | ; Statsd client config, this is used to send metrics to statsd |
|
765 | ; Statsd client config, this is used to send metrics to statsd | |
763 | ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus |
|
766 | ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus | |
764 | #statsd.enabled = false |
|
767 | #statsd.enabled = false | |
765 | #statsd.statsd_host = 0.0.0.0 |
|
768 | #statsd.statsd_host = 0.0.0.0 | |
766 | #statsd.statsd_port = 8125 |
|
769 | #statsd.statsd_port = 8125 | |
767 | #statsd.statsd_prefix = |
|
770 | #statsd.statsd_prefix = | |
768 | #statsd.statsd_ipv6 = false |
|
771 | #statsd.statsd_ipv6 = false | |
769 |
|
772 | |||
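; A minimal sketch enabling the statsd client, assuming a statsd daemon
; (or statsd_exporter) listening on the default port on this host:
;
; statsd.enabled = true
; statsd.statsd_host = 127.0.0.1
; statsd.statsd_port = 8125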
770 | ; Logging is configured automatically at server startup; set to false |
|
773 | ; Logging is configured automatically at server startup; set to false | |
771 | ; to use the custom logging config below. |
|
774 | ; to use the custom logging config below. | |
772 | ; RC_LOGGING_FORMATTER |
|
775 | ; RC_LOGGING_FORMATTER | |
773 | ; RC_LOGGING_LEVEL |
|
776 | ; RC_LOGGING_LEVEL | |
774 | ; env variables can control the settings for logging in case of autoconfigure |
|
777 | ; env variables can control the settings for logging in case of autoconfigure | |
775 |
|
778 | |||
776 | #logging.autoconfigure = true |
|
779 | #logging.autoconfigure = true | |
777 |
|
780 | |||
778 | ; specify your own custom logging config file to configure logging |
|
781 | ; specify your own custom logging config file to configure logging | |
779 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
782 | #logging.logging_conf_file = /path/to/custom_logging.ini | |
780 |
|
783 | |||
781 | ; Dummy marker to add new entries after. |
|
784 | ; Dummy marker to add new entries after. | |
782 | ; Add any custom entries below. Please don't remove this marker. |
|
785 | ; Add any custom entries below. Please don't remove this marker. | |
783 | custom.conf = 1 |
|
786 | custom.conf = 1 | |
784 |
|
787 | |||
785 |
|
788 | |||
786 | ; ##################### |
|
789 | ; ##################### | |
787 | ; LOGGING CONFIGURATION |
|
790 | ; LOGGING CONFIGURATION | |
788 | ; ##################### |
|
791 | ; ##################### | |
789 |
|
792 | |||
790 | [loggers] |
|
793 | [loggers] | |
791 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
794 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper | |
792 |
|
795 | |||
793 | [handlers] |
|
796 | [handlers] | |
794 | keys = console, console_sql |
|
797 | keys = console, console_sql | |
795 |
|
798 | |||
796 | [formatters] |
|
799 | [formatters] | |
797 | keys = generic, json, color_formatter, color_formatter_sql |
|
800 | keys = generic, json, color_formatter, color_formatter_sql | |
798 |
|
801 | |||
799 | ; ####### |
|
802 | ; ####### | |
800 | ; LOGGERS |
|
803 | ; LOGGERS | |
801 | ; ####### |
|
804 | ; ####### | |
802 | [logger_root] |
|
805 | [logger_root] | |
803 | level = NOTSET |
|
806 | level = NOTSET | |
804 | handlers = console |
|
807 | handlers = console | |
805 |
|
808 | |||
806 | [logger_sqlalchemy] |
|
809 | [logger_sqlalchemy] | |
807 | level = INFO |
|
810 | level = INFO | |
808 | handlers = console_sql |
|
811 | handlers = console_sql | |
809 | qualname = sqlalchemy.engine |
|
812 | qualname = sqlalchemy.engine | |
810 | propagate = 0 |
|
813 | propagate = 0 | |
811 |
|
814 | |||
812 | [logger_beaker] |
|
815 | [logger_beaker] | |
813 | level = DEBUG |
|
816 | level = DEBUG | |
814 | handlers = |
|
817 | handlers = | |
815 | qualname = beaker.container |
|
818 | qualname = beaker.container | |
816 | propagate = 1 |
|
819 | propagate = 1 | |
817 |
|
820 | |||
818 | [logger_rhodecode] |
|
821 | [logger_rhodecode] | |
819 | level = DEBUG |
|
822 | level = DEBUG | |
820 | handlers = |
|
823 | handlers = | |
821 | qualname = rhodecode |
|
824 | qualname = rhodecode | |
822 | propagate = 1 |
|
825 | propagate = 1 | |
823 |
|
826 | |||
824 | [logger_ssh_wrapper] |
|
827 | [logger_ssh_wrapper] | |
825 | level = DEBUG |
|
828 | level = DEBUG | |
826 | handlers = |
|
829 | handlers = | |
827 | qualname = ssh_wrapper |
|
830 | qualname = ssh_wrapper | |
828 | propagate = 1 |
|
831 | propagate = 1 | |
829 |
|
832 | |||
830 | [logger_celery] |
|
833 | [logger_celery] | |
831 | level = DEBUG |
|
834 | level = DEBUG | |
832 | handlers = |
|
835 | handlers = | |
833 | qualname = celery |
|
836 | qualname = celery | |
834 |
|
837 | |||
835 |
|
838 | |||
836 | ; ######## |
|
839 | ; ######## | |
837 | ; HANDLERS |
|
840 | ; HANDLERS | |
838 | ; ######## |
|
841 | ; ######## | |
839 |
|
842 | |||
840 | [handler_console] |
|
843 | [handler_console] | |
841 | class = StreamHandler |
|
844 | class = StreamHandler | |
842 | args = (sys.stderr, ) |
|
845 | args = (sys.stderr, ) | |
843 | level = INFO |
|
846 | level = INFO | |
844 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
847 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' | |
845 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
848 | ; This allows sending properly formatted logs to grafana loki or elasticsearch | |
846 | formatter = generic |
|
849 | formatter = generic | |
847 |
|
850 | |||
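; A sketch of the JSON variant mentioned above (e.g. for Grafana Loki or
; Elasticsearch); only the formatter changes:
;
; [handler_console]
; class = StreamHandler
; args = (sys.stderr, )
; level = INFO
; formatter = json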
848 | [handler_console_sql] |
|
851 | [handler_console_sql] | |
849 | ; "level = DEBUG" logs SQL queries and results. |
|
852 | ; "level = DEBUG" logs SQL queries and results. | |
850 | ; "level = INFO" logs SQL queries. |
|
853 | ; "level = INFO" logs SQL queries. | |
851 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
854 | ; "level = WARN" logs neither. (Recommended for production systems.) | |
852 | class = StreamHandler |
|
855 | class = StreamHandler | |
853 | args = (sys.stderr, ) |
|
856 | args = (sys.stderr, ) | |
854 | level = WARN |
|
857 | level = WARN | |
855 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
858 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' | |
856 | ; This allows sending properly formatted logs to grafana loki or elasticsearch |
|
859 | ; This allows sending properly formatted logs to grafana loki or elasticsearch | |
857 | formatter = generic |
|
860 | formatter = generic | |
858 |
|
861 | |||
859 | ; ########## |
|
862 | ; ########## | |
860 | ; FORMATTERS |
|
863 | ; FORMATTERS | |
861 | ; ########## |
|
864 | ; ########## | |
862 |
|
865 | |||
863 | [formatter_generic] |
|
866 | [formatter_generic] | |
864 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
867 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter | |
865 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
868 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
866 | datefmt = %Y-%m-%d %H:%M:%S |
|
869 | datefmt = %Y-%m-%d %H:%M:%S | |
867 |
|
870 | |||
868 | [formatter_color_formatter] |
|
871 | [formatter_color_formatter] | |
869 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
872 | class = rhodecode.lib.logging_formatter.ColorFormatter | |
870 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
873 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
871 | datefmt = %Y-%m-%d %H:%M:%S |
|
874 | datefmt = %Y-%m-%d %H:%M:%S | |
872 |
|
875 | |||
873 | [formatter_color_formatter_sql] |
|
876 | [formatter_color_formatter_sql] | |
874 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
877 | class = rhodecode.lib.logging_formatter.ColorFormatterSql | |
875 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
878 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
876 | datefmt = %Y-%m-%d %H:%M:%S |
|
879 | datefmt = %Y-%m-%d %H:%M:%S | |
877 |
|
880 | |||
878 | [formatter_json] |
|
881 | [formatter_json] | |
879 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
882 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s | |
880 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
|
883 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
@@ -1,175 +1,177 b'' | |||||
1 | .. _rhodecode-release-notes-ref: |
|
1 | .. _rhodecode-release-notes-ref: | |
2 |
|
2 | |||
3 | Release Notes |
|
3 | Release Notes | |
4 | ============= |
|
4 | ============= | |
5 |
|
5 | |||
6 | |RCE| 5.x Versions |
|
6 | |RCE| 5.x Versions | |
7 | ------------------ |
|
7 | ------------------ | |
8 |
|
8 | |||
9 | .. toctree:: |
|
9 | .. toctree:: | |
10 | :maxdepth: 1 |
|
10 | :maxdepth: 1 | |
11 |
|
11 | |||
|
12 | release-notes-5.3.0.rst | |||
|
13 | release-notes-5.2.1.rst | |||
12 | release-notes-5.2.0.rst |
|
14 | release-notes-5.2.0.rst | |
13 | release-notes-5.1.2.rst |
|
15 | release-notes-5.1.2.rst | |
14 | release-notes-5.1.1.rst |
|
16 | release-notes-5.1.1.rst | |
15 | release-notes-5.1.0.rst |
|
17 | release-notes-5.1.0.rst | |
16 | release-notes-5.0.3.rst |
|
18 | release-notes-5.0.3.rst | |
17 | release-notes-5.0.2.rst |
|
19 | release-notes-5.0.2.rst | |
18 | release-notes-5.0.1.rst |
|
20 | release-notes-5.0.1.rst | |
19 | release-notes-5.0.0.rst |
|
21 | release-notes-5.0.0.rst | |
20 |
|
22 | |||
21 |
|
23 | |||
22 | |RCE| 4.x Versions |
|
24 | |RCE| 4.x Versions | |
23 | ------------------ |
|
25 | ------------------ | |
24 |
|
26 | |||
25 | .. toctree:: |
|
27 | .. toctree:: | |
26 | :maxdepth: 1 |
|
28 | :maxdepth: 1 | |
27 |
|
29 | |||
28 | release-notes-4.27.1.rst |
|
30 | release-notes-4.27.1.rst | |
29 | release-notes-4.27.0.rst |
|
31 | release-notes-4.27.0.rst | |
30 | release-notes-4.26.0.rst |
|
32 | release-notes-4.26.0.rst | |
31 | release-notes-4.25.2.rst |
|
33 | release-notes-4.25.2.rst | |
32 | release-notes-4.25.1.rst |
|
34 | release-notes-4.25.1.rst | |
33 | release-notes-4.25.0.rst |
|
35 | release-notes-4.25.0.rst | |
34 | release-notes-4.24.1.rst |
|
36 | release-notes-4.24.1.rst | |
35 | release-notes-4.24.0.rst |
|
37 | release-notes-4.24.0.rst | |
36 | release-notes-4.23.2.rst |
|
38 | release-notes-4.23.2.rst | |
37 | release-notes-4.23.1.rst |
|
39 | release-notes-4.23.1.rst | |
38 | release-notes-4.23.0.rst |
|
40 | release-notes-4.23.0.rst | |
39 | release-notes-4.22.0.rst |
|
41 | release-notes-4.22.0.rst | |
40 | release-notes-4.21.0.rst |
|
42 | release-notes-4.21.0.rst | |
41 | release-notes-4.20.1.rst |
|
43 | release-notes-4.20.1.rst | |
42 | release-notes-4.20.0.rst |
|
44 | release-notes-4.20.0.rst | |
43 | release-notes-4.19.3.rst |
|
45 | release-notes-4.19.3.rst | |
44 | release-notes-4.19.2.rst |
|
46 | release-notes-4.19.2.rst | |
45 | release-notes-4.19.1.rst |
|
47 | release-notes-4.19.1.rst | |
46 | release-notes-4.19.0.rst |
|
48 | release-notes-4.19.0.rst | |
47 | release-notes-4.18.3.rst |
|
49 | release-notes-4.18.3.rst | |
48 | release-notes-4.18.2.rst |
|
50 | release-notes-4.18.2.rst | |
49 | release-notes-4.18.1.rst |
|
51 | release-notes-4.18.1.rst | |
50 | release-notes-4.18.0.rst |
|
52 | release-notes-4.18.0.rst | |
51 | release-notes-4.17.4.rst |
|
53 | release-notes-4.17.4.rst | |
52 | release-notes-4.17.3.rst |
|
54 | release-notes-4.17.3.rst | |
53 | release-notes-4.17.2.rst |
|
55 | release-notes-4.17.2.rst | |
54 | release-notes-4.17.1.rst |
|
56 | release-notes-4.17.1.rst | |
55 | release-notes-4.17.0.rst |
|
57 | release-notes-4.17.0.rst | |
56 | release-notes-4.16.2.rst |
|
58 | release-notes-4.16.2.rst | |
57 | release-notes-4.16.1.rst |
|
59 | release-notes-4.16.1.rst | |
58 | release-notes-4.16.0.rst |
|
60 | release-notes-4.16.0.rst | |
59 | release-notes-4.15.2.rst |
|
61 | release-notes-4.15.2.rst | |
60 | release-notes-4.15.1.rst |
|
62 | release-notes-4.15.1.rst | |
61 | release-notes-4.15.0.rst |
|
63 | release-notes-4.15.0.rst | |
62 | release-notes-4.14.1.rst |
|
64 | release-notes-4.14.1.rst | |
63 | release-notes-4.14.0.rst |
|
65 | release-notes-4.14.0.rst | |
64 | release-notes-4.13.3.rst |
|
66 | release-notes-4.13.3.rst | |
65 | release-notes-4.13.2.rst |
|
67 | release-notes-4.13.2.rst | |
66 | release-notes-4.13.1.rst |
|
68 | release-notes-4.13.1.rst | |
67 | release-notes-4.13.0.rst |
|
69 | release-notes-4.13.0.rst | |
68 | release-notes-4.12.4.rst |
|
70 | release-notes-4.12.4.rst | |
69 | release-notes-4.12.3.rst |
|
71 | release-notes-4.12.3.rst | |
70 | release-notes-4.12.2.rst |
|
72 | release-notes-4.12.2.rst | |
71 | release-notes-4.12.1.rst |
|
73 | release-notes-4.12.1.rst | |
72 | release-notes-4.12.0.rst |
|
74 | release-notes-4.12.0.rst | |
73 | release-notes-4.11.6.rst |
|
75 | release-notes-4.11.6.rst | |
74 | release-notes-4.11.5.rst |
|
76 | release-notes-4.11.5.rst | |
75 | release-notes-4.11.4.rst |
|
77 | release-notes-4.11.4.rst | |
76 | release-notes-4.11.3.rst |
|
78 | release-notes-4.11.3.rst | |
77 | release-notes-4.11.2.rst |
|
79 | release-notes-4.11.2.rst | |
78 | release-notes-4.11.1.rst |
|
80 | release-notes-4.11.1.rst | |
79 | release-notes-4.11.0.rst |
|
81 | release-notes-4.11.0.rst | |
80 | release-notes-4.10.6.rst |
|
82 | release-notes-4.10.6.rst | |
81 | release-notes-4.10.5.rst |
|
83 | release-notes-4.10.5.rst | |
82 | release-notes-4.10.4.rst |
|
84 | release-notes-4.10.4.rst | |
83 | release-notes-4.10.3.rst |
|
85 | release-notes-4.10.3.rst | |
84 | release-notes-4.10.2.rst |
|
86 | release-notes-4.10.2.rst | |
85 | release-notes-4.10.1.rst |
|
87 | release-notes-4.10.1.rst | |
86 | release-notes-4.10.0.rst |
|
88 | release-notes-4.10.0.rst | |
87 | release-notes-4.9.1.rst |
|
89 | release-notes-4.9.1.rst | |
88 | release-notes-4.9.0.rst |
|
90 | release-notes-4.9.0.rst | |
89 | release-notes-4.8.0.rst |
|
91 | release-notes-4.8.0.rst | |
90 | release-notes-4.7.2.rst |
|
92 | release-notes-4.7.2.rst | |
91 | release-notes-4.7.1.rst |
|
93 | release-notes-4.7.1.rst | |
92 | release-notes-4.7.0.rst |
|
94 | release-notes-4.7.0.rst | |
93 | release-notes-4.6.1.rst |
|
95 | release-notes-4.6.1.rst | |
94 | release-notes-4.6.0.rst |
|
96 | release-notes-4.6.0.rst | |
95 | release-notes-4.5.2.rst |
|
97 | release-notes-4.5.2.rst | |
96 | release-notes-4.5.1.rst |
|
98 | release-notes-4.5.1.rst | |
97 | release-notes-4.5.0.rst |
|
99 | release-notes-4.5.0.rst | |
98 | release-notes-4.4.2.rst |
|
100 | release-notes-4.4.2.rst | |
99 | release-notes-4.4.1.rst |
|
101 | release-notes-4.4.1.rst | |
100 | release-notes-4.4.0.rst |
|
102 | release-notes-4.4.0.rst | |
101 | release-notes-4.3.1.rst |
|
103 | release-notes-4.3.1.rst | |
102 | release-notes-4.3.0.rst |
|
104 | release-notes-4.3.0.rst | |
103 | release-notes-4.2.1.rst |
|
105 | release-notes-4.2.1.rst | |
104 | release-notes-4.2.0.rst |
|
106 | release-notes-4.2.0.rst | |
105 | release-notes-4.1.2.rst |
|
107 | release-notes-4.1.2.rst | |
106 | release-notes-4.1.1.rst |
|
108 | release-notes-4.1.1.rst | |
107 | release-notes-4.1.0.rst |
|
109 | release-notes-4.1.0.rst | |
108 | release-notes-4.0.1.rst |
|
110 | release-notes-4.0.1.rst | |
109 | release-notes-4.0.0.rst |
|
111 | release-notes-4.0.0.rst | |
110 |
|
112 | |||
111 | |RCE| 3.x Versions |
|
113 | |RCE| 3.x Versions | |
112 | ------------------ |
|
114 | ------------------ | |
113 |
|
115 | |||
114 | .. toctree:: |
|
116 | .. toctree:: | |
115 | :maxdepth: 1 |
|
117 | :maxdepth: 1 | |
116 |
|
118 | |||
117 | release-notes-3.8.4.rst |
|
119 | release-notes-3.8.4.rst | |
118 | release-notes-3.8.3.rst |
|
120 | release-notes-3.8.3.rst | |
119 | release-notes-3.8.2.rst |
|
121 | release-notes-3.8.2.rst | |
120 | release-notes-3.8.1.rst |
|
122 | release-notes-3.8.1.rst | |
121 | release-notes-3.8.0.rst |
|
123 | release-notes-3.8.0.rst | |
122 | release-notes-3.7.1.rst |
|
124 | release-notes-3.7.1.rst | |
123 | release-notes-3.7.0.rst |
|
125 | release-notes-3.7.0.rst | |
124 | release-notes-3.6.1.rst |
|
126 | release-notes-3.6.1.rst | |
125 | release-notes-3.6.0.rst |
|
127 | release-notes-3.6.0.rst | |
126 | release-notes-3.5.2.rst |
|
128 | release-notes-3.5.2.rst | |
127 | release-notes-3.5.1.rst |
|
129 | release-notes-3.5.1.rst | |
128 | release-notes-3.5.0.rst |
|
130 | release-notes-3.5.0.rst | |
129 | release-notes-3.4.1.rst |
|
131 | release-notes-3.4.1.rst | |
130 | release-notes-3.4.0.rst |
|
132 | release-notes-3.4.0.rst | |
131 | release-notes-3.3.4.rst |
|
133 | release-notes-3.3.4.rst | |
132 | release-notes-3.3.3.rst |
|
134 | release-notes-3.3.3.rst | |
133 | release-notes-3.3.2.rst |
|
135 | release-notes-3.3.2.rst | |
134 | release-notes-3.3.1.rst |
|
136 | release-notes-3.3.1.rst | |
135 | release-notes-3.3.0.rst |
|
137 | release-notes-3.3.0.rst | |
136 | release-notes-3.2.3.rst |
|
138 | release-notes-3.2.3.rst | |
137 | release-notes-3.2.2.rst |
|
139 | release-notes-3.2.2.rst | |
138 | release-notes-3.2.1.rst |
|
140 | release-notes-3.2.1.rst | |
139 | release-notes-3.2.0.rst |
|
141 | release-notes-3.2.0.rst | |
140 | release-notes-3.1.1.rst |
|
142 | release-notes-3.1.1.rst | |
141 | release-notes-3.1.0.rst |
|
143 | release-notes-3.1.0.rst | |
142 | release-notes-3.0.2.rst |
|
144 | release-notes-3.0.2.rst | |
143 | release-notes-3.0.1.rst |
|
145 | release-notes-3.0.1.rst | |
144 | release-notes-3.0.0.rst |
|
146 | release-notes-3.0.0.rst | |
145 |
|
147 | |||
146 | |RCE| 2.x Versions |
|
148 | |RCE| 2.x Versions | |
147 | ------------------ |
|
149 | ------------------ | |
148 |
|
150 | |||
149 | .. toctree:: |
|
151 | .. toctree:: | |
150 | :maxdepth: 1 |
|
152 | :maxdepth: 1 | |
151 |
|
153 | |||
152 | release-notes-2.2.8.rst |
|
154 | release-notes-2.2.8.rst | |
153 | release-notes-2.2.7.rst |
|
155 | release-notes-2.2.7.rst | |
154 | release-notes-2.2.6.rst |
|
156 | release-notes-2.2.6.rst | |
155 | release-notes-2.2.5.rst |
|
157 | release-notes-2.2.5.rst | |
156 | release-notes-2.2.4.rst |
|
158 | release-notes-2.2.4.rst | |
157 | release-notes-2.2.3.rst |
|
159 | release-notes-2.2.3.rst | |
158 | release-notes-2.2.2.rst |
|
160 | release-notes-2.2.2.rst | |
159 | release-notes-2.2.1.rst |
|
161 | release-notes-2.2.1.rst | |
160 | release-notes-2.2.0.rst |
|
162 | release-notes-2.2.0.rst | |
161 | release-notes-2.1.0.rst |
|
163 | release-notes-2.1.0.rst | |
162 | release-notes-2.0.2.rst |
|
164 | release-notes-2.0.2.rst | |
163 | release-notes-2.0.1.rst |
|
165 | release-notes-2.0.1.rst | |
164 | release-notes-2.0.0.rst |
|
166 | release-notes-2.0.0.rst | |
165 |
|
167 | |||
166 | |RCE| 1.x Versions |
|
168 | |RCE| 1.x Versions | |
167 | ------------------ |
|
169 | ------------------ | |
168 |
|
170 | |||
169 | .. toctree:: |
|
171 | .. toctree:: | |
170 | :maxdepth: 1 |
|
172 | :maxdepth: 1 | |
171 |
|
173 | |||
172 | release-notes-1.7.2.rst |
|
174 | release-notes-1.7.2.rst | |
173 | release-notes-1.7.1.rst |
|
175 | release-notes-1.7.1.rst | |
174 | release-notes-1.7.0.rst |
|
176 | release-notes-1.7.0.rst | |
175 | release-notes-1.6.0.rst |
|
177 | release-notes-1.6.0.rst |
@@ -1,249 +1,253 b'' | |||||
1 |
|
1 | |||
2 |
|
2 | |||
3 | # Copyright (C) 2016-2023 RhodeCode GmbH |
|
3 | # Copyright (C) 2016-2023 RhodeCode GmbH | |
4 | # |
|
4 | # | |
5 | # This program is free software: you can redistribute it and/or modify |
|
5 | # This program is free software: you can redistribute it and/or modify | |
6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
6 | # it under the terms of the GNU Affero General Public License, version 3 | |
7 | # (only), as published by the Free Software Foundation. |
|
7 | # (only), as published by the Free Software Foundation. | |
8 | # |
|
8 | # | |
9 | # This program is distributed in the hope that it will be useful, |
|
9 | # This program is distributed in the hope that it will be useful, | |
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | # GNU General Public License for more details. |
|
12 | # GNU General Public License for more details. | |
13 | # |
|
13 | # | |
14 | # You should have received a copy of the GNU Affero General Public License |
|
14 | # You should have received a copy of the GNU Affero General Public License | |
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | # |
|
16 | # | |
17 | # This program is dual-licensed. If you wish to learn more about the |
|
17 | # This program is dual-licensed. If you wish to learn more about the | |
18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
18 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
20 |
|
20 | |||
21 | import logging |
|
21 | import logging | |
22 | import urllib.request |
|
22 | import urllib.request | |
23 | import urllib.error |
|
23 | import urllib.error | |
24 | import urllib.parse |
|
24 | import urllib.parse | |
25 | import os |
|
25 | import os | |
26 |
|
26 | |||
27 | import rhodecode |
|
27 | import rhodecode | |
28 | from rhodecode.apps._base import BaseAppView |
|
28 | from rhodecode.apps._base import BaseAppView | |
29 | from rhodecode.apps._base.navigation import navigation_list |
|
29 | from rhodecode.apps._base.navigation import navigation_list | |
30 | from rhodecode.lib import helpers as h |
|
30 | from rhodecode.lib import helpers as h | |
31 | from rhodecode.lib.auth import (LoginRequired, HasPermissionAllDecorator) |
|
31 | from rhodecode.lib.auth import (LoginRequired, HasPermissionAllDecorator) | |
32 | from rhodecode.lib.utils2 import str2bool |
|
32 | from rhodecode.lib.utils2 import str2bool | |
33 | from rhodecode.lib import system_info |
|
33 | from rhodecode.lib import system_info | |
34 | from rhodecode.model.update import UpdateModel |
|
34 | from rhodecode.model.update import UpdateModel | |
35 |
|
35 | |||
36 | log = logging.getLogger(__name__) |
|
36 | log = logging.getLogger(__name__) | |
37 |
|
37 | |||
38 |
|
38 | |||
39 | class AdminSystemInfoSettingsView(BaseAppView): |
|
39 | class AdminSystemInfoSettingsView(BaseAppView): | |
40 | def load_default_context(self): |
|
40 | def load_default_context(self): | |
41 | c = self._get_local_tmpl_context() |
|
41 | c = self._get_local_tmpl_context() | |
42 | return c |
|
42 | return c | |
43 |
|
43 | |||
44 | def get_env_data(self): |
|
44 | def get_env_data(self): | |
45 | black_list = [ |
|
45 | black_list = [ | |
46 | 'NIX_LDFLAGS', |
|
46 | 'NIX_LDFLAGS', | |
47 | 'NIX_CFLAGS_COMPILE', |
|
47 | 'NIX_CFLAGS_COMPILE', | |
48 | 'propagatedBuildInputs', |
|
48 | 'propagatedBuildInputs', | |
49 | 'propagatedNativeBuildInputs', |
|
49 | 'propagatedNativeBuildInputs', | |
50 | 'postInstall', |
|
50 | 'postInstall', | |
51 | 'buildInputs', |
|
51 | 'buildInputs', | |
52 | 'buildPhase', |
|
52 | 'buildPhase', | |
53 | 'preShellHook', |
|
53 | 'preShellHook', | |
54 | 'preShellHook', |
|
54 | 'preShellHook', | |
55 | 'preCheck', |
|
55 | 'preCheck', | |
56 | 'preBuild', |
|
56 | 'preBuild', | |
57 | 'postShellHook', |
|
57 | 'postShellHook', | |
58 | 'postFixup', |
|
58 | 'postFixup', | |
59 | 'postCheck', |
|
59 | 'postCheck', | |
60 | 'nativeBuildInputs', |
|
60 | 'nativeBuildInputs', | |
61 | 'installPhase', |
|
61 | 'installPhase', | |
62 | 'installCheckPhase', |
|
62 | 'installCheckPhase', | |
63 | 'checkPhase', |
|
63 | 'checkPhase', | |
64 | 'configurePhase', |
|
64 | 'configurePhase', | |
65 | 'shellHook' |
|
65 | 'shellHook' | |
66 | ] |
|
66 | ] | |
67 | secret_list = [ |
|
67 | secret_list = [ | |
68 | 'RHODECODE_USER_PASS' |
|
68 | 'RHODECODE_USER_PASS' | |
69 | ] |
|
69 | ] | |
70 |
|
70 | |||
71 | for k, v in sorted(os.environ.items()): |
|
71 | for k, v in sorted(os.environ.items()): | |
72 | if k in black_list: |
|
72 | if k in black_list: | |
73 | continue |
|
73 | continue | |
74 | if k in secret_list: |
|
74 | if k in secret_list: | |
75 | v = '*****' |
|
75 | v = '*****' | |
76 | yield k, v |
|
76 | yield k, v | |
77 |
|
77 | |||
78 | @LoginRequired() |
|
78 | @LoginRequired() | |
79 | @HasPermissionAllDecorator('hg.admin') |
|
79 | @HasPermissionAllDecorator('hg.admin') | |
80 | def settings_system_info(self): |
|
80 | def settings_system_info(self): | |
81 | _ = self.request.translate |
|
81 | _ = self.request.translate | |
82 | c = self.load_default_context() |
|
82 | c = self.load_default_context() | |
83 |
|
83 | |||
84 | c.active = 'system' |
|
84 | c.active = 'system' | |
85 | c.navlist = navigation_list(self.request) |
|
85 | c.navlist = navigation_list(self.request) | |
86 |
|
86 | |||
87 | # TODO(marcink): figure out how to allow only selected users to do this |
|
87 | # TODO(marcink): figure out how to allow only selected users to do this | |
88 | c.allowed_to_snapshot = self._rhodecode_user.admin |
|
88 | c.allowed_to_snapshot = self._rhodecode_user.admin | |
89 |
|
89 | |||
90 | snapshot = str2bool(self.request.params.get('snapshot')) |
|
90 | snapshot = str2bool(self.request.params.get('snapshot')) | |
91 |
|
91 | |||
92 | c.rhodecode_update_url = UpdateModel().get_update_url() |
|
92 | c.rhodecode_update_url = UpdateModel().get_update_url() | |
93 | c.env_data = self.get_env_data() |
|
93 | c.env_data = self.get_env_data() | |
94 | server_info = system_info.get_system_info(self.request.environ) |
|
94 | server_info = system_info.get_system_info(self.request.environ) | |
95 |
|
95 | |||
96 | for key, val in server_info.items(): |
|
96 | for key, val in server_info.items(): | |
97 | setattr(c, key, val) |
|
97 | setattr(c, key, val) | |
98 |
|
98 | |||
99 | def val(name, subkey='human_value'): |
|
99 | def val(name, subkey='human_value'): | |
100 | return server_info[name][subkey] |
|
100 | return server_info[name][subkey] | |
101 |
|
101 | |||
102 | def state(name): |
|
102 | def state(name): | |
103 | return server_info[name]['state'] |
|
103 | return server_info[name]['state'] | |
104 |
|
104 | |||
105 | def val2(name): |
|
105 | def val2(name): | |
106 | val = server_info[name]['human_value'] |
|
106 | val = server_info[name]['human_value'] | |
107 | state = server_info[name]['state'] |
|
107 | state = server_info[name]['state'] | |
108 | return val, state |
|
108 | return val, state | |
109 |
|
109 | |||
110 | update_info_msg = _('Note: please make sure this server can ' |
|
110 | update_info_msg = _('Note: please make sure this server can ' | |
111 | 'access `${url}` for the update link to work', |
|
111 | 'access `${url}` for the update link to work', | |
112 | mapping=dict(url=c.rhodecode_update_url)) |
|
112 | mapping=dict(url=c.rhodecode_update_url)) | |
113 | version = UpdateModel().get_stored_version() |
|
113 | version = UpdateModel().get_stored_version() | |
114 | is_outdated = UpdateModel().is_outdated( |
|
114 | is_outdated = UpdateModel().is_outdated( | |
115 | rhodecode.__version__, version) |
|
115 | rhodecode.__version__, version) | |
116 | update_state = { |
|
116 | update_state = { | |
117 | 'type': 'warning', |
|
117 | 'type': 'warning', | |
118 | 'message': 'New version available: {}'.format(version) |
|
118 | 'message': 'New version available: {}'.format(version) | |
119 | } \ |
|
119 | } \ | |
120 | if is_outdated else {} |
|
120 | if is_outdated else {} | |
121 | c.data_items = [ |
|
121 | c.data_items = [ | |
122 | # update info |
|
122 | # update info | |
123 | (_('Update info'), h.literal( |
|
123 | (_('Update info'), h.literal( | |
124 | '<span class="link" id="check_for_update" >%s.</span>' % ( |
|
124 | '<span class="link" id="check_for_update" >%s.</span>' % ( | |
125 | _('Check for updates')) + |
|
125 | _('Check for updates')) + | |
126 | '<br/> <span >%s.</span>' % (update_info_msg) |
|
126 | '<br/> <span >%s.</span>' % (update_info_msg) | |
127 | ), ''), |
|
127 | ), ''), | |
128 |
|
128 | |||
129 | # RhodeCode specific |
|
129 | # RhodeCode specific | |
130 | (_('RhodeCode Version'), val('rhodecode_app')['text'], state('rhodecode_app')), |
|
130 | (_('RhodeCode Version'), val('rhodecode_app')['text'], state('rhodecode_app')), | |
131 | (_('Latest version'), version, update_state), |
|
131 | (_('Latest version'), version, update_state), | |
132 | (_('RhodeCode Base URL'), val('rhodecode_config')['config'].get('app.base_url'), state('rhodecode_config')), |
|
132 | (_('RhodeCode Base URL'), val('rhodecode_config')['config'].get('app.base_url'), state('rhodecode_config')), | |
133 | (_('RhodeCode Server IP'), val('server')['server_ip'], state('server')), |
|
133 | (_('RhodeCode Server IP'), val('server')['server_ip'], state('server')), | |
134 | (_('RhodeCode Server ID'), val('server')['server_id'], state('server')), |
|
134 | (_('RhodeCode Server ID'), val('server')['server_id'], state('server')), | |
135 | (_('RhodeCode Configuration'), val('rhodecode_config')['path'], state('rhodecode_config')), |
|
135 | (_('RhodeCode Configuration'), val('rhodecode_config')['path'], state('rhodecode_config')), | |
136 | (_('RhodeCode Certificate'), val('rhodecode_config')['cert_path'], state('rhodecode_config')), |
|
136 | (_('RhodeCode Certificate'), val('rhodecode_config')['cert_path'], state('rhodecode_config')), | |
137 | (_('Workers'), val('rhodecode_config')['config']['server:main'].get('workers', '?'), state('rhodecode_config')), |
|
137 | (_('Workers'), val('rhodecode_config')['config']['server:main'].get('workers', '?'), state('rhodecode_config')), | |
138 | (_('Worker Type'), val('rhodecode_config')['config']['server:main'].get('worker_class', 'sync'), state('rhodecode_config')), |
|
138 | (_('Worker Type'), val('rhodecode_config')['config']['server:main'].get('worker_class', 'sync'), state('rhodecode_config')), | |
139 | ('', '', ''), # spacer |
|
139 | ('', '', ''), # spacer | |
140 |
|
140 | |||
141 | # Database |
|
141 | # Database | |
142 | (_('Database'), val('database')['url'], state('database')), |
|
142 | (_('Database'), val('database')['url'], state('database')), | |
143 | (_('Database version'), val('database')['version'], state('database')), |
|
143 | (_('Database version'), val('database')['version'], state('database')), | |
144 | ('', '', ''), # spacer |
|
144 | ('', '', ''), # spacer | |
145 |
|
145 | |||
146 | # Platform/Python |
|
146 | # Platform/Python | |
147 | (_('Platform'), val('platform')['name'], state('platform')), |
|
147 | (_('Platform'), val('platform')['name'], state('platform')), | |
148 | (_('Platform UUID'), val('platform')['uuid'], state('platform')), |
|
148 | (_('Platform UUID'), val('platform')['uuid'], state('platform')), | |
149 | (_('Lang'), val('locale'), state('locale')), |
|
149 | (_('Lang'), val('locale'), state('locale')), | |
150 | (_('Python version'), val('python')['version'], state('python')), |
|
150 | (_('Python version'), val('python')['version'], state('python')), | |
151 | (_('Python path'), val('python')['executable'], state('python')), |
|
151 | (_('Python path'), val('python')['executable'], state('python')), | |
152 | ('', '', ''), # spacer |
|
152 | ('', '', ''), # spacer | |
153 |
|
153 | |||
154 | # Systems stats |
|
154 | # Systems stats | |
155 | (_('CPU'), val('cpu')['text'], state('cpu')), |
|
155 | (_('CPU'), val('cpu')['text'], state('cpu')), | |
156 | (_('Load'), val('load')['text'], state('load')), |
|
156 | (_('Load'), val('load')['text'], state('load')), | |
157 | (_('Memory'), val('memory')['text'], state('memory')), |
|
157 | (_('Memory'), val('memory')['text'], state('memory')), | |
158 | (_('Uptime'), val('uptime')['text'], state('uptime')), |
|
158 | (_('Uptime'), val('uptime')['text'], state('uptime')), | |
159 | ('', '', ''), # spacer |
|
159 | ('', '', ''), # spacer | |
160 |
|
160 | |||
161 | # ulimit |
|
161 | # ulimit | |
162 | (_('Ulimit'), val('ulimit')['text'], state('ulimit')), |
|
162 | (_('Ulimit'), val('ulimit')['text'], state('ulimit')), | |
163 |
|
163 | |||
164 | # Repo storage |
|
164 | # Repo storage | |
165 | (_('Storage location'), val('storage')['path'], state('storage')), |
|
165 | (_('Storage location'), val('storage')['path'], state('storage')), | |
166 | (_('Storage info'), val('storage')['text'], state('storage')), |
|
166 | (_('Storage info'), val('storage')['text'], state('storage')), | |
167 | (_('Storage inodes'), val('storage_inodes')['text'], state('storage_inodes')), |
|
167 | (_('Storage inodes'), val('storage_inodes')['text'], state('storage_inodes')), | |
168 | ('', '', ''), # spacer |
|
168 | ('', '', ''), # spacer | |
169 |
|
169 | |||
170 | (_('Gist storage location'), val('storage_gist')['path'], state('storage_gist')), |
|
170 | (_('Gist storage location'), val('storage_gist')['path'], state('storage_gist')), | |
171 | (_('Gist storage info'), val('storage_gist')['text'], state('storage_gist')), |
|
171 | (_('Gist storage info'), val('storage_gist')['text'], state('storage_gist')), | |
172 | ('', '', ''), # spacer |
|
172 | ('', '', ''), # spacer | |
173 |
|
173 | |||
174 | (_('Artifacts storage backend'), val('storage_artifacts')['type'], state('storage_artifacts')), |
|
174 | (_('Artifacts storage backend'), val('storage_artifacts')['type'], state('storage_artifacts')), | |
175 | (_('Artifacts storage location'), val('storage_artifacts')['path'], state('storage_artifacts')), |
|
175 | (_('Artifacts storage location'), val('storage_artifacts')['path'], state('storage_artifacts')), | |
176 | (_('Artifacts info'), val('storage_artifacts')['text'], state('storage_artifacts')), |
|
176 | (_('Artifacts info'), val('storage_artifacts')['text'], state('storage_artifacts')), | |
177 | ('', '', ''), # spacer |
|
177 | ('', '', ''), # spacer | |
178 |
|
178 | |||
179 | (_('Archive cache storage backend'), val('storage_archive')['type'], state('storage_archive')), |
|
179 | (_('Archive cache storage backend'), val('storage_archive')['type'], state('storage_archive')), | |
180 | (_('Archive cache storage location'), val('storage_archive')['path'], state('storage_archive')), |
|
180 | (_('Archive cache storage location'), val('storage_archive')['path'], state('storage_archive')), | |
181 | (_('Archive cache info'), val('storage_archive')['text'], state('storage_archive')), |
|
181 | (_('Archive cache info'), val('storage_archive')['text'], state('storage_archive')), | |
182 | ('', '', ''), # spacer |
|
182 | ('', '', ''), # spacer | |
183 |
|
183 | |||
184 |
|
184 | |||
185 | (_('Temp storage location'), val('storage_temp')['path'], state('storage_temp')), |
|
185 | (_('Temp storage location'), val('storage_temp')['path'], state('storage_temp')), | |
186 | (_('Temp storage info'), val('storage_temp')['text'], state('storage_temp')), |
|
186 | (_('Temp storage info'), val('storage_temp')['text'], state('storage_temp')), | |
187 | ('', '', ''), # spacer |
|
187 | ('', '', ''), # spacer | |
188 |
|
188 | |||
189 | (_('Search info'), val('search')['text'], state('search')), |
|
189 | (_('Search info'), val('search')['text'], state('search')), | |
190 | (_('Search location'), val('search')['location'], state('search')), |
|
190 | (_('Search location'), val('search')['location'], state('search')), | |
191 | ('', '', ''), # spacer |
|
191 | ('', '', ''), # spacer | |
192 |
|
192 | |||
193 | # VCS specific |
|
193 | # VCS specific | |
194 | (_('VCS Backends'), val('vcs_backends'), state('vcs_backends')), |
|
194 | (_('VCS Backends'), val('vcs_backends'), state('vcs_backends')), | |
195 | (_('VCS Server'), val('vcs_server')['text'], state('vcs_server')), |
|
195 | (_('VCS Server'), val('vcs_server')['text'], state('vcs_server')), | |
196 | (_('GIT'), val('git'), state('git')), |
|
196 | (_('GIT'), val('git'), state('git')), | |
197 | (_('HG'), val('hg'), state('hg')), |
|
197 | (_('HG'), val('hg'), state('hg')), | |
198 | (_('SVN'), val('svn'), state('svn')), |
|
198 | (_('SVN'), val('svn'), state('svn')), | |
199 |
|
199 | |||
200 | ] |
|
200 | ] | |
201 |
|
201 | |||
|
202 | c.rhodecode_data_items = [ | |||
|
203 | (k, v) for k, v in sorted((val('rhodecode_server_config') or {}).items(), key=lambda x: x[0].lower()) | |||
|
204 | ] | |||
|
205 | ||||
202 | c.vcsserver_data_items = [ |
|
206 | c.vcsserver_data_items = [ | |
203 | (k, v) for k, v in (val('vcs_server_config') or {}).items() |
|
207 | (k, v) for k, v in sorted((val('vcs_server_config') or {}).items(), key=lambda x: x[0].lower()) | |
204 | ] |
|
208 | ] | |
205 |
|
209 | |||
206 | if snapshot: |
|
210 | if snapshot: | |
207 | if c.allowed_to_snapshot: |
|
211 | if c.allowed_to_snapshot: | |
208 | c.data_items.pop(0) # remove server info |
|
212 | c.data_items.pop(0) # remove server info | |
209 | self.request.override_renderer = 'admin/settings/settings_system_snapshot.mako' |
|
213 | self.request.override_renderer = 'admin/settings/settings_system_snapshot.mako' | |
210 | else: |
|
214 | else: | |
211 | h.flash('You are not allowed to do this', category='warning') |
|
215 | h.flash('You are not allowed to do this', category='warning') | |
212 | return self._get_template_context(c) |
|
216 | return self._get_template_context(c) | |
213 |
|
217 | |||
214 | @LoginRequired() |
|
218 | @LoginRequired() | |
215 | @HasPermissionAllDecorator('hg.admin') |
|
219 | @HasPermissionAllDecorator('hg.admin') | |
216 | def settings_system_info_check_update(self): |
|
220 | def settings_system_info_check_update(self): | |
217 | _ = self.request.translate |
|
221 | _ = self.request.translate | |
218 | c = self.load_default_context() |
|
222 | c = self.load_default_context() | |
219 |
|
223 | |||
220 | update_url = UpdateModel().get_update_url() |
|
224 | update_url = UpdateModel().get_update_url() | |
221 |
|
225 | |||
222 | def _err(s): |
|
226 | def _err(s): | |
223 | return f'<div style="color:#ff8888; padding:4px 0px">{s}</div>' |
|
227 | return f'<div style="color:#ff8888; padding:4px 0px">{s}</div>' | |
224 |
|
228 | |||
225 | try: |
|
229 | try: | |
226 | data = UpdateModel().get_update_data(update_url) |
|
230 | data = UpdateModel().get_update_data(update_url) | |
227 | except urllib.error.URLError as e: |
|
231 | except urllib.error.URLError as e: | |
228 | log.exception("Exception contacting upgrade server") |
|
232 | log.exception("Exception contacting upgrade server") | |
229 | self.request.override_renderer = 'string' |
|
233 | self.request.override_renderer = 'string' | |
230 | return _err('Failed to contact upgrade server: %r' % e) |
|
234 | return _err('Failed to contact upgrade server: %r' % e) | |
231 | except ValueError as e: |
|
235 | except ValueError as e: | |
232 | log.exception("Bad data sent from update server") |
|
236 | log.exception("Bad data sent from update server") | |
233 | self.request.override_renderer = 'string' |
|
237 | self.request.override_renderer = 'string' | |
234 | return _err('Bad data sent from update server') |
|
238 | return _err('Bad data sent from update server') | |
235 |
|
239 | |||
236 | latest = data['versions'][0] |
|
240 | latest = data['versions'][0] | |
237 |
|
241 | |||
238 | c.update_url = update_url |
|
242 | c.update_url = update_url | |
239 | c.latest_data = latest |
|
243 | c.latest_data = latest | |
240 | c.latest_ver = (latest['version'] or '').strip() |
|
244 | c.latest_ver = (latest['version'] or '').strip() | |
241 | c.cur_ver = self.request.GET.get('ver') or rhodecode.__version__ |
|
245 | c.cur_ver = self.request.GET.get('ver') or rhodecode.__version__ | |
242 | c.should_upgrade = False |
|
246 | c.should_upgrade = False | |
243 |
|
247 | |||
244 | is_outdated = UpdateModel().is_outdated(c.cur_ver, c.latest_ver) |
|
248 | is_outdated = UpdateModel().is_outdated(c.cur_ver, c.latest_ver) | |
245 | if is_outdated: |
|
249 | if is_outdated: | |
246 | c.should_upgrade = True |
|
250 | c.should_upgrade = True | |
247 | c.important_notices = latest['general'] |
|
251 | c.important_notices = latest['general'] | |
248 | UpdateModel().store_version(latest['version']) |
|
252 | UpdateModel().store_version(latest['version']) | |
249 | return self._get_template_context(c) |
|
253 | return self._get_template_context(c) |
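
The new rhodecode_data_items block above exposes the server config as sorted key/value pairs, and the same case-insensitive ordering is applied to vcsserver_data_items. A standalone sketch of that sort idiom, with a made-up sample config standing in for val('rhodecode_server_config'):

    server_config = {
        'app.base_url': 'https://code.example.com',
        'Debug': 'false',
        'vcs.server': 'localhost:9900',
    }

    data_items = [
        (k, v) for k, v in sorted((server_config or {}).items(), key=lambda x: x[0].lower())
    ]
    # keying on x[0].lower() keeps 'Debug' between the lowercase keys instead of
    # sorting all uppercase names first:
    # [('app.base_url', ...), ('Debug', 'false'), ('vcs.server', ...)]
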
@@ -1,128 +1,122 b'' | |||||
1 | # Copyright (C) 2011-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2011-2023 RhodeCode GmbH | |
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 | import logging |
|
19 | import logging | |
20 |
|
20 | |||
21 | from pyramid.httpexceptions import HTTPFound |
|
21 | from pyramid.httpexceptions import HTTPFound | |
22 |
|
22 | |||
23 | from rhodecode.apps._base import RepoAppView |
|
23 | from rhodecode.apps._base import RepoAppView | |
24 | from rhodecode.lib import helpers as h |
|
24 | from rhodecode.lib import helpers as h | |
25 | from rhodecode.lib import audit_logger |
|
25 | from rhodecode.lib import audit_logger | |
26 | from rhodecode.lib.auth import ( |
|
26 | from rhodecode.lib.auth import ( | |
27 | LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired) |
|
27 | LoginRequired, HasRepoPermissionAnyDecorator, CSRFRequired) | |
28 | from rhodecode.lib.utils2 import str2bool |
|
28 | from rhodecode.lib.utils2 import str2bool | |
29 | from rhodecode.model.db import User |
|
29 | from rhodecode.model.db import User | |
30 | from rhodecode.model.forms import RepoPermsForm |
|
30 | from rhodecode.model.forms import RepoPermsForm | |
31 | from rhodecode.model.meta import Session |
|
31 | from rhodecode.model.meta import Session | |
32 | from rhodecode.model.permission import PermissionModel |
|
32 | from rhodecode.model.permission import PermissionModel | |
33 | from rhodecode.model.repo import RepoModel |
|
33 | from rhodecode.model.repo import RepoModel | |
34 |
|
34 | |||
35 | log = logging.getLogger(__name__) |
|
35 | log = logging.getLogger(__name__) | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | class RepoSettingsPermissionsView(RepoAppView): |
|
38 | class RepoSettingsPermissionsView(RepoAppView): | |
39 |
|
39 | |||
40 | def load_default_context(self): |
|
40 | def load_default_context(self): | |
41 | c = self._get_local_tmpl_context() |
|
41 | c = self._get_local_tmpl_context() | |
42 | return c |
|
42 | return c | |
43 |
|
43 | |||
44 | @LoginRequired() |
|
44 | @LoginRequired() | |
45 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
45 | @HasRepoPermissionAnyDecorator('repository.admin') | |
46 | def edit_permissions(self): |
|
46 | def edit_permissions(self): | |
47 | _ = self.request.translate |
|
47 | _ = self.request.translate | |
48 | c = self.load_default_context() |
|
48 | c = self.load_default_context() | |
49 | c.active = 'permissions' |
|
49 | c.active = 'permissions' | |
50 | if self.request.GET.get('branch_permissions'): |
|
50 | if self.request.GET.get('branch_permissions'): | |
51 | h.flash(_('Explicitly add user or user group with write or higher ' |
|
51 | h.flash(_('Explicitly add user or user group with write or higher ' | |
52 | 'permission to modify their branch permissions.'), |
|
52 | 'permission to modify their branch permissions.'), | |
53 | category='notice') |
|
53 | category='notice') | |
54 | return self._get_template_context(c) |
|
54 | return self._get_template_context(c) | |
55 |
|
55 | |||
56 | @LoginRequired() |
|
56 | @LoginRequired() | |
57 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
57 | @HasRepoPermissionAnyDecorator('repository.admin') | |
58 | @CSRFRequired() |
|
58 | @CSRFRequired() | |
59 | def edit_permissions_update(self): |
|
59 | def edit_permissions_update(self): | |
60 | _ = self.request.translate |
|
60 | _ = self.request.translate | |
61 | c = self.load_default_context() |
|
61 | c = self.load_default_context() | |
62 | c.active = 'permissions' |
|
62 | c.active = 'permissions' | |
63 | data = self.request.POST |
|
63 | data = self.request.POST | |
64 | # store the private flag outside of the HTML form to verify whether we can modify |
|
64 | # store the private flag outside of the HTML form to verify whether we can modify | |
65 | # default user permissions; this prevents submission of faked POST data |
|
65 | # default user permissions; this prevents submission of faked POST data | |
66 | # into the form for private repos |
|
66 | # into the form for private repos | |
67 | data['repo_private'] = self.db_repo.private |
|
67 | data['repo_private'] = self.db_repo.private | |
68 | form = RepoPermsForm(self.request.translate)().to_python(data) |
|
68 | form = RepoPermsForm(self.request.translate)().to_python(data) | |
69 | changes = RepoModel().update_permissions( |
|
69 | changes = RepoModel().update_permissions( | |
70 | self.db_repo_name, form['perm_additions'], form['perm_updates'], |
|
70 | self.db_repo_name, form['perm_additions'], form['perm_updates'], | |
71 | form['perm_deletions']) |
|
71 | form['perm_deletions']) | |
72 |
|
72 | |||
73 | action_data = { |
|
73 | action_data = { | |
74 | 'added': changes['added'], |
|
74 | 'added': changes['added'], | |
75 | 'updated': changes['updated'], |
|
75 | 'updated': changes['updated'], | |
76 | 'deleted': changes['deleted'], |
|
76 | 'deleted': changes['deleted'], | |
77 | } |
|
77 | } | |
78 | audit_logger.store_web( |
|
78 | audit_logger.store_web( | |
79 | 'repo.edit.permissions', action_data=action_data, |
|
79 | 'repo.edit.permissions', action_data=action_data, | |
80 | user=self._rhodecode_user, repo=self.db_repo) |
|
80 | user=self._rhodecode_user, repo=self.db_repo) | |
81 |
|
81 | |||
82 | Session().commit() |
|
82 | Session().commit() | |
83 | h.flash(_('Repository access permissions updated'), category='success') |
|
83 | h.flash(_('Repository access permissions updated'), category='success') | |
84 |
|
84 | |||
85 | affected_user_ids = None |
|
85 | affected_user_ids = None | |
86 | if changes.get('default_user_changed', False): |
|
86 | if changes.get('default_user_changed', False): | |
87 | # if we change the default user, we need to flush everyone permissions |
|
87 | # if we change the default user, we need to flush everyone permissions | |
88 | affected_user_ids = User.get_all_user_ids() |
|
88 | affected_user_ids = User.get_all_user_ids() | |
89 | PermissionModel().flush_user_permission_caches( |
|
89 | PermissionModel().flush_user_permission_caches( | |
90 | changes, affected_user_ids=affected_user_ids) |
|
90 | changes, affected_user_ids=affected_user_ids) | |
91 |
|
91 | |||
92 | raise HTTPFound( |
|
92 | raise HTTPFound( | |
93 | h.route_path('edit_repo_perms', repo_name=self.db_repo_name)) |
|
93 | h.route_path('edit_repo_perms', repo_name=self.db_repo_name)) | |
94 |
|
94 | |||
95 | @LoginRequired() |
|
95 | @LoginRequired() | |
96 | @HasRepoPermissionAnyDecorator('repository.admin') |
|
96 | @HasRepoPermissionAnyDecorator('repository.admin') | |
97 | @CSRFRequired() |
|
97 | @CSRFRequired() | |
98 | def edit_permissions_set_private_repo(self): |
|
98 | def edit_permissions_set_private_repo(self): | |
99 | _ = self.request.translate |
|
99 | _ = self.request.translate | |
100 | self.load_default_context() |
|
100 | self.load_default_context() | |
101 |
|
101 | |||
102 | private_flag = str2bool(self.request.POST.get('private')) |
|
102 | private_flag = str2bool(self.request.POST.get('private')) | |
103 |
|
103 | changes = { | ||
|
104 | 'repo_private': private_flag | |||
|
105 | } | |||
104 | try: |
|
106 | try: | |
105 | repo = RepoModel().get(self.db_repo.repo_id) |
|
107 | repo = RepoModel().get(self.db_repo.repo_id) | |
106 | repo.private = private_flag |
|
108 | RepoModel().update(repo, **changes) | |
107 | Session().add(repo) |
|
|||
108 | RepoModel().grant_user_permission( |
|
|||
109 | repo=self.db_repo, user=User.DEFAULT_USER, perm='repository.none' |
|
|||
110 | ) |
|
|||
111 |
|
||||
112 | Session().commit() |
|
109 | Session().commit() | |
113 |
|
110 | |||
114 | h.flash(_('Repository `{}` private mode set successfully').format(self.db_repo_name), |
|
111 | h.flash(_('Repository `{}` private mode set successfully').format(self.db_repo_name), | |
115 | category='success') |
|
112 | category='success') | |
116 | # NOTE(dan): when we change repo private mode we need to notify all USERS |
|
|||
117 | affected_user_ids = User.get_all_user_ids() |
|
|||
118 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
|||
119 |
|
113 | |||
120 | except Exception: |
|
114 | except Exception: | |
121 | log.exception("Exception during update of repository") |
|
115 | log.exception("Exception during update of repository") | |
122 | h.flash(_('Error occurred during update of repository {}').format( |
|
116 | h.flash(_('Error occurred during update of repository {}').format( | |
123 | self.db_repo_name), category='error') |
|
117 | self.db_repo_name), category='error') | |
124 |
|
118 | |||
125 | return { |
|
119 | return { | |
126 | 'redirect_url': h.route_path('edit_repo_perms', repo_name=self.db_repo_name), |
|
120 | 'redirect_url': h.route_path('edit_repo_perms', repo_name=self.db_repo_name), | |
127 | 'private': private_flag |
|
121 | 'private': private_flag | |
128 | } |
|
122 | } |
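
The rewritten edit_permissions_set_private_repo collects the flag into a changes dict and hands it to RepoModel().update() instead of mutating the repo and granting default-user permissions inline. A self-contained sketch of the flag handling follows, with a fake POST dict standing in for self.request.POST and the model calls left as comments; the assumption (not shown in the diff) is that RepoModel.update() now carries the default-user permission and cache-flush side effects the removed inline code performed:

    def str2bool(v):
        # mirrors rhodecode.lib.utils2.str2bool for the common textual spellings
        return str(v).strip().lower() in ('t', 'true', 'y', 'yes', 'on', '1')

    post = {'private': 'true'}                    # what the form submits
    private_flag = str2bool(post.get('private'))  # -> True

    changes = {'repo_private': private_flag}
    # in the view this becomes:
    #   repo = RepoModel().get(self.db_repo.repo_id)
    #   RepoModel().update(repo, **changes)
    #   Session().commit()
    assert changes == {'repo_private': True}
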
@@ -1,184 +1,187 b'' | |||||
1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2010-2023 RhodeCode GmbH | |
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 | import os |
|
19 | import os | |
20 | import textwrap |
|
20 | import textwrap | |
21 | import string |
|
21 | import string | |
22 | import functools |
|
22 | import functools | |
23 | import logging |
|
23 | import logging | |
24 | import tempfile |
|
24 | import tempfile | |
25 | import logging.config |
|
25 | import logging.config | |
26 |
|
26 | |||
27 | from rhodecode.lib.type_utils import str2bool, aslist |
|
27 | from rhodecode.lib.type_utils import str2bool, aslist | |
28 |
|
28 | |||
29 | log = logging.getLogger(__name__) |
|
29 | log = logging.getLogger(__name__) | |
30 |
|
30 | |||
31 |
|
31 | |||
32 | # skip keys that are set here, so we don't double-process them |
|
32 | # skip keys that are set here, so we don't double-process them | |
33 | set_keys = { |
|
33 | set_keys = { | |
34 | '__file__': '' |
|
34 | '__file__': '' | |
35 | } |
|
35 | } | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | class SettingsMaker: |
|
38 | class SettingsMaker: | |
39 |
|
39 | |||
40 | def __init__(self, app_settings): |
|
40 | def __init__(self, app_settings): | |
41 | self.settings = app_settings |
|
41 | self.settings = app_settings | |
42 |
|
42 | |||
43 | @classmethod |
|
43 | @classmethod | |
44 | def _bool_func(cls, input_val): |
|
44 | def _bool_func(cls, input_val): | |
45 | if isinstance(input_val, bytes): |
|
45 | if isinstance(input_val, bytes): | |
46 | # decode to str |
|
46 | # decode to str | |
47 | input_val = input_val.decode('utf8') |
|
47 | input_val = input_val.decode('utf8') | |
48 | return str2bool(input_val) |
|
48 | return str2bool(input_val) | |
49 |
|
49 | |||
50 | @classmethod |
|
50 | @classmethod | |
51 | def _int_func(cls, input_val): |
|
51 | def _int_func(cls, input_val): | |
52 | return int(input_val) |
|
52 | return int(input_val) | |
53 |
|
53 | |||
54 | @classmethod |
|
54 | @classmethod | |
55 | def _float_func(cls, input_val): |
|
55 | def _float_func(cls, input_val): | |
56 | return float(input_val) |
|
56 | return float(input_val) | |
57 |
|
57 | |||
58 | @classmethod |
|
58 | @classmethod | |
59 | def _list_func(cls, input_val, sep=','): |
|
59 | def _list_func(cls, input_val, sep=','): | |
60 | return aslist(input_val, sep=sep) |
|
60 | return aslist(input_val, sep=sep) | |
61 |
|
61 | |||
62 | @classmethod |
|
62 | @classmethod | |
63 | def _string_func(cls, input_val, lower=True): |
|
63 | def _string_func(cls, input_val, lower=True): | |
64 | if lower: |
|
64 | if lower: | |
65 | input_val = input_val.lower() |
|
65 | input_val = input_val.lower() | |
66 | return input_val |
|
66 | return input_val | |
67 |
|
67 | |||
68 | @classmethod |
|
68 | @classmethod | |
69 | def _string_no_quote_func(cls, input_val, lower=True): |
|
69 | def _string_no_quote_func(cls, input_val, lower=True): | |
70 | """ |
|
70 | """ | |
71 | Special case string function that detects if a value is set to an empty quoted string |
|
71 | Special case string function that detects if a value is set to an empty quoted string | |
72 | e.g. |
|
72 | e.g. | |
73 |
|
73 | |||
74 | core.binary_dir = "" |
|
74 | core.binary_dir = "" | |
75 | """ |
|
75 | """ | |
76 |
|
76 | |||
77 | input_val = cls._string_func(input_val, lower=lower) |
|
77 | input_val = cls._string_func(input_val, lower=lower) | |
78 | if input_val in ['""', "''"]: |
|
78 | if input_val in ['""', "''"]: | |
79 | return '' |
|
79 | return '' | |
80 | return input_val |
|
80 | return input_val | |
81 | @classmethod |
|
81 | @classmethod | |
82 | def _dir_func(cls, input_val, ensure_dir=False, mode=0o755): |
|
82 | def _dir_func(cls, input_val, ensure_dir=False, mode=0o755): | |
83 |
|
83 | |||
84 | # ensure we have our dir created |
|
84 | # ensure we have our dir created | |
85 | if not os.path.isdir(input_val) and ensure_dir: |
|
85 | if not os.path.isdir(input_val) and ensure_dir: | |
86 | os.makedirs(input_val, mode=mode, exist_ok=True) |
|
86 | os.makedirs(input_val, mode=mode, exist_ok=True) | |
87 |
|
87 | |||
88 | if not os.path.isdir(input_val): |
|
88 | if not os.path.isdir(input_val): | |
89 | raise Exception(f'Dir at {input_val} does not exist') |
|
89 | raise Exception(f'Dir at {input_val} does not exist') | |
90 | return input_val |
|
90 | return input_val | |
91 |
|
91 | |||
92 | @classmethod |
|
92 | @classmethod | |
93 | def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755): |
|
93 | def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755): | |
94 | dirname = os.path.dirname(input_val) |
|
94 | dirname = os.path.dirname(input_val) | |
95 | cls._dir_func(dirname, ensure_dir=ensure_dir) |
|
95 | cls._dir_func(dirname, ensure_dir=ensure_dir) | |
96 | return input_val |
|
96 | return input_val | |
97 |
|
97 | |||
98 | @classmethod |
|
98 | @classmethod | |
99 | def _key_transformator(cls, key): |
|
99 | def _key_transformator(cls, key): | |
100 | return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_')) |
|
100 | return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_')) | |
101 |
|
101 | |||
102 | def maybe_env_key(self, key): |
|
102 | def maybe_env_key(self, key): | |
103 | # this KEY may also be set in the environment; if so, that value takes priority. |
|
103 | # this KEY may also be set in the environment; if so, that value takes priority. | |
104 | transformed_key = self._key_transformator(key) |
|
104 | transformed_key = self._key_transformator(key) | |
105 | envvar_value = os.environ.get(transformed_key) |
|
105 | envvar_value = os.environ.get(transformed_key) | |
106 | if envvar_value: |
|
106 | if envvar_value: | |
107 | log.debug('using `%s` key instead of `%s` key for config', transformed_key, key) |
|
107 | log.debug('using `%s` key instead of `%s` key for config', transformed_key, key) | |
108 |
|
108 | |||
109 | return envvar_value |
|
109 | return envvar_value | |
110 |
|
110 | |||
111 | def env_expand(self): |
|
111 | def env_expand(self): | |
|
112 | if self.settings.get('rhodecode.env_expand') == 'false': | |||
|
113 | return | |||
|
114 | ||||
112 | replaced = {} |
|
115 | replaced = {} | |
113 | for k, v in self.settings.items(): |
|
116 | for k, v in self.settings.items(): | |
114 | if k not in set_keys: |
|
117 | if k not in set_keys: | |
115 | envvar_value = self.maybe_env_key(k) |
|
118 | envvar_value = self.maybe_env_key(k) | |
116 | if envvar_value: |
|
119 | if envvar_value: | |
117 | replaced[k] = envvar_value |
|
120 | replaced[k] = envvar_value | |
118 | set_keys[k] = envvar_value |
|
121 | set_keys[k] = envvar_value | |
119 |
|
122 | |||
120 | # replace ALL keys updated |
|
123 | # replace ALL keys updated | |
121 | self.settings.update(replaced) |
|
124 | self.settings.update(replaced) | |
122 |
|
125 | |||
123 | def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'): |
|
126 | def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'): | |
124 | """ |
|
127 | """ | |
125 | Helper to enable debug logging on a running instance |
|
128 | Helper to enable debug logging on a running instance | |
126 | :return: |
|
129 | :return: | |
127 | """ |
|
130 | """ | |
128 |
|
131 | |||
129 | if not str2bool(self.settings.get('logging.autoconfigure')): |
|
132 | if not str2bool(self.settings.get('logging.autoconfigure')): | |
130 | log.info('logging configuration based on main .ini file') |
|
133 | log.info('logging configuration based on main .ini file') | |
131 | return |
|
134 | return | |
132 |
|
135 | |||
133 | if logging_conf is None: |
|
136 | if logging_conf is None: | |
134 | logging_conf = self.settings.get('logging.logging_conf_file') or '' |
|
137 | logging_conf = self.settings.get('logging.logging_conf_file') or '' | |
135 |
|
138 | |||
136 | if not os.path.isfile(logging_conf): |
|
139 | if not os.path.isfile(logging_conf): | |
137 | log.error('Unable to set up logging based on %s, ' |
|
140 | log.error('Unable to set up logging based on %s, ' | |
138 | 'file does not exist. Specify the path using the logging.logging_conf_file= config setting.', logging_conf) |
|
141 | 'file does not exist. Specify the path using the logging.logging_conf_file= config setting.', logging_conf) | |
139 | return |
|
142 | return | |
140 |
|
143 | |||
141 | with open(logging_conf, 'rt') as f: |
|
144 | with open(logging_conf, 'rt') as f: | |
142 | ini_template = textwrap.dedent(f.read()) |
|
145 | ini_template = textwrap.dedent(f.read()) | |
143 | ini_template = string.Template(ini_template).safe_substitute( |
|
146 | ini_template = string.Template(ini_template).safe_substitute( | |
144 | RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level, |
|
147 | RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level, | |
145 | RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter |
|
148 | RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter | |
146 | ) |
|
149 | ) | |
147 |
|
150 | |||
148 | with tempfile.NamedTemporaryFile(mode='w', prefix='rc_logging_', suffix='.ini', delete=False) as f: |
|
151 | with tempfile.NamedTemporaryFile(mode='w', prefix='rc_logging_', suffix='.ini', delete=False) as f: | |
149 | log.info('Saved Temporary LOGGING config at %s', f.name) |
|
152 | log.info('Saved Temporary LOGGING config at %s', f.name) | |
150 | f.write(ini_template) |
|
153 | f.write(ini_template) | |
151 |
|
154 | |||
152 | logging.config.fileConfig(f.name) |
|
155 | logging.config.fileConfig(f.name) | |
153 | os.remove(f.name) |
|
156 | os.remove(f.name) | |
154 |
|
157 | |||
155 | def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None): |
|
158 | def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None): | |
156 | input_val = self.settings.get(key, default) |
|
159 | input_val = self.settings.get(key, default) | |
157 |
|
160 | |||
158 | if default_when_empty and not input_val: |
|
161 | if default_when_empty and not input_val: | |
159 | # use default value when value is set in the config but it is empty |
|
162 | # use default value when value is set in the config but it is empty | |
160 | input_val = default |
|
163 | input_val = default | |
161 |
|
164 | |||
162 | parser_func = { |
|
165 | parser_func = { | |
163 | 'bool': self._bool_func, |
|
166 | 'bool': self._bool_func, | |
164 | 'int': self._int_func, |
|
167 | 'int': self._int_func, | |
165 | 'float': self._float_func, |
|
168 | 'float': self._float_func, | |
166 | 'list': self._list_func, |
|
169 | 'list': self._list_func, | |
167 | 'list:newline': functools.partial(self._list_func, sep='\n'), |
|
170 | 'list:newline': functools.partial(self._list_func, sep='\n'), | |
168 | 'list:spacesep': functools.partial(self._list_func, sep=' '), |
|
171 | 'list:spacesep': functools.partial(self._list_func, sep=' '), | |
169 | 'string': functools.partial(self._string_func, lower=lower), |
|
172 | 'string': functools.partial(self._string_func, lower=lower), | |
170 | 'string:noquote': functools.partial(self._string_no_quote_func, lower=lower), |
|
173 | 'string:noquote': functools.partial(self._string_no_quote_func, lower=lower), | |
171 | 'dir': self._dir_func, |
|
174 | 'dir': self._dir_func, | |
172 | 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True), |
|
175 | 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True), | |
173 | 'file': self._file_path_func, |
|
176 | 'file': self._file_path_func, | |
174 | 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True), |
|
177 | 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True), | |
175 | None: lambda i: i |
|
178 | None: lambda i: i | |
176 | }[parser] |
|
179 | }[parser] | |
177 |
|
180 | |||
178 | envvar_value = self.maybe_env_key(key) |
|
181 | envvar_value = self.maybe_env_key(key) | |
179 | if envvar_value: |
|
182 | if envvar_value: | |
180 | input_val = envvar_value |
|
183 | input_val = envvar_value | |
181 | set_keys[key] = input_val |
|
184 | set_keys[key] = input_val | |
182 |
|
185 | |||
183 | self.settings[key] = parser_func(input_val) |
|
186 | self.settings[key] = parser_func(input_val) | |
184 | return self.settings[key] |
|
187 | return self.settings[key] |
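
SettingsMaker lets any .ini key be overridden from the environment: _key_transformator maps the key to an RC_-prefixed variable name, and maybe_env_key prefers that variable over the .ini value; the new rhodecode.env_expand = false switch above opts out of the bulk expansion. A runnable sketch of the naming rule and the override priority, using an example key:

    import os

    def key_to_envvar(key: str) -> str:
        # mirrors SettingsMaker._key_transformator
        return "RC_{}".format(key.upper().replace('.', '_').replace('-', '_'))

    assert key_to_envvar('vcs.server') == 'RC_VCS_SERVER'

    # with the variable set, env_expand()/make_setting() prefer it over the .ini
    os.environ['RC_VCS_SERVER'] = 'localhost:10010'
    settings = {'vcs.server': 'localhost:9900'}
    value = os.environ.get(key_to_envvar('vcs.server')) or settings['vcs.server']
    assert value == 'localhost:10010'
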
@@ -1,193 +1,194 b'' | |||||
1 |
|
1 | |||
2 |
|
2 | |||
3 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
3 | # Copyright (C) 2014-2023 RhodeCode GmbH | |
4 | # |
|
4 | # | |
5 | # This program is free software: you can redistribute it and/or modify |
|
5 | # This program is free software: you can redistribute it and/or modify | |
6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
6 | # it under the terms of the GNU Affero General Public License, version 3 | |
7 | # (only), as published by the Free Software Foundation. |
|
7 | # (only), as published by the Free Software Foundation. | |
8 | # |
|
8 | # | |
9 | # This program is distributed in the hope that it will be useful, |
|
9 | # This program is distributed in the hope that it will be useful, | |
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | # GNU General Public License for more details. |
|
12 | # GNU General Public License for more details. | |
13 | # |
|
13 | # | |
14 | # You should have received a copy of the GNU Affero General Public License |
|
14 | # You should have received a copy of the GNU Affero General Public License | |
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | # |
|
16 | # | |
17 | # This program is dual-licensed. If you wish to learn more about the |
|
17 | # This program is dual-licensed. If you wish to learn more about the | |
18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
18 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
20 |
|
20 | |||
21 | """ |
|
21 | """ | |
22 | Implementation of the scm_app interface using raw HTTP communication. |
|
22 | Implementation of the scm_app interface using raw HTTP communication. | |
23 | """ |
|
23 | """ | |
24 |
|
24 | |||
25 | import base64 |
|
25 | import base64 | |
26 | import logging |
|
26 | import logging | |
27 | import urllib.parse |
|
27 | import urllib.parse | |
28 | import wsgiref.util |
|
28 | import wsgiref.util | |
29 |
|
29 | |||
30 | import msgpack |
|
30 | import msgpack | |
31 | import requests |
|
31 | import requests | |
32 | import webob.request |
|
32 | import webob.request | |
33 |
|
33 | |||
34 | import rhodecode |
|
34 | import rhodecode | |
35 | from rhodecode.lib.middleware.utils import get_path_info |
|
35 | from rhodecode.lib.middleware.utils import get_path_info | |
36 |
|
36 | |||
37 | log = logging.getLogger(__name__) |
|
37 | log = logging.getLogger(__name__) | |
38 |
|
38 | |||
39 |
|
39 | |||
40 | def create_git_wsgi_app(repo_path, repo_name, config): |
|
40 | def create_git_wsgi_app(repo_path, repo_name, config): | |
41 | url = _vcs_streaming_url() + 'git/' |
|
41 | url = _vcs_streaming_url() + 'git/' | |
42 | return VcsHttpProxy(url, repo_path, repo_name, config) |
|
42 | return VcsHttpProxy(url, repo_path, repo_name, config) | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | def create_hg_wsgi_app(repo_path, repo_name, config): |
|
45 | def create_hg_wsgi_app(repo_path, repo_name, config): | |
46 | url = _vcs_streaming_url() + 'hg/' |
|
46 | url = _vcs_streaming_url() + 'hg/' | |
47 | return VcsHttpProxy(url, repo_path, repo_name, config) |
|
47 | return VcsHttpProxy(url, repo_path, repo_name, config) | |
48 |
|
48 | |||
49 |
|
49 | |||
50 | def _vcs_streaming_url(): |
|
50 | def _vcs_streaming_url(): | |
51 | template = 'http://{}/stream/' |
|
51 | template = 'http://{}/stream/' | |
52 | return template.format(rhodecode.CONFIG['vcs.server']) |
|
52 | return template.format(rhodecode.CONFIG['vcs.server']) | |
53 |
|
53 | |||
54 |
|
54 | |||
55 | # TODO: johbo: Avoid the global. |
|
55 | # TODO: johbo: Avoid the global. | |
56 | session = requests.Session() |
|
56 | session = requests.Session() | |
57 | # Requests speedup, avoid reading .netrc and similar |
|
57 | # Requests speedup, avoid reading .netrc and similar | |
58 | session.trust_env = False |
|
58 | session.trust_env = False | |
59 |
|
59 | |||
60 | # prevent urllib3 from spamming our logs. |
|
60 | # prevent urllib3 from spamming our logs. | |
61 | logging.getLogger("requests.packages.urllib3.connectionpool").setLevel( |
|
61 | logging.getLogger("requests.packages.urllib3.connectionpool").setLevel( | |
62 | logging.WARNING) |
|
62 | logging.WARNING) | |
63 |
|
63 | |||
64 |
|
64 | |||
65 | class VcsHttpProxy(object): |
|
65 | class VcsHttpProxy(object): | |
66 | """ |
|
66 | """ | |
67 | A WSGI application which proxies vcs requests. |
|
67 | A WSGI application which proxies vcs requests. | |
68 |
|
68 | |||
69 | The goal is to shuffle the data around without touching it. The only |
|
69 | The goal is to shuffle the data around without touching it. The only | |
70 | exception is the extra data from the config object which we send to the |
|
70 | exception is the extra data from the config object which we send to the | |
71 | server as well. |
|
71 | server as well. | |
72 | """ |
|
72 | """ | |
73 |
|
73 | |||
74 | def __init__(self, url, repo_path, repo_name, config): |
|
74 | def __init__(self, url, repo_path, repo_name, config): | |
75 | """ |
|
75 | """ | |
76 | :param str url: The URL of the VCSServer to call. |
|
76 | :param str url: The URL of the VCSServer to call. | |
77 | """ |
|
77 | """ | |
78 | self._url = url |
|
78 | self._url = url | |
79 | self._repo_name = repo_name |
|
79 | self._repo_name = repo_name | |
80 | self._repo_path = repo_path |
|
80 | self._repo_path = repo_path | |
81 | self._config = config |
|
81 | self._config = config | |
82 | self.rc_extras = {} |
|
82 | self.rc_extras = {} | |
83 | log.debug( |
|
83 | log.debug( | |
84 | "Creating VcsHttpProxy for repo %s, url %s", |
|
84 | "Creating VcsHttpProxy for repo %s, url %s", | |
85 | repo_name, url) |
|
85 | repo_name, url) | |
86 |
|
86 | |||
87 | def __call__(self, environ, start_response): |
|
87 | def __call__(self, environ, start_response): | |
88 | config = self._config |
|
88 | config = self._config | |
89 | request = webob.request.Request(environ) |
|
89 | request = webob.request.Request(environ) | |
90 | request_headers = request.headers |
|
90 | request_headers = request.headers | |
91 |
|
91 | |||
92 | call_context = { |
|
92 | call_context = { | |
93 | # TODO: johbo: Remove this, rely on URL path only |
|
93 | # TODO: johbo: Remove this, rely on URL path only | |
94 | 'repo_name': self._repo_name, |
|
94 | 'repo_name': self._repo_name, | |
95 | 'repo_path': self._repo_path, |
|
95 | 'repo_path': self._repo_path, | |
96 | 'path_info': get_path_info(environ), |
|
96 | 'path_info': get_path_info(environ), | |
97 |
|
97 | |||
98 | 'repo_store': self.rc_extras.get('repo_store'), |
|
98 | 'repo_store': self.rc_extras.get('repo_store'), | |
99 | 'server_config_file': self.rc_extras.get('config'), |
|
99 | 'server_config_file': self.rc_extras.get('config'), | |
100 |
|
100 | |||
101 | 'auth_user': self.rc_extras.get('username'), |
|
101 | 'auth_user': self.rc_extras.get('username'), | |
102 | 'auth_user_id': str(self.rc_extras.get('user_id')), |
|
102 | 'auth_user_id': str(self.rc_extras.get('user_id')), | |
103 | 'auth_user_ip': self.rc_extras.get('ip'), |
|
103 | 'auth_user_ip': self.rc_extras.get('ip'), | |
104 |
|
104 | |||
105 | 'repo_config': config, |
|
105 | 'repo_config': config, | |
106 | 'locked_status_code': rhodecode.CONFIG.get('lock_ret_code'), |
|
106 | 'locked_status_code': rhodecode.CONFIG.get('lock_ret_code'), | |
107 | } |
|
107 | } | |
108 |
|
108 | |||
109 | request_headers.update({ |
|
109 | request_headers.update({ | |
110 | # TODO: johbo: Avoid encoding and put this into payload? |
|
110 | # TODO: johbo: Avoid encoding and put this into payload? | |
111 | 'X_RC_VCS_STREAM_CALL_CONTEXT': base64.b64encode(msgpack.packb(call_context)) |
|
111 | 'X_RC_VCS_STREAM_CALL_CONTEXT': base64.b64encode(msgpack.packb(call_context)) | |
112 | }) |
|
112 | }) | |
113 |
|
113 | |||
114 | method = environ['REQUEST_METHOD'] |
|
114 | method = environ['REQUEST_METHOD'] | |
115 |
|
115 | |||
116 | # Preserve the query string |
|
116 | # Preserve the query string | |
117 | url = self._url |
|
117 | url = self._url | |
118 | url = urllib.parse.urljoin(url, self._repo_name) |
|
118 | url = urllib.parse.urljoin(url, self._repo_name) | |
119 | if environ.get('QUERY_STRING'): |
|
119 | if environ.get('QUERY_STRING'): | |
120 | url += '?' + environ['QUERY_STRING'] |
|
120 | url += '?' + environ['QUERY_STRING'] | |
121 |
|
121 | |||
122 | log.debug('http-app: preparing request to: %s', url) |
|
122 | log.debug('http-app: preparing request to: %s', url) | |
123 | response = session.request( |
|
123 | response = session.request( | |
124 | method, |
|
124 | method, | |
125 | url, |
|
125 | url, | |
126 | data=_maybe_stream_request(environ), |
|
126 | data=_maybe_stream_request(environ), | |
127 | headers=request_headers, |
|
127 | headers=request_headers, | |
128 | stream=True) |
|
128 | stream=True) | |
129 |
|
129 | |||
130 | log.debug('http-app: got vcsserver response: %s', response) |
|
130 | log.debug('http-app: got vcsserver response: %s', response) | |
131 | if response.status_code >= 500: |
|
131 | if response.status_code >= 500: | |
132 | log.error('Exception returned by vcsserver at: %s %s, %s', |
|
132 | log.error('Exception returned by vcsserver at: %s %s, %s', | |
133 | url, response.status_code, response.content) |
|
133 | url, response.status_code, response.content) | |
134 |
|
134 | |||
135 | # Preserve the headers of the response, except hop-by-hop ones |
|
135 | # Preserve the headers of the response, except hop-by-hop ones | |
136 | response_headers = [ |
|
136 | response_headers = [ | |
137 | (h, v) for h, v in response.headers.items() |
|
137 | (h, v) for h, v in response.headers.items() | |
138 | if not wsgiref.util.is_hop_by_hop(h) |
|
138 | if not wsgiref.util.is_hop_by_hop(h) | |
139 | ] |
|
139 | ] | |
140 |
|
140 | |||
141 | # Build status argument for start_response callable. |
|
141 | # Build status argument for start_response callable. | |
142 | status = '{status_code} {reason_phrase}'.format( |
|
142 | status = '{status_code} {reason_phrase}'.format( | |
143 | status_code=response.status_code, |
|
143 | status_code=response.status_code, | |
144 | reason_phrase=response.reason) |
|
144 | reason_phrase=response.reason) | |
145 |
|
145 | |||
146 | start_response(status, response_headers) |
|
146 | start_response(status, response_headers) | |
147 | return _maybe_stream_response(response) |
|
147 | return _maybe_stream_response(response) | |
148 |
|
148 | |||
149 |
|
149 | |||
150 | def read_in_chunks(stream_obj, block_size=1024, chunks=-1): |
|
150 | def read_in_chunks(stream_obj, block_size=1024, chunks=-1): | |
151 | """ |
|
151 | """ | |
152 | Read a stream in chunks; default chunk size: 1 KiB. |
|
152 | Read a stream in chunks; default chunk size: 1 KiB. | |
153 | """ |
|
153 | """ | |
154 | while chunks: |
|
154 | while chunks: | |
155 | data = stream_obj.read(block_size) |
|
155 | data = stream_obj.read(block_size) | |
156 | if not data: |
|
156 | if not data: | |
157 | break |
|
157 | break | |
158 | yield data |
|
158 | yield data | |
159 | chunks -= 1 |
|
159 | chunks -= 1 | |
160 |
|
160 | |||
161 |
|
161 | |||
162 | def _is_request_chunked(environ): |
|
162 | def _is_request_chunked(environ): | |
163 | stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked' |
|
163 | stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked' | |
164 | return stream |
|
164 | return stream | |
165 |
|
165 | |||
166 |
|
166 | |||
167 | def _maybe_stream_request(environ): |
|
167 | def _maybe_stream_request(environ): | |
168 | path = get_path_info(environ) |
|
168 | path = get_path_info(environ) | |
169 | stream = _is_request_chunked(environ) |
|
169 | stream = _is_request_chunked(environ) | |
170 | log.debug('handling request `%s` with stream support: %s', path, stream) |
|
170 | req_method = environ['REQUEST_METHOD'] | |
|
171 | log.debug('handling scm request: %s `%s` with stream support: %s', req_method, path, stream) | |||
171 |
|
172 | |||
172 | if stream: |
|
173 | if stream: | |
173 | # stream the request body in 256 KiB blocks |
|
174 | # stream the request body in 256 KiB blocks | |
174 | return read_in_chunks(environ['wsgi.input'], block_size=1024 * 256) |
|
175 | return read_in_chunks(environ['wsgi.input'], block_size=1024 * 256) | |
175 | else: |
|
176 | else: | |
176 | return environ['wsgi.input'].read() |
|
177 | return environ['wsgi.input'].read() | |
177 |
|
178 | |||
178 |
|
179 | |||
179 | def _maybe_stream_response(response): |
|
180 | def _maybe_stream_response(response): | |
180 | """ |
|
181 | """ | |
181 | Try to generate chunks from the response if it is chunked. |
|
182 | Try to generate chunks from the response if it is chunked. | |
182 | """ |
|
183 | """ | |
183 | stream = _is_chunked(response) |
|
184 | stream = _is_chunked(response) | |
184 | log.debug('returning response with stream: %s', stream) |
|
185 | log.debug('returning response with stream: %s', stream) | |
185 | if stream: |
|
186 | if stream: | |
186 | # read the response in 256 KiB chunks |
|
187 | # read the response in 256 KiB chunks | |
187 | return response.raw.read_chunked(amt=1024 * 256) |
|
188 | return response.raw.read_chunked(amt=1024 * 256) | |
188 | else: |
|
189 | else: | |
189 | return [response.content] |
|
190 | return [response.content] | |
190 |
|
191 | |||
191 |
|
192 | |||
192 | def _is_chunked(response): |
|
193 | def _is_chunked(response): | |
193 | return response.headers.get('Transfer-Encoding', '') == 'chunked' |
|
194 | return response.headers.get('Transfer-Encoding', '') == 'chunked' |
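The streaming helpers above make the same decision on both legs of the proxy: if Transfer-Encoding: chunked is present, the body is passed along lazily in 256 KiB blocks; otherwise it is buffered whole. A standalone sketch of the request-side decision, assuming a plain WSGI environ; it mirrors _maybe_stream_request and read_in_chunks rather than reproducing the module verbatim:

def iter_blocks(fileobj, block_size=1024 * 256):
    # lazily yield fixed-size blocks until the stream is exhausted
    while True:
        data = fileobj.read(block_size)
        if not data:
            break
        yield data

def body_for_upstream(environ):
    # chunked uploads are forwarded as a generator, so the proxy never
    # holds the full body in memory; anything else is read in one call
    if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
        return iter_blocks(environ['wsgi.input'])
    return environ['wsgi.input'].read()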
@@ -1,866 +1,893 b'' | |||||
1 | # Copyright (C) 2017-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2017-2023 RhodeCode GmbH | |
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 |
|
19 | |||
20 | import os |
|
20 | import os | |
21 | import sys |
|
21 | import sys | |
22 | import time |
|
22 | import time | |
23 | import platform |
|
23 | import platform | |
24 | import collections |
|
24 | import collections | |
25 | import psutil |
|
25 | import psutil | |
26 | from functools import wraps |
|
26 | from functools import wraps | |
27 |
|
27 | |||
28 | import pkg_resources |
|
28 | import pkg_resources | |
29 | import logging |
|
29 | import logging | |
30 | import resource |
|
30 | import resource | |
31 |
|
31 | |||
32 | import configparser |
|
32 | import configparser | |
33 |
|
33 | |||
34 | from rc_license.models import LicenseModel |
|
34 | from rc_license.models import LicenseModel | |
35 | from rhodecode.lib.str_utils import safe_str |
|
35 | from rhodecode.lib.str_utils import safe_str | |
36 |
|
36 | |||
37 | log = logging.getLogger(__name__) |
|
37 | log = logging.getLogger(__name__) | |
38 |
|
38 | |||
39 |
|
39 | |||
40 | _NA = 'NOT AVAILABLE' |
|
40 | _NA = 'NOT AVAILABLE' | |
41 | _NA_FLOAT = 0.0 |
|
41 | _NA_FLOAT = 0.0 | |
42 |
|
42 | |||
43 | STATE_OK = 'ok' |
|
43 | STATE_OK = 'ok' | |
44 | STATE_ERR = 'error' |
|
44 | STATE_ERR = 'error' | |
45 | STATE_WARN = 'warning' |
|
45 | STATE_WARN = 'warning' | |
46 |
|
46 | |||
47 | STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK} |
|
47 | STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK} | |
48 |
|
48 | |||
49 |
|
49 | |||
50 | registered_helpers = {} |
|
50 | registered_helpers = {} | |
51 |
|
51 | |||
52 |
|
52 | |||
53 | def register_sysinfo(func): |
|
53 | def register_sysinfo(func): | |
54 | """ |
|
54 | """ | |
55 | @register_sysinfo |
|
55 | @register_sysinfo | |
56 | def db_check(): |
|
56 | def db_check(): | |
57 | pass |
|
57 | pass | |
58 |
|
58 | |||
59 | db_check == registered_helpers['db_check'] |
|
59 | db_check == registered_helpers['db_check'] | |
60 | """ |
|
60 | """ | |
61 | global registered_helpers |
|
61 | global registered_helpers | |
62 | registered_helpers[func.__name__] = func |
|
62 | registered_helpers[func.__name__] = func | |
63 |
|
63 | |||
64 | @wraps(func) |
|
64 | @wraps(func) | |
65 | def _wrapper(*args, **kwargs): |
|
65 | def _wrapper(*args, **kwargs): | |
66 | return func(*args, **kwargs) |
|
66 | return func(*args, **kwargs) | |
67 | return _wrapper |
|
67 | return _wrapper | |
68 |
|
68 | |||
69 |
|
69 | |||
70 | # HELPERS |
|
70 | # HELPERS | |
71 | def percentage(part: (int, float), whole: (int, float)): |
|
71 | def percentage(part: (int, float), whole: (int, float)): | |
72 | whole = float(whole) |
|
72 | whole = float(whole) | |
73 | if whole > 0: |
|
73 | if whole > 0: | |
74 | return round(100 * float(part) / whole, 1) |
|
74 | return round(100 * float(part) / whole, 1) | |
75 | return 0.0 |
|
75 | return 0.0 | |
76 |
|
76 | |||
77 |
|
77 | |||
78 | def get_storage_size(storage_path): |
|
78 | def get_storage_size(storage_path): | |
79 | sizes = [] |
|
79 | sizes = [] | |
80 | for file_ in os.listdir(storage_path): |
|
80 | for file_ in os.listdir(storage_path): | |
81 | storage_file = os.path.join(storage_path, file_) |
|
81 | storage_file = os.path.join(storage_path, file_) | |
82 | if os.path.isfile(storage_file): |
|
82 | if os.path.isfile(storage_file): | |
83 | try: |
|
83 | try: | |
84 | sizes.append(os.path.getsize(storage_file)) |
|
84 | sizes.append(os.path.getsize(storage_file)) | |
85 | except OSError: |
|
85 | except OSError: | |
86 | log.exception('Failed to get size of storage file %s', storage_file) |
|
86 | log.exception('Failed to get size of storage file %s', storage_file) | |
87 | pass |
|
87 | pass | |
88 |
|
88 | |||
89 | return sum(sizes) |
|
89 | return sum(sizes) | |
90 |
|
90 | |||
91 |
|
91 | |||
92 | def get_resource(resource_type): |
|
92 | def get_resource(resource_type): | |
93 | try: |
|
93 | try: | |
94 | return resource.getrlimit(resource_type) |
|
94 | return resource.getrlimit(resource_type) | |
95 | except Exception: |
|
95 | except Exception: | |
96 | return 'NOT_SUPPORTED' |
|
96 | return 'NOT_SUPPORTED' | |
97 |
|
97 | |||
98 |
|
98 | |||
99 | def get_cert_path(ini_path): |
|
99 | def get_cert_path(ini_path): | |
100 | default = '/etc/ssl/certs/ca-certificates.crt' |
|
100 | default = '/etc/ssl/certs/ca-certificates.crt' | |
101 | control_ca_bundle = os.path.join( |
|
101 | control_ca_bundle = os.path.join( | |
102 | os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ini_path)))), |
|
102 | os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ini_path)))), | |
103 | '/etc/ssl/certs/ca-certificates.crt') |
|
103 | '/etc/ssl/certs/ca-certificates.crt') | |
104 | if os.path.isfile(control_ca_bundle): |
|
104 | if os.path.isfile(control_ca_bundle): | |
105 | default = control_ca_bundle |
|
105 | default = control_ca_bundle | |
106 |
|
106 | |||
107 | return default |
|
107 | return default | |
108 |
|
108 | |||
109 |
|
109 | |||
110 | class SysInfoRes(object): |
|
110 | class SysInfoRes(object): | |
111 | def __init__(self, value, state=None, human_value=None): |
|
111 | def __init__(self, value, state=None, human_value=None): | |
112 | self.value = value |
|
112 | self.value = value | |
113 | self.state = state or STATE_OK_DEFAULT |
|
113 | self.state = state or STATE_OK_DEFAULT | |
114 | self.human_value = human_value or value |
|
114 | self.human_value = human_value or value | |
115 |
|
115 | |||
116 | def __json__(self): |
|
116 | def __json__(self): | |
117 | return { |
|
117 | return { | |
118 | 'value': self.value, |
|
118 | 'value': self.value, | |
119 | 'state': self.state, |
|
119 | 'state': self.state, | |
120 | 'human_value': self.human_value, |
|
120 | 'human_value': self.human_value, | |
121 | } |
|
121 | } | |
122 |
|
122 | |||
123 | def get_value(self): |
|
123 | def get_value(self): | |
124 | return self.__json__() |
|
124 | return self.__json__() | |
125 |
|
125 | |||
126 | def __str__(self): |
|
126 | def __str__(self): | |
127 | return f'<SysInfoRes({self.__json__()})>' |
|
127 | return f'<SysInfoRes({self.__json__()})>' | |
128 |
|
128 | |||
129 |
|
129 | |||
130 | class SysInfo(object): |
|
130 | class SysInfo(object): | |
131 |
|
131 | |||
132 | def __init__(self, func_name, **kwargs): |
|
132 | def __init__(self, func_name, **kwargs): | |
133 | self.function_name = func_name |
|
133 | self.function_name = func_name | |
134 | self.value = _NA |
|
134 | self.value = _NA | |
135 | self.state = None |
|
135 | self.state = None | |
136 | self.kwargs = kwargs or {} |
|
136 | self.kwargs = kwargs or {} | |
137 |
|
137 | |||
138 | def __call__(self): |
|
138 | def __call__(self): | |
139 | computed = self.compute(**self.kwargs) |
|
139 | computed = self.compute(**self.kwargs) | |
140 | if not isinstance(computed, SysInfoRes): |
|
140 | if not isinstance(computed, SysInfoRes): | |
141 | raise ValueError( |
|
141 | raise ValueError( | |
142 | 'computed value for {} is not instance of ' |
|
142 | 'computed value for {} is not instance of ' | |
143 | '{}, got {} instead'.format( |
|
143 | '{}, got {} instead'.format( | |
144 | self.function_name, SysInfoRes, type(computed))) |
|
144 | self.function_name, SysInfoRes, type(computed))) | |
145 | return computed.__json__() |
|
145 | return computed.__json__() | |
146 |
|
146 | |||
147 | def __str__(self): |
|
147 | def __str__(self): | |
148 | return f'<SysInfo({self.function_name})>' |
|
148 | return f'<SysInfo({self.function_name})>' | |
149 |
|
149 | |||
150 | def compute(self, **kwargs): |
|
150 | def compute(self, **kwargs): | |
151 | return self.function_name(**kwargs) |
|
151 | return self.function_name(**kwargs) | |
152 |
|
152 | |||
153 |
|
153 | |||
154 | # SysInfo functions |
|
154 | # SysInfo functions | |
155 | @register_sysinfo |
|
155 | @register_sysinfo | |
156 | def python_info(): |
|
156 | def python_info(): | |
157 | value = dict(version=f'{platform.python_version()}:{platform.python_implementation()}', |
|
157 | value = dict(version=f'{platform.python_version()}:{platform.python_implementation()}', | |
158 | executable=sys.executable) |
|
158 | executable=sys.executable) | |
159 | return SysInfoRes(value=value) |
|
159 | return SysInfoRes(value=value) | |
160 |
|
160 | |||
161 |
|
161 | |||
162 | @register_sysinfo |
|
162 | @register_sysinfo | |
163 | def py_modules(): |
|
163 | def py_modules(): | |
164 | mods = dict([(p.project_name, {'version': p.version, 'location': p.location}) |
|
164 | mods = dict([(p.project_name, {'version': p.version, 'location': p.location}) | |
165 | for p in pkg_resources.working_set]) |
|
165 | for p in pkg_resources.working_set]) | |
166 |
|
166 | |||
167 | value = sorted(mods.items(), key=lambda k: k[0].lower()) |
|
167 | value = sorted(mods.items(), key=lambda k: k[0].lower()) | |
168 | return SysInfoRes(value=value) |
|
168 | return SysInfoRes(value=value) | |
169 |
|
169 | |||
170 |
|
170 | |||
171 | @register_sysinfo |
|
171 | @register_sysinfo | |
172 | def platform_type(): |
|
172 | def platform_type(): | |
173 | from rhodecode.lib.utils import generate_platform_uuid |
|
173 | from rhodecode.lib.utils import generate_platform_uuid | |
174 |
|
174 | |||
175 | value = dict( |
|
175 | value = dict( | |
176 | name=safe_str(platform.platform()), |
|
176 | name=safe_str(platform.platform()), | |
177 | uuid=generate_platform_uuid() |
|
177 | uuid=generate_platform_uuid() | |
178 | ) |
|
178 | ) | |
179 | return SysInfoRes(value=value) |
|
179 | return SysInfoRes(value=value) | |
180 |
|
180 | |||
181 |
|
181 | |||
182 | @register_sysinfo |
|
182 | @register_sysinfo | |
183 | def locale_info(): |
|
183 | def locale_info(): | |
184 | import locale |
|
184 | import locale | |
185 |
|
185 | |||
186 | def safe_get_locale(locale_name): |
|
186 | def safe_get_locale(locale_name): | |
187 | try: |
|
187 | try: | |
188 | return locale.getlocale(locale_name) |
|
188 | return locale.getlocale(locale_name) | |
189 | except TypeError: |
|
189 | except TypeError: | |
190 | return f'FAILED_LOCALE_GET:{locale_name}' |
|
190 | return f'FAILED_LOCALE_GET:{locale_name}' | |
191 |
|
191 | |||
192 | value = dict( |
|
192 | value = dict( | |
193 | locale_default=locale.getlocale(), |
|
193 | locale_default=locale.getlocale(), | |
194 | locale_lc_all=safe_get_locale(locale.LC_ALL), |
|
194 | locale_lc_all=safe_get_locale(locale.LC_ALL), | |
195 | locale_lc_ctype=safe_get_locale(locale.LC_CTYPE), |
|
195 | locale_lc_ctype=safe_get_locale(locale.LC_CTYPE), | |
196 | lang_env=os.environ.get('LANG'), |
|
196 | lang_env=os.environ.get('LANG'), | |
197 | lc_all_env=os.environ.get('LC_ALL'), |
|
197 | lc_all_env=os.environ.get('LC_ALL'), | |
198 | local_archive_env=os.environ.get('LOCALE_ARCHIVE'), |
|
198 | local_archive_env=os.environ.get('LOCALE_ARCHIVE'), | |
199 | ) |
|
199 | ) | |
200 | human_value = \ |
|
200 | human_value = \ | |
201 | f"LANG: {value['lang_env']}, \ |
|
201 | f"LANG: {value['lang_env']}, \ | |
202 | locale LC_ALL: {value['locale_lc_all']}, \ |
|
202 | locale LC_ALL: {value['locale_lc_all']}, \ | |
203 | locale LC_CTYPE: {value['locale_lc_ctype']}, \ |
|
203 | locale LC_CTYPE: {value['locale_lc_ctype']}, \ | |
204 | Default locales: {value['locale_default']}" |
|
204 | Default locales: {value['locale_default']}" | |
205 |
|
205 | |||
206 | return SysInfoRes(value=value, human_value=human_value) |
|
206 | return SysInfoRes(value=value, human_value=human_value) | |
207 |
|
207 | |||
208 |
|
208 | |||
209 | @register_sysinfo |
|
209 | @register_sysinfo | |
210 | def ulimit_info(): |
|
210 | def ulimit_info(): | |
211 | data = collections.OrderedDict([ |
|
211 | data = collections.OrderedDict([ | |
212 | ('cpu time (seconds)', get_resource(resource.RLIMIT_CPU)), |
|
212 | ('cpu time (seconds)', get_resource(resource.RLIMIT_CPU)), | |
213 | ('file size', get_resource(resource.RLIMIT_FSIZE)), |
|
213 | ('file size', get_resource(resource.RLIMIT_FSIZE)), | |
214 | ('stack size', get_resource(resource.RLIMIT_STACK)), |
|
214 | ('stack size', get_resource(resource.RLIMIT_STACK)), | |
215 | ('core file size', get_resource(resource.RLIMIT_CORE)), |
|
215 | ('core file size', get_resource(resource.RLIMIT_CORE)), | |
216 | ('address space size', get_resource(resource.RLIMIT_AS)), |
|
216 | ('address space size', get_resource(resource.RLIMIT_AS)), | |
217 | ('locked in mem size', get_resource(resource.RLIMIT_MEMLOCK)), |
|
217 | ('locked in mem size', get_resource(resource.RLIMIT_MEMLOCK)), | |
218 | ('heap size', get_resource(resource.RLIMIT_DATA)), |
|
218 | ('heap size', get_resource(resource.RLIMIT_DATA)), | |
219 | ('rss size', get_resource(resource.RLIMIT_RSS)), |
|
219 | ('rss size', get_resource(resource.RLIMIT_RSS)), | |
220 | ('number of processes', get_resource(resource.RLIMIT_NPROC)), |
|
220 | ('number of processes', get_resource(resource.RLIMIT_NPROC)), | |
221 | ('open files', get_resource(resource.RLIMIT_NOFILE)), |
|
221 | ('open files', get_resource(resource.RLIMIT_NOFILE)), | |
222 | ]) |
|
222 | ]) | |
223 |
|
223 | |||
224 | text = ', '.join(f'{k}:{v}' for k, v in data.items()) |
|
224 | text = ', '.join(f'{k}:{v}' for k, v in data.items()) | |
225 |
|
225 | |||
226 | value = { |
|
226 | value = { | |
227 | 'limits': data, |
|
227 | 'limits': data, | |
228 | 'text': text, |
|
228 | 'text': text, | |
229 | } |
|
229 | } | |
230 | return SysInfoRes(value=value) |
|
230 | return SysInfoRes(value=value) | |
231 |
|
231 | |||
232 |
|
232 | |||
233 | @register_sysinfo |
|
233 | @register_sysinfo | |
234 | def uptime(): |
|
234 | def uptime(): | |
235 | from rhodecode.lib.helpers import age, time_to_datetime |
|
235 | from rhodecode.lib.helpers import age, time_to_datetime | |
236 | from rhodecode.translation import TranslationString |
|
236 | from rhodecode.translation import TranslationString | |
237 |
|
237 | |||
238 | value = dict(boot_time=0, uptime=0, text='') |
|
238 | value = dict(boot_time=0, uptime=0, text='') | |
239 | state = STATE_OK_DEFAULT |
|
239 | state = STATE_OK_DEFAULT | |
240 |
|
240 | |||
241 | boot_time = psutil.boot_time() |
|
241 | boot_time = psutil.boot_time() | |
242 | value['boot_time'] = boot_time |
|
242 | value['boot_time'] = boot_time | |
243 | value['uptime'] = time.time() - boot_time |
|
243 | value['uptime'] = time.time() - boot_time | |
244 |
|
244 | |||
245 | date_or_age = age(time_to_datetime(boot_time)) |
|
245 | date_or_age = age(time_to_datetime(boot_time)) | |
246 | if isinstance(date_or_age, TranslationString): |
|
246 | if isinstance(date_or_age, TranslationString): | |
247 | date_or_age = date_or_age.interpolate() |
|
247 | date_or_age = date_or_age.interpolate() | |
248 |
|
248 | |||
249 | human_value = value.copy() |
|
249 | human_value = value.copy() | |
250 | human_value['boot_time'] = time_to_datetime(boot_time) |
|
250 | human_value['boot_time'] = time_to_datetime(boot_time) | |
251 | human_value['uptime'] = age(time_to_datetime(boot_time), show_suffix=False) |
|
251 | human_value['uptime'] = age(time_to_datetime(boot_time), show_suffix=False) | |
252 |
|
252 | |||
253 | human_value['text'] = f'Server started {date_or_age}' |
|
253 | human_value['text'] = f'Server started {date_or_age}' | |
254 | return SysInfoRes(value=value, human_value=human_value) |
|
254 | return SysInfoRes(value=value, human_value=human_value) | |
255 |
|
255 | |||
256 |
|
256 | |||
257 | @register_sysinfo |
|
257 | @register_sysinfo | |
258 | def memory(): |
|
258 | def memory(): | |
259 | from rhodecode.lib.helpers import format_byte_size_binary |
|
259 | from rhodecode.lib.helpers import format_byte_size_binary | |
260 | value = dict(available=0, used=0, used_real=0, cached=0, percent=0, |
|
260 | value = dict(available=0, used=0, used_real=0, cached=0, percent=0, | |
261 | percent_used=0, free=0, inactive=0, active=0, shared=0, |
|
261 | percent_used=0, free=0, inactive=0, active=0, shared=0, | |
262 | total=0, buffers=0, text='') |
|
262 | total=0, buffers=0, text='') | |
263 |
|
263 | |||
264 | state = STATE_OK_DEFAULT |
|
264 | state = STATE_OK_DEFAULT | |
265 |
|
265 | |||
266 | value.update(dict(psutil.virtual_memory()._asdict())) |
|
266 | value.update(dict(psutil.virtual_memory()._asdict())) | |
267 | value['used_real'] = value['total'] - value['available'] |
|
267 | value['used_real'] = value['total'] - value['available'] | |
268 | value['percent_used'] = psutil._common.usage_percent(value['used_real'], value['total'], 1) |
|
268 | value['percent_used'] = psutil._common.usage_percent(value['used_real'], value['total'], 1) | |
269 |
|
269 | |||
270 | human_value = value.copy() |
|
270 | human_value = value.copy() | |
271 | human_value['text'] = '{}/{}, {}% used'.format( |
|
271 | human_value['text'] = '{}/{}, {}% used'.format( | |
272 | format_byte_size_binary(value['used_real']), |
|
272 | format_byte_size_binary(value['used_real']), | |
273 | format_byte_size_binary(value['total']), |
|
273 | format_byte_size_binary(value['total']), | |
274 | value['percent_used']) |
|
274 | value['percent_used']) | |
275 |
|
275 | |||
276 | keys = list(value.keys())[::] |
|
276 | keys = list(value.keys())[::] | |
277 | keys.pop(keys.index('percent')) |
|
277 | keys.pop(keys.index('percent')) | |
278 | keys.pop(keys.index('percent_used')) |
|
278 | keys.pop(keys.index('percent_used')) | |
279 | keys.pop(keys.index('text')) |
|
279 | keys.pop(keys.index('text')) | |
280 | for k in keys: |
|
280 | for k in keys: | |
281 | human_value[k] = format_byte_size_binary(value[k]) |
|
281 | human_value[k] = format_byte_size_binary(value[k]) | |
282 |
|
282 | |||
283 | if state['type'] == STATE_OK and value['percent_used'] > 90: |
|
283 | if state['type'] == STATE_OK and value['percent_used'] > 90: | |
284 | msg = 'Critical: your available RAM memory is very low.' |
|
284 | msg = 'Critical: your available RAM memory is very low.' | |
285 | state = {'message': msg, 'type': STATE_ERR} |
|
285 | state = {'message': msg, 'type': STATE_ERR} | |
286 |
|
286 | |||
287 | elif state['type'] == STATE_OK and value['percent_used'] > 70: |
|
287 | elif state['type'] == STATE_OK and value['percent_used'] > 70: | |
288 | msg = 'Warning: your available RAM memory is running low.' |
|
288 | msg = 'Warning: your available RAM memory is running low.' | |
289 | state = {'message': msg, 'type': STATE_WARN} |
|
289 | state = {'message': msg, 'type': STATE_WARN} | |
290 |
|
290 | |||
291 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
291 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
292 |
|
292 | |||
293 |
|
293 | |||
294 | @register_sysinfo |
|
294 | @register_sysinfo | |
295 | def machine_load(): |
|
295 | def machine_load(): | |
296 | value = {'1_min': _NA_FLOAT, '5_min': _NA_FLOAT, '15_min': _NA_FLOAT, 'text': ''} |
|
296 | value = {'1_min': _NA_FLOAT, '5_min': _NA_FLOAT, '15_min': _NA_FLOAT, 'text': ''} | |
297 | state = STATE_OK_DEFAULT |
|
297 | state = STATE_OK_DEFAULT | |
298 |
|
298 | |||
299 | # load averages |
|
299 | # load averages | |
300 | if hasattr(psutil.os, 'getloadavg'): |
|
300 | if hasattr(psutil.os, 'getloadavg'): | |
301 | value.update(dict( |
|
301 | value.update(dict( | |
302 | list(zip(['1_min', '5_min', '15_min'], psutil.os.getloadavg())) |
|
302 | list(zip(['1_min', '5_min', '15_min'], psutil.os.getloadavg())) | |
303 | )) |
|
303 | )) | |
304 |
|
304 | |||
305 | human_value = value.copy() |
|
305 | human_value = value.copy() | |
306 | human_value['text'] = '1min: {}, 5min: {}, 15min: {}'.format( |
|
306 | human_value['text'] = '1min: {}, 5min: {}, 15min: {}'.format( | |
307 | value['1_min'], value['5_min'], value['15_min']) |
|
307 | value['1_min'], value['5_min'], value['15_min']) | |
308 |
|
308 | |||
309 | if state['type'] == STATE_OK and value['15_min'] > 5.0: |
|
309 | if state['type'] == STATE_OK and value['15_min'] > 5.0: | |
310 | msg = 'Warning: your machine load is very high.' |
|
310 | msg = 'Warning: your machine load is very high.' | |
311 | state = {'message': msg, 'type': STATE_WARN} |
|
311 | state = {'message': msg, 'type': STATE_WARN} | |
312 |
|
312 | |||
313 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
313 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
314 |
|
314 | |||
315 |
|
315 | |||
316 | @register_sysinfo |
|
316 | @register_sysinfo | |
317 | def cpu(): |
|
317 | def cpu(): | |
318 | value = {'cpu': 0, 'cpu_count': 0, 'cpu_usage': []} |
|
318 | value = {'cpu': 0, 'cpu_count': 0, 'cpu_usage': []} | |
319 | state = STATE_OK_DEFAULT |
|
319 | state = STATE_OK_DEFAULT | |
320 |
|
320 | |||
321 | value['cpu'] = psutil.cpu_percent(0.5) |
|
321 | value['cpu'] = psutil.cpu_percent(0.5) | |
322 | value['cpu_usage'] = psutil.cpu_percent(0.5, percpu=True) |
|
322 | value['cpu_usage'] = psutil.cpu_percent(0.5, percpu=True) | |
323 | value['cpu_count'] = psutil.cpu_count() |
|
323 | value['cpu_count'] = psutil.cpu_count() | |
324 |
|
324 | |||
325 | human_value = value.copy() |
|
325 | human_value = value.copy() | |
326 | human_value['text'] = f'{value["cpu_count"]} cores at {value["cpu"]} %' |
|
326 | human_value['text'] = f'{value["cpu_count"]} cores at {value["cpu"]} %' | |
327 |
|
327 | |||
328 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
328 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
329 |
|
329 | |||
330 |
|
330 | |||
331 | @register_sysinfo |
|
331 | @register_sysinfo | |
332 | def storage(): |
|
332 | def storage(): | |
333 | from rhodecode.lib.helpers import format_byte_size_binary |
|
333 | from rhodecode.lib.helpers import format_byte_size_binary | |
334 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
334 | from rhodecode.lib.utils import get_rhodecode_repo_store_path | |
335 | path = get_rhodecode_repo_store_path() |
|
335 | path = get_rhodecode_repo_store_path() | |
336 |
|
336 | |||
337 | value = dict(percent=0, used=0, total=0, path=path, text='') |
|
337 | value = dict(percent=0, used=0, total=0, path=path, text='') | |
338 | state = STATE_OK_DEFAULT |
|
338 | state = STATE_OK_DEFAULT | |
339 |
|
339 | |||
340 | try: |
|
340 | try: | |
341 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
341 | value.update(dict(psutil.disk_usage(path)._asdict())) | |
342 | except Exception as e: |
|
342 | except Exception as e: | |
343 | log.exception('Failed to fetch disk info') |
|
343 | log.exception('Failed to fetch disk info') | |
344 | state = {'message': str(e), 'type': STATE_ERR} |
|
344 | state = {'message': str(e), 'type': STATE_ERR} | |
345 |
|
345 | |||
346 | human_value = value.copy() |
|
346 | human_value = value.copy() | |
347 | human_value['used'] = format_byte_size_binary(value['used']) |
|
347 | human_value['used'] = format_byte_size_binary(value['used']) | |
348 | human_value['total'] = format_byte_size_binary(value['total']) |
|
348 | human_value['total'] = format_byte_size_binary(value['total']) | |
349 | human_value['text'] = "{}/{}, {}% used".format( |
|
349 | human_value['text'] = "{}/{}, {}% used".format( | |
350 | format_byte_size_binary(value['used']), |
|
350 | format_byte_size_binary(value['used']), | |
351 | format_byte_size_binary(value['total']), |
|
351 | format_byte_size_binary(value['total']), | |
352 | value['percent']) |
|
352 | value['percent']) | |
353 |
|
353 | |||
354 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
354 | if state['type'] == STATE_OK and value['percent'] > 90: | |
355 | msg = 'Critical: your disk space is very low.' |
|
355 | msg = 'Critical: your disk space is very low.' | |
356 | state = {'message': msg, 'type': STATE_ERR} |
|
356 | state = {'message': msg, 'type': STATE_ERR} | |
357 |
|
357 | |||
358 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
358 | elif state['type'] == STATE_OK and value['percent'] > 70: | |
359 | msg = 'Warning: your disk space is running low.' |
|
359 | msg = 'Warning: your disk space is running low.' | |
360 | state = {'message': msg, 'type': STATE_WARN} |
|
360 | state = {'message': msg, 'type': STATE_WARN} | |
361 |
|
361 | |||
362 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
362 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
363 |
|
363 | |||
364 |
|
364 | |||
365 | @register_sysinfo |
|
365 | @register_sysinfo | |
366 | def storage_inodes(): |
|
366 | def storage_inodes(): | |
367 | from rhodecode.lib.utils import get_rhodecode_repo_store_path |
|
367 | from rhodecode.lib.utils import get_rhodecode_repo_store_path | |
368 | path = get_rhodecode_repo_store_path() |
|
368 | path = get_rhodecode_repo_store_path() | |
369 |
|
369 | |||
370 | value = dict(percent=0.0, free=0, used=0, total=0, path=path, text='') |
|
370 | value = dict(percent=0.0, free=0, used=0, total=0, path=path, text='') | |
371 | state = STATE_OK_DEFAULT |
|
371 | state = STATE_OK_DEFAULT | |
372 |
|
372 | |||
373 | try: |
|
373 | try: | |
374 | i_stat = os.statvfs(path) |
|
374 | i_stat = os.statvfs(path) | |
375 | value['free'] = i_stat.f_ffree |
|
375 | value['free'] = i_stat.f_ffree | |
376 | value['used'] = i_stat.f_files-i_stat.f_favail |
|
376 | value['used'] = i_stat.f_files-i_stat.f_favail | |
377 | value['total'] = i_stat.f_files |
|
377 | value['total'] = i_stat.f_files | |
378 | value['percent'] = percentage(value['used'], value['total']) |
|
378 | value['percent'] = percentage(value['used'], value['total']) | |
379 | except Exception as e: |
|
379 | except Exception as e: | |
380 | log.exception('Failed to fetch disk inodes info') |
|
380 | log.exception('Failed to fetch disk inodes info') | |
381 | state = {'message': str(e), 'type': STATE_ERR} |
|
381 | state = {'message': str(e), 'type': STATE_ERR} | |
382 |
|
382 | |||
383 | human_value = value.copy() |
|
383 | human_value = value.copy() | |
384 | human_value['text'] = "{}/{}, {}% used".format( |
|
384 | human_value['text'] = "{}/{}, {}% used".format( | |
385 | value['used'], value['total'], value['percent']) |
|
385 | value['used'], value['total'], value['percent']) | |
386 |
|
386 | |||
387 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
387 | if state['type'] == STATE_OK and value['percent'] > 90: | |
388 | msg = 'Critical: your disk free inodes are very low.' |
|
388 | msg = 'Critical: your disk free inodes are very low.' | |
389 | state = {'message': msg, 'type': STATE_ERR} |
|
389 | state = {'message': msg, 'type': STATE_ERR} | |
390 |
|
390 | |||
391 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
391 | elif state['type'] == STATE_OK and value['percent'] > 70: | |
392 | msg = 'Warning: your disk free inodes are running low.' |
|
392 | msg = 'Warning: your disk free inodes are running low.' | |
393 | state = {'message': msg, 'type': STATE_WARN} |
|
393 | state = {'message': msg, 'type': STATE_WARN} | |
394 |
|
394 | |||
395 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
395 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
396 |
|
396 | |||
397 |
|
397 | |||
398 | @register_sysinfo |
|
398 | @register_sysinfo | |
399 | def storage_artifacts(): |
|
399 | def storage_artifacts(): | |
400 | import rhodecode |
|
400 | import rhodecode | |
401 | from rhodecode.lib.helpers import format_byte_size_binary |
|
401 | from rhodecode.lib.helpers import format_byte_size_binary | |
402 | from rhodecode.lib.archive_cache import get_archival_cache_store |
|
402 | from rhodecode.lib.archive_cache import get_archival_cache_store | |
403 |
|
403 | |||
404 | backend_type = rhodecode.ConfigGet().get_str('archive_cache.backend.type') |
|
404 | backend_type = rhodecode.ConfigGet().get_str('archive_cache.backend.type') | |
405 |
|
405 | |||
406 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) |
|
406 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) | |
407 | state = STATE_OK_DEFAULT |
|
407 | state = STATE_OK_DEFAULT | |
408 | try: |
|
408 | try: | |
409 | d_cache = get_archival_cache_store(config=rhodecode.CONFIG) |
|
409 | d_cache = get_archival_cache_store(config=rhodecode.CONFIG) | |
410 | backend_type = str(d_cache) |
|
410 | backend_type = str(d_cache) | |
411 |
|
411 | |||
412 | total_files, total_size, _directory_stats = d_cache.get_statistics() |
|
412 | total_files, total_size, _directory_stats = d_cache.get_statistics() | |
413 |
|
413 | |||
414 | value.update({ |
|
414 | value.update({ | |
415 | 'percent': 100, |
|
415 | 'percent': 100, | |
416 | 'used': total_size, |
|
416 | 'used': total_size, | |
417 | 'total': total_size, |
|
417 | 'total': total_size, | |
418 | 'items': total_files, |
|
418 | 'items': total_files, | |
419 | 'path': d_cache.storage_path, |
|
419 | 'path': d_cache.storage_path, | |
420 | 'type': backend_type |
|
420 | 'type': backend_type | |
421 | }) |
|
421 | }) | |
422 |
|
422 | |||
423 | except Exception as e: |
|
423 | except Exception as e: | |
424 | log.exception('failed to fetch archive cache storage') |
|
424 | log.exception('failed to fetch archive cache storage') | |
425 | state = {'message': str(e), 'type': STATE_ERR} |
|
425 | state = {'message': str(e), 'type': STATE_ERR} | |
426 |
|
426 | |||
427 | human_value = value.copy() |
|
427 | human_value = value.copy() | |
428 | human_value['used'] = format_byte_size_binary(value['used']) |
|
428 | human_value['used'] = format_byte_size_binary(value['used']) | |
429 | human_value['total'] = format_byte_size_binary(value['total']) |
|
429 | human_value['total'] = format_byte_size_binary(value['total']) | |
430 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" |
|
430 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" | |
431 |
|
431 | |||
432 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
432 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
433 |
|
433 | |||
434 |
|
434 | |||
435 | @register_sysinfo |
|
435 | @register_sysinfo | |
436 | def storage_archives(): |
|
436 | def storage_archives(): | |
437 | import rhodecode |
|
437 | import rhodecode | |
438 | from rhodecode.lib.helpers import format_byte_size_binary |
|
438 | from rhodecode.lib.helpers import format_byte_size_binary | |
439 | import rhodecode.apps.file_store.utils as store_utils |
|
439 | import rhodecode.apps.file_store.utils as store_utils | |
440 | from rhodecode import CONFIG |
|
440 | from rhodecode import CONFIG | |
441 |
|
441 | |||
442 | backend_type = rhodecode.ConfigGet().get_str(store_utils.config_keys.backend_type) |
|
442 | backend_type = rhodecode.ConfigGet().get_str(store_utils.config_keys.backend_type) | |
443 |
|
443 | |||
444 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) |
|
444 | value = dict(percent=0, used=0, total=0, items=0, path='', text='', type=backend_type) | |
445 | state = STATE_OK_DEFAULT |
|
445 | state = STATE_OK_DEFAULT | |
446 | try: |
|
446 | try: | |
447 | f_store = store_utils.get_filestore_backend(config=CONFIG) |
|
447 | f_store = store_utils.get_filestore_backend(config=CONFIG) | |
448 | backend_type = str(f_store) |
|
448 | backend_type = str(f_store) | |
449 | total_files, total_size, _directory_stats = f_store.get_statistics() |
|
449 | total_files, total_size, _directory_stats = f_store.get_statistics() | |
450 |
|
450 | |||
451 | value.update({ |
|
451 | value.update({ | |
452 | 'percent': 100, |
|
452 | 'percent': 100, | |
453 | 'used': total_size, |
|
453 | 'used': total_size, | |
454 | 'total': total_size, |
|
454 | 'total': total_size, | |
455 | 'items': total_files, |
|
455 | 'items': total_files, | |
456 | 'path': f_store.storage_path, |
|
456 | 'path': f_store.storage_path, | |
457 | 'type': backend_type |
|
457 | 'type': backend_type | |
458 | }) |
|
458 | }) | |
459 |
|
459 | |||
460 | except Exception as e: |
|
460 | except Exception as e: | |
461 | log.exception('failed to fetch file store storage') |
|
461 | log.exception('failed to fetch file store storage') | |
462 | state = {'message': str(e), 'type': STATE_ERR} |
|
462 | state = {'message': str(e), 'type': STATE_ERR} | |
463 |
|
463 | |||
464 | human_value = value.copy() |
|
464 | human_value = value.copy() | |
465 | human_value['used'] = format_byte_size_binary(value['used']) |
|
465 | human_value['used'] = format_byte_size_binary(value['used']) | |
466 | human_value['total'] = format_byte_size_binary(value['total']) |
|
466 | human_value['total'] = format_byte_size_binary(value['total']) | |
467 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" |
|
467 | human_value['text'] = f"{human_value['used']} ({value['items']} items)" | |
468 |
|
468 | |||
469 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
469 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
470 |
|
470 | |||
471 |
|
471 | |||
472 | @register_sysinfo |
|
472 | @register_sysinfo | |
473 | def storage_gist(): |
|
473 | def storage_gist(): | |
474 | from rhodecode.model.gist import GIST_STORE_LOC |
|
474 | from rhodecode.model.gist import GIST_STORE_LOC | |
475 | from rhodecode.lib.utils import safe_str, get_rhodecode_repo_store_path |
|
475 | from rhodecode.lib.utils import safe_str, get_rhodecode_repo_store_path | |
476 | from rhodecode.lib.helpers import format_byte_size_binary, get_directory_statistics |
|
476 | from rhodecode.lib.helpers import format_byte_size_binary, get_directory_statistics | |
477 |
|
477 | |||
478 | path = safe_str(os.path.join( |
|
478 | path = safe_str(os.path.join( | |
479 | get_rhodecode_repo_store_path(), GIST_STORE_LOC)) |
|
479 | get_rhodecode_repo_store_path(), GIST_STORE_LOC)) | |
480 |
|
480 | |||
481 | # gist storage |
|
481 | # gist storage | |
482 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
482 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') | |
483 | state = STATE_OK_DEFAULT |
|
483 | state = STATE_OK_DEFAULT | |
484 |
|
484 | |||
485 | try: |
|
485 | try: | |
486 | total_files, total_size, _directory_stats = get_directory_statistics(path) |
|
486 | total_files, total_size, _directory_stats = get_directory_statistics(path) | |
487 | value.update({ |
|
487 | value.update({ | |
488 | 'percent': 100, |
|
488 | 'percent': 100, | |
489 | 'used': total_size, |
|
489 | 'used': total_size, | |
490 | 'total': total_size, |
|
490 | 'total': total_size, | |
491 | 'items': total_files |
|
491 | 'items': total_files | |
492 | }) |
|
492 | }) | |
493 | except Exception as e: |
|
493 | except Exception as e: | |
494 | log.exception('failed to fetch gist storage items') |
|
494 | log.exception('failed to fetch gist storage items') | |
495 | state = {'message': str(e), 'type': STATE_ERR} |
|
495 | state = {'message': str(e), 'type': STATE_ERR} | |
496 |
|
496 | |||
497 | human_value = value.copy() |
|
497 | human_value = value.copy() | |
498 | human_value['used'] = format_byte_size_binary(value['used']) |
|
498 | human_value['used'] = format_byte_size_binary(value['used']) | |
499 | human_value['total'] = format_byte_size_binary(value['total']) |
|
499 | human_value['total'] = format_byte_size_binary(value['total']) | |
500 | human_value['text'] = "{} ({} items)".format( |
|
500 | human_value['text'] = "{} ({} items)".format( | |
501 | human_value['used'], value['items']) |
|
501 | human_value['used'], value['items']) | |
502 |
|
502 | |||
503 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
503 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
504 |
|
504 | |||
505 |
|
505 | |||
506 | @register_sysinfo |
|
506 | @register_sysinfo | |
507 | def storage_temp(): |
|
507 | def storage_temp(): | |
508 | import tempfile |
|
508 | import tempfile | |
509 | from rhodecode.lib.helpers import format_byte_size_binary |
|
509 | from rhodecode.lib.helpers import format_byte_size_binary | |
510 |
|
510 | |||
511 | path = tempfile.gettempdir() |
|
511 | path = tempfile.gettempdir() | |
512 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
512 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') | |
513 | state = STATE_OK_DEFAULT |
|
513 | state = STATE_OK_DEFAULT | |
514 |
|
514 | |||
515 | if not psutil: |
|
515 | if not psutil: | |
516 | return SysInfoRes(value=value, state=state) |
|
516 | return SysInfoRes(value=value, state=state) | |
517 |
|
517 | |||
518 | try: |
|
518 | try: | |
519 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
519 | value.update(dict(psutil.disk_usage(path)._asdict())) | |
520 | except Exception as e: |
|
520 | except Exception as e: | |
521 | log.exception('Failed to fetch temp dir info') |
|
521 | log.exception('Failed to fetch temp dir info') | |
522 | state = {'message': str(e), 'type': STATE_ERR} |
|
522 | state = {'message': str(e), 'type': STATE_ERR} | |
523 |
|
523 | |||
524 | human_value = value.copy() |
|
524 | human_value = value.copy() | |
525 | human_value['used'] = format_byte_size_binary(value['used']) |
|
525 | human_value['used'] = format_byte_size_binary(value['used']) | |
526 | human_value['total'] = format_byte_size_binary(value['total']) |
|
526 | human_value['total'] = format_byte_size_binary(value['total']) | |
527 | human_value['text'] = "{}/{}, {}% used".format( |
|
527 | human_value['text'] = "{}/{}, {}% used".format( | |
528 | format_byte_size_binary(value['used']), |
|
528 | format_byte_size_binary(value['used']), | |
529 | format_byte_size_binary(value['total']), |
|
529 | format_byte_size_binary(value['total']), | |
530 | value['percent']) |
|
530 | value['percent']) | |
531 |
|
531 | |||
532 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
532 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
533 |
|
533 | |||
534 |
|
534 | |||
535 | @register_sysinfo |
|
535 | @register_sysinfo | |
536 | def search_info(): |
|
536 | def search_info(): | |
537 | import rhodecode |
|
537 | import rhodecode | |
538 | from rhodecode.lib.index import searcher_from_config |
|
538 | from rhodecode.lib.index import searcher_from_config | |
539 |
|
539 | |||
540 | backend = rhodecode.CONFIG.get('search.module', '') |
|
540 | backend = rhodecode.CONFIG.get('search.module', '') | |
541 | location = rhodecode.CONFIG.get('search.location', '') |
|
541 | location = rhodecode.CONFIG.get('search.location', '') | |
542 |
|
542 | |||
543 | try: |
|
543 | try: | |
544 | searcher = searcher_from_config(rhodecode.CONFIG) |
|
544 | searcher = searcher_from_config(rhodecode.CONFIG) | |
545 | searcher = searcher.__class__.__name__ |
|
545 | searcher = searcher.__class__.__name__ | |
546 | except Exception: |
|
546 | except Exception: | |
547 | searcher = None |
|
547 | searcher = None | |
548 |
|
548 | |||
549 | value = dict( |
|
549 | value = dict( | |
550 | backend=backend, searcher=searcher, location=location, text='') |
|
550 | backend=backend, searcher=searcher, location=location, text='') | |
551 | state = STATE_OK_DEFAULT |
|
551 | state = STATE_OK_DEFAULT | |
552 |
|
552 | |||
553 | human_value = value.copy() |
|
553 | human_value = value.copy() | |
554 | human_value['text'] = "backend:`{}`".format(human_value['backend']) |
|
554 | human_value['text'] = "backend:`{}`".format(human_value['backend']) | |
555 |
|
555 | |||
556 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
556 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
557 |
|
557 | |||
558 |
|
558 | |||
559 | @register_sysinfo |
|
559 | @register_sysinfo | |
560 | def git_info(): |
|
560 | def git_info(): | |
561 | from rhodecode.lib.vcs.backends import git |
|
561 | from rhodecode.lib.vcs.backends import git | |
562 | state = STATE_OK_DEFAULT |
|
562 | state = STATE_OK_DEFAULT | |
563 | value = human_value = '' |
|
563 | value = human_value = '' | |
564 | try: |
|
564 | try: | |
565 | value = git.discover_git_version(raise_on_exc=True) |
|
565 | value = git.discover_git_version(raise_on_exc=True) | |
566 | human_value = f'version reported from VCSServer: {value}' |
|
566 | human_value = f'version reported from VCSServer: {value}' | |
567 | except Exception as e: |
|
567 | except Exception as e: | |
568 | state = {'message': str(e), 'type': STATE_ERR} |
|
568 | state = {'message': str(e), 'type': STATE_ERR} | |
569 |
|
569 | |||
570 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
570 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
571 |
|
571 | |||
572 |
|
572 | |||
573 | @register_sysinfo |
|
573 | @register_sysinfo | |
574 | def hg_info(): |
|
574 | def hg_info(): | |
575 | from rhodecode.lib.vcs.backends import hg |
|
575 | from rhodecode.lib.vcs.backends import hg | |
576 | state = STATE_OK_DEFAULT |
|
576 | state = STATE_OK_DEFAULT | |
577 | value = human_value = '' |
|
577 | value = human_value = '' | |
578 | try: |
|
578 | try: | |
579 | value = hg.discover_hg_version(raise_on_exc=True) |
|
579 | value = hg.discover_hg_version(raise_on_exc=True) | |
580 | human_value = f'version reported from VCSServer: {value}' |
|
580 | human_value = f'version reported from VCSServer: {value}' | |
581 | except Exception as e: |
|
581 | except Exception as e: | |
582 | state = {'message': str(e), 'type': STATE_ERR} |
|
582 | state = {'message': str(e), 'type': STATE_ERR} | |
583 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
583 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
584 |
|
584 | |||
585 |
|
585 | |||
586 | @register_sysinfo |
|
586 | @register_sysinfo | |
587 | def svn_info(): |
|
587 | def svn_info(): | |
588 | from rhodecode.lib.vcs.backends import svn |
|
588 | from rhodecode.lib.vcs.backends import svn | |
589 | state = STATE_OK_DEFAULT |
|
589 | state = STATE_OK_DEFAULT | |
590 | value = human_value = '' |
|
590 | value = human_value = '' | |
591 | try: |
|
591 | try: | |
592 | value = svn.discover_svn_version(raise_on_exc=True) |
|
592 | value = svn.discover_svn_version(raise_on_exc=True) | |
593 | human_value = f'version reported from VCSServer: {value}' |
|
593 | human_value = f'version reported from VCSServer: {value}' | |
594 | except Exception as e: |
|
594 | except Exception as e: | |
595 | state = {'message': str(e), 'type': STATE_ERR} |
|
595 | state = {'message': str(e), 'type': STATE_ERR} | |
596 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
596 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
597 |
|
597 | |||
598 |
|
598 | |||
599 | @register_sysinfo |
|
599 | @register_sysinfo | |
600 | def vcs_backends(): |
|
600 | def vcs_backends(): | |
601 | import rhodecode |
|
601 | import rhodecode | |
602 | value = rhodecode.CONFIG.get('vcs.backends') |
|
602 | value = rhodecode.CONFIG.get('vcs.backends') | |
603 | human_value = 'Enabled backends in order: {}'.format(','.join(value)) |
|
603 | human_value = 'Enabled backends in order: {}'.format(','.join(value)) | |
604 | return SysInfoRes(value=value, human_value=human_value) |
|
604 | return SysInfoRes(value=value, human_value=human_value) | |
605 |
|
605 | |||
606 |
|
606 | |||
607 | @register_sysinfo |
|
607 | @register_sysinfo | |
608 | def vcs_server(): |
|
608 | def vcs_server(): | |
609 | import rhodecode |
|
609 | import rhodecode | |
610 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
610 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data | |
611 |
|
611 | |||
612 | server_url = rhodecode.CONFIG.get('vcs.server') |
|
612 | server_url = rhodecode.CONFIG.get('vcs.server') | |
613 | enabled = rhodecode.CONFIG.get('vcs.server.enable') |
|
613 | enabled = rhodecode.CONFIG.get('vcs.server.enable') | |
614 | protocol = rhodecode.CONFIG.get('vcs.server.protocol') or 'http' |
|
614 | protocol = rhodecode.CONFIG.get('vcs.server.protocol') or 'http' | |
615 | state = STATE_OK_DEFAULT |
|
615 | state = STATE_OK_DEFAULT | |
616 | version = None |
|
616 | version = None | |
617 | workers = 0 |
|
617 | workers = 0 | |
618 |
|
618 | |||
619 | try: |
|
619 | try: | |
620 | data = get_vcsserver_service_data() |
|
620 | data = get_vcsserver_service_data() | |
621 | if data and 'version' in data: |
|
621 | if data and 'version' in data: | |
622 | version = data['version'] |
|
622 | version = data['version'] | |
623 |
|
623 | |||
624 | if data and 'config' in data: |
|
624 | if data and 'config' in data: | |
625 | conf = data['config'] |
|
625 | conf = data['config'] | |
626 | workers = conf.get('workers', 'NOT AVAILABLE') |
|
626 | workers = conf.get('workers', 'NOT AVAILABLE') | |
627 |
|
627 | |||
628 | connection = 'connected' |
|
628 | connection = 'connected' | |
629 | except Exception as e: |
|
629 | except Exception as e: | |
630 | connection = 'failed' |
|
630 | connection = 'failed' | |
631 | state = {'message': str(e), 'type': STATE_ERR} |
|
631 | state = {'message': str(e), 'type': STATE_ERR} | |
632 |
|
632 | |||
633 | value = dict( |
|
633 | value = dict( | |
634 | url=server_url, |
|
634 | url=server_url, | |
635 | enabled=enabled, |
|
635 | enabled=enabled, | |
636 | protocol=protocol, |
|
636 | protocol=protocol, | |
637 | connection=connection, |
|
637 | connection=connection, | |
638 | version=version, |
|
638 | version=version, | |
639 | text='', |
|
639 | text='', | |
640 | ) |
|
640 | ) | |
641 |
|
641 | |||
642 | human_value = value.copy() |
|
642 | human_value = value.copy() | |
643 | human_value['text'] = \ |
|
643 | human_value['text'] = \ | |
644 | '{url}@ver:{ver} via {mode} mode[workers:{workers}], connection:{conn}'.format( |
|
644 | '{url}@ver:{ver} via {mode} mode[workers:{workers}], connection:{conn}'.format( | |
645 | url=server_url, ver=version, workers=workers, mode=protocol, |
|
645 | url=server_url, ver=version, workers=workers, mode=protocol, | |
646 | conn=connection) |
|
646 | conn=connection) | |
647 |
|
647 | |||
648 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
648 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
649 |
|
649 | |||
650 |
|
650 | |||
651 | @register_sysinfo |
|
651 | @register_sysinfo | |
652 | def vcs_server_config(): |
|
652 | def vcs_server_config(): | |
653 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
653 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data | |
654 | state = STATE_OK_DEFAULT |
|
654 | state = STATE_OK_DEFAULT | |
655 |
|
655 | |||
656 | value = {} |
|
656 | value = {} | |
657 | try: |
|
657 | try: | |
658 | data = get_vcsserver_service_data() |
|
658 | data = get_vcsserver_service_data() | |
659 | value = data['app_config'] |
|
659 | value = data['app_config'] | |
660 | except Exception as e: |
|
660 | except Exception as e: | |
661 | state = {'message': str(e), 'type': STATE_ERR} |
|
661 | state = {'message': str(e), 'type': STATE_ERR} | |
662 |
|
662 | |||
663 | human_value = value.copy() |
|
663 | human_value = value.copy() | |
664 | human_value['text'] = 'VCS Server config' |
|
664 | human_value['text'] = 'VCS Server config' | |
665 |
|
665 | |||
666 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
666 | return SysInfoRes(value=value, state=state, human_value=human_value) | |
667 |
|
667 | |||
|
668 | @register_sysinfo | |||
|
669 | def rhodecode_server_config(): | |||
|
670 | import rhodecode | |||
|
671 | ||||
|
672 | state = STATE_OK_DEFAULT | |||
|
673 | config = rhodecode.CONFIG.copy() | |||
|
674 | ||||
|
675 | secrets_list = [ | |||
|
676 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', | |||
|
677 | 'sqlalchemy.db1.url', | |||
|
678 | 'channelstream.secret', | |||
|
679 | 'beaker.session.secret', | |||
|
680 | 'rhodecode.encrypted_values.secret', | |||
|
681 | 'appenlight.api_key', | |||
|
682 | 'smtp_password', | |||
|
683 | 'file_store.objectstore.secret', | |||
|
684 | 'archive_cache.objectstore.secret', | |||
|
685 | 'app.service_api.token', | |||
|
686 | ] | |||
|
687 | for k in secrets_list: | |||
|
688 | if k in config: | |||
|
689 | config[k] = '**OBFUSCATED**' | |||
|
690 | ||||
|
691 | value = human_value = config | |||
|
692 | return SysInfoRes(value=value, state=state, human_value=human_value) | |||
|
693 | ||||
668 |
|
694 | |||
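The new rhodecode_server_config helper above copies rhodecode.CONFIG and masks known secret keys before exposing it. A minimal standalone sketch of the same masking pattern, assuming nothing beyond plain dicts (the config dict and the shortened key list here are made-up examples; the full list is the secrets_list above):

    # sketch: mask known secret keys in a copied config dict before display
    SECRET_KEYS = [
        'sqlalchemy.db1.url',
        'beaker.session.secret',
        'smtp_password',
    ]

    def obfuscate_config(config):
        # work on a copy so the live config is never mutated
        safe = dict(config)
        for key in SECRET_KEYS:
            if key in safe:
                safe[key] = '**OBFUSCATED**'
        return safe

    assert obfuscate_config({'smtp_password': 'x'})['smtp_password'] == '**OBFUSCATED**'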
669 | @register_sysinfo |
|
695 | @register_sysinfo | |
670 | def rhodecode_app_info(): |
|
696 | def rhodecode_app_info(): | |
671 | import rhodecode |
|
697 | import rhodecode | |
672 | edition = rhodecode.CONFIG.get('rhodecode.edition') |
|
698 | edition = rhodecode.CONFIG.get('rhodecode.edition') | |
673 |
|
699 | |||
674 | value = dict( |
|
700 | value = dict( | |
675 | rhodecode_version=rhodecode.__version__, |
|
701 | rhodecode_version=rhodecode.__version__, | |
676 | rhodecode_lib_path=os.path.abspath(rhodecode.__file__), |
|
702 | rhodecode_lib_path=os.path.abspath(rhodecode.__file__), | |
677 | text='' |
|
703 | text='' | |
678 | ) |
|
704 | ) | |
679 | human_value = value.copy() |
|
705 | human_value = value.copy() | |
680 | human_value['text'] = 'RhodeCode {edition}, version {ver}'.format( |
|
706 | human_value['text'] = 'RhodeCode {edition}, version {ver}'.format( | |
681 | edition=edition, ver=value['rhodecode_version'] |
|
707 | edition=edition, ver=value['rhodecode_version'] | |
682 | ) |
|
708 | ) | |
683 | return SysInfoRes(value=value, human_value=human_value) |
|
709 | return SysInfoRes(value=value, human_value=human_value) | |
684 |
|
710 | |||
685 |
|
711 | |||
686 | @register_sysinfo |
|
712 | @register_sysinfo | |
687 | def rhodecode_config(): |
|
713 | def rhodecode_config(): | |
688 | import rhodecode |
|
714 | import rhodecode | |
689 | path = rhodecode.CONFIG.get('__file__') |
|
715 | path = rhodecode.CONFIG.get('__file__') | |
690 | rhodecode_ini_safe = rhodecode.CONFIG.copy() |
|
716 | rhodecode_ini_safe = rhodecode.CONFIG.copy() | |
691 | cert_path = get_cert_path(path) |
|
717 | cert_path = get_cert_path(path) | |
692 |
|
718 | |||
693 | try: |
|
719 | try: | |
694 | config = configparser.ConfigParser() |
|
720 | config = configparser.ConfigParser() | |
695 | config.read(path) |
|
721 | config.read(path) | |
696 | parsed_ini = config |
|
722 | parsed_ini = config | |
697 | if parsed_ini.has_section('server:main'): |
|
723 | if parsed_ini.has_section('server:main'): | |
698 | parsed_ini = dict(parsed_ini.items('server:main')) |
|
724 | parsed_ini = dict(parsed_ini.items('server:main')) | |
699 | except Exception: |
|
725 | except Exception: | |
700 | log.exception('Failed to read .ini file for display') |
|
726 | log.exception('Failed to read .ini file for display') | |
701 | parsed_ini = {} |
|
727 | parsed_ini = {} | |
702 |
|
728 | |||
703 | rhodecode_ini_safe['server:main'] = parsed_ini |
|
729 | rhodecode_ini_safe['server:main'] = parsed_ini | |
704 |
|
730 | |||
705 | blacklist = [ |
|
731 | blacklist = [ | |
706 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', |
|
732 | f'rhodecode_{LicenseModel.LICENSE_DB_KEY}', | |
707 | 'routes.map', |
|
733 | 'routes.map', | |
708 | 'sqlalchemy.db1.url', |
|
734 | 'sqlalchemy.db1.url', | |
709 | 'channelstream.secret', |
|
735 | 'channelstream.secret', | |
710 | 'beaker.session.secret', |
|
736 | 'beaker.session.secret', | |
711 | 'rhodecode.encrypted_values.secret', |
|
737 | 'rhodecode.encrypted_values.secret', | |
712 | 'rhodecode_auth_github_consumer_key', |
|
738 | 'rhodecode_auth_github_consumer_key', | |
713 | 'rhodecode_auth_github_consumer_secret', |
|
739 | 'rhodecode_auth_github_consumer_secret', | |
714 | 'rhodecode_auth_google_consumer_key', |
|
740 | 'rhodecode_auth_google_consumer_key', | |
715 | 'rhodecode_auth_google_consumer_secret', |
|
741 | 'rhodecode_auth_google_consumer_secret', | |
716 | 'rhodecode_auth_bitbucket_consumer_secret', |
|
742 | 'rhodecode_auth_bitbucket_consumer_secret', | |
717 | 'rhodecode_auth_bitbucket_consumer_key', |
|
743 | 'rhodecode_auth_bitbucket_consumer_key', | |
718 | 'rhodecode_auth_twitter_consumer_secret', |
|
744 | 'rhodecode_auth_twitter_consumer_secret', | |
719 | 'rhodecode_auth_twitter_consumer_key', |
|
745 | 'rhodecode_auth_twitter_consumer_key', | |
720 |
|
746 | |||
721 | 'rhodecode_auth_twitter_secret', |
|
747 | 'rhodecode_auth_twitter_secret', | |
722 | 'rhodecode_auth_github_secret', |
|
748 | 'rhodecode_auth_github_secret', | |
723 | 'rhodecode_auth_google_secret', |
|
749 | 'rhodecode_auth_google_secret', | |
724 | 'rhodecode_auth_bitbucket_secret', |
|
750 | 'rhodecode_auth_bitbucket_secret', | |
725 |
|
751 | |||
726 | 'appenlight.api_key', |
|
752 | 'appenlight.api_key', | |
727 | ('app_conf', 'sqlalchemy.db1.url') |
|
753 | ('app_conf', 'sqlalchemy.db1.url') | |
728 | ] |
|
754 | ] | |
729 | for k in blacklist: |
|
755 | for k in blacklist: | |
730 | if isinstance(k, tuple): |
|
756 | if isinstance(k, tuple): | |
731 | section, key = k |
|
757 | section, key = k | |
732 | if section in rhodecode_ini_safe: |
|
758 | if section in rhodecode_ini_safe: | |
733 | rhodecode_ini_safe[section] = '**OBFUSCATED**' |
|
759 | rhodecode_ini_safe[section] = '**OBFUSCATED**' | |
734 | else: |
|
760 | else: | |
735 | rhodecode_ini_safe.pop(k, None) |
|
761 | rhodecode_ini_safe.pop(k, None) | |
736 |
|
762 | |||
737 | # TODO: maybe put some CONFIG checks here ? |
|
763 | # TODO: maybe put some CONFIG checks here ? | |
738 | return SysInfoRes(value={'config': rhodecode_ini_safe, |
|
764 | return SysInfoRes(value={'config': rhodecode_ini_safe, | |
739 | 'path': path, 'cert_path': cert_path}) |
|
765 | 'path': path, 'cert_path': cert_path}) | |
740 |
|
766 | |||
741 |
|
767 | |||
742 | @register_sysinfo |
|
768 | @register_sysinfo | |
743 | def database_info(): |
|
769 | def database_info(): | |
744 | import rhodecode |
|
770 | import rhodecode | |
745 | from sqlalchemy.engine import url as engine_url |
|
771 | from sqlalchemy.engine import url as engine_url | |
746 | from rhodecode.model import meta |
|
772 | from rhodecode.model import meta | |
747 | from rhodecode.model.meta import Session |
|
773 | from rhodecode.model.meta import Session | |
748 | from rhodecode.model.db import DbMigrateVersion |
|
774 | from rhodecode.model.db import DbMigrateVersion | |
749 |
|
775 | |||
750 | state = STATE_OK_DEFAULT |
|
776 | state = STATE_OK_DEFAULT | |
751 |
|
777 | |||
752 | db_migrate = DbMigrateVersion.query().filter( |
|
778 | db_migrate = DbMigrateVersion.query().filter( | |
753 | DbMigrateVersion.repository_id == 'rhodecode_db_migrations').one() |
|
779 | DbMigrateVersion.repository_id == 'rhodecode_db_migrations').one() | |
754 |
|
780 | |||
755 | db_url_obj = engine_url.make_url(rhodecode.CONFIG['sqlalchemy.db1.url']) |
|
781 | db_url_obj = engine_url.make_url(rhodecode.CONFIG['sqlalchemy.db1.url']) | |
756 |
|
782 | |||
757 | try: |
|
783 | try: | |
758 | engine = meta.get_engine() |
|
784 | engine = meta.get_engine() | |
759 | db_server_info = engine.dialect._get_server_version_info( |
|
785 | db_server_info = engine.dialect._get_server_version_info( | |
760 | Session.connection(bind=engine)) |
|
786 | Session.connection(bind=engine)) | |
761 | db_version = '.'.join(map(str, db_server_info)) |
|
787 | db_version = '.'.join(map(str, db_server_info)) | |
762 | except Exception: |
|
788 | except Exception: | |
763 | log.exception('failed to fetch db version') |
|
789 | log.exception('failed to fetch db version') | |
764 | db_version = 'UNKNOWN' |
|
790 | db_version = 'UNKNOWN' | |
765 |
|
791 | |||
766 | db_info = dict( |
|
792 | db_info = dict( | |
767 | migrate_version=db_migrate.version, |
|
793 | migrate_version=db_migrate.version, | |
768 | type=db_url_obj.get_backend_name(), |
|
794 | type=db_url_obj.get_backend_name(), | |
769 | version=db_version, |
|
795 | version=db_version, | |
770 | url=repr(db_url_obj) |
|
796 | url=repr(db_url_obj) | |
771 | ) |
|
797 | ) | |
772 | current_version = db_migrate.version |
|
798 | current_version = db_migrate.version | |
773 | expected_version = rhodecode.__dbversion__ |
|
799 | expected_version = rhodecode.__dbversion__ | |
774 | if state['type'] == STATE_OK and current_version != expected_version: |
|
800 | if state['type'] == STATE_OK and current_version != expected_version: | |
775 | msg = 'Critical: database schema mismatch, ' \ |
|
801 | msg = 'Critical: database schema mismatch, ' \ | |
776 | 'expected version {}, got {}. ' \ |
|
802 | 'expected version {}, got {}. ' \ | |
777 | 'Please run migrations on your database.'.format( |
|
803 | 'Please run migrations on your database.'.format( | |
778 | expected_version, current_version) |
|
804 | expected_version, current_version) | |
779 | state = {'message': msg, 'type': STATE_ERR} |
|
805 | state = {'message': msg, 'type': STATE_ERR} | |
780 |
|
806 | |||
781 | human_value = db_info.copy() |
|
807 | human_value = db_info.copy() | |
782 | human_value['url'] = "{} @ migration version: {}".format( |
|
808 | human_value['url'] = "{} @ migration version: {}".format( | |
783 | db_info['url'], db_info['migrate_version']) |
|
809 | db_info['url'], db_info['migrate_version']) | |
784 | human_value['version'] = "{} {}".format(db_info['type'], db_info['version']) |
|
810 | human_value['version'] = "{} {}".format(db_info['type'], db_info['version']) | |
785 | return SysInfoRes(value=db_info, state=state, human_value=human_value) |
|
811 | return SysInfoRes(value=db_info, state=state, human_value=human_value) | |
786 |
|
812 | |||
787 |
|
813 | |||
788 | @register_sysinfo |
|
814 | @register_sysinfo | |
789 | def server_info(environ): |
|
815 | def server_info(environ): | |
790 | import rhodecode |
|
816 | import rhodecode | |
791 | from rhodecode.lib.base import get_server_ip_addr, get_server_port |
|
817 | from rhodecode.lib.base import get_server_ip_addr, get_server_port | |
792 |
|
818 | |||
793 | value = { |
|
819 | value = { | |
794 | 'server_ip': '{}:{}'.format( |
|
820 | 'server_ip': '{}:{}'.format( | |
795 | get_server_ip_addr(environ, log_errors=False), |
|
821 | get_server_ip_addr(environ, log_errors=False), | |
796 | get_server_port(environ) |
|
822 | get_server_port(environ) | |
797 | ), |
|
823 | ), | |
798 | 'server_id': rhodecode.CONFIG.get('instance_id'), |
|
824 | 'server_id': rhodecode.CONFIG.get('instance_id'), | |
799 | } |
|
825 | } | |
800 | return SysInfoRes(value=value) |
|
826 | return SysInfoRes(value=value) | |
801 |
|
827 | |||
802 |
|
828 | |||
803 | @register_sysinfo |
|
829 | @register_sysinfo | |
804 | def usage_info(): |
|
830 | def usage_info(): | |
805 | from rhodecode.model.db import User, Repository, true |
|
831 | from rhodecode.model.db import User, Repository, true | |
806 | value = { |
|
832 | value = { | |
807 | 'users': User.query().count(), |
|
833 | 'users': User.query().count(), | |
808 | 'users_active': User.query().filter(User.active == true()).count(), |
|
834 | 'users_active': User.query().filter(User.active == true()).count(), | |
809 | 'repositories': Repository.query().count(), |
|
835 | 'repositories': Repository.query().count(), | |
810 | 'repository_types': { |
|
836 | 'repository_types': { | |
811 | 'hg': Repository.query().filter( |
|
837 | 'hg': Repository.query().filter( | |
812 | Repository.repo_type == 'hg').count(), |
|
838 | Repository.repo_type == 'hg').count(), | |
813 | 'git': Repository.query().filter( |
|
839 | 'git': Repository.query().filter( | |
814 | Repository.repo_type == 'git').count(), |
|
840 | Repository.repo_type == 'git').count(), | |
815 | 'svn': Repository.query().filter( |
|
841 | 'svn': Repository.query().filter( | |
816 | Repository.repo_type == 'svn').count(), |
|
842 | Repository.repo_type == 'svn').count(), | |
817 | }, |
|
843 | }, | |
818 | } |
|
844 | } | |
819 | return SysInfoRes(value=value) |
|
845 | return SysInfoRes(value=value) | |
820 |
|
846 | |||
821 |
|
847 | |||
822 | def get_system_info(environ): |
|
848 | def get_system_info(environ): | |
823 | environ = environ or {} |
|
849 | environ = environ or {} | |
824 | return { |
|
850 | return { | |
825 | 'rhodecode_app': SysInfo(rhodecode_app_info)(), |
|
851 | 'rhodecode_app': SysInfo(rhodecode_app_info)(), | |
826 | 'rhodecode_config': SysInfo(rhodecode_config)(), |
|
852 | 'rhodecode_config': SysInfo(rhodecode_config)(), | |
827 | 'rhodecode_usage': SysInfo(usage_info)(), |
|
853 | 'rhodecode_usage': SysInfo(usage_info)(), | |
828 | 'python': SysInfo(python_info)(), |
|
854 | 'python': SysInfo(python_info)(), | |
829 | 'py_modules': SysInfo(py_modules)(), |
|
855 | 'py_modules': SysInfo(py_modules)(), | |
830 |
|
856 | |||
831 | 'platform': SysInfo(platform_type)(), |
|
857 | 'platform': SysInfo(platform_type)(), | |
832 | 'locale': SysInfo(locale_info)(), |
|
858 | 'locale': SysInfo(locale_info)(), | |
833 | 'server': SysInfo(server_info, environ=environ)(), |
|
859 | 'server': SysInfo(server_info, environ=environ)(), | |
834 | 'database': SysInfo(database_info)(), |
|
860 | 'database': SysInfo(database_info)(), | |
835 | 'ulimit': SysInfo(ulimit_info)(), |
|
861 | 'ulimit': SysInfo(ulimit_info)(), | |
836 | 'storage': SysInfo(storage)(), |
|
862 | 'storage': SysInfo(storage)(), | |
837 | 'storage_inodes': SysInfo(storage_inodes)(), |
|
863 | 'storage_inodes': SysInfo(storage_inodes)(), | |
838 | 'storage_archive': SysInfo(storage_archives)(), |
|
864 | 'storage_archive': SysInfo(storage_archives)(), | |
839 | 'storage_artifacts': SysInfo(storage_artifacts)(), |
|
865 | 'storage_artifacts': SysInfo(storage_artifacts)(), | |
840 | 'storage_gist': SysInfo(storage_gist)(), |
|
866 | 'storage_gist': SysInfo(storage_gist)(), | |
841 | 'storage_temp': SysInfo(storage_temp)(), |
|
867 | 'storage_temp': SysInfo(storage_temp)(), | |
842 |
|
868 | |||
843 | 'search': SysInfo(search_info)(), |
|
869 | 'search': SysInfo(search_info)(), | |
844 |
|
870 | |||
845 | 'uptime': SysInfo(uptime)(), |
|
871 | 'uptime': SysInfo(uptime)(), | |
846 | 'load': SysInfo(machine_load)(), |
|
872 | 'load': SysInfo(machine_load)(), | |
847 | 'cpu': SysInfo(cpu)(), |
|
873 | 'cpu': SysInfo(cpu)(), | |
848 | 'memory': SysInfo(memory)(), |
|
874 | 'memory': SysInfo(memory)(), | |
849 |
|
875 | |||
850 | 'vcs_backends': SysInfo(vcs_backends)(), |
|
876 | 'vcs_backends': SysInfo(vcs_backends)(), | |
851 | 'vcs_server': SysInfo(vcs_server)(), |
|
877 | 'vcs_server': SysInfo(vcs_server)(), | |
852 |
|
878 | |||
853 | 'vcs_server_config': SysInfo(vcs_server_config)(), |
|
879 | 'vcs_server_config': SysInfo(vcs_server_config)(), | |
|
880 | 'rhodecode_server_config': SysInfo(rhodecode_server_config)(), | |||
854 |
|
881 | |||
855 | 'git': SysInfo(git_info)(), |
|
882 | 'git': SysInfo(git_info)(), | |
856 | 'hg': SysInfo(hg_info)(), |
|
883 | 'hg': SysInfo(hg_info)(), | |
857 | 'svn': SysInfo(svn_info)(), |
|
884 | 'svn': SysInfo(svn_info)(), | |
858 | } |
|
885 | } | |
859 |
|
886 | |||
860 |
|
887 | |||
861 | def load_system_info(key): |
|
888 | def load_system_info(key): | |
862 | """ |
|
889 | """ | |
863 | get_sys_info('vcs_server') |
|
890 | get_sys_info('vcs_server') | |
864 | get_sys_info('database') |
|
891 | get_sys_info('database') | |
865 | """ |
|
892 | """ | |
866 | return SysInfo(registered_helpers[key])() |
|
893 | return SysInfo(registered_helpers[key])() |
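A hedged usage sketch for the registry above, assuming the module is importable as rhodecode.lib.system_info, that @register_sysinfo keys helpers by their function name, and that SysInfoRes exposes the value/state/human_value it was constructed with:

    # sketch: read one registered entry, e.g. the new server-config one
    from rhodecode.lib.system_info import load_system_info

    res = load_system_info('rhodecode_server_config')
    print(res.value)   # obfuscated copy of rhodecode.CONFIG
    print(res.state)   # STATE_OK_DEFAULT, or {'message': ..., 'type': STATE_ERR}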
@@ -1,1203 +1,1212 b'' | |||||
1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2010-2023 RhodeCode GmbH | |
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 | import os |
|
19 | import os | |
20 | import re |
|
20 | import re | |
21 | import shutil |
|
21 | import shutil | |
22 | import time |
|
22 | import time | |
23 | import logging |
|
23 | import logging | |
24 | import traceback |
|
24 | import traceback | |
25 | import datetime |
|
25 | import datetime | |
26 |
|
26 | |||
27 | from pyramid.threadlocal import get_current_request |
|
27 | from pyramid.threadlocal import get_current_request | |
28 | from sqlalchemy.orm import aliased |
|
28 | from sqlalchemy.orm import aliased | |
29 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
29 | from zope.cachedescriptors.property import Lazy as LazyProperty | |
30 |
|
30 | |||
31 | from rhodecode import events |
|
31 | from rhodecode import events | |
32 | from rhodecode.lib.auth import HasUserGroupPermissionAny |
|
32 | from rhodecode.lib.auth import HasUserGroupPermissionAny | |
33 | from rhodecode.lib.caching_query import FromCache |
|
33 | from rhodecode.lib.caching_query import FromCache | |
34 | from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError, AttachedArtifactsError |
|
34 | from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError, AttachedArtifactsError | |
35 | from rhodecode.lib import hooks_base |
|
35 | from rhodecode.lib import hooks_base | |
36 | from rhodecode.lib.user_log_filter import user_log_filter |
|
36 | from rhodecode.lib.user_log_filter import user_log_filter | |
37 | from rhodecode.lib.utils import make_db_config |
|
37 | from rhodecode.lib.utils import make_db_config | |
38 | from rhodecode.lib.utils2 import ( |
|
38 | from rhodecode.lib.utils2 import ( | |
39 | safe_str, remove_prefix, obfuscate_url_pw, |
|
39 | safe_str, remove_prefix, obfuscate_url_pw, | |
40 | get_current_rhodecode_user, safe_int, action_logger_generic) |
|
40 | get_current_rhodecode_user, safe_int, action_logger_generic) | |
41 | from rhodecode.lib.vcs.backends import get_backend |
|
41 | from rhodecode.lib.vcs.backends import get_backend | |
42 | from rhodecode.lib.vcs.nodes import NodeKind |
|
42 | from rhodecode.lib.vcs.nodes import NodeKind | |
43 | from rhodecode.model import BaseModel |
|
43 | from rhodecode.model import BaseModel | |
44 | from rhodecode.model.db import ( |
|
44 | from rhodecode.model.db import ( | |
45 | _hash_key, func, case, joinedload, or_, in_filter_generator, |
|
45 | _hash_key, func, case, joinedload, or_, in_filter_generator, | |
46 | Session, Repository, UserRepoToPerm, UserGroupRepoToPerm, |
|
46 | Session, Repository, UserRepoToPerm, UserGroupRepoToPerm, | |
47 | UserRepoGroupToPerm, UserGroupRepoGroupToPerm, User, Permission, |
|
47 | UserRepoGroupToPerm, UserGroupRepoGroupToPerm, User, Permission, | |
48 | Statistics, UserGroup, RepoGroup, RepositoryField, UserLog) |
|
48 | Statistics, UserGroup, RepoGroup, RepositoryField, UserLog) | |
49 | from rhodecode.model.permission import PermissionModel |
|
49 | from rhodecode.model.permission import PermissionModel | |
50 | from rhodecode.model.settings import VcsSettingsModel |
|
50 | from rhodecode.model.settings import VcsSettingsModel | |
51 |
|
51 | |||
52 | log = logging.getLogger(__name__) |
|
52 | log = logging.getLogger(__name__) | |
53 |
|
53 | |||
54 |
|
54 | |||
55 | class RepoModel(BaseModel): |
|
55 | class RepoModel(BaseModel): | |
56 |
|
56 | |||
57 | cls = Repository |
|
57 | cls = Repository | |
58 |
|
58 | |||
59 | def _get_user_group(self, users_group): |
|
59 | def _get_user_group(self, users_group): | |
60 | return self._get_instance(UserGroup, users_group, |
|
60 | return self._get_instance(UserGroup, users_group, | |
61 | callback=UserGroup.get_by_group_name) |
|
61 | callback=UserGroup.get_by_group_name) | |
62 |
|
62 | |||
63 | def _get_repo_group(self, repo_group): |
|
63 | def _get_repo_group(self, repo_group): | |
64 | return self._get_instance(RepoGroup, repo_group, |
|
64 | return self._get_instance(RepoGroup, repo_group, | |
65 | callback=RepoGroup.get_by_group_name) |
|
65 | callback=RepoGroup.get_by_group_name) | |
66 |
|
66 | |||
67 | def _create_default_perms(self, repository, private): |
|
67 | def _create_default_perms(self, repository, private): | |
68 | # create default permission |
|
68 | # create default permission | |
69 | default = 'repository.read' |
|
69 | default = 'repository.read' | |
70 | def_user = User.get_default_user() |
|
70 | def_user = User.get_default_user() | |
71 | for p in def_user.user_perms: |
|
71 | for p in def_user.user_perms: | |
72 | if p.permission.permission_name.startswith('repository.'): |
|
72 | if p.permission.permission_name.startswith('repository.'): | |
73 | default = p.permission.permission_name |
|
73 | default = p.permission.permission_name | |
74 | break |
|
74 | break | |
75 |
|
75 | |||
76 | default_perm = 'repository.none' if private else default |
|
76 | default_perm = 'repository.none' if private else default | |
77 |
|
77 | |||
78 | repo_to_perm = UserRepoToPerm() |
|
78 | repo_to_perm = UserRepoToPerm() | |
79 | repo_to_perm.permission = Permission.get_by_key(default_perm) |
|
79 | repo_to_perm.permission = Permission.get_by_key(default_perm) | |
80 |
|
80 | |||
81 | repo_to_perm.repository = repository |
|
81 | repo_to_perm.repository = repository | |
82 | repo_to_perm.user = def_user |
|
82 | repo_to_perm.user = def_user | |
83 |
|
83 | |||
84 | return repo_to_perm |
|
84 | return repo_to_perm | |
85 |
|
85 | |||
86 | def get(self, repo_id): |
|
86 | def get(self, repo_id): | |
87 | repo = self.sa.query(Repository) \ |
|
87 | repo = self.sa.query(Repository) \ | |
88 | .filter(Repository.repo_id == repo_id) |
|
88 | .filter(Repository.repo_id == repo_id) | |
89 |
|
89 | |||
90 | return repo.scalar() |
|
90 | return repo.scalar() | |
91 |
|
91 | |||
92 | def get_repo(self, repository): |
|
92 | def get_repo(self, repository): | |
93 | return self._get_repo(repository) |
|
93 | return self._get_repo(repository) | |
94 |
|
94 | |||
95 | def get_by_repo_name(self, repo_name, cache=False): |
|
95 | def get_by_repo_name(self, repo_name, cache=False): | |
96 | repo = self.sa.query(Repository) \ |
|
96 | repo = self.sa.query(Repository) \ | |
97 | .filter(Repository.repo_name == repo_name) |
|
97 | .filter(Repository.repo_name == repo_name) | |
98 |
|
98 | |||
99 | if cache: |
|
99 | if cache: | |
100 | name_key = _hash_key(repo_name) |
|
100 | name_key = _hash_key(repo_name) | |
101 | repo = repo.options( |
|
101 | repo = repo.options( | |
102 | FromCache("sql_cache_short", f"get_repo_{name_key}")) |
|
102 | FromCache("sql_cache_short", f"get_repo_{name_key}")) | |
103 | return repo.scalar() |
|
103 | return repo.scalar() | |
104 |
|
104 | |||
105 | def _extract_id_from_repo_name(self, repo_name): |
|
105 | def _extract_id_from_repo_name(self, repo_name): | |
106 | if repo_name.startswith('/'): |
|
106 | if repo_name.startswith('/'): | |
107 | repo_name = repo_name.lstrip('/') |
|
107 | repo_name = repo_name.lstrip('/') | |
108 | by_id_match = re.match(r'^_(\d+)', repo_name) |
|
108 | by_id_match = re.match(r'^_(\d+)', repo_name) | |
109 | if by_id_match: |
|
109 | if by_id_match: | |
110 | return by_id_match.groups()[0] |
|
110 | return by_id_match.groups()[0] | |
111 |
|
111 | |||
112 | def get_repo_by_id(self, repo_name): |
|
112 | def get_repo_by_id(self, repo_name): | |
113 | """ |
|
113 | """ | |
114 | Extracts repo_name by id from special urls. |
|
114 | Extracts repo_name by id from special urls. | |
115 | Example url is _11/repo_name |
|
115 | Example url is _11/repo_name | |
116 |
|
116 | |||
117 | :param repo_name: |
|
117 | :param repo_name: | |
118 | :return: repo object if matched else None |
|
118 | :return: repo object if matched else None | |
119 | """ |
|
119 | """ | |
120 | _repo_id = None |
|
120 | _repo_id = None | |
121 | try: |
|
121 | try: | |
122 | _repo_id = self._extract_id_from_repo_name(repo_name) |
|
122 | _repo_id = self._extract_id_from_repo_name(repo_name) | |
123 | if _repo_id: |
|
123 | if _repo_id: | |
124 | return self.get(_repo_id) |
|
124 | return self.get(_repo_id) | |
125 | except Exception: |
|
125 | except Exception: | |
126 | log.exception('Failed to extract repo_name from URL') |
|
126 | log.exception('Failed to extract repo_name from URL') | |
127 | if _repo_id: |
|
127 | if _repo_id: | |
128 | Session().rollback() |
|
128 | Session().rollback() | |
129 |
|
129 | |||
130 | return None |
|
130 | return None | |
131 |
|
131 | |||
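_extract_id_from_repo_name above recognizes the _<id>/repo_name permalink form via the ^_(\d+) regex; a standalone check of that behaviour (an illustrative re-implementation, not the model API):

    import re

    def extract_id(repo_name):
        # mirrors _extract_id_from_repo_name: optional leading '/', then _<digits>
        repo_name = repo_name.lstrip('/')
        match = re.match(r'^_(\d+)', repo_name)
        return match.groups()[0] if match else None

    assert extract_id('_11/repo_name') == '11'
    assert extract_id('plain/repo_name') is None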
132 | def get_repos_for_root(self, root, traverse=False): |
|
132 | def get_repos_for_root(self, root, traverse=False): | |
133 | if traverse: |
|
133 | if traverse: | |
134 | like_expression = u'{}%'.format(safe_str(root)) |
|
134 | like_expression = u'{}%'.format(safe_str(root)) | |
135 | repos = Repository.query().filter( |
|
135 | repos = Repository.query().filter( | |
136 | Repository.repo_name.like(like_expression)).all() |
|
136 | Repository.repo_name.like(like_expression)).all() | |
137 | else: |
|
137 | else: | |
138 | if root and not isinstance(root, RepoGroup): |
|
138 | if root and not isinstance(root, RepoGroup): | |
139 | raise ValueError( |
|
139 | raise ValueError( | |
140 | 'Root must be an instance ' |
|
140 | 'Root must be an instance ' | |
141 | 'of RepoGroup, got:{} instead'.format(type(root))) |
|
141 | 'of RepoGroup, got:{} instead'.format(type(root))) | |
142 | repos = Repository.query().filter(Repository.group == root).all() |
|
142 | repos = Repository.query().filter(Repository.group == root).all() | |
143 | return repos |
|
143 | return repos | |
144 |
|
144 | |||
145 | def get_url(self, repo, request=None, permalink=False): |
|
145 | def get_url(self, repo, request=None, permalink=False): | |
146 | if not request: |
|
146 | if not request: | |
147 | request = get_current_request() |
|
147 | request = get_current_request() | |
148 |
|
148 | |||
149 | if not request: |
|
149 | if not request: | |
150 | return |
|
150 | return | |
151 |
|
151 | |||
152 | if permalink: |
|
152 | if permalink: | |
153 | return request.route_url( |
|
153 | return request.route_url( | |
154 | 'repo_summary', repo_name='_{}'.format(safe_str(repo.repo_id))) |
|
154 | 'repo_summary', repo_name='_{}'.format(safe_str(repo.repo_id))) | |
155 | else: |
|
155 | else: | |
156 | return request.route_url( |
|
156 | return request.route_url( | |
157 | 'repo_summary', repo_name=safe_str(repo.repo_name)) |
|
157 | 'repo_summary', repo_name=safe_str(repo.repo_name)) | |
158 |
|
158 | |||
159 | def get_commit_url(self, repo, commit_id, request=None, permalink=False): |
|
159 | def get_commit_url(self, repo, commit_id, request=None, permalink=False): | |
160 | if not request: |
|
160 | if not request: | |
161 | request = get_current_request() |
|
161 | request = get_current_request() | |
162 |
|
162 | |||
163 | if not request: |
|
163 | if not request: | |
164 | return |
|
164 | return | |
165 |
|
165 | |||
166 | if permalink: |
|
166 | if permalink: | |
167 | return request.route_url( |
|
167 | return request.route_url( | |
168 | 'repo_commit', repo_name=safe_str(repo.repo_id), |
|
168 | 'repo_commit', repo_name=safe_str(repo.repo_id), | |
169 | commit_id=commit_id) |
|
169 | commit_id=commit_id) | |
170 |
|
170 | |||
171 | else: |
|
171 | else: | |
172 | return request.route_url( |
|
172 | return request.route_url( | |
173 | 'repo_commit', repo_name=safe_str(repo.repo_name), |
|
173 | 'repo_commit', repo_name=safe_str(repo.repo_name), | |
174 | commit_id=commit_id) |
|
174 | commit_id=commit_id) | |
175 |
|
175 | |||
176 | def get_repo_log(self, repo, filter_term): |
|
176 | def get_repo_log(self, repo, filter_term): | |
177 | repo_log = UserLog.query()\ |
|
177 | repo_log = UserLog.query()\ | |
178 | .filter(or_(UserLog.repository_id == repo.repo_id, |
|
178 | .filter(or_(UserLog.repository_id == repo.repo_id, | |
179 | UserLog.repository_name == repo.repo_name))\ |
|
179 | UserLog.repository_name == repo.repo_name))\ | |
180 | .options(joinedload(UserLog.user))\ |
|
180 | .options(joinedload(UserLog.user))\ | |
181 | .options(joinedload(UserLog.repository))\ |
|
181 | .options(joinedload(UserLog.repository))\ | |
182 | .order_by(UserLog.action_date.desc()) |
|
182 | .order_by(UserLog.action_date.desc()) | |
183 |
|
183 | |||
184 | repo_log = user_log_filter(repo_log, filter_term) |
|
184 | repo_log = user_log_filter(repo_log, filter_term) | |
185 | return repo_log |
|
185 | return repo_log | |
186 |
|
186 | |||
187 | @classmethod |
|
187 | @classmethod | |
188 | def update_commit_cache(cls, repositories=None): |
|
188 | def update_commit_cache(cls, repositories=None): | |
189 | if not repositories: |
|
189 | if not repositories: | |
190 | repositories = Repository.getAll() |
|
190 | repositories = Repository.getAll() | |
191 | for repo in repositories: |
|
191 | for repo in repositories: | |
192 | repo.update_commit_cache() |
|
192 | repo.update_commit_cache() | |
193 |
|
193 | |||
194 | def get_repos_as_dict(self, repo_list=None, admin=False, |
|
194 | def get_repos_as_dict(self, repo_list=None, admin=False, | |
195 | super_user_actions=False, short_name=None): |
|
195 | super_user_actions=False, short_name=None): | |
196 |
|
196 | |||
197 | _render = get_current_request().get_partial_renderer( |
|
197 | _render = get_current_request().get_partial_renderer( | |
198 | 'rhodecode:templates/data_table/_dt_elements.mako') |
|
198 | 'rhodecode:templates/data_table/_dt_elements.mako') | |
199 | c = _render.get_call_context() |
|
199 | c = _render.get_call_context() | |
200 | h = _render.get_helpers() |
|
200 | h = _render.get_helpers() | |
201 |
|
201 | |||
202 | def quick_menu(repo_name): |
|
202 | def quick_menu(repo_name): | |
203 | return _render('quick_menu', repo_name) |
|
203 | return _render('quick_menu', repo_name) | |
204 |
|
204 | |||
205 | def repo_lnk(name, rtype, rstate, private, archived, fork_repo_name): |
|
205 | def repo_lnk(name, rtype, rstate, private, archived, fork_repo_name): | |
206 | if short_name is not None: |
|
206 | if short_name is not None: | |
207 | short_name_var = short_name |
|
207 | short_name_var = short_name | |
208 | else: |
|
208 | else: | |
209 | short_name_var = not admin |
|
209 | short_name_var = not admin | |
210 | return _render('repo_name', name, rtype, rstate, private, archived, fork_repo_name, |
|
210 | return _render('repo_name', name, rtype, rstate, private, archived, fork_repo_name, | |
211 | short_name=short_name_var, admin=False) |
|
211 | short_name=short_name_var, admin=False) | |
212 |
|
212 | |||
213 | def last_change(last_change): |
|
213 | def last_change(last_change): | |
214 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: |
|
214 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: | |
215 | ts = time.time() |
|
215 | ts = time.time() | |
216 | utc_offset = (datetime.datetime.fromtimestamp(ts) |
|
216 | utc_offset = (datetime.datetime.fromtimestamp(ts) | |
217 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() |
|
217 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() | |
218 | last_change = last_change + datetime.timedelta(seconds=utc_offset) |
|
218 | last_change = last_change + datetime.timedelta(seconds=utc_offset) | |
219 |
|
219 | |||
220 | return _render("last_change", last_change) |
|
220 | return _render("last_change", last_change) | |
221 |
|
221 | |||
222 | def rss_lnk(repo_name): |
|
222 | def rss_lnk(repo_name): | |
223 | return _render("rss", repo_name) |
|
223 | return _render("rss", repo_name) | |
224 |
|
224 | |||
225 | def atom_lnk(repo_name): |
|
225 | def atom_lnk(repo_name): | |
226 | return _render("atom", repo_name) |
|
226 | return _render("atom", repo_name) | |
227 |
|
227 | |||
228 | def last_rev(repo_name, cs_cache): |
|
228 | def last_rev(repo_name, cs_cache): | |
229 | return _render('revision', repo_name, cs_cache.get('revision'), |
|
229 | return _render('revision', repo_name, cs_cache.get('revision'), | |
230 | cs_cache.get('raw_id'), cs_cache.get('author'), |
|
230 | cs_cache.get('raw_id'), cs_cache.get('author'), | |
231 | cs_cache.get('message'), cs_cache.get('date')) |
|
231 | cs_cache.get('message'), cs_cache.get('date')) | |
232 |
|
232 | |||
233 | def desc(desc): |
|
233 | def desc(desc): | |
234 | return _render('repo_desc', desc, c.visual.stylify_metatags) |
|
234 | return _render('repo_desc', desc, c.visual.stylify_metatags) | |
235 |
|
235 | |||
236 | def state(repo_state): |
|
236 | def state(repo_state): | |
237 | return _render("repo_state", repo_state) |
|
237 | return _render("repo_state", repo_state) | |
238 |
|
238 | |||
239 | def repo_actions(repo_name): |
|
239 | def repo_actions(repo_name): | |
240 | return _render('repo_actions', repo_name, super_user_actions) |
|
240 | return _render('repo_actions', repo_name, super_user_actions) | |
241 |
|
241 | |||
242 | def user_profile(username): |
|
242 | def user_profile(username): | |
243 | return _render('user_profile', username) |
|
243 | return _render('user_profile', username) | |
244 |
|
244 | |||
245 | repos_data = [] |
|
245 | repos_data = [] | |
246 | for repo in repo_list: |
|
246 | for repo in repo_list: | |
247 | # NOTE(marcink): because we use only raw column we need to load it like that |
|
247 | # NOTE(marcink): because we use only raw column we need to load it like that | |
248 | changeset_cache = Repository._load_changeset_cache( |
|
248 | changeset_cache = Repository._load_changeset_cache( | |
249 | repo.repo_id, repo._changeset_cache) |
|
249 | repo.repo_id, repo._changeset_cache) | |
250 |
|
250 | |||
251 | row = { |
|
251 | row = { | |
252 | "menu": quick_menu(repo.repo_name), |
|
252 | "menu": quick_menu(repo.repo_name), | |
253 |
|
253 | |||
254 | "name": repo_lnk(repo.repo_name, repo.repo_type, repo.repo_state, |
|
254 | "name": repo_lnk(repo.repo_name, repo.repo_type, repo.repo_state, | |
255 | repo.private, repo.archived, repo.fork_repo_name), |
|
255 | repo.private, repo.archived, repo.fork_repo_name), | |
256 |
|
256 | |||
257 | "desc": desc(h.escape(repo.description)), |
|
257 | "desc": desc(h.escape(repo.description)), | |
258 |
|
258 | |||
259 | "last_change": last_change(repo.updated_on), |
|
259 | "last_change": last_change(repo.updated_on), | |
260 |
|
260 | |||
261 | "last_changeset": last_rev(repo.repo_name, changeset_cache), |
|
261 | "last_changeset": last_rev(repo.repo_name, changeset_cache), | |
262 | "last_changeset_raw": changeset_cache.get('revision'), |
|
262 | "last_changeset_raw": changeset_cache.get('revision'), | |
263 |
|
263 | |||
264 | "owner": user_profile(repo.owner_username), |
|
264 | "owner": user_profile(repo.owner_username), | |
265 |
|
265 | |||
266 | "state": state(repo.repo_state), |
|
266 | "state": state(repo.repo_state), | |
267 | "rss": rss_lnk(repo.repo_name), |
|
267 | "rss": rss_lnk(repo.repo_name), | |
268 | "atom": atom_lnk(repo.repo_name), |
|
268 | "atom": atom_lnk(repo.repo_name), | |
269 | } |
|
269 | } | |
270 | if admin: |
|
270 | if admin: | |
271 | row.update({ |
|
271 | row.update({ | |
272 | "action": repo_actions(repo.repo_name), |
|
272 | "action": repo_actions(repo.repo_name), | |
273 | }) |
|
273 | }) | |
274 | repos_data.append(row) |
|
274 | repos_data.append(row) | |
275 |
|
275 | |||
276 | return repos_data |
|
276 | return repos_data | |
277 |
|
277 | |||
278 | def get_repos_data_table( |
|
278 | def get_repos_data_table( | |
279 | self, draw, start, limit, |
|
279 | self, draw, start, limit, | |
280 | search_q, order_by, order_dir, |
|
280 | search_q, order_by, order_dir, | |
281 | auth_user, repo_group_id): |
|
281 | auth_user, repo_group_id): | |
282 | from rhodecode.model.scm import RepoList |
|
282 | from rhodecode.model.scm import RepoList | |
283 |
|
283 | |||
284 | _perms = ['repository.read', 'repository.write', 'repository.admin'] |
|
284 | _perms = ['repository.read', 'repository.write', 'repository.admin'] | |
285 |
|
285 | |||
286 | repos = Repository.query() \ |
|
286 | repos = Repository.query() \ | |
287 | .filter(Repository.group_id == repo_group_id) \ |
|
287 | .filter(Repository.group_id == repo_group_id) \ | |
288 | .all() |
|
288 | .all() | |
289 | auth_repo_list = RepoList( |
|
289 | auth_repo_list = RepoList( | |
290 | repos, perm_set=_perms, |
|
290 | repos, perm_set=_perms, | |
291 | extra_kwargs=dict(user=auth_user)) |
|
291 | extra_kwargs=dict(user=auth_user)) | |
292 |
|
292 | |||
293 | allowed_ids = [-1] |
|
293 | allowed_ids = [-1] | |
294 | for repo in auth_repo_list: |
|
294 | for repo in auth_repo_list: | |
295 | allowed_ids.append(repo.repo_id) |
|
295 | allowed_ids.append(repo.repo_id) | |
296 |
|
296 | |||
297 | repos_data_total_count = Repository.query() \ |
|
297 | repos_data_total_count = Repository.query() \ | |
298 | .filter(Repository.group_id == repo_group_id) \ |
|
298 | .filter(Repository.group_id == repo_group_id) \ | |
299 | .filter(or_( |
|
299 | .filter(or_( | |
300 | # generate multiple IN to fix limitation problems |
|
300 | # generate multiple IN to fix limitation problems | |
301 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
301 | *in_filter_generator(Repository.repo_id, allowed_ids)) | |
302 | ) \ |
|
302 | ) \ | |
303 | .count() |
|
303 | .count() | |
304 |
|
304 | |||
305 | RepoFork = aliased(Repository) |
|
305 | RepoFork = aliased(Repository) | |
306 | OwnerUser = aliased(User) |
|
306 | OwnerUser = aliased(User) | |
307 | base_q = Session.query( |
|
307 | base_q = Session.query( | |
308 | Repository.repo_id, |
|
308 | Repository.repo_id, | |
309 | Repository.repo_name, |
|
309 | Repository.repo_name, | |
310 | Repository.description, |
|
310 | Repository.description, | |
311 | Repository.repo_type, |
|
311 | Repository.repo_type, | |
312 | Repository.repo_state, |
|
312 | Repository.repo_state, | |
313 | Repository.private, |
|
313 | Repository.private, | |
314 | Repository.archived, |
|
314 | Repository.archived, | |
315 | Repository.updated_on, |
|
315 | Repository.updated_on, | |
316 | Repository._changeset_cache, |
|
316 | Repository._changeset_cache, | |
317 | RepoFork.repo_name.label('fork_repo_name'), |
|
317 | RepoFork.repo_name.label('fork_repo_name'), | |
318 | OwnerUser.username.label('owner_username'), |
|
318 | OwnerUser.username.label('owner_username'), | |
319 | ) \ |
|
319 | ) \ | |
320 | .filter(Repository.group_id == repo_group_id) \ |
|
320 | .filter(Repository.group_id == repo_group_id) \ | |
321 | .filter(or_( |
|
321 | .filter(or_( | |
322 | # generate multiple IN to fix limitation problems |
|
322 | # generate multiple IN to fix limitation problems | |
323 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
323 | *in_filter_generator(Repository.repo_id, allowed_ids)) | |
324 | ) \ |
|
324 | ) \ | |
325 | .outerjoin(RepoFork, Repository.fork_id == RepoFork.repo_id) \ |
|
325 | .outerjoin(RepoFork, Repository.fork_id == RepoFork.repo_id) \ | |
326 | .join(OwnerUser, Repository.user_id == OwnerUser.user_id) |
|
326 | .join(OwnerUser, Repository.user_id == OwnerUser.user_id) | |
327 |
|
327 | |||
328 | repos_data_total_filtered_count = base_q.count() |
|
328 | repos_data_total_filtered_count = base_q.count() | |
329 |
|
329 | |||
330 | sort_defined = False |
|
330 | sort_defined = False | |
331 | if order_by == 'repo_name': |
|
331 | if order_by == 'repo_name': | |
332 | sort_col = func.lower(Repository.repo_name) |
|
332 | sort_col = func.lower(Repository.repo_name) | |
333 | sort_defined = True |
|
333 | sort_defined = True | |
334 | elif order_by == 'user_username': |
|
334 | elif order_by == 'user_username': | |
335 | sort_col = User.username |
|
335 | sort_col = User.username | |
336 | else: |
|
336 | else: | |
337 | sort_col = getattr(Repository, order_by, None) |
|
337 | sort_col = getattr(Repository, order_by, None) | |
338 |
|
338 | |||
339 | if sort_defined or sort_col: |
|
339 | if sort_defined or sort_col: | |
340 | if order_dir == 'asc': |
|
340 | if order_dir == 'asc': | |
341 | sort_col = sort_col.asc() |
|
341 | sort_col = sort_col.asc() | |
342 | else: |
|
342 | else: | |
343 | sort_col = sort_col.desc() |
|
343 | sort_col = sort_col.desc() | |
344 |
|
344 | |||
345 | base_q = base_q.order_by(sort_col) |
|
345 | base_q = base_q.order_by(sort_col) | |
346 | base_q = base_q.offset(start).limit(limit) |
|
346 | base_q = base_q.offset(start).limit(limit) | |
347 |
|
347 | |||
348 | repos_list = base_q.all() |
|
348 | repos_list = base_q.all() | |
349 |
|
349 | |||
350 | repos_data = RepoModel().get_repos_as_dict( |
|
350 | repos_data = RepoModel().get_repos_as_dict( | |
351 | repo_list=repos_list, admin=False) |
|
351 | repo_list=repos_list, admin=False) | |
352 |
|
352 | |||
353 | data = ({ |
|
353 | data = ({ | |
354 | 'draw': draw, |
|
354 | 'draw': draw, | |
355 | 'data': repos_data, |
|
355 | 'data': repos_data, | |
356 | 'recordsTotal': repos_data_total_count, |
|
356 | 'recordsTotal': repos_data_total_count, | |
357 | 'recordsFiltered': repos_data_total_filtered_count, |
|
357 | 'recordsFiltered': repos_data_total_filtered_count, | |
358 | }) |
|
358 | }) | |
359 | return data |
|
359 | return data | |
360 |
|
360 | |||
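get_repos_data_table above returns the draw/recordsTotal/recordsFiltered envelope that DataTables-style grids expect; a self-contained sketch of that response shape (names illustrative, values made up):

    # sketch: the envelope a server-side-paginated grid consumes
    def datatable_envelope(draw, rows, total, filtered):
        return {
            'draw': draw,                 # echoed request counter
            'data': rows,                 # rendered rows for this page
            'recordsTotal': total,        # repos in the group before permission filtering
            'recordsFiltered': filtered,  # count after permission filtering
        }

    assert datatable_envelope(1, [], 10, 7)['recordsFiltered'] == 7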
361 | def _get_defaults(self, repo_name): |
|
361 | def _get_defaults(self, repo_name): | |
362 | """ |
|
362 | """ | |
363 | Gets information about repository, and returns a dict for |
|
363 | Gets information about repository, and returns a dict for | |
364 | usage in forms |
|
364 | usage in forms | |
365 |
|
365 | |||
366 | :param repo_name: |
|
366 | :param repo_name: | |
367 | """ |
|
367 | """ | |
368 |
|
368 | |||
369 | repo_info = Repository.get_by_repo_name(repo_name) |
|
369 | repo_info = Repository.get_by_repo_name(repo_name) | |
370 |
|
370 | |||
371 | if repo_info is None: |
|
371 | if repo_info is None: | |
372 | return None |
|
372 | return None | |
373 |
|
373 | |||
374 | defaults = repo_info.get_dict() |
|
374 | defaults = repo_info.get_dict() | |
375 | defaults['repo_name'] = repo_info.just_name |
|
375 | defaults['repo_name'] = repo_info.just_name | |
376 |
|
376 | |||
377 | groups = repo_info.groups_with_parents |
|
377 | groups = repo_info.groups_with_parents | |
378 | parent_group = groups[-1] if groups else None |
|
378 | parent_group = groups[-1] if groups else None | |
379 |
|
379 | |||
380 | # we use -1 as this is how in HTML, we mark an empty group |
|
380 | # we use -1 as this is how in HTML, we mark an empty group | |
381 | defaults['repo_group'] = getattr(parent_group, 'group_id', -1) |
|
381 | defaults['repo_group'] = getattr(parent_group, 'group_id', -1) | |
382 |
|
382 | |||
383 | keys_to_process = ( |
|
383 | keys_to_process = ( | |
384 | {'k': 'repo_type', 'strip': False}, |
|
384 | {'k': 'repo_type', 'strip': False}, | |
385 | {'k': 'repo_enable_downloads', 'strip': True}, |
|
385 | {'k': 'repo_enable_downloads', 'strip': True}, | |
386 | {'k': 'repo_description', 'strip': True}, |
|
386 | {'k': 'repo_description', 'strip': True}, | |
387 | {'k': 'repo_enable_locking', 'strip': True}, |
|
387 | {'k': 'repo_enable_locking', 'strip': True}, | |
388 | {'k': 'repo_landing_rev', 'strip': True}, |
|
388 | {'k': 'repo_landing_rev', 'strip': True}, | |
389 | {'k': 'clone_uri', 'strip': False}, |
|
389 | {'k': 'clone_uri', 'strip': False}, | |
390 | {'k': 'push_uri', 'strip': False}, |
|
390 | {'k': 'push_uri', 'strip': False}, | |
391 | {'k': 'repo_private', 'strip': True}, |
|
391 | {'k': 'repo_private', 'strip': True}, | |
392 | {'k': 'repo_enable_statistics', 'strip': True} |
|
392 | {'k': 'repo_enable_statistics', 'strip': True} | |
393 | ) |
|
393 | ) | |
394 |
|
394 | |||
395 | for item in keys_to_process: |
|
395 | for item in keys_to_process: | |
396 | attr = item['k'] |
|
396 | attr = item['k'] | |
397 | if item['strip']: |
|
397 | if item['strip']: | |
398 | attr = remove_prefix(item['k'], 'repo_') |
|
398 | attr = remove_prefix(item['k'], 'repo_') | |
399 |
|
399 | |||
400 | val = defaults[attr] |
|
400 | val = defaults[attr] | |
401 | if item['k'] == 'repo_landing_rev': |
|
401 | if item['k'] == 'repo_landing_rev': | |
402 | val = ':'.join(defaults[attr]) |
|
402 | val = ':'.join(defaults[attr]) | |
403 | defaults[item['k']] = val |
|
403 | defaults[item['k']] = val | |
404 | if item['k'] == 'clone_uri': |
|
404 | if item['k'] == 'clone_uri': | |
405 | defaults['clone_uri_hidden'] = repo_info.clone_uri_hidden |
|
405 | defaults['clone_uri_hidden'] = repo_info.clone_uri_hidden | |
406 | if item['k'] == 'push_uri': |
|
406 | if item['k'] == 'push_uri': | |
407 | defaults['push_uri_hidden'] = repo_info.push_uri_hidden |
|
407 | defaults['push_uri_hidden'] = repo_info.push_uri_hidden | |
408 |
|
408 | |||
409 | # fill owner |
|
409 | # fill owner | |
410 | if repo_info.user: |
|
410 | if repo_info.user: | |
411 | defaults.update({'user': repo_info.user.username}) |
|
411 | defaults.update({'user': repo_info.user.username}) | |
412 | else: |
|
412 | else: | |
413 | replacement_user = User.get_first_super_admin().username |
|
413 | replacement_user = User.get_first_super_admin().username | |
414 | defaults.update({'user': replacement_user}) |
|
414 | defaults.update({'user': replacement_user}) | |
415 |
|
415 | |||
416 | return defaults |
|
416 | return defaults | |
417 |
|
417 | |||
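_get_defaults above maps form keys such as repo_description back to model attributes by stripping the repo_ prefix; remove_prefix itself comes from rhodecode.lib.utils2, so this re-implementation is only an illustrative sketch of its apparent behaviour:

    def remove_prefix(text, prefix):
        # drop the prefix only when it is actually present
        return text[len(prefix):] if text.startswith(prefix) else text

    assert remove_prefix('repo_description', 'repo_') == 'description'
    assert remove_prefix('clone_uri', 'repo_') == 'clone_uri'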
418 | def update(self, repo, **kwargs): |
|
418 | def update(self, repo, **kwargs): | |
419 | try: |
|
419 | try: | |
420 | cur_repo = self._get_repo(repo) |
|
420 | cur_repo = self._get_repo(repo) | |
421 | source_repo_name = cur_repo.repo_name |
|
421 | source_repo_name = cur_repo.repo_name | |
422 |
|
422 | |||
423 | affected_user_ids = [] |
|
423 | affected_user_ids = [] | |
424 | if 'user' in kwargs: |
|
424 | if 'user' in kwargs: | |
425 | old_owner_id = cur_repo.user.user_id |
|
425 | old_owner_id = cur_repo.user.user_id | |
426 | new_owner = User.get_by_username(kwargs['user']) |
|
426 | new_owner = User.get_by_username(kwargs['user']) | |
427 | cur_repo.user = new_owner |
|
427 | cur_repo.user = new_owner | |
428 |
|
428 | |||
429 | if old_owner_id != new_owner.user_id: |
|
429 | if old_owner_id != new_owner.user_id: | |
430 | affected_user_ids = [new_owner.user_id, old_owner_id] |
|
430 | affected_user_ids = [new_owner.user_id, old_owner_id] | |
431 |
|
431 | |||
432 | if 'repo_group' in kwargs: |
|
432 | if 'repo_group' in kwargs: | |
433 | cur_repo.group = RepoGroup.get(kwargs['repo_group']) |
|
433 | cur_repo.group = RepoGroup.get(kwargs['repo_group']) | |
434 | log.debug('Updating repo %s with params:%s', cur_repo, kwargs) |
|
434 | log.debug('Updating repo %s with params:%s', cur_repo, kwargs) | |
435 |
|
435 | |||
436 | update_keys = [ |
|
436 | update_keys = [ | |
437 | (1, 'repo_description'), |
|
437 | (1, 'repo_description'), | |
438 | (1, 'repo_landing_rev'), |
|
438 | (1, 'repo_landing_rev'), | |
439 | (1, 'repo_private'), |
|
439 | (1, 'repo_private'), | |
440 | (1, 'repo_enable_downloads'), |
|
440 | (1, 'repo_enable_downloads'), | |
441 | (1, 'repo_enable_locking'), |
|
441 | (1, 'repo_enable_locking'), | |
442 | (1, 'repo_enable_statistics'), |
|
442 | (1, 'repo_enable_statistics'), | |
443 | (0, 'clone_uri'), |
|
443 | (0, 'clone_uri'), | |
444 | (0, 'push_uri'), |
|
444 | (0, 'push_uri'), | |
445 | (0, 'fork_id') |
|
445 | (0, 'fork_id') | |
446 | ] |
|
446 | ] | |
447 | for strip, k in update_keys: |
|
447 | for strip, k in update_keys: | |
448 | if k in kwargs: |
|
448 | if k in kwargs: | |
449 | val = kwargs[k] |
|
449 | val = kwargs[k] | |
450 | if strip: |
|
450 | if strip: | |
451 | k = remove_prefix(k, 'repo_') |
|
451 | k = remove_prefix(k, 'repo_') | |
452 |
|
452 | |||
453 | setattr(cur_repo, k, val) |
|
453 | setattr(cur_repo, k, val) | |
454 |
|
454 | |||
455 | new_name = cur_repo.get_new_name(kwargs['repo_name']) |
|
455 | new_name = source_repo_name | |
456 | cur_repo.repo_name = new_name |
|
456 | if 'repo_name' in kwargs: | |
|
457 | new_name = cur_repo.get_new_name(kwargs['repo_name']) | |||
|
458 | cur_repo.repo_name = new_name | |||
457 |
|
459 | |||
458 | # if private flag is set, reset default permission to NONE |
|
460 | if 'repo_private' in kwargs: | |
459 | if kwargs.get('repo_private'): |
|
461 | # if private flag is set to True, reset default permission to NONE | |
460 | EMPTY_PERM = 'repository.none' |
|
462 | set_private_to = kwargs.get('repo_private') | |
461 | RepoModel().grant_user_permission( |
|
463 | if set_private_to: | |
462 | repo=cur_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM |
|
464 | EMPTY_PERM = 'repository.none' | |
463 | ) |
|
465 | RepoModel().grant_user_permission( | |
|
466 | repo=cur_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM | |||
|
467 | ) | |||
|
468 | if set_private_to != cur_repo.private: | |||
|
469 | # NOTE(dan): when we change repo private mode we need to notify all USERS | |||
|
470 | # this is just by having this value set to a different value than it was before | |||
|
471 | affected_user_ids = User.get_all_user_ids() | |||
|
472 | ||||
464 | if kwargs.get('repo_landing_rev'): |
|
473 | if kwargs.get('repo_landing_rev'): | |
465 | landing_rev_val = kwargs['repo_landing_rev'] |
|
474 | landing_rev_val = kwargs['repo_landing_rev'] | |
466 | RepoModel().set_landing_rev(cur_repo, landing_rev_val) |
|
475 | RepoModel().set_landing_rev(cur_repo, landing_rev_val) | |
467 |
|
476 | |||
468 | # handle extra fields |
|
477 | # handle extra fields | |
469 | for field in filter(lambda k: k.startswith(RepositoryField.PREFIX), kwargs): |
|
478 | for field in filter(lambda k: k.startswith(RepositoryField.PREFIX), kwargs): | |
470 | k = RepositoryField.un_prefix_key(field) |
|
479 | k = RepositoryField.un_prefix_key(field) | |
471 | ex_field = RepositoryField.get_by_key_name( |
|
480 | ex_field = RepositoryField.get_by_key_name( | |
472 | key=k, repo=cur_repo) |
|
481 | key=k, repo=cur_repo) | |
473 | if ex_field: |
|
482 | if ex_field: | |
474 | ex_field.field_value = kwargs[field] |
|
483 | ex_field.field_value = kwargs[field] | |
475 | self.sa.add(ex_field) |
|
484 | self.sa.add(ex_field) | |
476 |
|
485 | |||
477 | self.sa.add(cur_repo) |
|
486 | self.sa.add(cur_repo) | |
478 |
|
487 | |||
479 | if source_repo_name != new_name: |
|
488 | if source_repo_name != new_name: | |
480 | # rename repository |
|
489 | # rename repository | |
481 | self._rename_filesystem_repo( |
|
490 | self._rename_filesystem_repo( | |
482 | old=source_repo_name, new=new_name) |
|
491 | old=source_repo_name, new=new_name) | |
483 |
|
492 | |||
484 | if affected_user_ids: |
|
493 | if affected_user_ids: | |
485 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
494 | PermissionModel().trigger_permission_flush(affected_user_ids) | |
486 |
|
495 | |||
487 | return cur_repo |
|
496 | return cur_repo | |
488 | except Exception: |
|
497 | except Exception: | |
489 | log.error(traceback.format_exc()) |
|
498 | log.error(traceback.format_exc()) | |
490 | raise |
|
499 | raise | |
491 |
|
500 | |||
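The reworked private-flag handling in update() above distinguishes the repo_private key being present from the flag being truthy, and flushes every user's permissions only when the privacy value actually changes; a condensed standalone sketch of that decision logic (illustrative names, not the model API):

    # sketch: decide who needs a permission-cache flush after an update
    def users_to_flush(kwargs, currently_private, all_user_ids):
        if 'repo_private' not in kwargs:
            return []                 # privacy untouched: nothing extra to flush
        set_private_to = kwargs['repo_private']
        if set_private_to != currently_private:
            return all_user_ids       # visibility changed for every user
        return []

    assert users_to_flush({'repo_private': True}, False, [1, 2, 3]) == [1, 2, 3]
    assert users_to_flush({}, False, [1, 2, 3]) == []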
492 | def _create_repo(self, repo_name, repo_type, description, owner, |
|
501 | def _create_repo(self, repo_name, repo_type, description, owner, | |
493 | private=False, clone_uri=None, repo_group=None, |
|
502 | private=False, clone_uri=None, repo_group=None, | |
494 | landing_rev=None, fork_of=None, |
|
503 | landing_rev=None, fork_of=None, | |
495 | copy_fork_permissions=False, enable_statistics=False, |
|
504 | copy_fork_permissions=False, enable_statistics=False, | |
496 | enable_locking=False, enable_downloads=False, |
|
505 | enable_locking=False, enable_downloads=False, | |
497 | copy_group_permissions=False, |
|
506 | copy_group_permissions=False, | |
498 | state=Repository.STATE_PENDING): |
|
507 | state=Repository.STATE_PENDING): | |
499 | """ |
|
508 | """ | |
500 | Create a repository inside the database with PENDING state; this should
|
509 | Create a repository inside the database with PENDING state; this should
501 | only be executed by create(), with the exception of importing existing
|
510 | only be executed by create(), with the exception of importing existing
502 | repos.
|
511 | repos.
503 | """ |
|
512 | """ | |
504 | from rhodecode.model.scm import ScmModel |
|
513 | from rhodecode.model.scm import ScmModel | |
505 |
|
514 | |||
506 | owner = self._get_user(owner) |
|
515 | owner = self._get_user(owner) | |
507 | fork_of = self._get_repo(fork_of) |
|
516 | fork_of = self._get_repo(fork_of) | |
508 | repo_group = self._get_repo_group(safe_int(repo_group)) |
|
517 | repo_group = self._get_repo_group(safe_int(repo_group)) | |
509 | default_landing_ref, _lbl = ScmModel.backend_landing_ref(repo_type) |
|
518 | default_landing_ref, _lbl = ScmModel.backend_landing_ref(repo_type) | |
510 | landing_rev = landing_rev or default_landing_ref |
|
519 | landing_rev = landing_rev or default_landing_ref | |
511 |
|
520 | |||
512 | try: |
|
521 | try: | |
513 | repo_name = safe_str(repo_name) |
|
522 | repo_name = safe_str(repo_name) | |
514 | description = safe_str(description) |
|
523 | description = safe_str(description) | |
515 | # repo_name is just the name of the repository,
|
524 | # repo_name is just the name of the repository,
516 | # while repo_name_full is a fully qualified name combined
|
525 | # while repo_name_full is a fully qualified name combined
517 | # from the name and the path of the group
|
526 | # from the name and the path of the group
518 | repo_name_full = repo_name |
|
527 | repo_name_full = repo_name | |
519 | repo_name = repo_name.split(Repository.NAME_SEP)[-1] |
|
528 | repo_name = repo_name.split(Repository.NAME_SEP)[-1] | |
520 |
|
529 | |||
521 | new_repo = Repository() |
|
530 | new_repo = Repository() | |
522 | new_repo.repo_state = state |
|
531 | new_repo.repo_state = state | |
523 | new_repo.enable_statistics = False |
|
532 | new_repo.enable_statistics = False | |
524 | new_repo.repo_name = repo_name_full |
|
533 | new_repo.repo_name = repo_name_full | |
525 | new_repo.repo_type = repo_type |
|
534 | new_repo.repo_type = repo_type | |
526 | new_repo.user = owner |
|
535 | new_repo.user = owner | |
527 | new_repo.group = repo_group |
|
536 | new_repo.group = repo_group | |
528 | new_repo.description = description or repo_name |
|
537 | new_repo.description = description or repo_name | |
529 | new_repo.private = private |
|
538 | new_repo.private = private | |
530 | new_repo.archived = False |
|
539 | new_repo.archived = False | |
531 | new_repo.clone_uri = clone_uri |
|
540 | new_repo.clone_uri = clone_uri | |
532 | new_repo.landing_rev = landing_rev |
|
541 | new_repo.landing_rev = landing_rev | |
533 |
|
542 | |||
534 | new_repo.enable_statistics = enable_statistics |
|
543 | new_repo.enable_statistics = enable_statistics | |
535 | new_repo.enable_locking = enable_locking |
|
544 | new_repo.enable_locking = enable_locking | |
536 | new_repo.enable_downloads = enable_downloads |
|
545 | new_repo.enable_downloads = enable_downloads | |
537 |
|
546 | |||
538 | if repo_group: |
|
547 | if repo_group: | |
539 | new_repo.enable_locking = repo_group.enable_locking |
|
548 | new_repo.enable_locking = repo_group.enable_locking | |
540 |
|
549 | |||
541 | if fork_of: |
|
550 | if fork_of: | |
542 | parent_repo = fork_of |
|
551 | parent_repo = fork_of | |
543 | new_repo.fork = parent_repo |
|
552 | new_repo.fork = parent_repo | |
544 |
|
553 | |||
545 | events.trigger(events.RepoPreCreateEvent(new_repo)) |
|
554 | events.trigger(events.RepoPreCreateEvent(new_repo)) | |
546 |
|
555 | |||
547 | self.sa.add(new_repo) |
|
556 | self.sa.add(new_repo) | |
548 |
|
557 | |||
549 | EMPTY_PERM = 'repository.none' |
|
558 | EMPTY_PERM = 'repository.none' | |
550 | if fork_of and copy_fork_permissions: |
|
559 | if fork_of and copy_fork_permissions: | |
551 | repo = fork_of |
|
560 | repo = fork_of | |
552 | user_perms = UserRepoToPerm.query() \ |
|
561 | user_perms = UserRepoToPerm.query() \ | |
553 | .filter(UserRepoToPerm.repository == repo).all() |
|
562 | .filter(UserRepoToPerm.repository == repo).all() | |
554 | group_perms = UserGroupRepoToPerm.query() \ |
|
563 | group_perms = UserGroupRepoToPerm.query() \ | |
555 | .filter(UserGroupRepoToPerm.repository == repo).all() |
|
564 | .filter(UserGroupRepoToPerm.repository == repo).all() | |
556 |
|
565 | |||
557 | for perm in user_perms: |
|
566 | for perm in user_perms: | |
558 | UserRepoToPerm.create( |
|
567 | UserRepoToPerm.create( | |
559 | perm.user, new_repo, perm.permission) |
|
568 | perm.user, new_repo, perm.permission) | |
560 |
|
569 | |||
561 | for perm in group_perms: |
|
570 | for perm in group_perms: | |
562 | UserGroupRepoToPerm.create( |
|
571 | UserGroupRepoToPerm.create( | |
563 | perm.users_group, new_repo, perm.permission) |
|
572 | perm.users_group, new_repo, perm.permission) | |
564 | # in case we copy permissions and also set this repo to private |
|
573 | # in case we copy permissions and also set this repo to private | |
565 | # override the default user permission to make it a private repo |
|
574 | # override the default user permission to make it a private repo | |
566 | if private: |
|
575 | if private: | |
567 | RepoModel(self.sa).grant_user_permission( |
|
576 | RepoModel(self.sa).grant_user_permission( | |
568 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
577 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) | |
569 |
|
578 | |||
570 | elif repo_group and copy_group_permissions: |
|
579 | elif repo_group and copy_group_permissions: | |
571 | user_perms = UserRepoGroupToPerm.query() \ |
|
580 | user_perms = UserRepoGroupToPerm.query() \ | |
572 | .filter(UserRepoGroupToPerm.group == repo_group).all() |
|
581 | .filter(UserRepoGroupToPerm.group == repo_group).all() | |
573 |
|
582 | |||
574 | group_perms = UserGroupRepoGroupToPerm.query() \ |
|
583 | group_perms = UserGroupRepoGroupToPerm.query() \ | |
575 | .filter(UserGroupRepoGroupToPerm.group == repo_group).all() |
|
584 | .filter(UserGroupRepoGroupToPerm.group == repo_group).all() | |
576 |
|
585 | |||
577 | for perm in user_perms: |
|
586 | for perm in user_perms: | |
578 | perm_name = perm.permission.permission_name.replace( |
|
587 | perm_name = perm.permission.permission_name.replace( | |
579 | 'group.', 'repository.') |
|
588 | 'group.', 'repository.') | |
580 | perm_obj = Permission.get_by_key(perm_name) |
|
589 | perm_obj = Permission.get_by_key(perm_name) | |
581 | UserRepoToPerm.create(perm.user, new_repo, perm_obj) |
|
590 | UserRepoToPerm.create(perm.user, new_repo, perm_obj) | |
582 |
|
591 | |||
583 | for perm in group_perms: |
|
592 | for perm in group_perms: | |
584 | perm_name = perm.permission.permission_name.replace( |
|
593 | perm_name = perm.permission.permission_name.replace( | |
585 | 'group.', 'repository.') |
|
594 | 'group.', 'repository.') | |
586 | perm_obj = Permission.get_by_key(perm_name) |
|
595 | perm_obj = Permission.get_by_key(perm_name) | |
587 | UserGroupRepoToPerm.create(perm.users_group, new_repo, perm_obj) |
|
596 | UserGroupRepoToPerm.create(perm.users_group, new_repo, perm_obj) | |
588 |
|
597 | |||
589 | if private: |
|
598 | if private: | |
590 | RepoModel(self.sa).grant_user_permission( |
|
599 | RepoModel(self.sa).grant_user_permission( | |
591 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
600 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) | |
592 |
|
601 | |||
593 | else: |
|
602 | else: | |
594 | perm_obj = self._create_default_perms(new_repo, private) |
|
603 | perm_obj = self._create_default_perms(new_repo, private) | |
595 | self.sa.add(perm_obj) |
|
604 | self.sa.add(perm_obj) | |
596 |
|
605 | |||
597 | # now automatically start following this repository as owner |
|
606 | # now automatically start following this repository as owner | |
598 | ScmModel(self.sa).toggle_following_repo(new_repo.repo_id, owner.user_id) |
|
607 | ScmModel(self.sa).toggle_following_repo(new_repo.repo_id, owner.user_id) | |
599 |
|
608 | |||
600 | # we need to flush here, in order to check that the database won't
|
609 | # we need to flush here, in order to check that the database won't
601 | # throw any exceptions; filesystem dirs are created at the very end
|
610 | # throw any exceptions; filesystem dirs are created at the very end
602 | self.sa.flush() |
|
611 | self.sa.flush() | |
603 | events.trigger(events.RepoCreateEvent(new_repo, actor=owner)) |
|
612 | events.trigger(events.RepoCreateEvent(new_repo, actor=owner)) | |
604 | return new_repo |
|
613 | return new_repo | |
605 |
|
614 | |||
606 | except Exception: |
|
615 | except Exception: | |
607 | log.error(traceback.format_exc()) |
|
616 | log.error(traceback.format_exc()) | |
608 | raise |
|
617 | raise | |
609 |
|
618 | |||
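When no landing revision is passed, _create_repo falls back to the backend default. A hedged sketch of that lookup (mirrors the ScmModel call in the method body; the exact returned ref is backend- and configuration-dependent):

    from rhodecode.model.scm import ScmModel

    default_landing_ref, _lbl = ScmModel.backend_landing_ref('git')
    landing_rev = None or default_landing_ref  # the default wins when nothing is given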
610 | def create(self, form_data, cur_user): |
|
619 | def create(self, form_data, cur_user): | |
611 | """ |
|
620 | """ | |
612 | Create repository using celery tasks |
|
621 | Create repository using celery tasks | |
613 |
|
622 | |||
614 | :param form_data: |
|
623 | :param form_data: | |
615 | :param cur_user: |
|
624 | :param cur_user: | |
616 | """ |
|
625 | """ | |
617 | from rhodecode.lib.celerylib import tasks, run_task |
|
626 | from rhodecode.lib.celerylib import tasks, run_task | |
618 | return run_task(tasks.create_repo, form_data, cur_user) |
|
627 | return run_task(tasks.create_repo, form_data, cur_user) | |
619 |
|
628 | |||
620 | def update_permissions(self, repo, perm_additions=None, perm_updates=None, |
|
629 | def update_permissions(self, repo, perm_additions=None, perm_updates=None, | |
621 | perm_deletions=None, check_perms=True, |
|
630 | perm_deletions=None, check_perms=True, | |
622 | cur_user=None): |
|
631 | cur_user=None): | |
623 | if not perm_additions: |
|
632 | if not perm_additions: | |
624 | perm_additions = [] |
|
633 | perm_additions = [] | |
625 | if not perm_updates: |
|
634 | if not perm_updates: | |
626 | perm_updates = [] |
|
635 | perm_updates = [] | |
627 | if not perm_deletions: |
|
636 | if not perm_deletions: | |
628 | perm_deletions = [] |
|
637 | perm_deletions = [] | |
629 |
|
638 | |||
630 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') |
|
639 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') | |
631 |
|
640 | |||
632 | changes = { |
|
641 | changes = { | |
633 | 'added': [], |
|
642 | 'added': [], | |
634 | 'updated': [], |
|
643 | 'updated': [], | |
635 | 'deleted': [], |
|
644 | 'deleted': [], | |
636 | 'default_user_changed': None |
|
645 | 'default_user_changed': None | |
637 | } |
|
646 | } | |
638 |
|
647 | |||
639 | repo = self._get_repo(repo) |
|
648 | repo = self._get_repo(repo) | |
640 |
|
649 | |||
641 | # update permissions |
|
650 | # update permissions | |
642 | for member_id, perm, member_type in perm_updates: |
|
651 | for member_id, perm, member_type in perm_updates: | |
643 | member_id = int(member_id) |
|
652 | member_id = int(member_id) | |
644 | if member_type == 'user': |
|
653 | if member_type == 'user': | |
645 | member_name = User.get(member_id).username |
|
654 | member_name = User.get(member_id).username | |
646 | if member_name == User.DEFAULT_USER: |
|
655 | if member_name == User.DEFAULT_USER: | |
647 | # NOTE(dan): detect if we changed permissions for default user |
|
656 | # NOTE(dan): detect if we changed permissions for default user | |
648 | perm_obj = self.sa.query(UserRepoToPerm) \ |
|
657 | perm_obj = self.sa.query(UserRepoToPerm) \ | |
649 | .filter(UserRepoToPerm.user_id == member_id) \ |
|
658 | .filter(UserRepoToPerm.user_id == member_id) \ | |
650 | .filter(UserRepoToPerm.repository == repo) \ |
|
659 | .filter(UserRepoToPerm.repository == repo) \ | |
651 | .scalar() |
|
660 | .scalar() | |
652 | if perm_obj and perm_obj.permission.permission_name != perm: |
|
661 | if perm_obj and perm_obj.permission.permission_name != perm: | |
653 | changes['default_user_changed'] = True |
|
662 | changes['default_user_changed'] = True | |
654 |
|
663 | |||
655 | # this also updates the current one if found
|
664 | # this also updates the current one if found
656 | self.grant_user_permission( |
|
665 | self.grant_user_permission( | |
657 | repo=repo, user=member_id, perm=perm) |
|
666 | repo=repo, user=member_id, perm=perm) | |
658 | elif member_type == 'user_group': |
|
667 | elif member_type == 'user_group': | |
659 | # check if we have permissions to alter this usergroup |
|
668 | # check if we have permissions to alter this usergroup | |
660 | member_name = UserGroup.get(member_id).users_group_name |
|
669 | member_name = UserGroup.get(member_id).users_group_name | |
661 | if not check_perms or HasUserGroupPermissionAny( |
|
670 | if not check_perms or HasUserGroupPermissionAny( | |
662 | *req_perms)(member_name, user=cur_user): |
|
671 | *req_perms)(member_name, user=cur_user): | |
663 | self.grant_user_group_permission( |
|
672 | self.grant_user_group_permission( | |
664 | repo=repo, group_name=member_id, perm=perm) |
|
673 | repo=repo, group_name=member_id, perm=perm) | |
665 | else: |
|
674 | else: | |
666 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
675 | raise ValueError("member_type must be 'user' or 'user_group' " | |
667 | "got {} instead".format(member_type)) |
|
676 | "got {} instead".format(member_type)) | |
668 | changes['updated'].append({'type': member_type, 'id': member_id, |
|
677 | changes['updated'].append({'type': member_type, 'id': member_id, | |
669 | 'name': member_name, 'new_perm': perm}) |
|
678 | 'name': member_name, 'new_perm': perm}) | |
670 |
|
679 | |||
671 | # set new permissions |
|
680 | # set new permissions | |
672 | for member_id, perm, member_type in perm_additions: |
|
681 | for member_id, perm, member_type in perm_additions: | |
673 | member_id = int(member_id) |
|
682 | member_id = int(member_id) | |
674 | if member_type == 'user': |
|
683 | if member_type == 'user': | |
675 | member_name = User.get(member_id).username |
|
684 | member_name = User.get(member_id).username | |
676 | self.grant_user_permission( |
|
685 | self.grant_user_permission( | |
677 | repo=repo, user=member_id, perm=perm) |
|
686 | repo=repo, user=member_id, perm=perm) | |
678 | elif member_type == 'user_group': |
|
687 | elif member_type == 'user_group': | |
679 | # check if we have permissions to alter this usergroup |
|
688 | # check if we have permissions to alter this usergroup | |
680 | member_name = UserGroup.get(member_id).users_group_name |
|
689 | member_name = UserGroup.get(member_id).users_group_name | |
681 | if not check_perms or HasUserGroupPermissionAny( |
|
690 | if not check_perms or HasUserGroupPermissionAny( | |
682 | *req_perms)(member_name, user=cur_user): |
|
691 | *req_perms)(member_name, user=cur_user): | |
683 | self.grant_user_group_permission( |
|
692 | self.grant_user_group_permission( | |
684 | repo=repo, group_name=member_id, perm=perm) |
|
693 | repo=repo, group_name=member_id, perm=perm) | |
685 | else: |
|
694 | else: | |
686 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
695 | raise ValueError("member_type must be 'user' or 'user_group' " | |
687 | "got {} instead".format(member_type)) |
|
696 | "got {} instead".format(member_type)) | |
688 |
|
697 | |||
689 | changes['added'].append({'type': member_type, 'id': member_id, |
|
698 | changes['added'].append({'type': member_type, 'id': member_id, | |
690 | 'name': member_name, 'new_perm': perm}) |
|
699 | 'name': member_name, 'new_perm': perm}) | |
691 | # delete permissions |
|
700 | # delete permissions | |
692 | for member_id, perm, member_type in perm_deletions: |
|
701 | for member_id, perm, member_type in perm_deletions: | |
693 | member_id = int(member_id) |
|
702 | member_id = int(member_id) | |
694 | if member_type == 'user': |
|
703 | if member_type == 'user': | |
695 | member_name = User.get(member_id).username |
|
704 | member_name = User.get(member_id).username | |
696 | self.revoke_user_permission(repo=repo, user=member_id) |
|
705 | self.revoke_user_permission(repo=repo, user=member_id) | |
697 | elif member_type == 'user_group': |
|
706 | elif member_type == 'user_group': | |
698 | # check if we have permissions to alter this usergroup |
|
707 | # check if we have permissions to alter this usergroup | |
699 | member_name = UserGroup.get(member_id).users_group_name |
|
708 | member_name = UserGroup.get(member_id).users_group_name | |
700 | if not check_perms or HasUserGroupPermissionAny( |
|
709 | if not check_perms or HasUserGroupPermissionAny( | |
701 | *req_perms)(member_name, user=cur_user): |
|
710 | *req_perms)(member_name, user=cur_user): | |
702 | self.revoke_user_group_permission( |
|
711 | self.revoke_user_group_permission( | |
703 | repo=repo, group_name=member_id) |
|
712 | repo=repo, group_name=member_id) | |
704 | else: |
|
713 | else: | |
705 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
714 | raise ValueError("member_type must be 'user' or 'user_group' " | |
706 | "got {} instead".format(member_type)) |
|
715 | "got {} instead".format(member_type)) | |
707 |
|
716 | |||
708 | changes['deleted'].append({'type': member_type, 'id': member_id, |
|
717 | changes['deleted'].append({'type': member_type, 'id': member_id, | |
709 | 'name': member_name, 'new_perm': perm}) |
|
718 | 'name': member_name, 'new_perm': perm}) | |
710 | return changes |
|
719 | return changes | |
711 |
|
720 | |||
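All three permission lists consumed above share one tuple shape. A sketch of what a caller might pass (the ids and permission names are made up for illustration):

    # (member_id, perm, member_type) triples; member_type is 'user' or 'user_group'
    perm_additions = [(42, 'repository.read', 'user')]
    perm_updates = [(7, 'repository.write', 'user_group')]
    perm_deletions = [(42, 'repository.read', 'user')]  # perm is only echoed back in `changes`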
712 | def create_fork(self, form_data, cur_user): |
|
721 | def create_fork(self, form_data, cur_user): | |
713 | """ |
|
722 | """ | |
714 | Simple wrapper into executing celery task for fork creation |
|
723 | Simple wrapper into executing celery task for fork creation | |
715 |
|
724 | |||
716 | :param form_data: |
|
725 | :param form_data: | |
717 | :param cur_user: |
|
726 | :param cur_user: | |
718 | """ |
|
727 | """ | |
719 | from rhodecode.lib.celerylib import tasks, run_task |
|
728 | from rhodecode.lib.celerylib import tasks, run_task | |
720 | return run_task(tasks.create_repo_fork, form_data, cur_user) |
|
729 | return run_task(tasks.create_repo_fork, form_data, cur_user) | |
721 |
|
730 | |||
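Both create() and create_fork() funnel through the same celery helper. A hedged sketch of the call shape (the form_data keys shown are an illustrative subset, not the full schema):

    from rhodecode.lib.celerylib import tasks, run_task

    form_data = {'repo_name': 'my-repo', 'repo_type': 'git'}  # illustrative subset
    cur_user = 'admin'  # hypothetical caller
    run_task(tasks.create_repo_fork, form_data, cur_user)  # runs async when celery is enabled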
722 | def archive(self, repo): |
|
731 | def archive(self, repo): | |
723 | """ |
|
732 | """ | |
724 | Archive given repository. Set archive flag. |
|
733 | Archive given repository. Set archive flag. | |
725 |
|
734 | |||
726 | :param repo: |
|
735 | :param repo: | |
727 | """ |
|
736 | """ | |
728 | repo = self._get_repo(repo) |
|
737 | repo = self._get_repo(repo) | |
729 | if repo: |
|
738 | if repo: | |
730 |
|
739 | |||
731 | try: |
|
740 | try: | |
732 | repo.archived = True |
|
741 | repo.archived = True | |
733 | self.sa.add(repo) |
|
742 | self.sa.add(repo) | |
734 | self.sa.commit() |
|
743 | self.sa.commit() | |
735 | except Exception: |
|
744 | except Exception: | |
736 | log.error(traceback.format_exc()) |
|
745 | log.error(traceback.format_exc()) | |
737 | raise |
|
746 | raise | |
738 |
|
747 | |||
739 | def delete(self, repo, forks=None, pull_requests=None, artifacts=None, fs_remove=True, cur_user=None): |
|
748 | def delete(self, repo, forks=None, pull_requests=None, artifacts=None, fs_remove=True, cur_user=None): | |
740 | """ |
|
749 | """ | |
741 | Delete the given repository; the forks parameter defines what to do with
|
750 | Delete the given repository; the forks parameter defines what to do with
742 | attached forks. Throws AttachedForksError if the deleted repo has attached
|
751 | attached forks. Throws AttachedForksError if the deleted repo has attached
743 | forks.
|
752 | forks.
744 |
|
753 | |||
745 | :param repo: |
|
754 | :param repo: | |
746 | :param forks: str 'delete' or 'detach' |
|
755 | :param forks: str 'delete' or 'detach' | |
747 | :param pull_requests: str 'delete' or None |
|
756 | :param pull_requests: str 'delete' or None | |
748 | :param artifacts: str 'delete' or None |
|
757 | :param artifacts: str 'delete' or None | |
749 | :param fs_remove: remove(archive) repo from filesystem |
|
758 | :param fs_remove: remove(archive) repo from filesystem | |
750 | """ |
|
759 | """ | |
751 | if not cur_user: |
|
760 | if not cur_user: | |
752 | cur_user = getattr(get_current_rhodecode_user(), 'username', None) |
|
761 | cur_user = getattr(get_current_rhodecode_user(), 'username', None) | |
753 | repo = self._get_repo(repo) |
|
762 | repo = self._get_repo(repo) | |
754 | if repo: |
|
763 | if repo: | |
755 | if forks == 'detach': |
|
764 | if forks == 'detach': | |
756 | for r in repo.forks: |
|
765 | for r in repo.forks: | |
757 | r.fork = None |
|
766 | r.fork = None | |
758 | self.sa.add(r) |
|
767 | self.sa.add(r) | |
759 | elif forks == 'delete': |
|
768 | elif forks == 'delete': | |
760 | for r in repo.forks: |
|
769 | for r in repo.forks: | |
761 | self.delete(r, forks='delete') |
|
770 | self.delete(r, forks='delete') | |
762 | elif [f for f in repo.forks]: |
|
771 | elif [f for f in repo.forks]: | |
763 | raise AttachedForksError() |
|
772 | raise AttachedForksError() | |
764 |
|
773 | |||
765 | # check for pull requests |
|
774 | # check for pull requests | |
766 | pr_sources = repo.pull_requests_source |
|
775 | pr_sources = repo.pull_requests_source | |
767 | pr_targets = repo.pull_requests_target |
|
776 | pr_targets = repo.pull_requests_target | |
768 | if pull_requests != 'delete' and (pr_sources or pr_targets): |
|
777 | if pull_requests != 'delete' and (pr_sources or pr_targets): | |
769 | raise AttachedPullRequestsError() |
|
778 | raise AttachedPullRequestsError() | |
770 |
|
779 | |||
771 | artifacts_objs = repo.artifacts |
|
780 | artifacts_objs = repo.artifacts | |
772 | if artifacts == 'delete': |
|
781 | if artifacts == 'delete': | |
773 | for a in artifacts_objs: |
|
782 | for a in artifacts_objs: | |
774 | self.sa.delete(a) |
|
783 | self.sa.delete(a) | |
775 | elif [a for a in artifacts_objs]: |
|
784 | elif [a for a in artifacts_objs]: | |
776 | raise AttachedArtifactsError() |
|
785 | raise AttachedArtifactsError() | |
777 |
|
786 | |||
778 | old_repo_dict = repo.get_dict() |
|
787 | old_repo_dict = repo.get_dict() | |
779 | events.trigger(events.RepoPreDeleteEvent(repo)) |
|
788 | events.trigger(events.RepoPreDeleteEvent(repo)) | |
780 | try: |
|
789 | try: | |
781 | self.sa.delete(repo) |
|
790 | self.sa.delete(repo) | |
782 | if fs_remove: |
|
791 | if fs_remove: | |
783 | self._delete_filesystem_repo(repo) |
|
792 | self._delete_filesystem_repo(repo) | |
784 | else: |
|
793 | else: | |
785 | log.debug('skipping removal from filesystem') |
|
794 | log.debug('skipping removal from filesystem') | |
786 | old_repo_dict.update({ |
|
795 | old_repo_dict.update({ | |
787 | 'deleted_by': cur_user, |
|
796 | 'deleted_by': cur_user, | |
788 | 'deleted_on': time.time(), |
|
797 | 'deleted_on': time.time(), | |
789 | }) |
|
798 | }) | |
790 | hooks_base.delete_repository(**old_repo_dict) |
|
799 | hooks_base.delete_repository(**old_repo_dict) | |
791 | events.trigger(events.RepoDeleteEvent(repo)) |
|
800 | events.trigger(events.RepoDeleteEvent(repo)) | |
792 | except Exception: |
|
801 | except Exception: | |
793 | log.error(traceback.format_exc()) |
|
802 | log.error(traceback.format_exc()) | |
794 | raise |
|
803 | raise | |
795 |
|
804 | |||
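A sketch of a delete() call that avoids the three Attached*Error exceptions raised above (assumes an app context and an existing repo object; detaching forks keeps them as standalone repos):

    RepoModel().delete(repo, forks='detach', pull_requests='delete', artifacts='delete')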
796 | def grant_user_permission(self, repo, user, perm): |
|
805 | def grant_user_permission(self, repo, user, perm): | |
797 | """ |
|
806 | """ | |
798 | Grant permission for user on given repository, or update existing one |
|
807 | Grant permission for user on given repository, or update existing one | |
799 | if found |
|
808 | if found | |
800 |
|
809 | |||
801 | :param repo: Instance of Repository, repository_id, or repository name |
|
810 | :param repo: Instance of Repository, repository_id, or repository name | |
802 | :param user: Instance of User, user_id or username |
|
811 | :param user: Instance of User, user_id or username | |
803 | :param perm: Instance of Permission, or permission_name |
|
812 | :param perm: Instance of Permission, or permission_name | |
804 | """ |
|
813 | """ | |
805 | user = self._get_user(user) |
|
814 | user = self._get_user(user) | |
806 | repo = self._get_repo(repo) |
|
815 | repo = self._get_repo(repo) | |
807 | permission = self._get_perm(perm) |
|
816 | permission = self._get_perm(perm) | |
808 |
|
817 | |||
809 | # check if we have that permission already |
|
818 | # check if we have that permission already | |
810 | obj = self.sa.query(UserRepoToPerm) \ |
|
819 | obj = self.sa.query(UserRepoToPerm) \ | |
811 | .filter(UserRepoToPerm.user == user) \ |
|
820 | .filter(UserRepoToPerm.user == user) \ | |
812 | .filter(UserRepoToPerm.repository == repo) \ |
|
821 | .filter(UserRepoToPerm.repository == repo) \ | |
813 | .scalar() |
|
822 | .scalar() | |
814 | if obj is None: |
|
823 | if obj is None: | |
815 | # create new ! |
|
824 | # create new ! | |
816 | obj = UserRepoToPerm() |
|
825 | obj = UserRepoToPerm() | |
817 | obj.repository = repo |
|
826 | obj.repository = repo | |
818 | obj.user = user |
|
827 | obj.user = user | |
819 | obj.permission = permission |
|
828 | obj.permission = permission | |
820 | self.sa.add(obj) |
|
829 | self.sa.add(obj) | |
821 | log.debug('Granted perm %s to %s on %s', perm, user, repo) |
|
830 | log.debug('Granted perm %s to %s on %s', perm, user, repo) | |
822 | action_logger_generic( |
|
831 | action_logger_generic( | |
823 | 'granted permission: {} to user: {} on repo: {}'.format( |
|
832 | 'granted permission: {} to user: {} on repo: {}'.format( | |
824 | perm, user, repo), namespace='security.repo') |
|
833 | perm, user, repo), namespace='security.repo') | |
825 | return obj |
|
834 | return obj | |
826 |
|
835 | |||
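A usage sketch; per the docstring, repo and user accept instances, ids, or names, and perm is a permission name (the repo path and username here are hypothetical):

    RepoModel().grant_user_permission(
        repo='some-group/some-repo', user='john', perm='repository.write')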
827 | def revoke_user_permission(self, repo, user): |
|
836 | def revoke_user_permission(self, repo, user): | |
828 | """ |
|
837 | """ | |
829 | Revoke permission for user on given repository |
|
838 | Revoke permission for user on given repository | |
830 |
|
839 | |||
831 | :param repo: Instance of Repository, repository_id, or repository name |
|
840 | :param repo: Instance of Repository, repository_id, or repository name | |
832 | :param user: Instance of User, user_id or username |
|
841 | :param user: Instance of User, user_id or username | |
833 | """ |
|
842 | """ | |
834 |
|
843 | |||
835 | user = self._get_user(user) |
|
844 | user = self._get_user(user) | |
836 | repo = self._get_repo(repo) |
|
845 | repo = self._get_repo(repo) | |
837 |
|
846 | |||
838 | obj = self.sa.query(UserRepoToPerm) \ |
|
847 | obj = self.sa.query(UserRepoToPerm) \ | |
839 | .filter(UserRepoToPerm.repository == repo) \ |
|
848 | .filter(UserRepoToPerm.repository == repo) \ | |
840 | .filter(UserRepoToPerm.user == user) \ |
|
849 | .filter(UserRepoToPerm.user == user) \ | |
841 | .scalar() |
|
850 | .scalar() | |
842 | if obj: |
|
851 | if obj: | |
843 | self.sa.delete(obj) |
|
852 | self.sa.delete(obj) | |
844 | log.debug('Revoked perm on %s on %s', repo, user) |
|
853 | log.debug('Revoked perm on %s on %s', repo, user) | |
845 | action_logger_generic( |
|
854 | action_logger_generic( | |
846 | 'revoked permission from user: {} on repo: {}'.format( |
|
855 | 'revoked permission from user: {} on repo: {}'.format( | |
847 | user, repo), namespace='security.repo') |
|
856 | user, repo), namespace='security.repo') | |
848 |
|
857 | |||
849 | def grant_user_group_permission(self, repo, group_name, perm): |
|
858 | def grant_user_group_permission(self, repo, group_name, perm): | |
850 | """ |
|
859 | """ | |
851 | Grant permission for user group on given repository, or update |
|
860 | Grant permission for user group on given repository, or update | |
852 | existing one if found |
|
861 | existing one if found | |
853 |
|
862 | |||
854 | :param repo: Instance of Repository, repository_id, or repository name |
|
863 | :param repo: Instance of Repository, repository_id, or repository name | |
855 | :param group_name: Instance of UserGroup, users_group_id, |
|
864 | :param group_name: Instance of UserGroup, users_group_id, | |
856 | or user group name |
|
865 | or user group name | |
857 | :param perm: Instance of Permission, or permission_name |
|
866 | :param perm: Instance of Permission, or permission_name | |
858 | """ |
|
867 | """ | |
859 | repo = self._get_repo(repo) |
|
868 | repo = self._get_repo(repo) | |
860 | group_name = self._get_user_group(group_name) |
|
869 | group_name = self._get_user_group(group_name) | |
861 | permission = self._get_perm(perm) |
|
870 | permission = self._get_perm(perm) | |
862 |
|
871 | |||
863 | # check if we have that permission already |
|
872 | # check if we have that permission already | |
864 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
873 | obj = self.sa.query(UserGroupRepoToPerm) \ | |
865 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
874 | .filter(UserGroupRepoToPerm.users_group == group_name) \ | |
866 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
875 | .filter(UserGroupRepoToPerm.repository == repo) \ | |
867 | .scalar() |
|
876 | .scalar() | |
868 |
|
877 | |||
869 | if obj is None: |
|
878 | if obj is None: | |
870 | # create new |
|
879 | # create new | |
871 | obj = UserGroupRepoToPerm() |
|
880 | obj = UserGroupRepoToPerm() | |
872 |
|
881 | |||
873 | obj.repository = repo |
|
882 | obj.repository = repo | |
874 | obj.users_group = group_name |
|
883 | obj.users_group = group_name | |
875 | obj.permission = permission |
|
884 | obj.permission = permission | |
876 | self.sa.add(obj) |
|
885 | self.sa.add(obj) | |
877 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo) |
|
886 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo) | |
878 | action_logger_generic( |
|
887 | action_logger_generic( | |
879 | 'granted permission: {} to usergroup: {} on repo: {}'.format( |
|
888 | 'granted permission: {} to usergroup: {} on repo: {}'.format( | |
880 | perm, group_name, repo), namespace='security.repo') |
|
889 | perm, group_name, repo), namespace='security.repo') | |
881 |
|
890 | |||
882 | return obj |
|
891 | return obj | |
883 |
|
892 | |||
884 | def revoke_user_group_permission(self, repo, group_name): |
|
893 | def revoke_user_group_permission(self, repo, group_name): | |
885 | """ |
|
894 | """ | |
886 | Revoke permission for user group on given repository |
|
895 | Revoke permission for user group on given repository | |
887 |
|
896 | |||
888 | :param repo: Instance of Repository, repository_id, or repository name |
|
897 | :param repo: Instance of Repository, repository_id, or repository name | |
889 | :param group_name: Instance of UserGroup, users_group_id, |
|
898 | :param group_name: Instance of UserGroup, users_group_id, | |
890 | or user group name |
|
899 | or user group name | |
891 | """ |
|
900 | """ | |
892 | repo = self._get_repo(repo) |
|
901 | repo = self._get_repo(repo) | |
893 | group_name = self._get_user_group(group_name) |
|
902 | group_name = self._get_user_group(group_name) | |
894 |
|
903 | |||
895 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
904 | obj = self.sa.query(UserGroupRepoToPerm) \ | |
896 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
905 | .filter(UserGroupRepoToPerm.repository == repo) \ | |
897 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
906 | .filter(UserGroupRepoToPerm.users_group == group_name) \ | |
898 | .scalar() |
|
907 | .scalar() | |
899 | if obj: |
|
908 | if obj: | |
900 | self.sa.delete(obj) |
|
909 | self.sa.delete(obj) | |
901 | log.debug('Revoked perm to %s on %s', repo, group_name) |
|
910 | log.debug('Revoked perm to %s on %s', repo, group_name) | |
902 | action_logger_generic( |
|
911 | action_logger_generic( | |
903 | 'revoked permission from usergroup: {} on repo: {}'.format( |
|
912 | 'revoked permission from usergroup: {} on repo: {}'.format( | |
904 | group_name, repo), namespace='security.repo') |
|
913 | group_name, repo), namespace='security.repo') | |
905 |
|
914 | |||
906 | def delete_stats(self, repo_name): |
|
915 | def delete_stats(self, repo_name): | |
907 | """ |
|
916 | """ | |
908 | removes stats for given repo |
|
917 | removes stats for given repo | |
909 |
|
918 | |||
910 | :param repo_name: |
|
919 | :param repo_name: | |
911 | """ |
|
920 | """ | |
912 | repo = self._get_repo(repo_name) |
|
921 | repo = self._get_repo(repo_name) | |
913 | try: |
|
922 | try: | |
914 | obj = self.sa.query(Statistics) \ |
|
923 | obj = self.sa.query(Statistics) \ | |
915 | .filter(Statistics.repository == repo).scalar() |
|
924 | .filter(Statistics.repository == repo).scalar() | |
916 | if obj: |
|
925 | if obj: | |
917 | self.sa.delete(obj) |
|
926 | self.sa.delete(obj) | |
918 | except Exception: |
|
927 | except Exception: | |
919 | log.error(traceback.format_exc()) |
|
928 | log.error(traceback.format_exc()) | |
920 | raise |
|
929 | raise | |
921 |
|
930 | |||
922 | def add_repo_field(self, repo_name, field_key, field_label, field_value='', |
|
931 | def add_repo_field(self, repo_name, field_key, field_label, field_value='', | |
923 | field_type='str', field_desc=''): |
|
932 | field_type='str', field_desc=''): | |
924 |
|
933 | |||
925 | repo = self._get_repo(repo_name) |
|
934 | repo = self._get_repo(repo_name) | |
926 |
|
935 | |||
927 | new_field = RepositoryField() |
|
936 | new_field = RepositoryField() | |
928 | new_field.repository = repo |
|
937 | new_field.repository = repo | |
929 | new_field.field_key = field_key |
|
938 | new_field.field_key = field_key | |
930 | new_field.field_type = field_type # python type |
|
939 | new_field.field_type = field_type # python type | |
931 | new_field.field_value = field_value |
|
940 | new_field.field_value = field_value | |
932 | new_field.field_desc = field_desc |
|
941 | new_field.field_desc = field_desc | |
933 | new_field.field_label = field_label |
|
942 | new_field.field_label = field_label | |
934 | self.sa.add(new_field) |
|
943 | self.sa.add(new_field) | |
935 | return new_field |
|
944 | return new_field | |
936 |
|
945 | |||
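Custom fields defined here pair with the extra-fields loop in update(). A sketch, assuming RepositoryField.PREFIX is the 'ex_' prefix used in current RhodeCode sources (verify for your version; the field key and URL are made up):

    RepoModel().add_repo_field(
        'some-repo', field_key='ticket_url', field_label='Ticket URL')
    # later, set its value through update() using the prefixed kwarg
    RepoModel().update(repo, **{'ex_ticket_url': 'https://tracker.example.com'})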
937 | def delete_repo_field(self, repo_name, field_key): |
|
946 | def delete_repo_field(self, repo_name, field_key): | |
938 | repo = self._get_repo(repo_name) |
|
947 | repo = self._get_repo(repo_name) | |
939 | field = RepositoryField.get_by_key_name(field_key, repo) |
|
948 | field = RepositoryField.get_by_key_name(field_key, repo) | |
940 | if field: |
|
949 | if field: | |
941 | self.sa.delete(field) |
|
950 | self.sa.delete(field) | |
942 |
|
951 | |||
943 | def set_landing_rev(self, repo, landing_rev_name): |
|
952 | def set_landing_rev(self, repo, landing_rev_name): | |
944 | if landing_rev_name.startswith('branch:'): |
|
953 | if landing_rev_name.startswith('branch:'): | |
945 | landing_rev_name = landing_rev_name.split('branch:')[-1] |
|
954 | landing_rev_name = landing_rev_name.split('branch:')[-1] | |
946 | scm_instance = repo.scm_instance() |
|
955 | scm_instance = repo.scm_instance() | |
947 | if scm_instance: |
|
956 | if scm_instance: | |
948 | return scm_instance._remote.set_head_ref(landing_rev_name) |
|
957 | return scm_instance._remote.set_head_ref(landing_rev_name) | |
949 |
|
958 | |||
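Because the 'branch:' prefix is stripped before the head ref is set, both calls below should be equivalent (sketch; assumes a repo object with a working scm_instance):

    RepoModel().set_landing_rev(repo, 'branch:stable')
    RepoModel().set_landing_rev(repo, 'stable')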
950 | def _create_filesystem_repo(self, repo_name, repo_type, repo_group, |
|
959 | def _create_filesystem_repo(self, repo_name, repo_type, repo_group, | |
951 | clone_uri=None, repo_store_location=None, |
|
960 | clone_uri=None, repo_store_location=None, | |
952 | use_global_config=False, install_hooks=True): |
|
961 | use_global_config=False, install_hooks=True): | |
953 | """ |
|
962 | """ | |
954 | makes a repository on the filesystem. It is group aware, meaning it will
|
963 | makes a repository on the filesystem. It is group aware, meaning it will
955 | create a repository within a group and alter the paths according to the
|
964 | create a repository within a group and alter the paths according to the
956 | group location
|
965 | group location
957 |
|
966 | |||
958 | :param repo_name: |
|
967 | :param repo_name: | |
959 | :param repo_type:
|
968 | :param repo_type:
960 | :param repo_group:
|
969 | :param repo_group:
961 | :param clone_uri: |
|
970 | :param clone_uri: | |
962 | :param repo_store_location: |
|
971 | :param repo_store_location: | |
963 | """ |
|
972 | """ | |
964 | from rhodecode.lib.utils import is_valid_repo, is_valid_repo_group |
|
973 | from rhodecode.lib.utils import is_valid_repo, is_valid_repo_group | |
965 | from rhodecode.model.scm import ScmModel |
|
974 | from rhodecode.model.scm import ScmModel | |
966 |
|
975 | |||
967 | if Repository.NAME_SEP in repo_name: |
|
976 | if Repository.NAME_SEP in repo_name: | |
968 | raise ValueError( |
|
977 | raise ValueError( | |
969 | 'repo_name must not contain groups, got `%s`' % repo_name
|
978 | 'repo_name must not contain groups, got `%s`' % repo_name
970 |
|
979 | |||
971 | if isinstance(repo_group, RepoGroup): |
|
980 | if isinstance(repo_group, RepoGroup): | |
972 | new_parent_path = os.sep.join(repo_group.full_path_splitted) |
|
981 | new_parent_path = os.sep.join(repo_group.full_path_splitted) | |
973 | else: |
|
982 | else: | |
974 | new_parent_path = repo_group or '' |
|
983 | new_parent_path = repo_group or '' | |
975 |
|
984 | |||
976 | if repo_store_location: |
|
985 | if repo_store_location: | |
977 | _paths = [repo_store_location] |
|
986 | _paths = [repo_store_location] | |
978 | else: |
|
987 | else: | |
979 | _paths = [self.repos_path, new_parent_path, repo_name] |
|
988 | _paths = [self.repos_path, new_parent_path, repo_name] | |
980 | # we need to make it str for mercurial |
|
989 | # we need to make it str for mercurial | |
981 | repo_path = os.path.join(*map(lambda x: safe_str(x), _paths)) |
|
990 | repo_path = os.path.join(*map(lambda x: safe_str(x), _paths)) | |
982 |
|
991 | |||
983 | # check if this path is not a repository |
|
992 | # check if this path is not a repository | |
984 | if is_valid_repo(repo_path, self.repos_path): |
|
993 | if is_valid_repo(repo_path, self.repos_path): | |
985 | raise Exception(f'This path {repo_path} is a valid repository') |
|
994 | raise Exception(f'This path {repo_path} is a valid repository') | |
986 |
|
995 | |||
987 | # check if this path is a group |
|
996 | # check if this path is a group | |
988 | if is_valid_repo_group(repo_path, self.repos_path): |
|
997 | if is_valid_repo_group(repo_path, self.repos_path): | |
989 | raise Exception(f'This path {repo_path} is a valid group') |
|
998 | raise Exception(f'This path {repo_path} is a valid group') | |
990 |
|
999 | |||
991 | log.info('creating repo %s in %s from url: `%s`', |
|
1000 | log.info('creating repo %s in %s from url: `%s`', | |
992 | repo_name, safe_str(repo_path), |
|
1001 | repo_name, safe_str(repo_path), | |
993 | obfuscate_url_pw(clone_uri)) |
|
1002 | obfuscate_url_pw(clone_uri)) | |
994 |
|
1003 | |||
995 | backend = get_backend(repo_type) |
|
1004 | backend = get_backend(repo_type) | |
996 |
|
1005 | |||
997 | config_repo = None if use_global_config else repo_name |
|
1006 | config_repo = None if use_global_config else repo_name | |
998 | if config_repo and new_parent_path: |
|
1007 | if config_repo and new_parent_path: | |
999 | config_repo = Repository.NAME_SEP.join( |
|
1008 | config_repo = Repository.NAME_SEP.join( | |
1000 | (new_parent_path, config_repo)) |
|
1009 | (new_parent_path, config_repo)) | |
1001 | config = make_db_config(clear_session=False, repo=config_repo) |
|
1010 | config = make_db_config(clear_session=False, repo=config_repo) | |
1002 | config.set('extensions', 'largefiles', '') |
|
1011 | config.set('extensions', 'largefiles', '') | |
1003 |
|
1012 | |||
1004 | # patch and reset hooks section of UI config to not run any |
|
1013 | # patch and reset hooks section of UI config to not run any | |
1005 | # hooks on creating remote repo |
|
1014 | # hooks on creating remote repo | |
1006 | config.clear_section('hooks') |
|
1015 | config.clear_section('hooks') | |
1007 |
|
1016 | |||
1008 | # TODO: johbo: Unify this, hardcoded "bare=True" does not look nice |
|
1017 | # TODO: johbo: Unify this, hardcoded "bare=True" does not look nice | |
1009 | if repo_type == 'git': |
|
1018 | if repo_type == 'git': | |
1010 | repo = backend( |
|
1019 | repo = backend( | |
1011 | repo_path, config=config, create=True, src_url=clone_uri, bare=True, |
|
1020 | repo_path, config=config, create=True, src_url=clone_uri, bare=True, | |
1012 | with_wire={"cache": False}) |
|
1021 | with_wire={"cache": False}) | |
1013 | else: |
|
1022 | else: | |
1014 | repo = backend( |
|
1023 | repo = backend( | |
1015 | repo_path, config=config, create=True, src_url=clone_uri, |
|
1024 | repo_path, config=config, create=True, src_url=clone_uri, | |
1016 | with_wire={"cache": False}) |
|
1025 | with_wire={"cache": False}) | |
1017 |
|
1026 | |||
1018 | if install_hooks: |
|
1027 | if install_hooks: | |
1019 | repo.install_hooks() |
|
1028 | repo.install_hooks() | |
1020 |
|
1029 | |||
1021 | log.debug('Created repo %s with %s backend', |
|
1030 | log.debug('Created repo %s with %s backend', | |
1022 | safe_str(repo_name), safe_str(repo_type)) |
|
1031 | safe_str(repo_name), safe_str(repo_type)) | |
1023 | return repo |
|
1032 | return repo | |
1024 |
|
1033 | |||
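The path assembly above reduces to a plain os.path.join over the store, the group path, and the repo name. A runnable, RhodeCode-free illustration (the paths are made up):

    import os

    repos_path, new_parent_path, repo_name = '/srv/repos', 'my-group/sub', 'my-repo'
    repo_path = os.path.join(*map(str, [repos_path, new_parent_path, repo_name]))
    print(repo_path)  # /srv/repos/my-group/sub/my-repo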
1025 | def _rename_filesystem_repo(self, old, new): |
|
1034 | def _rename_filesystem_repo(self, old, new): | |
1026 | """ |
|
1035 | """ | |
1027 | renames repository on filesystem |
|
1036 | renames repository on filesystem | |
1028 |
|
1037 | |||
1029 | :param old: old name |
|
1038 | :param old: old name | |
1030 | :param new: new name |
|
1039 | :param new: new name | |
1031 | """ |
|
1040 | """ | |
1032 | log.info('renaming repo from %s to %s', old, new) |
|
1041 | log.info('renaming repo from %s to %s', old, new) | |
1033 |
|
1042 | |||
1034 | old_path = os.path.join(self.repos_path, old) |
|
1043 | old_path = os.path.join(self.repos_path, old) | |
1035 | new_path = os.path.join(self.repos_path, new) |
|
1044 | new_path = os.path.join(self.repos_path, new) | |
1036 | if os.path.isdir(new_path): |
|
1045 | if os.path.isdir(new_path): | |
1037 | raise Exception( |
|
1046 | raise Exception( | |
1038 | 'Was trying to rename to already existing dir %s' % new_path |
|
1047 | 'Was trying to rename to already existing dir %s' % new_path | |
1039 | ) |
|
1048 | ) | |
1040 | shutil.move(old_path, new_path) |
|
1049 | shutil.move(old_path, new_path) | |
1041 |
|
1050 | |||
1042 | def _delete_filesystem_repo(self, repo): |
|
1051 | def _delete_filesystem_repo(self, repo): | |
1043 | """ |
|
1052 | """ | |
1044 | removes the repo from the filesystem; the removal is actually done by
|
1053 | removes the repo from the filesystem; the removal is actually done by
1045 | adding an rm__ prefix to the dir and renaming the internal .hg/.git dirs, so this
|
1054 | adding an rm__ prefix to the dir and renaming the internal .hg/.git dirs, so this
1046 | repository is no longer valid for rhodecode; it can be undeleted later on
|
1055 | repository is no longer valid for rhodecode; it can be undeleted later on
1047 | by reverting the renames on this repository
|
1056 | by reverting the renames on this repository
1048 |
|
1057 | |||
1049 | :param repo: repo object |
|
1058 | :param repo: repo object | |
1050 | """ |
|
1059 | """ | |
1051 | rm_path = os.path.join(self.repos_path, repo.repo_name) |
|
1060 | rm_path = os.path.join(self.repos_path, repo.repo_name) | |
1052 | repo_group = repo.group |
|
1061 | repo_group = repo.group | |
1053 | log.info("delete_filesystem_repo: removing repository %s", rm_path) |
|
1062 | log.info("delete_filesystem_repo: removing repository %s", rm_path) | |
1054 | # disable hg/git internals so the directory doesn't get detected as a repo
|
1063 | # disable hg/git internals so the directory doesn't get detected as a repo
1055 | alias = repo.repo_type |
|
1064 | alias = repo.repo_type | |
1056 |
|
1065 | |||
1057 | config = make_db_config(clear_session=False) |
|
1066 | config = make_db_config(clear_session=False) | |
1058 | config.set('extensions', 'largefiles', '') |
|
1067 | config.set('extensions', 'largefiles', '') | |
1059 | bare = getattr(repo.scm_instance(config=config), 'bare', False) |
|
1068 | bare = getattr(repo.scm_instance(config=config), 'bare', False) | |
1060 |
|
1069 | |||
1061 | # skip this for bare git repos |
|
1070 | # skip this for bare git repos | |
1062 | if not bare: |
|
1071 | if not bare: | |
1063 | # disable VCS repo |
|
1072 | # disable VCS repo | |
1064 | vcs_path = os.path.join(rm_path, '.%s' % alias) |
|
1073 | vcs_path = os.path.join(rm_path, '.%s' % alias) | |
1065 | if os.path.exists(vcs_path): |
|
1074 | if os.path.exists(vcs_path): | |
1066 | shutil.move(vcs_path, os.path.join(rm_path, 'rm__.%s' % alias)) |
|
1075 | shutil.move(vcs_path, os.path.join(rm_path, 'rm__.%s' % alias)) | |
1067 |
|
1076 | |||
1068 | _now = datetime.datetime.now() |
|
1077 | _now = datetime.datetime.now() | |
1069 | _ms = str(_now.microsecond).rjust(6, '0') |
|
1078 | _ms = str(_now.microsecond).rjust(6, '0') | |
1070 | _d = 'rm__{}__{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), |
|
1079 | _d = 'rm__{}__{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), | |
1071 | repo.just_name) |
|
1080 | repo.just_name) | |
1072 | if repo_group: |
|
1081 | if repo_group: | |
1073 | # if repository is in group, prefix the removal path with the group |
|
1082 | # if repository is in group, prefix the removal path with the group | |
1074 | args = repo_group.full_path_splitted + [_d] |
|
1083 | args = repo_group.full_path_splitted + [_d] | |
1075 | _d = os.path.join(*args) |
|
1084 | _d = os.path.join(*args) | |
1076 |
|
1085 | |||
1077 | if os.path.isdir(rm_path): |
|
1086 | if os.path.isdir(rm_path): | |
1078 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) |
|
1087 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) | |
1079 |
|
1088 | |||
1080 | # finally cleanup diff-cache if it exists |
|
1089 | # finally cleanup diff-cache if it exists | |
1081 | cached_diffs_dir = repo.cached_diffs_dir |
|
1090 | cached_diffs_dir = repo.cached_diffs_dir | |
1082 | if os.path.isdir(cached_diffs_dir): |
|
1091 | if os.path.isdir(cached_diffs_dir): | |
1083 | shutil.rmtree(cached_diffs_dir) |
|
1092 | shutil.rmtree(cached_diffs_dir) | |
1084 |
|
1093 | |||
1085 |
|
1094 | |||
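The rm__ naming scheme used for soft deletion is plain string formatting. A runnable illustration of the directory name it produces ('my-repo' is a placeholder):

    import datetime

    _now = datetime.datetime.now()
    _ms = str(_now.microsecond).rjust(6, '0')
    print('rm__{}__{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), 'my-repo'))
    # e.g. rm__20240917_101530_000123__my-repo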
1086 | class ReadmeFinder: |
|
1095 | class ReadmeFinder: | |
1087 | """ |
|
1096 | """ | |
1088 | Utility which knows how to find a readme for a specific commit. |
|
1097 | Utility which knows how to find a readme for a specific commit. | |
1089 |
|
1098 | |||
1090 | The main idea is that this is a configurable algorithm. When creating an |
|
1099 | The main idea is that this is a configurable algorithm. When creating an | |
1091 | instance you can define parameters, currently only the `default_renderer`. |
|
1100 | instance you can define parameters, currently only the `default_renderer`. | |
1092 | Based on this configuration the method :meth:`search` behaves slightly |
|
1101 | Based on this configuration the method :meth:`search` behaves slightly | |
1093 | differently.
|
1102 | differently.
1094 | """ |
|
1103 | """ | |
1095 |
|
1104 | |||
1096 | readme_re = re.compile(r'^readme(\.[^\.]+)?$', re.IGNORECASE) |
|
1105 | readme_re = re.compile(r'^readme(\.[^\.]+)?$', re.IGNORECASE) | |
1097 | path_re = re.compile(r'^docs?', re.IGNORECASE) |
|
1106 | path_re = re.compile(r'^docs?', re.IGNORECASE) | |
1098 |
|
1107 | |||
1099 | default_priorities = { |
|
1108 | default_priorities = { | |
1100 | None: 0, |
|
1109 | None: 0, | |
1101 | '.rst': 1, |
|
1110 | '.rst': 1, | |
1102 | '.md': 1, |
|
1111 | '.md': 1, | |
1103 | '.rest': 2, |
|
1112 | '.rest': 2, | |
1104 | '.mkdn': 2, |
|
1113 | '.mkdn': 2, | |
1105 | '.text': 2, |
|
1114 | '.text': 2, | |
1106 | '.txt': 3, |
|
1115 | '.txt': 3, | |
1107 | '.mdown': 3, |
|
1116 | '.mdown': 3, | |
1108 | '.markdown': 4, |
|
1117 | '.markdown': 4, | |
1109 | } |
|
1118 | } | |
1110 |
|
1119 | |||
1111 | path_priority = { |
|
1120 | path_priority = { | |
1112 | 'doc': 0, |
|
1121 | 'doc': 0, | |
1113 | 'docs': 1, |
|
1122 | 'docs': 1, | |
1114 | } |
|
1123 | } | |
1115 |
|
1124 | |||
1116 | FALLBACK_PRIORITY = 99 |
|
1125 | FALLBACK_PRIORITY = 99 | |
1117 |
|
1126 | |||
1118 | RENDERER_TO_EXTENSION = { |
|
1127 | RENDERER_TO_EXTENSION = { | |
1119 | 'rst': ['.rst', '.rest'], |
|
1128 | 'rst': ['.rst', '.rest'], | |
1120 | 'markdown': ['.md', '.mkdn', '.mdown', '.markdown'],
|
1129 | 'markdown': ['.md', '.mkdn', '.mdown', '.markdown'],
1121 | } |
|
1130 | } | |
1122 |
|
1131 | |||
1123 | def __init__(self, default_renderer=None): |
|
1132 | def __init__(self, default_renderer=None): | |
1124 | self._default_renderer = default_renderer |
|
1133 | self._default_renderer = default_renderer | |
1125 | self._renderer_extensions = self.RENDERER_TO_EXTENSION.get( |
|
1134 | self._renderer_extensions = self.RENDERER_TO_EXTENSION.get( | |
1126 | default_renderer, []) |
|
1135 | default_renderer, []) | |
1127 |
|
1136 | |||
1128 | def search(self, commit, path='/'): |
|
1137 | def search(self, commit, path='/'): | |
1129 | """ |
|
1138 | """ | |
1130 | Find a readme in the given `commit`. |
|
1139 | Find a readme in the given `commit`. | |
1131 | """ |
|
1140 | """ | |
1132 | # first, check the PATH type to confirm it is actually a DIR
|
1141 | # first, check the PATH type to confirm it is actually a DIR
1133 | if commit.get_node(path).kind != NodeKind.DIR: |
|
1142 | if commit.get_node(path).kind != NodeKind.DIR: | |
1134 | return None |
|
1143 | return None | |
1135 |
|
1144 | |||
1136 | nodes = commit.get_nodes(path) |
|
1145 | nodes = commit.get_nodes(path) | |
1137 | matches = self._match_readmes(nodes) |
|
1146 | matches = self._match_readmes(nodes) | |
1138 | matches = self._sort_according_to_priority(matches) |
|
1147 | matches = self._sort_according_to_priority(matches) | |
1139 | if matches: |
|
1148 | if matches: | |
1140 | return matches[0].node |
|
1149 | return matches[0].node | |
1141 |
|
1150 | |||
1142 | paths = self._match_paths(nodes) |
|
1151 | paths = self._match_paths(nodes) | |
1143 | paths = self._sort_paths_according_to_priority(paths) |
|
1152 | paths = self._sort_paths_according_to_priority(paths) | |
1144 | for path in paths: |
|
1153 | for path in paths: | |
1145 | match = self.search(commit, path=path) |
|
1154 | match = self.search(commit, path=path) | |
1146 | if match: |
|
1155 | if match: | |
1147 | return match |
|
1156 | return match | |
1148 |
|
1157 | |||
1149 | return None |
|
1158 | return None | |
1150 |
|
1159 | |||
1151 | def _match_readmes(self, nodes): |
|
1160 | def _match_readmes(self, nodes): | |
1152 | for node in nodes: |
|
1161 | for node in nodes: | |
1153 | if not node.is_file(): |
|
1162 | if not node.is_file(): | |
1154 | continue |
|
1163 | continue | |
1155 | path = node.path.rsplit('/', 1)[-1] |
|
1164 | path = node.path.rsplit('/', 1)[-1] | |
1156 | match = self.readme_re.match(path) |
|
1165 | match = self.readme_re.match(path) | |
1157 | if match: |
|
1166 | if match: | |
1158 | extension = match.group(1) |
|
1167 | extension = match.group(1) | |
1159 | yield ReadmeMatch(node, match, self._priority(extension)) |
|
1168 | yield ReadmeMatch(node, match, self._priority(extension)) | |
1160 |
|
1169 | |||
1161 | def _match_paths(self, nodes): |
|
1170 | def _match_paths(self, nodes): | |
1162 | for node in nodes: |
|
1171 | for node in nodes: | |
1163 | if not node.is_dir(): |
|
1172 | if not node.is_dir(): | |
1164 | continue |
|
1173 | continue | |
1165 | match = self.path_re.match(node.path) |
|
1174 | match = self.path_re.match(node.path) | |
1166 | if match: |
|
1175 | if match: | |
1167 | yield node.path |
|
1176 | yield node.path | |
1168 |
|
1177 | |||
1169 | def _priority(self, extension): |
|
1178 | def _priority(self, extension): | |
1170 | renderer_priority = ( |
|
1179 | renderer_priority = ( | |
1171 | 0 if extension in self._renderer_extensions else 1) |
|
1180 | 0 if extension in self._renderer_extensions else 1) | |
1172 | extension_priority = self.default_priorities.get( |
|
1181 | extension_priority = self.default_priorities.get( | |
1173 | extension, self.FALLBACK_PRIORITY) |
|
1182 | extension, self.FALLBACK_PRIORITY) | |
1174 | return (renderer_priority, extension_priority) |
|
1183 | return (renderer_priority, extension_priority) | |
1175 |
|
1184 | |||
1176 | def _sort_according_to_priority(self, matches): |
|
1185 | def _sort_according_to_priority(self, matches): | |
1177 |
|
1186 | |||
1178 | def priority_and_path(match): |
|
1187 | def priority_and_path(match): | |
1179 | return (match.priority, match.path) |
|
1188 | return (match.priority, match.path) | |
1180 |
|
1189 | |||
1181 | return sorted(matches, key=priority_and_path) |
|
1190 | return sorted(matches, key=priority_and_path) | |
1182 |
|
1191 | |||
1183 | def _sort_paths_according_to_priority(self, paths): |
|
1192 | def _sort_paths_according_to_priority(self, paths): | |
1184 |
|
1193 | |||
1185 | def priority_and_path(path): |
|
1194 | def priority_and_path(path): | |
1186 | return (self.path_priority.get(path, self.FALLBACK_PRIORITY), path) |
|
1195 | return (self.path_priority.get(path, self.FALLBACK_PRIORITY), path) | |
1187 |
|
1196 | |||
1188 | return sorted(paths, key=priority_and_path) |
|
1197 | return sorted(paths, key=priority_and_path) | |
1189 |
|
1198 | |||
1190 |
|
1199 | |||
1191 | class ReadmeMatch: |
|
1200 | class ReadmeMatch: | |
1192 |
|
1201 | |||
1193 | def __init__(self, node, match, priority): |
|
1202 | def __init__(self, node, match, priority): | |
1194 | self.node = node |
|
1203 | self.node = node | |
1195 | self._match = match |
|
1204 | self._match = match | |
1196 | self.priority = priority |
|
1205 | self.priority = priority | |
1197 |
|
1206 | |||
1198 | @property |
|
1207 | @property | |
1199 | def path(self): |
|
1208 | def path(self): | |
1200 | return self.node.path |
|
1209 | return self.node.path | |
1201 |
|
1210 | |||
1202 | def __repr__(self): |
|
1211 | def __repr__(self): | |
1203 | return f'<ReadmeMatch {self.path} priority={self.priority}>'
|
1212 | return f'<ReadmeMatch {self.path} priority={self.priority}>'
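A sketch of how these two classes are typically driven together (assumes a commit object obtained from a repository's scm_instance, which is not shown in this diff):

    finder = ReadmeFinder(default_renderer='markdown')
    readme_node = finder.search(commit)  # scans '/', then recurses into doc/docs
    if readme_node is not None:
        print(readme_node.path)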
@@ -1,886 +1,889 b'' | |||||
old 1 → new 1:

-# Copyright (C) 2011-202
+# Copyright (C) 2011-2024 RhodeCode GmbH
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 |
|
19 | |||
20 | """ |
|
20 | """ | |
21 | repo group model for RhodeCode |
|
21 | repo group model for RhodeCode | |
22 | """ |
|
22 | """ | |
23 |
|
23 | |||
24 | import os |
|
24 | import os | |
25 | import datetime |
|
25 | import datetime | |
26 | import itertools |
|
26 | import itertools | |
27 | import logging |
|
27 | import logging | |
28 | import shutil |
|
28 | import shutil | |
29 | import time |
|
29 | import time | |
30 | import traceback |
|
30 | import traceback | |
31 | import string |
|
31 | import string | |
32 |
|
32 | |||
33 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
|||
34 |
|
33 | |||
35 | from rhodecode import events |
|
34 | from rhodecode import events | |
36 | from rhodecode.model import BaseModel |
|
35 | from rhodecode.model import BaseModel | |
37 | from rhodecode.model.db import (_hash_key, func, or_, in_filter_generator, |
|
36 | from rhodecode.model.db import (_hash_key, func, or_, in_filter_generator, | |
38 | Session, RepoGroup, UserRepoGroupToPerm, User, Permission, UserGroupRepoGroupToPerm, |
|
37 | Session, RepoGroup, UserRepoGroupToPerm, User, Permission, UserGroupRepoGroupToPerm, | |
39 | UserGroup, Repository) |
|
38 | UserGroup, Repository) | |
40 | from rhodecode.model.permission import PermissionModel |
|
39 | from rhodecode.model.permission import PermissionModel | |
41 | from rhodecode.model.settings import |
|
40 | from rhodecode.model.settings import SettingsModel | |
42 | from rhodecode.lib.caching_query import FromCache |
|
41 | from rhodecode.lib.caching_query import FromCache | |
43 | from rhodecode.lib.utils2 import action_logger_generic |
|
42 | from rhodecode.lib.utils2 import action_logger_generic | |
44 |
|
43 | |||
45 | log = logging.getLogger(__name__) |
|
44 | log = logging.getLogger(__name__) | |
46 |
|
45 | |||
47 |
|
46 | |||
48 | class RepoGroupModel(BaseModel): |
|
47 | class RepoGroupModel(BaseModel): | |
49 |
|
48 | |||
50 | cls = RepoGroup |
|
49 | cls = RepoGroup | |
51 | PERSONAL_GROUP_DESC = 'personal repo group of user `%(username)s`' |
|
50 | PERSONAL_GROUP_DESC = 'personal repo group of user `%(username)s`' | |
52 | PERSONAL_GROUP_PATTERN = '${username}' # default |
|
51 | PERSONAL_GROUP_PATTERN = '${username}' # default | |
53 |
|
52 | |||
54 | def _get_user_group(self, users_group): |
|
53 | def _get_user_group(self, users_group): | |
55 | return self._get_instance(UserGroup, users_group, |
|
54 | return self._get_instance(UserGroup, users_group, | |
56 | callback=UserGroup.get_by_group_name) |
|
55 | callback=UserGroup.get_by_group_name) | |
57 |
|
56 | |||
58 | def _get_repo_group(self, repo_group): |
|
57 | def _get_repo_group(self, repo_group): | |
59 | return self._get_instance(RepoGroup, repo_group, |
|
58 | return self._get_instance(RepoGroup, repo_group, | |
60 | callback=RepoGroup.get_by_group_name) |
|
59 | callback=RepoGroup.get_by_group_name) | |
61 |
|
60 | |||
62 | def get_repo_group(self, repo_group): |
|
61 | def get_repo_group(self, repo_group): | |
63 | return self._get_repo_group(repo_group) |
|
62 | return self._get_repo_group(repo_group) | |
64 |
|
63 | |||
65 | def get_by_group_name(self, repo_group_name, cache=None): |
|
64 | def get_by_group_name(self, repo_group_name, cache=None): | |
66 | repo = self.sa.query(RepoGroup) \ |
|
65 | repo = self.sa.query(RepoGroup) \ | |
67 | .filter(RepoGroup.group_name == repo_group_name) |
|
66 | .filter(RepoGroup.group_name == repo_group_name) | |
68 |
|
67 | |||
69 | if cache: |
|
68 | if cache: | |
70 | name_key = _hash_key(repo_group_name) |
|
69 | name_key = _hash_key(repo_group_name) | |
71 | repo = repo.options( |
|
70 | repo = repo.options( | |
72 | FromCache("sql_cache_short", f"get_repo_group_{name_key}")) |
|
71 | FromCache("sql_cache_short", f"get_repo_group_{name_key}")) | |
73 | return repo.scalar() |
|
72 | return repo.scalar() | |
74 |
|
73 | |||
75 | def get_default_create_personal_repo_group(self): |
|
74 | def get_default_create_personal_repo_group(self): | |
76 | value = SettingsModel().get_setting_by_name( |
|
75 | value = SettingsModel().get_setting_by_name( | |
77 | 'create_personal_repo_group') |
|
76 | 'create_personal_repo_group') | |
78 | return value.app_settings_value if value else None or False |
|
77 | return value.app_settings_value if value else None or False | |
79 |
|
78 | |||
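A side note on the `value.app_settings_value if value else None or False` expression above: the conditional expression binds looser than `or`, so the fallback branch is `(None or False)`, which always evaluates to `False`. A minimal standalone sketch of the precedence:

    value = None
    # parsed as: value.app_settings_value if value else (None or False)
    result = value.app_settings_value if value else None or False
    assert result is False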
80 | def get_personal_group_name_pattern(self): |
|
79 | def get_personal_group_name_pattern(self): | |
81 | value = SettingsModel().get_setting_by_name( |
|
80 | value = SettingsModel().get_setting_by_name( | |
82 | 'personal_repo_group_pattern') |
|
81 | 'personal_repo_group_pattern') | |
83 | val = value.app_settings_value if value else None |
|
82 | val = value.app_settings_value if value else None | |
84 | group_template = val or self.PERSONAL_GROUP_PATTERN |
|
83 | group_template = val or self.PERSONAL_GROUP_PATTERN | |
85 |
|
84 | |||
86 | group_template = group_template.lstrip('/') |
|
85 | group_template = group_template.lstrip('/') | |
87 | return group_template |
|
86 | return group_template | |
88 |
|
87 | |||
89 | def get_personal_group_name(self, user): |
|
88 | def get_personal_group_name(self, user): | |
90 | template = self.get_personal_group_name_pattern() |
|
89 | template = self.get_personal_group_name_pattern() | |
91 | return string.Template(template).safe_substitute( |
|
90 | return string.Template(template).safe_substitute( | |
92 | username=user.username, |
|
91 | username=user.username, | |
93 | user_id=user.user_id, |
|
92 | user_id=user.user_id, | |
94 | first_name=user.first_name, |
|
93 | first_name=user.first_name, | |
95 | last_name=user.last_name, |
|
94 | last_name=user.last_name, | |
96 | ) |
|
95 | ) | |
97 |
|
96 | |||
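To make the pattern substitution above concrete, here is a minimal sketch of what get_personal_group_name does with the default `${username}` pattern (the user attributes are invented for the example):

    import string

    template = '${username}'  # the default PERSONAL_GROUP_PATTERN
    name = string.Template(template).safe_substitute(
        username='jdoe', user_id=42, first_name='John', last_name='Doe')
    assert name == 'jdoe'

    # safe_substitute leaves unknown placeholders untouched instead of raising
    assert string.Template('${team}/${username}').safe_substitute(username='jdoe') == '${team}/jdoe'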
98 | def create_personal_repo_group(self, user, commit_early=True): |
|
97 | def create_personal_repo_group(self, user, commit_early=True): | |
99 | desc = self.PERSONAL_GROUP_DESC % {'username': user.username} |
|
98 | desc = self.PERSONAL_GROUP_DESC % {'username': user.username} | |
100 | personal_repo_group_name = self.get_personal_group_name(user) |
|
99 | personal_repo_group_name = self.get_personal_group_name(user) | |
101 |
|
100 | |||
102 | # create a new one |
|
101 | # create a new one | |
103 | RepoGroupModel().create( |
|
102 | RepoGroupModel().create( | |
104 | group_name=personal_repo_group_name, |
|
103 | group_name=personal_repo_group_name, | |
105 | group_description=desc, |
|
104 | group_description=desc, | |
106 | owner=user.username, |
|
105 | owner=user.username, | |
107 | personal=True, |
|
106 | personal=True, | |
108 | commit_early=commit_early) |
|
107 | commit_early=commit_early) | |
109 |
|
108 | |||
110 | def _create_default_perms(self, new_group): |
|
109 | def _create_default_perms(self, new_group): | |
111 | # create default permission |
|
110 | # create default permission | |
112 | default_perm = 'group.read' |
|
111 | default_perm = 'group.read' | |
113 | def_user = User.get_default_user() |
|
112 | def_user = User.get_default_user() | |
114 | for p in def_user.user_perms: |
|
113 | for p in def_user.user_perms: | |
115 | if p.permission.permission_name.startswith('group.'): |
|
114 | if p.permission.permission_name.startswith('group.'): | |
116 | default_perm = p.permission.permission_name |
|
115 | default_perm = p.permission.permission_name | |
117 | break |
|
116 | break | |
118 |
|
117 | |||
119 | repo_group_to_perm = UserRepoGroupToPerm() |
|
118 | repo_group_to_perm = UserRepoGroupToPerm() | |
120 | repo_group_to_perm.permission = Permission.get_by_key(default_perm) |
|
119 | repo_group_to_perm.permission = Permission.get_by_key(default_perm) | |
121 |
|
120 | |||
122 | repo_group_to_perm.group = new_group |
|
121 | repo_group_to_perm.group = new_group | |
123 | repo_group_to_perm.user = def_user |
|
122 | repo_group_to_perm.user = def_user | |
124 | return repo_group_to_perm |
|
123 | return repo_group_to_perm | |
125 |
|
124 | |||
126 | def _get_group_name_and_parent(self, group_name_full, repo_in_path=False, |
|
125 | def _get_group_name_and_parent(self, group_name_full, repo_in_path=False, | |
127 | get_object=False): |
|
126 | get_object=False): | |
128 | """ |
|
127 | """ | |
129 | Gets the group name and a parent group name from the given group name. |
 |
128 | Gets the group name and a parent group name from the given group name. | |
130 | If repo_in_path is set to true, we assume the full path also includes |
 |
129 | If repo_in_path is set to true, we assume the full path also includes | |
131 | the repo name; in that case we strip the last element. |
 |
130 | the repo name; in that case we strip the last element. | |
132 |
|
131 | |||
133 | :param group_name_full: |
|
132 | :param group_name_full: | |
134 | """ |
|
133 | """ | |
135 | split_paths = 1 |
|
134 | split_paths = 1 | |
136 | if repo_in_path: |
|
135 | if repo_in_path: | |
137 | split_paths = 2 |
|
136 | split_paths = 2 | |
138 | _parts = group_name_full.rsplit(RepoGroup.url_sep(), split_paths) |
|
137 | _parts = group_name_full.rsplit(RepoGroup.url_sep(), split_paths) | |
139 |
|
138 | |||
140 | if repo_in_path and len(_parts) > 1: |
|
139 | if repo_in_path and len(_parts) > 1: | |
141 | # in such a case the last element is the repo_name |
 |
140 | # in such a case the last element is the repo_name | |
142 | _parts.pop(-1) |
|
141 | _parts.pop(-1) | |
143 | group_name_cleaned = _parts[-1] # just the group name |
|
142 | group_name_cleaned = _parts[-1] # just the group name | |
144 | parent_repo_group_name = None |
|
143 | parent_repo_group_name = None | |
145 |
|
144 | |||
146 | if len(_parts) > 1: |
|
145 | if len(_parts) > 1: | |
147 | parent_repo_group_name = _parts[0] |
|
146 | parent_repo_group_name = _parts[0] | |
148 |
|
147 | |||
149 | parent_group = None |
|
148 | parent_group = None | |
150 | if parent_repo_group_name: |
|
149 | if parent_repo_group_name: | |
151 | parent_group = RepoGroup.get_by_group_name(parent_repo_group_name) |
|
150 | parent_group = RepoGroup.get_by_group_name(parent_repo_group_name) | |
152 |
|
151 | |||
153 | if get_object: |
|
152 | if get_object: | |
154 | return group_name_cleaned, parent_repo_group_name, parent_group |
|
153 | return group_name_cleaned, parent_repo_group_name, parent_group | |
155 |
|
154 | |||
156 | return group_name_cleaned, parent_repo_group_name |
|
155 | return group_name_cleaned, parent_repo_group_name | |
157 |
|
156 | |||
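For reference, the rsplit-based parsing above behaves like this; a standalone sketch with '/' standing in for RepoGroup.url_sep():

    full = 'parent/child/repo'

    # repo_in_path=True: split off up to two trailing elements, then drop the repo
    parts = full.rsplit('/', 2)   # ['parent', 'child', 'repo']
    parts.pop(-1)                 # remove 'repo'
    group_name = parts[-1]        # just the group name
    parent_name = parts[0] if len(parts) > 1 else None
    assert (group_name, parent_name) == ('child', 'parent')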
158 | def check_exist_filesystem(self, group_name, exc_on_failure=True): |
|
157 | def check_exist_filesystem(self, group_name, exc_on_failure=True): | |
159 | create_path = os.path.join(self.repos_path, group_name) |
|
158 | create_path = os.path.join(self.repos_path, group_name) | |
160 | log.debug('creating new group in %s', create_path) |
|
159 | log.debug('creating new group in %s', create_path) | |
161 |
|
160 | |||
162 | if os.path.isdir(create_path): |
|
161 | if os.path.isdir(create_path): | |
163 | if exc_on_failure: |
|
162 | if exc_on_failure: | |
164 | abs_create_path = os.path.abspath(create_path) |
|
163 | abs_create_path = os.path.abspath(create_path) | |
165 | raise Exception(f'Directory `{abs_create_path}` already exists !') |
|
164 | raise Exception(f'Directory `{abs_create_path}` already exists !') | |
166 | return False |
|
165 | return False | |
167 | return True |
|
166 | return True | |
168 |
|
167 | |||
169 | def _create_group(self, group_name): |
|
168 | def _create_group(self, group_name): | |
170 | """ |
|
169 | """ | |
171 | makes repository group on filesystem |
|
170 | makes repository group on filesystem | |
172 |
|
171 | |||
173 | :param repo_name: |
|
172 | :param repo_name: | |
174 | :param parent_id: |
|
173 | :param parent_id: | |
175 | """ |
|
174 | """ | |
176 |
|
175 | |||
177 | self.check_exist_filesystem(group_name) |
|
176 | self.check_exist_filesystem(group_name) | |
178 | create_path = os.path.join(self.repos_path, group_name) |
|
177 | create_path = os.path.join(self.repos_path, group_name) | |
179 | log.debug('creating new group in %s', create_path) |
|
178 | log.debug('creating new group in %s', create_path) | |
180 | os.makedirs(create_path, mode=0o755) |
|
179 | os.makedirs(create_path, mode=0o755) | |
181 | log.debug('created group in %s', create_path) |
|
180 | log.debug('created group in %s', create_path) | |
182 |
|
181 | |||
183 | def _rename_group(self, old, new): |
|
182 | def _rename_group(self, old, new): | |
184 | """ |
|
183 | """ | |
185 | Renames a group on filesystem |
|
184 | Renames a group on filesystem | |
186 |
|
185 | |||
187 | :param group_name: |
|
186 | :param group_name: | |
188 | """ |
|
187 | """ | |
189 |
|
188 | |||
190 | if old == new: |
|
189 | if old == new: | |
191 | log.debug('skipping group rename') |
|
190 | log.debug('skipping group rename') | |
192 | return |
|
191 | return | |
193 |
|
192 | |||
194 | log.debug('renaming repository group from %s to %s', old, new) |
|
193 | log.debug('renaming repository group from %s to %s', old, new) | |
195 |
|
194 | |||
196 | old_path = os.path.join(self.repos_path, old) |
|
195 | old_path = os.path.join(self.repos_path, old) | |
197 | new_path = os.path.join(self.repos_path, new) |
|
196 | new_path = os.path.join(self.repos_path, new) | |
198 |
|
197 | |||
199 | log.debug('renaming repos paths from %s to %s', old_path, new_path) |
|
198 | log.debug('renaming repos paths from %s to %s', old_path, new_path) | |
200 |
|
199 | |||
201 | if os.path.isdir(new_path): |
|
200 | if os.path.isdir(new_path): | |
202 | raise Exception('Was trying to rename to already ' |
|
201 | raise Exception('Was trying to rename to already ' | |
203 | 'existing dir %s' % new_path) |
|
202 | 'existing dir %s' % new_path) | |
204 | shutil.move(old_path, new_path) |
|
203 | shutil.move(old_path, new_path) | |
205 |
|
204 | |||
206 | def _delete_filesystem_group(self, group, force_delete=False): |
|
205 | def _delete_filesystem_group(self, group, force_delete=False): | |
207 | """ |
|
206 | """ | |
208 | Deletes a group from a filesystem |
|
207 | Deletes a group from a filesystem | |
209 |
|
208 | |||
210 | :param group: instance of group from database |
|
209 | :param group: instance of group from database | |
211 | :param force_delete: use shutil rmtree to remove all objects |
|
210 | :param force_delete: use shutil rmtree to remove all objects | |
212 | """ |
|
211 | """ | |
213 | paths = group.full_path.split(RepoGroup.url_sep()) |
|
212 | paths = group.full_path.split(RepoGroup.url_sep()) | |
214 | paths = os.sep.join(paths) |
|
213 | paths = os.sep.join(paths) | |
215 |
|
214 | |||
216 | rm_path = os.path.join(self.repos_path, paths) |
|
215 | rm_path = os.path.join(self.repos_path, paths) | |
217 | log.info("Removing group %s", rm_path) |
|
216 | log.info("Removing group %s", rm_path) | |
218 | # delete only if that path really exists |
|
217 | # delete only if that path really exists | |
219 | if os.path.isdir(rm_path): |
|
218 | if os.path.isdir(rm_path): | |
220 | if force_delete: |
|
219 | if force_delete: | |
221 | shutil.rmtree(rm_path) |
|
220 | shutil.rmtree(rm_path) | |
222 | else: |
|
221 | else: | |
223 | # archive that group |
 |
222 | # archive that group | |
224 | _now = datetime.datetime.now() |
|
223 | _now = datetime.datetime.now() | |
225 | _ms = str(_now.microsecond).rjust(6, '0') |
|
224 | _ms = str(_now.microsecond).rjust(6, '0') | |
226 | _d = 'rm__{}_GROUP_{}'.format( |
|
225 | _d = 'rm__{}_GROUP_{}'.format( | |
227 | _now.strftime('%Y%m%d_%H%M%S_' + _ms), group.name) |
|
226 | _now.strftime('%Y%m%d_%H%M%S_' + _ms), group.name) | |
228 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) |
|
227 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) | |
229 |
|
228 | |||
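The non-force branch above deletes nothing; it renames the group directory to an archival name that later cleanup can remove. A sketch of the name it produces (the group name and timestamp are illustrative):

    import datetime

    _now = datetime.datetime.now()
    _ms = str(_now.microsecond).rjust(6, '0')
    archived = 'rm__{}_GROUP_{}'.format(_now.strftime('%Y%m%d_%H%M%S_' + _ms), 'mygroup')
    # e.g. 'rm__20240917_143501_000042_GROUP_mygroup'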
230 | def create(self, group_name, group_description, owner, just_db=False, |
|
229 | def create(self, group_name, group_description, owner, just_db=False, | |
231 | copy_permissions=False, personal=None, commit_early=True): |
|
230 | copy_permissions=False, personal=None, commit_early=True): | |
232 |
|
231 | |||
233 | (group_name_cleaned, |
|
232 | (group_name_cleaned, | |
234 | parent_group_name) = RepoGroupModel()._get_group_name_and_parent(group_name) |
|
233 | parent_group_name) = RepoGroupModel()._get_group_name_and_parent(group_name) | |
235 |
|
234 | |||
236 | parent_group = None |
|
235 | parent_group = None | |
237 | if parent_group_name: |
|
236 | if parent_group_name: | |
238 | parent_group = self._get_repo_group(parent_group_name) |
|
237 | parent_group = self._get_repo_group(parent_group_name) | |
239 | if not parent_group: |
|
238 | if not parent_group: | |
240 | # we tried to create a nested group, but the parent does not |
 |
239 | # we tried to create a nested group, but the parent does not | |
241 | # exist yet |
 |
240 | # exist yet | |
242 | raise ValueError( |
|
241 | raise ValueError( | |
243 | 'Parent group `%s` given in `%s` group name ' |
|
242 | 'Parent group `%s` given in `%s` group name ' | |
244 | 'does not exist yet.' % (parent_group_name, group_name)) |
 |
243 | 'does not exist yet.' % (parent_group_name, group_name)) | |
245 |
|
244 | |||
246 | # because we are doing a cleanup, we need to check if such directory |
|
245 | # because we are doing a cleanup, we need to check if such directory | |
247 | # already exists. If we don't do that we can accidentally delete an |
 |
246 | # already exists. If we don't do that we can accidentally delete an | |
248 | # existing directory via cleanup, which can cause data issues, since |
 |
247 | # existing directory via cleanup, which can cause data issues, since | |
249 | # delete does a folder rename to a special syntax that later cleanup |
 |
248 | # delete does a folder rename to a special syntax that later cleanup | |
250 | # functions can then delete |
 |
249 | # functions can then delete | |
251 | cleanup_group = self.check_exist_filesystem(group_name, |
|
250 | cleanup_group = self.check_exist_filesystem(group_name, | |
252 | exc_on_failure=False) |
|
251 | exc_on_failure=False) | |
253 | user = self._get_user(owner) |
|
252 | user = self._get_user(owner) | |
254 | if not user: |
|
253 | if not user: | |
255 | raise ValueError(f'Owner {owner} not found as rhodecode user') |
 |
254 | raise ValueError(f'Owner {owner} not found as rhodecode user') | |
256 |
|
255 | |||
257 | try: |
|
256 | try: | |
258 | new_repo_group = RepoGroup() |
|
257 | new_repo_group = RepoGroup() | |
259 | new_repo_group.user = user |
|
258 | new_repo_group.user = user | |
260 | new_repo_group.group_description = group_description or group_name |
|
259 | new_repo_group.group_description = group_description or group_name | |
261 | new_repo_group.parent_group = parent_group |
|
260 | new_repo_group.parent_group = parent_group | |
262 | new_repo_group.group_name = group_name |
|
261 | new_repo_group.group_name = group_name | |
263 | new_repo_group.personal = personal |
|
262 | new_repo_group.personal = personal | |
264 |
|
263 | |||
265 | self.sa.add(new_repo_group) |
|
264 | self.sa.add(new_repo_group) | |
266 |
|
265 | |||
267 | # create an ADMIN permission for owner except if we're super admin, |
|
266 | # create an ADMIN permission for owner except if we're super admin, | |
268 | # later owner should go into the owner field of groups |
|
267 | # later owner should go into the owner field of groups | |
269 | if not user.is_admin: |
|
268 | if not user.is_admin: | |
270 | self.grant_user_permission(repo_group=new_repo_group, |
|
269 | self.grant_user_permission(repo_group=new_repo_group, | |
271 | user=owner, perm='group.admin') |
|
270 | user=owner, perm='group.admin') | |
272 |
|
271 | |||
273 | if parent_group and copy_permissions: |
|
272 | if parent_group and copy_permissions: | |
274 | # copy permissions from parent |
|
273 | # copy permissions from parent | |
275 | user_perms = UserRepoGroupToPerm.query() \ |
|
274 | user_perms = UserRepoGroupToPerm.query() \ | |
276 | .filter(UserRepoGroupToPerm.group == parent_group).all() |
|
275 | .filter(UserRepoGroupToPerm.group == parent_group).all() | |
277 |
|
276 | |||
278 | group_perms = UserGroupRepoGroupToPerm.query() \ |
|
277 | group_perms = UserGroupRepoGroupToPerm.query() \ | |
279 | .filter(UserGroupRepoGroupToPerm.group == parent_group).all() |
|
278 | .filter(UserGroupRepoGroupToPerm.group == parent_group).all() | |
280 |
|
279 | |||
281 | for perm in user_perms: |
|
280 | for perm in user_perms: | |
282 | # don't copy over the permission for the user who is creating |
 |
281 | # don't copy over the permission for the user who is creating | |
283 | # this group, if he is not super admin he gets the admin |
 |
282 | # this group, if he is not super admin he gets the admin | |
284 | # permission set above |
|
283 | # permission set above | |
285 | if perm.user != user or user.is_admin: |
|
284 | if perm.user != user or user.is_admin: | |
286 | UserRepoGroupToPerm.create( |
|
285 | UserRepoGroupToPerm.create( | |
287 | perm.user, new_repo_group, perm.permission) |
|
286 | perm.user, new_repo_group, perm.permission) | |
288 |
|
287 | |||
289 | for perm in group_perms: |
|
288 | for perm in group_perms: | |
290 | UserGroupRepoGroupToPerm.create( |
|
289 | UserGroupRepoGroupToPerm.create( | |
291 | perm.users_group, new_repo_group, perm.permission) |
|
290 | perm.users_group, new_repo_group, perm.permission) | |
292 | else: |
|
291 | else: | |
293 | perm_obj = self._create_default_perms(new_repo_group) |
|
292 | perm_obj = self._create_default_perms(new_repo_group) | |
294 | self.sa.add(perm_obj) |
|
293 | self.sa.add(perm_obj) | |
295 |
|
294 | |||
296 | # now commit the changes early, so we are sure everything is in |
 |
295 | # now commit the changes early, so we are sure everything is in | |
297 | # the database. |
|
296 | # the database. | |
298 | if commit_early: |
|
297 | if commit_early: | |
299 | self.sa.commit() |
|
298 | self.sa.commit() | |
300 | if not just_db: |
|
299 | if not just_db: | |
301 | self._create_group(new_repo_group.group_name) |
|
300 | self._create_group(new_repo_group.group_name) | |
302 |
|
301 | |||
303 | # trigger the post hook |
|
302 | # trigger the post hook | |
304 | from rhodecode.lib import hooks_base |
|
303 | from rhodecode.lib import hooks_base | |
305 | repo_group = RepoGroup.get_by_group_name(group_name) |
|
304 | repo_group = RepoGroup.get_by_group_name(group_name) | |
306 |
|
305 | |||
307 | # update repo group commit caches initially |
|
306 | # update repo group commit caches initially | |
308 | repo_group.update_commit_cache() |
|
307 | repo_group.update_commit_cache() | |
309 |
|
308 | |||
310 | hooks_base.create_repository_group( |
|
309 | hooks_base.create_repository_group( | |
311 | created_by=user.username, **repo_group.get_dict()) |
|
310 | created_by=user.username, **repo_group.get_dict()) | |
312 |
|
311 | |||
313 | # Trigger create event. |
|
312 | # Trigger create event. | |
314 | events.trigger(events.RepoGroupCreateEvent(repo_group)) |
|
313 | events.trigger(events.RepoGroupCreateEvent(repo_group)) | |
315 |
|
314 | |||
316 | return new_repo_group |
|
315 | return new_repo_group | |
317 | except Exception: |
|
316 | except Exception: | |
318 | self.sa.rollback() |
|
317 | self.sa.rollback() | |
319 | log.exception('Exception occurred when creating repository group, ' |
|
318 | log.exception('Exception occurred when creating repository group, ' | |
320 | 'doing cleanup...') |
|
319 | 'doing cleanup...') | |
321 | # rollback things manually ! |
|
320 | # rollback things manually ! | |
322 | repo_group = RepoGroup.get_by_group_name(group_name) |
|
321 | repo_group = RepoGroup.get_by_group_name(group_name) | |
323 | if repo_group: |
|
322 | if repo_group: | |
324 | RepoGroup.delete(repo_group.group_id) |
|
323 | RepoGroup.delete(repo_group.group_id) | |
325 | self.sa.commit() |
|
324 | self.sa.commit() | |
326 | if cleanup_group: |
|
325 | if cleanup_group: | |
327 | RepoGroupModel()._delete_filesystem_group(repo_group) |
|
326 | RepoGroupModel()._delete_filesystem_group(repo_group) | |
328 | raise |
|
327 | raise | |
329 |
|
328 | |||
330 | def update_permissions( |
|
329 | def update_permissions( | |
331 | self, repo_group, perm_additions=None, perm_updates=None, |
|
330 | self, repo_group, perm_additions=None, perm_updates=None, | |
332 | perm_deletions=None, recursive=None, check_perms=True, |
|
331 | perm_deletions=None, recursive=None, check_perms=True, | |
333 | cur_user=None): |
|
332 | cur_user=None): | |
334 | from rhodecode.model.repo import RepoModel |
|
333 | from rhodecode.model.repo import RepoModel | |
335 | from rhodecode.lib.auth import HasUserGroupPermissionAny |
|
334 | from rhodecode.lib.auth import HasUserGroupPermissionAny | |
336 |
|
335 | |||
337 | if not perm_additions: |
|
336 | if not perm_additions: | |
338 | perm_additions = [] |
|
337 | perm_additions = [] | |
339 | if not perm_updates: |
|
338 | if not perm_updates: | |
340 | perm_updates = [] |
|
339 | perm_updates = [] | |
341 | if not perm_deletions: |
|
340 | if not perm_deletions: | |
342 | perm_deletions = [] |
|
341 | perm_deletions = [] | |
343 |
|
342 | |||
344 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') |
|
343 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') | |
345 |
|
344 | |||
346 | changes = { |
|
345 | changes = { | |
347 | 'added': [], |
|
346 | 'added': [], | |
348 | 'updated': [], |
|
347 | 'updated': [], | |
349 | 'deleted': [], |
|
348 | 'deleted': [], | |
350 | 'default_user_changed': None |
|
349 | 'default_user_changed': None | |
351 | } |
|
350 | } | |
352 |
|
351 | |||
353 | def _set_perm_user(obj, user, perm): |
|
352 | def _set_perm_user(_obj: RepoGroup | Repository, _user_obj: User, _perm): | |
354 | if isinstance(obj, RepoGroup): |
|
353 | ||
355 | self.grant_user_permission( |
|
354 | if isinstance(_obj, RepoGroup): | |
356 | repo_group=obj, user=user, perm=perm) |
 |
355 | self.grant_user_permission(repo_group=_obj, user=_user_obj, perm=_perm) | |
357 | elif isinstance(obj, Repository): |
|
356 | elif isinstance(_obj, Repository): | |
358 | # private repos will not allow changing the default |
 |
357 | # private repos will not allow changing the default | |
359 | # permissions using recursive mode |
|
358 | # permissions using recursive mode | |
360 | if obj.private and user == User.DEFAULT_USER: |
|
359 | if _obj.private and _user_obj.username == User.DEFAULT_USER: | |
|
360 | log.debug('Skipping private repo %s for user %s', _obj, _user_obj) | |||
361 | return |
|
361 | return | |
362 |
|
362 | |||
363 | # we set group permission but we have to switch to repo |
|
363 | # we set group permission, we have to switch to repo permission definition | |
364 | # permission |
|
364 | new_perm = _perm.replace('group.', 'repository.') | |
365 | perm = perm.replace('group.', 'repository.') |
|
365 | RepoModel().grant_user_permission(repo=_obj, user=_user_obj, perm=new_perm) | |
366 | RepoModel().grant_user_permission( |
|
366 | ||
367 | repo=obj, user=user, perm=perm) |
|
367 | def _set_perm_group(_obj: RepoGroup | Repository, users_group: UserGroup, _perm): | |
|
368 | if isinstance(_obj, RepoGroup): | |||
|
369 | self.grant_user_group_permission(repo_group=_obj, group_name=users_group, perm=_perm) | |||
|
370 | elif isinstance(_obj, Repository): | |||
|
371 | # we set group permission, we have to switch to repo permission definition | |||
|
372 | new_perm = _perm.replace('group.', 'repository.') | |||
|
373 | RepoModel().grant_user_group_permission(repo=_obj, group_name=users_group, perm=new_perm) | |||
368 |
|
374 | |||
369 | def _set_perm_group(obj, users_group, perm): |
|
375 | def _revoke_perm_user(_obj: RepoGroup | Repository, _user_obj: User): | |
370 | if isinstance(obj, RepoGroup): |
|
376 | if isinstance(_obj, RepoGroup): | |
371 | self.grant_user_group_permission( |
|
377 | self.revoke_user_permission(repo_group=_obj, user=_user_obj) | |
372 | repo_group=obj, group_name=users_group, perm=perm) |
|
378 | elif isinstance(_obj, Repository): | |
373 | elif isinstance(obj, Repository): |
|
379 | # private repos will not allow changing the default |
374 | # we set group permission but we have to switch to repo |
|
380 | # permissions using recursive mode, also there's no revocation for the default user, just update |
375 | # permission |
|
381 | if _user_obj.username == User.DEFAULT_USER: | |
376 | perm = perm.replace('group.', 'repository.') |
|
382 | log.debug('Skipping private repo %s for user %s', _obj, _user_obj) | |
377 | RepoModel().grant_user_group_permission( |
|
383 | return | |
378 | repo=obj, group_name=users_group, perm=perm) |
|
384 | RepoModel().revoke_user_permission(repo=_obj, user=_user_obj) | |
379 |
|
385 | |||
380 | def _revoke_perm_user(obj, user): |
|
386 | def _revoke_perm_group(_obj: RepoGroup | Repository, user_group: UserGroup): | |
381 | if isinstance(obj, RepoGroup): |
|
387 | if isinstance(_obj, RepoGroup): | |
382 | self.revoke_user_permission(repo_group=obj, user=user) |
|
388 | self.revoke_user_group_permission(repo_group=_obj, group_name=user_group) | |
383 | elif isinstance(obj, Repository): |
|
389 | elif isinstance(_obj, Repository): | |
384 | RepoModel().revoke_user_permission(repo=obj, user=user) |
|
390 | RepoModel().revoke_user_group_permission(repo=_obj, group_name=user_group) | |
385 |
|
||||
386 | def _revoke_perm_group(obj, user_group): |
|
|||
387 | if isinstance(obj, RepoGroup): |
|
|||
388 | self.revoke_user_group_permission( |
|
|||
389 | repo_group=obj, group_name=user_group) |
|
|||
390 | elif isinstance(obj, Repository): |
|
|||
391 | RepoModel().revoke_user_group_permission( |
|
|||
392 | repo=obj, group_name=user_group) |
|
|||
393 |
|
391 | |||
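Both the old and the rewritten helpers above rely on the same convention: a repo-group permission maps onto the matching repository permission by a plain prefix swap. A minimal sketch:

    def to_repo_perm(group_perm: str) -> str:
        # 'group.read' / 'group.write' / 'group.admin' become the
        # corresponding 'repository.*' permission for a Repository target
        return group_perm.replace('group.', 'repository.')

    assert to_repo_perm('group.write') == 'repository.write'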
394 | # start updates |
|
392 | # start updates | |
395 | log.debug('Now updating permissions for %s in recursive mode:%s', |
|
393 | log.debug('Now updating permissions for %s in recursive mode:%s', | |
396 | repo_group, recursive) |
|
394 | repo_group, recursive) | |
397 |
|
395 | |||
398 | # initialize check function, we'll call that multiple times |
|
396 | # initialize check function, we'll call that multiple times | |
399 | has_group_perm = HasUserGroupPermissionAny(*req_perms) |
|
397 | has_group_perm = HasUserGroupPermissionAny(*req_perms) | |
400 |
|
398 | |||
401 | for obj in repo_group.recursive_groups_and_repos(): |
|
399 | for obj in repo_group.recursive_groups_and_repos(): | |
402 | # iterated obj is an instance of a repos group or repository in |
|
400 | # iterated obj is an instance of a repos group or repository in | |
403 | # that group, recursive option can be: none, repos, groups, all |
|
401 | # that group, recursive option can be: none, repos, groups, all | |
404 | if recursive == 'all': |
|
402 | if recursive == 'all': | |
405 | obj = obj |
|
403 | obj = obj | |
406 | elif recursive == 'repos': |
|
404 | elif recursive == 'repos': | |
407 | # skip groups, other than this one |
|
405 | # skip groups, other than this one | |
408 | if isinstance(obj, RepoGroup) and not obj == repo_group: |
|
406 | if isinstance(obj, RepoGroup) and not obj == repo_group: | |
409 | continue |
|
407 | continue | |
410 | elif recursive == 'groups': |
|
408 | elif recursive == 'groups': | |
411 | # skip repos |
|
409 | # skip repos | |
412 | if isinstance(obj, Repository): |
|
410 | if isinstance(obj, Repository): | |
413 | continue |
|
411 | continue | |
414 | else: # recursive == 'none': |
|
412 | else: # recursive == 'none': | |
415 | # DEFAULT option - don't apply to iterated objects |
|
413 | # DEFAULT option - don't apply to iterated objects | |
416 | # also we do a break at the end of this loop if we are not |
 |
414 | # also we do a break at the end of this loop if we are not | |
417 | # in recursive mode |
|
415 | # in recursive mode | |
418 | obj = repo_group |
|
416 | obj = repo_group | |
419 |
|
417 | |||
420 | change_obj = obj.get_api_data() |
|
418 | change_obj = obj.get_api_data() | |
421 |
|
419 | |||
422 | # update permissions |
|
420 | # update permissions | |
423 | for member_id, perm, member_type in perm_updates: |
|
421 | for member_id, perm, member_type in perm_updates: | |
424 | member_id = int(member_id) |
|
422 | member_id = int(member_id) | |
425 | if member_type == 'user': |
|
423 | if member_type == 'user': | |
426 | member_name = User.get(member_id).username |
|
424 | member_obj = User.get(member_id) | |
|
425 | member_name = member_obj.username | |||
427 | if isinstance(obj, RepoGroup) and obj == repo_group and member_name == User.DEFAULT_USER: |
|
426 | if isinstance(obj, RepoGroup) and obj == repo_group and member_name == User.DEFAULT_USER: | |
428 | # NOTE(dan): detect if we changed permissions for default user |
|
427 | # NOTE(dan): detect if we changed permissions for default user | |
429 | perm_obj = self.sa.query(UserRepoGroupToPerm) \ |
|
428 | perm_obj = self.sa.query(UserRepoGroupToPerm) \ | |
430 | .filter(UserRepoGroupToPerm.user_id == member_id) \ |
|
429 | .filter(UserRepoGroupToPerm.user_id == member_id) \ | |
431 | .filter(UserRepoGroupToPerm.group == repo_group) \ |
|
430 | .filter(UserRepoGroupToPerm.group == repo_group) \ | |
432 | .scalar() |
|
431 | .scalar() | |
433 | if perm_obj and perm_obj.permission.permission_name != perm: |
|
432 | if perm_obj and perm_obj.permission.permission_name != perm: | |
434 | changes['default_user_changed'] = True |
|
433 | changes['default_user_changed'] = True | |
435 |
|
434 | |||
436 | # this updates also current one if found |
|
435 | # this updates also current one if found | |
437 | _set_perm_user(obj, user=member_id, perm=perm) |
|
436 | _set_perm_user(obj, member_obj, perm) | |
438 | elif member_type == 'user_group': |
|
437 | elif member_type == 'user_group': | |
439 | member_name = UserGroup.get(member_id).users_group_name |
|
438 | member_obj = UserGroup.get(member_id) | |
440 | if not check_perms or has_group_perm(member_name, |
|
439 | member_name = member_obj.users_group_name | |
441 | user=cur_user): |
|
440 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
442 | _set_perm_group(obj, users_group=member_id, perm=perm) |
|
441 | _set_perm_group(obj, member_obj, perm) | |
443 | else: |
|
442 | else: | |
444 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
443 | raise ValueError( | |
445 | "got {} instead".format(member_type)) |
|
444 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
|
445 | ) | |||
446 |
|
446 | |||
447 | changes['updated'].append( |
|
447 | changes['updated'].append( | |
448 | {'change_obj': change_obj, 'type': member_type, |
|
448 | {'change_obj': change_obj, 'type': member_type, | |
449 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
449 | 'id': member_id, 'name': member_name, 'new_perm': perm}) | |
450 |
|
450 | |||
451 | # set new permissions |
|
451 | # set new permissions | |
452 | for member_id, perm, member_type in perm_additions: |
|
452 | for member_id, perm, member_type in perm_additions: | |
453 | member_id = int(member_id) |
|
453 | member_id = int(member_id) | |
454 | if member_type == 'user': |
|
454 | if member_type == 'user': | |
455 | member_name = User.get(member_id).username |
|
455 | member_obj = User.get(member_id) | |
456 | _set_perm_user(obj, user=member_id, perm=perm) |
|
456 | member_name = member_obj.username | |
|
457 | _set_perm_user(obj, member_obj, perm) | |||
457 | elif member_type == 'user_group': |
|
458 | elif member_type == 'user_group': | |
458 | # check if we have permissions to alter this usergroup |
|
459 | # check if we have permissions to alter this usergroup | |
459 | member_name = UserGroup.get(member_id).users_group_name |
|
460 | member_obj = UserGroup.get(member_id) | |
460 | if not check_perms or has_group_perm(member_name, |
|
461 | member_name = member_obj.users_group_name | |
461 | user=cur_user): |
|
462 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
462 |
_set_perm_group(obj, |
|
463 | _set_perm_group(obj, member_obj, perm) | |
463 | else: |
|
464 | else: | |
464 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
465 | raise ValueError( | |
465 | "got {} instead".format(member_type)) |
|
466 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
|
467 | ) | |||
466 |
|
468 | |||
467 | changes['added'].append( |
|
469 | changes['added'].append( | |
468 | {'change_obj': change_obj, 'type': member_type, |
|
470 | {'change_obj': change_obj, 'type': member_type, | |
469 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
471 | 'id': member_id, 'name': member_name, 'new_perm': perm}) | |
470 |
|
472 | |||
471 | # delete permissions |
|
473 | # delete permissions | |
472 | for member_id, perm, member_type in perm_deletions: |
|
474 | for member_id, perm, member_type in perm_deletions: | |
473 | member_id = int(member_id) |
|
475 | member_id = int(member_id) | |
474 | if member_type == 'user': |
|
476 | if member_type == 'user': | |
475 | member_name = User.get(member_id).username |
|
477 | member_obj = User.get(member_id) | |
476 | _revoke_perm_user(obj, user=member_id) |
|
478 | member_name = member_obj.username | |
|
479 | _revoke_perm_user(obj, member_obj) | |||
477 | elif member_type == 'user_group': |
|
480 | elif member_type == 'user_group': | |
478 | # check if we have permissions to alter this usergroup |
|
481 | # check if we have permissions to alter this usergroup | |
479 | member_name = UserGroup.get(member_id).users_group_name |
|
482 | member_obj = UserGroup.get(member_id) | |
480 | if not check_perms or has_group_perm(member_name, |
|
483 | member_name = member_obj.users_group_name | |
481 | user=cur_user): |
|
484 | if not check_perms or has_group_perm(member_name, user=cur_user): | |
482 | _revoke_perm_group(obj, user_group=member_id) |
|
485 | _revoke_perm_group(obj, member_obj) | |
483 | else: |
|
486 | else: | |
484 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
487 | raise ValueError( | |
485 | "got {} instead".format(member_type)) |
|
488 | f"member_type must be 'user' or 'user_group' got {member_type} instead" | |
486 |
|
489 | ) | ||
487 | changes['deleted'].append( |
|
490 | changes['deleted'].append( | |
488 | {'change_obj': change_obj, 'type': member_type, |
|
491 | {'change_obj': change_obj, 'type': member_type, | |
489 | 'id': member_id, 'name': member_name, 'new_perm': perm}) |
|
492 | 'id': member_id, 'name': member_name, 'new_perm': perm}) | |
490 |
|
493 | |||
491 | # if it's not recursive call for all,repos,groups |
|
494 | # if it's not recursive call for all,repos,groups | |
492 | # break the loop and don't proceed with other changes |
|
495 | # break the loop and don't proceed with other changes | |
493 | if recursive not in ['all', 'repos', 'groups']: |
|
496 | if recursive not in ['all', 'repos', 'groups']: | |
494 | break |
|
497 | break | |
495 |
|
498 | |||
496 | return changes |
|
499 | return changes | |
497 |
|
500 | |||
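The recursive flag in update_permissions is essentially a filter over recursive_groups_and_repos(). A condensed sketch of the same dispatch, with stub classes standing in for the ORM models above:

    class RepoGroup: pass
    class Repository: pass

    def targets(objs, root, recursive):
        # mirrors the loop above: 'all' yields everything, 'repos' only
        # repositories (plus the starting group), 'groups' only sub-groups,
        # and anything else means just the group we started from
        if recursive not in ('all', 'repos', 'groups'):
            yield root
            return
        for obj in objs:
            if recursive == 'repos' and isinstance(obj, RepoGroup) and obj is not root:
                continue
            if recursive == 'groups' and isinstance(obj, Repository):
                continue
            yield obj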
498 | def update(self, repo_group, form_data): |
|
501 | def update(self, repo_group, form_data): | |
499 | try: |
|
502 | try: | |
500 | repo_group = self._get_repo_group(repo_group) |
|
503 | repo_group = self._get_repo_group(repo_group) | |
501 | old_path = repo_group.full_path |
|
504 | old_path = repo_group.full_path | |
502 |
|
505 | |||
503 | # change properties |
|
506 | # change properties | |
504 | if 'group_description' in form_data: |
|
507 | if 'group_description' in form_data: | |
505 | repo_group.group_description = form_data['group_description'] |
|
508 | repo_group.group_description = form_data['group_description'] | |
506 |
|
509 | |||
507 | if 'enable_locking' in form_data: |
|
510 | if 'enable_locking' in form_data: | |
508 | repo_group.enable_locking = form_data['enable_locking'] |
|
511 | repo_group.enable_locking = form_data['enable_locking'] | |
509 |
|
512 | |||
510 | if 'group_parent_id' in form_data: |
|
513 | if 'group_parent_id' in form_data: | |
511 | parent_group = ( |
|
514 | parent_group = ( | |
512 | self._get_repo_group(form_data['group_parent_id'])) |
|
515 | self._get_repo_group(form_data['group_parent_id'])) | |
513 | repo_group.group_parent_id = ( |
|
516 | repo_group.group_parent_id = ( | |
514 | parent_group.group_id if parent_group else None) |
|
517 | parent_group.group_id if parent_group else None) | |
515 | repo_group.parent_group = parent_group |
|
518 | repo_group.parent_group = parent_group | |
516 |
|
519 | |||
517 | # mikhail: to update the full_path, we have to explicitly |
|
520 | # mikhail: to update the full_path, we have to explicitly | |
518 | # update group_name |
|
521 | # update group_name | |
519 | group_name = form_data.get('group_name', repo_group.name) |
|
522 | group_name = form_data.get('group_name', repo_group.name) | |
520 | repo_group.group_name = repo_group.get_new_name(group_name) |
|
523 | repo_group.group_name = repo_group.get_new_name(group_name) | |
521 |
|
524 | |||
522 | new_path = repo_group.full_path |
|
525 | new_path = repo_group.full_path | |
523 |
|
526 | |||
524 | affected_user_ids = [] |
|
527 | affected_user_ids = [] | |
525 | if 'user' in form_data: |
|
528 | if 'user' in form_data: | |
526 | old_owner_id = repo_group.user.user_id |
|
529 | old_owner_id = repo_group.user.user_id | |
527 | new_owner = User.get_by_username(form_data['user']) |
|
530 | new_owner = User.get_by_username(form_data['user']) | |
528 | repo_group.user = new_owner |
|
531 | repo_group.user = new_owner | |
529 |
|
532 | |||
530 | if old_owner_id != new_owner.user_id: |
|
533 | if old_owner_id != new_owner.user_id: | |
531 | affected_user_ids = [new_owner.user_id, old_owner_id] |
|
534 | affected_user_ids = [new_owner.user_id, old_owner_id] | |
532 |
|
535 | |||
533 | self.sa.add(repo_group) |
|
536 | self.sa.add(repo_group) | |
534 |
|
537 | |||
535 | # iterate over all members of this group and do fixes |
 |
538 | # iterate over all members of this group and do fixes | |
536 | # set locking if given |
|
539 | # set locking if given | |
537 | # if obj is a repoGroup also fix the name of the group according |
|
540 | # if obj is a repoGroup also fix the name of the group according | |
538 | # to the parent |
|
541 | # to the parent | |
539 | # if obj is a Repo fix its name |
 |
542 | # if obj is a Repo fix its name | |
540 | # this can be a potentially heavy operation |
 |
543 | # this can be a potentially heavy operation | |
541 | for obj in repo_group.recursive_groups_and_repos(): |
|
544 | for obj in repo_group.recursive_groups_and_repos(): | |
542 | # set the value from its parent |
 |
545 | # set the value from its parent | |
543 | obj.enable_locking = repo_group.enable_locking |
|
546 | obj.enable_locking = repo_group.enable_locking | |
544 | if isinstance(obj, RepoGroup): |
|
547 | if isinstance(obj, RepoGroup): | |
545 | new_name = obj.get_new_name(obj.name) |
|
548 | new_name = obj.get_new_name(obj.name) | |
546 | log.debug('Fixing group %s to new name %s', |
|
549 | log.debug('Fixing group %s to new name %s', | |
547 | obj.group_name, new_name) |
|
550 | obj.group_name, new_name) | |
548 | obj.group_name = new_name |
|
551 | obj.group_name = new_name | |
549 |
|
552 | |||
550 | elif isinstance(obj, Repository): |
|
553 | elif isinstance(obj, Repository): | |
551 | # we need to get all repositories from this new group and |
|
554 | # we need to get all repositories from this new group and | |
552 | # rename them according to the new group path |
 |
555 | # rename them according to the new group path | |
553 | new_name = obj.get_new_name(obj.just_name) |
|
556 | new_name = obj.get_new_name(obj.just_name) | |
554 | log.debug('Fixing repo %s to new name %s', |
|
557 | log.debug('Fixing repo %s to new name %s', | |
555 | obj.repo_name, new_name) |
|
558 | obj.repo_name, new_name) | |
556 | obj.repo_name = new_name |
|
559 | obj.repo_name = new_name | |
557 |
|
560 | |||
558 | self.sa.add(obj) |
|
561 | self.sa.add(obj) | |
559 |
|
562 | |||
560 | self._rename_group(old_path, new_path) |
|
563 | self._rename_group(old_path, new_path) | |
561 |
|
564 | |||
562 | # Trigger update event. |
|
565 | # Trigger update event. | |
563 | events.trigger(events.RepoGroupUpdateEvent(repo_group)) |
|
566 | events.trigger(events.RepoGroupUpdateEvent(repo_group)) | |
564 |
|
567 | |||
565 | if affected_user_ids: |
|
568 | if affected_user_ids: | |
566 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
569 | PermissionModel().trigger_permission_flush(affected_user_ids) | |
567 |
|
570 | |||
568 | return repo_group |
|
571 | return repo_group | |
569 | except Exception: |
|
572 | except Exception: | |
570 | log.error(traceback.format_exc()) |
|
573 | log.error(traceback.format_exc()) | |
571 | raise |
|
574 | raise | |
572 |
|
575 | |||
573 | def delete(self, repo_group, force_delete=False, fs_remove=True): |
|
576 | def delete(self, repo_group, force_delete=False, fs_remove=True): | |
574 | repo_group = self._get_repo_group(repo_group) |
|
577 | repo_group = self._get_repo_group(repo_group) | |
575 | if not repo_group: |
|
578 | if not repo_group: | |
576 | return False |
|
579 | return False | |
577 | try: |
|
580 | try: | |
578 | self.sa.delete(repo_group) |
|
581 | self.sa.delete(repo_group) | |
579 | if fs_remove: |
|
582 | if fs_remove: | |
580 | self._delete_filesystem_group(repo_group, force_delete) |
|
583 | self._delete_filesystem_group(repo_group, force_delete) | |
581 | else: |
|
584 | else: | |
582 | log.debug('skipping removal from filesystem') |
|
585 | log.debug('skipping removal from filesystem') | |
583 |
|
586 | |||
584 | # Trigger delete event. |
|
587 | # Trigger delete event. | |
585 | events.trigger(events.RepoGroupDeleteEvent(repo_group)) |
|
588 | events.trigger(events.RepoGroupDeleteEvent(repo_group)) | |
586 | return True |
|
589 | return True | |
587 |
|
590 | |||
588 | except Exception: |
|
591 | except Exception: | |
589 | log.error('Error removing repo_group %s', repo_group) |
|
592 | log.error('Error removing repo_group %s', repo_group) | |
590 | raise |
|
593 | raise | |
591 |
|
594 | |||
592 | def grant_user_permission(self, repo_group, user, perm): |
|
595 | def grant_user_permission(self, repo_group, user, perm): | |
593 | """ |
|
596 | """ | |
594 | Grant permission for user on given repository group, or update |
|
597 | Grant permission for user on given repository group, or update | |
595 | existing one if found |
|
598 | existing one if found | |
596 |
|
599 | |||
597 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
600 | :param repo_group: Instance of RepoGroup, repositories_group_id, | |
598 | or repositories_group name |
|
601 | or repositories_group name | |
599 | :param user: Instance of User, user_id or username |
|
602 | :param user: Instance of User, user_id or username | |
600 | :param perm: Instance of Permission, or permission_name |
|
603 | :param perm: Instance of Permission, or permission_name | |
601 | """ |
|
604 | """ | |
602 |
|
605 | |||
603 | repo_group = self._get_repo_group(repo_group) |
|
606 | repo_group = self._get_repo_group(repo_group) | |
604 | user = self._get_user(user) |
|
607 | user = self._get_user(user) | |
605 | permission = self._get_perm(perm) |
|
608 | permission = self._get_perm(perm) | |
606 |
|
609 | |||
607 | # check if we have that permission already |
|
610 | # check if we have that permission already | |
608 | obj = self.sa.query(UserRepoGroupToPerm)\ |
|
611 | obj = self.sa.query(UserRepoGroupToPerm)\ | |
609 | .filter(UserRepoGroupToPerm.user == user)\ |
|
612 | .filter(UserRepoGroupToPerm.user == user)\ | |
610 | .filter(UserRepoGroupToPerm.group == repo_group)\ |
|
613 | .filter(UserRepoGroupToPerm.group == repo_group)\ | |
611 | .scalar() |
|
614 | .scalar() | |
612 | if obj is None: |
|
615 | if obj is None: | |
613 | # create new ! |
|
616 | # create new ! | |
614 | obj = UserRepoGroupToPerm() |
|
617 | obj = UserRepoGroupToPerm() | |
615 | obj.group = repo_group |
|
618 | obj.group = repo_group | |
616 | obj.user = user |
|
619 | obj.user = user | |
617 | obj.permission = permission |
|
620 | obj.permission = permission | |
618 | self.sa.add(obj) |
|
621 | self.sa.add(obj) | |
619 | log.debug('Granted perm %s to %s on %s', perm, user, repo_group) |
|
622 | log.debug('Granted perm %s to %s on %s', perm, user, repo_group) | |
620 | action_logger_generic( |
|
623 | action_logger_generic( | |
621 | 'granted permission: {} to user: {} on repogroup: {}'.format( |
|
624 | 'granted permission: {} to user: {} on repogroup: {}'.format( | |
622 | perm, user, repo_group), namespace='security.repogroup') |
|
625 | perm, user, repo_group), namespace='security.repogroup') | |
623 | return obj |
|
626 | return obj | |
624 |
|
627 | |||
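Worth noting: grant_user_permission is an upsert, so repeated calls update the existing UserRepoGroupToPerm row instead of adding duplicates. A hypothetical usage sketch, assuming an initialized RhodeCode session (the group and user names are illustrative only):

    model = RepoGroupModel()
    model.grant_user_permission(repo_group='mygroup', user='jdoe', perm='group.write')
    # a second call simply re-points the same row at the new permission
    model.grant_user_permission(repo_group='mygroup', user='jdoe', perm='group.admin')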
625 | def revoke_user_permission(self, repo_group, user): |
|
628 | def revoke_user_permission(self, repo_group, user): | |
626 | """ |
|
629 | """ | |
627 | Revoke permission for user on given repository group |
|
630 | Revoke permission for user on given repository group | |
628 |
|
631 | |||
629 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
632 | :param repo_group: Instance of RepoGroup, repositories_group_id, | |
630 | or repositories_group name |
|
633 | or repositories_group name | |
631 | :param user: Instance of User, user_id or username |
|
634 | :param user: Instance of User, user_id or username | |
632 | """ |
|
635 | """ | |
633 |
|
636 | |||
634 | repo_group = self._get_repo_group(repo_group) |
|
637 | repo_group = self._get_repo_group(repo_group) | |
635 | user = self._get_user(user) |
|
638 | user = self._get_user(user) | |
636 |
|
639 | |||
637 | obj = self.sa.query(UserRepoGroupToPerm)\ |
|
640 | obj = self.sa.query(UserRepoGroupToPerm)\ | |
638 | .filter(UserRepoGroupToPerm.user == user)\ |
|
641 | .filter(UserRepoGroupToPerm.user == user)\ | |
639 | .filter(UserRepoGroupToPerm.group == repo_group)\ |
|
642 | .filter(UserRepoGroupToPerm.group == repo_group)\ | |
640 | .scalar() |
|
643 | .scalar() | |
641 | if obj: |
|
644 | if obj: | |
642 | self.sa.delete(obj) |
|
645 | self.sa.delete(obj) | |
643 | log.debug('Revoked perm on %s on %s', repo_group, user) |
|
646 | log.debug('Revoked perm on %s on %s', repo_group, user) | |
644 | action_logger_generic( |
|
647 | action_logger_generic( | |
645 | 'revoked permission from user: {} on repogroup: {}'.format( |
|
648 | 'revoked permission from user: {} on repogroup: {}'.format( | |
646 | user, repo_group), namespace='security.repogroup') |
|
649 | user, repo_group), namespace='security.repogroup') | |
647 |
|
650 | |||
648 | def grant_user_group_permission(self, repo_group, group_name, perm): |
|
651 | def grant_user_group_permission(self, repo_group, group_name, perm): | |
649 | """ |
|
652 | """ | |
650 | Grant permission for user group on given repository group, or update |
|
653 | Grant permission for user group on given repository group, or update | |
651 | existing one if found |
|
654 | existing one if found | |
652 |
|
655 | |||
653 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
656 | :param repo_group: Instance of RepoGroup, repositories_group_id, | |
654 | or repositories_group name |
|
657 | or repositories_group name | |
655 | :param group_name: Instance of UserGroup, users_group_id, |
|
658 | :param group_name: Instance of UserGroup, users_group_id, | |
656 | or user group name |
|
659 | or user group name | |
657 | :param perm: Instance of Permission, or permission_name |
|
660 | :param perm: Instance of Permission, or permission_name | |
658 | """ |
|
661 | """ | |
659 | repo_group = self._get_repo_group(repo_group) |
|
662 | repo_group = self._get_repo_group(repo_group) | |
660 | group_name = self._get_user_group(group_name) |
|
663 | group_name = self._get_user_group(group_name) | |
661 | permission = self._get_perm(perm) |
|
664 | permission = self._get_perm(perm) | |
662 |
|
665 | |||
663 | # check if we have that permission already |
|
666 | # check if we have that permission already | |
664 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ |
|
667 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ | |
665 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ |
|
668 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ | |
666 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ |
|
669 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ | |
667 | .scalar() |
|
670 | .scalar() | |
668 |
|
671 | |||
669 | if obj is None: |
|
672 | if obj is None: | |
670 | # create new |
|
673 | # create new | |
671 | obj = UserGroupRepoGroupToPerm() |
|
674 | obj = UserGroupRepoGroupToPerm() | |
672 |
|
675 | |||
673 | obj.group = repo_group |
|
676 | obj.group = repo_group | |
674 | obj.users_group = group_name |
|
677 | obj.users_group = group_name | |
675 | obj.permission = permission |
|
678 | obj.permission = permission | |
676 | self.sa.add(obj) |
|
679 | self.sa.add(obj) | |
677 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo_group) |
|
680 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo_group) | |
678 | action_logger_generic( |
|
681 | action_logger_generic( | |
679 | 'granted permission: {} to usergroup: {} on repogroup: {}'.format( |
|
682 | 'granted permission: {} to usergroup: {} on repogroup: {}'.format( | |
680 | perm, group_name, repo_group), namespace='security.repogroup') |
|
683 | perm, group_name, repo_group), namespace='security.repogroup') | |
681 | return obj |
|
684 | return obj | |
682 |
|
685 | |||
683 | def revoke_user_group_permission(self, repo_group, group_name): |
|
686 | def revoke_user_group_permission(self, repo_group, group_name): | |
684 | """ |
|
687 | """ | |
685 | Revoke permission for user group on given repository group |
|
688 | Revoke permission for user group on given repository group | |
686 |
|
689 | |||
687 | :param repo_group: Instance of RepoGroup, repositories_group_id, |
|
690 | :param repo_group: Instance of RepoGroup, repositories_group_id, | |
688 | or repositories_group name |
|
691 | or repositories_group name | |
689 | :param group_name: Instance of UserGroup, users_group_id, |
|
692 | :param group_name: Instance of UserGroup, users_group_id, | |
690 | or user group name |
|
693 | or user group name | |
691 | """ |
|
694 | """ | |
692 | repo_group = self._get_repo_group(repo_group) |
|
695 | repo_group = self._get_repo_group(repo_group) | |
693 | group_name = self._get_user_group(group_name) |
|
696 | group_name = self._get_user_group(group_name) | |
694 |
|
697 | |||
695 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ |
|
698 | obj = self.sa.query(UserGroupRepoGroupToPerm)\ | |
696 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ |
|
699 | .filter(UserGroupRepoGroupToPerm.group == repo_group)\ | |
697 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ |
|
700 | .filter(UserGroupRepoGroupToPerm.users_group == group_name)\ | |
698 | .scalar() |
|
701 | .scalar() | |
699 | if obj: |
|
702 | if obj: | |
700 | self.sa.delete(obj) |
|
703 | self.sa.delete(obj) | |
701 | log.debug('Revoked perm to %s on %s', repo_group, group_name) |
|
704 | log.debug('Revoked perm to %s on %s', repo_group, group_name) | |
702 | action_logger_generic( |
|
705 | action_logger_generic( | |
703 | 'revoked permission from usergroup: {} on repogroup: {}'.format( |
|
706 | 'revoked permission from usergroup: {} on repogroup: {}'.format( | |
704 | group_name, repo_group), namespace='security.repogroup') |
|
707 | group_name, repo_group), namespace='security.repogroup') | |
705 |
|
708 | |||
706 | @classmethod |
|
709 | @classmethod | |
707 | def update_commit_cache(cls, repo_groups=None): |
|
710 | def update_commit_cache(cls, repo_groups=None): | |
708 | if not repo_groups: |
|
711 | if not repo_groups: | |
709 | repo_groups = RepoGroup.getAll() |
|
712 | repo_groups = RepoGroup.getAll() | |
710 | for repo_group in repo_groups: |
|
713 | for repo_group in repo_groups: | |
711 | repo_group.update_commit_cache() |
|
714 | repo_group.update_commit_cache() | |
712 |
|
715 | |||
713 | def get_repo_groups_as_dict(self, repo_group_list=None, admin=False, |
|
716 | def get_repo_groups_as_dict(self, repo_group_list=None, admin=False, | |
714 | super_user_actions=False): |
|
717 | super_user_actions=False): | |
715 |
|
718 | |||
716 | from pyramid.threadlocal import get_current_request |
|
719 | from pyramid.threadlocal import get_current_request | |
717 | _render = get_current_request().get_partial_renderer( |
|
720 | _render = get_current_request().get_partial_renderer( | |
718 | 'rhodecode:templates/data_table/_dt_elements.mako') |
|
721 | 'rhodecode:templates/data_table/_dt_elements.mako') | |
719 | c = _render.get_call_context() |
|
722 | c = _render.get_call_context() | |
720 | h = _render.get_helpers() |
|
723 | h = _render.get_helpers() | |
721 |
|
724 | |||
722 | def quick_menu(repo_group_name): |
|
725 | def quick_menu(repo_group_name): | |
723 | return _render('quick_repo_group_menu', repo_group_name) |
|
726 | return _render('quick_repo_group_menu', repo_group_name) | |
724 |
|
727 | |||
725 | def repo_group_lnk(repo_group_name): |
|
728 | def repo_group_lnk(repo_group_name): | |
726 | return _render('repo_group_name', repo_group_name) |
|
729 | return _render('repo_group_name', repo_group_name) | |
727 |
|
730 | |||
728 | def last_change(last_change): |
|
731 | def last_change(last_change): | |
729 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: |
|
732 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: | |
730 | ts = time.time() |
|
733 | ts = time.time() | |
731 | utc_offset = (datetime.datetime.fromtimestamp(ts) |
|
734 | utc_offset = (datetime.datetime.fromtimestamp(ts) | |
732 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() |
|
735 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() | |
733 | last_change = last_change + datetime.timedelta(seconds=utc_offset) |
|
736 | last_change = last_change + datetime.timedelta(seconds=utc_offset) | |
734 | return _render("last_change", last_change) |
|
737 | return _render("last_change", last_change) | |
735 |
|
738 | |||
736 | def desc(desc, personal): |
|
739 | def desc(desc, personal): | |
737 | return _render( |
|
740 | return _render( | |
738 | 'repo_group_desc', desc, personal, c.visual.stylify_metatags) |
|
741 | 'repo_group_desc', desc, personal, c.visual.stylify_metatags) | |
739 |
|
742 | |||
740 | def repo_group_actions(repo_group_id, repo_group_name, gr_count): |
|
743 | def repo_group_actions(repo_group_id, repo_group_name, gr_count): | |
741 | return _render( |
|
744 | return _render( | |
742 | 'repo_group_actions', repo_group_id, repo_group_name, gr_count) |
|
745 | 'repo_group_actions', repo_group_id, repo_group_name, gr_count) | |
743 |
|
746 | |||
744 | def repo_group_name(repo_group_name, children_groups): |
|
747 | def repo_group_name(repo_group_name, children_groups): | |
745 | return _render("repo_group_name", repo_group_name, children_groups) |
|
748 | return _render("repo_group_name", repo_group_name, children_groups) | |
746 |
|
749 | |||
747 | def user_profile(username): |
|
750 | def user_profile(username): | |
748 | return _render('user_profile', username) |
|
751 | return _render('user_profile', username) | |
749 |
|
752 | |||
750 | repo_group_data = [] |
|
753 | repo_group_data = [] | |
751 | for group in repo_group_list: |
|
754 | for group in repo_group_list: | |
752 | # NOTE(marcink): because we use only raw column we need to load it like that |
|
755 | # NOTE(marcink): because we use only raw column we need to load it like that | |
753 | changeset_cache = RepoGroup._load_changeset_cache( |
|
756 | changeset_cache = RepoGroup._load_changeset_cache( | |
754 | '', group._changeset_cache) |
|
757 | '', group._changeset_cache) | |
755 | last_commit_change = RepoGroup._load_commit_change(changeset_cache) |
|
758 | last_commit_change = RepoGroup._load_commit_change(changeset_cache) | |
756 | row = { |
|
759 | row = { | |
757 | "menu": quick_menu(group.group_name), |
|
760 | "menu": quick_menu(group.group_name), | |
758 | "name": repo_group_lnk(group.group_name), |
|
761 | "name": repo_group_lnk(group.group_name), | |
759 | "name_raw": group.group_name, |
|
762 | "name_raw": group.group_name, | |
760 |
|
763 | |||
761 | "last_change": last_change(last_commit_change), |
|
764 | "last_change": last_change(last_commit_change), | |
762 |
|
765 | |||
763 | "last_changeset": "", |
|
766 | "last_changeset": "", | |
764 | "last_changeset_raw": "", |
|
767 | "last_changeset_raw": "", | |
765 |
|
768 | |||
766 | "desc": desc(h.escape(group.group_description), group.personal), |
|
769 | "desc": desc(h.escape(group.group_description), group.personal), | |
767 | "top_level_repos": 0, |
|
770 | "top_level_repos": 0, | |
768 | "owner": user_profile(group.User.username) |
|
771 | "owner": user_profile(group.User.username) | |
769 | } |
|
772 | } | |
770 | if admin: |
|
773 | if admin: | |
771 | repo_count = group.repositories.count() |
|
774 | repo_count = group.repositories.count() | |
772 | children_groups = list(map( |
|
775 | children_groups = list(map( | |
773 | h.safe_str, |
|
776 | h.safe_str, | |
774 | itertools.chain((g.name for g in group.parents), |
|
777 | itertools.chain((g.name for g in group.parents), | |
775 | (x.name for x in [group])))) |
|
778 | (x.name for x in [group])))) | |
776 | row.update({ |
|
779 | row.update({ | |
777 | "action": repo_group_actions( |
|
780 | "action": repo_group_actions( | |
778 | group.group_id, group.group_name, repo_count), |
|
781 | group.group_id, group.group_name, repo_count), | |
779 | "top_level_repos": repo_count, |
|
782 | "top_level_repos": repo_count, | |
780 | "name": repo_group_name(group.group_name, children_groups), |
|
783 | "name": repo_group_name(group.group_name, children_groups), | |
781 |
|
784 | |||
782 | }) |
|
785 | }) | |
783 | repo_group_data.append(row) |
|
786 | repo_group_data.append(row) | |
784 |
|
787 | |||
785 | return repo_group_data |
|
788 | return repo_group_data | |
786 |
|
789 | |||
787 | def get_repo_groups_data_table( |
|
790 | def get_repo_groups_data_table( | |
788 | self, draw, start, limit, |
|
791 | self, draw, start, limit, | |
789 | search_q, order_by, order_dir, |
|
792 | search_q, order_by, order_dir, | |
790 | auth_user, repo_group_id): |
|
793 | auth_user, repo_group_id): | |
791 | from rhodecode.model.scm import RepoGroupList |
|
794 | from rhodecode.model.scm import RepoGroupList | |
792 |
|
795 | |||
793 | _perms = ['group.read', 'group.write', 'group.admin'] |
|
796 | _perms = ['group.read', 'group.write', 'group.admin'] | |
794 | repo_groups = RepoGroup.query() \ |
|
797 | repo_groups = RepoGroup.query() \ | |
795 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
798 | .filter(RepoGroup.group_parent_id == repo_group_id) \ | |
796 | .all() |
|
799 | .all() | |
797 | auth_repo_group_list = RepoGroupList( |
|
800 | auth_repo_group_list = RepoGroupList( | |
798 | repo_groups, perm_set=_perms, |
|
801 | repo_groups, perm_set=_perms, | |
799 | extra_kwargs=dict(user=auth_user)) |
|
802 | extra_kwargs=dict(user=auth_user)) | |
800 |
|
803 | |||
801 | allowed_ids = [-1] |
|
804 | allowed_ids = [-1] | |
802 | for repo_group in auth_repo_group_list: |
|
805 | for repo_group in auth_repo_group_list: | |
803 | allowed_ids.append(repo_group.group_id) |
|
806 | allowed_ids.append(repo_group.group_id) | |
804 |
|
807 | |||
805 | repo_groups_data_total_count = RepoGroup.query() \ |
|
808 | repo_groups_data_total_count = RepoGroup.query() \ | |
806 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
809 | .filter(RepoGroup.group_parent_id == repo_group_id) \ | |
807 | .filter(or_( |
|
810 | .filter(or_( | |
808 | # generate multiple IN clauses to work around DB parameter limits |
|
811 | # generate multiple IN clauses to work around DB parameter limits | |
809 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) |
|
812 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) | |
810 | ) \ |
|
813 | ) \ | |
811 | .count() |
|
814 | .count() | |
812 |
|
815 | |||
813 | base_q = Session.query( |
|
816 | base_q = Session.query( | |
814 | RepoGroup.group_name, |
|
817 | RepoGroup.group_name, | |
815 | RepoGroup.group_name_hash, |
|
818 | RepoGroup.group_name_hash, | |
816 | RepoGroup.group_description, |
|
819 | RepoGroup.group_description, | |
817 | RepoGroup.group_id, |
|
820 | RepoGroup.group_id, | |
818 | RepoGroup.personal, |
|
821 | RepoGroup.personal, | |
819 | RepoGroup.updated_on, |
|
822 | RepoGroup.updated_on, | |
820 | RepoGroup._changeset_cache, |
|
823 | RepoGroup._changeset_cache, | |
821 | User, |
|
824 | User, | |
822 | ) \ |
|
825 | ) \ | |
823 | .filter(RepoGroup.group_parent_id == repo_group_id) \ |
|
826 | .filter(RepoGroup.group_parent_id == repo_group_id) \ | |
824 | .filter(or_( |
|
827 | .filter(or_( | |
825 | # generate multiple IN clauses to work around DB parameter limits |
|
828 | # generate multiple IN clauses to work around DB parameter limits | |
826 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) |
|
829 | *in_filter_generator(RepoGroup.group_id, allowed_ids)) | |
827 | ) \ |
|
830 | ) \ | |
828 | .join(User, User.user_id == RepoGroup.user_id) \ |
|
831 | .join(User, User.user_id == RepoGroup.user_id) \ | |
829 | .group_by(RepoGroup, User) |
|
832 | .group_by(RepoGroup, User) | |
830 |
|
833 | |||
831 | repo_groups_data_total_filtered_count = base_q.count() |
|
834 | repo_groups_data_total_filtered_count = base_q.count() | |
832 |
|
835 | |||
833 | sort_defined = False |
|
836 | sort_defined = False | |
834 |
|
837 | |||
835 | if order_by == 'group_name': |
|
838 | if order_by == 'group_name': | |
836 | sort_col = func.lower(RepoGroup.group_name) |
|
839 | sort_col = func.lower(RepoGroup.group_name) | |
837 | sort_defined = True |
|
840 | sort_defined = True | |
838 | elif order_by == 'user_username': |
|
841 | elif order_by == 'user_username': | |
839 | sort_col = User.username |
|
842 | sort_col = User.username | |
840 | else: |
|
843 | else: | |
841 | sort_col = getattr(RepoGroup, order_by, None) |
|
844 | sort_col = getattr(RepoGroup, order_by, None) | |
842 |
|
845 | |||
843 | if sort_defined or sort_col: |
|
846 | if sort_defined or sort_col: | |
844 | if order_dir == 'asc': |
|
847 | if order_dir == 'asc': | |
845 | sort_col = sort_col.asc() |
|
848 | sort_col = sort_col.asc() | |
846 | else: |
|
849 | else: | |
847 | sort_col = sort_col.desc() |
|
850 | sort_col = sort_col.desc() | |
848 |
|
851 | |||
849 | base_q = base_q.order_by(sort_col) |
|
852 | base_q = base_q.order_by(sort_col) | |
850 | base_q = base_q.offset(start).limit(limit) |
|
853 | base_q = base_q.offset(start).limit(limit) | |
851 |
|
854 | |||
852 | repo_group_list = base_q.all() |
|
855 | repo_group_list = base_q.all() | |
853 |
|
856 | |||
854 | repo_groups_data = RepoGroupModel().get_repo_groups_as_dict( |
|
857 | repo_groups_data = RepoGroupModel().get_repo_groups_as_dict( | |
855 | repo_group_list=repo_group_list, admin=False) |
|
858 | repo_group_list=repo_group_list, admin=False) | |
856 |
|
859 | |||
857 | data = ({ |
|
860 | data = ({ | |
858 | 'draw': draw, |
|
861 | 'draw': draw, | |
859 | 'data': repo_groups_data, |
|
862 | 'data': repo_groups_data, | |
860 | 'recordsTotal': repo_groups_data_total_count, |
|
863 | 'recordsTotal': repo_groups_data_total_count, | |
861 | 'recordsFiltered': repo_groups_data_total_filtered_count, |
|
864 | 'recordsFiltered': repo_groups_data_total_filtered_count, | |
862 | }) |
|
865 | }) | |
863 | return data |
|
866 | return data | |
864 |
|
867 | |||
865 | def _get_defaults(self, repo_group_name): |
|
868 | def _get_defaults(self, repo_group_name): | |
866 | repo_group = RepoGroup.get_by_group_name(repo_group_name) |
|
869 | repo_group = RepoGroup.get_by_group_name(repo_group_name) | |
867 |
|
870 | |||
868 | if repo_group is None: |
|
871 | if repo_group is None: | |
869 | return None |
|
872 | return None | |
870 |
|
873 | |||
871 | defaults = repo_group.get_dict() |
|
874 | defaults = repo_group.get_dict() | |
872 | defaults['repo_group_name'] = repo_group.name |
|
875 | defaults['repo_group_name'] = repo_group.name | |
873 | defaults['repo_group_description'] = repo_group.group_description |
|
876 | defaults['repo_group_description'] = repo_group.group_description | |
874 | defaults['repo_group_enable_locking'] = repo_group.enable_locking |
|
877 | defaults['repo_group_enable_locking'] = repo_group.enable_locking | |
875 |
|
878 | |||
876 | # we use -1 because that is how the HTML form marks an empty group |
|
879 | # we use -1 because that is how the HTML form marks an empty group | |
877 | defaults['repo_group'] = defaults['group_parent_id'] or -1 |
|
880 | defaults['repo_group'] = defaults['group_parent_id'] or -1 | |
878 |
|
881 | |||
879 | # fill owner |
|
882 | # fill owner | |
880 | if repo_group.user: |
|
883 | if repo_group.user: | |
881 | defaults.update({'user': repo_group.user.username}) |
|
884 | defaults.update({'user': repo_group.user.username}) | |
882 | else: |
|
885 | else: | |
883 | replacement_user = User.get_first_super_admin().username |
|
886 | replacement_user = User.get_first_super_admin().username | |
884 | defaults.update({'user': replacement_user}) |
|
887 | defaults.update({'user': replacement_user}) | |
885 |
|
888 | |||
886 | return defaults |
|
889 | return defaults |
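Note on the `in_filter_generator` usage in the queries above: filtering by `allowed_ids` through several OR-ed chunks side-steps the cap that some databases put on bound parameters in a single IN clause. A minimal sketch of the idea, assuming a chunk size of 300 — this stand-in is illustrative, not RhodeCode's actual helper:

    # illustrative stand-in for in_filter_generator; the chunk size is an assumption
    from sqlalchemy import or_

    def in_filter_generator(column, ids, chunk_size=300):
        # keep the filter valid even when the allow-list is empty
        ids = list(ids) if ids else [-1]
        for i in range(0, len(ids), chunk_size):
            # each chunk becomes its own IN expression, later OR-ed together
            yield column.in_(ids[i:i + chunk_size])

    # usage mirroring the queries above:
    # query.filter(or_(*in_filter_generator(RepoGroup.group_id, allowed_ids)))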
@@ -1,83 +1,83 b'' | |||||
1 | # Copyright (C) 2013-2023 RhodeCode GmbH |
|
1 | # Copyright (C) 2013-2023 RhodeCode GmbH | |
2 | # |
|
2 | # | |
3 | # This program is free software: you can redistribute it and/or modify |
|
3 | # This program is free software: you can redistribute it and/or modify | |
4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
4 | # it under the terms of the GNU Affero General Public License, version 3 | |
5 | # (only), as published by the Free Software Foundation. |
|
5 | # (only), as published by the Free Software Foundation. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU Affero General Public License |
|
12 | # You should have received a copy of the GNU Affero General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | # |
|
14 | # | |
15 | # This program is dual-licensed. If you wish to learn more about the |
|
15 | # This program is dual-licensed. If you wish to learn more about the | |
16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
16 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
18 |
|
18 | |||
19 | import logging |
|
19 | import logging | |
20 | import urllib.request |
|
20 | import urllib.request | |
21 | import urllib.error |
|
21 | import urllib.error | |
22 | import urllib.parse |
|
22 | import urllib.parse | |
23 | from packaging.version import Version |
|
23 | from packaging.version import Version | |
24 |
|
24 | |||
25 | import rhodecode |
|
25 | import rhodecode | |
26 | from rhodecode.lib.ext_json import json |
|
26 | from rhodecode.lib.ext_json import json | |
27 | from rhodecode.model import BaseModel |
|
27 | from rhodecode.model import BaseModel | |
28 | from rhodecode.model.meta import Session |
|
28 | from rhodecode.model.meta import Session | |
29 | from rhodecode.model.settings import SettingsModel |
|
29 | from rhodecode.model.settings import SettingsModel | |
30 |
|
30 | |||
31 |
|
31 | |||
32 | log = logging.getLogger(__name__) |
|
32 | log = logging.getLogger(__name__) | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | class UpdateModel(BaseModel): |
|
35 | class UpdateModel(BaseModel): | |
36 | UPDATE_SETTINGS_KEY = 'update_version' |
|
36 | UPDATE_SETTINGS_KEY = 'update_version' | |
37 | UPDATE_URL_SETTINGS_KEY = 'rhodecode_update_url' |
|
37 | UPDATE_URL_SETTINGS_KEY = 'rhodecode_update_url' | |
38 |
|
38 | |||
39 | @staticmethod |
|
39 | @staticmethod | |
40 | def get_update_data(update_url): |
|
40 | def get_update_data(update_url): | |
41 | """Return the JSON update data.""" |
|
41 | """Return the JSON update data.""" | |
42 | ver = rhodecode.__version__ |
|
42 | ver = rhodecode.__version__ | |
43 | log.debug('Checking for upgrade on `%s` server', update_url) |
|
43 | log.debug('Checking for upgrade on `%s` server', update_url) | |
44 | opener = urllib.request.build_opener() |
|
44 | opener = urllib.request.build_opener() | |
45 | opener.addheaders = [('User-agent', 'RhodeCode-SCM/ |
|
45 | opener.addheaders = [('User-agent', f'RhodeCode-SCM/{ver.strip()}')] | |
46 | response = opener.open(update_url) |
|
46 | response = opener.open(update_url) | |
47 | response_data = response.read() |
|
47 | response_data = response.read() | |
48 | data = json.loads(response_data) |
|
48 | data = json.loads(response_data) | |
49 | log.debug('update server returned data') |
|
49 | log.debug('update server returned data') | |
50 | return data |
|
50 | return data | |
51 |
|
51 | |||
52 | def get_update_url(self): |
|
52 | def get_update_url(self): | |
53 | settings = SettingsModel().get_all_settings() |
|
53 | settings = SettingsModel().get_all_settings() | |
54 | return settings.get(self.UPDATE_URL_SETTINGS_KEY) |
|
54 | return settings.get(self.UPDATE_URL_SETTINGS_KEY) | |
55 |
|
55 | |||
56 | def store_version(self, version): |
|
56 | def store_version(self, version): | |
57 | log.debug('Storing version %s into settings', version) |
|
57 | log.debug('Storing version %s into settings', version) | |
58 | setting = SettingsModel().create_or_update_setting( |
|
58 | setting = SettingsModel().create_or_update_setting( | |
59 | self.UPDATE_SETTINGS_KEY, version) |
|
59 | self.UPDATE_SETTINGS_KEY, version) | |
60 | Session().add(setting) |
|
60 | Session().add(setting) | |
61 | Session().commit() |
|
61 | Session().commit() | |
62 |
|
62 | |||
63 | def get_stored_version(self, fallback=None): |
|
63 | def get_stored_version(self, fallback=None): | |
64 | obj = SettingsModel().get_setting_by_name(self.UPDATE_SETTINGS_KEY) |
|
64 | obj = SettingsModel().get_setting_by_name(self.UPDATE_SETTINGS_KEY) | |
65 | if obj: |
|
65 | if obj: | |
66 | return obj.app_settings_value |
|
66 | return obj.app_settings_value | |
67 | return fallback or '0.0.0' |
|
67 | return fallback or '0.0.0' | |
68 |
|
68 | |||
69 | def _sanitize_version(self, version): |
|
69 | def _sanitize_version(self, version): | |
70 | """ |
|
70 | """ | |
71 | Cleanup our custom ver. |
|
71 | Cleanup our custom ver. | |
72 | e.g. 4.11.0_20171204_204825_CE_default_EE_default to 4.11.0 |
|
72 | e.g. 4.11.0_20171204_204825_CE_default_EE_default to 4.11.0 | |
73 | """ |
|
73 | """ | |
74 | return version.split('_')[0] |
|
74 | return version.split('_')[0] | |
75 |
|
75 | |||
76 | def is_outdated(self, cur_version, latest_version=None): |
|
76 | def is_outdated(self, cur_version, latest_version=None): | |
77 | latest_version = latest_version or self.get_stored_version() |
|
77 | latest_version = latest_version or self.get_stored_version() | |
78 | try: |
|
78 | try: | |
79 | cur_version = self._sanitize_version(cur_version) |
|
79 | cur_version = self._sanitize_version(cur_version) | |
80 | return Version(latest_version) > Version(cur_version) |
|
80 | return Version(latest_version) > Version(cur_version) | |
81 | except Exception: |
|
81 | except Exception: | |
82 | # could be invalid version, etc |
|
82 | # could be invalid version, etc | |
83 | return False |
|
83 | return False |
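For reference, the outdated check in `UpdateModel` boils down to a plain `packaging.version` comparison after stripping the build suffix. A condensed, self-contained restatement (the sample value is taken from the docstring example above):

    from packaging.version import Version

    def is_outdated(cur_version: str, latest_version: str) -> bool:
        cur = cur_version.split('_')[0]  # 4.11.0_20171204_... -> 4.11.0
        try:
            return Version(latest_version) > Version(cur)
        except Exception:
            # unparsable versions are treated as up to date
            return False

    assert is_outdated('4.11.0_20171204_204825_CE_default_EE_default', '5.3.0')
    assert not is_outdated('5.3.0', '5.3.0')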
@@ -1,89 +1,103 b'' | |||||
1 |
|
1 | |||
2 | <div id="update_notice" style="display: none; margin: 0px 0px 30px 0px"> |
|
2 | <div id="update_notice" style="display: none; margin: 0px 0px 30px 0px"> | |
3 | <div>${_('Checking for updates...')}</div> |
|
3 | <div>${_('Checking for updates...')}</div> | |
4 | </div> |
|
4 | </div> | |
5 |
|
5 | |||
6 |
|
6 | |||
7 | <div class="panel panel-default"> |
|
7 | <div class="panel panel-default"> | |
8 | <div class="panel-heading"> |
|
8 | <div class="panel-heading"> | |
9 | <h3 class="panel-title">${_('System Info')}</h3> |
|
9 | <h3 class="panel-title">${_('System Info')}</h3> | |
10 | % if c.allowed_to_snapshot: |
|
10 | % if c.allowed_to_snapshot: | |
11 | <a href="${h.route_path('admin_settings_system', _query={'snapshot':1})}" class="panel-edit">${_('create summary snapshot')}</a> |
|
11 | <a href="${h.route_path('admin_settings_system', _query={'snapshot':1})}" class="panel-edit">${_('create summary snapshot')}</a> | |
12 | % endif |
|
12 | % endif | |
13 | </div> |
|
13 | </div> | |
14 | <div class="panel-body"> |
|
14 | <div class="panel-body"> | |
15 | <dl class="dl-horizontal settings dt-400"> |
|
15 | <dl class="dl-horizontal settings dt-400"> | |
16 | % for dt, dd, warn in c.data_items: |
|
16 | % for dt, dd, warn in c.data_items: | |
17 | <dt>${dt}${':' if dt else '---'}</dt> |
|
17 | <dt>${dt}${':' if dt else '---'}</dt> | |
18 | <dd>${dd}${'' if dt else '---'} |
|
18 | <dd>${dd}${'' if dt else '---'} | |
19 | % if warn and warn['message']: |
|
19 | % if warn and warn['message']: | |
20 | <div class="alert-${warn['type']}"> |
|
20 | <div class="alert-${warn['type']}"> | |
21 | <strong>${warn['message']}</strong> |
|
21 | <strong>${warn['message']}</strong> | |
22 | </div> |
|
22 | </div> | |
23 | % endif |
|
23 | % endif | |
24 | </dd> |
|
24 | </dd> | |
25 | % endfor |
|
25 | % endfor | |
26 | </dl> |
|
26 | </dl> | |
27 | </div> |
|
27 | </div> | |
28 | </div> |
|
28 | </div> | |
29 |
|
29 | |||
30 | <div class="panel panel-default"> |
|
30 | <div class="panel panel-default"> | |
31 | <div class="panel-heading"> |
|
31 | <div class="panel-heading"> | |
32 | <h3 class="panel-title">${_('VCS Server Config')}</h3> |
|
32 | <h3 class="panel-title">${_('RhodeCode Server Config')}</h3> | |
|
33 | </div> | |||
|
34 | <div class="panel-body"> | |||
|
35 | <dl class="dl-horizontal settings dt-400"> | |||
|
36 | % for dt, dd in c.rhodecode_data_items: | |||
|
37 | <dt>${dt}${':' if dt else '---'}</dt> | |||
|
38 | <dd>${dd}${'' if dt else '---'}</dd> | |||
|
39 | % endfor | |||
|
40 | </dl> | |||
|
41 | </div> | |||
|
42 | </div> | |||
|
43 | ||||
|
44 | <div class="panel panel-default"> | |||
|
45 | <div class="panel-heading"> | |||
|
46 | <h3 class="panel-title">${_('VCS Server Config')}</h3> | |||
33 | </div> |
|
47 | </div> | |
34 | <div class="panel-body"> |
|
48 | <div class="panel-body"> | |
35 | <dl class="dl-horizontal settings dt-400"> |
|
49 | <dl class="dl-horizontal settings dt-400"> | |
36 | % for dt, dd in c.vcsserver_data_items: |
|
50 | % for dt, dd in c.vcsserver_data_items: | |
37 | <dt>${dt}${':' if dt else '---'}</dt> |
|
51 | <dt>${dt}${':' if dt else '---'}</dt> | |
38 | <dd>${dd}${'' if dt else '---'}</dd> |
|
52 | <dd>${dd}${'' if dt else '---'}</dd> | |
39 | % endfor |
|
53 | % endfor | |
40 | </dl> |
|
54 | </dl> | |
41 | </div> |
|
55 | </div> | |
42 | </div> |
|
56 | </div> | |
43 |
|
57 | |||
44 | <div class="panel panel-default"> |
|
58 | <div class="panel panel-default"> | |
45 | <div class="panel-heading"> |
|
59 | <div class="panel-heading"> | |
46 | <h3 class="panel-title">${_('Python Packages')}</h3> |
|
60 | <h3 class="panel-title">${_('Python Packages')}</h3> | |
47 | </div> |
|
61 | </div> | |
48 | <div class="panel-body"> |
|
62 | <div class="panel-body"> | |
49 | <table> |
|
63 | <table> | |
50 | <th></th> |
|
64 | <th></th> | |
51 | <th></th> |
|
65 | <th></th> | |
52 | <th></th> |
|
66 | <th></th> | |
53 | % for name, package_data in c.py_modules['human_value']: |
|
67 | % for name, package_data in c.py_modules['human_value']: | |
54 | <tr> |
|
68 | <tr> | |
55 | <td>${name.lower()}</td> |
|
69 | <td>${name.lower()}</td> | |
56 | <td>${package_data['version']}</td> |
|
70 | <td>${package_data['version']}</td> | |
57 | <td>(${package_data['location']})</td> |
|
71 | <td>(${package_data['location']})</td> | |
58 | </tr> |
|
72 | </tr> | |
59 | % endfor |
|
73 | % endfor | |
60 | </table> |
|
74 | </table> | |
61 |
|
75 | |||
62 | </div> |
|
76 | </div> | |
63 | </div> |
|
77 | </div> | |
64 |
|
78 | |||
65 | <div class="panel panel-default"> |
|
79 | <div class="panel panel-default"> | |
66 | <div class="panel-heading"> |
|
80 | <div class="panel-heading"> | |
67 | <h3 class="panel-title">${_('Env Variables')}</h3> |
|
81 | <h3 class="panel-title">${_('Env Variables')}</h3> | |
68 | </div> |
|
82 | </div> | |
69 | <div class="panel-body"> |
|
83 | <div class="panel-body"> | |
70 | <table> |
|
84 | <table> | |
71 | <th></th> |
|
85 | <th></th> | |
72 | <th></th> |
|
86 | <th></th> | |
73 | % for env_key, env_val in c.env_data: |
|
87 | % for env_key, env_val in c.env_data: | |
74 | <tr> |
|
88 | <tr> | |
75 | <td style="vertical-align: top">${env_key}</td> |
|
89 | <td style="vertical-align: top">${env_key}</td> | |
76 | <td>${env_val}</td> |
|
90 | <td>${env_val}</td> | |
77 | </tr> |
|
91 | </tr> | |
78 | % endfor |
|
92 | % endfor | |
79 | </table> |
|
93 | </table> | |
80 |
|
94 | |||
81 | </div> |
|
95 | </div> | |
82 | </div> |
|
96 | </div> | |
83 |
|
97 | |||
84 | <script> |
|
98 | <script> | |
85 | $('#check_for_update').click(function(e){ |
|
99 | $('#check_for_update').click(function(e){ | |
86 | $('#update_notice').show(); |
|
100 | $('#update_notice').show(); | |
87 | $('#update_notice').load("${h.route_path('admin_settings_system_update', _query={'ver': request.GET.get('ver')})}"); |
|
101 | $('#update_notice').load("${h.route_path('admin_settings_system_update', _query={'ver': request.GET.get('ver')})}"); | |
88 | }) |
|
102 | }) | |
89 | </script> |
|
103 | </script> |
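The new "RhodeCode Server Config" panel reuses the `(dt, dd)` loop pattern of the existing panels, so the controller is expected to expose the config as label/value pairs. A simplified sketch of that rendering — the sample key is hypothetical, and the real template additionally prints '---' for empty labels:

    # simplified version of the template loop; the sample data is hypothetical
    def render_settings(items):
        rows = []
        for dt, dd in items:
            rows.append(f"<dt>{dt}:</dt><dd>{dd}</dd>")
        return "\n".join(rows)

    print(render_settings([('app.base_url', 'https://code.example.com')]))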
@@ -1,743 +1,811 b'' | |||||
1 |
|
1 | |||
2 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
2 | # Copyright (C) 2010-2023 RhodeCode GmbH | |
3 | # |
|
3 | # | |
4 | # This program is free software: you can redistribute it and/or modify |
|
4 | # This program is free software: you can redistribute it and/or modify | |
5 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | # it under the terms of the GNU Affero General Public License, version 3 | |
6 | # (only), as published by the Free Software Foundation. |
|
6 | # (only), as published by the Free Software Foundation. | |
7 | # |
|
7 | # | |
8 | # This program is distributed in the hope that it will be useful, |
|
8 | # This program is distributed in the hope that it will be useful, | |
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | # GNU General Public License for more details. |
|
11 | # GNU General Public License for more details. | |
12 | # |
|
12 | # | |
13 | # You should have received a copy of the GNU Affero General Public License |
|
13 | # You should have received a copy of the GNU Affero General Public License | |
14 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | # |
|
15 | # | |
16 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | # This program is dual-licensed. If you wish to learn more about the | |
17 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
18 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
19 |
|
19 | |||
20 | import pytest |
|
20 | import pytest | |
21 |
|
21 | |||
22 | from rhodecode.lib.auth import AuthUser |
|
22 | from rhodecode.lib.auth import AuthUser | |
23 | from rhodecode.model.db import ( |
|
23 | from rhodecode.model.db import ( | |
24 | RepoGroup, User, UserGroupRepoGroupToPerm, Permission, UserToPerm, |
|
24 | RepoGroup, User, UserGroupRepoGroupToPerm, Permission, UserToPerm, | |
25 | UserGroupToPerm) |
|
25 | UserGroupToPerm) | |
26 | from rhodecode.model.meta import Session |
|
26 | from rhodecode.model.meta import Session | |
27 | from rhodecode.model.permission import PermissionModel |
|
27 | from rhodecode.model.permission import PermissionModel | |
28 | from rhodecode.model.repo import RepoModel |
|
28 | from rhodecode.model.repo import RepoModel | |
29 | from rhodecode.model.repo_group import RepoGroupModel |
|
29 | from rhodecode.model.repo_group import RepoGroupModel | |
30 | from rhodecode.model.user import UserModel |
|
30 | from rhodecode.model.user import UserModel | |
31 | from rhodecode.model.user_group import UserGroupModel |
|
31 | from rhodecode.model.user_group import UserGroupModel | |
32 | from rhodecode.tests.fixture import Fixture |
|
32 | from rhodecode.tests.fixture import Fixture | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | fixture = Fixture() |
|
35 | fixture = Fixture() | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | @pytest.fixture() |
|
38 | @pytest.fixture() | |
39 | def repo_name(backend_hg): |
|
39 | def repo_name(backend_hg): | |
40 | return backend_hg.repo_name |
|
40 | return backend_hg.repo_name | |
41 |
|
41 | |||
42 |
|
42 | |||
43 | class TestPermissions(object): |
|
43 | class TestPermissions(object): | |
44 |
|
44 | |||
45 | @pytest.fixture(scope='class', autouse=True) |
|
45 | @pytest.fixture(scope='class', autouse=True) | |
46 | def default_permissions(self, request, baseapp): |
|
46 | def default_permissions(self, request, baseapp): | |
47 | # recreate default user to get a clean start |
|
47 | # recreate default user to get a clean start | |
48 | PermissionModel().create_default_user_permissions( |
|
48 | PermissionModel().create_default_user_permissions( | |
49 | user=User.DEFAULT_USER, force=True) |
|
49 | user=User.DEFAULT_USER, force=True) | |
50 | Session().commit() |
|
50 | Session().commit() | |
51 |
|
51 | |||
52 | @pytest.fixture(autouse=True) |
|
52 | @pytest.fixture(autouse=True) | |
53 | def prepare_users(self, request): |
|
53 | def prepare_users(self, request): | |
54 | # TODO: User creation is a duplicate of test_notifications, check |
|
54 | # TODO: User creation is a duplicate of test_notifications, check | |
55 | # if that can be unified |
|
55 | # if that can be unified | |
56 | self.u1 = UserModel().create_or_update( |
|
56 | self.u1 = UserModel().create_or_update( | |
57 | username=u'u1', password=u'qweqwe', |
|
57 | username=u'u1', password=u'qweqwe', | |
58 | email=u'u1@rhodecode.org', firstname=u'u1', lastname=u'u1' |
|
58 | email=u'u1@rhodecode.org', firstname=u'u1', lastname=u'u1' | |
59 | ) |
|
59 | ) | |
60 | self.u2 = UserModel().create_or_update( |
|
60 | self.u2 = UserModel().create_or_update( | |
61 | username=u'u2', password=u'qweqwe', |
|
61 | username=u'u2', password=u'qweqwe', | |
62 | email=u'u2@rhodecode.org', firstname=u'u2', lastname=u'u2' |
|
62 | email=u'u2@rhodecode.org', firstname=u'u2', lastname=u'u2' | |
63 | ) |
|
63 | ) | |
64 | self.u3 = UserModel().create_or_update( |
|
64 | self.u3 = UserModel().create_or_update( | |
65 | username=u'u3', password=u'qweqwe', |
|
65 | username=u'u3', password=u'qweqwe', | |
66 | email=u'u3@rhodecode.org', firstname=u'u3', lastname=u'u3' |
|
66 | email=u'u3@rhodecode.org', firstname=u'u3', lastname=u'u3' | |
67 | ) |
|
67 | ) | |
68 | self.anon = User.get_default_user() |
|
68 | self.anon = User.get_default_user() | |
69 | self.a1 = UserModel().create_or_update( |
|
69 | self.a1 = UserModel().create_or_update( | |
70 | username=u'a1', password=u'qweqwe', |
|
70 | username=u'a1', password=u'qweqwe', | |
71 | email=u'a1@rhodecode.org', firstname=u'a1', lastname=u'a1', |
|
71 | email=u'a1@rhodecode.org', firstname=u'a1', lastname=u'a1', | |
72 | admin=True |
|
72 | admin=True | |
73 | ) |
|
73 | ) | |
74 | Session().commit() |
|
74 | Session().commit() | |
75 |
|
75 | |||
76 | request.addfinalizer(self.cleanup) |
|
76 | request.addfinalizer(self.cleanup) | |
77 |
|
77 | |||
78 | def cleanup(self): |
|
78 | def cleanup(self): | |
79 | if hasattr(self, 'test_repo'): |
|
79 | if hasattr(self, 'test_repo'): | |
80 | RepoModel().delete(repo=self.test_repo) |
|
80 | RepoModel().delete(repo=self.test_repo) | |
81 | Session().commit() |
|
81 | Session().commit() | |
82 |
|
82 | |||
83 | if hasattr(self, 'g1'): |
|
83 | if hasattr(self, 'g1'): | |
84 | RepoGroupModel().delete(self.g1.group_id) |
|
84 | RepoGroupModel().delete(self.g1.group_id) | |
85 | if hasattr(self, 'g2'): |
|
85 | if hasattr(self, 'g2'): | |
86 | RepoGroupModel().delete(self.g2.group_id) |
|
86 | RepoGroupModel().delete(self.g2.group_id) | |
87 | Session().commit() |
|
87 | Session().commit() | |
88 |
|
88 | |||
89 | UserModel().delete(self.u1, handle_repos='delete', handle_repo_groups='delete') |
|
89 | UserModel().delete(self.u1, handle_repos='delete', handle_repo_groups='delete') | |
90 | UserModel().delete(self.u2, handle_repos='delete', handle_repo_groups='delete') |
|
90 | UserModel().delete(self.u2, handle_repos='delete', handle_repo_groups='delete') | |
91 | UserModel().delete(self.u3, handle_repos='delete', handle_repo_groups='delete') |
|
91 | UserModel().delete(self.u3, handle_repos='delete', handle_repo_groups='delete') | |
92 | UserModel().delete(self.a1, handle_repos='delete', handle_repo_groups='delete') |
|
92 | UserModel().delete(self.a1, handle_repos='delete', handle_repo_groups='delete') | |
93 | Session().commit() |
|
93 | Session().commit() | |
94 |
|
94 | |||
95 | if hasattr(self, 'ug1'): |
|
95 | if hasattr(self, 'ug1'): | |
96 | UserGroupModel().delete(self.ug1, force=True) |
|
96 | UserGroupModel().delete(self.ug1, force=True) | |
97 | Session().commit() |
|
97 | Session().commit() | |
98 |
|
98 | |||
99 | def test_default_perms_set(self, repo_name): |
|
99 | def test_default_perms_set(self, repo_name): | |
100 | assert repo_perms(self.u1)[repo_name] == 'repository.read' |
|
100 | assert repo_perms(self.u1)[repo_name] == 'repository.read' | |
101 | new_perm = 'repository.write' |
|
101 | new_perm = 'repository.write' | |
102 | RepoModel().grant_user_permission(repo=repo_name, user=self.u1, |
|
102 | RepoModel().grant_user_permission(repo=repo_name, user=self.u1, | |
103 | perm=new_perm) |
|
103 | perm=new_perm) | |
104 | Session().commit() |
|
104 | Session().commit() | |
105 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
105 | assert repo_perms(self.u1)[repo_name] == new_perm | |
106 |
|
106 | |||
107 | def test_default_admin_perms_set(self, repo_name): |
|
107 | def test_default_admin_perms_set(self, repo_name): | |
108 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
108 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' | |
109 | RepoModel().grant_user_permission(repo=repo_name, user=self.a1, |
|
109 | RepoModel().grant_user_permission(repo=repo_name, user=self.a1, | |
110 | perm='repository.write') |
|
110 | perm='repository.write') | |
111 | Session().commit() |
|
111 | Session().commit() | |
112 | # cannot really downgrade admin permissions !? they still get set as |
|
112 | # cannot really downgrade admin permissions !? they still get set as | |
113 | # admin ! |
|
113 | # admin ! | |
114 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
114 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' | |
115 |
|
115 | |||
116 | def test_default_group_perms(self, repo_name): |
|
116 | def test_default_group_perms(self, repo_name): | |
117 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) |
|
117 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) | |
118 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) |
|
118 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) | |
119 |
|
119 | |||
120 | assert repo_perms(self.u1)[repo_name] == 'repository.read' |
|
120 | assert repo_perms(self.u1)[repo_name] == 'repository.read' | |
121 | assert group_perms(self.u1) == { |
|
121 | assert group_perms(self.u1) == { | |
122 | 'test1': 'group.read', 'test2': 'group.read'} |
|
122 | 'test1': 'group.read', 'test2': 'group.read'} | |
123 | assert global_perms(self.u1) == set( |
|
123 | assert global_perms(self.u1) == set( | |
124 | Permission.DEFAULT_USER_PERMISSIONS) |
|
124 | Permission.DEFAULT_USER_PERMISSIONS) | |
125 |
|
125 | |||
126 | def test_default_admin_group_perms(self, repo_name): |
|
126 | def test_default_admin_group_perms(self, repo_name): | |
127 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) |
|
127 | self.g1 = fixture.create_repo_group('test1', skip_if_exists=True) | |
128 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) |
|
128 | self.g2 = fixture.create_repo_group('test2', skip_if_exists=True) | |
129 |
|
129 | |||
130 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' |
|
130 | assert repo_perms(self.a1)[repo_name] == 'repository.admin' | |
131 | assert group_perms(self.a1) == { |
|
131 | assert group_perms(self.a1) == { | |
132 | 'test1': 'group.admin', 'test2': 'group.admin'} |
|
132 | 'test1': 'group.admin', 'test2': 'group.admin'} | |
133 |
|
133 | |||
134 | def test_default_owner_repo_perms(self, backend, user_util, test_repo): |
|
134 | def test_default_owner_repo_perms(self, backend, user_util, test_repo): | |
135 | user = user_util.create_user() |
|
135 | user = user_util.create_user() | |
136 | repo = test_repo('minimal', backend.alias) |
|
136 | repo = test_repo('minimal', backend.alias) | |
137 | org_owner = repo.user |
|
137 | org_owner = repo.user | |
138 | assert repo_perms(user)[repo.repo_name] == 'repository.read' |
|
138 | assert repo_perms(user)[repo.repo_name] == 'repository.read' | |
139 |
|
139 | |||
140 | repo.user = user |
|
140 | repo.user = user | |
141 | assert repo_perms(user)[repo.repo_name] == 'repository.admin' |
|
141 | assert repo_perms(user)[repo.repo_name] == 'repository.admin' | |
142 | repo.user = org_owner |
|
142 | repo.user = org_owner | |
143 |
|
143 | |||
144 | def test_default_owner_branch_perms(self, user_util, test_user_group): |
|
144 | def test_default_owner_branch_perms(self, user_util, test_user_group): | |
145 | user = user_util.create_user() |
|
145 | user = user_util.create_user() | |
146 | assert branch_perms(user) == {} |
|
146 | assert branch_perms(user) == {} | |
147 |
|
147 | |||
148 | def test_default_owner_repo_group_perms(self, user_util, test_repo_group): |
|
148 | def test_default_owner_repo_group_perms(self, user_util, test_repo_group): | |
149 | user = user_util.create_user() |
|
149 | user = user_util.create_user() | |
150 | org_owner = test_repo_group.user |
|
150 | org_owner = test_repo_group.user | |
151 |
|
151 | |||
152 | assert group_perms(user)[test_repo_group.group_name] == 'group.read' |
|
152 | assert group_perms(user)[test_repo_group.group_name] == 'group.read' | |
153 |
|
153 | |||
154 | test_repo_group.user = user |
|
154 | test_repo_group.user = user | |
155 | assert group_perms(user)[test_repo_group.group_name] == 'group.admin' |
|
155 | assert group_perms(user)[test_repo_group.group_name] == 'group.admin' | |
156 | test_repo_group.user = org_owner |
|
156 | test_repo_group.user = org_owner | |
157 |
|
157 | |||
158 | def test_default_owner_user_group_perms(self, user_util, test_user_group): |
|
158 | def test_default_owner_user_group_perms(self, user_util, test_user_group): | |
159 | user = user_util.create_user() |
|
159 | user = user_util.create_user() | |
160 | org_owner = test_user_group.user |
|
160 | org_owner = test_user_group.user | |
161 |
|
161 | |||
162 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.read' |
|
162 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.read' | |
163 |
|
163 | |||
164 | test_user_group.user = user |
|
164 | test_user_group.user = user | |
165 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.admin' |
|
165 | assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.admin' | |
166 |
|
166 | |||
167 | test_user_group.user = org_owner |
|
167 | test_user_group.user = org_owner | |
168 |
|
168 | |||
|
169 | def test_propagated_permissions_from_repo_group_to_private_repo(self, repo_name): | |||
|
170 | # make group | |||
|
171 | self.g1 = fixture.create_repo_group('TOP_LEVEL', skip_if_exists=True) | |||
|
172 | # both perms should be read ! | |||
|
173 | assert group_perms(self.anon) == { | |||
|
174 | 'TOP_LEVEL': 'group.read' | |||
|
175 | } | |||
|
176 | ||||
|
177 | # Create repo inside the TOP_LEVEL | |||
|
178 | repo_name_in_group = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm_on_private_repo']) | |||
|
179 | self.test_repo = fixture.create_repo(name=repo_name_in_group, | |||
|
180 | repo_type='hg', | |||
|
181 | repo_group=self.g1, | |||
|
182 | cur_user=self.u1,) | |||
|
183 | assert repo_perms(self.anon) == { | |||
|
184 | repo_name_in_group: 'repository.read', | |||
|
185 | 'vcs_test_git': 'repository.read', | |||
|
186 | 'vcs_test_hg': 'repository.read', | |||
|
187 | 'vcs_test_svn': 'repository.read', | |||
|
188 | } | |||
|
189 | # Now change default user permissions | |||
|
190 | new_perm = 'repository.write' | |||
|
191 | perm_updates = [ | |||
|
192 | [self.anon.user_id, new_perm, 'user'] | |||
|
193 | ] | |||
|
194 | RepoGroupModel().update_permissions( | |||
|
195 | repo_group=self.g1, perm_updates=perm_updates, recursive='all') | |||
|
196 | ||||
|
197 | Session().commit() | |||
|
198 | assert repo_perms(self.anon) == { | |||
|
199 | repo_name_in_group: new_perm, | |||
|
200 | 'vcs_test_git': 'repository.read', | |||
|
201 | 'vcs_test_hg': 'repository.read', | |||
|
202 | 'vcs_test_svn': 'repository.read', | |||
|
203 | } | |||
|
204 | ||||
|
205 | # NOW MARK repo as private | |||
|
206 | changes = { | |||
|
207 | 'repo_private': True | |||
|
208 | } | |||
|
209 | repo = RepoModel().get_by_repo_name(repo_name_in_group) | |||
|
210 | RepoModel().update(repo, **changes) | |||
|
211 | Session().commit() | |||
|
212 | ||||
|
213 | # Private repo sets 'none' permission for default user | |||
|
214 | assert repo_perms(self.anon) == { | |||
|
215 | repo_name_in_group: 'repository.none', | |||
|
216 | 'vcs_test_git': 'repository.read', | |||
|
217 | 'vcs_test_hg': 'repository.read', | |||
|
218 | 'vcs_test_svn': 'repository.read', | |||
|
219 | } | |||
|
220 | ||||
|
221 | # apply the same recursive update logic; this time the anon permissions should not be impacted | |
|
222 | new_perm = 'repository.write' | |||
|
223 | perm_updates = [ | |||
|
224 | [self.anon.user_id, new_perm, 'user'] | |||
|
225 | ] | |||
|
226 | RepoGroupModel().update_permissions( | |||
|
227 | repo_group=self.g1, perm_updates=perm_updates, recursive='all') | |||
|
228 | ||||
|
229 | Session().commit() | |||
|
230 | assert repo_perms(self.anon) == { | |||
|
231 | repo_name_in_group: 'repository.none', | |||
|
232 | 'vcs_test_git': 'repository.read', | |||
|
233 | 'vcs_test_hg': 'repository.read', | |||
|
234 | 'vcs_test_svn': 'repository.read', | |||
|
235 | } | |||
|
236 | ||||
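The test added above pins down the 5.3.0 permission fix as a simple invariant; a condensed restatement (the helper and sample names below are illustrative, not RhodeCode API):

    # invariant exercised by the test: once a repo is private, recursive
    # grants on the parent group must leave the anonymous user at 'none'
    def anon_is_locked_out(anon_perms: dict, repo_name: str) -> bool:
        return anon_perms.get(repo_name) == 'repository.none'

    assert anon_is_locked_out({'TOP_LEVEL/repo': 'repository.none'}, 'TOP_LEVEL/repo')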
169 | def test_propagated_permission_from_users_group_by_explicit_perms_exist( |
|
237 | def test_propagated_permission_from_users_group_by_explicit_perms_exist( | |
170 | self, repo_name): |
|
238 | self, repo_name): | |
171 | # make group |
|
239 | # make group | |
172 | self.ug1 = fixture.create_user_group('G1') |
|
240 | self.ug1 = fixture.create_user_group('G1') | |
173 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
241 | UserGroupModel().add_user_to_group(self.ug1, self.u1) | |
174 |
|
242 | |||
175 | # set permission to a lower level |
|
243 | # set permission to a lower level | |
176 | new_perm = 'repository.none' |
|
244 | new_perm = 'repository.none' | |
177 | RepoModel().grant_user_permission( |
|
245 | RepoModel().grant_user_permission( | |
178 | repo=repo_name, user=self.u1, perm=new_perm) |
|
246 | repo=repo_name, user=self.u1, perm=new_perm) | |
179 | Session().commit() |
|
247 | Session().commit() | |
180 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
248 | assert repo_perms(self.u1)[repo_name] == new_perm | |
181 |
|
249 | |||
182 | # grant perm for group; this should not override the user's permission |
|
250 | # grant perm for group; this should not override the user's permission | |
183 | # since it was set explicitly |
|
251 | # since it was set explicitly | |
184 | new_perm_gr = 'repository.write' |
|
252 | new_perm_gr = 'repository.write' | |
185 | RepoModel().grant_user_group_permission( |
|
253 | RepoModel().grant_user_group_permission( | |
186 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) |
|
254 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) | |
187 | Session().commit() |
|
255 | Session().commit() | |
188 |
|
256 | |||
189 | assert repo_perms(self.u1)[repo_name] == new_perm |
|
257 | assert repo_perms(self.u1)[repo_name] == new_perm | |
190 | assert group_perms(self.u1) == {} |
|
258 | assert group_perms(self.u1) == {} | |
191 |
|
259 | |||
192 | def test_propagated_permission_from_users_group(self, repo_name): |
|
260 | def test_propagated_permission_from_users_group(self, repo_name): | |
193 | # make group |
|
261 | # make group | |
194 | self.ug1 = fixture.create_user_group('G1') |
|
262 | self.ug1 = fixture.create_user_group('G1') | |
195 | UserGroupModel().add_user_to_group(self.ug1, self.u3) |
|
263 | UserGroupModel().add_user_to_group(self.ug1, self.u3) | |
196 |
|
264 | |||
197 | # grant perm for group |
|
265 | # grant perm for group | |
198 | # this should override default permission from user |
|
266 | # this should override default permission from user | |
199 | new_perm_gr = 'repository.write' |
|
267 | new_perm_gr = 'repository.write' | |
200 | RepoModel().grant_user_group_permission( |
|
268 | RepoModel().grant_user_group_permission( | |
201 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) |
|
269 | repo=repo_name, group_name=self.ug1, perm=new_perm_gr) | |
202 | Session().commit() |
|
270 | Session().commit() | |
203 |
|
271 | |||
204 | assert repo_perms(self.u3)[repo_name] == new_perm_gr |
|
272 | assert repo_perms(self.u3)[repo_name] == new_perm_gr | |
205 | assert group_perms(self.u3) == {} |
|
273 | assert group_perms(self.u3) == {} | |
206 |
|
274 | |||
207 | def test_propagated_permission_from_users_group_lower_weight( |
|
275 | def test_propagated_permission_from_users_group_lower_weight( | |
208 | self, repo_name): |
|
276 | self, repo_name): | |
209 | # make group with user |
|
277 | # make group with user | |
210 | self.ug1 = fixture.create_user_group('G1') |
|
278 | self.ug1 = fixture.create_user_group('G1') | |
211 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
279 | UserGroupModel().add_user_to_group(self.ug1, self.u1) | |
212 |
|
280 | |||
213 | # set an explicit (higher) permission for the user |
|
281 | # set an explicit (higher) permission for the user | |
214 | new_perm_h = 'repository.write' |
|
282 | new_perm_h = 'repository.write' | |
215 | RepoModel().grant_user_permission( |
|
283 | RepoModel().grant_user_permission( | |
216 | repo=repo_name, user=self.u1, perm=new_perm_h) |
|
284 | repo=repo_name, user=self.u1, perm=new_perm_h) | |
217 | Session().commit() |
|
285 | Session().commit() | |
218 |
|
286 | |||
219 | assert repo_perms(self.u1)[repo_name] == new_perm_h |
|
287 | assert repo_perms(self.u1)[repo_name] == new_perm_h | |
220 |
|
288 | |||
221 | # grant perm for group; this should NOT override the user's permission |
|
289 | # grant perm for group; this should NOT override the user's permission | |
222 | # since it's lower than the one already granted |
|
290 | # since it's lower than the one already granted | |
223 | new_perm_l = 'repository.read' |
|
291 | new_perm_l = 'repository.read' | |
224 | RepoModel().grant_user_group_permission( |
|
292 | RepoModel().grant_user_group_permission( | |
225 | repo=repo_name, group_name=self.ug1, perm=new_perm_l) |
|
293 | repo=repo_name, group_name=self.ug1, perm=new_perm_l) | |
226 | Session().commit() |
|
294 | Session().commit() | |
227 |
|
295 | |||
228 | assert repo_perms(self.u1)[repo_name] == new_perm_h |
|
296 | assert repo_perms(self.u1)[repo_name] == new_perm_h | |
229 | assert group_perms(self.u1) == {} |
|
297 | assert group_perms(self.u1) == {} | |
230 |
|
298 | |||
231 | def test_repo_in_group_permissions(self): |
|
299 | def test_repo_in_group_permissions(self): | |
232 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) |
|
300 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) | |
233 | self.g2 = fixture.create_repo_group('group2', skip_if_exists=True) |
|
301 | self.g2 = fixture.create_repo_group('group2', skip_if_exists=True) | |
234 | # both perms should be read ! |
|
302 | # both perms should be read ! | |
235 | assert group_perms(self.u1) == \ |
|
303 | assert group_perms(self.u1) == \ | |
236 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
304 | {u'group1': u'group.read', u'group2': u'group.read'} | |
237 |
|
305 | |||
238 | assert group_perms(self.anon) == \ |
|
306 | assert group_perms(self.anon) == \ | |
239 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
307 | {u'group1': u'group.read', u'group2': u'group.read'} | |
240 |
|
308 | |||
241 | # Change perms to none for both groups |
|
309 | # Change perms to none for both groups | |
242 | RepoGroupModel().grant_user_permission( |
|
310 | RepoGroupModel().grant_user_permission( | |
243 | repo_group=self.g1, user=self.anon, perm='group.none') |
|
311 | repo_group=self.g1, user=self.anon, perm='group.none') | |
244 | RepoGroupModel().grant_user_permission( |
|
312 | RepoGroupModel().grant_user_permission( | |
245 | repo_group=self.g2, user=self.anon, perm='group.none') |
|
313 | repo_group=self.g2, user=self.anon, perm='group.none') | |
246 |
|
314 | |||
247 | assert group_perms(self.u1) == \ |
|
315 | assert group_perms(self.u1) == \ | |
248 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
316 | {u'group1': u'group.none', u'group2': u'group.none'} | |
249 | assert group_perms(self.anon) == \ |
|
317 | assert group_perms(self.anon) == \ | |
250 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
318 | {u'group1': u'group.none', u'group2': u'group.none'} | |
251 |
|
319 | |||
252 | # add repo to group |
|
320 | # add repo to group | |
253 | name = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm']) |
|
321 | name = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm']) | |
254 | self.test_repo = fixture.create_repo(name=name, |
|
322 | self.test_repo = fixture.create_repo(name=name, | |
255 | repo_type='hg', |
|
323 | repo_type='hg', | |
256 | repo_group=self.g1, |
|
324 | repo_group=self.g1, | |
257 | cur_user=self.u1,) |
|
325 | cur_user=self.u1,) | |
258 |
|
326 | |||
259 | assert group_perms(self.u1) == \ |
|
327 | assert group_perms(self.u1) == \ | |
260 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
328 | {u'group1': u'group.none', u'group2': u'group.none'} | |
261 | assert group_perms(self.anon) == \ |
|
329 | assert group_perms(self.anon) == \ | |
262 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
330 | {u'group1': u'group.none', u'group2': u'group.none'} | |
263 |
|
331 | |||
264 | # grant permission for u2 ! |
|
332 | # grant permission for u2 ! | |
265 | RepoGroupModel().grant_user_permission( |
|
333 | RepoGroupModel().grant_user_permission( | |
266 | repo_group=self.g1, user=self.u2, perm='group.read') |
|
334 | repo_group=self.g1, user=self.u2, perm='group.read') | |
267 | RepoGroupModel().grant_user_permission( |
|
335 | RepoGroupModel().grant_user_permission( | |
268 | repo_group=self.g2, user=self.u2, perm='group.read') |
|
336 | repo_group=self.g2, user=self.u2, perm='group.read') | |
269 | Session().commit() |
|
337 | Session().commit() | |
270 | assert self.u1 != self.u2 |
|
338 | assert self.u1 != self.u2 | |
271 |
|
339 | |||
272 | # u1 and anon should not have changed perms, while u2 should ! |
|
340 | # u1 and anon should not have changed perms, while u2 should ! | |
273 | assert group_perms(self.u1) == \ |
|
341 | assert group_perms(self.u1) == \ | |
274 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
342 | {u'group1': u'group.none', u'group2': u'group.none'} | |
275 | assert group_perms(self.u2) == \ |
|
343 | assert group_perms(self.u2) == \ | |
276 | {u'group1': u'group.read', u'group2': u'group.read'} |
|
344 | {u'group1': u'group.read', u'group2': u'group.read'} | |
277 | assert group_perms(self.anon) == \ |
|
345 | assert group_perms(self.anon) == \ | |
278 | {u'group1': u'group.none', u'group2': u'group.none'} |
|
346 | {u'group1': u'group.none', u'group2': u'group.none'} | |
279 |
|
347 | |||
280 | def test_repo_group_user_as_user_group_member(self): |
|
348 | def test_repo_group_user_as_user_group_member(self): | |
281 | # create Group1 |
|
349 | # create Group1 | |
282 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) |
|
350 | self.g1 = fixture.create_repo_group('group1', skip_if_exists=True) | |
283 | assert group_perms(self.anon) == {u'group1': u'group.read'} |
|
351 | assert group_perms(self.anon) == {u'group1': u'group.read'} | |
284 |
|
352 | |||
285 | # set default permission to none |
|
353 | # set default permission to none | |
286 | RepoGroupModel().grant_user_permission( |
|
354 | RepoGroupModel().grant_user_permission( | |
287 | repo_group=self.g1, user=self.anon, perm='group.none') |
|
355 | repo_group=self.g1, user=self.anon, perm='group.none') | |
288 | Session().commit() |
|
356 | Session().commit() | |
289 |
|
357 | |||
290 | # make group |
|
358 | # make group | |
291 | self.ug1 = fixture.create_user_group('G1') |
|
359 | self.ug1 = fixture.create_user_group('G1') | |
292 | # add user to group |
|
360 | # add user to group | |
293 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
361 | UserGroupModel().add_user_to_group(self.ug1, self.u1) | |
294 | Session().commit() |
|
362 | Session().commit() | |
295 |
|
363 | |||
296 | # check if user is in the group |
|
364 | # check if user is in the group | |
297 | ug1 = UserGroupModel().get(self.ug1.users_group_id) |
|
365 | ug1 = UserGroupModel().get(self.ug1.users_group_id) | |
298 | members = [x.user_id for x in ug1.members] |
|
366 | members = [x.user_id for x in ug1.members] | |
299 | assert members == [self.u1.user_id] |
|
367 | assert members == [self.u1.user_id] | |
300 | # add some user to that group |
|
368 | # add some user to that group | |
301 |
|
369 | |||
302 | # check his permissions |
|
370 | # check his permissions | |
303 | assert group_perms(self.anon) == {u'group1': u'group.none'} |
|
371 | assert group_perms(self.anon) == {u'group1': u'group.none'} | |
304 | assert group_perms(self.u1) == {u'group1': u'group.none'} |
|
372 | assert group_perms(self.u1) == {u'group1': u'group.none'} | |
305 |
|
373 | |||
306 | # grant ug1 read permissions for the repo group |
|
374 | # grant ug1 read permissions for the repo group | |
307 | RepoGroupModel().grant_user_group_permission( |
|
375 | RepoGroupModel().grant_user_group_permission( | |
308 | repo_group=self.g1, group_name=self.ug1, perm='group.read') |
|
376 | repo_group=self.g1, group_name=self.ug1, perm='group.read') | |
309 | Session().commit() |
|
377 | Session().commit() | |
310 |
|
378 | |||
311 | # check if the permission was set in the database |
|
379 | # check if the permission was set in the database | |
312 | obj = Session().query(UserGroupRepoGroupToPerm)\ |
|
380 | obj = Session().query(UserGroupRepoGroupToPerm)\ | |
313 | .filter(UserGroupRepoGroupToPerm.group == self.g1)\ |
|
381 | .filter(UserGroupRepoGroupToPerm.group == self.g1)\ | |
314 | .filter(UserGroupRepoGroupToPerm.users_group == self.ug1)\ |
|
382 | .filter(UserGroupRepoGroupToPerm.users_group == self.ug1)\ | |
315 | .scalar() |
|
383 | .scalar() | |
316 | assert obj.permission.permission_name == 'group.read' |
|
384 | assert obj.permission.permission_name == 'group.read' | |
317 |
|
385 | |||
318 | assert group_perms(self.anon) == {u'group1': u'group.none'} |
|
386 | assert group_perms(self.anon) == {u'group1': u'group.none'} | |
319 | assert group_perms(self.u1) == {u'group1': u'group.read'} |
|
387 | assert group_perms(self.u1) == {u'group1': u'group.read'} | |
320 |
|
388 | |||
321 | def test_inherited_permissions_from_default_on_user_enabled(self): |
|
389 | def test_inherited_permissions_from_default_on_user_enabled(self): | |
322 | # enable fork and create on default user |
|
390 | # enable fork and create on default user | |
323 | _form_result = { |
|
391 | _form_result = { | |
324 | 'default_repo_create': 'hg.create.repository', |
|
392 | 'default_repo_create': 'hg.create.repository', | |
325 | 'default_fork_create': 'hg.fork.repository' |
|
393 | 'default_fork_create': 'hg.fork.repository' | |
326 | } |
|
394 | } | |
327 | PermissionModel().set_new_user_perms( |
|
395 | PermissionModel().set_new_user_perms( | |
328 | User.get_default_user(), _form_result) |
|
396 | User.get_default_user(), _form_result) | |
329 | Session().commit() |
|
397 | Session().commit() | |
330 |
|
398 | |||
331 | # make sure inherit flag is turned on |
|
399 | # make sure inherit flag is turned on | |
332 | self.u1.inherit_default_permissions = True |
|
400 | self.u1.inherit_default_permissions = True | |
333 | Session().commit() |
|
401 | Session().commit() | |
334 |
|
402 | |||
335 | # this user will have inherited permissions from default user |
|
403 | # this user will have inherited permissions from default user | |
336 | assert global_perms(self.u1) == default_perms() |
|
404 | assert global_perms(self.u1) == default_perms() | |
337 |
|
405 | |||
338 | def test_inherited_permissions_from_default_on_user_disabled(self): |
|
406 | def test_inherited_permissions_from_default_on_user_disabled(self): | |
339 | # disable fork and create on default user |
|
407 | # disable fork and create on default user | |
340 | _form_result = { |
|
408 | _form_result = { | |
341 | 'default_repo_create': 'hg.create.none', |
|
409 | 'default_repo_create': 'hg.create.none', | |
342 | 'default_fork_create': 'hg.fork.none' |
|
410 | 'default_fork_create': 'hg.fork.none' | |
343 | } |
|
411 | } | |
344 | PermissionModel().set_new_user_perms( |
|
412 | PermissionModel().set_new_user_perms( | |
345 | User.get_default_user(), _form_result) |
|
413 | User.get_default_user(), _form_result) | |
346 | Session().commit() |
|
414 | Session().commit() | |
347 |
|
415 | |||
348 | # make sure inherit flag is turned on |
|
416 | # make sure inherit flag is turned on | |
349 | self.u1.inherit_default_permissions = True |
|
417 | self.u1.inherit_default_permissions = True | |
350 | Session().commit() |
|
418 | Session().commit() | |
351 |
|
419 | |||
352 | # this user will have inherited permissions from default user |
|
420 | # this user will have inherited permissions from default user | |
353 | expected_perms = default_perms( |
|
421 | expected_perms = default_perms( | |
354 | added=['hg.create.none', 'hg.fork.none'], |
|
422 | added=['hg.create.none', 'hg.fork.none'], | |
355 | removed=['hg.create.repository', 'hg.fork.repository']) |
|
423 | removed=['hg.create.repository', 'hg.fork.repository']) | |
356 | assert global_perms(self.u1) == expected_perms |
|
424 | assert global_perms(self.u1) == expected_perms | |
357 |
|
425 | |||
358 | def test_non_inherited_permissions_from_default_on_user_enabled(self): |
|
426 | def test_non_inherited_permissions_from_default_on_user_enabled(self): | |
359 | user_model = UserModel() |
|
427 | user_model = UserModel() | |
360 | # enable fork and create on default user |
|
428 | # enable fork and create on default user | |
361 | usr = User.DEFAULT_USER |
|
429 | usr = User.DEFAULT_USER | |
362 | user_model.revoke_perm(usr, 'hg.create.none') |
|
430 | user_model.revoke_perm(usr, 'hg.create.none') | |
363 | user_model.grant_perm(usr, 'hg.create.repository') |
|
431 | user_model.grant_perm(usr, 'hg.create.repository') | |
364 | user_model.revoke_perm(usr, 'hg.fork.none') |
|
432 | user_model.revoke_perm(usr, 'hg.fork.none') | |
365 | user_model.grant_perm(usr, 'hg.fork.repository') |
|
433 | user_model.grant_perm(usr, 'hg.fork.repository') | |
366 |
|
434 | |||
367 | # disable global perms on specific user |
|
435 | # disable global perms on specific user | |
368 | user_model.revoke_perm(self.u1, 'hg.create.repository') |
|
436 | user_model.revoke_perm(self.u1, 'hg.create.repository') | |
369 | user_model.grant_perm(self.u1, 'hg.create.none') |
|
437 | user_model.grant_perm(self.u1, 'hg.create.none') | |
370 | user_model.revoke_perm(self.u1, 'hg.fork.repository') |
|
438 | user_model.revoke_perm(self.u1, 'hg.fork.repository') | |
371 | user_model.grant_perm(self.u1, 'hg.fork.none') |
|
439 | user_model.grant_perm(self.u1, 'hg.fork.none') | |
372 |
|
440 | |||
373 | # TODO(marcink): check branch permissions now ? |
|
441 | # TODO(marcink): check branch permissions now ? | |
374 |
|
442 | |||
375 | # make sure inherit flag is turned off |
|
443 | # make sure inherit flag is turned off | |
376 | self.u1.inherit_default_permissions = False |
|
444 | self.u1.inherit_default_permissions = False | |
377 | Session().commit() |
|
445 | Session().commit() | |
378 |
|
446 | |||
379 | # this user will have non-inherited permissions from his |
|
447 | # this user will have non-inherited permissions from his | |
380 | # explicitly set permissions |
|
448 | # explicitly set permissions | |
381 | assert global_perms(self.u1) == { |
|
449 | assert global_perms(self.u1) == { | |
382 | 'hg.create.none', |
|
450 | 'hg.create.none', | |
383 | 'hg.fork.none', |
|
451 | 'hg.fork.none', | |
384 | 'hg.register.manual_activate', |
|
452 | 'hg.register.manual_activate', | |
385 | 'hg.password_reset.enabled', |
|
453 | 'hg.password_reset.enabled', | |
386 | 'hg.extern_activate.auto', |
|
454 | 'hg.extern_activate.auto', | |
387 | 'repository.read', |
|
455 | 'repository.read', | |
388 | 'group.read', |
|
456 | 'group.read', | |
389 | 'usergroup.read', |
|
457 | 'usergroup.read', | |
390 | 'branch.push_force', |
|
458 | 'branch.push_force', | |
391 | } |
|
459 | } | |
392 |
|
460 | |||
393 | def test_non_inherited_permissions_from_default_on_user_disabled(self): |
|
461 | def test_non_inherited_permissions_from_default_on_user_disabled(self): | |
394 | user_model = UserModel() |
|
462 | user_model = UserModel() | |
395 | # disable fork and create on default user |
|
463 | # disable fork and create on default user | |
396 | usr = User.DEFAULT_USER |
|
464 | usr = User.DEFAULT_USER | |
397 | user_model.revoke_perm(usr, 'hg.create.repository') |
|
465 | user_model.revoke_perm(usr, 'hg.create.repository') | |
398 | user_model.grant_perm(usr, 'hg.create.none') |
|
466 | user_model.grant_perm(usr, 'hg.create.none') | |
399 | user_model.revoke_perm(usr, 'hg.fork.repository') |
|
467 | user_model.revoke_perm(usr, 'hg.fork.repository') | |
400 | user_model.grant_perm(usr, 'hg.fork.none') |
|
468 | user_model.grant_perm(usr, 'hg.fork.none') | |
401 |
|
469 | |||
402 | # enable global perms on specific user |
|
470 | # enable global perms on specific user | |
403 | user_model.revoke_perm(self.u1, 'hg.create.none') |
|
471 | user_model.revoke_perm(self.u1, 'hg.create.none') | |
404 | user_model.grant_perm(self.u1, 'hg.create.repository') |
|
472 | user_model.grant_perm(self.u1, 'hg.create.repository') | |
405 | user_model.revoke_perm(self.u1, 'hg.fork.none') |
|
473 | user_model.revoke_perm(self.u1, 'hg.fork.none') | |
406 | user_model.grant_perm(self.u1, 'hg.fork.repository') |
|
474 | user_model.grant_perm(self.u1, 'hg.fork.repository') | |
407 |
|
475 | |||
408 | # make sure inherit flag is turned off |
|
476 | # make sure inherit flag is turned off | |
409 | self.u1.inherit_default_permissions = False |
|
477 | self.u1.inherit_default_permissions = False | |
410 | Session().commit() |
|
478 | Session().commit() | |
411 |
|
479 | |||
412 | # TODO(marcink): check branch perms |
|
480 | # TODO(marcink): check branch perms | |
413 |
|
481 | |||
414 | # this user will have non-inherited permissions from his |
|
482 | # this user will have non-inherited permissions from his | |
415 | # explicitly set permissions |
|
483 | # explicitly set permissions | |
416 | assert global_perms(self.u1) == { |
|
484 | assert global_perms(self.u1) == { | |
417 | 'hg.create.repository', |
|
485 | 'hg.create.repository', | |
418 | 'hg.fork.repository', |
|
486 | 'hg.fork.repository', | |
419 | 'hg.register.manual_activate', |
|
487 | 'hg.register.manual_activate', | |
420 | 'hg.password_reset.enabled', |
|
488 | 'hg.password_reset.enabled', | |
421 | 'hg.extern_activate.auto', |
|
489 | 'hg.extern_activate.auto', | |
422 | 'repository.read', |
|
490 | 'repository.read', | |
423 | 'group.read', |
|
491 | 'group.read', | |
424 | 'usergroup.read', |
|
492 | 'usergroup.read', | |
425 | 'branch.push_force', |
|
493 | 'branch.push_force', | |
426 | } |
|
494 | } | |
427 |
|
495 | |||
428 | @pytest.mark.parametrize('perm, expected_perm', [ |
|
496 | @pytest.mark.parametrize('perm, expected_perm', [ | |
429 | ('hg.inherit_default_perms.false', 'repository.none', ), |
|
497 | ('hg.inherit_default_perms.false', 'repository.none', ), | |
430 | ('hg.inherit_default_perms.true', 'repository.read', ), |
|
498 | ('hg.inherit_default_perms.true', 'repository.read', ), | |
431 | ]) |
|
499 | ]) | |
432 | def test_inherited_permissions_on_objects(self, perm, expected_perm): |
|
500 | def test_inherited_permissions_on_objects(self, perm, expected_perm): | |
433 | _form_result = { |
|
501 | _form_result = { | |
434 | 'default_inherit_default_permissions': perm, |
|
502 | 'default_inherit_default_permissions': perm, | |
435 | } |
|
503 | } | |
436 | PermissionModel().set_new_user_perms( |
|
504 | PermissionModel().set_new_user_perms( | |
437 | User.get_default_user(), _form_result) |
|
505 | User.get_default_user(), _form_result) | |
438 | Session().commit() |
|
506 | Session().commit() | |
439 |
|
507 | |||
440 | # make sure inherit flag is turned on |
|
508 | # make sure inherit flag is turned on | |
441 | self.u1.inherit_default_permissions = True |
|
509 | self.u1.inherit_default_permissions = True | |
442 | Session().commit() |
|
510 | Session().commit() | |
443 |
|
511 | |||
444 | # TODO(marcink): check branch perms |
|
512 | # TODO(marcink): check branch perms | |
445 |
|
513 | |||
446 | # this user will have inherited permissions from default user |
|
514 | # this user will have inherited permissions from default user | |
447 | assert global_perms(self.u1) == { |
|
515 | assert global_perms(self.u1) == { | |
448 | 'hg.create.none', |
|
516 | 'hg.create.none', | |
449 | 'hg.fork.none', |
|
517 | 'hg.fork.none', | |
450 | 'hg.register.manual_activate', |
|
518 | 'hg.register.manual_activate', | |
451 | 'hg.password_reset.enabled', |
|
519 | 'hg.password_reset.enabled', | |
452 | 'hg.extern_activate.auto', |
|
520 | 'hg.extern_activate.auto', | |
453 | 'repository.read', |
|
521 | 'repository.read', | |
454 | 'group.read', |
|
522 | 'group.read', | |
455 | 'usergroup.read', |
|
523 | 'usergroup.read', | |
456 | 'branch.push_force', |
|
524 | 'branch.push_force', | |
457 | 'hg.create.write_on_repogroup.true', |
|
525 | 'hg.create.write_on_repogroup.true', | |
458 | 'hg.usergroup.create.false', |
|
526 | 'hg.usergroup.create.false', | |
459 | 'hg.repogroup.create.false', |
|
527 | 'hg.repogroup.create.false', | |
460 | perm |
|
528 | perm | |
461 | } |
|
529 | } | |
462 |
|
530 | |||
463 | assert set(repo_perms(self.u1).values()) == set([expected_perm]) |
|
531 | assert set(repo_perms(self.u1).values()) == set([expected_perm]) | |
464 |
|
532 | |||
465 | def test_repo_owner_permissions_not_overwritten_by_group(self): |
|
533 | def test_repo_owner_permissions_not_overwritten_by_group(self): | |
466 | # create repo as USER |
|
534 | # create repo as USER | |
467 | self.test_repo = fixture.create_repo(name='myownrepo', |
|
535 | self.test_repo = fixture.create_repo(name='myownrepo', | |
468 | repo_type='hg', |
|
536 | repo_type='hg', | |
469 | cur_user=self.u1) |
|
537 | cur_user=self.u1) | |
470 |
|
538 | |||
471 | # as the owner, he has admin permissions |
|
539 | # as the owner, he has admin permissions | |
472 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
540 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' | |
473 |
|
541 | |||
474 | # set his permission as user group, he should still be admin |
|
542 | # set his permission as user group, he should still be admin | |
475 | self.ug1 = fixture.create_user_group('G1') |
|
543 | self.ug1 = fixture.create_user_group('G1') | |
476 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
544 | UserGroupModel().add_user_to_group(self.ug1, self.u1) | |
477 | RepoModel().grant_user_group_permission( |
|
545 | RepoModel().grant_user_group_permission( | |
478 | self.test_repo, |
|
546 | self.test_repo, | |
479 | group_name=self.ug1, |
|
547 | group_name=self.ug1, | |
480 | perm='repository.none') |
|
548 | perm='repository.none') | |
481 | Session().commit() |
|
549 | Session().commit() | |
482 |
|
550 | |||
483 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
551 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' | |
484 |
|
552 | |||
485 | def test_repo_owner_permissions_not_overwritten_by_others(self): |
|
553 | def test_repo_owner_permissions_not_overwritten_by_others(self): | |
486 | # create repo as USER |
|
554 | # create repo as USER | |
487 | self.test_repo = fixture.create_repo(name='myownrepo', |
|
555 | self.test_repo = fixture.create_repo(name='myownrepo', | |
488 | repo_type='hg', |
|
556 | repo_type='hg', | |
489 | cur_user=self.u1) |
|
557 | cur_user=self.u1) | |
490 |
|
558 | |||
491 | # as the owner, he has admin permissions |
|
559 | # as the owner, he has admin permissions | |
492 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
560 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' | |
493 |
|
561 | |||
494 | # set his permission as user, he should still be admin |
|
562 | # set his permission as user, he should still be admin | |
495 | RepoModel().grant_user_permission( |
|
563 | RepoModel().grant_user_permission( | |
496 | self.test_repo, user=self.u1, perm='repository.none') |
|
564 | self.test_repo, user=self.u1, perm='repository.none') | |
497 | Session().commit() |
|
565 | Session().commit() | |
498 |
|
566 | |||
499 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' |
|
567 | assert repo_perms(self.u1)['myownrepo'] == 'repository.admin' | |
500 |
|
568 | |||
501 | def test_repo_group_owner_permissions_not_overwritten_by_group(self): |
|
569 | def test_repo_group_owner_permissions_not_overwritten_by_group(self): | |
502 | # "u1" shall be owner without any special permission assigned |
|
570 | # "u1" shall be owner without any special permission assigned | |
503 | self.g1 = fixture.create_repo_group('test1') |
|
571 | self.g1 = fixture.create_repo_group('test1') | |
504 |
|
572 | |||
505 | # Make user group and grant a permission to user group |
|
573 | # Make user group and grant a permission to user group | |
506 | self.ug1 = fixture.create_user_group('G1') |
|
574 | self.ug1 = fixture.create_user_group('G1') | |
507 | UserGroupModel().add_user_to_group(self.ug1, self.u1) |
|
575 | UserGroupModel().add_user_to_group(self.ug1, self.u1) | |
508 | RepoGroupModel().grant_user_group_permission( |
|
576 | RepoGroupModel().grant_user_group_permission( | |
509 | repo_group=self.g1, group_name=self.ug1, perm='group.write') |
|
577 | repo_group=self.g1, group_name=self.ug1, perm='group.write') | |
510 | Session().commit() |
|
578 | Session().commit() | |
511 |
|
579 | |||
512 | # Verify that user does not get any special permission if he is not |
|
580 | # Verify that user does not get any special permission if he is not | |
513 | # owner |
|
581 | # owner | |
514 | assert group_perms(self.u1) == {'test1': 'group.write'} |
|
582 | assert group_perms(self.u1) == {'test1': 'group.write'} | |
515 |
|
583 | |||
516 | # Make him owner of the repo group |
|
584 | # Make him owner of the repo group | |
517 | self.g1.user = self.u1 |
|
585 | self.g1.user = self.u1 | |
518 | assert group_perms(self.u1) == {'test1': 'group.admin'} |
|
586 | assert group_perms(self.u1) == {'test1': 'group.admin'} | |
519 |
|
587 | |||
520 | def test_repo_group_owner_permissions_not_overwritten_by_others(self): |
|
588 | def test_repo_group_owner_permissions_not_overwritten_by_others(self): | |
521 | # "u1" shall be owner without any special permission assigned |
|
589 | # "u1" shall be owner without any special permission assigned | |
522 | self.g1 = fixture.create_repo_group('test1') |
|
590 | self.g1 = fixture.create_repo_group('test1') | |
523 | RepoGroupModel().grant_user_permission( |
|
591 | RepoGroupModel().grant_user_permission( | |
524 | repo_group=self.g1, user=self.u1, perm='group.write') |
|
592 | repo_group=self.g1, user=self.u1, perm='group.write') | |
525 | Session().commit() |
|
593 | Session().commit() | |
526 |
|
594 | |||
527 | # Verify that user does not get any special permission if he is not |
|
595 | # Verify that user does not get any special permission if he is not | |
528 | # owner |
|
596 | # owner | |
529 | assert group_perms(self.u1) == {'test1': 'group.write'} |
|
597 | assert group_perms(self.u1) == {'test1': 'group.write'} | |
530 |
|
598 | |||
531 | # Make him owner of the repo group |
|
599 | # Make him owner of the repo group | |
532 | self.g1.user = self.u1 |
|
600 | self.g1.user = self.u1 | |
533 | assert group_perms(self.u1) == {u'test1': 'group.admin'} |
|
601 | assert group_perms(self.u1) == {u'test1': 'group.admin'} | |
534 |
|
602 | |||
535 | def assert_user_perm_equal( |
|
603 | def assert_user_perm_equal( | |
536 | self, user, change_factor=0, compare_keys=None): |
|
604 | self, user, change_factor=0, compare_keys=None): | |
537 | perms = UserToPerm.query().filter(UserToPerm.user == user).all() |
|
605 | perms = UserToPerm.query().filter(UserToPerm.user == user).all() | |
538 | assert len(perms) == \ |
|
606 | assert len(perms) == \ | |
539 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor |
|
607 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor | |
540 | if compare_keys: |
|
608 | if compare_keys: | |
541 | assert set( |
|
609 | assert set( | |
542 | x.permissions.permission_name for x in perms) == compare_keys |
|
610 | x.permissions.permission_name for x in perms) == compare_keys | |
543 |
|
611 | |||
544 | def assert_def_user_group_perm_equal( |
|
612 | def assert_def_user_group_perm_equal( | |
545 | self, user_group, change_factor=0, compare_keys=None): |
|
613 | self, user_group, change_factor=0, compare_keys=None): | |
546 | perms = UserGroupToPerm.query().filter( |
|
614 | perms = UserGroupToPerm.query().filter( | |
547 | UserGroupToPerm.users_group == user_group).all() |
|
615 | UserGroupToPerm.users_group == user_group).all() | |
548 | assert len(perms) == \ |
|
616 | assert len(perms) == \ | |
549 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor |
|
617 | len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor | |
550 | if compare_keys: |
|
618 | if compare_keys: | |
551 | assert set( |
|
619 | assert set( | |
552 | x.permissions.permission_name for x in perms) == compare_keys |
|
620 | x.permissions.permission_name for x in perms) == compare_keys | |
553 |
|
621 | |||
554 | def test_set_default_permissions(self): |
|
622 | def test_set_default_permissions(self): | |
555 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
623 | PermissionModel().create_default_user_permissions(user=self.u1) | |
556 | self.assert_user_perm_equal(user=self.u1) |
|
624 | self.assert_user_perm_equal(user=self.u1) | |
557 |
|
625 | |||
558 | def test_set_default_permissions_after_one_is_missing(self): |
|
626 | def test_set_default_permissions_after_one_is_missing(self): | |
559 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
627 | PermissionModel().create_default_user_permissions(user=self.u1) | |
560 | self.assert_user_perm_equal(user=self.u1) |
|
628 | self.assert_user_perm_equal(user=self.u1) | |
561 | # now we delete one, it should be re-created after another call |
|
629 | # now we delete one, it should be re-created after another call | |
562 | perms = UserToPerm.query().filter(UserToPerm.user == self.u1).all() |
|
630 | perms = UserToPerm.query().filter(UserToPerm.user == self.u1).all() | |
563 | Session().delete(perms[0]) |
|
631 | Session().delete(perms[0]) | |
564 | Session().commit() |
|
632 | Session().commit() | |
565 |
|
633 | |||
566 | self.assert_user_perm_equal(user=self.u1, change_factor=-1) |
|
634 | self.assert_user_perm_equal(user=self.u1, change_factor=-1) | |
567 |
|
635 | |||
568 | # create the missing one! |
|
636 | # create the missing one! | |
569 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
637 | PermissionModel().create_default_user_permissions(user=self.u1) | |
570 | self.assert_user_perm_equal(user=self.u1) |
|
638 | self.assert_user_perm_equal(user=self.u1) | |
571 |
|
639 | |||
572 | @pytest.mark.parametrize("perm, modify_to", [ |
|
640 | @pytest.mark.parametrize("perm, modify_to", [ | |
573 | ('repository.read', 'repository.none'), |
|
641 | ('repository.read', 'repository.none'), | |
574 | ('group.read', 'group.none'), |
|
642 | ('group.read', 'group.none'), | |
575 | ('usergroup.read', 'usergroup.none'), |
|
643 | ('usergroup.read', 'usergroup.none'), | |
576 | ('hg.create.repository', 'hg.create.none'), |
|
644 | ('hg.create.repository', 'hg.create.none'), | |
577 | ('hg.fork.repository', 'hg.fork.none'), |
|
645 | ('hg.fork.repository', 'hg.fork.none'), | |
578 | ('hg.register.manual_activate', 'hg.register.auto_activate',) |
|
646 | ('hg.register.manual_activate', 'hg.register.auto_activate',) | |
579 | ]) |
|
647 | ]) | |
580 | def test_set_default_permissions_after_modification(self, perm, modify_to): |
|
648 | def test_set_default_permissions_after_modification(self, perm, modify_to): | |
581 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
649 | PermissionModel().create_default_user_permissions(user=self.u1) | |
582 | self.assert_user_perm_equal(user=self.u1) |
|
650 | self.assert_user_perm_equal(user=self.u1) | |
583 |
|
651 | |||
584 | old = Permission.get_by_key(perm) |
|
652 | old = Permission.get_by_key(perm) | |
585 | new = Permission.get_by_key(modify_to) |
|
653 | new = Permission.get_by_key(modify_to) | |
586 | assert old is not None |
|
654 | assert old is not None | |
587 | assert new is not None |
|
655 | assert new is not None | |
588 |
|
656 | |||
589 | # now modify permissions |
|
657 | # now modify permissions | |
590 | p = UserToPerm.query().filter( |
|
658 | p = UserToPerm.query().filter( | |
591 | UserToPerm.user == self.u1).filter( |
|
659 | UserToPerm.user == self.u1).filter( | |
592 | UserToPerm.permission == old).one() |
|
660 | UserToPerm.permission == old).one() | |
593 | p.permission = new |
|
661 | p.permission = new | |
594 | Session().add(p) |
|
662 | Session().add(p) | |
595 | Session().commit() |
|
663 | Session().commit() | |
596 |
|
664 | |||
597 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
665 | PermissionModel().create_default_user_permissions(user=self.u1) | |
598 | self.assert_user_perm_equal(user=self.u1) |
|
666 | self.assert_user_perm_equal(user=self.u1) | |
599 |
|
667 | |||
600 | def test_clear_user_perms(self): |
|
668 | def test_clear_user_perms(self): | |
601 | PermissionModel().create_default_user_permissions(user=self.u1) |
|
669 | PermissionModel().create_default_user_permissions(user=self.u1) | |
602 | self.assert_user_perm_equal(user=self.u1) |
|
670 | self.assert_user_perm_equal(user=self.u1) | |
603 |
|
671 | |||
604 | # now clear permissions |
|
672 | # now clear permissions | |
605 | cleared = PermissionModel()._clear_user_perms(self.u1.user_id) |
|
673 | cleared = PermissionModel()._clear_user_perms(self.u1.user_id) | |
606 | self.assert_user_perm_equal(user=self.u1, |
|
674 | self.assert_user_perm_equal(user=self.u1, | |
607 | change_factor=len(cleared)*-1) |
|
675 | change_factor=len(cleared)*-1) | |
608 |
|
676 | |||
609 | def test_clear_user_group_perms(self): |
|
677 | def test_clear_user_group_perms(self): | |
610 | self.ug1 = fixture.create_user_group('G1') |
|
678 | self.ug1 = fixture.create_user_group('G1') | |
611 | PermissionModel().create_default_user_group_permissions( |
|
679 | PermissionModel().create_default_user_group_permissions( | |
612 | user_group=self.ug1) |
|
680 | user_group=self.ug1) | |
613 | self.assert_def_user_group_perm_equal(user_group=self.ug1) |
|
681 | self.assert_def_user_group_perm_equal(user_group=self.ug1) | |
614 |
|
682 | |||
615 | # now clear permissions |
|
683 | # now clear permissions | |
616 | cleared = PermissionModel()._clear_user_group_perms( |
|
684 | cleared = PermissionModel()._clear_user_group_perms( | |
617 | self.ug1.users_group_id) |
|
685 | self.ug1.users_group_id) | |
618 | self.assert_def_user_group_perm_equal(user_group=self.ug1, |
|
686 | self.assert_def_user_group_perm_equal(user_group=self.ug1, | |
619 | change_factor=len(cleared)*-1) |
|
687 | change_factor=len(cleared)*-1) | |
620 |
|
688 | |||
621 | @pytest.mark.parametrize("form_result", [ |
|
689 | @pytest.mark.parametrize("form_result", [ | |
622 | {}, |
|
690 | {}, | |
623 | {'default_repo_create': 'hg.create.repository'}, |
|
691 | {'default_repo_create': 'hg.create.repository'}, | |
624 | {'default_repo_create': 'hg.create.repository', |
|
692 | {'default_repo_create': 'hg.create.repository', | |
625 | 'default_repo_perm': 'repository.read'}, |
|
693 | 'default_repo_perm': 'repository.read'}, | |
626 | {'default_repo_create': 'hg.create.none', |
|
694 | {'default_repo_create': 'hg.create.none', | |
627 | 'default_repo_perm': 'repository.write', |
|
695 | 'default_repo_perm': 'repository.write', | |
628 | 'default_fork_create': 'hg.fork.none'}, |
|
696 | 'default_fork_create': 'hg.fork.none'}, | |
629 | ]) |
|
697 | ]) | |
630 | def test_set_new_user_permissions(self, form_result): |
|
698 | def test_set_new_user_permissions(self, form_result): | |
631 | _form_result = {} |
|
699 | _form_result = {} | |
632 | _form_result.update(form_result) |
|
700 | _form_result.update(form_result) | |
633 | PermissionModel().set_new_user_perms(self.u1, _form_result) |
|
701 | PermissionModel().set_new_user_perms(self.u1, _form_result) | |
634 | Session().commit() |
|
702 | Session().commit() | |
635 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) |
|
703 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) | |
636 | - len(form_result.keys())) |
|
704 | - len(form_result.keys())) | |
637 | self.assert_user_perm_equal( |
|
705 | self.assert_user_perm_equal( | |
638 | self.u1, change_factor=change_factor) |
|
706 | self.u1, change_factor=change_factor) | |
639 |
|
707 | |||
640 | @pytest.mark.parametrize("form_result", [ |
|
708 | @pytest.mark.parametrize("form_result", [ | |
641 | {}, |
|
709 | {}, | |
642 | {'default_repo_create': 'hg.create.repository'}, |
|
710 | {'default_repo_create': 'hg.create.repository'}, | |
643 | {'default_repo_create': 'hg.create.repository', |
|
711 | {'default_repo_create': 'hg.create.repository', | |
644 | 'default_repo_perm': 'repository.read'}, |
|
712 | 'default_repo_perm': 'repository.read'}, | |
645 | {'default_repo_create': 'hg.create.none', |
|
713 | {'default_repo_create': 'hg.create.none', | |
646 | 'default_repo_perm': 'repository.write', |
|
714 | 'default_repo_perm': 'repository.write', | |
647 | 'default_fork_create': 'hg.fork.none'}, |
|
715 | 'default_fork_create': 'hg.fork.none'}, | |
648 | ]) |
|
716 | ]) | |
649 | def test_set_new_user_group_permissions(self, form_result): |
|
717 | def test_set_new_user_group_permissions(self, form_result): | |
650 | _form_result = {} |
|
718 | _form_result = {} | |
651 | _form_result.update(form_result) |
|
719 | _form_result.update(form_result) | |
652 | self.ug1 = fixture.create_user_group('G1') |
|
720 | self.ug1 = fixture.create_user_group('G1') | |
653 | PermissionModel().set_new_user_group_perms(self.ug1, _form_result) |
|
721 | PermissionModel().set_new_user_group_perms(self.ug1, _form_result) | |
654 | Session().commit() |
|
722 | Session().commit() | |
655 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) |
|
723 | change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS) | |
656 | - len(form_result.keys())) |
|
724 | - len(form_result.keys())) | |
657 | self.assert_def_user_group_perm_equal( |
|
725 | self.assert_def_user_group_perm_equal( | |
658 | self.ug1, change_factor=change_factor) |
|
726 | self.ug1, change_factor=change_factor) | |
659 |
|
727 | |||
660 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
728 | @pytest.mark.parametrize("group_active, expected_perm", [ | |
661 | (True, 'repository.admin'), |
|
729 | (True, 'repository.admin'), | |
662 | (False, 'repository.read'), |
|
730 | (False, 'repository.read'), | |
663 | ]) |
|
731 | ]) | |
664 | def test_get_default_repo_perms_from_user_group_with_active_group( |
|
732 | def test_get_default_repo_perms_from_user_group_with_active_group( | |
665 | self, backend, user_util, group_active, expected_perm): |
|
733 | self, backend, user_util, group_active, expected_perm): | |
666 | repo = backend.create_repo() |
|
734 | repo = backend.create_repo() | |
667 | user = user_util.create_user() |
|
735 | user = user_util.create_user() | |
668 | user_group = user_util.create_user_group( |
|
736 | user_group = user_util.create_user_group( | |
669 | members=[user], users_group_active=group_active) |
|
737 | members=[user], users_group_active=group_active) | |
670 |
|
738 | |||
671 | user_util.grant_user_group_permission_to_repo( |
|
739 | user_util.grant_user_group_permission_to_repo( | |
672 | repo, user_group, 'repository.admin') |
|
740 | repo, user_group, 'repository.admin') | |
673 | permissions = repo_perms(user) |
|
741 | permissions = repo_perms(user) | |
674 | repo_permission = permissions.get(repo.repo_name) |
|
742 | repo_permission = permissions.get(repo.repo_name) | |
675 | assert repo_permission == expected_perm |
|
743 | assert repo_permission == expected_perm | |
676 |
|
744 | |||
677 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
745 | @pytest.mark.parametrize("group_active, expected_perm", [ | |
678 | (True, 'group.admin'), |
|
746 | (True, 'group.admin'), | |
679 | (False, 'group.read') |
|
747 | (False, 'group.read') | |
680 | ]) |
|
748 | ]) | |
681 | def test_get_default_group_perms_from_user_group_with_active_group( |
|
749 | def test_get_default_group_perms_from_user_group_with_active_group( | |
682 | self, user_util, group_active, expected_perm): |
|
750 | self, user_util, group_active, expected_perm): | |
683 | user = user_util.create_user() |
|
751 | user = user_util.create_user() | |
684 | repo_group = user_util.create_repo_group() |
|
752 | repo_group = user_util.create_repo_group() | |
685 | user_group = user_util.create_user_group( |
|
753 | user_group = user_util.create_user_group( | |
686 | members=[user], users_group_active=group_active) |
|
754 | members=[user], users_group_active=group_active) | |
687 |
|
755 | |||
688 | user_util.grant_user_group_permission_to_repo_group( |
|
756 | user_util.grant_user_group_permission_to_repo_group( | |
689 | repo_group, user_group, 'group.admin') |
|
757 | repo_group, user_group, 'group.admin') | |
690 | permissions = group_perms(user) |
|
758 | permissions = group_perms(user) | |
691 | group_permission = permissions.get(repo_group.name) |
|
759 | group_permission = permissions.get(repo_group.name) | |
692 | assert group_permission == expected_perm |
|
760 | assert group_permission == expected_perm | |
693 |
|
761 | |||
694 | @pytest.mark.parametrize("group_active, expected_perm", [ |
|
762 | @pytest.mark.parametrize("group_active, expected_perm", [ | |
695 | (True, 'usergroup.admin'), |
|
763 | (True, 'usergroup.admin'), | |
696 | (False, 'usergroup.read') |
|
764 | (False, 'usergroup.read') | |
697 | ]) |
|
765 | ]) | |
698 | def test_get_default_user_group_perms_from_user_group_with_active_group( |
|
766 | def test_get_default_user_group_perms_from_user_group_with_active_group( | |
699 | self, user_util, group_active, expected_perm): |
|
767 | self, user_util, group_active, expected_perm): | |
700 | user = user_util.create_user() |
|
768 | user = user_util.create_user() | |
701 | user_group = user_util.create_user_group( |
|
769 | user_group = user_util.create_user_group( | |
702 | members=[user], users_group_active=group_active) |
|
770 | members=[user], users_group_active=group_active) | |
703 | target_user_group = user_util.create_user_group() |
|
771 | target_user_group = user_util.create_user_group() | |
704 |
|
772 | |||
705 | user_util.grant_user_group_permission_to_user_group( |
|
773 | user_util.grant_user_group_permission_to_user_group( | |
706 | target_user_group, user_group, 'usergroup.admin') |
|
774 | target_user_group, user_group, 'usergroup.admin') | |
707 | permissions = user_group_perms(user) |
|
775 | permissions = user_group_perms(user) | |
708 | group_permission = permissions.get(target_user_group.users_group_name) |
|
776 | group_permission = permissions.get(target_user_group.users_group_name) | |
709 | assert group_permission == expected_perm |
|
777 | assert group_permission == expected_perm | |
710 |
|
778 | |||
711 |
|
779 | |||
712 | def repo_perms(user): |
|
780 | def repo_perms(user): | |
713 | auth_user = AuthUser(user_id=user.user_id) |
|
781 | auth_user = AuthUser(user_id=user.user_id) | |
714 | return auth_user.permissions['repositories'] |
|
782 | return auth_user.permissions['repositories'] | |
715 |
|
783 | |||
716 |
|
784 | |||
717 | def branch_perms(user): |
|
785 | def branch_perms(user): | |
718 | auth_user = AuthUser(user_id=user.user_id) |
|
786 | auth_user = AuthUser(user_id=user.user_id) | |
719 | return auth_user.permissions['repository_branches'] |
|
787 | return auth_user.permissions['repository_branches'] | |
720 |
|
788 | |||
721 |
|
789 | |||
722 | def group_perms(user): |
|
790 | def group_perms(user): | |
723 | auth_user = AuthUser(user_id=user.user_id) |
|
791 | auth_user = AuthUser(user_id=user.user_id) | |
724 | return auth_user.permissions['repositories_groups'] |
|
792 | return auth_user.permissions['repositories_groups'] | |
725 |
|
793 | |||
726 |
|
794 | |||
727 | def user_group_perms(user): |
|
795 | def user_group_perms(user): | |
728 | auth_user = AuthUser(user_id=user.user_id) |
|
796 | auth_user = AuthUser(user_id=user.user_id) | |
729 | return auth_user.permissions['user_groups'] |
|
797 | return auth_user.permissions['user_groups'] | |
730 |
|
798 | |||
731 |
|
799 | |||
732 | def global_perms(user): |
|
800 | def global_perms(user): | |
733 | auth_user = AuthUser(user_id=user.user_id) |
|
801 | auth_user = AuthUser(user_id=user.user_id) | |
734 | return auth_user.permissions['global'] |
|
802 | return auth_user.permissions['global'] | |
735 |
|
803 | |||
736 |
|
804 | |||
737 | def default_perms(added=None, removed=None): |
|
805 | def default_perms(added=None, removed=None): | |
738 | expected_perms = set(Permission.DEFAULT_USER_PERMISSIONS) |
|
806 | expected_perms = set(Permission.DEFAULT_USER_PERMISSIONS) | |
739 | if removed: |
|
807 | if removed: | |
740 | expected_perms.difference_update(removed) |
|
808 | expected_perms.difference_update(removed) | |
741 | if added: |
|
809 | if added: | |
742 | expected_perms.update(added) |
|
810 | expected_perms.update(added) | |
743 | return expected_perms |
|
811 | return expected_perms |
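
The helpers above drive every assertion in this module: each builds an AuthUser and reads one section of its permissions dict, while default_perms() composes the expected global set. A minimal, self-contained sketch of that composition follows; the permission values here are illustrative stand-ins, not the real Permission.DEFAULT_USER_PERMISSIONS.

    # Illustrative stand-in for Permission.DEFAULT_USER_PERMISSIONS
    DEFAULT_USER_PERMISSIONS = {
        'hg.create.repository',
        'hg.fork.repository',
        'repository.read',
    }

    def default_perms(added=None, removed=None):
        # same shape as the helper above: start from the defaults,
        # drop `removed`, then add `added`
        expected_perms = set(DEFAULT_USER_PERMISSIONS)
        if removed:
            expected_perms.difference_update(removed)
        if added:
            expected_perms.update(added)
        return expected_perms

    # mirrors test_inherited_permissions_from_default_on_user_disabled
    assert default_perms(
        added=['hg.create.none', 'hg.fork.none'],
        removed=['hg.create.repository', 'hg.fork.repository'],
    ) == {'hg.create.none', 'hg.fork.none', 'repository.read'}
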
@@ -1,881 +1,897 b'' | |||||
1 |
|
1 | |||
2 | ; ######################################### |
|
2 | ; ######################################### | |
3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION | |
4 | ; ######################################### |
|
4 | ; ######################################### | |
5 |
|
5 | |||
6 | [DEFAULT] |
|
6 | [DEFAULT] | |
7 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
7 | ; Debug flag sets all loggers to debug, and enables request tracking | |
8 | debug = true |
|
8 | debug = true | |
9 |
|
9 | |||
10 | ; ######################################################################## |
|
10 | ; ######################################################################## | |
11 | ; EMAIL CONFIGURATION |
|
11 | ; EMAIL CONFIGURATION | |
12 | ; These settings will be used by the RhodeCode mailing system |
|
12 | ; These settings will be used by the RhodeCode mailing system | |
13 | ; ######################################################################## |
|
13 | ; ######################################################################## | |
14 |
|
14 | |||
15 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
15 | ; prefix all emails subjects with given prefix, helps filtering out emails | |
16 | #email_prefix = [RhodeCode] |
|
16 | #email_prefix = [RhodeCode] | |
17 |
|
17 | |||
18 | ; email FROM address all mails will be sent |
|
18 | ; email FROM address all mails will be sent | |
19 | #app_email_from = rhodecode-noreply@localhost |
|
19 | #app_email_from = rhodecode-noreply@localhost | |
20 |
|
20 | |||
21 | #smtp_server = mail.server.com |
|
21 | #smtp_server = mail.server.com | |
22 | #smtp_username = |
|
22 | #smtp_username = | |
23 | #smtp_password = |
|
23 | #smtp_password = | |
24 | #smtp_port = |
|
24 | #smtp_port = | |
25 | #smtp_use_tls = false |
|
25 | #smtp_use_tls = false | |
26 | #smtp_use_ssl = true |
|
26 | #smtp_use_ssl = true | |
27 |
|
27 | |||
28 | [server:main] |
|
28 | [server:main] | |
29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, |
|
29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, | |
30 | ; Host port for gunicorn are controlled by gunicorn_conf.py |
|
30 | ; Host port for gunicorn are controlled by gunicorn_conf.py | |
31 | host = 127.0.0.1 |
|
31 | host = 127.0.0.1 | |
32 | port = 10020 |
|
32 | port = 10020 | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | ; ########################### |
|
35 | ; ########################### | |
36 | ; GUNICORN APPLICATION SERVER |
|
36 | ; GUNICORN APPLICATION SERVER | |
37 | ; ########################### |
|
37 | ; ########################### | |
38 |
|
38 | |||
39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini |
|
39 | ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini | |
40 |
|
40 | |||
41 | ; Module to use, this setting shouldn't be changed |
|
41 | ; Module to use, this setting shouldn't be changed | |
42 | use = egg:gunicorn#main |
|
42 | use = egg:gunicorn#main | |
43 |
|
43 | |||
44 | ; Prefix middleware for RhodeCode. |
|
44 | ; Prefix middleware for RhodeCode. | |
45 | ; recommended when using a proxy setup. |
|
45 | ; recommended when using a proxy setup. | |
46 | ; allows serving RhodeCode under a URL prefix on the server. |
|
46 | ; allows serving RhodeCode under a URL prefix on the server. | |
47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. | |
48 | ; And set your prefix like: `prefix = /custom_prefix` |
|
48 | ; And set your prefix like: `prefix = /custom_prefix` | |
49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need | |
50 | ; to make your cookies only work on prefix url |
|
50 | ; to make your cookies only work on prefix url | |
51 | [filter:proxy-prefix] |
|
51 | [filter:proxy-prefix] | |
52 | use = egg:PasteDeploy#prefix |
|
52 | use = egg:PasteDeploy#prefix | |
53 | prefix = / |
|
53 | prefix = / | |
54 |
|
54 | |||
55 | [app:main] |
|
55 | [app:main] | |
56 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
56 | ; The %(here)s variable will be replaced with the absolute path of parent directory | |
57 | ; of this file |
|
57 | ; of this file | |
58 | ; Each option in the app:main section can be overridden by an environment variable |
|
58 | ; Each option in the app:main section can be overridden by an environment variable | |
59 | ; |
|
59 | ; | |
60 | ;To override an option: |
|
60 | ;To override an option: | |
61 | ; |
|
61 | ; | |
62 | ;RC_<KeyName> |
|
62 | ;RC_<KeyName> | |
63 | ;Everything should be uppercase, . and - should be replaced by _. |
|
63 | ;Everything should be uppercase, . and - should be replaced by _. | |
64 | ;For example, if you have these configuration settings: |
|
64 | ;For example, if you have these configuration settings: | |
65 | ;rc_cache.repo_object.backend = foo |
|
65 | ;rc_cache.repo_object.backend = foo | |
66 | ;can be overridden by |
|
66 | ;can be overridden by | |
67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo | |
68 |
|
68 | |||
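
As a rough sketch of the override rule described above (an illustration only, not RhodeCode's actual parsing code), the key-to-variable mapping can be expressed as:

    def ini_key_to_env_var(key: str) -> str:
        # uppercase, replace '.' and '-' with '_', ensure the RC_ prefix
        name = key.upper().replace('.', '_').replace('-', '_')
        return name if name.startswith('RC_') else 'RC_' + name

    # matches the documented example above
    assert ini_key_to_env_var('rc_cache.repo_object.backend') == 'RC_CACHE_REPO_OBJECT_BACKEND'
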
69 | use = egg:rhodecode-enterprise-ce |
|
69 | use = egg:rhodecode-enterprise-ce | |
70 |
|
70 | |||
71 | ; enable proxy prefix middleware, defined above |
|
71 | ; enable proxy prefix middleware, defined above | |
72 | #filter-with = proxy-prefix |
|
72 | #filter-with = proxy-prefix | |
73 |
|
73 | |||
|
74 | ; controls whether environment variables are expanded into the .ini settings |
|
75 | rhodecode.env_expand = false | |||
|
76 | ||||
74 | ; encryption key used to encrypt social plugin tokens, |
|
77 | ; encryption key used to encrypt social plugin tokens, | |
75 | ; remote_urls with credentials etc, if not set it defaults to |
|
78 | ; remote_urls with credentials etc, if not set it defaults to | |
76 | ; `beaker.session.secret` |
|
79 | ; `beaker.session.secret` | |
77 | #rhodecode.encrypted_values.secret = |
|
80 | #rhodecode.encrypted_values.secret = | |
78 |
|
81 | |||
79 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
82 | ; decryption strict mode (enabled by default). It controls if decryption raises | |
80 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
83 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. | |
81 | #rhodecode.encrypted_values.strict = false |
|
84 | #rhodecode.encrypted_values.strict = false | |
82 |
|
85 | |||
83 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
86 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) | |
84 | ; fernet is safer, and we strongly recommend switching to it. |
|
87 | ; fernet is safer, and we strongly recommend switching to it. | |
85 | ; Due to backward compatibility aes is used as default. |
|
88 | ; Due to backward compatibility aes is used as default. | |
86 | #rhodecode.encrypted_values.algorithm = fernet |
|
89 | #rhodecode.encrypted_values.algorithm = fernet | |
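
For context on why fernet is the recommended choice: fernet tokens are authenticated, so tampering with stored ciphertext is detected at decryption time. A hedged, standalone example using the `cryptography` package (an assumption for illustration; this is not RhodeCode's internal code):

    from cryptography.fernet import Fernet

    key = Fernet.generate_key()          # 32-byte url-safe base64 key
    f = Fernet(key)
    token = f.encrypt(b'remote_url_password')
    # decrypt() verifies the token's HMAC and raises InvalidToken on tampering
    assert f.decrypt(token) == b'remote_url_password'
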
87 |
|
90 | |||
88 | ; Return gzipped responses from RhodeCode (static files/application) |
|
91 | ; Return gzipped responses from RhodeCode (static files/application) | |
89 | gzip_responses = false |
|
92 | gzip_responses = false | |
90 |
|
93 | |||
91 | ; Auto-generate javascript routes file on startup |
|
94 | ; Auto-generate javascript routes file on startup | |
92 | generate_js_files = false |
|
95 | generate_js_files = false | |
93 |
|
96 | |||
94 | ; System global default language. |
|
97 | ; System global default language. | |
95 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
98 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh | |
96 | lang = en |
|
99 | lang = en | |
97 |
|
100 | |||
98 | ; Perform a full repository scan and import on each server start. |
|
101 | ; Perform a full repository scan and import on each server start. | |
99 | ; Setting this to true could lead to a very long startup time. |
|
102 | ; Setting this to true could lead to a very long startup time. | |
100 | startup.import_repos = true |
|
103 | startup.import_repos = true | |
101 |
|
104 | |||
102 | ; URL at which the application is running. This is used for Bootstrapping |
|
105 | ; URL at which the application is running. This is used for Bootstrapping | |
103 | ; requests in context when no web request is available. Used in ishell, or |
|
106 | ; requests in context when no web request is available. Used in ishell, or | |
104 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
107 | ; SSH calls. Set this for events to receive proper url for SSH calls. | |
105 | app.base_url = http://rhodecode.local |
|
108 | app.base_url = http://rhodecode.local | |
106 |
|
109 | |||
107 | ; Host at which the Service API is running. |
|
110 | ; Host at which the Service API is running. | |
108 | app.service_api.host = http://rhodecode.local:10020 |
|
111 | app.service_api.host = http://rhodecode.local:10020 | |
109 |
|
112 | |||
110 | ; Secret for Service API authentication. |
|
113 | ; Secret for Service API authentication. | |
111 | app.service_api.token = |
|
114 | app.service_api.token = | |
112 |
|
115 | |||
113 | ; Unique application ID. Should be a random unique string for security. |
|
116 | ; Unique application ID. Should be a random unique string for security. | |
114 | app_instance_uuid = rc-production |
|
117 | app_instance_uuid = rc-production | |
115 |
|
118 | |||
116 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
119 | ; Cut off limit for large diffs (size in bytes). If overall diff size on | |
117 | ; a commit or pull request exceeds this limit, the diff will be displayed |
|
120 | ; a commit or pull request exceeds this limit, the diff will be displayed | |
118 | ; partially. E.g 512000 == 512Kb |
|
121 | ; partially. E.g 512000 == 512Kb | |
119 | cut_off_limit_diff = 1024000 |
|
122 | cut_off_limit_diff = 1024000 | |
120 |
|
123 | |||
121 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
124 | ; Cut off limit for large files inside diffs (size in bytes). Each individual | |
122 | ; file inside diff which exceeds this limit will be displayed partially. |
|
125 | ; file inside diff which exceeds this limit will be displayed partially. | |
123 | ; E.g 128000 == 128Kb |
|
126 | ; E.g 128000 == 128Kb | |
124 | cut_off_limit_file = 256000 |
|
127 | cut_off_limit_file = 256000 | |
125 |
|
128 | |||
126 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
129 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` | |
127 | vcs_full_cache = false |
|
130 | vcs_full_cache = false | |
128 |
|
131 | |||
129 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
132 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. | |
130 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
133 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache | |
131 | force_https = false |
|
134 | force_https = false | |
132 |
|
135 | |||
133 | ; use Strict-Transport-Security headers |
|
136 | ; use Strict-Transport-Security headers | |
134 | use_htsts = false |
|
137 | use_htsts = false | |
135 |
|
138 | |||
136 | ; Set to true if your repos are exposed using the dumb protocol |
|
139 | ; Set to true if your repos are exposed using the dumb protocol | |
137 | git_update_server_info = false |
|
140 | git_update_server_info = false | |
138 |
|
141 | |||
139 | ; RSS/ATOM feed options |
|
142 | ; RSS/ATOM feed options | |
140 | rss_cut_off_limit = 256000 |
|
143 | rss_cut_off_limit = 256000 | |
141 | rss_items_per_page = 10 |
|
144 | rss_items_per_page = 10 | |
142 | rss_include_diff = false |
|
145 | rss_include_diff = false | |
143 |
|
146 | |||
144 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
147 | ; gist URL alias, used to create nicer urls for gist. This should be an | |
145 | ; url that does rewrites to _admin/gists/{gistid}. |
|
148 | ; url that does rewrites to _admin/gists/{gistid}. | |
146 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
149 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal | |
147 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
150 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} | |
148 | gist_alias_url = |
|
151 | gist_alias_url = | |
149 |
|
152 | |||
150 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
153 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be | |
151 | ; used for access. |
|
154 | ; used for access. | |
152 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
155 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it | |
153 | ; came from the logged-in user who owns this authentication token. |
|
156 | ; came from the logged-in user who owns this authentication token. | |
154 | ; Additionally, the @TOKEN syntax can be used to bind the view to a specific |
|
157 | ; Additionally, the @TOKEN syntax can be used to bind the view to a specific | |
155 | ; authentication token. Such a view would only be accessible when used together |
|
158 | ; authentication token. Such a view would only be accessible when used together | |
156 | ; with this authentication token |
|
159 | ; with this authentication token | |
157 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
160 | ; list of all views can be found under `/_admin/permissions/auth_token_access` | |
158 | ; The list should be "," separated and on a single line. |
|
161 | ; The list should be "," separated and on a single line. | |
159 | ; Most common views to enable: |
|
162 | ; Most common views to enable: | |
160 |
|
163 | |||
161 | # RepoCommitsView:repo_commit_download |
|
164 | # RepoCommitsView:repo_commit_download | |
162 | # RepoCommitsView:repo_commit_patch |
|
165 | # RepoCommitsView:repo_commit_patch | |
163 | # RepoCommitsView:repo_commit_raw |
|
166 | # RepoCommitsView:repo_commit_raw | |
164 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
167 | # RepoCommitsView:repo_commit_raw@TOKEN | |
165 | # RepoFilesView:repo_files_diff |
|
168 | # RepoFilesView:repo_files_diff | |
166 | # RepoFilesView:repo_archivefile |
|
169 | # RepoFilesView:repo_archivefile | |
167 | # RepoFilesView:repo_file_raw |
|
170 | # RepoFilesView:repo_file_raw | |
168 | # GistView:* |
|
171 | # GistView:* | |
169 | api_access_controllers_whitelist = |
|
172 | api_access_controllers_whitelist = | |
170 |
|
173 | |||
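
Since the whitelist uses glob pattern syntax, the matching can be pictured with fnmatch. A hypothetical illustration (not RhodeCode's implementation) using two of the example entries above; the checked view names are made up for the sketch:

    import fnmatch

    whitelist = ['RepoFilesView:repo_file_raw', 'GistView:*']

    def view_allowed(view_name: str) -> bool:
        # a view is whitelisted if any glob pattern matches its name
        return any(fnmatch.fnmatch(view_name, pat) for pat in whitelist)

    assert view_allowed('RepoFilesView:repo_file_raw')
    assert view_allowed('GistView:gist_show')          # matched by GistView:*
    assert not view_allowed('RepoCommitsView:repo_commit_raw')
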
171 | ; Default encoding used to convert from and to unicode |
|
174 | ; Default encoding used to convert from and to unicode | |
172 | ; can be also a comma separated list of encoding in case of mixed encodings |
|
175 | ; can be also a comma separated list of encoding in case of mixed encodings | |
173 | default_encoding = UTF-8 |
|
176 | default_encoding = UTF-8 | |
174 |
|
177 | |||
175 | ; instance-id prefix |
|
178 | ; instance-id prefix | |
176 | ; a prefix key for this instance used for cache invalidation when running |
|
179 | ; a prefix key for this instance used for cache invalidation when running | |
177 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
180 | ; multiple instances of RhodeCode, make sure it's globally unique for | |
178 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
181 | ; all running RhodeCode instances. Leave empty if you don't use it | |
179 | instance_id = |
|
182 | instance_id = | |
180 |
|
183 | |||
181 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
184 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage | |
182 | ; of an authentication plugin even if it is disabled by its settings. |
|
185 | ; of an authentication plugin even if it is disabled by its settings. | |
183 | ; This could be useful if you are unable to log in to the system due to broken |
|
186 | ; This could be useful if you are unable to log in to the system due to broken | |
184 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
187 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth | |
185 | ; module to log in again and fix the settings. |
|
188 | ; module to log in again and fix the settings. | |
186 | ; Available builtin plugin IDs (hash is part of the ID): |
|
189 | ; Available builtin plugin IDs (hash is part of the ID): | |
187 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
190 | ; egg:rhodecode-enterprise-ce#rhodecode | |
188 | ; egg:rhodecode-enterprise-ce#pam |
|
191 | ; egg:rhodecode-enterprise-ce#pam | |
189 | ; egg:rhodecode-enterprise-ce#ldap |
|
192 | ; egg:rhodecode-enterprise-ce#ldap | |
190 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
193 | ; egg:rhodecode-enterprise-ce#jasig_cas | |
191 | ; egg:rhodecode-enterprise-ce#headers |
|
194 | ; egg:rhodecode-enterprise-ce#headers | |
192 | ; egg:rhodecode-enterprise-ce#crowd |
|
195 | ; egg:rhodecode-enterprise-ce#crowd | |
193 |
|
196 | |||
194 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
197 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode | |
195 |
|
198 | |||
196 | ; Flag to control loading of legacy plugins in py:/path format |
|
199 | ; Flag to control loading of legacy plugins in py:/path format | |
197 | auth_plugin.import_legacy_plugins = true |
|
200 | auth_plugin.import_legacy_plugins = true | |
198 |
|
201 | |||
199 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
202 | ; alternative return HTTP header for failed authentication. Default HTTP | |
200 | ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with |
|
203 | ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with | |
201 | ; handling that causing a series of failed authentication calls. |
|
204 | ; handling that causing a series of failed authentication calls. | |
202 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
205 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code | |
203 | ; This will be served instead of default 401 on bad authentication |
|
206 | ; This will be served instead of default 401 on bad authentication | |
204 | auth_ret_code = |
|
207 | auth_ret_code = | |
205 |
|
208 | |||
206 | ; use special detection method when serving auth_ret_code, instead of serving |
|
209 | ; use special detection method when serving auth_ret_code, instead of serving | |
207 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
210 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) | |
208 | ; and then serve auth_ret_code to clients |
|
211 | ; and then serve auth_ret_code to clients | |
209 | auth_ret_code_detection = false |
|
212 | auth_ret_code_detection = false | |
210 |
|
213 | |||
211 | ; locking return code. When repository is locked return this HTTP code. 2XX |
|
214 | ; locking return code. When repository is locked return this HTTP code. 2XX | |
212 | ; codes don't break the transactions while 4XX codes do |
|
215 | ; codes don't break the transactions while 4XX codes do | |
213 | lock_ret_code = 423 |
|
216 | lock_ret_code = 423 | |
214 |
|
217 | |||
215 | ; Filesystem location where repositories should be stored |
|
218 | ; Filesystem location where repositories should be stored | |
216 | repo_store.path = /var/opt/rhodecode_repo_store |
|
219 | repo_store.path = /var/opt/rhodecode_repo_store | |
217 |
|
220 | |||
218 | ; allows setting up custom hooks in the settings page |
|
221 | ; allows setting up custom hooks in the settings page | |
219 | allow_custom_hooks_settings = true |
|
222 | allow_custom_hooks_settings = true | |
220 |
|
223 | |||
221 | ; Generated license token required for EE edition license. |
|
224 | ; Generated license token required for EE edition license. | |
222 | ; New generated token value can be found in Admin > settings > license page. |
|
225 | ; New generated token value can be found in Admin > settings > license page. | |
223 | license_token = abra-cada-bra1-rce3 |
|
226 | license_token = abra-cada-bra1-rce3 | |
224 |
|
227 | |||
225 | ; This flag hides sensitive information on the license page such as token, and license data |
|
228 | ; This flag hides sensitive information on the license page such as token, and license data | |
226 | license.hide_license_info = false |
|
229 | license.hide_license_info = false | |
227 |
|
230 | |||
|
231 | ; Import EE license from this license path | |||
|
232 | #license.import_path = %(here)s/rhodecode_enterprise.license | |||
|
233 | ||||
|
234 | ; import license 'if-missing' or 'force' (always override) | |||
|
235 | ; 'if-missing' applies the license only when none exists; 'force' always overrides it |
|
236 | license.import_path_mode = if-missing | |||
|
237 | ||||
228 | ; supervisor connection uri, for managing supervisor and logs. |
|
238 | ; supervisor connection uri, for managing supervisor and logs. | |
229 | supervisor.uri = |
|
239 | supervisor.uri = | |
230 |
|
240 | |||
231 | ; supervisord group name/id that we want only this RC instance to handle |
|
241 | ; supervisord group name/id that we want only this RC instance to handle | |
232 | supervisor.group_id = dev |
|
242 | supervisor.group_id = dev | |
233 |
|
243 | |||
234 | ; Display extended labs settings |
|
244 | ; Display extended labs settings | |
235 | labs_settings_active = true |
|
245 | labs_settings_active = true | |
236 |
|
246 | |||
237 | ; Custom exception store path, defaults to TMPDIR |
|
247 | ; Custom exception store path, defaults to TMPDIR | |
238 | ; This is used to store exceptions from RhodeCode in a shared directory |
|
248 | ; This is used to store exceptions from RhodeCode in a shared directory | |
239 | #exception_tracker.store_path = |
|
249 | #exception_tracker.store_path = | |
240 |
|
250 | |||
241 | ; Send email with exception details when it happens |
|
251 | ; Send email with exception details when it happens | |
242 | #exception_tracker.send_email = false |
|
252 | #exception_tracker.send_email = false | |
243 |
|
253 | |||
244 | ; Comma separated list of recipients for exception emails, |
|
254 | ; Comma separated list of recipients for exception emails, | |
245 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
255 | ; e.g admin@rhodecode.com,devops@rhodecode.com | |
246 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
256 | ; Can be left empty, then emails will be sent to ALL super-admins | |
247 | #exception_tracker.send_email_recipients = |
|
257 | #exception_tracker.send_email_recipients = | |
248 |
|
258 | |||
249 | ; optional prefix to add to the email subject |
|
259 | ; optional prefix to add to the email subject | |
250 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
260 | #exception_tracker.email_prefix = [RHODECODE ERROR] | |
251 |
|
261 | |||
252 | ; NOTE: this setting IS DEPRECATED: |
|
262 | ; NOTE: this setting IS DEPRECATED: | |
253 | ; file_store backend is always enabled |
|
263 | ; file_store backend is always enabled | |
254 | #file_store.enabled = true |
|
264 | #file_store.enabled = true | |
255 |
|
265 | |||
256 | ; NOTE: this setting IS DEPRECATED: |
|
266 | ; NOTE: this setting IS DEPRECATED: | |
257 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead |
|
267 | ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead | |
258 | ; Storage backend, available options are: local |
|
268 | ; Storage backend, available options are: local | |
259 | #file_store.backend = local |
|
269 | #file_store.backend = local | |
260 |
|
270 | |||
261 | ; NOTE: this setting IS DEPRECATED: |
|
271 | ; NOTE: this setting IS DEPRECATED: | |
262 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead |
|
272 | ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead | |
263 | ; path to store the uploaded binaries and artifacts |
|
273 | ; path to store the uploaded binaries and artifacts | |
264 | #file_store.storage_path = /var/opt/rhodecode_data/file_store |
|
274 | #file_store.storage_path = /var/opt/rhodecode_data/file_store | |
265 |
|
275 | |||
266 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. |
|
276 | ; Artifacts file-store, is used to store comment attachments and artifacts uploads. | |
267 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options |
|
277 | ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options | |
268 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes |
|
278 | ; filesystem_v1 is backwards compat with pre 5.1 storage changes | |
269 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from |
|
279 | ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from | |
270 | ; previous installations to keep the artifacts without a need of migration |
|
280 | ; previous installations to keep the artifacts without a need of migration | |
271 | file_store.backend.type = filesystem_v1 |
|
281 | file_store.backend.type = filesystem_v1 | |
272 |
|
282 | |||
273 | ; filesystem options... |
|
283 | ; filesystem options... | |
274 | file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store |
|
284 | file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store | |
275 |
|
285 | |||
276 | ; filesystem_v2 options... |
|
286 | ; filesystem_v2 options... | |
277 | file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store_2 |
|
287 | file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store_2 | |
278 | file_store.filesystem_v2.shards = 8 |
|
288 | file_store.filesystem_v2.shards = 8 | |
279 |
|
289 | |||
280 | ; objectstore options... |
|
290 | ; objectstore options... | |
281 | ; url for s3 compatible storage that allows to upload artifacts |
|
291 | ; url for s3 compatible storage that allows to upload artifacts | |
282 | ; e.g http://minio:9000 |
|
292 | ; e.g http://minio:9000 | |
283 | #file_store.backend.type = objectstore |
|
293 | #file_store.backend.type = objectstore | |
284 | file_store.objectstore.url = http://s3-minio:9000 |
|
294 | file_store.objectstore.url = http://s3-minio:9000 | |
285 |
|
295 | |||
286 | ; a top-level bucket to put all other shards in |
|
296 | ; a top-level bucket to put all other shards in | |
287 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number |
|
297 | ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number | |
288 | file_store.objectstore.bucket = rhodecode-file-store-tests |
|
298 | file_store.objectstore.bucket = rhodecode-file-store-tests | |
289 |
|
299 | |||
290 | ; number of sharded buckets to create to distribute archives across |
|
300 | ; number of sharded buckets to create to distribute archives across | |
291 | ; default is 8 shards |
|
301 | ; default is 8 shards | |
292 | file_store.objectstore.bucket_shards = 8 |
|
302 | file_store.objectstore.bucket_shards = 8 | |
293 |
|
303 | |||
294 | ; key for s3 auth |
|
304 | ; key for s3 auth | |
295 | file_store.objectstore.key = s3admin |
|
305 | file_store.objectstore.key = s3admin | |
296 |
|
306 | |||
297 | ; secret for s3 auth |
|
307 | ; secret for s3 auth | |
298 | file_store.objectstore.secret = s3secret4 |
|
308 | file_store.objectstore.secret = s3secret4 | |
299 |
|
309 | |||
300 | ;region for s3 storage |
|
310 | ;region for s3 storage | |
301 | file_store.objectstore.region = eu-central-1 |
|
311 | file_store.objectstore.region = eu-central-1 | |
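
; Example (untested sketch): any s3-compatible endpoint can be used as the url,
; e.g. AWS S3; the key/secret below are placeholders, adjust to your setup
#file_store.objectstore.url = https://s3.eu-central-1.amazonaws.com
#file_store.objectstore.key = <access-key-id>
#file_store.objectstore.secret = <secret-access-key>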

; Redis url to acquire/check generation of archives locks
archive_cache.locking.url = redis://redis:6379/1

; Storage backend, only 'filesystem' and 'objectstore' are available now
archive_cache.backend.type = filesystem

; url for s3 compatible storage that allows uploading artifacts
; e.g. http://minio:9000
archive_cache.objectstore.url = http://s3-minio:9000

; key for s3 auth
archive_cache.objectstore.key = key

; secret for s3 auth
archive_cache.objectstore.secret = secret

; region for s3 storage
archive_cache.objectstore.region = eu-central-1

; number of sharded buckets to create to distribute archives across
; default is 8 shards
archive_cache.objectstore.bucket_shards = 8

; a top-level bucket to put all other shards in
; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
archive_cache.objectstore.bucket = rhodecode-archive-cache

; if true, this cache will retry fetches up to retry_attempts=N times, waiting retry_backoff time between tries
archive_cache.objectstore.retry = false

; number of seconds to wait before the next try when using retry
archive_cache.objectstore.retry_backoff = 1

; how many tries to do for a retry fetch from this backend
archive_cache.objectstore.retry_attempts = 10
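; e.g. with retry = true and the values above, a failed fetch is retried up to
; 10 times with ~1 second between tries (assuming a fixed, non-exponential backoff)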

; Default is $cache_dir/archive_cache if not set
; Generated repo archives will be cached at this location
; and served from the cache during subsequent requests for the same archive of
; the repository. It is important that this path is shared across filesystems and
; between RhodeCode and vcsserver
archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache

; The limit in GB sets how much data we cache before recycling the last used entries, defaults to 10 GB
archive_cache.filesystem.cache_size_gb = 2

; Eviction policy used to clear out entries after the cache_size_gb limit is reached
archive_cache.filesystem.eviction_policy = least-recently-stored

; By default the cache uses a sharding technique, this specifies how many shards there are
; default is 8 shards
archive_cache.filesystem.cache_shards = 8

; if true, this cache will retry fetches up to retry_attempts=N times, waiting retry_backoff time between tries
archive_cache.filesystem.retry = false

; number of seconds to wait before the next try when using retry
archive_cache.filesystem.retry_backoff = 1

; how many tries to do for a retry fetch from this backend
archive_cache.filesystem.retry_attempts = 10


; #############
; CELERY CONFIG
; #############

; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini

use_celery = false

; path to store schedule database
#celerybeat-schedule.path =

; connection url to the message broker (default redis)
celery.broker_url = redis://redis:6379/8

; results backend to get results for (default redis)
celery.result_backend = redis://redis:6379/8

; rabbitmq example
#celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost

; maximum tasks to execute before worker restart
celery.max_tasks_per_child = 20

; tasks will never be sent to the queue, but executed locally instead.
celery.task_always_eager = true
celery.task_store_eager_result = true

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; e.g. /tmpfs/data_ramdisk, however this directory might require a large amount of space
cache_dir = %(here)s/rc-test-data

; *********************************************
; `sql_cache_short` cache for heavy SQL queries
; Only supported backend is `memory_lru`
; *********************************************
rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
rc_cache.sql_cache_short.expiration_time = 0


; *****************************************************
; `cache_repo_longterm` cache for repo object instances
; Only supported backend is `memory_lru`
; *****************************************************
rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
; by default we use 30 days, cache is still invalidated on push
rc_cache.cache_repo_longterm.expiration_time = 2592000
; max items in the LRU cache, set to a smaller number to save memory and expire the last used caches
rc_cache.cache_repo_longterm.max_size = 10000


; *********************************************
; `cache_general` cache for general purpose use
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; *********************************************
rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_general.expiration_time = 43200
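; note: 43200 seconds = 12 hours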
; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db

; alternative `cache_general` redis backend with distributed lock
#rc_cache.cache_general.backend = dogpile.cache.rc.redis
#rc_cache.cache_general.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_general.arguments.redis_expiration_time = 7200

#rc_cache.cache_general.arguments.host = localhost
#rc_cache.cache_general.arguments.port = 6379
#rc_cache.cache_general.arguments.db = 0
#rc_cache.cache_general.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_general.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_general.arguments.lock_auto_renewal = true

; *************************************************
; `cache_perms` cache for permission tree, auth TTL
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; *************************************************
rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_perms.expiration_time = 0
; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db

; alternative `cache_perms` redis backend with distributed lock
#rc_cache.cache_perms.backend = dogpile.cache.rc.redis
#rc_cache.cache_perms.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_perms.arguments.redis_expiration_time = 7200

#rc_cache.cache_perms.arguments.host = localhost
#rc_cache.cache_perms.arguments.port = 6379
#rc_cache.cache_perms.arguments.db = 0
#rc_cache.cache_perms.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_perms.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_perms.arguments.lock_auto_renewal = true

; ***************************************************
; `cache_repo` cache for file tree, Readme, RSS FEEDS
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; ***************************************************
rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_repo.expiration_time = 2592000
; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db

; alternative `cache_repo` redis backend with distributed lock
#rc_cache.cache_repo.backend = dogpile.cache.rc.redis
#rc_cache.cache_repo.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_repo.arguments.redis_expiration_time = 2678400

#rc_cache.cache_repo.arguments.host = localhost
#rc_cache.cache_repo.arguments.port = 6379
#rc_cache.cache_repo.arguments.db = 1
#rc_cache.cache_repo.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_repo.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_repo.arguments.lock_auto_renewal = true

; ##############
; BEAKER SESSION
; ##############

; beaker.session.type is the type of storage used for the logged-in users' sessions. Currently allowed
; types are file, ext:redis, ext:database, ext:memcached
; The fastest ones are ext:redis and ext:database, DO NOT use the memory type for sessions
beaker.session.type = file
beaker.session.data_dir = %(here)s/rc-tests/data/sessions

; Redis based sessions
#beaker.session.type = ext:redis
#beaker.session.url = redis://redis:6379/2

; DB based session, fast, and allows easy management of logged-in users
#beaker.session.type = ext:database
#beaker.session.table_name = db_session
#beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
#beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
#beaker.session.sa.pool_recycle = 3600
#beaker.session.sa.echo = false

beaker.session.key = rhodecode
beaker.session.secret = test-rc-uytcxaz
beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock

; Secure encrypted cookie. Requires AES and AES python libraries
; you must disable beaker.session.secret to use this
#beaker.session.encrypt_key = key_for_encryption
#beaker.session.validate_key = validation_key

; Sets the session as invalid (also logging out the user) if it has not been
; accessed for the given amount of time in seconds
beaker.session.timeout = 2592000
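; note: 2592000 seconds = 30 days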
beaker.session.httponly = true

; Path to use for the cookie. Set to prefix if you use prefix middleware
#beaker.session.cookie_path = /custom_prefix

; Set https secure cookie
beaker.session.secure = false

; default cookie expiration time in seconds, set to `true` to set expire
; at browser close
#beaker.session.cookie_expires = 3600

; #############################
; SEARCH INDEXING CONFIGURATION
; #############################

; Full text search indexer is available in rhodecode-tools under
; `rhodecode-tools index` command

; WHOOSH Backend, doesn't require additional services to run
; it works well with a few dozen repos
search.module = rhodecode.lib.index.whoosh
search.location = %(here)s/rc-tests/data/index

; ####################
; CHANNELSTREAM CONFIG
; ####################

; channelstream enables persistent connections and live notifications
; in the system. It's also used by the chat system

channelstream.enabled = false

; server address for channelstream server on the backend
channelstream.server = channelstream:9800

; location of the channelstream server from the outside world
; use ws:// for http or wss:// for https. This address needs to be handled
; by an external HTTP server such as Nginx or Apache
; see Nginx/Apache configuration examples in our docs
channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
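; e.g. when served over https:
#channelstream.ws_url = wss://rhodecode.yourserver.com/_channelstream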
channelstream.secret = ENV_GENERATED
channelstream.history.location = %(here)s/rc-tests/channelstream_history

; Internal application path that Javascript uses to connect into.
; If you use proxy-prefix the prefix should be added before /_channelstream
channelstream.proxy_path = /_channelstream


; ##############################
; MAIN RHODECODE DATABASE CONFIG
; ##############################

#sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
#sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
#sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
; pymysql is an alternative driver for MySQL, use it in case of problems with the default one
#sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode

sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30

; see sqlalchemy docs for other advanced settings
; print the sql statements to output
sqlalchemy.db1.echo = false

; recycle the connections after this amount of seconds
sqlalchemy.db1.pool_recycle = 3600

; the number of connections to keep open inside the connection pool.
; 0 indicates no limit
; the general calculus with gevent is:
; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
; then increase pool size + max overflow so that they add up to 500.
#sqlalchemy.db1.pool_size = 5

; The number of connections to allow in connection pool "overflow", that is
; connections that can be opened above and beyond the pool_size setting,
; which defaults to five.
#sqlalchemy.db1.max_overflow = 10
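; Worked example of the calculus above (illustrative split, not a recommendation):
; for ~500 concurrent greenlets, pick values so that pool_size + max_overflow = 500
#sqlalchemy.db1.pool_size = 400
#sqlalchemy.db1.max_overflow = 100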

; Connection check ping, used to detect broken database connections
; can be enabled to better handle 'MySQL has gone away' errors
#sqlalchemy.db1.ping_connection = true

; ##########
; VCS CONFIG
; ##########
vcs.server.enable = true
vcs.server = vcsserver:10010

; Web server connectivity protocol, responsible for web based VCS operations
; Available protocols are:
; `http` - use http-rpc backend (default)
vcs.server.protocol = http

; Push/Pull operations protocol, available options are:
; `http` - use http-rpc backend (default)
vcs.scm_app_implementation = http

; Push/Pull operations hooks protocol, available options are:
; `http` - use http-rpc backend (default)
; `celery` - use celery based hooks
#DEPRECATED:vcs.hooks.protocol = http
vcs.hooks.protocol = http

; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
; accessible via network.
; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
vcs.hooks.host = *

; Start VCSServer with this instance as a subprocess, useful for development
vcs.start_server = false

; List of enabled VCS backends, available options are:
; `hg` - mercurial
; `git` - git
; `svn` - subversion
vcs.backends = hg, git, svn
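; e.g. to disable subversion support entirely:
#vcs.backends = hg, git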

; Wait this number of seconds before killing the connection to the vcsserver
vcs.connection_timeout = 3600

; Cache flag to cache vcsserver remote calls locally
; It uses cache_region `cache_repo`
vcs.methods.cache = false

; Filesystem location where Git lfs objects should be stored
vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store

; Filesystem location where Mercurial largefile objects should be stored
vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store

; ####################################################
; Subversion proxy support (mod_dav_svn)
; Maps RhodeCode repo groups into SVN paths for Apache
; ####################################################

; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
; Set a numeric version for your current SVN e.g. 1.8 or 1.12
; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
#vcs.svn.compatible_version = 1.8

; Redis connection settings for svn integrations logic
; This connection string needs to be the same on ce and vcsserver
vcs.svn.redis_conn = redis://redis:6379/0

; Enable SVN proxy of requests over HTTP
vcs.svn.proxy.enabled = true

; host to connect to the running SVN subsystem
vcs.svn.proxy.host = http://svn:8090

; Enable or disable the config file generation.
svn.proxy.generate_config = false

; Generate config file with `SVNListParentPath` set to `On`.
svn.proxy.list_parent_path = true

; Set location and file name of the generated config file.
svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf

; alternative mod_dav config template. This needs to be a valid mako template
; Example template can be found in the source code:
; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
#svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako

; Used as a prefix to the `Location` block in the generated config file.
; In most cases it should be set to `/`.
svn.proxy.location_root = /

; Command to reload the mod dav svn configuration on change.
; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
; Make sure the user who runs the RhodeCode process is allowed to reload Apache
#svn.proxy.reload_cmd = /etc/init.d/apache2 reload

; If the timeout expires before the reload command finishes, the command will
; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
#svn.proxy.reload_timeout = 10

; ####################
; SSH Support Settings
; ####################

; Defines if a custom authorized_keys file should be created and written on
; any change of user ssh keys. Setting this to false also disables the possibility
; of adding SSH keys by users from the web interface. Super admins can still
; manage SSH Keys.
ssh.generate_authorized_keyfile = true

; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
# ssh.authorized_keys_ssh_opts =

; Path to the authorized_keys file where the generated entries are placed.
; It is possible to have multiple key files specified in `sshd_config` e.g.
; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode

; Command to execute the SSH wrapper. The binary is available in the
; RhodeCode installation directory.
; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
#DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2

; Allow shell when executing the ssh-wrapper command
ssh.wrapper_cmd_allow_shell = false

; Enables logging, and detailed output sent back to the client during SSH
; operations. Useful for debugging, shouldn't be used in production.
ssh.enable_debug_logging = true

; Paths to binary executables, by default they are just the binary names, but we can
; override them if we want to use custom ones
ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve

; Enables SSH key generator web interface. Disabling this still allows users
; to add their own keys.
ssh.enable_ui_key_generator = true

; Statsd client config, this is used to send metrics to statsd
; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; Configure logging automatically at server startup. Set to false
; to use the below custom logging config.
; The RC_LOGGING_FORMATTER and RC_LOGGING_LEVEL
; env variables can control the logging settings in case of autoconfigure

logging.autoconfigure = false

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; Dummy marker to add new entries after.
; Add any custom entries below. Please don't remove this marker.
custom.conf = 1


; #####################
; LOGGING CONFIGURATION
; #####################

[loggers]
keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile

[handlers]
keys = console, console_sql

[formatters]
keys = generic, json, color_formatter, color_formatter_sql

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_routes]
level = DEBUG
handlers =
qualname = routes.middleware
## "level = DEBUG" logs the route matched and routing variables.
propagate = 1

[logger_sqlalchemy]
level = INFO
handlers = console_sql
qualname = sqlalchemy.engine
propagate = 0

[logger_beaker]
level = DEBUG
handlers =
qualname = beaker.container
propagate = 1

[logger_dogpile]
level = INFO
handlers = console
qualname = dogpile
propagate = 1

[logger_rhodecode]
level = DEBUG
handlers =
qualname = rhodecode
propagate = 1

[logger_ssh_wrapper]
level = DEBUG
handlers =
qualname = ssh_wrapper
propagate = 1

[logger_celery]
level = DEBUG
handlers =
qualname = celery


; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = DEBUG
; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
formatter = generic
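; e.g. the JSON variant mentioned above:
#formatter = json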

[handler_console_sql]
; "level = DEBUG" logs SQL queries and results.
; "level = INFO" logs SQL queries.
; "level = WARN" logs neither. (Recommended for production systems.)
class = StreamHandler
args = (sys.stderr, )
level = WARN
; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
formatter = generic

; ##########
; FORMATTERS
; ##########

[formatter_generic]
class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter]
class = rhodecode.lib.logging_formatter.ColorFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter_sql]
class = rhodecode.lib.logging_formatter.ColorFormatterSql
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_json]
format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
class = rhodecode.lib._vendor.jsonlogger.JsonFormatter