@@ -1,622 +1,621 @@
1 | ################################################################################ |
|
1 | ################################################################################ | |
2 | ################################################################################ |
|
2 | ################################################################################ | |
3 | # Kallithea - Development config: # |
|
3 | # Kallithea - Development config: # | |
4 | # listening on *:5000 # |
|
4 | # listening on *:5000 # | |
5 | # sqlite and kallithea.db # |
|
5 | # sqlite and kallithea.db # | |
6 | # initial_repo_scan = true # |
|
6 | # initial_repo_scan = true # | |
7 | # debug = true # |
|
7 | # debug = true # | |
8 | # verbose and colorful logging # |
|
8 | # verbose and colorful logging # | |
9 | # # |
|
9 | # # | |
10 | # The %(here)s variable will be replaced with the parent directory of this file# |
|
10 | # The %(here)s variable will be replaced with the parent directory of this file# | |
11 | ################################################################################ |
|
11 | ################################################################################ | |
12 | ################################################################################ |
|
12 | ################################################################################ | |
13 |
|
13 | |||
14 | [DEFAULT] |
|
14 | [DEFAULT] | |
15 |
|
15 | |||
16 | ################################################################################ |
|
16 | ################################################################################ | |
17 | ## Email settings ## |
|
17 | ## Email settings ## | |
18 | ## ## |
|
18 | ## ## | |
19 | ## Refer to the documentation ("Email settings") for more details. ## |
|
19 | ## Refer to the documentation ("Email settings") for more details. ## | |
20 | ## ## |
|
20 | ## ## | |
21 | ## It is recommended to use a valid sender address that passes access ## |
|
21 | ## It is recommended to use a valid sender address that passes access ## | |
22 | ## validation and spam filtering in mail servers. ## |
|
22 | ## validation and spam filtering in mail servers. ## | |
23 | ################################################################################ |
|
23 | ################################################################################ | |
24 |
|
24 | |||
25 | ## 'From' header for application emails. You can optionally add a name. |
|
25 | ## 'From' header for application emails. You can optionally add a name. | |
26 | ## Default: |
|
26 | ## Default: | |
27 | #app_email_from = Kallithea |
|
27 | #app_email_from = Kallithea | |
28 | ## Examples: |
|
28 | ## Examples: | |
29 | #app_email_from = Kallithea <kallithea-noreply@example.com> |
|
29 | #app_email_from = Kallithea <kallithea-noreply@example.com> | |
30 | #app_email_from = kallithea-noreply@example.com |
|
30 | #app_email_from = kallithea-noreply@example.com | |
31 |
|
31 | |||
32 | ## Subject prefix for application emails. |
|
32 | ## Subject prefix for application emails. | |
33 | ## A space between this prefix and the real subject is automatically added. |
|
33 | ## A space between this prefix and the real subject is automatically added. | |
34 | ## Default: |
|
34 | ## Default: | |
35 | #email_prefix = |
|
35 | #email_prefix = | |
36 | ## Example: |
|
36 | ## Example: | |
37 | #email_prefix = [Kallithea] |
|
37 | #email_prefix = [Kallithea] | |
38 |
|
38 | |||
39 | ## Recipients for error emails and fallback recipients of application mails. |
|
39 | ## Recipients for error emails and fallback recipients of application mails. | |
40 | ## Multiple addresses can be specified, space-separated. |
|
40 | ## Multiple addresses can be specified, space-separated. | |
41 | ## Only addresses are allowed, do not add any name part. |
|
41 | ## Only addresses are allowed, do not add any name part. | |
42 | ## Default: |
|
42 | ## Default: | |
43 | #email_to = |
|
43 | #email_to = | |
44 | ## Examples: |
|
44 | ## Examples: | |
45 | #email_to = admin@example.com |
|
45 | #email_to = admin@example.com | |
46 | #email_to = admin@example.com another_admin@example.com |
|
46 | #email_to = admin@example.com another_admin@example.com | |
47 | email_to = |
|
47 | email_to = | |
48 |
|
48 | |||
49 | ## 'From' header for error emails. You can optionally add a name. |
|
49 | ## 'From' header for error emails. You can optionally add a name. | |
50 | ## Default: (none) |
|
50 | ## Default: (none) | |
51 | ## Examples: |
|
51 | ## Examples: | |
52 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> |
|
52 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> | |
53 | #error_email_from = kallithea_errors@example.com |
|
53 | #error_email_from = kallithea_errors@example.com | |
54 | error_email_from = |
|
54 | error_email_from = | |
55 |
|
55 | |||
56 | ## SMTP server settings |
|
56 | ## SMTP server settings | |
57 | ## If specifying credentials, make sure to use secure connections. |
|
57 | ## If specifying credentials, make sure to use secure connections. | |
58 | ## Default: Send unencrypted unauthenticated mails to the specified smtp_server. |
|
58 | ## Default: Send unencrypted unauthenticated mails to the specified smtp_server. | |
59 | ## For "SSL", use smtp_use_ssl = true and smtp_port = 465. |
|
59 | ## For "SSL", use smtp_use_ssl = true and smtp_port = 465. | |
60 | ## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587. |
|
60 | ## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587. | |
61 | smtp_server = |
|
61 | smtp_server = | |
62 | #smtp_username = |
|
62 | #smtp_username = | |
63 | #smtp_password = |
|
63 | #smtp_password = | |
64 | smtp_port = |
|
64 | smtp_port = | |
65 | #smtp_use_ssl = false |
|
65 | #smtp_use_ssl = false | |
66 | #smtp_use_tls = false |
|
66 | #smtp_use_tls = false | |
67 |
|
67 | |||
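A rough sketch of how the SSL/STARTTLS combinations described above map onto Python's smtplib (illustrative only, not Kallithea's actual mail code; server, port and credentials are placeholders):

    import smtplib

    def open_smtp(server, port, username=None, password=None,
                  use_ssl=False, use_tls=False):
        if use_ssl:
            # smtp_use_ssl = true, smtp_port = 465: TLS from the first byte
            conn = smtplib.SMTP_SSL(server, port)
        else:
            conn = smtplib.SMTP(server, port)
            if use_tls:
                # smtp_use_tls = true, smtp_port = 587: upgrade via STARTTLS
                conn.starttls()
        if username:
            conn.login(username, password)
        return conn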
68 | [server:main] |
|
68 | [server:main] | |
69 | ## Gearbox default web server ## |
|
69 | ## Gearbox default web server ## | |
70 | #use = egg:gearbox#wsgiref |
|
70 | #use = egg:gearbox#wsgiref | |
71 | ## nr of worker threads to spawn |
|
71 | ## nr of worker threads to spawn | |
72 | #threadpool_workers = 1 |
|
72 | #threadpool_workers = 1 | |
73 | ## max request before thread respawn |
|
73 | ## max request before thread respawn | |
74 | #threadpool_max_requests = 100 |
|
74 | #threadpool_max_requests = 100 | |
75 | ## option to use a thread pool |
|
75 | ## option to use a thread pool | |
76 | #use_threadpool = true |
|
76 | #use_threadpool = true | |
77 |
|
77 | |||
78 | ## Gearbox gevent web server ## |
|
78 | ## Gearbox gevent web server ## | |
79 | #use = egg:gearbox#gevent |
|
79 | #use = egg:gearbox#gevent | |
80 |
|
80 | |||
81 | ## WAITRESS ## |
|
81 | ## WAITRESS ## | |
82 | use = egg:waitress#main |
|
82 | use = egg:waitress#main | |
83 | ## number of worker threads |
|
83 | ## number of worker threads | |
84 | threads = 1 |
|
84 | threads = 1 | |
85 | ## MAX BODY SIZE 100GB |
|
85 | ## MAX BODY SIZE 100GB | |
86 | max_request_body_size = 107374182400 |
|
86 | max_request_body_size = 107374182400 | |
87 | ## use poll instead of select, fixes fd limits, may not work on old |
|
87 | ## use poll instead of select, fixes fd limits, may not work on old | |
88 | ## windows systems. |
|
88 | ## windows systems. | |
89 | #asyncore_use_poll = True |
|
89 | #asyncore_use_poll = True | |
90 |
|
90 | |||
91 | ## GUNICORN ## |
|
91 | ## GUNICORN ## | |
92 | #use = egg:gunicorn#main |
|
92 | #use = egg:gunicorn#main | |
93 | ## number of process workers. You must set `instance_id = *` when this option |
|
93 | ## number of process workers. You must set `instance_id = *` when this option | |
94 | ## is set to more than one worker |
|
94 | ## is set to more than one worker | |
95 | #workers = 1 |
|
95 | #workers = 1 | |
96 | ## process name |
|
96 | ## process name | |
97 | #proc_name = kallithea |
|
97 | #proc_name = kallithea | |
98 | ## type of worker class, one of sync, eventlet, gevent, tornado |
|
98 | ## type of worker class, one of sync, eventlet, gevent, tornado | |
99 | ## for bigger setups, using a worker class other than sync is recommended |
|
99 | ## for bigger setups, using a worker class other than sync is recommended | |
100 | #worker_class = sync |
|
100 | #worker_class = sync | |
101 | #max_requests = 1000 |
|
101 | #max_requests = 1000 | |
102 | ## amount of time a worker can handle request before it gets killed and |
|
102 | ## amount of time a worker can handle request before it gets killed and | |
103 | ## restarted |
|
103 | ## restarted | |
104 | #timeout = 3600 |
|
104 | #timeout = 3600 | |
105 |
|
105 | |||
106 | ## UWSGI ## |
|
106 | ## UWSGI ## | |
107 | ## run with uwsgi --ini-paste-logged <inifile.ini> |
|
107 | ## run with uwsgi --ini-paste-logged <inifile.ini> | |
108 | #[uwsgi] |
|
108 | #[uwsgi] | |
109 | #socket = /tmp/uwsgi.sock |
|
109 | #socket = /tmp/uwsgi.sock | |
110 | #master = true |
|
110 | #master = true | |
111 | #http = 127.0.0.1:5000 |
|
111 | #http = 127.0.0.1:5000 | |
112 |
|
112 | |||
113 | ## run as a daemon and redirect all output to a file |
|
113 | ## run as a daemon and redirect all output to a file | |
114 | #daemonize = ./uwsgi_kallithea.log |
|
114 | #daemonize = ./uwsgi_kallithea.log | |
115 |
|
115 | |||
116 | ## master process PID |
|
116 | ## master process PID | |
117 | #pidfile = ./uwsgi_kallithea.pid |
|
117 | #pidfile = ./uwsgi_kallithea.pid | |
118 |
|
118 | |||
119 | ## stats server with workers statistics, use uwsgitop |
|
119 | ## stats server with workers statistics, use uwsgitop | |
120 | ## for monitoring, `uwsgitop 127.0.0.1:1717` |
|
120 | ## for monitoring, `uwsgitop 127.0.0.1:1717` | |
121 | #stats = 127.0.0.1:1717 |
|
121 | #stats = 127.0.0.1:1717 | |
122 | #memory-report = true |
|
122 | #memory-report = true | |
123 |
|
123 | |||
124 | ## log 5XX errors |
|
124 | ## log 5XX errors | |
125 | #log-5xx = true |
|
125 | #log-5xx = true | |
126 |
|
126 | |||
127 | ## Set the socket listen queue size. |
|
127 | ## Set the socket listen queue size. | |
128 | #listen = 256 |
|
128 | #listen = 256 | |
129 |
|
129 | |||
130 | ## Gracefully Reload workers after the specified amount of managed requests |
|
130 | ## Gracefully Reload workers after the specified amount of managed requests | |
131 | ## (avoid memory leaks). |
|
131 | ## (avoid memory leaks). | |
132 | #max-requests = 1000 |
|
132 | #max-requests = 1000 | |
133 |
|
133 | |||
134 | ## enable large buffers |
|
134 | ## enable large buffers | |
135 | #buffer-size = 65535 |
|
135 | #buffer-size = 65535 | |
136 |
|
136 | |||
137 | ## socket and http timeouts ## |
|
137 | ## socket and http timeouts ## | |
138 | #http-timeout = 3600 |
|
138 | #http-timeout = 3600 | |
139 | #socket-timeout = 3600 |
|
139 | #socket-timeout = 3600 | |
140 |
|
140 | |||
141 | ## Log requests slower than the specified number of milliseconds. |
|
141 | ## Log requests slower than the specified number of milliseconds. | |
142 | #log-slow = 10 |
|
142 | #log-slow = 10 | |
143 |
|
143 | |||
144 | ## Exit if no app can be loaded. |
|
144 | ## Exit if no app can be loaded. | |
145 | #need-app = true |
|
145 | #need-app = true | |
146 |
|
146 | |||
147 | ## Set lazy mode (load apps in workers instead of master). |
|
147 | ## Set lazy mode (load apps in workers instead of master). | |
148 | #lazy = true |
|
148 | #lazy = true | |
149 |
|
149 | |||
150 | ## scaling ## |
|
150 | ## scaling ## | |
151 | ## set cheaper algorithm to use, if not set default will be used |
|
151 | ## set cheaper algorithm to use, if not set default will be used | |
152 | #cheaper-algo = spare |
|
152 | #cheaper-algo = spare | |
153 |
|
153 | |||
154 | ## minimum number of workers to keep at all times |
|
154 | ## minimum number of workers to keep at all times | |
155 | #cheaper = 1 |
|
155 | #cheaper = 1 | |
156 |
|
156 | |||
157 | ## number of workers to spawn at startup |
|
157 | ## number of workers to spawn at startup | |
158 | #cheaper-initial = 1 |
|
158 | #cheaper-initial = 1 | |
159 |
|
159 | |||
160 | ## maximum number of workers that can be spawned |
|
160 | ## maximum number of workers that can be spawned | |
161 | #workers = 4 |
|
161 | #workers = 4 | |
162 |
|
162 | |||
163 | ## how many workers should be spawned at a time |
|
163 | ## how many workers should be spawned at a time | |
164 | #cheaper-step = 1 |
|
164 | #cheaper-step = 1 | |
165 |
|
165 | |||
166 | ## COMMON ## |
|
166 | ## COMMON ## | |
167 | #host = 127.0.0.1 |
|
167 | #host = 127.0.0.1 | |
168 | host = 0.0.0.0 |
|
168 | host = 0.0.0.0 | |
169 | port = 5000 |
|
169 | port = 5000 | |
170 |
|
170 | |||
171 | ## middleware for hosting the WSGI application under a URL prefix |
|
171 | ## middleware for hosting the WSGI application under a URL prefix | |
172 | #[filter:proxy-prefix] |
|
172 | #[filter:proxy-prefix] | |
173 | #use = egg:PasteDeploy#prefix |
|
173 | #use = egg:PasteDeploy#prefix | |
174 | #prefix = /<your-prefix> |
|
174 | #prefix = /<your-prefix> | |
175 |
|
175 | |||
176 | [app:main] |
|
176 | [app:main] | |
177 | use = egg:kallithea |
|
177 | use = egg:kallithea | |
178 | ## enable proxy prefix middleware |
|
178 | ## enable proxy prefix middleware | |
179 | #filter-with = proxy-prefix |
|
179 | #filter-with = proxy-prefix | |
180 |
|
180 | |||
181 | full_stack = true |
|
181 | full_stack = true | |
182 | static_files = true |
|
182 | static_files = true | |
183 | ## Available Languages: |
|
183 | ## Available Languages: | |
184 | ## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW |
|
184 | ## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW | |
185 | lang = |
|
185 | lang = | |
186 | cache_dir = %(here)s/data |
|
186 | cache_dir = %(here)s/data | |
187 | index_dir = %(here)s/data/index |
|
187 | index_dir = %(here)s/data/index | |
188 |
|
188 | |||
189 | ## perform a full repository scan on each server start, this should be |
|
189 | ## perform a full repository scan on each server start, this should be | |
190 | ## set to false after first startup, to allow faster server restarts. |
|
190 | ## set to false after first startup, to allow faster server restarts. | |
191 | #initial_repo_scan = false |
|
191 | #initial_repo_scan = false | |
192 | initial_repo_scan = true |
|
192 | initial_repo_scan = true | |
193 |
|
193 | |||
194 | ## uncomment and set this path to use archive download cache |
|
194 | ## uncomment and set this path to use archive download cache | |
195 | archive_cache_dir = %(here)s/tarballcache |
|
195 | archive_cache_dir = %(here)s/tarballcache | |
196 |
|
196 | |||
197 | ## change this to unique ID for security |
|
197 | ## change this to unique ID for security | |
198 | app_instance_uuid = development-not-secret |
|
198 | app_instance_uuid = development-not-secret | |
199 |
|
199 | |||
200 | ## cut off limit for large diffs (size in bytes) |
|
200 | ## cut off limit for large diffs (size in bytes) | |
201 | cut_off_limit = 256000 |
|
201 | cut_off_limit = 256000 | |
202 |
|
202 | |||
203 | ## force https in Kallithea, fixes https redirects, assumes it's always https |
|
203 | ## force https in Kallithea, fixes https redirects, assumes it's always https | |
204 | force_https = false |
|
204 | force_https = false | |
205 |
|
205 | |||
206 | ## use Strict-Transport-Security headers |
|
206 | ## use Strict-Transport-Security headers | |
207 | use_htsts = false |
|
207 | use_htsts = false | |
208 |
|
208 | |||
209 | ## number of commits stats will parse on each iteration |
|
209 | ## number of commits stats will parse on each iteration | |
210 | commit_parse_limit = 25 |
|
210 | commit_parse_limit = 25 | |
211 |
|
211 | |||
212 | ## path to git executable |
|
212 | ## path to git executable | |
213 | git_path = git |
|
213 | git_path = git | |
214 |
|
214 | |||
215 | ## git rev filter option, --all is the default filter, if you need to |
|
215 | ## git rev filter option, --all is the default filter, if you need to | |
216 | ## hide all refs in changelog switch this to --branches --tags |
|
216 | ## hide all refs in changelog switch this to --branches --tags | |
217 | #git_rev_filter = --branches --tags |
|
217 | #git_rev_filter = --branches --tags | |
218 |
|
218 | |||
219 | ## RSS feed options |
|
219 | ## RSS feed options | |
220 | rss_cut_off_limit = 256000 |
|
220 | rss_cut_off_limit = 256000 | |
221 | rss_items_per_page = 10 |
|
221 | rss_items_per_page = 10 | |
222 | rss_include_diff = false |
|
222 | rss_include_diff = false | |
223 |
|
223 | |||
224 | ## options for showing and identifying changesets |
|
224 | ## options for showing and identifying changesets | |
225 | show_sha_length = 12 |
|
225 | show_sha_length = 12 | |
226 | show_revision_number = false |
|
226 | show_revision_number = false | |
227 |
|
227 | |||
228 | ## Canonical URL to use when creating full URLs in UI and texts. |
|
228 | ## Canonical URL to use when creating full URLs in UI and texts. | |
229 | ## Useful when the site is available under different names or protocols. |
|
229 | ## Useful when the site is available under different names or protocols. | |
230 | ## Defaults to what is provided in the WSGI environment. |
|
230 | ## Defaults to what is provided in the WSGI environment. | |
231 | #canonical_url = https://kallithea.example.com/repos |
|
231 | #canonical_url = https://kallithea.example.com/repos | |
232 |
|
232 | |||
233 | ## gist URL alias, used to create nicer urls for gist. This should be an |
|
233 | ## gist URL alias, used to create nicer urls for gist. This should be an | |
234 | ## url that does rewrites to _admin/gists/<gistid>. |
|
234 | ## url that does rewrites to _admin/gists/<gistid>. | |
235 | ## example: http://gist.example.com/{gistid}. Empty means use the internal |
|
235 | ## example: http://gist.example.com/{gistid}. Empty means use the internal | |
236 | ## Kallithea url, ie. http[s]://kallithea.example.com/_admin/gists/<gistid> |
|
236 | ## Kallithea url, ie. http[s]://kallithea.example.com/_admin/gists/<gistid> | |
237 | gist_alias_url = |
|
237 | gist_alias_url = | |
238 |
|
238 | |||
239 | ## whitelist of API-enabled controllers. This allows adding a list of |
|
239 | ## whitelist of API-enabled controllers. This allows adding a list of | |
240 | ## controllers to which access will be enabled by api_key. eg: to enable |
|
240 | ## controllers to which access will be enabled by api_key. eg: to enable | |
241 | ## api access to raw_files put `FilesController:raw`, to enable access to patches |
|
241 | ## api access to raw_files put `FilesController:raw`, to enable access to patches | |
242 | ## add `ChangesetController:changeset_patch`. This list should be "," separated |
|
242 | ## add `ChangesetController:changeset_patch`. This list should be "," separated | |
243 | ## Syntax is <ControllerClass>:<function>. Check debug logs for generated names |
|
243 | ## Syntax is <ControllerClass>:<function>. Check debug logs for generated names | |
244 | ## Recommended settings below are commented out: |
|
244 | ## Recommended settings below are commented out: | |
245 | api_access_controllers_whitelist = |
|
245 | api_access_controllers_whitelist = | |
246 | # ChangesetController:changeset_patch, |
|
246 | # ChangesetController:changeset_patch, | |
247 | # ChangesetController:changeset_raw, |
|
247 | # ChangesetController:changeset_raw, | |
248 | # FilesController:raw, |
|
248 | # FilesController:raw, | |
249 | # FilesController:archivefile |
|
249 | # FilesController:archivefile | |
250 |
|
250 | |||
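For illustration, assuming `FilesController:raw` has been whitelisted above, a raw file can then be fetched with an api_key query parameter instead of an interactive login (the URL, repository and key below are made up for the example):

    import urllib2

    # made-up server, repository path and key - substitute your own
    url = ('https://kallithea.example.com/myrepo/raw/tip/setup.py'
           '?api_key=SECRET_API_KEY')
    print urllib2.urlopen(url).read()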
251 | ## default encoding used to convert from and to unicode |
|
251 | ## default encoding used to convert from and to unicode | |
252 | ## can also be a comma separated list of encodings in case of mixed encodings |
|
252 | ## can also be a comma separated list of encodings in case of mixed encodings | |
253 | default_encoding = utf8 |
|
253 | default_encoding = utf8 | |
254 |
|
254 | |||
255 | ## issue tracker for Kallithea (leave blank to disable, absent for default) |
|
255 | ## issue tracker for Kallithea (leave blank to disable, absent for default) | |
256 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues |
|
256 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues | |
257 |
|
257 | |||
258 | ## issue tracking mapping for commits messages |
|
258 | ## issue tracking mapping for commits messages | |
259 | ## comment out issue_pat, issue_server, issue_prefix to enable |
|
259 | ## comment out issue_pat, issue_server, issue_prefix to enable | |
260 |
|
260 | |||
261 | ## pattern to get the issues from commit messages |
|
261 | ## pattern to get the issues from commit messages | |
262 | ## default one used here is #<numbers> with a regex passive group for `#` |
|
262 | ## default one used here is #<numbers> with a regex passive group for `#` | |
263 | ## {id} will be all groups matched from this pattern |
|
263 | ## {id} will be all groups matched from this pattern | |
264 |
|
264 | |||
265 | issue_pat = (?:\s*#)(\d+) |
|
265 | issue_pat = (?:\s*#)(\d+) | |
266 |
|
266 | |||
267 | ## server url to the issue, each {id} will be replaced with match |
|
267 | ## server url to the issue, each {id} will be replaced with match | |
268 | ## fetched from the regex and {repo} is replaced with full repository name |
|
268 | ## fetched from the regex and {repo} is replaced with full repository name | |
269 | ## including groups {repo_name} is replaced with just name of repo |
|
269 | ## including groups {repo_name} is replaced with just name of repo | |
270 |
|
270 | |||
271 | issue_server_link = https://issues.example.com/{repo}/issue/{id} |
|
271 | issue_server_link = https://issues.example.com/{repo}/issue/{id} | |
272 |
|
272 | |||
273 | ## prefix to add to link to indicate it's an url |
|
273 | ## prefix to add to link to indicate it's an url | |
274 | ## #314 will be replaced by <issue_prefix><id> |
|
274 | ## #314 will be replaced by <issue_prefix><id> | |
275 |
|
275 | |||
276 | issue_prefix = # |
|
276 | issue_prefix = # | |
277 |
|
277 | |||
278 | ## issue_pat, issue_server_link, issue_prefix can have suffixes to specify |
|
278 | ## issue_pat, issue_server_link, issue_prefix can have suffixes to specify | |
279 | ## multiple patterns, to other issues server, wiki or others |
|
279 | ## multiple patterns, to other issues server, wiki or others | |
280 | ## below an example how to create a wiki pattern |
|
280 | ## below an example how to create a wiki pattern | |
281 | # wiki-some-id -> https://wiki.example.com/some-id |
|
281 | # wiki-some-id -> https://wiki.example.com/some-id | |
282 |
|
282 | |||
283 | #issue_pat_wiki = (?:wiki-)(.+) |
|
283 | #issue_pat_wiki = (?:wiki-)(.+) | |
284 | #issue_server_link_wiki = https://wiki.example.com/{id} |
|
284 | #issue_server_link_wiki = https://wiki.example.com/{id} | |
285 | #issue_prefix_wiki = WIKI- |
|
285 | #issue_prefix_wiki = WIKI- | |
286 |
|
286 | |||
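A simplified sketch of how the three settings above combine (the real linkification happens inside Kallithea's helpers; this only shows the substitution logic, with example values mirroring the settings above):

    import re

    issue_pat = r'(?:\s*#)(\d+)'
    issue_server_link = 'https://issues.example.com/{repo}/issue/{id}'
    issue_prefix = '#'

    def linkify(message, repo='mygroup/myrepo'):
        def repl(match):
            issue_id = match.group(1)
            url = issue_server_link.replace('{repo}', repo).replace('{id}', issue_id)
            return ' <a href="%s">%s%s</a>' % (url, issue_prefix, issue_id)
        return re.sub(issue_pat, repl, message)

    # 'fixes <a href="https://issues.example.com/mygroup/myrepo/issue/314">#314</a>'
    print linkify('fixes #314')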
287 | ## alternative HTTP response code to return on failed authentication. Default HTTP |
|
287 | ## alternative HTTP response code to return on failed authentication. Default HTTP | |
288 | ## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with |
|
288 | ## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with | |
289 | ## handling that. Set this variable to 403 to return HTTPForbidden |
|
289 | ## handling that. Set this variable to 403 to return HTTPForbidden | |
290 | auth_ret_code = |
|
290 | auth_ret_code = | |
291 |
|
291 | |||
292 | ## locking return code. When repository is locked return this HTTP code. 2XX |
|
292 | ## locking return code. When repository is locked return this HTTP code. 2XX | |
293 | ## codes don't break the transactions while 4XX codes do |
|
293 | ## codes don't break the transactions while 4XX codes do | |
294 | lock_ret_code = 423 |
|
294 | lock_ret_code = 423 | |
295 |
|
295 | |||
296 | ## allows to change the repository location in settings page |
|
296 | ## allows to change the repository location in settings page | |
297 | allow_repo_location_change = True |
|
297 | allow_repo_location_change = True | |
298 |
|
298 | |||
299 | ## allows to setup custom hooks in settings page |
|
299 | ## allows to setup custom hooks in settings page | |
300 | allow_custom_hooks_settings = True |
|
300 | allow_custom_hooks_settings = True | |
301 |
|
301 | |||
302 | ## extra extensions for indexing, space separated and without the leading '.'. |
|
302 | ## extra extensions for indexing, space separated and without the leading '.'. | |
303 | # index.extensions = |
|
303 | # index.extensions = | |
304 | # gemfile |
|
304 | # gemfile | |
305 | # lock |
|
305 | # lock | |
306 |
|
306 | |||
307 | ## extra filenames for indexing, space separated |
|
307 | ## extra filenames for indexing, space separated | |
308 | # index.filenames = |
|
308 | # index.filenames = | |
309 | # .dockerignore |
|
309 | # .dockerignore | |
310 | # .editorconfig |
|
310 | # .editorconfig | |
311 | # INSTALL |
|
311 | # INSTALL | |
312 | # CHANGELOG |
|
312 | # CHANGELOG | |
313 |
|
313 | |||
314 | #################################### |
|
314 | #################################### | |
315 | ### CELERY CONFIG #### |
|
315 | ### CELERY CONFIG #### | |
316 | #################################### |
|
316 | #################################### | |
317 |
|
317 | |||
318 | use_celery = false |
|
318 | use_celery = false | |
319 |
|
319 | |||
320 | ## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq: |
|
320 | ## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq: | |
321 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost |
|
321 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost | |
322 |
|
322 | |||
323 | celery.imports = kallithea.lib.celerylib.tasks |
|
323 | celery.imports = kallithea.lib.celerylib.tasks | |
324 | celery.accept.content = pickle |
|
324 | celery.accept.content = pickle | |
325 | celery.result.backend = amqp |
|
325 | celery.result.backend = amqp | |
326 | celery.result.dburi = amqp:// |
|
326 | celery.result.dburi = amqp:// | |
327 | celery.result.serialier = json |
|
327 | celery.result.serialier = json | |
328 |
|
328 | |||
329 | #celery.send.task.error.emails = true |
|
329 | #celery.send.task.error.emails = true | |
330 | #celery.amqp.task.result.expires = 18000 |
|
330 | #celery.amqp.task.result.expires = 18000 | |
331 |
|
331 | |||
332 | celeryd.concurrency = 2 |
|
332 | celeryd.concurrency = 2 | |
333 | celeryd.max.tasks.per.child = 1 |
|
333 | celeryd.max.tasks.per.child = 1 | |
334 |
|
334 | |||
335 | ## If true, tasks will never be sent to the queue, but executed locally instead. |
|
335 | ## If true, tasks will never be sent to the queue, but executed locally instead. | |
336 | celery.always.eager = false |
|
336 | celery.always.eager = false | |
337 |
|
337 | |||
338 | #################################### |
|
338 | #################################### | |
339 | ### BEAKER CACHE #### |
|
339 | ### BEAKER CACHE #### | |
340 | #################################### |
|
340 | #################################### | |
341 |
|
341 | |||
342 | beaker.cache.data_dir = %(here)s/data/cache/data |
|
342 | beaker.cache.data_dir = %(here)s/data/cache/data | |
343 | beaker.cache.lock_dir = %(here)s/data/cache/lock |
|
343 | beaker.cache.lock_dir = %(here)s/data/cache/lock | |
344 |
|
344 | |||
345 | beaker.cache.regions = short_term,long_term,sql_cache_short |
|
345 | beaker.cache.regions = short_term,long_term,sql_cache_short | |
346 |
|
346 | |||
347 | beaker.cache.short_term.type = memory |
|
347 | beaker.cache.short_term.type = memory | |
348 | beaker.cache.short_term.expire = 60 |
|
348 | beaker.cache.short_term.expire = 60 | |
349 | beaker.cache.short_term.key_length = 256 |
|
349 | beaker.cache.short_term.key_length = 256 | |
350 |
|
350 | |||
351 | beaker.cache.long_term.type = memory |
|
351 | beaker.cache.long_term.type = memory | |
352 | beaker.cache.long_term.expire = 36000 |
|
352 | beaker.cache.long_term.expire = 36000 | |
353 | beaker.cache.long_term.key_length = 256 |
|
353 | beaker.cache.long_term.key_length = 256 | |
354 |
|
354 | |||
355 | beaker.cache.sql_cache_short.type = memory |
|
355 | beaker.cache.sql_cache_short.type = memory | |
356 | beaker.cache.sql_cache_short.expire = 10 |
|
356 | beaker.cache.sql_cache_short.expire = 10 | |
357 | beaker.cache.sql_cache_short.key_length = 256 |
|
357 | beaker.cache.sql_cache_short.key_length = 256 | |
358 |
|
358 | |||
359 | #################################### |
|
359 | #################################### | |
360 | ### BEAKER SESSION #### |
|
360 | ### BEAKER SESSION #### | |
361 | #################################### |
|
361 | #################################### | |
362 |
|
362 | |||
363 | ## Name of session cookie. Should be unique for a given host and path, even when running |
|
363 | ## Name of session cookie. Should be unique for a given host and path, even when running | |
364 | ## on different ports. Otherwise, cookie sessions will be shared and messed up. |
|
364 | ## on different ports. Otherwise, cookie sessions will be shared and messed up. | |
365 | beaker.session.key = kallithea |
|
365 | beaker.session.key = kallithea | |
366 | ## Sessions should always only be accessible by the browser, not directly by JavaScript. |
|
366 | ## Sessions should always only be accessible by the browser, not directly by JavaScript. | |
367 | beaker.session.httponly = true |
|
367 | beaker.session.httponly = true | |
368 | ## Session lifetime. 2592000 seconds is 30 days. |
|
368 | ## Session lifetime. 2592000 seconds is 30 days. | |
369 | beaker.session.timeout = 2592000 |
|
369 | beaker.session.timeout = 2592000 | |
370 |
|
370 | |||
371 | ## Server secret used with HMAC to ensure integrity of cookies. |
|
371 | ## Server secret used with HMAC to ensure integrity of cookies. | |
372 | beaker.session.secret = development-not-secret |
|
372 | beaker.session.secret = development-not-secret | |
373 | ## Further, encrypt the data with AES. |
|
373 | ## Further, encrypt the data with AES. | |
374 | #beaker.session.encrypt_key = <key_for_encryption> |
|
374 | #beaker.session.encrypt_key = <key_for_encryption> | |
375 | #beaker.session.validate_key = <validation_key> |
|
375 | #beaker.session.validate_key = <validation_key> | |
376 |
|
376 | |||
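Both keys above should be long random strings. A minimal way to generate suitable values (assumption: any high-entropy hex string is acceptable, no particular format is required):

    import os, binascii

    # 32 random bytes rendered as 64 hex characters
    print binascii.hexlify(os.urandom(32))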
377 | ## Type of storage used for the session, current types are |
|
377 | ## Type of storage used for the session, current types are | |
378 | ## dbm, file, memcached, database, and memory. |
|
378 | ## dbm, file, memcached, database, and memory. | |
379 |
|
379 | |||
380 | ## File system storage of session data. (default) |
|
380 | ## File system storage of session data. (default) | |
381 | #beaker.session.type = file |
|
381 | #beaker.session.type = file | |
382 |
|
382 | |||
383 | ## Cookie only, store all session data inside the cookie. Requires secure secrets. |
|
383 | ## Cookie only, store all session data inside the cookie. Requires secure secrets. | |
384 | #beaker.session.type = cookie |
|
384 | #beaker.session.type = cookie | |
385 |
|
385 | |||
386 | ## Database storage of session data. |
|
386 | ## Database storage of session data. | |
387 | #beaker.session.type = ext:database |
|
387 | #beaker.session.type = ext:database | |
388 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea |
|
388 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea | |
389 | #beaker.session.table_name = db_session |
|
389 | #beaker.session.table_name = db_session | |
390 |
|
390 | |||
391 | ############################ |
|
391 | ############################ | |
392 | ## ERROR HANDLING SYSTEMS ## |
|
392 | ## ERROR HANDLING SYSTEMS ## | |
393 | ############################ |
|
393 | ############################ | |
394 |
|
394 | |||
395 | # Propagate email settings to ErrorReporter of TurboGears2 |
|
395 | # Propagate email settings to ErrorReporter of TurboGears2 | |
396 | # You do not normally need to change these lines |
|
396 | # You do not normally need to change these lines | |
397 | get trace_errors.error_email = email_to |
|
397 | get trace_errors.error_email = email_to | |
398 | get trace_errors.smtp_server = smtp_server |
|
398 | get trace_errors.smtp_server = smtp_server | |
399 | get trace_errors.smtp_port = smtp_port |
|
399 | get trace_errors.smtp_port = smtp_port | |
400 | get trace_errors.from_address = error_email_from |
|
400 | get trace_errors.from_address = error_email_from | |
401 |
|
401 | |||
402 | #################### |
|
402 | #################### | |
403 | ### [appenlight] ### |
|
403 | ### [appenlight] ### | |
404 | #################### |
|
404 | #################### | |
405 |
|
405 | |||
406 | ## AppEnlight is tailored to work with Kallithea, see |
|
406 | ## AppEnlight is tailored to work with Kallithea, see | |
407 | ## http://appenlight.com for details how to obtain an account |
|
407 | ## http://appenlight.com for details how to obtain an account | |
408 | ## you must install python package `appenlight_client` to make it work |
|
408 | ## you must install python package `appenlight_client` to make it work | |
409 |
|
409 | |||
410 | ## appenlight enabled |
|
410 | ## appenlight enabled | |
411 | appenlight = false |
|
411 | appenlight = false | |
412 |
|
412 | |||
413 | appenlight.server_url = https://api.appenlight.com |
|
413 | appenlight.server_url = https://api.appenlight.com | |
414 | appenlight.api_key = YOUR_API_KEY |
|
414 | appenlight.api_key = YOUR_API_KEY | |
415 |
|
415 | |||
416 | ## TWEAK AMOUNT OF INFO SENT HERE |
|
416 | ## TWEAK AMOUNT OF INFO SENT HERE | |
417 |
|
417 | |||
418 | ## enables 404 error logging (default False) |
|
418 | ## enables 404 error logging (default False) | |
419 | appenlight.report_404 = false |
|
419 | appenlight.report_404 = false | |
420 |
|
420 | |||
421 | ## time in seconds after request is considered being slow (default 1) |
|
421 | ## time in seconds after request is considered being slow (default 1) | |
422 | appenlight.slow_request_time = 1 |
|
422 | appenlight.slow_request_time = 1 | |
423 |
|
423 | |||
424 | ## record slow requests in application |
|
424 | ## record slow requests in application | |
425 | ## (needs to be enabled for slow datastore recording and time tracking) |
|
425 | ## (needs to be enabled for slow datastore recording and time tracking) | |
426 | appenlight.slow_requests = true |
|
426 | appenlight.slow_requests = true | |
427 |
|
427 | |||
428 | ## enable hooking to application loggers |
|
428 | ## enable hooking to application loggers | |
429 | #appenlight.logging = true |
|
429 | #appenlight.logging = true | |
430 |
|
430 | |||
431 | ## minimum log level for log capture |
|
431 | ## minimum log level for log capture | |
432 | #appenlight.logging.level = WARNING |
|
432 | #appenlight.logging.level = WARNING | |
433 |
|
433 | |||
434 | ## send logs only from erroneous/slow requests |
|
434 | ## send logs only from erroneous/slow requests | |
435 | ## (saves API quota for intensive logging) |
|
435 | ## (saves API quota for intensive logging) | |
436 | appenlight.logging_on_error = false |
|
436 | appenlight.logging_on_error = false | |
437 |
|
437 | |||
438 | ## list of additional keywords that should be grabbed from environ object |
|
438 | ## list of additional keywords that should be grabbed from environ object | |
439 | ## can be string with comma separated list of words in lowercase |
|
439 | ## can be string with comma separated list of words in lowercase | |
440 | ## (by default client will always send following info: |
|
440 | ## (by default client will always send following info: | |
441 | ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that |
|
441 | ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that | |
442 | ## start with HTTP*); this list can be extended with additional keywords here |
|
442 | ## start with HTTP*); this list can be extended with additional keywords here | |
443 | appenlight.environ_keys_whitelist = |
|
443 | appenlight.environ_keys_whitelist = | |
444 |
|
444 | |||
445 | ## list of keywords that should be blanked from request object |
|
445 | ## list of keywords that should be blanked from request object | |
446 | ## can be string with comma separated list of words in lowercase |
|
446 | ## can be string with comma separated list of words in lowercase | |
447 | ## (by default client will always blank keys that contain following words |
|
447 | ## (by default client will always blank keys that contain following words | |
448 | ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf' |
|
448 | ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf' | |
449 | ## this list can be extended with additional keywords set here |
|
449 | ## this list can be extended with additional keywords set here | |
450 | appenlight.request_keys_blacklist = |
|
450 | appenlight.request_keys_blacklist = | |
451 |
|
451 | |||
452 | ## list of namespaces that should be ignored when gathering log entries |
|
452 | ## list of namespaces that should be ignored when gathering log entries | |
453 | ## can be string with comma separated list of namespaces |
|
453 | ## can be string with comma separated list of namespaces | |
454 | ## (by default the client ignores own entries: appenlight_client.client) |
|
454 | ## (by default the client ignores own entries: appenlight_client.client) | |
455 | appenlight.log_namespace_blacklist = |
|
455 | appenlight.log_namespace_blacklist = | |
456 |
|
456 | |||
457 | ################ |
|
457 | ################ | |
458 | ### [sentry] ### |
|
458 | ### [sentry] ### | |
459 | ################ |
|
459 | ################ | |
460 |
|
460 | |||
461 | ## sentry is an alternative open source error aggregator |
|
461 | ## sentry is an alternative open source error aggregator | |
462 | ## you must install python packages `sentry` and `raven` to enable |
|
462 | ## you must install python packages `sentry` and `raven` to enable | |
463 |
|
463 | |||
464 | sentry.dsn = YOUR_DSN |
|
464 | sentry.dsn = YOUR_DSN | |
465 | sentry.servers = |
|
465 | sentry.servers = | |
466 | sentry.name = |
|
466 | sentry.name = | |
467 | sentry.key = |
|
467 | sentry.key = | |
468 | sentry.public_key = |
|
468 | sentry.public_key = | |
469 | sentry.secret_key = |
|
469 | sentry.secret_key = | |
470 | sentry.project = |
|
470 | sentry.project = | |
471 | sentry.site = |
|
471 | sentry.site = | |
472 | sentry.include_paths = |
|
472 | sentry.include_paths = | |
473 | sentry.exclude_paths = |
|
473 | sentry.exclude_paths = | |
474 |
|
474 | |||
475 | ################################################################################ |
|
475 | ################################################################################ | |
476 | ## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ## |
|
476 | ## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ## | |
477 | ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ## |
|
477 | ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ## | |
478 | ## execute malicious code after an exception is raised. ## |
|
478 | ## execute malicious code after an exception is raised. ## | |
479 | ################################################################################ |
|
479 | ################################################################################ | |
480 | #debug = false |
|
480 | #debug = false | |
481 | debug = true |
|
481 | debug = true | |
482 |
|
482 | |||
483 | ################################## |
|
483 | ################################## | |
484 | ### LOGVIEW CONFIG ### |
|
484 | ### LOGVIEW CONFIG ### | |
485 | ################################## |
|
485 | ################################## | |
486 |
|
486 | |||
487 | logview.sqlalchemy = #faa |
|
487 | logview.sqlalchemy = #faa | |
488 | logview.pylons.templating = #bfb |
|
488 | logview.pylons.templating = #bfb | |
489 | logview.pylons.util = #eee |
|
489 | logview.pylons.util = #eee | |
490 |
|
490 | |||
491 | ######################################################### |
|
491 | ######################################################### | |
492 | ### DB CONFIGS - EACH DB WILL HAVE IT'S OWN CONFIG ### |
|
492 | ### DB CONFIGS - EACH DB WILL HAVE IT'S OWN CONFIG ### | |
493 | ######################################################### |
|
493 | ######################################################### | |
494 |
|
494 | |||
495 | # SQLITE [default] |
|
495 | # SQLITE [default] | |
496 | sqlalchemy.url = sqlite:///%(here)s/kallithea.db?timeout=60 |
|
496 | sqlalchemy.url = sqlite:///%(here)s/kallithea.db?timeout=60 | |
497 |
|
497 | |||
498 | # POSTGRESQL |
|
498 | # POSTGRESQL | |
499 | #sqlalchemy.url = postgresql://user:pass@localhost/kallithea |
|
499 | #sqlalchemy.url = postgresql://user:pass@localhost/kallithea | |
500 |
|
500 | |||
501 | # MySQL |
|
501 | # MySQL | |
502 | #sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 |
|
502 | #sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 | |
503 |
|
503 | |||
504 | # see sqlalchemy docs for others |
|
504 | # see sqlalchemy docs for others | |
505 |
|
505 | |||
506 | sqlalchemy.echo = false |
|
|||
507 | sqlalchemy.pool_recycle = 3600 |
|
506 | sqlalchemy.pool_recycle = 3600 | |
508 |
|
507 | |||
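For reference, keys with the `sqlalchemy.` prefix are normally handed straight to SQLAlchemy. A hedged sketch of the application-side wiring (the actual setup lives in Kallithea/TurboGears and is not shown here; the settings dict mirrors the values above):

    from sqlalchemy import engine_from_config

    settings = {
        'sqlalchemy.url': 'sqlite:///kallithea.db?timeout=60',
        'sqlalchemy.pool_recycle': '3600',
    }
    # engine_from_config picks up url, pool_recycle, etc. from the prefixed keys
    engine = engine_from_config(settings, prefix='sqlalchemy.')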
509 | ################################ |
|
508 | ################################ | |
510 | ### ALEMBIC CONFIGURATION #### |
|
509 | ### ALEMBIC CONFIGURATION #### | |
511 | ################################ |
|
510 | ################################ | |
512 |
|
511 | |||
513 | [alembic] |
|
512 | [alembic] | |
514 | script_location = kallithea:alembic |
|
513 | script_location = kallithea:alembic | |
515 |
|
514 | |||
516 | ################################ |
|
515 | ################################ | |
517 | ### LOGGING CONFIGURATION #### |
|
516 | ### LOGGING CONFIGURATION #### | |
518 | ################################ |
|
517 | ################################ | |
519 |
|
518 | |||
520 | [loggers] |
|
519 | [loggers] | |
521 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer |
|
520 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer | |
522 |
|
521 | |||
523 | [handlers] |
|
522 | [handlers] | |
524 | keys = console, console_sql |
|
523 | keys = console, console_sql | |
525 |
|
524 | |||
526 | [formatters] |
|
525 | [formatters] | |
527 | keys = generic, color_formatter, color_formatter_sql |
|
526 | keys = generic, color_formatter, color_formatter_sql | |
528 |
|
527 | |||
529 | ############# |
|
528 | ############# | |
530 | ## LOGGERS ## |
|
529 | ## LOGGERS ## | |
531 | ############# |
|
530 | ############# | |
532 |
|
531 | |||
533 | [logger_root] |
|
532 | [logger_root] | |
534 | level = NOTSET |
|
533 | level = NOTSET | |
535 | handlers = console |
|
534 | handlers = console | |
536 |
|
535 | |||
537 | [logger_routes] |
|
536 | [logger_routes] | |
538 | level = DEBUG |
|
537 | level = DEBUG | |
539 | handlers = |
|
538 | handlers = | |
540 | qualname = routes.middleware |
|
539 | qualname = routes.middleware | |
541 | ## "level = DEBUG" logs the route matched and routing variables. |
|
540 | ## "level = DEBUG" logs the route matched and routing variables. | |
542 | propagate = 1 |
|
541 | propagate = 1 | |
543 |
|
542 | |||
544 | [logger_beaker] |
|
543 | [logger_beaker] | |
545 | level = DEBUG |
|
544 | level = DEBUG | |
546 | handlers = |
|
545 | handlers = | |
547 | qualname = beaker.container |
|
546 | qualname = beaker.container | |
548 | propagate = 1 |
|
547 | propagate = 1 | |
549 |
|
548 | |||
550 | [logger_templates] |
|
549 | [logger_templates] | |
551 | level = INFO |
|
550 | level = INFO | |
552 | handlers = |
|
551 | handlers = | |
553 | qualname = pylons.templating |
|
552 | qualname = pylons.templating | |
554 | propagate = 1 |
|
553 | propagate = 1 | |
555 |
|
554 | |||
556 | [logger_kallithea] |
|
555 | [logger_kallithea] | |
557 | level = DEBUG |
|
556 | level = DEBUG | |
558 | handlers = |
|
557 | handlers = | |
559 | qualname = kallithea |
|
558 | qualname = kallithea | |
560 | propagate = 1 |
|
559 | propagate = 1 | |
561 |
|
560 | |||
562 | [logger_tg] |
|
561 | [logger_tg] | |
563 | level = DEBUG |
|
562 | level = DEBUG | |
564 | handlers = |
|
563 | handlers = | |
565 | qualname = tg |
|
564 | qualname = tg | |
566 | propagate = 1 |
|
565 | propagate = 1 | |
567 |
|
566 | |||
568 | [logger_gearbox] |
|
567 | [logger_gearbox] | |
569 | level = DEBUG |
|
568 | level = DEBUG | |
570 | handlers = |
|
569 | handlers = | |
571 | qualname = gearbox |
|
570 | qualname = gearbox | |
572 | propagate = 1 |
|
571 | propagate = 1 | |
573 |
|
572 | |||
574 | [logger_sqlalchemy] |
|
573 | [logger_sqlalchemy] | |
575 | level = INFO |
|
574 | level = INFO | |
576 | handlers = console_sql |
|
575 | handlers = console_sql | |
577 | qualname = sqlalchemy.engine |
|
576 | qualname = sqlalchemy.engine | |
578 | propagate = 0 |
|
577 | propagate = 0 | |
579 |
|
578 | |||
580 | [logger_whoosh_indexer] |
|
579 | [logger_whoosh_indexer] | |
581 | level = DEBUG |
|
580 | level = DEBUG | |
582 | handlers = |
|
581 | handlers = | |
583 | qualname = whoosh_indexer |
|
582 | qualname = whoosh_indexer | |
584 | propagate = 1 |
|
583 | propagate = 1 | |
585 |
|
584 | |||
586 | ############## |
|
585 | ############## | |
587 | ## HANDLERS ## |
|
586 | ## HANDLERS ## | |
588 | ############## |
|
587 | ############## | |
589 |
|
588 | |||
590 | [handler_console] |
|
589 | [handler_console] | |
591 | class = StreamHandler |
|
590 | class = StreamHandler | |
592 | args = (sys.stderr,) |
|
591 | args = (sys.stderr,) | |
593 | #level = INFO |
|
592 | #level = INFO | |
594 | level = DEBUG |
|
593 | level = DEBUG | |
595 | #formatter = generic |
|
594 | #formatter = generic | |
596 | formatter = color_formatter |
|
595 | formatter = color_formatter | |
597 |
|
596 | |||
598 | [handler_console_sql] |
|
597 | [handler_console_sql] | |
599 | class = StreamHandler |
|
598 | class = StreamHandler | |
600 | args = (sys.stderr,) |
|
599 | args = (sys.stderr,) | |
601 | #level = WARN |
|
600 | #level = WARN | |
602 | level = DEBUG |
|
601 | level = DEBUG | |
603 | #formatter = generic |
|
602 | #formatter = generic | |
604 | formatter = color_formatter_sql |
|
603 | formatter = color_formatter_sql | |
605 |
|
604 | |||
606 | ################ |
|
605 | ################ | |
607 | ## FORMATTERS ## |
|
606 | ## FORMATTERS ## | |
608 | ################ |
|
607 | ################ | |
609 |
|
608 | |||
610 | [formatter_generic] |
|
609 | [formatter_generic] | |
611 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
610 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
612 | datefmt = %Y-%m-%d %H:%M:%S |
|
611 | datefmt = %Y-%m-%d %H:%M:%S | |
613 |
|
612 | |||
614 | [formatter_color_formatter] |
|
613 | [formatter_color_formatter] | |
615 | class = kallithea.lib.colored_formatter.ColorFormatter |
|
614 | class = kallithea.lib.colored_formatter.ColorFormatter | |
616 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
615 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
617 | datefmt = %Y-%m-%d %H:%M:%S |
|
616 | datefmt = %Y-%m-%d %H:%M:%S | |
618 |
|
617 | |||
619 | [formatter_color_formatter_sql] |
|
618 | [formatter_color_formatter_sql] | |
620 | class = kallithea.lib.colored_formatter.ColorFormatterSql |
|
619 | class = kallithea.lib.colored_formatter.ColorFormatterSql | |
621 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
620 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
622 | datefmt = %Y-%m-%d %H:%M:%S |
|
621 | datefmt = %Y-%m-%d %H:%M:%S |
@@ -1,516 +1,515 @@
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.lib.db_manage |
|
15 | kallithea.lib.db_manage | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Database creation and setup module for Kallithea. Used for creation |
|
18 | Database creation and setup module for Kallithea. Used for creation | |
19 | of database as well as for migration operations |
|
19 | of database as well as for migration operations | |
20 |
|
20 | |||
21 | This file was forked by the Kallithea project in July 2014. |
|
21 | This file was forked by the Kallithea project in July 2014. | |
22 | Original author and date, and relevant copyright and licensing information is below: |
|
22 | Original author and date, and relevant copyright and licensing information is below: | |
23 | :created_on: Apr 10, 2010 |
|
23 | :created_on: Apr 10, 2010 | |
24 | :author: marcink |
|
24 | :author: marcink | |
25 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
25 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
26 | :license: GPLv3, see LICENSE.md for more details. |
|
26 | :license: GPLv3, see LICENSE.md for more details. | |
27 | """ |
|
27 | """ | |
28 |
|
28 | |||
29 | import os |
|
29 | import os | |
30 | import sys |
|
30 | import sys | |
31 | import time |
|
31 | import time | |
32 | import uuid |
|
32 | import uuid | |
33 | import logging |
|
33 | import logging | |
34 | import sqlalchemy |
|
34 | import sqlalchemy | |
35 | from os.path import dirname |
|
35 | from os.path import dirname | |
36 |
|
36 | |||
37 | import alembic.config |
|
37 | import alembic.config | |
38 | import alembic.command |
|
38 | import alembic.command | |
39 |
|
39 | |||
40 | from kallithea.model.user import UserModel |
|
40 | from kallithea.model.user import UserModel | |
41 | from kallithea.model.base import init_model |
|
41 | from kallithea.model.base import init_model | |
42 | from kallithea.model.db import User, Permission, Ui, \ |
|
42 | from kallithea.model.db import User, Permission, Ui, \ | |
43 | Setting, UserToPerm, RepoGroup, \ |
|
43 | Setting, UserToPerm, RepoGroup, \ | |
44 | UserRepoGroupToPerm, CacheInvalidation, Repository |
|
44 | UserRepoGroupToPerm, CacheInvalidation, Repository | |
45 |
|
45 | |||
46 | from sqlalchemy.engine import create_engine |
|
46 | from sqlalchemy.engine import create_engine | |
47 | from kallithea.model.repo_group import RepoGroupModel |
|
47 | from kallithea.model.repo_group import RepoGroupModel | |
48 | #from kallithea.model import meta |
|
48 | #from kallithea.model import meta | |
49 | from kallithea.model.meta import Session, Base |
|
49 | from kallithea.model.meta import Session, Base | |
50 | from kallithea.model.repo import RepoModel |
|
50 | from kallithea.model.repo import RepoModel | |
51 | from kallithea.model.permission import PermissionModel |
|
51 | from kallithea.model.permission import PermissionModel | |
52 |
|
52 | |||
53 |
|
53 | |||
54 | log = logging.getLogger(__name__) |
|
54 | log = logging.getLogger(__name__) | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | def notify(msg): |
|
57 | def notify(msg): | |
58 | """ |
|
58 | """ | |
59 | Notification for migrations messages |
|
59 | Notification for migrations messages | |
60 | """ |
|
60 | """ | |
61 | ml = len(msg) + (4 * 2) |
|
61 | ml = len(msg) + (4 * 2) | |
62 | print('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper() |
|
62 | print('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper() | |
63 |
|
63 | |||
64 |
|
64 | |||
65 | class DbManage(object): |
|
65 | class DbManage(object): | |
66 | def __init__(self
|
66 | def __init__(self, dbconf, root, tests=False, SESSION=None, cli_args=None): | |
67 | self.dbname = dbconf.split('/')[-1] |
|
67 | self.dbname = dbconf.split('/')[-1] | |
68 | self.tests = tests |
|
68 | self.tests = tests | |
69 | self.root = root |
|
69 | self.root = root | |
70 | self.dburi = dbconf |
|
70 | self.dburi = dbconf | |
71 | self.log_sql = log_sql |
|
|||
72 | self.db_exists = False |
|
71 | self.db_exists = False | |
73 | self.cli_args = cli_args or {} |
|
72 | self.cli_args = cli_args or {} | |
74 | self.init_db(SESSION=SESSION) |
|
73 | self.init_db(SESSION=SESSION) | |
75 |
|
74 | |||
76 | def _ask_ok(self, msg): |
|
75 | def _ask_ok(self, msg): | |
77 | """Invoke ask_ok unless the force_ask option provides the answer""" |
|
76 | """Invoke ask_ok unless the force_ask option provides the answer""" | |
78 | force_ask = self.cli_args.get('force_ask') |
|
77 | force_ask = self.cli_args.get('force_ask') | |
79 | if force_ask is not None: |
|
78 | if force_ask is not None: | |
80 | return force_ask |
|
79 | return force_ask | |
81 | from kallithea.lib.paster_commands.common import ask_ok |
|
80 | from kallithea.lib.paster_commands.common import ask_ok | |
82 | return ask_ok(msg) |
|
81 | return ask_ok(msg) | |
83 |
|
82 | |||
84 | def init_db(self, SESSION=None): |
|
83 | def init_db(self, SESSION=None): | |
85 | if SESSION: |
|
84 | if SESSION: | |
86 | self.sa = SESSION |
|
85 | self.sa = SESSION | |
87 | else: |
|
86 | else: | |
88 | #init new sessions |
|
87 | #init new sessions | |
89 | engine = create_engine(self.dburi
|
88 | engine = create_engine(self.dburi) | |
90 | init_model(engine) |
|
89 | init_model(engine) | |
91 | self.sa = Session() |
|
90 | self.sa = Session() | |
92 |
|
91 | |||
93 | def create_tables(self, override=False): |
|
92 | def create_tables(self, override=False): | |
94 | """ |
|
93 | """ | |
95 | Create the auth database |
|
94 | Create the auth database | |
96 | """ |
|
95 | """ | |
97 |
|
96 | |||
98 | log.info("Any existing database is going to be destroyed") |
|
97 | log.info("Any existing database is going to be destroyed") | |
99 | if self.tests: |
|
98 | if self.tests: | |
100 | destroy = True |
|
99 | destroy = True | |
101 | else: |
|
100 | else: | |
102 | destroy = self._ask_ok('Are you sure to destroy old database ? [y/n]') |
|
101 | destroy = self._ask_ok('Are you sure to destroy old database ? [y/n]') | |
103 | if not destroy: |
|
102 | if not destroy: | |
104 | print 'Nothing done.' |
|
103 | print 'Nothing done.' | |
105 | sys.exit(0) |
|
104 | sys.exit(0) | |
106 | if destroy: |
|
105 | if destroy: | |
107 | # drop and re-create old schemas |
|
106 | # drop and re-create old schemas | |
108 |
|
107 | |||
109 | url = sqlalchemy.engine.url.make_url(self.dburi) |
|
108 | url = sqlalchemy.engine.url.make_url(self.dburi) | |
110 | database = url.database |
|
109 | database = url.database | |
111 |
|
110 | |||
112 | # Some databases enforce foreign key constraints and Base.metadata.drop_all() doesn't work |
|
111 | # Some databases enforce foreign key constraints and Base.metadata.drop_all() doesn't work | |
113 | if url.drivername == 'mysql': |
|
112 | if url.drivername == 'mysql': | |
114 | url.database = None # don't connect to the database (it might not exist) |
|
113 | url.database = None # don't connect to the database (it might not exist) | |
115 | engine = sqlalchemy.create_engine(url) |
|
114 | engine = sqlalchemy.create_engine(url) | |
116 | with engine.connect() as conn: |
|
115 | with engine.connect() as conn: | |
117 | conn.execute('DROP DATABASE IF EXISTS ' + database) |
|
116 | conn.execute('DROP DATABASE IF EXISTS ' + database) | |
118 | conn.execute('CREATE DATABASE ' + database) |
|
117 | conn.execute('CREATE DATABASE ' + database) | |
119 | elif url.drivername == 'postgresql': |
|
118 | elif url.drivername == 'postgresql': | |
120 | from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT |
|
119 | from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT | |
121 | url.database = 'postgres' # connect to the system database (as the real one might not exist) |
|
120 | url.database = 'postgres' # connect to the system database (as the real one might not exist) | |
122 | engine = sqlalchemy.create_engine(url) |
|
121 | engine = sqlalchemy.create_engine(url) | |
123 | with engine.connect() as conn: |
|
122 | with engine.connect() as conn: | |
124 | conn.connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) |
|
123 | conn.connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) | |
125 | conn.execute('DROP DATABASE IF EXISTS ' + database) |
|
124 | conn.execute('DROP DATABASE IF EXISTS ' + database) | |
126 | conn.execute('CREATE DATABASE ' + database) |
|
125 | conn.execute('CREATE DATABASE ' + database) | |
127 | else: |
|
126 | else: | |
128 | # known to work on SQLite - possibly not on other databases with strong referential integrity |
|
127 | # known to work on SQLite - possibly not on other databases with strong referential integrity | |
129 | Base.metadata.drop_all() |
|
128 | Base.metadata.drop_all() | |
130 |
|
129 | |||
131 | checkfirst = not override |
|
130 | checkfirst = not override | |
132 | Base.metadata.create_all(checkfirst=checkfirst) |
|
131 | Base.metadata.create_all(checkfirst=checkfirst) | |
133 |
|
132 | |||
134 | # Create an Alembic configuration and generate the version table, |
|
133 | # Create an Alembic configuration and generate the version table, | |
135 | # "stamping" it with the most recent Alembic migration revision, to |
|
134 | # "stamping" it with the most recent Alembic migration revision, to | |
136 | # tell Alembic that all the schema upgrades are already in effect. |
|
135 | # tell Alembic that all the schema upgrades are already in effect. | |
137 | alembic_cfg = alembic.config.Config() |
|
136 | alembic_cfg = alembic.config.Config() | |
138 | alembic_cfg.set_main_option('script_location', 'kallithea:alembic') |
|
137 | alembic_cfg.set_main_option('script_location', 'kallithea:alembic') | |
139 | alembic_cfg.set_main_option('sqlalchemy.url', self.dburi) |
|
138 | alembic_cfg.set_main_option('sqlalchemy.url', self.dburi) | |
140 | # This command will give an error in an Alembic multi-head scenario, |
|
139 | # This command will give an error in an Alembic multi-head scenario, | |
141 | # but in practice, such a scenario should not come up during database |
|
140 | # but in practice, such a scenario should not come up during database | |
142 | # creation, even during development. |
|
141 | # creation, even during development. | |
143 | alembic.command.stamp(alembic_cfg, 'head') |
|
142 | alembic.command.stamp(alembic_cfg, 'head') | |
144 |
|
143 | |||
145 | log.info('Created tables for %s', self.dbname) |
|
144 | log.info('Created tables for %s', self.dbname) | |
146 |
|
145 | |||
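    # To double-check what the stamp call above recorded, one can ask Alembic
    # for the database's current revision (a sketch, reusing the same kind of
    # configuration as in create_tables):
    #
    #   alembic_cfg = alembic.config.Config()
    #   alembic_cfg.set_main_option('script_location', 'kallithea:alembic')
    #   alembic_cfg.set_main_option('sqlalchemy.url', self.dburi)
    #   alembic.command.current(alembic_cfg)  # displays the stamped revision
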
    def fix_repo_paths(self):
        """
        Fixes an old kallithea version path into a new one without a '*'
        """

        paths = Ui.query() \
                .filter(Ui.ui_key == '/') \
                .scalar()

        paths.ui_value = paths.ui_value.replace('*', '')

        self.sa.commit()

    def fix_default_user(self):
        """
        Fixes an old default user with some 'nicer' default values,
        used mostly for anonymous access
        """
        def_user = User.query().filter_by(is_default_user=True).one()

        def_user.name = 'Anonymous'
        def_user.lastname = 'User'
        def_user.email = 'anonymous@kallithea-scm.org'

        self.sa.commit()

    def fix_settings(self):
        """
        Fixes kallithea settings: adds the ga_code key for Google Analytics
        """

        hgsettings3 = Setting('ga_code', '')

        self.sa.add(hgsettings3)
        self.sa.commit()

    def admin_prompt(self, second=False):
        if not self.tests:
            import getpass

            username = self.cli_args.get('username')
            password = self.cli_args.get('password')
            email = self.cli_args.get('email')

            def get_password():
                password = getpass.getpass('Specify admin password '
                                           '(min 6 chars):')
                confirm = getpass.getpass('Confirm password:')

                if password != confirm:
                    log.error('passwords mismatch')
                    return False
                if len(password) < 6:
                    log.error('password is too short, use at least 6 characters')
                    return False

                return password
            if username is None:
                username = raw_input('Specify admin username:')
            if password is None:
                password = get_password()
                if not password:
                    # second try
                    password = get_password()
                    if not password:
                        sys.exit()
            if email is None:
                email = raw_input('Specify admin email:')
            self.create_user(username, password, email, True)
        else:
            log.info('creating admin and regular test users')
            from kallithea.tests.base import TEST_USER_ADMIN_LOGIN, \
                TEST_USER_ADMIN_PASS, TEST_USER_ADMIN_EMAIL, \
                TEST_USER_REGULAR_LOGIN, TEST_USER_REGULAR_PASS, \
                TEST_USER_REGULAR_EMAIL, TEST_USER_REGULAR2_LOGIN, \
                TEST_USER_REGULAR2_PASS, TEST_USER_REGULAR2_EMAIL

            self.create_user(TEST_USER_ADMIN_LOGIN, TEST_USER_ADMIN_PASS,
                             TEST_USER_ADMIN_EMAIL, True)

            self.create_user(TEST_USER_REGULAR_LOGIN, TEST_USER_REGULAR_PASS,
                             TEST_USER_REGULAR_EMAIL, False)

            self.create_user(TEST_USER_REGULAR2_LOGIN, TEST_USER_REGULAR2_PASS,
                             TEST_USER_REGULAR2_EMAIL, False)

    def create_ui_settings(self, repo_store_path):
        """
        Creates ui settings, fills out hooks
        """

        # HOOKS
        hooks1_key = Ui.HOOK_UPDATE
        hooks1_ = Ui.query() \
            .filter(Ui.ui_key == hooks1_key).scalar()

        hooks1 = Ui() if hooks1_ is None else hooks1_
        hooks1.ui_section = 'hooks'
        hooks1.ui_key = hooks1_key
        hooks1.ui_value = 'hg update >&2'
        hooks1.ui_active = False
        self.sa.add(hooks1)

        hooks2_key = Ui.HOOK_REPO_SIZE
        hooks2_ = Ui.query() \
            .filter(Ui.ui_key == hooks2_key).scalar()
        hooks2 = Ui() if hooks2_ is None else hooks2_
        hooks2.ui_section = 'hooks'
        hooks2.ui_key = hooks2_key
        hooks2.ui_value = 'python:kallithea.lib.hooks.repo_size'
        self.sa.add(hooks2)

        hooks3 = Ui()
        hooks3.ui_section = 'hooks'
        hooks3.ui_key = Ui.HOOK_PUSH
        hooks3.ui_value = 'python:kallithea.lib.hooks.log_push_action'
        self.sa.add(hooks3)

        hooks4 = Ui()
        hooks4.ui_section = 'hooks'
        hooks4.ui_key = Ui.HOOK_PRE_PUSH
        hooks4.ui_value = 'python:kallithea.lib.hooks.pre_push'
        self.sa.add(hooks4)

        hooks5 = Ui()
        hooks5.ui_section = 'hooks'
        hooks5.ui_key = Ui.HOOK_PULL
        hooks5.ui_value = 'python:kallithea.lib.hooks.log_pull_action'
        self.sa.add(hooks5)

        hooks6 = Ui()
        hooks6.ui_section = 'hooks'
        hooks6.ui_key = Ui.HOOK_PRE_PULL
        hooks6.ui_value = 'python:kallithea.lib.hooks.pre_pull'
        self.sa.add(hooks6)

        # enable largefiles
        largefiles = Ui()
        largefiles.ui_section = 'extensions'
        largefiles.ui_key = 'largefiles'
        largefiles.ui_value = ''
        self.sa.add(largefiles)

        # set default largefiles cache dir, defaults to
        # /repo location/.cache/largefiles
        largefiles = Ui()
        largefiles.ui_section = 'largefiles'
        largefiles.ui_key = 'usercache'
        largefiles.ui_value = os.path.join(repo_store_path, '.cache',
                                           'largefiles')
        self.sa.add(largefiles)

        # hgsubversion extension, disabled by default
        hgsubversion = Ui()
        hgsubversion.ui_section = 'extensions'
        hgsubversion.ui_key = 'hgsubversion'
        hgsubversion.ui_value = ''
        hgsubversion.ui_active = False
        self.sa.add(hgsubversion)

        # hggit extension, disabled by default
        hggit = Ui()
        hggit.ui_section = 'extensions'
        hggit.ui_key = 'hggit'
        hggit.ui_value = ''
        hggit.ui_active = False
        self.sa.add(hggit)

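    # The Ui rows created above correspond to plain Mercurial configuration
    # entries; the largefiles rows, for example, amount to an hgrc fragment
    # along these lines (illustration only, the path depends on the configured
    # repository store):
    #
    #   [extensions]
    #   largefiles =
    #
    #   [largefiles]
    #   usercache = /path/to/repos/.cache/largefiles
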
    def create_auth_plugin_options(self, skip_existing=False):
        """
        Create default auth plugin settings, and make it active

        :param skip_existing: don't overwrite options that already exist
        """

        for k, v, t in [('auth_plugins', 'kallithea.lib.auth_modules.auth_internal', 'list'),
                        ('auth_internal_enabled', 'True', 'bool')]:
            if skip_existing and Setting.get_by_name(k) is not None:
                log.debug('Skipping option %s', k)
                continue
            setting = Setting(k, v, t)
            self.sa.add(setting)

    def create_default_options(self, skip_existing=False):
        """Creates default settings"""

        for k, v, t in [
            ('default_repo_enable_locking', False, 'bool'),
            ('default_repo_enable_downloads', False, 'bool'),
            ('default_repo_enable_statistics', False, 'bool'),
            ('default_repo_private', False, 'bool'),
            ('default_repo_type', 'hg', 'unicode')]:

            if skip_existing and Setting.get_by_name(k) is not None:
                log.debug('Skipping option %s', k)
                continue
            setting = Setting(k, v, t)
            self.sa.add(setting)

    def fixup_groups(self):
        def_usr = User.get_default_user()
        for g in RepoGroup.query().all():
            g.group_name = g.get_new_name(g.name)
            # get default perm
            default = UserRepoGroupToPerm.query() \
                .filter(UserRepoGroupToPerm.group == g) \
                .filter(UserRepoGroupToPerm.user == def_usr) \
                .scalar()

            if default is None:
                log.debug('missing default permission for group %s, adding', g)
                RepoGroupModel()._create_default_perms(g)

    def reset_permissions(self, username):
        """
        Resets permissions to the default state; useful when old systems had
        bad permissions that must be cleaned up

        :param username: name of the user whose permissions are reset
        """
        default_user = User.get_by_username(username)
        if not default_user:
            return

        u2p = UserToPerm.query() \
            .filter(UserToPerm.user == default_user).all()
        fixed = False
        if len(u2p) != len(Permission.DEFAULT_USER_PERMISSIONS):
            for p in u2p:
                Session().delete(p)
            fixed = True
            self.populate_default_permissions()
        return fixed

    def update_repo_info(self):
        for repo in Repository.query():
            repo.update_changeset_cache()

    def config_prompt(self, test_repo_path='', retries=3):
        _path = self.cli_args.get('repos_location')
        if retries == 3:
            log.info('Setting up repositories config')

        if _path is not None:
            path = _path
        elif not self.tests and not test_repo_path:
            path = raw_input(
                'Enter a valid absolute path to store repositories. '
                'All repositories in that path will be added automatically:'
            )
        else:
            path = test_repo_path
        path_ok = True

        # check proper dir
        if not os.path.isdir(path):
            path_ok = False
            log.error('Given path %s is not a valid directory', path)

        elif not os.path.isabs(path):
            path_ok = False
            log.error('Given path %s is not an absolute path', path)

        # check if path is at least readable.
        if not os.access(path, os.R_OK):
            path_ok = False
            log.error('Given path %s is not readable', path)

        # check write access, warn user about non writeable paths
        elif not os.access(path, os.W_OK) and path_ok:
            log.warning('No write permission to given path %s', path)
            if not self._ask_ok('Given path %s is not writeable, do you want to '
                                'continue with read only mode ? [y/n]' % (path,)):
                log.error('Canceled by user')
                sys.exit(-1)

        if retries == 0:
            sys.exit('max retries reached')
        if not path_ok:
            if _path is not None:
                sys.exit('Invalid repo path: %s' % _path)
            retries -= 1
            return self.config_prompt(test_repo_path, retries)  # recursing!!!

        real_path = os.path.normpath(os.path.realpath(path))

        if real_path != os.path.normpath(path):
            log.warning('Using normalized path %s instead of %s', real_path, path)

        return real_path

    def create_settings(self, path):

        self.create_ui_settings(path)

        ui_config = [
            ('web', 'allow_archive', 'gz zip bz2'),
            ('web', 'baseurl', '/'),
            ('paths', '/', path),
            #('phases', 'publish', 'false')
        ]
        for section, key, value in ui_config:
            ui_conf = Ui()
            setattr(ui_conf, 'ui_section', section)
            setattr(ui_conf, 'ui_key', key)
            setattr(ui_conf, 'ui_value', value)
            self.sa.add(ui_conf)

        settings = [
            ('realm', 'Kallithea', 'unicode'),
            ('title', '', 'unicode'),
            ('ga_code', '', 'unicode'),
            ('show_public_icon', True, 'bool'),
            ('show_private_icon', True, 'bool'),
            ('stylify_metatags', False, 'bool'),
            ('dashboard_items', 100, 'int'),  # TODO: call it page_size
            ('admin_grid_items', 25, 'int'),
            ('show_version', True, 'bool'),
            ('use_gravatar', True, 'bool'),
            ('gravatar_url', User.DEFAULT_GRAVATAR_URL, 'unicode'),
            ('clone_uri_tmpl', Repository.DEFAULT_CLONE_URI, 'unicode'),
            ('update_url', Setting.DEFAULT_UPDATE_URL, 'unicode'),
        ]
        for key, val, type_ in settings:
            sett = Setting(key, val, type_)
            self.sa.add(sett)

        self.create_auth_plugin_options()
        self.create_default_options()

        log.info('created ui config')

    def create_user(self, username, password, email='', admin=False):
        log.info('creating user %s', username)
        UserModel().create_or_update(username, password, email,
                                     firstname=u'Kallithea', lastname=u'Admin',
                                     active=True, admin=admin,
                                     extern_type=User.DEFAULT_AUTH_TYPE)

    def create_default_user(self):
        log.info('creating default user')
        # create default user for handling default permissions.
        user = UserModel().create_or_update(username=User.DEFAULT_USER,
                                            password=str(uuid.uuid1())[:20],
                                            email='anonymous@kallithea-scm.org',
                                            firstname=u'Anonymous',
                                            lastname=u'User')
        # based on configuration options activate/deactivate this user which
        # controls anonymous access
        if self.cli_args.get('public_access') is False:
            log.info('Public access disabled')
            user.active = False
            Session().commit()

    def create_permissions(self):
        """
        Creates all permissions defined in the system
        """
        # module.(access|create|change|delete)_[name]
        # module.(none|read|write|admin)
        log.info('creating permissions')
        PermissionModel().create_permissions()

    def populate_default_permissions(self):
        """
        Populate default permissions. It will create only the default
        permissions that are missing, and not alter already defined ones
        """
        log.info('creating default user permissions')
        PermissionModel().create_default_permissions(user=User.DEFAULT_USER)
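
    # Typical call order for a fresh installation, as used by the setup_db
    # command below: create_tables(), config_prompt(), create_settings(),
    # create_default_user(), admin_prompt(), create_permissions() and
    # populate_default_permissions(), followed by a final Session().commit().
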
@@ -1,107 +1,107 @@
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.paster_commands.setup_db
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Database setup gearbox command for Kallithea
"""


import os
import sys
import paste.deploy

from kallithea.lib.db_manage import DbManage
from kallithea.lib.paster_commands.common import BasePasterCommand
from kallithea.model.meta import Session


# This is almost like SetupAppCommand ... but we have to pass options and it is
# thus simpler to drop websetup and reimplement everything
class Command(BasePasterCommand):
    """Kallithea: Configure the database specified in the .ini file

    Setup Kallithea according to its configuration file.  This is
    the second part of a two-phase web application installation
    process (the first phase is prepare-app).  The setup process
    consists of things like setting up databases and creating the admin user
    """

    def get_description(self):
        return self.__doc__.splitlines()[0]

    requires_db_session = False  # only available after this command has been run

    def get_parser(self, prog_name):
        parser = super(Command, self).get_parser(prog_name)

        parser.add_argument('--user',
                            action='store',
                            dest='username',
                            default=None,
                            help='Admin Username')
        parser.add_argument('--email',
                            action='store',
                            dest='email',
                            default=None,
                            help='Admin Email')
        parser.add_argument('--password',
                            action='store',
                            dest='password',
                            default=None,
                            help='Admin password min 6 chars')
        parser.add_argument('--repos',
                            action='store',
                            dest='repos_location',
                            default=None,
                            help='Absolute path to repositories location')
        parser.add_argument('--force-yes',
                            action='store_true',
                            dest='force_ask',
                            default=None,
                            help='Force yes to every question')
        parser.add_argument('--force-no',
                            action='store_false',
                            dest='force_ask',
                            default=None,
                            help='Force no to every question')
        parser.add_argument('--public-access',
                            action='store_true',
                            dest='public_access',
                            default=None,
                            help='Enable public access on this installation (default)')
        parser.add_argument('--no-public-access',
                            action='store_false',
                            dest='public_access',
                            default=None,
                            help='Disable public access on this installation')

        return parser

    def take_action(self, opts):
        path_to_ini_file = os.path.realpath(opts.config_file)
        conf = paste.deploy.appconfig('config:' + path_to_ini_file)

        dbconf = conf['sqlalchemy.url']
-        dbmanage = DbManage(
+        dbmanage = DbManage(dbconf=dbconf, root=conf['here'],
                            tests=False, cli_args=vars(opts))
        dbmanage.create_tables(override=True)
        opts = dbmanage.config_prompt(None)
        dbmanage.create_settings(opts)
        dbmanage.create_default_user()
        dbmanage.admin_prompt()
        dbmanage.create_permissions()
        dbmanage.populate_default_permissions()
        Session().commit()
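
# Example invocation (a sketch; assumes the command is exposed under its usual
# "setup-db" entry point name and that my.ini is the Kallithea configuration file):
#
#   gearbox setup-db -c my.ini --user=admin --email=admin@example.com \
#       --password=secret --repos=/srv/repos --force-yes
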
@@ -1,622 +1,621 b'' | |||||
1 | ## -*- coding: utf-8 -*- |
|
1 | ## -*- coding: utf-8 -*- | |
2 | <%text>################################################################################</%text> |
|
2 | <%text>################################################################################</%text> | |
3 | <%text>################################################################################</%text> |
|
3 | <%text>################################################################################</%text> | |
4 | # Kallithea - config file generated with kallithea-config # |
|
4 | # Kallithea - config file generated with kallithea-config # | |
5 | <%text>################################################################################</%text> |
|
5 | <%text>################################################################################</%text> | |
6 | <%text>################################################################################</%text> |
|
6 | <%text>################################################################################</%text> | |
7 |
|
7 | |||
8 | [DEFAULT] |
|
8 | [DEFAULT] | |
9 |
|
9 | |||
10 | <%text>################################################################################</%text> |
|
10 | <%text>################################################################################</%text> | |
11 | <%text>## Email settings ##</%text> |
|
11 | <%text>## Email settings ##</%text> | |
12 | <%text>## ##</%text> |
|
12 | <%text>## ##</%text> | |
13 | <%text>## Refer to the documentation ("Email settings") for more details. ##</%text> |
|
13 | <%text>## Refer to the documentation ("Email settings") for more details. ##</%text> | |
14 | <%text>## ##</%text> |
|
14 | <%text>## ##</%text> | |
15 | <%text>## It is recommended to use a valid sender address that passes access ##</%text> |
|
15 | <%text>## It is recommended to use a valid sender address that passes access ##</%text> | |
16 | <%text>## validation and spam filtering in mail servers. ##</%text> |
|
16 | <%text>## validation and spam filtering in mail servers. ##</%text> | |
17 | <%text>################################################################################</%text> |
|
17 | <%text>################################################################################</%text> | |
18 |
|
18 | |||
19 | <%text>## 'From' header for application emails. You can optionally add a name.</%text> |
|
19 | <%text>## 'From' header for application emails. You can optionally add a name.</%text> | |
20 | <%text>## Default:</%text> |
|
20 | <%text>## Default:</%text> | |
21 | #app_email_from = Kallithea |
|
21 | #app_email_from = Kallithea | |
22 | <%text>## Examples:</%text> |
|
22 | <%text>## Examples:</%text> | |
23 | #app_email_from = Kallithea <kallithea-noreply@example.com> |
|
23 | #app_email_from = Kallithea <kallithea-noreply@example.com> | |
24 | #app_email_from = kallithea-noreply@example.com |
|
24 | #app_email_from = kallithea-noreply@example.com | |
25 |
|
25 | |||
26 | <%text>## Subject prefix for application emails.</%text> |
|
26 | <%text>## Subject prefix for application emails.</%text> | |
27 | <%text>## A space between this prefix and the real subject is automatically added.</%text> |
|
27 | <%text>## A space between this prefix and the real subject is automatically added.</%text> | |
28 | <%text>## Default:</%text> |
|
28 | <%text>## Default:</%text> | |
29 | #email_prefix = |
|
29 | #email_prefix = | |
30 | <%text>## Example:</%text> |
|
30 | <%text>## Example:</%text> | |
31 | #email_prefix = [Kallithea] |
|
31 | #email_prefix = [Kallithea] | |
32 |
|
32 | |||
33 | <%text>## Recipients for error emails and fallback recipients of application mails.</%text> |
|
33 | <%text>## Recipients for error emails and fallback recipients of application mails.</%text> | |
34 | <%text>## Multiple addresses can be specified, space-separated.</%text> |
|
34 | <%text>## Multiple addresses can be specified, space-separated.</%text> | |
35 | <%text>## Only addresses are allowed, do not add any name part.</%text> |
|
35 | <%text>## Only addresses are allowed, do not add any name part.</%text> | |
36 | <%text>## Default:</%text> |
|
36 | <%text>## Default:</%text> | |
37 | #email_to = |
|
37 | #email_to = | |
38 | <%text>## Examples:</%text> |
|
38 | <%text>## Examples:</%text> | |
39 | #email_to = admin@example.com |
|
39 | #email_to = admin@example.com | |
40 | #email_to = admin@example.com another_admin@example.com |
|
40 | #email_to = admin@example.com another_admin@example.com | |
41 | email_to = |
|
41 | email_to = | |
42 |
|
42 | |||
43 | <%text>## 'From' header for error emails. You can optionally add a name.</%text> |
|
43 | <%text>## 'From' header for error emails. You can optionally add a name.</%text> | |
44 | <%text>## Default: (none)</%text> |
|
44 | <%text>## Default: (none)</%text> | |
45 | <%text>## Examples:</%text> |
|
45 | <%text>## Examples:</%text> | |
46 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> |
|
46 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> | |
47 | #error_email_from = kallithea_errors@example.com |
|
47 | #error_email_from = kallithea_errors@example.com | |
48 | error_email_from = |
|
48 | error_email_from = | |
49 |
|
49 | |||
50 | <%text>## SMTP server settings</%text> |
|
50 | <%text>## SMTP server settings</%text> | |
51 | <%text>## If specifying credentials, make sure to use secure connections.</%text> |
|
51 | <%text>## If specifying credentials, make sure to use secure connections.</%text> | |
52 | <%text>## Default: Send unencrypted unauthenticated mails to the specified smtp_server.</%text> |
|
52 | <%text>## Default: Send unencrypted unauthenticated mails to the specified smtp_server.</%text> | |
53 | <%text>## For "SSL", use smtp_use_ssl = true and smtp_port = 465.</%text> |
|
53 | <%text>## For "SSL", use smtp_use_ssl = true and smtp_port = 465.</%text> | |
54 | <%text>## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587.</%text> |
|
54 | <%text>## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587.</%text> | |
55 | smtp_server = |
|
55 | smtp_server = | |
56 | #smtp_username = |
|
56 | #smtp_username = | |
57 | #smtp_password = |
|
57 | #smtp_password = | |
58 | smtp_port = |
|
58 | smtp_port = | |
59 | #smtp_use_ssl = false |
|
59 | #smtp_use_ssl = false | |
60 | #smtp_use_tls = false |
|
60 | #smtp_use_tls = false | |
61 |
|
61 | |||
62 | [server:main] |
|
62 | [server:main] | |
63 | %if http_server == 'gearbox': |
|
63 | %if http_server == 'gearbox': | |
64 | <%text>## Gearbox default web server ##</%text> |
|
64 | <%text>## Gearbox default web server ##</%text> | |
65 | use = egg:gearbox#wsgiref |
|
65 | use = egg:gearbox#wsgiref | |
66 | <%text>## nr of worker threads to spawn</%text> |
|
66 | <%text>## nr of worker threads to spawn</%text> | |
67 | threadpool_workers = 1 |
|
67 | threadpool_workers = 1 | |
68 | <%text>## max request before thread respawn</%text> |
|
68 | <%text>## max request before thread respawn</%text> | |
69 | threadpool_max_requests = 100 |
|
69 | threadpool_max_requests = 100 | |
70 | <%text>## option to use threads of process</%text> |
|
70 | <%text>## option to use threads of process</%text> | |
71 | use_threadpool = true |
|
71 | use_threadpool = true | |
72 |
|
72 | |||
73 | %elif http_server == 'gevent': |
|
73 | %elif http_server == 'gevent': | |
74 | <%text>## Gearbox gevent web server ##</%text> |
|
74 | <%text>## Gearbox gevent web server ##</%text> | |
75 | use = egg:gearbox#gevent |
|
75 | use = egg:gearbox#gevent | |
76 |
|
76 | |||
77 | %elif http_server == 'waitress': |
|
77 | %elif http_server == 'waitress': | |
78 | <%text>## WAITRESS ##</%text> |
|
78 | <%text>## WAITRESS ##</%text> | |
79 | use = egg:waitress#main |
|
79 | use = egg:waitress#main | |
80 | <%text>## number of worker threads</%text> |
|
80 | <%text>## number of worker threads</%text> | |
81 | threads = 1 |
|
81 | threads = 1 | |
82 | <%text>## MAX BODY SIZE 100GB</%text> |
|
82 | <%text>## MAX BODY SIZE 100GB</%text> | |
83 | max_request_body_size = 107374182400 |
|
83 | max_request_body_size = 107374182400 | |
84 | <%text>## use poll instead of select, fixes fd limits, may not work on old</%text> |
|
84 | <%text>## use poll instead of select, fixes fd limits, may not work on old</%text> | |
85 | <%text>## windows systems.</%text> |
|
85 | <%text>## windows systems.</%text> | |
86 | #asyncore_use_poll = True |
|
86 | #asyncore_use_poll = True | |
87 |
|
87 | |||
88 | %elif http_server == 'gunicorn': |
|
88 | %elif http_server == 'gunicorn': | |
89 | <%text>## GUNICORN ##</%text> |
|
89 | <%text>## GUNICORN ##</%text> | |
90 | use = egg:gunicorn#main |
|
90 | use = egg:gunicorn#main | |
91 | <%text>## number of process workers. You must set `instance_id = *` when this option</%text> |
|
91 | <%text>## number of process workers. You must set `instance_id = *` when this option</%text> | |
92 | <%text>## is set to more than one worker</%text> |
|
92 | <%text>## is set to more than one worker</%text> | |
93 | workers = 1 |
|
93 | workers = 1 | |
94 | <%text>## process name</%text> |
|
94 | <%text>## process name</%text> | |
95 | proc_name = kallithea |
|
95 | proc_name = kallithea | |
96 | <%text>## type of worker class, one of sync, eventlet, gevent, tornado</%text> |
|
96 | <%text>## type of worker class, one of sync, eventlet, gevent, tornado</%text> | |
97 | <%text>## recommended for bigger setup is using of of other than sync one</%text> |
|
97 | <%text>## recommended for bigger setup is using of of other than sync one</%text> | |
98 | worker_class = sync |
|
98 | worker_class = sync | |
99 | max_requests = 1000 |
|
99 | max_requests = 1000 | |
100 | <%text>## amount of time a worker can handle request before it gets killed and</%text> |
|
100 | <%text>## amount of time a worker can handle request before it gets killed and</%text> | |
101 | <%text>## restarted</%text> |
|
101 | <%text>## restarted</%text> | |
102 | timeout = 3600 |
|
102 | timeout = 3600 | |
103 |
|
103 | |||
104 | %elif http_server == 'uwsgi': |
|
104 | %elif http_server == 'uwsgi': | |
105 | <%text>## UWSGI ##</%text> |
|
105 | <%text>## UWSGI ##</%text> | |
106 | <%text>## run with uwsgi --ini-paste-logged <inifile.ini></%text> |
|
106 | <%text>## run with uwsgi --ini-paste-logged <inifile.ini></%text> | |
107 | [uwsgi] |
|
107 | [uwsgi] | |
108 | socket = /tmp/uwsgi.sock |
|
108 | socket = /tmp/uwsgi.sock | |
109 | master = true |
|
109 | master = true | |
110 | http = 127.0.0.1:5000 |
|
110 | http = 127.0.0.1:5000 | |
111 |
|
111 | |||
112 | <%text>## set as deamon and redirect all output to file</%text> |
|
112 | <%text>## set as deamon and redirect all output to file</%text> | |
113 | #daemonize = ./uwsgi_kallithea.log |
|
113 | #daemonize = ./uwsgi_kallithea.log | |
114 |
|
114 | |||
115 | <%text>## master process PID</%text> |
|
115 | <%text>## master process PID</%text> | |
116 | pidfile = ./uwsgi_kallithea.pid |
|
116 | pidfile = ./uwsgi_kallithea.pid | |
117 |
|
117 | |||
118 | <%text>## stats server with workers statistics, use uwsgitop</%text> |
|
118 | <%text>## stats server with workers statistics, use uwsgitop</%text> | |
119 | <%text>## for monitoring, `uwsgitop 127.0.0.1:1717`</%text> |
|
119 | <%text>## for monitoring, `uwsgitop 127.0.0.1:1717`</%text> | |
120 | stats = 127.0.0.1:1717 |
|
120 | stats = 127.0.0.1:1717 | |
121 | memory-report = true |
|
121 | memory-report = true | |
122 |
|
122 | |||
123 | <%text>## log 5XX errors</%text> |
|
123 | <%text>## log 5XX errors</%text> | |
124 | log-5xx = true |
|
124 | log-5xx = true | |
125 |
|
125 | |||
126 | <%text>## Set the socket listen queue size.</%text> |
|
126 | <%text>## Set the socket listen queue size.</%text> | |
127 | listen = 256 |
|
127 | listen = 256 | |
128 |
|
128 | |||
129 | <%text>## Gracefully Reload workers after the specified amount of managed requests</%text> |
|
129 | <%text>## Gracefully Reload workers after the specified amount of managed requests</%text> | |
130 | <%text>## (avoid memory leaks).</%text> |
|
130 | <%text>## (avoid memory leaks).</%text> | |
131 | max-requests = 1000 |
|
131 | max-requests = 1000 | |
132 |
|
132 | |||
133 | <%text>## enable large buffers</%text> |
|
133 | <%text>## enable large buffers</%text> | |
134 | buffer-size = 65535 |
|
134 | buffer-size = 65535 | |
135 |
|
135 | |||
136 | <%text>## socket and http timeouts ##</%text> |
|
136 | <%text>## socket and http timeouts ##</%text> | |
137 | http-timeout = 3600 |
|
137 | http-timeout = 3600 | |
138 | socket-timeout = 3600 |
|
138 | socket-timeout = 3600 | |
139 |
|
139 | |||
140 | <%text>## Log requests slower than the specified number of milliseconds.</%text> |
|
140 | <%text>## Log requests slower than the specified number of milliseconds.</%text> | |
141 | log-slow = 10 |
|
141 | log-slow = 10 | |
142 |
|
142 | |||
143 | <%text>## Exit if no app can be loaded.</%text> |
|
143 | <%text>## Exit if no app can be loaded.</%text> | |
144 | need-app = true |
|
144 | need-app = true | |
145 |
|
145 | |||
146 | <%text>## Set lazy mode (load apps in workers instead of master).</%text> |
|
146 | <%text>## Set lazy mode (load apps in workers instead of master).</%text> | |
147 | lazy = true |
|
147 | lazy = true | |
148 |
|
148 | |||
149 | <%text>## scaling ##</%text> |
|
149 | <%text>## scaling ##</%text> | |
150 | <%text>## set cheaper algorithm to use, if not set default will be used</%text> |
|
150 | <%text>## set cheaper algorithm to use, if not set default will be used</%text> | |
151 | cheaper-algo = spare |
|
151 | cheaper-algo = spare | |
152 |
|
152 | |||
153 | <%text>## minimum number of workers to keep at all times</%text> |
|
153 | <%text>## minimum number of workers to keep at all times</%text> | |
154 | cheaper = 1 |
|
154 | cheaper = 1 | |
155 |
|
155 | |||
156 | <%text>## number of workers to spawn at startup</%text> |
|
156 | <%text>## number of workers to spawn at startup</%text> | |
157 | cheaper-initial = 1 |
|
157 | cheaper-initial = 1 | |
158 |
|
158 | |||
159 | <%text>## maximum number of workers that can be spawned</%text> |
|
159 | <%text>## maximum number of workers that can be spawned</%text> | |
160 | workers = 4 |
|
160 | workers = 4 | |
161 |
|
161 | |||
162 | <%text>## how many workers should be spawned at a time</%text> |
|
162 | <%text>## how many workers should be spawned at a time</%text> | |
163 | cheaper-step = 1 |
|
163 | cheaper-step = 1 | |
164 |
|
164 | |||
165 | %endif |
|
165 | %endif | |
166 | <%text>## COMMON ##</%text> |
|
166 | <%text>## COMMON ##</%text> | |
167 | host = ${host} |
|
167 | host = ${host} | |
168 | port = ${port} |
|
168 | port = ${port} | |
169 |
|
169 | |||
170 | <%text>## middleware for hosting the WSGI application under a URL prefix</%text> |
|
170 | <%text>## middleware for hosting the WSGI application under a URL prefix</%text> | |
171 | #[filter:proxy-prefix] |
|
171 | #[filter:proxy-prefix] | |
172 | #use = egg:PasteDeploy#prefix |
|
172 | #use = egg:PasteDeploy#prefix | |
173 | #prefix = /<your-prefix> |
|
173 | #prefix = /<your-prefix> | |
174 |
|
174 | |||
175 | [app:main] |
|
175 | [app:main] | |
176 | use = egg:kallithea |
|
176 | use = egg:kallithea | |
177 | <%text>## enable proxy prefix middleware</%text> |
|
177 | <%text>## enable proxy prefix middleware</%text> | |
178 | #filter-with = proxy-prefix |
|
178 | #filter-with = proxy-prefix | |
179 |
|
179 | |||
180 | full_stack = true |
|
180 | full_stack = true | |
181 | static_files = true |
|
181 | static_files = true | |
182 | <%text>## Available Languages:</%text> |
|
182 | <%text>## Available Languages:</%text> | |
183 | <%text>## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW</%text> |
|
183 | <%text>## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW</%text> | |
184 | lang = |
|
184 | lang = | |
185 | cache_dir = ${here}/data |
|
185 | cache_dir = ${here}/data | |
186 | index_dir = ${here}/data/index |
|
186 | index_dir = ${here}/data/index | |
187 |
|
187 | |||
188 | <%text>## perform a full repository scan on each server start, this should be</%text> |
|
188 | <%text>## perform a full repository scan on each server start, this should be</%text> | |
189 | <%text>## set to false after first startup, to allow faster server restarts.</%text> |
|
189 | <%text>## set to false after first startup, to allow faster server restarts.</%text> | |
190 | initial_repo_scan = false |
|
190 | initial_repo_scan = false | |
191 |
|
191 | |||
192 | <%text>## uncomment and set this path to use archive download cache</%text> |
|
192 | <%text>## uncomment and set this path to use archive download cache</%text> | |
193 | archive_cache_dir = ${here}/tarballcache |
|
193 | archive_cache_dir = ${here}/tarballcache | |
194 |
|
194 | |||
195 | <%text>## change this to unique ID for security</%text> |
|
195 | <%text>## change this to unique ID for security</%text> | |
196 | app_instance_uuid = ${uuid()} |
|
196 | app_instance_uuid = ${uuid()} | |
197 |
|
197 | |||
198 | <%text>## cut off limit for large diffs (size in bytes)</%text> |
|
198 | <%text>## cut off limit for large diffs (size in bytes)</%text> | |
199 | cut_off_limit = 256000 |
|
199 | cut_off_limit = 256000 | |
200 |
|
200 | |||
201 | <%text>## force https in Kallithea, fixes https redirects, assumes it's always https</%text> |
|
201 | <%text>## force https in Kallithea, fixes https redirects, assumes it's always https</%text> | |
202 | force_https = false |
|
202 | force_https = false | |
203 |
|
203 | |||
204 | <%text>## use Strict-Transport-Security headers</%text> |
|
204 | <%text>## use Strict-Transport-Security headers</%text> | |
205 | use_htsts = false |
|
205 | use_htsts = false | |
206 |
|
206 | |||
207 | <%text>## number of commits stats will parse on each iteration</%text> |
|
207 | <%text>## number of commits stats will parse on each iteration</%text> | |
208 | commit_parse_limit = 25 |
|
208 | commit_parse_limit = 25 | |
209 |
|
209 | |||
210 | <%text>## path to git executable</%text> |
|
210 | <%text>## path to git executable</%text> | |
211 | git_path = git |
|
211 | git_path = git | |
212 |
|
212 | |||
213 | <%text>## git rev filter option; --all is the default filter. If you need to</%text> |

213 | <%text>## git rev filter option; --all is the default filter. If you need to</%text> | |
214 | <%text>## hide all refs in the changelog, switch this to --branches --tags</%text> |

214 | <%text>## hide all refs in the changelog, switch this to --branches --tags</%text> | |
215 | #git_rev_filter = --branches --tags |
|
215 | #git_rev_filter = --branches --tags | |
216 |
|
216 | |||
217 | <%text>## RSS feed options</%text> |
|
217 | <%text>## RSS feed options</%text> | |
218 | rss_cut_off_limit = 256000 |
|
218 | rss_cut_off_limit = 256000 | |
219 | rss_items_per_page = 10 |
|
219 | rss_items_per_page = 10 | |
220 | rss_include_diff = false |
|
220 | rss_include_diff = false | |
221 |
|
221 | |||
222 | <%text>## options for showing and identifying changesets</%text> |
|
222 | <%text>## options for showing and identifying changesets</%text> | |
223 | show_sha_length = 12 |
|
223 | show_sha_length = 12 | |
224 | show_revision_number = false |
|
224 | show_revision_number = false | |
225 |
|
225 | |||
226 | <%text>## Canonical URL to use when creating full URLs in UI and texts.</%text> |
|
226 | <%text>## Canonical URL to use when creating full URLs in UI and texts.</%text> | |
227 | <%text>## Useful when the site is available under different names or protocols.</%text> |
|
227 | <%text>## Useful when the site is available under different names or protocols.</%text> | |
228 | <%text>## Defaults to what is provided in the WSGI environment.</%text> |
|
228 | <%text>## Defaults to what is provided in the WSGI environment.</%text> | |
229 | #canonical_url = https://kallithea.example.com/repos |
|
229 | #canonical_url = https://kallithea.example.com/repos | |
230 |
|
230 | |||
231 | <%text>## gist URL alias, used to create nicer URLs for gists. This should be a</%text> |

231 | <%text>## gist URL alias, used to create nicer URLs for gists. This should be a</%text> | |
232 | <%text>## URL that does rewrites to _admin/gists/<gistid>.</%text> |

232 | <%text>## URL that does rewrites to _admin/gists/<gistid>.</%text> | |
233 | <%text>## example: http://gist.example.com/{gistid}. Empty means use the internal</%text> |

233 | <%text>## example: http://gist.example.com/{gistid}. Empty means use the internal</%text> | |
234 | <%text>## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid></%text> |

234 | <%text>## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid></%text> | |
235 | gist_alias_url = |
|
235 | gist_alias_url = | |
236 |
|
236 | |||
237 | <%text>## whitelist of API-enabled controllers. This allows adding a list of</%text> |

237 | <%text>## whitelist of API-enabled controllers. This allows adding a list of</%text> | |
238 | <%text>## controllers to which access will be enabled by api_key, e.g. to enable</%text> |

238 | <%text>## controllers to which access will be enabled by api_key, e.g. to enable</%text> | |
239 | <%text>## API access to raw_files add `FilesController:raw`, to enable access to patches</%text> |

239 | <%text>## API access to raw_files add `FilesController:raw`, to enable access to patches</%text> | |
240 | <%text>## add `ChangesetController:changeset_patch`. This list should be comma separated.</%text> |

240 | <%text>## add `ChangesetController:changeset_patch`. This list should be comma separated.</%text> | |
241 | <%text>## Syntax is <ControllerClass>:<function>. Check debug logs for the generated names.</%text> |

241 | <%text>## Syntax is <ControllerClass>:<function>. Check debug logs for the generated names.</%text> | |
242 | <%text>## Recommended settings below are commented out:</%text> |
|
242 | <%text>## Recommended settings below are commented out:</%text> | |
243 | api_access_controllers_whitelist = |
|
243 | api_access_controllers_whitelist = | |
244 | # ChangesetController:changeset_patch, |
|
244 | # ChangesetController:changeset_patch, | |
245 | # ChangesetController:changeset_raw, |
|
245 | # ChangesetController:changeset_raw, | |
246 | # FilesController:raw, |
|
246 | # FilesController:raw, | |
247 | # FilesController:archivefile |
|
247 | # FilesController:archivefile | |
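As a worked instance of the syntax described above (comma-separated <ControllerClass>:<function> entries), enabling the four recommended controllers on a single line would look like:

    api_access_controllers_whitelist = ChangesetController:changeset_patch, ChangesetController:changeset_raw, FilesController:raw, FilesController:archivefile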
248 |
|
248 | |||
249 | <%text>## default encoding used to convert from and to unicode</%text> |
|
249 | <%text>## default encoding used to convert from and to unicode</%text> | |
250 | <%text>## can also be a comma separated list of encodings in case of mixed encodings</%text> |

250 | <%text>## can also be a comma separated list of encodings in case of mixed encodings</%text> | |
251 | default_encoding = utf8 |
|
251 | default_encoding = utf8 | |
252 |
|
252 | |||
253 | <%text>## issue tracker for Kallithea (leave blank to disable, absent for default)</%text> |
|
253 | <%text>## issue tracker for Kallithea (leave blank to disable, absent for default)</%text> | |
254 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues |
|
254 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues | |
255 |
|
255 | |||
256 | <%text>## issue tracking mapping for commits messages</%text> |
|
256 | <%text>## issue tracking mapping for commits messages</%text> | |
257 | <%text>## comment out issue_pat, issue_server_link and issue_prefix to disable</%text> |

257 | <%text>## comment out issue_pat, issue_server_link and issue_prefix to disable</%text> | |
258 |
|
258 | |||
259 | <%text>## pattern to get the issues from commit messages</%text> |
|
259 | <%text>## pattern to get the issues from commit messages</%text> | |
260 | <%text>## the default one used here is #<numbers> with a regex non-capturing group for `#`</%text> |

260 | <%text>## the default one used here is #<numbers> with a regex non-capturing group for `#`</%text> | |
261 | <%text>## {id} will be all groups matched from this pattern</%text> |
|
261 | <%text>## {id} will be all groups matched from this pattern</%text> | |
262 |
|
262 | |||
263 | issue_pat = (?:\s*#)(\d+) |
|
263 | issue_pat = (?:\s*#)(\d+) | |
264 |
|
264 | |||
265 | <%text>## server URL of the issue; each {id} will be replaced with the match</%text> |

265 | <%text>## server URL of the issue; each {id} will be replaced with the match</%text> | |
266 | <%text>## fetched from the regex, and {repo} is replaced with the full repository name</%text> |

266 | <%text>## fetched from the regex, and {repo} is replaced with the full repository name</%text> | |
267 | <%text>## including groups. {repo_name} is replaced with just the name of the repo</%text> |

267 | <%text>## including groups. {repo_name} is replaced with just the name of the repo</%text> | |
268 |
|
268 | |||
269 | issue_server_link = https://issues.example.com/{repo}/issue/{id} |
|
269 | issue_server_link = https://issues.example.com/{repo}/issue/{id} | |
270 |
|
270 | |||
271 | <%text>## prefix to add to the link to indicate it's a URL</%text> |

271 | <%text>## prefix to add to the link to indicate it's a URL</%text> | |
272 | <%text>## #314 will be replaced by <issue_prefix><id></%text> |
|
272 | <%text>## #314 will be replaced by <issue_prefix><id></%text> | |
273 |
|
273 | |||
274 | issue_prefix = # |
|
274 | issue_prefix = # | |
275 |
|
275 | |||
276 | <%text>## issue_pat, issue_server_link, issue_prefix can have suffixes to specify</%text> |
|
276 | <%text>## issue_pat, issue_server_link, issue_prefix can have suffixes to specify</%text> | |
277 | <%text>## multiple patterns, for other issue servers, wikis or others</%text> |

277 | <%text>## multiple patterns, for other issue servers, wikis or others</%text> | |
278 | <%text>## below is an example of how to create a wiki pattern</%text> |

278 | <%text>## below is an example of how to create a wiki pattern</%text> | |
279 | # wiki-some-id -> https://wiki.example.com/some-id |
|
279 | # wiki-some-id -> https://wiki.example.com/some-id | |
280 |
|
280 | |||
281 | #issue_pat_wiki = (?:wiki-)(.+) |
|
281 | #issue_pat_wiki = (?:wiki-)(.+) | |
282 | #issue_server_link_wiki = https://wiki.example.com/{id} |
|
282 | #issue_server_link_wiki = https://wiki.example.com/{id} | |
283 | #issue_prefix_wiki = WIKI- |
|
283 | #issue_prefix_wiki = WIKI- | |
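Worked example of the default mapping above: with issue_pat = (?:\s*#)(\d+), issue_server_link = https://issues.example.com/{repo}/issue/{id} and issue_prefix = #, a commit message containing "fix #42" in a repository named group/myrepo (an assumed example name) would be rendered with the text "#42" linking to https://issues.example.com/group/myrepo/issue/42.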
284 |
|
284 | |||
285 | <%text>## alternative HTTP response code for failed authentication. The default HTTP</%text> |

285 | <%text>## alternative HTTP response code for failed authentication. The default HTTP</%text> | |
286 | <%text>## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with</%text> |
|
286 | <%text>## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with</%text> | |
287 | <%text>## handling that. Set this variable to 403 to return HTTPForbidden</%text> |
|
287 | <%text>## handling that. Set this variable to 403 to return HTTPForbidden</%text> | |
288 | auth_ret_code = |
|
288 | auth_ret_code = | |
289 |
|
289 | |||
290 | <%text>## locking return code. When a repository is locked, return this HTTP code. 2XX</%text> |

290 | <%text>## locking return code. When a repository is locked, return this HTTP code. 2XX</%text> | |
291 | <%text>## codes don't break the transactions while 4XX codes do</%text> |
|
291 | <%text>## codes don't break the transactions while 4XX codes do</%text> | |
292 | lock_ret_code = 423 |
|
292 | lock_ret_code = 423 | |
293 |
|
293 | |||
294 | <%text>## allows changing the repository location on the settings page</%text> |

294 | <%text>## allows changing the repository location on the settings page</%text> | |
295 | allow_repo_location_change = True |
|
295 | allow_repo_location_change = True | |
296 |
|
296 | |||
297 | <%text>## allows setting up custom hooks on the settings page</%text> |

297 | <%text>## allows setting up custom hooks on the settings page</%text> | |
298 | allow_custom_hooks_settings = True |
|
298 | allow_custom_hooks_settings = True | |
299 |
|
299 | |||
300 | <%text>## extra extensions for indexing, space separated and without the leading '.'.</%text> |
|
300 | <%text>## extra extensions for indexing, space separated and without the leading '.'.</%text> | |
301 | # index.extensions = |
|
301 | # index.extensions = | |
302 | # gemfile |
|
302 | # gemfile | |
303 | # lock |
|
303 | # lock | |
304 |
|
304 | |||
305 | <%text>## extra filenames for indexing, space separated</%text> |
|
305 | <%text>## extra filenames for indexing, space separated</%text> | |
306 | # index.filenames = |
|
306 | # index.filenames = | |
307 | # .dockerignore |
|
307 | # .dockerignore | |
308 | # .editorconfig |
|
308 | # .editorconfig | |
309 | # INSTALL |
|
309 | # INSTALL | |
310 | # CHANGELOG |
|
310 | # CHANGELOG | |
311 |
|
311 | |||
312 | <%text>####################################</%text> |
|
312 | <%text>####################################</%text> | |
313 | <%text>### CELERY CONFIG ####</%text> |
|
313 | <%text>### CELERY CONFIG ####</%text> | |
314 | <%text>####################################</%text> |
|
314 | <%text>####################################</%text> | |
315 |
|
315 | |||
316 | use_celery = false |
|
316 | use_celery = false | |
317 |
|
317 | |||
318 | <%text>## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq:</%text> |
|
318 | <%text>## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq:</%text> | |
319 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost |
|
319 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost | |
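The example above follows the standard AMQP URL form (generic AMQP syntax, not specific to Kallithea):

    broker.url = amqp://<user>:<password>@<host>:<port>/<virtual_host>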
320 |
|
320 | |||
321 | celery.imports = kallithea.lib.celerylib.tasks |
|
321 | celery.imports = kallithea.lib.celerylib.tasks | |
322 | celery.accept.content = pickle |
|
322 | celery.accept.content = pickle | |
323 | celery.result.backend = amqp |
|
323 | celery.result.backend = amqp | |
324 | celery.result.dburi = amqp:// |
|
324 | celery.result.dburi = amqp:// | |
325 | celery.result.serializer = json |

325 | celery.result.serializer = json | |
326 |
|
326 | |||
327 | #celery.send.task.error.emails = true |
|
327 | #celery.send.task.error.emails = true | |
328 | #celery.amqp.task.result.expires = 18000 |
|
328 | #celery.amqp.task.result.expires = 18000 | |
329 |
|
329 | |||
330 | celeryd.concurrency = 2 |
|
330 | celeryd.concurrency = 2 | |
331 | celeryd.max.tasks.per.child = 1 |
|
331 | celeryd.max.tasks.per.child = 1 | |
332 |
|
332 | |||
333 | <%text>## If true, tasks will never be sent to the queue, but executed locally instead.</%text> |
|
333 | <%text>## If true, tasks will never be sent to the queue, but executed locally instead.</%text> | |
334 | celery.always.eager = false |
|
334 | celery.always.eager = false | |
335 |
|
335 | |||
336 | <%text>####################################</%text> |
|
336 | <%text>####################################</%text> | |
337 | <%text>### BEAKER CACHE ####</%text> |
|
337 | <%text>### BEAKER CACHE ####</%text> | |
338 | <%text>####################################</%text> |
|
338 | <%text>####################################</%text> | |
339 |
|
339 | |||
340 | beaker.cache.data_dir = ${here}/data/cache/data |
|
340 | beaker.cache.data_dir = ${here}/data/cache/data | |
341 | beaker.cache.lock_dir = ${here}/data/cache/lock |
|
341 | beaker.cache.lock_dir = ${here}/data/cache/lock | |
342 |
|
342 | |||
343 | beaker.cache.regions = short_term,long_term,sql_cache_short |
|
343 | beaker.cache.regions = short_term,long_term,sql_cache_short | |
344 |
|
344 | |||
345 | beaker.cache.short_term.type = memory |
|
345 | beaker.cache.short_term.type = memory | |
346 | beaker.cache.short_term.expire = 60 |
|
346 | beaker.cache.short_term.expire = 60 | |
347 | beaker.cache.short_term.key_length = 256 |
|
347 | beaker.cache.short_term.key_length = 256 | |
348 |
|
348 | |||
349 | beaker.cache.long_term.type = memory |
|
349 | beaker.cache.long_term.type = memory | |
350 | beaker.cache.long_term.expire = 36000 |
|
350 | beaker.cache.long_term.expire = 36000 | |
351 | beaker.cache.long_term.key_length = 256 |
|
351 | beaker.cache.long_term.key_length = 256 | |
352 |
|
352 | |||
353 | beaker.cache.sql_cache_short.type = memory |
|
353 | beaker.cache.sql_cache_short.type = memory | |
354 | beaker.cache.sql_cache_short.expire = 10 |
|
354 | beaker.cache.sql_cache_short.expire = 10 | |
355 | beaker.cache.sql_cache_short.key_length = 256 |
|
355 | beaker.cache.sql_cache_short.key_length = 256 | |
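The regions above keep everything in process memory. As a sketch of an alternative, Beaker also supports a memcached backend per region (standard Beaker options, shown here as an assumption for the long_term region with a memcached server on 127.0.0.1:11211):

    #beaker.cache.long_term.type = ext:memcached
    #beaker.cache.long_term.url = 127.0.0.1:11211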
356 |
|
356 | |||
357 | <%text>####################################</%text> |
|
357 | <%text>####################################</%text> | |
358 | <%text>### BEAKER SESSION ####</%text> |
|
358 | <%text>### BEAKER SESSION ####</%text> | |
359 | <%text>####################################</%text> |
|
359 | <%text>####################################</%text> | |
360 |
|
360 | |||
361 | <%text>## Name of session cookie. Should be unique for a given host and path, even when running</%text> |
|
361 | <%text>## Name of session cookie. Should be unique for a given host and path, even when running</%text> | |
362 | <%text>## on different ports. Otherwise, cookie sessions will be shared and messed up.</%text> |
|
362 | <%text>## on different ports. Otherwise, cookie sessions will be shared and messed up.</%text> | |
363 | beaker.session.key = kallithea |
|
363 | beaker.session.key = kallithea | |
364 | <%text>## Sessions should always only be accessible by the browser, not directly by JavaScript.</%text> |
|
364 | <%text>## Sessions should always only be accessible by the browser, not directly by JavaScript.</%text> | |
365 | beaker.session.httponly = true |
|
365 | beaker.session.httponly = true | |
366 | <%text>## Session lifetime. 2592000 seconds is 30 days.</%text> |
|
366 | <%text>## Session lifetime. 2592000 seconds is 30 days.</%text> | |
367 | beaker.session.timeout = 2592000 |
|
367 | beaker.session.timeout = 2592000 | |
368 |
|
368 | |||
369 | <%text>## Server secret used with HMAC to ensure integrity of cookies.</%text> |
|
369 | <%text>## Server secret used with HMAC to ensure integrity of cookies.</%text> | |
370 | beaker.session.secret = ${uuid()} |
|
370 | beaker.session.secret = ${uuid()} | |
371 | <%text>## Further, encrypt the data with AES.</%text> |
|
371 | <%text>## Further, encrypt the data with AES.</%text> | |
372 | #beaker.session.encrypt_key = <key_for_encryption> |
|
372 | #beaker.session.encrypt_key = <key_for_encryption> | |
373 | #beaker.session.validate_key = <validation_key> |
|
373 | #beaker.session.validate_key = <validation_key> | |
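One simple way to generate random values for encrypt_key and validate_key (just an illustration using the Python standard library; any sufficiently random secret will do):

    python -c "import uuid; print(uuid.uuid4().hex)"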
374 |
|
374 | |||
375 | <%text>## Type of storage used for the session, current types are</%text> |
|
375 | <%text>## Type of storage used for the session, current types are</%text> | |
376 | <%text>## dbm, file, memcached, database, and memory.</%text> |
|
376 | <%text>## dbm, file, memcached, database, and memory.</%text> | |
377 |
|
377 | |||
378 | <%text>## File system storage of session data. (default)</%text> |
|
378 | <%text>## File system storage of session data. (default)</%text> | |
379 | #beaker.session.type = file |
|
379 | #beaker.session.type = file | |
380 |
|
380 | |||
381 | <%text>## Cookie only, store all session data inside the cookie. Requires secure secrets.</%text> |
|
381 | <%text>## Cookie only, store all session data inside the cookie. Requires secure secrets.</%text> | |
382 | #beaker.session.type = cookie |
|
382 | #beaker.session.type = cookie | |
383 |
|
383 | |||
384 | <%text>## Database storage of session data.</%text> |
|
384 | <%text>## Database storage of session data.</%text> | |
385 | #beaker.session.type = ext:database |
|
385 | #beaker.session.type = ext:database | |
386 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea |
|
386 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea | |
387 | #beaker.session.table_name = db_session |
|
387 | #beaker.session.table_name = db_session | |
388 |
|
388 | |||
389 | %if error_aggregation_service == 'appenlight': |
|
389 | %if error_aggregation_service == 'appenlight': | |
390 | <%text>############################</%text> |
|
390 | <%text>############################</%text> | |
391 | <%text>## ERROR HANDLING SYSTEMS ##</%text> |
|
391 | <%text>## ERROR HANDLING SYSTEMS ##</%text> | |
392 | <%text>############################</%text> |
|
392 | <%text>############################</%text> | |
393 |
|
393 | |||
394 | # Propagate email settings to ErrorReporter of TurboGears2 |
|
394 | # Propagate email settings to ErrorReporter of TurboGears2 | |
395 | # You do not normally need to change these lines |
|
395 | # You do not normally need to change these lines | |
396 | get trace_errors.error_email = email_to |
|
396 | get trace_errors.error_email = email_to | |
397 | get trace_errors.smtp_server = smtp_server |
|
397 | get trace_errors.smtp_server = smtp_server | |
398 | get trace_errors.smtp_port = smtp_port |
|
398 | get trace_errors.smtp_port = smtp_port | |
399 | get trace_errors.from_address = error_email_from |
|
399 | get trace_errors.from_address = error_email_from | |
400 |
|
400 | |||
401 | <%text>####################</%text> |
|
401 | <%text>####################</%text> | |
402 | <%text>### [appenlight] ###</%text> |
|
402 | <%text>### [appenlight] ###</%text> | |
403 | <%text>####################</%text> |
|
403 | <%text>####################</%text> | |
404 |
|
404 | |||
405 | <%text>## AppEnlight is tailored to work with Kallithea, see</%text> |
|
405 | <%text>## AppEnlight is tailored to work with Kallithea, see</%text> | |
406 | <%text>## http://appenlight.com for details on how to obtain an account</%text> |

406 | <%text>## http://appenlight.com for details on how to obtain an account</%text> | |
407 | <%text>## you must install the python package `appenlight_client` to make it work</%text> |

407 | <%text>## you must install the python package `appenlight_client` to make it work</%text> | |
408 |
|
408 | |||
409 | <%text>## appenlight enabled</%text> |
|
409 | <%text>## appenlight enabled</%text> | |
410 | appenlight = false |
|
410 | appenlight = false | |
411 |
|
411 | |||
412 | appenlight.server_url = https://api.appenlight.com |
|
412 | appenlight.server_url = https://api.appenlight.com | |
413 | appenlight.api_key = YOUR_API_KEY |
|
413 | appenlight.api_key = YOUR_API_KEY | |
414 |
|
414 | |||
415 | <%text>## TWEAK AMOUNT OF INFO SENT HERE</%text> |
|
415 | <%text>## TWEAK AMOUNT OF INFO SENT HERE</%text> | |
416 |
|
416 | |||
417 | <%text>## enables 404 error logging (default False)</%text> |
|
417 | <%text>## enables 404 error logging (default False)</%text> | |
418 | appenlight.report_404 = false |
|
418 | appenlight.report_404 = false | |
419 |
|
419 | |||
420 | <%text>## time in seconds after which a request is considered slow (default 1)</%text> |

420 | <%text>## time in seconds after which a request is considered slow (default 1)</%text> | |
421 | appenlight.slow_request_time = 1 |
|
421 | appenlight.slow_request_time = 1 | |
422 |
|
422 | |||
423 | <%text>## record slow requests in application</%text> |
|
423 | <%text>## record slow requests in application</%text> | |
424 | <%text>## (needs to be enabled for slow datastore recording and time tracking)</%text> |
|
424 | <%text>## (needs to be enabled for slow datastore recording and time tracking)</%text> | |
425 | appenlight.slow_requests = true |
|
425 | appenlight.slow_requests = true | |
426 |
|
426 | |||
427 | <%text>## enable hooking to application loggers</%text> |
|
427 | <%text>## enable hooking to application loggers</%text> | |
428 | #appenlight.logging = true |
|
428 | #appenlight.logging = true | |
429 |
|
429 | |||
430 | <%text>## minimum log level for log capture</%text> |
|
430 | <%text>## minimum log level for log capture</%text> | |
431 | #appenlight.logging.level = WARNING |
|
431 | #appenlight.logging.level = WARNING | |
432 |
|
432 | |||
433 | <%text>## send logs only from erroneous/slow requests</%text> |
|
433 | <%text>## send logs only from erroneous/slow requests</%text> | |
434 | <%text>## (saves API quota for intensive logging)</%text> |
|
434 | <%text>## (saves API quota for intensive logging)</%text> | |
435 | appenlight.logging_on_error = false |
|
435 | appenlight.logging_on_error = false | |
436 |
|
436 | |||
437 | <%text>## list of additional keywords that should be grabbed from environ object</%text> |
|
437 | <%text>## list of additional keywords that should be grabbed from environ object</%text> | |
438 | <%text>## can be string with comma separated list of words in lowercase</%text> |
|
438 | <%text>## can be string with comma separated list of words in lowercase</%text> | |
439 | <%text>## (by default client will always send following info:</%text> |
|
439 | <%text>## (by default client will always send following info:</%text> | |
440 | <%text>## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that</%text> |
|
440 | <%text>## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that</%text> | |
441 | <%text>## start with HTTP*); this list can be extended with additional keywords here</%text> |

441 | <%text>## start with HTTP*); this list can be extended with additional keywords here</%text> | |
442 | appenlight.environ_keys_whitelist = |
|
442 | appenlight.environ_keys_whitelist = | |
443 |
|
443 | |||
444 | <%text>## list of keywords that should be blanked from request object</%text> |
|
444 | <%text>## list of keywords that should be blanked from request object</%text> | |
445 | <%text>## can be string with comma separated list of words in lowercase</%text> |
|
445 | <%text>## can be string with comma separated list of words in lowercase</%text> | |
446 | <%text>## (by default client will always blank keys that contain following words</%text> |
|
446 | <%text>## (by default client will always blank keys that contain following words</%text> | |
447 | <%text>## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'</%text> |
|
447 | <%text>## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'</%text> | |
448 | <%text>## this list can be extended with additional keywords set here</%text> |

448 | <%text>## this list can be extended with additional keywords set here</%text> | |
449 | appenlight.request_keys_blacklist = |
|
449 | appenlight.request_keys_blacklist = | |
450 |
|
450 | |||
451 | <%text>## list of namespaces that should be ignored when gathering log entries</%text> |

451 | <%text>## list of namespaces that should be ignored when gathering log entries</%text> | |
452 | <%text>## can be a string with a comma separated list of namespaces</%text> |

452 | <%text>## can be a string with a comma separated list of namespaces</%text> | |
453 | <%text>## (by default the client ignores its own entries: appenlight_client.client)</%text> |

453 | <%text>## (by default the client ignores its own entries: appenlight_client.client)</%text> | |
454 | appenlight.log_namespace_blacklist = |
|
454 | appenlight.log_namespace_blacklist = | |
455 |
|
455 | |||
456 | %elif error_aggregation_service == 'sentry': |
|
456 | %elif error_aggregation_service == 'sentry': | |
457 | <%text>################</%text> |
|
457 | <%text>################</%text> | |
458 | <%text>### [sentry] ###</%text> |
|
458 | <%text>### [sentry] ###</%text> | |
459 | <%text>################</%text> |
|
459 | <%text>################</%text> | |
460 |
|
460 | |||
461 | <%text>## Sentry is an alternative open source error aggregator</%text> |

461 | <%text>## Sentry is an alternative open source error aggregator</%text> | |
462 | <%text>## you must install python packages `sentry` and `raven` to enable</%text> |
|
462 | <%text>## you must install python packages `sentry` and `raven` to enable</%text> | |
463 |
|
463 | |||
464 | sentry.dsn = YOUR_DSN |

464 | sentry.dsn = YOUR_DSN | |
465 | sentry.servers = |
|
465 | sentry.servers = | |
466 | sentry.name = |
|
466 | sentry.name = | |
467 | sentry.key = |
|
467 | sentry.key = | |
468 | sentry.public_key = |
|
468 | sentry.public_key = | |
469 | sentry.secret_key = |
|
469 | sentry.secret_key = | |
470 | sentry.project = |
|
470 | sentry.project = | |
471 | sentry.site = |
|
471 | sentry.site = | |
472 | sentry.include_paths = |
|
472 | sentry.include_paths = | |
473 | sentry.exclude_paths = |
|
473 | sentry.exclude_paths = | |
474 |
|
474 | |||
475 | %endif |
|
475 | %endif | |
476 | <%text>################################################################################</%text> |
|
476 | <%text>################################################################################</%text> | |
477 | <%text>## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ##</%text> |
|
477 | <%text>## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ##</%text> | |
478 | <%text>## Debug mode will enable the interactive debugging tool, allowing ANYONE to ##</%text> |
|
478 | <%text>## Debug mode will enable the interactive debugging tool, allowing ANYONE to ##</%text> | |
479 | <%text>## execute malicious code after an exception is raised. ##</%text> |
|
479 | <%text>## execute malicious code after an exception is raised. ##</%text> | |
480 | <%text>################################################################################</%text> |
|
480 | <%text>################################################################################</%text> | |
481 | debug = false |
|
481 | debug = false | |
482 |
|
482 | |||
483 | <%text>##################################</%text> |
|
483 | <%text>##################################</%text> | |
484 | <%text>### LOGVIEW CONFIG ###</%text> |
|
484 | <%text>### LOGVIEW CONFIG ###</%text> | |
485 | <%text>##################################</%text> |
|
485 | <%text>##################################</%text> | |
486 |
|
486 | |||
487 | logview.sqlalchemy = #faa |
|
487 | logview.sqlalchemy = #faa | |
488 | logview.pylons.templating = #bfb |
|
488 | logview.pylons.templating = #bfb | |
489 | logview.pylons.util = #eee |
|
489 | logview.pylons.util = #eee | |
490 |
|
490 | |||
491 | <%text>#########################################################</%text> |
|
491 | <%text>#########################################################</%text> | |
492 | <%text>### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG ###</%text> |

492 | <%text>### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG ###</%text> | |
493 | <%text>#########################################################</%text> |
|
493 | <%text>#########################################################</%text> | |
494 |
|
494 | |||
495 | %if database_engine == 'sqlite': |
|
495 | %if database_engine == 'sqlite': | |
496 | # SQLITE [default] |
|
496 | # SQLITE [default] | |
497 | sqlalchemy.url = sqlite:///${here}/kallithea.db?timeout=60 |
|
497 | sqlalchemy.url = sqlite:///${here}/kallithea.db?timeout=60 | |
498 |
|
498 | |||
499 | %elif database_engine == 'postgres': |
|
499 | %elif database_engine == 'postgres': | |
500 | # POSTGRESQL |
|
500 | # POSTGRESQL | |
501 | sqlalchemy.url = postgresql://user:pass@localhost/kallithea |
|
501 | sqlalchemy.url = postgresql://user:pass@localhost/kallithea | |
502 |
|
502 | |||
503 | %elif database_engine == 'mysql': |
|
503 | %elif database_engine == 'mysql': | |
504 | # MySQL |
|
504 | # MySQL | |
505 | sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 |
|
505 | sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 | |
506 |
|
506 | |||
507 | %endif |
|
507 | %endif | |
508 | # see sqlalchemy docs for others |
|
508 | # see sqlalchemy docs for others | |
509 |
|
509 | |||
510 | sqlalchemy.echo = false |
|
|||
511 | sqlalchemy.pool_recycle = 3600 |
|
510 | sqlalchemy.pool_recycle = 3600 | |
512 |
|
511 | |||
513 | <%text>################################</%text> |
|
512 | <%text>################################</%text> | |
514 | <%text>### ALEMBIC CONFIGURATION ####</%text> |
|
513 | <%text>### ALEMBIC CONFIGURATION ####</%text> | |
515 | <%text>################################</%text> |
|
514 | <%text>################################</%text> | |
516 |
|
515 | |||
517 | [alembic] |
|
516 | [alembic] | |
518 | script_location = kallithea:alembic |
|
517 | script_location = kallithea:alembic | |
519 |
|
518 | |||
520 | <%text>################################</%text> |
|
519 | <%text>################################</%text> | |
521 | <%text>### LOGGING CONFIGURATION ####</%text> |
|
520 | <%text>### LOGGING CONFIGURATION ####</%text> | |
522 | <%text>################################</%text> |
|
521 | <%text>################################</%text> | |
523 |
|
522 | |||
524 | [loggers] |
|
523 | [loggers] | |
525 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer |
|
524 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer | |
526 |
|
525 | |||
527 | [handlers] |
|
526 | [handlers] | |
528 | keys = console, console_sql |
|
527 | keys = console, console_sql | |
529 |
|
528 | |||
530 | [formatters] |
|
529 | [formatters] | |
531 | keys = generic, color_formatter, color_formatter_sql |
|
530 | keys = generic, color_formatter, color_formatter_sql | |
532 |
|
531 | |||
533 | <%text>#############</%text> |
|
532 | <%text>#############</%text> | |
534 | <%text>## LOGGERS ##</%text> |
|
533 | <%text>## LOGGERS ##</%text> | |
535 | <%text>#############</%text> |
|
534 | <%text>#############</%text> | |
536 |
|
535 | |||
537 | [logger_root] |
|
536 | [logger_root] | |
538 | level = NOTSET |
|
537 | level = NOTSET | |
539 | handlers = console |
|
538 | handlers = console | |
540 |
|
539 | |||
541 | [logger_routes] |
|
540 | [logger_routes] | |
542 | level = DEBUG |
|
541 | level = DEBUG | |
543 | handlers = |
|
542 | handlers = | |
544 | qualname = routes.middleware |
|
543 | qualname = routes.middleware | |
545 | <%text>## "level = DEBUG" logs the route matched and routing variables.</%text> |
|
544 | <%text>## "level = DEBUG" logs the route matched and routing variables.</%text> | |
546 | propagate = 1 |
|
545 | propagate = 1 | |
547 |
|
546 | |||
548 | [logger_beaker] |
|
547 | [logger_beaker] | |
549 | level = DEBUG |
|
548 | level = DEBUG | |
550 | handlers = |
|
549 | handlers = | |
551 | qualname = beaker.container |
|
550 | qualname = beaker.container | |
552 | propagate = 1 |
|
551 | propagate = 1 | |
553 |
|
552 | |||
554 | [logger_templates] |
|
553 | [logger_templates] | |
555 | level = INFO |
|
554 | level = INFO | |
556 | handlers = |
|
555 | handlers = | |
557 | qualname = pylons.templating |
|
556 | qualname = pylons.templating | |
558 | propagate = 1 |
|
557 | propagate = 1 | |
559 |
|
558 | |||
560 | [logger_kallithea] |
|
559 | [logger_kallithea] | |
561 | level = DEBUG |
|
560 | level = DEBUG | |
562 | handlers = |
|
561 | handlers = | |
563 | qualname = kallithea |
|
562 | qualname = kallithea | |
564 | propagate = 1 |
|
563 | propagate = 1 | |
565 |
|
564 | |||
566 | [logger_tg] |
|
565 | [logger_tg] | |
567 | level = DEBUG |
|
566 | level = DEBUG | |
568 | handlers = |
|
567 | handlers = | |
569 | qualname = tg |
|
568 | qualname = tg | |
570 | propagate = 1 |
|
569 | propagate = 1 | |
571 |
|
570 | |||
572 | [logger_gearbox] |
|
571 | [logger_gearbox] | |
573 | level = DEBUG |
|
572 | level = DEBUG | |
574 | handlers = |
|
573 | handlers = | |
575 | qualname = gearbox |
|
574 | qualname = gearbox | |
576 | propagate = 1 |
|
575 | propagate = 1 | |
577 |
|
576 | |||
578 | [logger_sqlalchemy] |
|
577 | [logger_sqlalchemy] | |
579 | level = INFO |
|
578 | level = INFO | |
580 | handlers = console_sql |
|
579 | handlers = console_sql | |
581 | qualname = sqlalchemy.engine |
|
580 | qualname = sqlalchemy.engine | |
582 | propagate = 0 |
|
581 | propagate = 0 | |
583 |
|
582 | |||
584 | [logger_whoosh_indexer] |
|
583 | [logger_whoosh_indexer] | |
585 | level = DEBUG |
|
584 | level = DEBUG | |
586 | handlers = |
|
585 | handlers = | |
587 | qualname = whoosh_indexer |
|
586 | qualname = whoosh_indexer | |
588 | propagate = 1 |
|
587 | propagate = 1 | |
589 |
|
588 | |||
590 | <%text>##############</%text> |
|
589 | <%text>##############</%text> | |
591 | <%text>## HANDLERS ##</%text> |
|
590 | <%text>## HANDLERS ##</%text> | |
592 | <%text>##############</%text> |
|
591 | <%text>##############</%text> | |
593 |
|
592 | |||
594 | [handler_console] |
|
593 | [handler_console] | |
595 | class = StreamHandler |
|
594 | class = StreamHandler | |
596 | args = (sys.stderr,) |
|
595 | args = (sys.stderr,) | |
597 | level = INFO |
|
596 | level = INFO | |
598 | formatter = generic |
|
597 | formatter = generic | |
599 |
|
598 | |||
600 | [handler_console_sql] |
|
599 | [handler_console_sql] | |
601 | class = StreamHandler |
|
600 | class = StreamHandler | |
602 | args = (sys.stderr,) |
|
601 | args = (sys.stderr,) | |
603 | level = WARN |
|
602 | level = WARN | |
604 | formatter = generic |
|
603 | formatter = generic | |
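With the sqlalchemy logger above at INFO and this handler at WARN, SQL statements are filtered out of the console output. To see them, lower the handler level in [handler_console_sql] (plain Python logging behaviour, nothing Kallithea-specific), for example:

    level = INFO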
605 |
|
604 | |||
606 | <%text>################</%text> |
|
605 | <%text>################</%text> | |
607 | <%text>## FORMATTERS ##</%text> |
|
606 | <%text>## FORMATTERS ##</%text> | |
608 | <%text>################</%text> |
|
607 | <%text>################</%text> | |
609 |
|
608 | |||
610 | [formatter_generic] |
|
609 | [formatter_generic] | |
611 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
610 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
612 | datefmt = %Y-%m-%d %H:%M:%S |
|
611 | datefmt = %Y-%m-%d %H:%M:%S | |
613 |
|
612 | |||
614 | [formatter_color_formatter] |
|
613 | [formatter_color_formatter] | |
615 | class = kallithea.lib.colored_formatter.ColorFormatter |
|
614 | class = kallithea.lib.colored_formatter.ColorFormatter | |
616 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
615 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
617 | datefmt = %Y-%m-%d %H:%M:%S |
|
616 | datefmt = %Y-%m-%d %H:%M:%S | |
618 |
|
617 | |||
619 | [formatter_color_formatter_sql] |
|
618 | [formatter_color_formatter_sql] | |
620 | class = kallithea.lib.colored_formatter.ColorFormatterSql |
|
619 | class = kallithea.lib.colored_formatter.ColorFormatterSql | |
621 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
620 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
622 | datefmt = %Y-%m-%d %H:%M:%S |
|
621 | datefmt = %Y-%m-%d %H:%M:%S |
@@ -1,390 +1,390 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 |
|
14 | |||
15 | """ |
|
15 | """ | |
16 | Helpers for fixture generation |
|
16 | Helpers for fixture generation | |
17 | """ |
|
17 | """ | |
18 |
|
18 | |||
19 | import logging |
|
19 | import logging | |
20 | import os |
|
20 | import os | |
21 | import shutil |
|
21 | import shutil | |
22 | import tarfile |
|
22 | import tarfile | |
23 | from os.path import dirname |
|
23 | from os.path import dirname | |
24 |
|
24 | |||
25 | from kallithea.model.db import Repository, User, RepoGroup, UserGroup, Gist |
|
25 | from kallithea.model.db import Repository, User, RepoGroup, UserGroup, Gist | |
26 | from kallithea.model.meta import Session |
|
26 | from kallithea.model.meta import Session | |
27 | from kallithea.model.repo import RepoModel |
|
27 | from kallithea.model.repo import RepoModel | |
28 | from kallithea.model.user import UserModel |
|
28 | from kallithea.model.user import UserModel | |
29 | from kallithea.model.repo_group import RepoGroupModel |
|
29 | from kallithea.model.repo_group import RepoGroupModel | |
30 | from kallithea.model.user_group import UserGroupModel |
|
30 | from kallithea.model.user_group import UserGroupModel | |
31 | from kallithea.model.gist import GistModel |
|
31 | from kallithea.model.gist import GistModel | |
32 | from kallithea.model.scm import ScmModel |
|
32 | from kallithea.model.scm import ScmModel | |
33 | from kallithea.lib.db_manage import DbManage |
|
33 | from kallithea.lib.db_manage import DbManage | |
34 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
34 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
35 | from kallithea.tests.base import invalidate_all_caches, GIT_REPO, HG_REPO, TESTS_TMP_PATH, TEST_USER_ADMIN_LOGIN |
|
35 | from kallithea.tests.base import invalidate_all_caches, GIT_REPO, HG_REPO, TESTS_TMP_PATH, TEST_USER_ADMIN_LOGIN | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | log = logging.getLogger(__name__) |
|
38 | log = logging.getLogger(__name__) | |
39 |
|
39 | |||
40 | FIXTURES = os.path.join(dirname(dirname(os.path.abspath(__file__))), 'tests', 'fixtures') |
|
40 | FIXTURES = os.path.join(dirname(dirname(os.path.abspath(__file__))), 'tests', 'fixtures') | |
41 |
|
41 | |||
42 |
|
42 | |||
43 | def error_function(*args, **kwargs): |
|
43 | def error_function(*args, **kwargs): | |
44 | raise Exception('Total Crash !') |
|
44 | raise Exception('Total Crash !') | |
45 |
|
45 | |||
46 |
|
46 | |||
47 | class Fixture(object): |
|
47 | class Fixture(object): | |
48 |
|
48 | |||
49 | def __init__(self): |
|
49 | def __init__(self): | |
50 | pass |
|
50 | pass | |
51 |
|
51 | |||
52 | def anon_access(self, status): |
|
52 | def anon_access(self, status): | |
53 | """ |
|
53 | """ | |
54 | Context manager for controlling anonymous access. |
|
54 | Context manager for controlling anonymous access. | |
55 | Anon access will be set and committed, but restored again when exiting the block. |
|
55 | Anon access will be set and committed, but restored again when exiting the block. | |
56 |
|
56 | |||
57 | Usage: |
|
57 | Usage: | |
58 |
|
58 | |||
59 | fixture = Fixture() |
|
59 | fixture = Fixture() | |
60 | with fixture.anon_access(False): |
|
60 | with fixture.anon_access(False): | |
61 | stuff |
|
61 | stuff | |
62 | """ |
|
62 | """ | |
63 |
|
63 | |||
64 | class context(object): |
|
64 | class context(object): | |
65 | def __enter__(self): |
|
65 | def __enter__(self): | |
66 | anon = User.get_default_user() |
|
66 | anon = User.get_default_user() | |
67 | self._before = anon.active |
|
67 | self._before = anon.active | |
68 | anon.active = status |
|
68 | anon.active = status | |
69 | Session().commit() |
|
69 | Session().commit() | |
70 | invalidate_all_caches() |
|
70 | invalidate_all_caches() | |
71 |
|
71 | |||
72 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
72 | def __exit__(self, exc_type, exc_val, exc_tb): | |
73 | anon = User.get_default_user() |
|
73 | anon = User.get_default_user() | |
74 | anon.active = self._before |
|
74 | anon.active = self._before | |
75 | Session().commit() |
|
75 | Session().commit() | |
76 |
|
76 | |||
77 | return context() |
|
77 | return context() | |
78 |
|
78 | |||
79 | def _get_repo_create_params(self, **custom): |
|
79 | def _get_repo_create_params(self, **custom): | |
80 | """Return form values to be validated through RepoForm""" |
|
80 | """Return form values to be validated through RepoForm""" | |
81 | defs = dict( |
|
81 | defs = dict( | |
82 | repo_name=None, |
|
82 | repo_name=None, | |
83 | repo_type='hg', |
|
83 | repo_type='hg', | |
84 | clone_uri='', |
|
84 | clone_uri='', | |
85 | repo_group=u'-1', |
|
85 | repo_group=u'-1', | |
86 | repo_description=u'DESC', |
|
86 | repo_description=u'DESC', | |
87 | repo_private=False, |
|
87 | repo_private=False, | |
88 | repo_landing_rev='rev:tip', |
|
88 | repo_landing_rev='rev:tip', | |
89 | repo_copy_permissions=False, |
|
89 | repo_copy_permissions=False, | |
90 | repo_state=Repository.STATE_CREATED, |
|
90 | repo_state=Repository.STATE_CREATED, | |
91 | ) |
|
91 | ) | |
92 | defs.update(custom) |
|
92 | defs.update(custom) | |
93 | if 'repo_name_full' not in custom: |
|
93 | if 'repo_name_full' not in custom: | |
94 | defs.update({'repo_name_full': defs['repo_name']}) |
|
94 | defs.update({'repo_name_full': defs['repo_name']}) | |
95 |
|
95 | |||
96 | # fix the repo name if passed as repo_name_full |
|
96 | # fix the repo name if passed as repo_name_full | |
97 | if defs['repo_name']: |
|
97 | if defs['repo_name']: | |
98 | defs['repo_name'] = defs['repo_name'].split('/')[-1] |
|
98 | defs['repo_name'] = defs['repo_name'].split('/')[-1] | |
99 |
|
99 | |||
100 | return defs |
|
100 | return defs | |
101 |
|
101 | |||
102 | def _get_repo_group_create_params(self, **custom): |
|
102 | def _get_repo_group_create_params(self, **custom): | |
103 | """Return form values to be validated through RepoGroupForm""" |
|
103 | """Return form values to be validated through RepoGroupForm""" | |
104 | defs = dict( |
|
104 | defs = dict( | |
105 | group_name=None, |
|
105 | group_name=None, | |
106 | group_description=u'DESC', |
|
106 | group_description=u'DESC', | |
107 | parent_group_id=u'-1', |
|
107 | parent_group_id=u'-1', | |
108 | perms_updates=[], |
|
108 | perms_updates=[], | |
109 | perms_new=[], |
|
109 | perms_new=[], | |
110 | enable_locking=False, |
|
110 | enable_locking=False, | |
111 | recursive=False |
|
111 | recursive=False | |
112 | ) |
|
112 | ) | |
113 | defs.update(custom) |
|
113 | defs.update(custom) | |
114 |
|
114 | |||
115 | return defs |
|
115 | return defs | |
116 |
|
116 | |||
117 | def _get_user_create_params(self, name, **custom): |
|
117 | def _get_user_create_params(self, name, **custom): | |
118 | defs = dict( |
|
118 | defs = dict( | |
119 | username=name, |
|
119 | username=name, | |
120 | password='qweqwe', |
|
120 | password='qweqwe', | |
121 | email='%s+test@example.com' % name, |
|
121 | email='%s+test@example.com' % name, | |
122 | firstname=u'TestUser', |
|
122 | firstname=u'TestUser', | |
123 | lastname=u'Test', |
|
123 | lastname=u'Test', | |
124 | active=True, |
|
124 | active=True, | |
125 | admin=False, |
|
125 | admin=False, | |
126 | extern_type='internal', |
|
126 | extern_type='internal', | |
127 | extern_name=None |
|
127 | extern_name=None | |
128 | ) |
|
128 | ) | |
129 | defs.update(custom) |
|
129 | defs.update(custom) | |
130 |
|
130 | |||
131 | return defs |
|
131 | return defs | |
132 |
|
132 | |||
133 | def _get_user_group_create_params(self, name, **custom): |
|
133 | def _get_user_group_create_params(self, name, **custom): | |
134 | defs = dict( |
|
134 | defs = dict( | |
135 | users_group_name=name, |
|
135 | users_group_name=name, | |
136 | user_group_description=u'DESC', |
|
136 | user_group_description=u'DESC', | |
137 | users_group_active=True, |
|
137 | users_group_active=True, | |
138 | user_group_data={}, |
|
138 | user_group_data={}, | |
139 | ) |
|
139 | ) | |
140 | defs.update(custom) |
|
140 | defs.update(custom) | |
141 |
|
141 | |||
142 | return defs |
|
142 | return defs | |
143 |
|
143 | |||
144 | def create_repo(self, name, repo_group=None, **kwargs): |
|
144 | def create_repo(self, name, repo_group=None, **kwargs): | |
145 | if 'skip_if_exists' in kwargs: |
|
145 | if 'skip_if_exists' in kwargs: | |
146 | del kwargs['skip_if_exists'] |
|
146 | del kwargs['skip_if_exists'] | |
147 | r = Repository.get_by_repo_name(name) |
|
147 | r = Repository.get_by_repo_name(name) | |
148 | if r: |
|
148 | if r: | |
149 | return r |
|
149 | return r | |
150 |
|
150 | |||
151 | if isinstance(repo_group, RepoGroup): |
|
151 | if isinstance(repo_group, RepoGroup): | |
152 | repo_group = repo_group.group_id |
|
152 | repo_group = repo_group.group_id | |
153 |
|
153 | |||
154 | form_data = self._get_repo_create_params(repo_name=name, **kwargs) |
|
154 | form_data = self._get_repo_create_params(repo_name=name, **kwargs) | |
155 | form_data['repo_group'] = repo_group # patch form dict so it can be used directly by model |
|
155 | form_data['repo_group'] = repo_group # patch form dict so it can be used directly by model | |
156 | cur_user = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
156 | cur_user = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) | |
157 | RepoModel().create(form_data, cur_user) |
|
157 | RepoModel().create(form_data, cur_user) | |
158 | Session().commit() |
|
158 | Session().commit() | |
159 | ScmModel().mark_for_invalidation(name) |
|
159 | ScmModel().mark_for_invalidation(name) | |
160 | return Repository.get_by_repo_name(name) |
|
160 | return Repository.get_by_repo_name(name) | |
161 |
|
161 | |||
162 | def create_fork(self, repo_to_fork, fork_name, **kwargs): |
|
162 | def create_fork(self, repo_to_fork, fork_name, **kwargs): | |
163 | repo_to_fork = Repository.get_by_repo_name(repo_to_fork) |
|
163 | repo_to_fork = Repository.get_by_repo_name(repo_to_fork) | |
164 |
|
164 | |||
165 | form_data = self._get_repo_create_params(repo_name=fork_name, |
|
165 | form_data = self._get_repo_create_params(repo_name=fork_name, | |
166 | fork_parent_id=repo_to_fork, |
|
166 | fork_parent_id=repo_to_fork, | |
167 | repo_type=repo_to_fork.repo_type, |
|
167 | repo_type=repo_to_fork.repo_type, | |
168 | **kwargs) |
|
168 | **kwargs) | |
169 | # patch form dict so it can be used directly by model |
|
169 | # patch form dict so it can be used directly by model | |
170 | form_data['description'] = form_data['repo_description'] |
|
170 | form_data['description'] = form_data['repo_description'] | |
171 | form_data['private'] = form_data['repo_private'] |
|
171 | form_data['private'] = form_data['repo_private'] | |
172 | form_data['landing_rev'] = form_data['repo_landing_rev'] |
|
172 | form_data['landing_rev'] = form_data['repo_landing_rev'] | |
173 |
|
173 | |||
174 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
174 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) | |
175 | RepoModel().create_fork(form_data, cur_user=owner) |
|
175 | RepoModel().create_fork(form_data, cur_user=owner) | |
176 | Session().commit() |
|
176 | Session().commit() | |
177 | ScmModel().mark_for_invalidation(fork_name) |
|
177 | ScmModel().mark_for_invalidation(fork_name) | |
178 | r = Repository.get_by_repo_name(fork_name) |
|
178 | r = Repository.get_by_repo_name(fork_name) | |
179 | assert r |
|
179 | assert r | |
180 | return r |
|
180 | return r | |
181 |
|
181 | |||
182 | def destroy_repo(self, repo_name, **kwargs): |
|
182 | def destroy_repo(self, repo_name, **kwargs): | |
183 | RepoModel().delete(repo_name, **kwargs) |
|
183 | RepoModel().delete(repo_name, **kwargs) | |
184 | Session().commit() |
|
184 | Session().commit() | |
185 |
|
185 | |||
186 | def create_repo_group(self, name, parent_group_id=None, **kwargs): |
|
186 | def create_repo_group(self, name, parent_group_id=None, **kwargs): | |
187 | if 'skip_if_exists' in kwargs: |
|
187 | if 'skip_if_exists' in kwargs: | |
188 | del kwargs['skip_if_exists'] |
|
188 | del kwargs['skip_if_exists'] | |
189 | gr = RepoGroup.get_by_group_name(group_name=name) |
|
189 | gr = RepoGroup.get_by_group_name(group_name=name) | |
190 | if gr: |
|
190 | if gr: | |
191 | return gr |
|
191 | return gr | |
192 | form_data = self._get_repo_group_create_params(group_name=name, **kwargs) |
|
192 | form_data = self._get_repo_group_create_params(group_name=name, **kwargs) | |
193 | gr = RepoGroupModel().create( |
|
193 | gr = RepoGroupModel().create( | |
194 | group_name=form_data['group_name'], |
|
194 | group_name=form_data['group_name'], | |
195 | group_description=form_data['group_name'], |
|
195 | group_description=form_data['group_name'], | |
196 | parent=parent_group_id, |
|
196 | parent=parent_group_id, | |
197 | owner=kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN), |
|
197 | owner=kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN), | |
198 | ) |
|
198 | ) | |
199 | Session().commit() |
|
199 | Session().commit() | |
200 | gr = RepoGroup.get_by_group_name(gr.group_name) |
|
200 | gr = RepoGroup.get_by_group_name(gr.group_name) | |
201 | return gr |
|
201 | return gr | |
202 |
|
202 | |||
203 | def destroy_repo_group(self, repogroupid): |
|
203 | def destroy_repo_group(self, repogroupid): | |
204 | RepoGroupModel().delete(repogroupid) |
|
204 | RepoGroupModel().delete(repogroupid) | |
205 | Session().commit() |
|
205 | Session().commit() | |
206 |
|
206 | |||
207 | def create_user(self, name, **kwargs): |
|
207 | def create_user(self, name, **kwargs): | |
208 | if 'skip_if_exists' in kwargs: |
|
208 | if 'skip_if_exists' in kwargs: | |
209 | del kwargs['skip_if_exists'] |
|
209 | del kwargs['skip_if_exists'] | |
210 | user = User.get_by_username(name) |
|
210 | user = User.get_by_username(name) | |
211 | if user: |
|
211 | if user: | |
212 | return user |
|
212 | return user | |
213 | form_data = self._get_user_create_params(name, **kwargs) |
|
213 | form_data = self._get_user_create_params(name, **kwargs) | |
214 | user = UserModel().create(form_data) |
|
214 | user = UserModel().create(form_data) | |
215 | Session().commit() |
|
215 | Session().commit() | |
216 | user = User.get_by_username(user.username) |
|
216 | user = User.get_by_username(user.username) | |
217 | return user |
|
217 | return user | |
218 |
|
218 | |||
219 | def destroy_user(self, userid): |
|
219 | def destroy_user(self, userid): | |
220 | UserModel().delete(userid) |
|
220 | UserModel().delete(userid) | |
221 | Session().commit() |
|
221 | Session().commit() | |
222 |
|
222 | |||
223 | def create_user_group(self, name, **kwargs): |
|
223 | def create_user_group(self, name, **kwargs): | |
224 | if 'skip_if_exists' in kwargs: |
|
224 | if 'skip_if_exists' in kwargs: | |
225 | del kwargs['skip_if_exists'] |
|
225 | del kwargs['skip_if_exists'] | |
226 | gr = UserGroup.get_by_group_name(group_name=name) |
|
226 | gr = UserGroup.get_by_group_name(group_name=name) | |
227 | if gr: |
|
227 | if gr: | |
228 | return gr |
|
228 | return gr | |
229 | form_data = self._get_user_group_create_params(name, **kwargs) |
|
229 | form_data = self._get_user_group_create_params(name, **kwargs) | |
230 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
230 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) | |
231 | user_group = UserGroupModel().create( |
|
231 | user_group = UserGroupModel().create( | |
232 | name=form_data['users_group_name'], |
|
232 | name=form_data['users_group_name'], | |
233 | description=form_data['user_group_description'], |
|
233 | description=form_data['user_group_description'], | |
234 | owner=owner, active=form_data['users_group_active'], |
|
234 | owner=owner, active=form_data['users_group_active'], | |
235 | group_data=form_data['user_group_data']) |
|
235 | group_data=form_data['user_group_data']) | |
236 | Session().commit() |
|
236 | Session().commit() | |
237 | user_group = UserGroup.get_by_group_name(user_group.users_group_name) |
|
237 | user_group = UserGroup.get_by_group_name(user_group.users_group_name) | |
238 | return user_group |
|
238 | return user_group | |
239 |
|
239 | |||
240 | def destroy_user_group(self, usergroupid): |
|
240 | def destroy_user_group(self, usergroupid): | |
241 | UserGroupModel().delete(user_group=usergroupid, force=True) |
|
241 | UserGroupModel().delete(user_group=usergroupid, force=True) | |
242 | Session().commit() |
|
242 | Session().commit() | |
243 |
|
243 | |||
244 | def create_gist(self, **kwargs): |
|
244 | def create_gist(self, **kwargs): | |
245 | form_data = { |
|
245 | form_data = { | |
246 | 'description': u'new-gist', |
|
246 | 'description': u'new-gist', | |
247 | 'owner': TEST_USER_ADMIN_LOGIN, |
|
247 | 'owner': TEST_USER_ADMIN_LOGIN, | |
248 | 'gist_type': Gist.GIST_PUBLIC, |
|
248 | 'gist_type': Gist.GIST_PUBLIC, | |
249 | 'lifetime': -1, |
|
249 | 'lifetime': -1, | |
250 | 'gist_mapping': {'filename1.txt':{'content':'hello world'},} |
|
250 | 'gist_mapping': {'filename1.txt':{'content':'hello world'},} | |
251 | } |
|
251 | } | |
252 | form_data.update(kwargs) |
|
252 | form_data.update(kwargs) | |
253 | gist = GistModel().create( |
|
253 | gist = GistModel().create( | |
254 | description=form_data['description'],owner=form_data['owner'], |
|
254 | description=form_data['description'],owner=form_data['owner'], | |
255 | gist_mapping=form_data['gist_mapping'], gist_type=form_data['gist_type'], |
|
255 | gist_mapping=form_data['gist_mapping'], gist_type=form_data['gist_type'], | |
256 | lifetime=form_data['lifetime'] |
|
256 | lifetime=form_data['lifetime'] | |
257 | ) |
|
257 | ) | |
258 | Session().commit() |
|
258 | Session().commit() | |
259 |
|
259 | |||
260 | return gist |
|
260 | return gist | |
261 |
|
261 | |||
262 | def destroy_gists(self, gistid=None): |
|
262 | def destroy_gists(self, gistid=None): | |
263 | for g in Gist.query(): |
|
263 | for g in Gist.query(): | |
264 | if gistid: |
|
264 | if gistid: | |
265 | if gistid == g.gist_access_id: |
|
265 | if gistid == g.gist_access_id: | |
266 | GistModel().delete(g) |
|
266 | GistModel().delete(g) | |
267 | else: |
|
267 | else: | |
268 | GistModel().delete(g) |
|
268 | GistModel().delete(g) | |
269 | Session().commit() |
|
269 | Session().commit() | |
270 |
|
270 | |||
271 | def load_resource(self, resource_name, strip=True): |
|
271 | def load_resource(self, resource_name, strip=True): | |
272 | with open(os.path.join(FIXTURES, resource_name), 'rb') as f: |
|
272 | with open(os.path.join(FIXTURES, resource_name), 'rb') as f: | |
273 | source = f.read() |
|
273 | source = f.read() | |
274 | if strip: |
|
274 | if strip: | |
275 | source = source.strip() |
|
275 | source = source.strip() | |
276 |
|
276 | |||
277 | return source |
|
277 | return source | |
278 |
|
278 | |||
279 | def commit_change(self, repo, filename, content, message, vcs_type, |
|
279 | def commit_change(self, repo, filename, content, message, vcs_type, | |
280 | parent=None, newfile=False, author=None): |
|
280 | parent=None, newfile=False, author=None): | |
281 | repo = Repository.get_by_repo_name(repo) |
|
281 | repo = Repository.get_by_repo_name(repo) | |
282 | _cs = parent |
|
282 | _cs = parent | |
283 | if parent is None: |
|
283 | if parent is None: | |
284 | _cs = EmptyChangeset(alias=vcs_type) |
|
284 | _cs = EmptyChangeset(alias=vcs_type) | |
285 | if author is None: |
|
285 | if author is None: | |
286 | author = TEST_USER_ADMIN_LOGIN |
|
286 | author = TEST_USER_ADMIN_LOGIN | |
287 |
|
287 | |||
288 | if newfile: |
|
288 | if newfile: | |
289 | nodes = { |
|
289 | nodes = { | |
290 | filename: { |
|
290 | filename: { | |
291 | 'content': content |
|
291 | 'content': content | |
292 | } |
|
292 | } | |
293 | } |
|
293 | } | |
294 | cs = ScmModel().create_nodes( |
|
294 | cs = ScmModel().create_nodes( | |
295 | user=TEST_USER_ADMIN_LOGIN, repo=repo, |
|
295 | user=TEST_USER_ADMIN_LOGIN, repo=repo, | |
296 | message=message, |
|
296 | message=message, | |
297 | nodes=nodes, |
|
297 | nodes=nodes, | |
298 | parent_cs=_cs, |
|
298 | parent_cs=_cs, | |
299 | author=author, |
|
299 | author=author, | |
300 | ) |
|
300 | ) | |
301 | else: |
|
301 | else: | |
302 | cs = ScmModel().commit_change( |
|
302 | cs = ScmModel().commit_change( | |
303 | repo=repo.scm_instance, repo_name=repo.repo_name, |
|
303 | repo=repo.scm_instance, repo_name=repo.repo_name, | |
304 | cs=parent, user=TEST_USER_ADMIN_LOGIN, |
|
304 | cs=parent, user=TEST_USER_ADMIN_LOGIN, | |
305 | author=author, |
|
305 | author=author, | |
306 | message=message, |
|
306 | message=message, | |
307 | content=content, |
|
307 | content=content, | |
308 | f_path=filename |
|
308 | f_path=filename | |
309 | ) |
|
309 | ) | |
310 | return cs |
|
310 | return cs | |
311 |
|
311 | |||
312 |
|
312 | |||
313 | #============================================================================== |
|
313 | #============================================================================== | |
314 | # Global test environment setup |
|
314 | # Global test environment setup | |
315 | #============================================================================== |
|
315 | #============================================================================== | |
316 |
|
316 | |||
317 | def create_test_env(repos_test_path, config): |
|
317 | def create_test_env(repos_test_path, config): | |
318 | """ |
|
318 | """ | |
319 | Makes a fresh database and |
|
319 | Makes a fresh database and | |
320 | installs a test repository into a tmp dir |
|
320 | installs a test repository into a tmp dir | |
321 | """ |
|
321 | """ | |
322 |
|
322 | |||
323 | # PART ONE create db |
|
323 | # PART ONE create db | |
324 | dbconf = config['sqlalchemy.url'] |
|
324 | dbconf = config['sqlalchemy.url'] | |
325 | log.debug('making test db %s', dbconf) |
|
325 | log.debug('making test db %s', dbconf) | |
326 |
|
326 | |||
327 | # create test dir if it doesn't exist |
|
327 | # create test dir if it doesn't exist | |
328 | if not os.path.isdir(repos_test_path): |
|
328 | if not os.path.isdir(repos_test_path): | |
329 | log.debug('Creating testdir %s', repos_test_path) |
|
329 | log.debug('Creating testdir %s', repos_test_path) | |
330 | os.makedirs(repos_test_path) |
|
330 | os.makedirs(repos_test_path) | |
331 |
|
331 | |||
332 | dbmanage = DbManage( |
|
332 | dbmanage = DbManage(dbconf=dbconf, root=config['here'], | |
333 | tests=True) |
|
333 | tests=True) | |
334 | dbmanage.create_tables(override=True) |
|
334 | dbmanage.create_tables(override=True) | |
335 | # for tests dynamically set new root paths based on generated content |
|
335 | # for tests dynamically set new root paths based on generated content | |
336 | dbmanage.create_settings(dbmanage.config_prompt(repos_test_path)) |
|
336 | dbmanage.create_settings(dbmanage.config_prompt(repos_test_path)) | |
337 | dbmanage.create_default_user() |
|
337 | dbmanage.create_default_user() | |
338 | dbmanage.admin_prompt() |
|
338 | dbmanage.admin_prompt() | |
339 | dbmanage.create_permissions() |
|
339 | dbmanage.create_permissions() | |
340 | dbmanage.populate_default_permissions() |
|
340 | dbmanage.populate_default_permissions() | |
341 | Session().commit() |
|
341 | Session().commit() | |
342 | # PART TWO make test repo |
|
342 | # PART TWO make test repo | |
343 | log.debug('making test vcs repositories') |
|
343 | log.debug('making test vcs repositories') | |
344 |
|
344 | |||
345 | idx_path = config['index_dir'] |
|
345 | idx_path = config['index_dir'] | |
346 | data_path = config['cache_dir'] |
|
346 | data_path = config['cache_dir'] | |
347 |
|
347 | |||
348 | #clean index and data |
|
348 | #clean index and data | |
349 | if idx_path and os.path.exists(idx_path): |
|
349 | if idx_path and os.path.exists(idx_path): | |
350 | log.debug('remove %s', idx_path) |
|
350 | log.debug('remove %s', idx_path) | |
351 | shutil.rmtree(idx_path) |
|
351 | shutil.rmtree(idx_path) | |
352 |
|
352 | |||
353 | if data_path and os.path.exists(data_path): |
|
353 | if data_path and os.path.exists(data_path): | |
354 | log.debug('remove %s', data_path) |
|
354 | log.debug('remove %s', data_path) | |
355 | shutil.rmtree(data_path) |
|
355 | shutil.rmtree(data_path) | |
356 |
|
356 | |||
357 | #CREATE DEFAULT TEST REPOS |
|
357 | #CREATE DEFAULT TEST REPOS | |
358 | tar = tarfile.open(os.path.join(FIXTURES, 'vcs_test_hg.tar.gz')) |
|
358 | tar = tarfile.open(os.path.join(FIXTURES, 'vcs_test_hg.tar.gz')) | |
359 | tar.extractall(os.path.join(TESTS_TMP_PATH, HG_REPO)) |
|
359 | tar.extractall(os.path.join(TESTS_TMP_PATH, HG_REPO)) | |
360 | tar.close() |
|
360 | tar.close() | |
361 |
|
361 | |||
362 | tar = tarfile.open(os.path.join(FIXTURES, 'vcs_test_git.tar.gz')) |
|
362 | tar = tarfile.open(os.path.join(FIXTURES, 'vcs_test_git.tar.gz')) | |
363 | tar.extractall(os.path.join(TESTS_TMP_PATH, GIT_REPO)) |
|
363 | tar.extractall(os.path.join(TESTS_TMP_PATH, GIT_REPO)) | |
364 | tar.close() |
|
364 | tar.close() | |
365 |
|
365 | |||
366 | #LOAD VCS test stuff |
|
366 | #LOAD VCS test stuff | |
367 | from kallithea.tests.vcs import setup_package |
|
367 | from kallithea.tests.vcs import setup_package | |
368 | setup_package() |
|
368 | setup_package() | |
369 |
|
369 | |||
370 |
|
370 | |||
371 | def create_test_index(repo_location, config, full_index): |
|
371 | def create_test_index(repo_location, config, full_index): | |
372 | """ |
|
372 | """ | |
373 | Makes default test index |
|
373 | Makes default test index | |
374 | """ |
|
374 | """ | |
375 |
|
375 | |||
376 | from kallithea.lib.indexers.daemon import WhooshIndexingDaemon |
|
376 | from kallithea.lib.indexers.daemon import WhooshIndexingDaemon | |
377 | from kallithea.lib.pidlock import DaemonLock, LockHeld |
|
377 | from kallithea.lib.pidlock import DaemonLock, LockHeld | |
378 |
|
378 | |||
379 | index_location = os.path.join(config['index_dir']) |
|
379 | index_location = os.path.join(config['index_dir']) | |
380 | if not os.path.exists(index_location): |
|
380 | if not os.path.exists(index_location): | |
381 | os.makedirs(index_location) |
|
381 | os.makedirs(index_location) | |
382 |
|
382 | |||
383 | try: |
|
383 | try: | |
384 | l = DaemonLock(file_=os.path.join(dirname(index_location), 'make_index.lock')) |
|
384 | l = DaemonLock(file_=os.path.join(dirname(index_location), 'make_index.lock')) | |
385 | WhooshIndexingDaemon(index_location=index_location, |
|
385 | WhooshIndexingDaemon(index_location=index_location, | |
386 | repo_location=repo_location) \ |
|
386 | repo_location=repo_location) \ | |
387 | .run(full_index=full_index) |
|
387 | .run(full_index=full_index) | |
388 | l.release() |
|
388 | l.release() | |
389 | except LockHeld: |
|
389 | except LockHeld: | |
390 | pass |
|
390 | pass |
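
Before the config diff below, a quick orientation on the fixture helpers above: create_gist, destroy_gists and commit_change are meant to be driven from individual tests. A minimal sketch, assuming the enclosing class is the Fixture helper from kallithea.tests.fixture; the test name and assertion are illustrative and not part of this changeset:

    from kallithea.tests.fixture import Fixture

    fixture = Fixture()

    def test_gist_roundtrip():
        # create_gist() fills in defaults (public gist, admin owner, unlimited
        # lifetime) and only needs the fields we want to override
        gist = fixture.create_gist(description=u'example-gist')
        try:
            # gist_access_id is the identifier destroy_gists() filters on
            assert gist.gist_access_id
        finally:
            fixture.destroy_gists(gist.gist_access_id)
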
@@ -1,624 +1,623 b'' | |||||
1 | ################################################################################ |
|
1 | ################################################################################ | |
2 | ################################################################################ |
|
2 | ################################################################################ | |
3 | # Kallithea - config for tests: # |
|
3 | # Kallithea - config for tests: # | |
4 | # sqlalchemy and kallithea_test.sqlite # |
|
4 | # sqlalchemy and kallithea_test.sqlite # | |
5 | # custom logging # |
|
5 | # custom logging # | |
6 | # # |
|
6 | # # | |
7 | # The %(here)s variable will be replaced with the parent directory of this file# |
|
7 | # The %(here)s variable will be replaced with the parent directory of this file# | |
8 | ################################################################################ |
|
8 | ################################################################################ | |
9 | ################################################################################ |
|
9 | ################################################################################ | |
10 |
|
10 | |||
11 | [DEFAULT] |
|
11 | [DEFAULT] | |
12 |
|
12 | |||
13 | ################################################################################ |
|
13 | ################################################################################ | |
14 | ## Email settings ## |
|
14 | ## Email settings ## | |
15 | ## ## |
|
15 | ## ## | |
16 | ## Refer to the documentation ("Email settings") for more details. ## |
|
16 | ## Refer to the documentation ("Email settings") for more details. ## | |
17 | ## ## |
|
17 | ## ## | |
18 | ## It is recommended to use a valid sender address that passes access ## |
|
18 | ## It is recommended to use a valid sender address that passes access ## | |
19 | ## validation and spam filtering in mail servers. ## |
|
19 | ## validation and spam filtering in mail servers. ## | |
20 | ################################################################################ |
|
20 | ################################################################################ | |
21 |
|
21 | |||
22 | ## 'From' header for application emails. You can optionally add a name. |
|
22 | ## 'From' header for application emails. You can optionally add a name. | |
23 | ## Default: |
|
23 | ## Default: | |
24 | #app_email_from = Kallithea |
|
24 | #app_email_from = Kallithea | |
25 | ## Examples: |
|
25 | ## Examples: | |
26 | #app_email_from = Kallithea <kallithea-noreply@example.com> |
|
26 | #app_email_from = Kallithea <kallithea-noreply@example.com> | |
27 | #app_email_from = kallithea-noreply@example.com |
|
27 | #app_email_from = kallithea-noreply@example.com | |
28 |
|
28 | |||
29 | ## Subject prefix for application emails. |
|
29 | ## Subject prefix for application emails. | |
30 | ## A space between this prefix and the real subject is automatically added. |
|
30 | ## A space between this prefix and the real subject is automatically added. | |
31 | ## Default: |
|
31 | ## Default: | |
32 | #email_prefix = |
|
32 | #email_prefix = | |
33 | ## Example: |
|
33 | ## Example: | |
34 | #email_prefix = [Kallithea] |
|
34 | #email_prefix = [Kallithea] | |
35 |
|
35 | |||
36 | ## Recipients for error emails and fallback recipients of application mails. |
|
36 | ## Recipients for error emails and fallback recipients of application mails. | |
37 | ## Multiple addresses can be specified, space-separated. |
|
37 | ## Multiple addresses can be specified, space-separated. | |
38 | ## Only addresses are allowed, do not add any name part. |
|
38 | ## Only addresses are allowed, do not add any name part. | |
39 | ## Default: |
|
39 | ## Default: | |
40 | #email_to = |
|
40 | #email_to = | |
41 | ## Examples: |
|
41 | ## Examples: | |
42 | #email_to = admin@example.com |
|
42 | #email_to = admin@example.com | |
43 | #email_to = admin@example.com another_admin@example.com |
|
43 | #email_to = admin@example.com another_admin@example.com | |
44 | email_to = |
|
44 | email_to = | |
45 |
|
45 | |||
46 | ## 'From' header for error emails. You can optionally add a name. |
|
46 | ## 'From' header for error emails. You can optionally add a name. | |
47 | ## Default: (none) |
|
47 | ## Default: (none) | |
48 | ## Examples: |
|
48 | ## Examples: | |
49 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> |
|
49 | #error_email_from = Kallithea Errors <kallithea-noreply@example.com> | |
50 | #error_email_from = kallithea_errors@example.com |
|
50 | #error_email_from = kallithea_errors@example.com | |
51 | error_email_from = |
|
51 | error_email_from = | |
52 |
|
52 | |||
53 | ## SMTP server settings |
|
53 | ## SMTP server settings | |
54 | ## If specifying credentials, make sure to use secure connections. |
|
54 | ## If specifying credentials, make sure to use secure connections. | |
55 | ## Default: Send unencrypted unauthenticated mails to the specified smtp_server. |
|
55 | ## Default: Send unencrypted unauthenticated mails to the specified smtp_server. | |
56 | ## For "SSL", use smtp_use_ssl = true and smtp_port = 465. |
|
56 | ## For "SSL", use smtp_use_ssl = true and smtp_port = 465. | |
57 | ## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587. |
|
57 | ## For "STARTTLS", use smtp_use_tls = true and smtp_port = 587. | |
58 | smtp_server = |
|
58 | smtp_server = | |
59 | #smtp_username = |
|
59 | #smtp_username = | |
60 | #smtp_password = |
|
60 | #smtp_password = | |
61 | smtp_port = |
|
61 | smtp_port = | |
62 | #smtp_use_ssl = false |
|
62 | #smtp_use_ssl = false | |
63 | #smtp_use_tls = false |
|
63 | #smtp_use_tls = false | |
64 |
|
64 | |||
65 | [server:main] |
|
65 | [server:main] | |
66 | ## Gearbox default web server ## |
|
66 | ## Gearbox default web server ## | |
67 | #use = egg:gearbox#wsgiref |
|
67 | #use = egg:gearbox#wsgiref | |
68 | ## nr of worker threads to spawn |
|
68 | ## nr of worker threads to spawn | |
69 | #threadpool_workers = 1 |
|
69 | #threadpool_workers = 1 | |
70 | ## max requests before thread respawn |
|
70 | ## max requests before thread respawn | |
71 | #threadpool_max_requests = 100 |
|
71 | #threadpool_max_requests = 100 | |
72 | ## option to use threads instead of processes |
|
72 | ## option to use threads instead of processes | |
73 | #use_threadpool = true |
|
73 | #use_threadpool = true | |
74 |
|
74 | |||
75 | ## Gearbox gevent web server ## |
|
75 | ## Gearbox gevent web server ## | |
76 | #use = egg:gearbox#gevent |
|
76 | #use = egg:gearbox#gevent | |
77 |
|
77 | |||
78 | ## WAITRESS ## |
|
78 | ## WAITRESS ## | |
79 | use = egg:waitress#main |
|
79 | use = egg:waitress#main | |
80 | ## number of worker threads |
|
80 | ## number of worker threads | |
81 | threads = 1 |
|
81 | threads = 1 | |
82 | ## MAX BODY SIZE 100GB |
|
82 | ## MAX BODY SIZE 100GB | |
83 | max_request_body_size = 107374182400 |
|
83 | max_request_body_size = 107374182400 | |
84 | ## use poll instead of select, fixes fd limits, may not work on old |
|
84 | ## use poll instead of select, fixes fd limits, may not work on old | |
85 | ## windows systems. |
|
85 | ## windows systems. | |
86 | #asyncore_use_poll = True |
|
86 | #asyncore_use_poll = True | |
87 |
|
87 | |||
88 | ## GUNICORN ## |
|
88 | ## GUNICORN ## | |
89 | #use = egg:gunicorn#main |
|
89 | #use = egg:gunicorn#main | |
90 | ## number of process workers. You must set `instance_id = *` when this option |
|
90 | ## number of process workers. You must set `instance_id = *` when this option | |
91 | ## is set to more than one worker |
|
91 | ## is set to more than one worker | |
92 | #workers = 1 |
|
92 | #workers = 1 | |
93 | ## process name |
|
93 | ## process name | |
94 | #proc_name = kallithea |
|
94 | #proc_name = kallithea | |
95 | ## type of worker class, one of sync, eventlet, gevent, tornado |
|
95 | ## type of worker class, one of sync, eventlet, gevent, tornado | |
96 | ## for bigger setups, a worker class other than sync is recommended |
|
96 | ## for bigger setups, a worker class other than sync is recommended | |
97 | #worker_class = sync |
|
97 | #worker_class = sync | |
98 | #max_requests = 1000 |
|
98 | #max_requests = 1000 | |
99 | ## amount of time a worker can handle a request before it gets killed and |
|
99 | ## amount of time a worker can handle a request before it gets killed and | |
100 | ## restarted |
|
100 | ## restarted | |
101 | #timeout = 3600 |
|
101 | #timeout = 3600 | |
102 |
|
102 | |||
103 | ## UWSGI ## |
|
103 | ## UWSGI ## | |
104 | ## run with uwsgi --ini-paste-logged <inifile.ini> |
|
104 | ## run with uwsgi --ini-paste-logged <inifile.ini> | |
105 | #[uwsgi] |
|
105 | #[uwsgi] | |
106 | #socket = /tmp/uwsgi.sock |
|
106 | #socket = /tmp/uwsgi.sock | |
107 | #master = true |
|
107 | #master = true | |
108 | #http = 127.0.0.1:5000 |
|
108 | #http = 127.0.0.1:5000 | |
109 |
|
109 | |||
110 | ## run as a daemon and redirect all output to a file |
|
110 | ## run as a daemon and redirect all output to a file | |
111 | #daemonize = ./uwsgi_kallithea.log |
|
111 | #daemonize = ./uwsgi_kallithea.log | |
112 |
|
112 | |||
113 | ## master process PID |
|
113 | ## master process PID | |
114 | #pidfile = ./uwsgi_kallithea.pid |
|
114 | #pidfile = ./uwsgi_kallithea.pid | |
115 |
|
115 | |||
116 | ## stats server with worker statistics, use uwsgitop |
|
116 | ## stats server with worker statistics, use uwsgitop | |
117 | ## for monitoring, `uwsgitop 127.0.0.1:1717` |
|
117 | ## for monitoring, `uwsgitop 127.0.0.1:1717` | |
118 | #stats = 127.0.0.1:1717 |
|
118 | #stats = 127.0.0.1:1717 | |
119 | #memory-report = true |
|
119 | #memory-report = true | |
120 |
|
120 | |||
121 | ## log 5XX errors |
|
121 | ## log 5XX errors | |
122 | #log-5xx = true |
|
122 | #log-5xx = true | |
123 |
|
123 | |||
124 | ## Set the socket listen queue size. |
|
124 | ## Set the socket listen queue size. | |
125 | #listen = 256 |
|
125 | #listen = 256 | |
126 |
|
126 | |||
127 | ## Gracefully reload workers after the specified number of managed requests |
|
127 | ## Gracefully reload workers after the specified number of managed requests | |
128 | ## (avoid memory leaks). |
|
128 | ## (avoid memory leaks). | |
129 | #max-requests = 1000 |
|
129 | #max-requests = 1000 | |
130 |
|
130 | |||
131 | ## enable large buffers |
|
131 | ## enable large buffers | |
132 | #buffer-size = 65535 |
|
132 | #buffer-size = 65535 | |
133 |
|
133 | |||
134 | ## socket and http timeouts ## |
|
134 | ## socket and http timeouts ## | |
135 | #http-timeout = 3600 |
|
135 | #http-timeout = 3600 | |
136 | #socket-timeout = 3600 |
|
136 | #socket-timeout = 3600 | |
137 |
|
137 | |||
138 | ## Log requests slower than the specified number of milliseconds. |
|
138 | ## Log requests slower than the specified number of milliseconds. | |
139 | #log-slow = 10 |
|
139 | #log-slow = 10 | |
140 |
|
140 | |||
141 | ## Exit if no app can be loaded. |
|
141 | ## Exit if no app can be loaded. | |
142 | #need-app = true |
|
142 | #need-app = true | |
143 |
|
143 | |||
144 | ## Set lazy mode (load apps in workers instead of master). |
|
144 | ## Set lazy mode (load apps in workers instead of master). | |
145 | #lazy = true |
|
145 | #lazy = true | |
146 |
|
146 | |||
147 | ## scaling ## |
|
147 | ## scaling ## | |
148 | ## set the cheaper algorithm to use; if not set, the default will be used |
|
148 | ## set the cheaper algorithm to use; if not set, the default will be used | |
149 | #cheaper-algo = spare |
|
149 | #cheaper-algo = spare | |
150 |
|
150 | |||
151 | ## minimum number of workers to keep at all times |
|
151 | ## minimum number of workers to keep at all times | |
152 | #cheaper = 1 |
|
152 | #cheaper = 1 | |
153 |
|
153 | |||
154 | ## number of workers to spawn at startup |
|
154 | ## number of workers to spawn at startup | |
155 | #cheaper-initial = 1 |
|
155 | #cheaper-initial = 1 | |
156 |
|
156 | |||
157 | ## maximum number of workers that can be spawned |
|
157 | ## maximum number of workers that can be spawned | |
158 | #workers = 4 |
|
158 | #workers = 4 | |
159 |
|
159 | |||
160 | ## how many workers should be spawned at a time |
|
160 | ## how many workers should be spawned at a time | |
161 | #cheaper-step = 1 |
|
161 | #cheaper-step = 1 | |
162 |
|
162 | |||
163 | ## COMMON ## |
|
163 | ## COMMON ## | |
164 | host = 127.0.0.1 |
|
164 | host = 127.0.0.1 | |
165 | #port = 5000 |
|
165 | #port = 5000 | |
166 | port = 4999 |
|
166 | port = 4999 | |
167 |
|
167 | |||
168 | ## middleware for hosting the WSGI application under a URL prefix |
|
168 | ## middleware for hosting the WSGI application under a URL prefix | |
169 | #[filter:proxy-prefix] |
|
169 | #[filter:proxy-prefix] | |
170 | #use = egg:PasteDeploy#prefix |
|
170 | #use = egg:PasteDeploy#prefix | |
171 | #prefix = /<your-prefix> |
|
171 | #prefix = /<your-prefix> | |
172 |
|
172 | |||
173 | [app:main] |
|
173 | [app:main] | |
174 | use = egg:kallithea |
|
174 | use = egg:kallithea | |
175 | ## enable proxy prefix middleware |
|
175 | ## enable proxy prefix middleware | |
176 | #filter-with = proxy-prefix |
|
176 | #filter-with = proxy-prefix | |
177 |
|
177 | |||
178 | full_stack = true |
|
178 | full_stack = true | |
179 | static_files = true |
|
179 | static_files = true | |
180 | ## Available Languages: |
|
180 | ## Available Languages: | |
181 | ## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW |
|
181 | ## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW | |
182 | lang = |
|
182 | lang = | |
183 | #cache_dir = %(here)s/data |
|
183 | #cache_dir = %(here)s/data | |
184 | cache_dir = %(here)s/../../data/test/cache |
|
184 | cache_dir = %(here)s/../../data/test/cache | |
185 | #index_dir = %(here)s/data/index |
|
185 | #index_dir = %(here)s/data/index | |
186 | index_dir = %(here)s/../../data/test/index |
|
186 | index_dir = %(here)s/../../data/test/index | |
187 |
|
187 | |||
188 | ## perform a full repository scan on each server start; this should be |
|
188 | ## perform a full repository scan on each server start; this should be | |
189 | ## set to false after the first startup to allow faster server restarts. |
|
189 | ## set to false after the first startup to allow faster server restarts. | |
190 | initial_repo_scan = false |
|
190 | initial_repo_scan = false | |
191 |
|
191 | |||
192 | ## uncomment and set this path to use archive download cache |
|
192 | ## uncomment and set this path to use archive download cache | |
193 | #archive_cache_dir = %(here)s/tarballcache |
|
193 | #archive_cache_dir = %(here)s/tarballcache | |
194 | archive_cache_dir = %(here)s/../../data/test/tarballcache |
|
194 | archive_cache_dir = %(here)s/../../data/test/tarballcache | |
195 |
|
195 | |||
196 | ## change this to a unique ID for security |
|
196 | ## change this to a unique ID for security | |
197 | app_instance_uuid = test |
|
197 | app_instance_uuid = test | |
198 |
|
198 | |||
199 | ## cut off limit for large diffs (size in bytes) |
|
199 | ## cut off limit for large diffs (size in bytes) | |
200 | cut_off_limit = 256000 |
|
200 | cut_off_limit = 256000 | |
201 |
|
201 | |||
202 | ## force https in Kallithea, fixes https redirects, assumes it's always https |
|
202 | ## force https in Kallithea, fixes https redirects, assumes it's always https | |
203 | force_https = false |
|
203 | force_https = false | |
204 |
|
204 | |||
205 | ## use Strict-Transport-Security headers |
|
205 | ## use Strict-Transport-Security headers | |
206 | use_htsts = false |
|
206 | use_htsts = false | |
207 |
|
207 | |||
208 | ## number of commits that stats will parse on each iteration |
|
208 | ## number of commits that stats will parse on each iteration | |
209 | commit_parse_limit = 25 |
|
209 | commit_parse_limit = 25 | |
210 |
|
210 | |||
211 | ## path to git executable |
|
211 | ## path to git executable | |
212 | git_path = git |
|
212 | git_path = git | |
213 |
|
213 | |||
214 | ## git rev filter option; --all is the default filter. If you need to |
|
214 | ## git rev filter option; --all is the default filter. If you need to | |
215 | ## hide all refs in the changelog, switch this to --branches --tags |
|
215 | ## hide all refs in the changelog, switch this to --branches --tags | |
216 | #git_rev_filter = --branches --tags |
|
216 | #git_rev_filter = --branches --tags | |
217 |
|
217 | |||
218 | ## RSS feed options |
|
218 | ## RSS feed options | |
219 | rss_cut_off_limit = 256000 |
|
219 | rss_cut_off_limit = 256000 | |
220 | rss_items_per_page = 10 |
|
220 | rss_items_per_page = 10 | |
221 | rss_include_diff = false |
|
221 | rss_include_diff = false | |
222 |
|
222 | |||
223 | ## options for showing and identifying changesets |
|
223 | ## options for showing and identifying changesets | |
224 | show_sha_length = 12 |
|
224 | show_sha_length = 12 | |
225 | #show_revision_number = false |
|
225 | #show_revision_number = false | |
226 | show_revision_number = true |
|
226 | show_revision_number = true | |
227 |
|
227 | |||
228 | ## Canonical URL to use when creating full URLs in UI and texts. |
|
228 | ## Canonical URL to use when creating full URLs in UI and texts. | |
229 | ## Useful when the site is available under different names or protocols. |
|
229 | ## Useful when the site is available under different names or protocols. | |
230 | ## Defaults to what is provided in the WSGI environment. |
|
230 | ## Defaults to what is provided in the WSGI environment. | |
231 | #canonical_url = https://kallithea.example.com/repos |
|
231 | #canonical_url = https://kallithea.example.com/repos | |
232 |
|
232 | |||
233 | ## gist URL alias, used to create nicer URLs for gists. This should be a |
|
233 | ## gist URL alias, used to create nicer URLs for gists. This should be a | |
234 | ## URL that rewrites to _admin/gists/<gistid>. |
|
234 | ## URL that rewrites to _admin/gists/<gistid>. | |
235 | ## example: http://gist.example.com/{gistid}. Empty means use the internal |
|
235 | ## example: http://gist.example.com/{gistid}. Empty means use the internal | |
236 | ## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid> |
|
236 | ## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid> | |
237 | gist_alias_url = |
|
237 | gist_alias_url = | |
238 |
|
238 | |||
239 | ## whitelist of API-enabled controllers. This allows adding a list of |
|
239 | ## whitelist of API-enabled controllers. This allows adding a list of | |
240 | ## controllers to which access will be enabled by api_key, e.g. to enable |
|
240 | ## controllers to which access will be enabled by api_key, e.g. to enable | |
241 | ## API access to raw_files put `FilesController:raw`; to enable access to patches |
|
241 | ## API access to raw_files put `FilesController:raw`; to enable access to patches | |
242 | ## add `ChangesetController:changeset_patch`. This list should be "," separated. |
|
242 | ## add `ChangesetController:changeset_patch`. This list should be "," separated. | |
243 | ## Syntax is <ControllerClass>:<function>. Check debug logs for generated names. |
|
243 | ## Syntax is <ControllerClass>:<function>. Check debug logs for generated names. | |
244 | ## Recommended settings below are commented out: |
|
244 | ## Recommended settings below are commented out: | |
245 | api_access_controllers_whitelist = |
|
245 | api_access_controllers_whitelist = | |
246 | # ChangesetController:changeset_patch, |
|
246 | # ChangesetController:changeset_patch, | |
247 | # ChangesetController:changeset_raw, |
|
247 | # ChangesetController:changeset_raw, | |
248 | # FilesController:raw, |
|
248 | # FilesController:raw, | |
249 | # FilesController:archivefile |
|
249 | # FilesController:archivefile | |
250 |
|
250 | |||
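
A note on the whitelist above: when one of those entries is uncommented, the matching view can be fetched with an API key instead of a browser session. The URL layout and the api_key query parameter below are assumptions based on Kallithea's API-key convention, not something stated in this diff; a rough sketch only:

    # hypothetical repository, revision and token - adjust to your setup
    repo = 'mygroup/myrepo'
    rev = 'tip'
    api_key = 'SECRET_TOKEN'

    # FilesController:raw serves a single file at a given revision
    raw_url = ('https://kallithea.example.com/%s/raw/%s/setup.py?api_key=%s'
               % (repo, rev, api_key))
    print(raw_url)
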
251 | ## default encoding used to convert from and to unicode |
|
251 | ## default encoding used to convert from and to unicode | |
252 | ## can also be a comma separated list of encodings in case of mixed encodings |
|
252 | ## can also be a comma separated list of encodings in case of mixed encodings | |
253 | default_encoding = utf8 |
|
253 | default_encoding = utf8 | |
254 |
|
254 | |||
255 | ## issue tracker for Kallithea (leave blank to disable, absent for default) |
|
255 | ## issue tracker for Kallithea (leave blank to disable, absent for default) | |
256 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues |
|
256 | #bugtracker = https://bitbucket.org/conservancy/kallithea/issues | |
257 |
|
257 | |||
258 | ## issue tracking mapping for commit messages |
|
258 | ## issue tracking mapping for commit messages | |
259 | ## comment out issue_pat, issue_server, issue_prefix to enable |
|
259 | ## comment out issue_pat, issue_server, issue_prefix to enable | |
260 |
|
260 | |||
261 | ## pattern to get the issues from commit messages |
|
261 | ## pattern to get the issues from commit messages | |
262 | ## default one used here is #<numbers> with a regex non-capturing group for `#` |
|
262 | ## default one used here is #<numbers> with a regex non-capturing group for `#` | |
263 | ## {id} will be all groups matched from this pattern |
|
263 | ## {id} will be all groups matched from this pattern | |
264 |
|
264 | |||
265 | issue_pat = (?:\s*#)(\d+) |
|
265 | issue_pat = (?:\s*#)(\d+) | |
266 |
|
266 | |||
267 | ## server URL to the issue; each {id} will be replaced with the match |
|
267 | ## server URL to the issue; each {id} will be replaced with the match | |
268 | ## fetched from the regex, and {repo} is replaced with the full repository name |
|
268 | ## fetched from the regex, and {repo} is replaced with the full repository name | |
269 | ## including groups. {repo_name} is replaced with just the name of the repo |
|
269 | ## including groups. {repo_name} is replaced with just the name of the repo | |
270 |
|
270 | |||
271 | issue_server_link = https://issues.example.com/{repo}/issue/{id} |
|
271 | issue_server_link = https://issues.example.com/{repo}/issue/{id} | |
272 |
|
272 | |||
273 | ## prefix to add to the link to indicate it's a URL |
|
273 | ## prefix to add to the link to indicate it's a URL | |
274 | ## #314 will be replaced by <issue_prefix><id> |
|
274 | ## #314 will be replaced by <issue_prefix><id> | |
275 |
|
275 | |||
276 | issue_prefix = # |
|
276 | issue_prefix = # | |
277 |
|
277 | |||
278 | ## issue_pat, issue_server_link, issue_prefix can have suffixes to specify |
|
278 | ## issue_pat, issue_server_link, issue_prefix can have suffixes to specify | |
279 | ## multiple patterns, for other issue servers, wikis or others |
|
279 | ## multiple patterns, for other issue servers, wikis or others | |
280 | ## below is an example of how to create a wiki pattern |
|
280 | ## below is an example of how to create a wiki pattern | |
281 | # wiki-some-id -> https://wiki.example.com/some-id |
|
281 | # wiki-some-id -> https://wiki.example.com/some-id | |
282 |
|
282 | |||
283 | #issue_pat_wiki = (?:wiki-)(.+) |
|
283 | #issue_pat_wiki = (?:wiki-)(.+) | |
284 | #issue_server_link_wiki = https://wiki.example.com/{id} |
|
284 | #issue_server_link_wiki = https://wiki.example.com/{id} | |
285 | #issue_prefix_wiki = WIKI- |
|
285 | #issue_prefix_wiki = WIKI- | |
286 |
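
As a reading aid for the three issue-tracker settings above: issue_pat extracts the issue number from a commit message, issue_server_link receives it as {id}, and issue_prefix is what the rendered link text starts with. A rough sketch of the substitution in plain Python (illustrative only, not Kallithea's actual helper code; the repository name is made up):

    import re

    issue_pat = r'(?:\s*#)(\d+)'
    issue_server_link = 'https://issues.example.com/{repo}/issue/{id}'
    issue_prefix = '#'

    message = 'fix login redirect, closes #314'
    for match in re.finditer(issue_pat, message):
        issue_id = match.group(1)
        url = (issue_server_link
               .replace('{repo}', 'mygroup/myrepo')
               .replace('{id}', issue_id))
        # the commit message ends up linking "#314" (issue_prefix + id) to url
        print(url, issue_prefix + issue_id)
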
|
286 | |||
287 | ## alternative HTTP response code for failed authentication. The default HTTP |
|
287 | ## alternative HTTP response code for failed authentication. The default HTTP | |
288 | ## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with |
|
288 | ## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble with | |
289 | ## handling that. Set this variable to 403 to return HTTPForbidden instead |
|
289 | ## handling that. Set this variable to 403 to return HTTPForbidden instead | |
290 | auth_ret_code = |
|
290 | auth_ret_code = | |
291 |
|
291 | |||
292 | ## locking return code. When repository is locked return this HTTP code. 2XX |
|
292 | ## locking return code. When repository is locked return this HTTP code. 2XX | |
293 | ## codes don't break the transactions while 4XX codes do |
|
293 | ## codes don't break the transactions while 4XX codes do | |
294 | lock_ret_code = 423 |
|
294 | lock_ret_code = 423 | |
295 |
|
295 | |||
296 | ## allows changing the repository location in the settings page |
|
296 | ## allows changing the repository location in the settings page | |
297 | allow_repo_location_change = True |
|
297 | allow_repo_location_change = True | |
298 |
|
298 | |||
299 | ## allows setting up custom hooks in the settings page |
|
299 | ## allows setting up custom hooks in the settings page | |
300 | allow_custom_hooks_settings = True |
|
300 | allow_custom_hooks_settings = True | |
301 |
|
301 | |||
302 | ## extra extensions for indexing, space separated and without the leading '.'. |
|
302 | ## extra extensions for indexing, space separated and without the leading '.'. | |
303 | # index.extensions = |
|
303 | # index.extensions = | |
304 | # gemfile |
|
304 | # gemfile | |
305 | # lock |
|
305 | # lock | |
306 |
|
306 | |||
307 | ## extra filenames for indexing, space separated |
|
307 | ## extra filenames for indexing, space separated | |
308 | # index.filenames = |
|
308 | # index.filenames = | |
309 | # .dockerignore |
|
309 | # .dockerignore | |
310 | # .editorconfig |
|
310 | # .editorconfig | |
311 | # INSTALL |
|
311 | # INSTALL | |
312 | # CHANGELOG |
|
312 | # CHANGELOG | |
313 |
|
313 | |||
314 | #################################### |
|
314 | #################################### | |
315 | ### CELERY CONFIG #### |
|
315 | ### CELERY CONFIG #### | |
316 | #################################### |
|
316 | #################################### | |
317 |
|
317 | |||
318 | use_celery = false |
|
318 | use_celery = false | |
319 |
|
319 | |||
320 | ## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq: |
|
320 | ## Example: connect to the virtual host 'rabbitmqhost' on localhost as rabbitmq: | |
321 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost |
|
321 | broker.url = amqp://rabbitmq:qewqew@localhost:5672/rabbitmqhost | |
322 |
|
322 | |||
323 | celery.imports = kallithea.lib.celerylib.tasks |
|
323 | celery.imports = kallithea.lib.celerylib.tasks | |
324 | celery.accept.content = pickle |
|
324 | celery.accept.content = pickle | |
325 | celery.result.backend = amqp |
|
325 | celery.result.backend = amqp | |
326 | celery.result.dburi = amqp:// |
|
326 | celery.result.dburi = amqp:// | |
327 | celery.result.serializer = json |
|
327 | celery.result.serializer = json | |
328 |
|
328 | |||
329 | #celery.send.task.error.emails = true |
|
329 | #celery.send.task.error.emails = true | |
330 | #celery.amqp.task.result.expires = 18000 |
|
330 | #celery.amqp.task.result.expires = 18000 | |
331 |
|
331 | |||
332 | celeryd.concurrency = 2 |
|
332 | celeryd.concurrency = 2 | |
333 | celeryd.max.tasks.per.child = 1 |
|
333 | celeryd.max.tasks.per.child = 1 | |
334 |
|
334 | |||
335 | ## If true, tasks will never be sent to the queue, but executed locally instead. |
|
335 | ## If true, tasks will never be sent to the queue, but executed locally instead. | |
336 | celery.always.eager = false |
|
336 | celery.always.eager = false | |
337 |
|
337 | |||
338 | #################################### |
|
338 | #################################### | |
339 | ### BEAKER CACHE #### |
|
339 | ### BEAKER CACHE #### | |
340 | #################################### |
|
340 | #################################### | |
341 |
|
341 | |||
342 | #beaker.cache.data_dir = %(here)s/data/cache/data |
|
342 | #beaker.cache.data_dir = %(here)s/data/cache/data | |
343 | beaker.cache.data_dir = %(here)s/../../data/test/cache/data |
|
343 | beaker.cache.data_dir = %(here)s/../../data/test/cache/data | |
344 | #beaker.cache.lock_dir = %(here)s/data/cache/lock |
|
344 | #beaker.cache.lock_dir = %(here)s/data/cache/lock | |
345 | beaker.cache.lock_dir = %(here)s/../../data/test/cache/lock |
|
345 | beaker.cache.lock_dir = %(here)s/../../data/test/cache/lock | |
346 |
|
346 | |||
347 | beaker.cache.regions = short_term,long_term,sql_cache_short |
|
347 | beaker.cache.regions = short_term,long_term,sql_cache_short | |
348 |
|
348 | |||
349 | beaker.cache.short_term.type = memory |
|
349 | beaker.cache.short_term.type = memory | |
350 | beaker.cache.short_term.expire = 60 |
|
350 | beaker.cache.short_term.expire = 60 | |
351 | beaker.cache.short_term.key_length = 256 |
|
351 | beaker.cache.short_term.key_length = 256 | |
352 |
|
352 | |||
353 | beaker.cache.long_term.type = memory |
|
353 | beaker.cache.long_term.type = memory | |
354 | beaker.cache.long_term.expire = 36000 |
|
354 | beaker.cache.long_term.expire = 36000 | |
355 | beaker.cache.long_term.key_length = 256 |
|
355 | beaker.cache.long_term.key_length = 256 | |
356 |
|
356 | |||
357 | beaker.cache.sql_cache_short.type = memory |
|
357 | beaker.cache.sql_cache_short.type = memory | |
358 | #beaker.cache.sql_cache_short.expire = 10 |
|
358 | #beaker.cache.sql_cache_short.expire = 10 | |
359 | beaker.cache.sql_cache_short.expire = 1 |
|
359 | beaker.cache.sql_cache_short.expire = 1 | |
360 | beaker.cache.sql_cache_short.key_length = 256 |
|
360 | beaker.cache.sql_cache_short.key_length = 256 | |
361 |
|
361 | |||
362 | #################################### |
|
362 | #################################### | |
363 | ### BEAKER SESSION #### |
|
363 | ### BEAKER SESSION #### | |
364 | #################################### |
|
364 | #################################### | |
365 |
|
365 | |||
366 | ## Name of session cookie. Should be unique for a given host and path, even when running |
|
366 | ## Name of session cookie. Should be unique for a given host and path, even when running | |
367 | ## on different ports. Otherwise, cookie sessions will be shared and messed up. |
|
367 | ## on different ports. Otherwise, cookie sessions will be shared and messed up. | |
368 | beaker.session.key = kallithea |
|
368 | beaker.session.key = kallithea | |
369 | ## Sessions should always only be accessible by the browser, not directly by JavaScript. |
|
369 | ## Sessions should always only be accessible by the browser, not directly by JavaScript. | |
370 | beaker.session.httponly = true |
|
370 | beaker.session.httponly = true | |
371 | ## Session lifetime. 2592000 seconds is 30 days. |
|
371 | ## Session lifetime. 2592000 seconds is 30 days. | |
372 | beaker.session.timeout = 2592000 |
|
372 | beaker.session.timeout = 2592000 | |
373 |
|
373 | |||
374 | ## Server secret used with HMAC to ensure integrity of cookies. |
|
374 | ## Server secret used with HMAC to ensure integrity of cookies. | |
375 | beaker.session.secret = {74e0cd75-b339-478b-b129-07dd221def1f} |
|
375 | beaker.session.secret = {74e0cd75-b339-478b-b129-07dd221def1f} | |
376 | ## Further, encrypt the data with AES. |
|
376 | ## Further, encrypt the data with AES. | |
377 | #beaker.session.encrypt_key = <key_for_encryption> |
|
377 | #beaker.session.encrypt_key = <key_for_encryption> | |
378 | #beaker.session.validate_key = <validation_key> |
|
378 | #beaker.session.validate_key = <validation_key> | |
379 |
|
379 | |||
380 | ## Type of storage used for the session, current types are |
|
380 | ## Type of storage used for the session, current types are | |
381 | ## dbm, file, memcached, database, and memory. |
|
381 | ## dbm, file, memcached, database, and memory. | |
382 |
|
382 | |||
383 | ## File system storage of session data. (default) |
|
383 | ## File system storage of session data. (default) | |
384 | #beaker.session.type = file |
|
384 | #beaker.session.type = file | |
385 |
|
385 | |||
386 | ## Cookie only, store all session data inside the cookie. Requires secure secrets. |
|
386 | ## Cookie only, store all session data inside the cookie. Requires secure secrets. | |
387 | #beaker.session.type = cookie |
|
387 | #beaker.session.type = cookie | |
388 |
|
388 | |||
389 | ## Database storage of session data. |
|
389 | ## Database storage of session data. | |
390 | #beaker.session.type = ext:database |
|
390 | #beaker.session.type = ext:database | |
391 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea |
|
391 | #beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea | |
392 | #beaker.session.table_name = db_session |
|
392 | #beaker.session.table_name = db_session | |
393 |
|
393 | |||
394 | ############################ |
|
394 | ############################ | |
395 | ## ERROR HANDLING SYSTEMS ## |
|
395 | ## ERROR HANDLING SYSTEMS ## | |
396 | ############################ |
|
396 | ############################ | |
397 |
|
397 | |||
398 | # Propagate email settings to ErrorReporter of TurboGears2 |
|
398 | # Propagate email settings to ErrorReporter of TurboGears2 | |
399 | # You do not normally need to change these lines |
|
399 | # You do not normally need to change these lines | |
400 | get trace_errors.error_email = email_to |
|
400 | get trace_errors.error_email = email_to | |
401 | get trace_errors.smtp_server = smtp_server |
|
401 | get trace_errors.smtp_server = smtp_server | |
402 | get trace_errors.smtp_port = smtp_port |
|
402 | get trace_errors.smtp_port = smtp_port | |
403 | get trace_errors.from_address = error_email_from |
|
403 | get trace_errors.from_address = error_email_from | |
404 |
|
404 | |||
405 | #################### |
|
405 | #################### | |
406 | ### [appenlight] ### |
|
406 | ### [appenlight] ### | |
407 | #################### |
|
407 | #################### | |
408 |
|
408 | |||
409 | ## AppEnlight is tailored to work with Kallithea, see |
|
409 | ## AppEnlight is tailored to work with Kallithea, see | |
410 | ## http://appenlight.com for details on how to obtain an account |
|
410 | ## http://appenlight.com for details on how to obtain an account | |
411 | ## you must install the Python package `appenlight_client` to make it work |
|
411 | ## you must install the Python package `appenlight_client` to make it work | |
412 |
|
412 | |||
413 | ## appenlight enabled |
|
413 | ## appenlight enabled | |
414 | appenlight = false |
|
414 | appenlight = false | |
415 |
|
415 | |||
416 | appenlight.server_url = https://api.appenlight.com |
|
416 | appenlight.server_url = https://api.appenlight.com | |
417 | appenlight.api_key = YOUR_API_KEY |
|
417 | appenlight.api_key = YOUR_API_KEY | |
418 |
|
418 | |||
419 | ## TWEAK AMOUNT OF INFO SENT HERE |
|
419 | ## TWEAK AMOUNT OF INFO SENT HERE | |
420 |
|
420 | |||
421 | ## enables 404 error logging (default False) |
|
421 | ## enables 404 error logging (default False) | |
422 | appenlight.report_404 = false |
|
422 | appenlight.report_404 = false | |
423 |
|
423 | |||
424 | ## time in seconds after which a request is considered slow (default 1) |
|
424 | ## time in seconds after which a request is considered slow (default 1) | |
425 | appenlight.slow_request_time = 1 |
|
425 | appenlight.slow_request_time = 1 | |
426 |
|
426 | |||
427 | ## record slow requests in application |
|
427 | ## record slow requests in application | |
428 | ## (needs to be enabled for slow datastore recording and time tracking) |
|
428 | ## (needs to be enabled for slow datastore recording and time tracking) | |
429 | appenlight.slow_requests = true |
|
429 | appenlight.slow_requests = true | |
430 |
|
430 | |||
431 | ## enable hooking to application loggers |
|
431 | ## enable hooking to application loggers | |
432 | #appenlight.logging = true |
|
432 | #appenlight.logging = true | |
433 |
|
433 | |||
434 | ## minimum log level for log capture |
|
434 | ## minimum log level for log capture | |
435 | #appenlight.logging.level = WARNING |
|
435 | #appenlight.logging.level = WARNING | |
436 |
|
436 | |||
437 | ## send logs only from erroneous/slow requests |
|
437 | ## send logs only from erroneous/slow requests | |
438 | ## (saves API quota for intensive logging) |
|
438 | ## (saves API quota for intensive logging) | |
439 | appenlight.logging_on_error = false |
|
439 | appenlight.logging_on_error = false | |
440 |
|
440 | |||
441 | ## list of additional keywords that should be grabbed from the environ object; |
|
441 | ## list of additional keywords that should be grabbed from the environ object; | |
442 | ## can be a string with a comma separated list of words in lowercase |
|
442 | ## can be a string with a comma separated list of words in lowercase | |
443 | ## (by default the client will always send the following info: |
|
443 | ## (by default the client will always send the following info: | |
444 | ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that |
|
444 | ## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that | |
445 | ## start with HTTP*); this list can be extended with additional keywords here |
|
445 | ## start with HTTP*); this list can be extended with additional keywords here | |
446 | appenlight.environ_keys_whitelist = |
|
446 | appenlight.environ_keys_whitelist = | |
447 |
|
447 | |||
448 | ## list of keywords that should be blanked from the request object; |
|
448 | ## list of keywords that should be blanked from the request object; | |
449 | ## can be a string with a comma separated list of words in lowercase |
|
449 | ## can be a string with a comma separated list of words in lowercase | |
450 | ## (by default the client will always blank keys that contain the following words: |
|
450 | ## (by default the client will always blank keys that contain the following words: | |
451 | ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'); |
|
451 | ## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'); | |
452 | ## this list can be extended with additional keywords set here |
|
452 | ## this list can be extended with additional keywords set here | |
453 | appenlight.request_keys_blacklist = |
|
453 | appenlight.request_keys_blacklist = | |
454 |
|
454 | |||
455 | ## list of namespaces that should be ignored when gathering log entries; |
|
455 | ## list of namespaces that should be ignored when gathering log entries; | |
456 | ## can be a string with a comma separated list of namespaces |
|
456 | ## can be a string with a comma separated list of namespaces | |
457 | ## (by default the client ignores its own entries: appenlight_client.client) |
|
457 | ## (by default the client ignores its own entries: appenlight_client.client) | |
458 | appenlight.log_namespace_blacklist = |
|
458 | appenlight.log_namespace_blacklist = | |
459 |
|
459 | |||
460 | ################ |
|
460 | ################ | |
461 | ### [sentry] ### |
|
461 | ### [sentry] ### | |
462 | ################ |
|
462 | ################ | |
463 |
|
463 | |||
464 | ## Sentry is an alternative open source error aggregator |
|
464 | ## Sentry is an alternative open source error aggregator | |
465 | ## you must install the Python packages `sentry` and `raven` to enable it |
|
465 | ## you must install the Python packages `sentry` and `raven` to enable it | |
466 |
|
466 | |||
467 | sentry.dsn = YOUR_DSN |
|
467 | sentry.dsn = YOUR_DSN | |
468 | sentry.servers = |
|
468 | sentry.servers = | |
469 | sentry.name = |
|
469 | sentry.name = | |
470 | sentry.key = |
|
470 | sentry.key = | |
471 | sentry.public_key = |
|
471 | sentry.public_key = | |
472 | sentry.secret_key = |
|
472 | sentry.secret_key = | |
473 | sentry.project = |
|
473 | sentry.project = | |
474 | sentry.site = |
|
474 | sentry.site = | |
475 | sentry.include_paths = |
|
475 | sentry.include_paths = | |
476 | sentry.exclude_paths = |
|
476 | sentry.exclude_paths = | |
477 |
|
477 | |||
478 | ################################################################################ |
|
478 | ################################################################################ | |
479 | ## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ## |
|
479 | ## WARNING: *DEBUG MODE MUST BE OFF IN A PRODUCTION ENVIRONMENT* ## | |
480 | ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ## |
|
480 | ## Debug mode will enable the interactive debugging tool, allowing ANYONE to ## | |
481 | ## execute malicious code after an exception is raised. ## |
|
481 | ## execute malicious code after an exception is raised. ## | |
482 | ################################################################################ |
|
482 | ################################################################################ | |
483 | debug = false |
|
483 | debug = false | |
484 |
|
484 | |||
485 | ################################## |
|
485 | ################################## | |
486 | ### LOGVIEW CONFIG ### |
|
486 | ### LOGVIEW CONFIG ### | |
487 | ################################## |
|
487 | ################################## | |
488 |
|
488 | |||
489 | logview.sqlalchemy = #faa |
|
489 | logview.sqlalchemy = #faa | |
490 | logview.pylons.templating = #bfb |
|
490 | logview.pylons.templating = #bfb | |
491 | logview.pylons.util = #eee |
|
491 | logview.pylons.util = #eee | |
492 |
|
492 | |||
493 | ######################################################### |
|
493 | ######################################################### | |
494 | ### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG ### |
|
494 | ### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG ### | |
495 | ######################################################### |
|
495 | ######################################################### | |
496 |
|
496 | |||
497 | # SQLITE [default] |
|
497 | # SQLITE [default] | |
498 | #sqlalchemy.url = sqlite:///%(here)s/kallithea.db?timeout=60 |
|
498 | #sqlalchemy.url = sqlite:///%(here)s/kallithea.db?timeout=60 | |
499 | sqlalchemy.url = sqlite:///%(here)s/kallithea_test.sqlite |
|
499 | sqlalchemy.url = sqlite:///%(here)s/kallithea_test.sqlite | |
500 |
|
500 | |||
501 | # POSTGRESQL |
|
501 | # POSTGRESQL | |
502 | #sqlalchemy.url = postgresql://user:pass@localhost/kallithea |
|
502 | #sqlalchemy.url = postgresql://user:pass@localhost/kallithea | |
503 |
|
503 | |||
504 | # MySQL |
|
504 | # MySQL | |
505 | #sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 |
|
505 | #sqlalchemy.url = mysql://user:pass@localhost/kallithea?charset=utf8 | |
506 |
|
506 | |||
507 | # see sqlalchemy docs for others |
|
507 | # see sqlalchemy docs for others | |
508 |
|
508 | |||
509 | sqlalchemy.echo = false |
|
|||
510 | sqlalchemy.pool_recycle = 3600 |
|
509 | sqlalchemy.pool_recycle = 3600 | |
511 |
|
510 | |||
512 | ################################ |
|
511 | ################################ | |
513 | ### ALEMBIC CONFIGURATION #### |
|
512 | ### ALEMBIC CONFIGURATION #### | |
514 | ################################ |
|
513 | ################################ | |
515 |
|
514 | |||
516 | [alembic] |
|
515 | [alembic] | |
517 | script_location = kallithea:alembic |
|
516 | script_location = kallithea:alembic | |
518 |
|
517 | |||
519 | ################################ |
|
518 | ################################ | |
520 | ### LOGGING CONFIGURATION #### |
|
519 | ### LOGGING CONFIGURATION #### | |
521 | ################################ |
|
520 | ################################ | |
522 |
|
521 | |||
523 | [loggers] |
|
522 | [loggers] | |
524 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer |
|
523 | keys = root, routes, kallithea, sqlalchemy, tg, gearbox, beaker, templates, whoosh_indexer | |
525 |
|
524 | |||
526 | [handlers] |
|
525 | [handlers] | |
527 | keys = console, console_sql |
|
526 | keys = console, console_sql | |
528 |
|
527 | |||
529 | [formatters] |
|
528 | [formatters] | |
530 | keys = generic, color_formatter, color_formatter_sql |
|
529 | keys = generic, color_formatter, color_formatter_sql | |
531 |
|
530 | |||
532 | ############# |
|
531 | ############# | |
533 | ## LOGGERS ## |
|
532 | ## LOGGERS ## | |
534 | ############# |
|
533 | ############# | |
535 |
|
534 | |||
536 | [logger_root] |
|
535 | [logger_root] | |
537 | level = NOTSET |
|
536 | level = NOTSET | |
538 | handlers = console |
|
537 | handlers = console | |
539 |
|
538 | |||
540 | [logger_routes] |
|
539 | [logger_routes] | |
541 | level = DEBUG |
|
540 | level = DEBUG | |
542 | handlers = |
|
541 | handlers = | |
543 | qualname = routes.middleware |
|
542 | qualname = routes.middleware | |
544 | ## "level = DEBUG" logs the route matched and routing variables. |
|
543 | ## "level = DEBUG" logs the route matched and routing variables. | |
545 | propagate = 1 |
|
544 | propagate = 1 | |
546 |
|
545 | |||
547 | [logger_beaker] |
|
546 | [logger_beaker] | |
548 | level = DEBUG |
|
547 | level = DEBUG | |
549 | handlers = |
|
548 | handlers = | |
550 | qualname = beaker.container |
|
549 | qualname = beaker.container | |
551 | propagate = 1 |
|
550 | propagate = 1 | |
552 |
|
551 | |||
553 | [logger_templates] |
|
552 | [logger_templates] | |
554 | level = INFO |
|
553 | level = INFO | |
555 | handlers = |
|
554 | handlers = | |
556 | qualname = pylons.templating |
|
555 | qualname = pylons.templating | |
557 | propagate = 1 |
|
556 | propagate = 1 | |
558 |
|
557 | |||
559 | [logger_kallithea] |
|
558 | [logger_kallithea] | |
560 | level = DEBUG |
|
559 | level = DEBUG | |
561 | handlers = |
|
560 | handlers = | |
562 | qualname = kallithea |
|
561 | qualname = kallithea | |
563 | propagate = 1 |
|
562 | propagate = 1 | |
564 |
|
563 | |||
565 | [logger_tg] |
|
564 | [logger_tg] | |
566 | level = DEBUG |
|
565 | level = DEBUG | |
567 | handlers = |
|
566 | handlers = | |
568 | qualname = tg |
|
567 | qualname = tg | |
569 | propagate = 1 |
|
568 | propagate = 1 | |
570 |
|
569 | |||
571 | [logger_gearbox] |
|
570 | [logger_gearbox] | |
572 | level = DEBUG |
|
571 | level = DEBUG | |
573 | handlers = |
|
572 | handlers = | |
574 | qualname = gearbox |
|
573 | qualname = gearbox | |
575 | propagate = 1 |
|
574 | propagate = 1 | |
576 |
|
575 | |||
577 | [logger_sqlalchemy] |
|
576 | [logger_sqlalchemy] | |
578 | level = INFO |
|
577 | level = INFO | |
579 | handlers = console_sql |
|
578 | handlers = console_sql | |
580 | qualname = sqlalchemy.engine |
|
579 | qualname = sqlalchemy.engine | |
581 | propagate = 0 |
|
580 | propagate = 0 | |
582 |
|
581 | |||
583 | [logger_whoosh_indexer] |
|
582 | [logger_whoosh_indexer] | |
584 | level = DEBUG |
|
583 | level = DEBUG | |
585 | handlers = |
|
584 | handlers = | |
586 | qualname = whoosh_indexer |
|
585 | qualname = whoosh_indexer | |
587 | propagate = 1 |
|
586 | propagate = 1 | |
588 |
|
587 | |||
589 | ############## |
|
588 | ############## | |
590 | ## HANDLERS ## |
|
589 | ## HANDLERS ## | |
591 | ############## |
|
590 | ############## | |
592 |
|
591 | |||
593 | [handler_console] |
|
592 | [handler_console] | |
594 | class = StreamHandler |
|
593 | class = StreamHandler | |
595 | args = (sys.stderr,) |
|
594 | args = (sys.stderr,) | |
596 | #level = INFO |
|
595 | #level = INFO | |
597 | level = DEBUG |
|
596 | level = DEBUG | |
598 | #formatter = generic |
|
597 | #formatter = generic | |
599 | formatter = color_formatter |
|
598 | formatter = color_formatter | |
600 |
|
599 | |||
601 | [handler_console_sql] |
|
600 | [handler_console_sql] | |
602 | class = StreamHandler |
|
601 | class = StreamHandler | |
603 | args = (sys.stderr,) |
|
602 | args = (sys.stderr,) | |
604 | level = WARN |
|
603 | level = WARN | |
605 | #formatter = generic |
|
604 | #formatter = generic | |
606 | formatter = color_formatter_sql |
|
605 | formatter = color_formatter_sql | |
607 |
|
606 | |||
608 | ################ |
|
607 | ################ | |
609 | ## FORMATTERS ## |
|
608 | ## FORMATTERS ## | |
610 | ################ |
|
609 | ################ | |
611 |
|
610 | |||
612 | [formatter_generic] |
|
611 | [formatter_generic] | |
613 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
612 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
614 | datefmt = %Y-%m-%d %H:%M:%S |
|
613 | datefmt = %Y-%m-%d %H:%M:%S | |
615 |
|
614 | |||
616 | [formatter_color_formatter] |
|
615 | [formatter_color_formatter] | |
617 | class = kallithea.lib.colored_formatter.ColorFormatter |
|
616 | class = kallithea.lib.colored_formatter.ColorFormatter | |
618 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
617 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
619 | datefmt = %Y-%m-%d %H:%M:%S |
|
618 | datefmt = %Y-%m-%d %H:%M:%S | |
620 |
|
619 | |||
621 | [formatter_color_formatter_sql] |
|
620 | [formatter_color_formatter_sql] | |
622 | class = kallithea.lib.colored_formatter.ColorFormatterSql |
|
621 | class = kallithea.lib.colored_formatter.ColorFormatterSql | |
623 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s |
|
622 | format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s | |
624 | datefmt = %Y-%m-%d %H:%M:%S |
|
623 | datefmt = %Y-%m-%d %H:%M:%S |