##// END OF EJS Templates
fix(tests): fixed few tests
super-admin -
r5575:521d91fe default
parent child Browse files
Show More
@@ -1,811 +1,804 b''
1 1
2 2 # Copyright (C) 2010-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software: you can redistribute it and/or modify
5 5 # it under the terms of the GNU Affero General Public License, version 3
6 6 # (only), as published by the Free Software Foundation.
7 7 #
8 8 # This program is distributed in the hope that it will be useful,
9 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 11 # GNU General Public License for more details.
12 12 #
13 13 # You should have received a copy of the GNU Affero General Public License
14 14 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 15 #
16 16 # This program is dual-licensed. If you wish to learn more about the
17 17 # RhodeCode Enterprise Edition, including its added features, Support services,
18 18 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 19
20 20 import pytest
21 21
22 22 from rhodecode.lib.auth import AuthUser
23 23 from rhodecode.model.db import (
24 24 RepoGroup, User, UserGroupRepoGroupToPerm, Permission, UserToPerm,
25 25 UserGroupToPerm)
26 26 from rhodecode.model.meta import Session
27 27 from rhodecode.model.permission import PermissionModel
28 28 from rhodecode.model.repo import RepoModel
29 29 from rhodecode.model.repo_group import RepoGroupModel
30 30 from rhodecode.model.user import UserModel
31 31 from rhodecode.model.user_group import UserGroupModel
32 32 from rhodecode.tests.fixture import Fixture
33 33
34 34
35 35 fixture = Fixture()
36 36
37 37
38 38 @pytest.fixture()
39 39 def repo_name(backend_hg):
40 40 return backend_hg.repo_name
41 41
42 42
43 43 class TestPermissions(object):
44 44
45 45 @pytest.fixture(scope='class', autouse=True)
46 46 def default_permissions(self, request, baseapp):
47 47 # recreate default user to get a clean start
48 48 PermissionModel().create_default_user_permissions(
49 49 user=User.DEFAULT_USER, force=True)
50 50 Session().commit()
51 51
52 52 @pytest.fixture(autouse=True)
53 53 def prepare_users(self, request):
54 54         # TODO: User creation is a duplicate of test_notifications, check
55 55 # if that can be unified
56 56 self.u1 = UserModel().create_or_update(
57 username=u'u1', password=u'qweqwe',
58 email=u'u1@rhodecode.org', firstname=u'u1', lastname=u'u1'
57 username='u1', password='qweqwe',
58 email='u1@rhodecode.org', firstname='u1', lastname='u1'
59 59 )
60 60 self.u2 = UserModel().create_or_update(
61 username=u'u2', password=u'qweqwe',
62 email=u'u2@rhodecode.org', firstname=u'u2', lastname=u'u2'
61 username='u2', password='qweqwe',
62 email='u2@rhodecode.org', firstname='u2', lastname='u2'
63 63 )
64 64 self.u3 = UserModel().create_or_update(
65 username=u'u3', password=u'qweqwe',
66 email=u'u3@rhodecode.org', firstname=u'u3', lastname=u'u3'
65 username='u3', password='qweqwe',
66 email='u3@rhodecode.org', firstname='u3', lastname='u3'
67 67 )
68 68 self.anon = User.get_default_user()
69 69 self.a1 = UserModel().create_or_update(
70 username=u'a1', password=u'qweqwe',
71 email=u'a1@rhodecode.org', firstname=u'a1', lastname=u'a1',
70 username='a1', password='qweqwe',
71 email='a1@rhodecode.org', firstname='a1', lastname='a1',
72 72 admin=True
73 73 )
74 74 Session().commit()
75 75
76 76 request.addfinalizer(self.cleanup)
77 77
78 78 def cleanup(self):
79 79 if hasattr(self, 'test_repo'):
80 80 RepoModel().delete(repo=self.test_repo)
81 81 Session().commit()
82 82
83 83 if hasattr(self, 'g1'):
84 84 RepoGroupModel().delete(self.g1.group_id)
85 85 if hasattr(self, 'g2'):
86 86 RepoGroupModel().delete(self.g2.group_id)
87 87 Session().commit()
88 88
89 89 UserModel().delete(self.u1, handle_repos='delete', handle_repo_groups='delete')
90 90 UserModel().delete(self.u2, handle_repos='delete', handle_repo_groups='delete')
91 91 UserModel().delete(self.u3, handle_repos='delete', handle_repo_groups='delete')
92 92 UserModel().delete(self.a1, handle_repos='delete', handle_repo_groups='delete')
93 93 Session().commit()
94 94
95 95 if hasattr(self, 'ug1'):
96 96 UserGroupModel().delete(self.ug1, force=True)
97 97 Session().commit()
98 98
99 99 def test_default_perms_set(self, repo_name):
100 100 assert repo_perms(self.u1)[repo_name] == 'repository.read'
101 101 new_perm = 'repository.write'
102 102 RepoModel().grant_user_permission(repo=repo_name, user=self.u1,
103 103 perm=new_perm)
104 104 Session().commit()
105 105 assert repo_perms(self.u1)[repo_name] == new_perm
106 106
107 107 def test_default_admin_perms_set(self, repo_name):
108 108 assert repo_perms(self.a1)[repo_name] == 'repository.admin'
109 109 RepoModel().grant_user_permission(repo=repo_name, user=self.a1,
110 110 perm='repository.write')
111 111 Session().commit()
112 112         # cannot really downgrade an admin's permissions !? they still get set as
113 113 # admin !
114 114 assert repo_perms(self.a1)[repo_name] == 'repository.admin'
115 115
116 116 def test_default_group_perms(self, repo_name):
117 117 self.g1 = fixture.create_repo_group('test1', skip_if_exists=True)
118 118 self.g2 = fixture.create_repo_group('test2', skip_if_exists=True)
119 119
120 120 assert repo_perms(self.u1)[repo_name] == 'repository.read'
121 121 assert group_perms(self.u1) == {
122 122 'test1': 'group.read', 'test2': 'group.read'}
123 123 assert global_perms(self.u1) == set(
124 124 Permission.DEFAULT_USER_PERMISSIONS)
125 125
126 126 def test_default_admin_group_perms(self, repo_name):
127 127 self.g1 = fixture.create_repo_group('test1', skip_if_exists=True)
128 128 self.g2 = fixture.create_repo_group('test2', skip_if_exists=True)
129 129
130 130 assert repo_perms(self.a1)[repo_name] == 'repository.admin'
131 131 assert group_perms(self.a1) == {
132 132 'test1': 'group.admin', 'test2': 'group.admin'}
133 133
134 134 def test_default_owner_repo_perms(self, backend, user_util, test_repo):
135 135 user = user_util.create_user()
136 136 repo = test_repo('minimal', backend.alias)
137 137 org_owner = repo.user
138 138 assert repo_perms(user)[repo.repo_name] == 'repository.read'
139 139
140 140 repo.user = user
141 141 assert repo_perms(user)[repo.repo_name] == 'repository.admin'
142 142 repo.user = org_owner
143 143
144 144 def test_default_owner_branch_perms(self, user_util, test_user_group):
145 145 user = user_util.create_user()
146 146 assert branch_perms(user) == {}
147 147
148 148 def test_default_owner_repo_group_perms(self, user_util, test_repo_group):
149 149 user = user_util.create_user()
150 150 org_owner = test_repo_group.user
151 151
152 152 assert group_perms(user)[test_repo_group.group_name] == 'group.read'
153 153
154 154 test_repo_group.user = user
155 155 assert group_perms(user)[test_repo_group.group_name] == 'group.admin'
156 156 test_repo_group.user = org_owner
157 157
158 158 def test_default_owner_user_group_perms(self, user_util, test_user_group):
159 159 user = user_util.create_user()
160 160 org_owner = test_user_group.user
161 161
162 162 assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.read'
163 163
164 164 test_user_group.user = user
165 165 assert user_group_perms(user)[test_user_group.users_group_name] == 'usergroup.admin'
166 166
167 167 test_user_group.user = org_owner
168 168
169 169 def test_propagated_permissions_from_repo_group_to_private_repo(self, repo_name):
170 170 # make group
171 171 self.g1 = fixture.create_repo_group('TOP_LEVEL', skip_if_exists=True)
172 172 # both perms should be read !
173 173 assert group_perms(self.anon) == {
174 174 'TOP_LEVEL': 'group.read'
175 175 }
176 176
177 177 # Create repo inside the TOP_LEVEL
178 178 repo_name_in_group = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm_on_private_repo'])
179 179 self.test_repo = fixture.create_repo(name=repo_name_in_group,
180 180 repo_type='hg',
181 181 repo_group=self.g1,
182 182 cur_user=self.u1,)
183 assert repo_perms(self.anon) == {
184 repo_name_in_group: 'repository.read',
185 'vcs_test_git': 'repository.read',
186 'vcs_test_hg': 'repository.read',
187 'vcs_test_svn': 'repository.read',
188 }
183 assert repo_perms(self.anon)[repo_name_in_group] == 'repository.read'
184 assert repo_perms(self.anon)['vcs_test_git'] == 'repository.read'
185 assert repo_perms(self.anon)['vcs_test_hg'] == 'repository.read'
186 assert repo_perms(self.anon)['vcs_test_svn'] == 'repository.read'
187
189 188 # Now change default user permissions
190 189 new_perm = 'repository.write'
191 190 perm_updates = [
192 191 [self.anon.user_id, new_perm, 'user']
193 192 ]
194 193 RepoGroupModel().update_permissions(
195 194 repo_group=self.g1, perm_updates=perm_updates, recursive='all')
196 195
197 196 Session().commit()
198 assert repo_perms(self.anon) == {
199 repo_name_in_group: new_perm,
200 'vcs_test_git': 'repository.read',
201 'vcs_test_hg': 'repository.read',
202 'vcs_test_svn': 'repository.read',
203 }
197 assert repo_perms(self.anon)[repo_name_in_group] == new_perm
198 assert repo_perms(self.anon)['vcs_test_git'] == 'repository.read'
199 assert repo_perms(self.anon)['vcs_test_hg'] == 'repository.read'
200 assert repo_perms(self.anon)['vcs_test_svn'] == 'repository.read'
204 201
205 202 # NOW MARK repo as private
206 203 changes = {
207 204 'repo_private': True
208 205 }
209 206 repo = RepoModel().get_by_repo_name(repo_name_in_group)
210 207 RepoModel().update(repo, **changes)
211 208 Session().commit()
212 209
213 210 # Private repo sets 'none' permission for default user
214 assert repo_perms(self.anon) == {
215 repo_name_in_group: 'repository.none',
216 'vcs_test_git': 'repository.read',
217 'vcs_test_hg': 'repository.read',
218 'vcs_test_svn': 'repository.read',
219 }
211 assert repo_perms(self.anon)[repo_name_in_group] == 'repository.none'
212 assert repo_perms(self.anon)['vcs_test_git'] == 'repository.read'
213 assert repo_perms(self.anon)['vcs_test_hg'] == 'repository.read'
214 assert repo_perms(self.anon)['vcs_test_svn'] == 'repository.read'
220 215
221 216         # apply the same recursive "update" logic, but now the anon permissions should not be impacted
222 217 new_perm = 'repository.write'
223 218 perm_updates = [
224 219 [self.anon.user_id, new_perm, 'user']
225 220 ]
226 221 RepoGroupModel().update_permissions(
227 222 repo_group=self.g1, perm_updates=perm_updates, recursive='all')
228 223
229 224 Session().commit()
230 assert repo_perms(self.anon) == {
231 repo_name_in_group: 'repository.none',
232 'vcs_test_git': 'repository.read',
233 'vcs_test_hg': 'repository.read',
234 'vcs_test_svn': 'repository.read',
235 }
225 assert repo_perms(self.anon)[repo_name_in_group] == 'repository.none'
226 assert repo_perms(self.anon)['vcs_test_git'] == 'repository.read'
227 assert repo_perms(self.anon)['vcs_test_hg'] == 'repository.read'
228 assert repo_perms(self.anon)['vcs_test_svn'] == 'repository.read'
236 229
237 230 def test_propagated_permission_from_users_group_by_explicit_perms_exist(
238 231 self, repo_name):
239 232 # make group
240 233 self.ug1 = fixture.create_user_group('G1')
241 234 UserGroupModel().add_user_to_group(self.ug1, self.u1)
242 235
243 236 # set permission to lower
244 237 new_perm = 'repository.none'
245 238 RepoModel().grant_user_permission(
246 239 repo=repo_name, user=self.u1, perm=new_perm)
247 240 Session().commit()
248 241 assert repo_perms(self.u1)[repo_name] == new_perm
249 242
250 243 # grant perm for group this should not override permission from user
251 244         # since it was explicitly set
252 245 new_perm_gr = 'repository.write'
253 246 RepoModel().grant_user_group_permission(
254 247 repo=repo_name, group_name=self.ug1, perm=new_perm_gr)
255 248 Session().commit()
256 249
257 250 assert repo_perms(self.u1)[repo_name] == new_perm
258 251 assert group_perms(self.u1) == {}
259 252
260 253 def test_propagated_permission_from_users_group(self, repo_name):
261 254 # make group
262 255 self.ug1 = fixture.create_user_group('G1')
263 256 UserGroupModel().add_user_to_group(self.ug1, self.u3)
264 257
265 258 # grant perm for group
266 259 # this should override default permission from user
267 260 new_perm_gr = 'repository.write'
268 261 RepoModel().grant_user_group_permission(
269 262 repo=repo_name, group_name=self.ug1, perm=new_perm_gr)
270 263 Session().commit()
271 264
272 265 assert repo_perms(self.u3)[repo_name] == new_perm_gr
273 266 assert group_perms(self.u3) == {}
274 267
275 268 def test_propagated_permission_from_users_group_lower_weight(
276 269 self, repo_name):
277 270 # make group with user
278 271 self.ug1 = fixture.create_user_group('G1')
279 272 UserGroupModel().add_user_to_group(self.ug1, self.u1)
280 273
281 274 # set permission to lower
282 275 new_perm_h = 'repository.write'
283 276 RepoModel().grant_user_permission(
284 277 repo=repo_name, user=self.u1, perm=new_perm_h)
285 278 Session().commit()
286 279
287 280 assert repo_perms(self.u1)[repo_name] == new_perm_h
288 281
289 282 # grant perm for group this should NOT override permission from user
290 283 # since it's lower than granted
291 284 new_perm_l = 'repository.read'
292 285 RepoModel().grant_user_group_permission(
293 286 repo=repo_name, group_name=self.ug1, perm=new_perm_l)
294 287 Session().commit()
295 288
296 289 assert repo_perms(self.u1)[repo_name] == new_perm_h
297 290 assert group_perms(self.u1) == {}
298 291
299 292 def test_repo_in_group_permissions(self):
300 293 self.g1 = fixture.create_repo_group('group1', skip_if_exists=True)
301 294 self.g2 = fixture.create_repo_group('group2', skip_if_exists=True)
302 295 # both perms should be read !
303 296 assert group_perms(self.u1) == \
304 {u'group1': u'group.read', u'group2': u'group.read'}
297 {'group1': 'group.read', 'group2': 'group.read'}
305 298
306 299 assert group_perms(self.anon) == \
307 {u'group1': u'group.read', u'group2': u'group.read'}
300 {'group1': 'group.read', 'group2': 'group.read'}
308 301
309 302 # Change perms to none for both groups
310 303 RepoGroupModel().grant_user_permission(
311 304 repo_group=self.g1, user=self.anon, perm='group.none')
312 305 RepoGroupModel().grant_user_permission(
313 306 repo_group=self.g2, user=self.anon, perm='group.none')
314 307
315 308 assert group_perms(self.u1) == \
316 {u'group1': u'group.none', u'group2': u'group.none'}
309 {'group1': 'group.none', 'group2': 'group.none'}
317 310 assert group_perms(self.anon) == \
318 {u'group1': u'group.none', u'group2': u'group.none'}
311 {'group1': 'group.none', 'group2': 'group.none'}
319 312
320 313 # add repo to group
321 314 name = RepoGroup.url_sep().join([self.g1.group_name, 'test_perm'])
322 315 self.test_repo = fixture.create_repo(name=name,
323 316 repo_type='hg',
324 317 repo_group=self.g1,
325 318 cur_user=self.u1,)
326 319
327 320 assert group_perms(self.u1) == \
328 {u'group1': u'group.none', u'group2': u'group.none'}
321 {'group1': 'group.none', 'group2': 'group.none'}
329 322 assert group_perms(self.anon) == \
330 {u'group1': u'group.none', u'group2': u'group.none'}
323 {'group1': 'group.none', 'group2': 'group.none'}
331 324
332 325 # grant permission for u2 !
333 326 RepoGroupModel().grant_user_permission(
334 327 repo_group=self.g1, user=self.u2, perm='group.read')
335 328 RepoGroupModel().grant_user_permission(
336 329 repo_group=self.g2, user=self.u2, perm='group.read')
337 330 Session().commit()
338 331 assert self.u1 != self.u2
339 332
340 333         # u1 and anon should not have changed perms, while u2 should !
341 334 assert group_perms(self.u1) == \
342 {u'group1': u'group.none', u'group2': u'group.none'}
335 {'group1': 'group.none', 'group2': 'group.none'}
343 336 assert group_perms(self.u2) == \
344 {u'group1': u'group.read', u'group2': u'group.read'}
337 {'group1': 'group.read', 'group2': 'group.read'}
345 338 assert group_perms(self.anon) == \
346 {u'group1': u'group.none', u'group2': u'group.none'}
339 {'group1': 'group.none', 'group2': 'group.none'}
347 340
348 341 def test_repo_group_user_as_user_group_member(self):
349 342 # create Group1
350 343 self.g1 = fixture.create_repo_group('group1', skip_if_exists=True)
351 assert group_perms(self.anon) == {u'group1': u'group.read'}
344 assert group_perms(self.anon) == {'group1': 'group.read'}
352 345
353 346 # set default permission to none
354 347 RepoGroupModel().grant_user_permission(
355 348 repo_group=self.g1, user=self.anon, perm='group.none')
356 349 Session().commit()
357 350
358 351 # make group
359 352 self.ug1 = fixture.create_user_group('G1')
360 353 # add user to group
361 354 UserGroupModel().add_user_to_group(self.ug1, self.u1)
362 355 Session().commit()
363 356
364 357 # check if user is in the group
365 358 ug1 = UserGroupModel().get(self.ug1.users_group_id)
366 359 members = [x.user_id for x in ug1.members]
367 360 assert members == [self.u1.user_id]
368 361 # add some user to that group
369 362
370 363 # check his permissions
371 assert group_perms(self.anon) == {u'group1': u'group.none'}
372 assert group_perms(self.u1) == {u'group1': u'group.none'}
364 assert group_perms(self.anon) == {'group1': 'group.none'}
365 assert group_perms(self.u1) == {'group1': 'group.none'}
373 366
374 367         # grant ug1 read permissions for the repo group
375 368 RepoGroupModel().grant_user_group_permission(
376 369 repo_group=self.g1, group_name=self.ug1, perm='group.read')
377 370 Session().commit()
378 371
379 372         # check if the user group permission entry was stored correctly
380 373 obj = Session().query(UserGroupRepoGroupToPerm)\
381 374 .filter(UserGroupRepoGroupToPerm.group == self.g1)\
382 375 .filter(UserGroupRepoGroupToPerm.users_group == self.ug1)\
383 376 .scalar()
384 377 assert obj.permission.permission_name == 'group.read'
385 378
386 assert group_perms(self.anon) == {u'group1': u'group.none'}
387 assert group_perms(self.u1) == {u'group1': u'group.read'}
379 assert group_perms(self.anon) == {'group1': 'group.none'}
380 assert group_perms(self.u1) == {'group1': 'group.read'}
388 381
389 382 def test_inherited_permissions_from_default_on_user_enabled(self):
390 383 # enable fork and create on default user
391 384 _form_result = {
392 385 'default_repo_create': 'hg.create.repository',
393 386 'default_fork_create': 'hg.fork.repository'
394 387 }
395 388 PermissionModel().set_new_user_perms(
396 389 User.get_default_user(), _form_result)
397 390 Session().commit()
398 391
399 392 # make sure inherit flag is turned on
400 393 self.u1.inherit_default_permissions = True
401 394 Session().commit()
402 395
403 396 # this user will have inherited permissions from default user
404 397 assert global_perms(self.u1) == default_perms()
405 398
406 399 def test_inherited_permissions_from_default_on_user_disabled(self):
407 400 # disable fork and create on default user
408 401 _form_result = {
409 402 'default_repo_create': 'hg.create.none',
410 403 'default_fork_create': 'hg.fork.none'
411 404 }
412 405 PermissionModel().set_new_user_perms(
413 406 User.get_default_user(), _form_result)
414 407 Session().commit()
415 408
416 409 # make sure inherit flag is turned on
417 410 self.u1.inherit_default_permissions = True
418 411 Session().commit()
419 412
420 413 # this user will have inherited permissions from default user
421 414 expected_perms = default_perms(
422 415 added=['hg.create.none', 'hg.fork.none'],
423 416 removed=['hg.create.repository', 'hg.fork.repository'])
424 417 assert global_perms(self.u1) == expected_perms
425 418
426 419 def test_non_inherited_permissions_from_default_on_user_enabled(self):
427 420 user_model = UserModel()
428 421 # enable fork and create on default user
429 422 usr = User.DEFAULT_USER
430 423 user_model.revoke_perm(usr, 'hg.create.none')
431 424 user_model.grant_perm(usr, 'hg.create.repository')
432 425 user_model.revoke_perm(usr, 'hg.fork.none')
433 426 user_model.grant_perm(usr, 'hg.fork.repository')
434 427
435 428 # disable global perms on specific user
436 429 user_model.revoke_perm(self.u1, 'hg.create.repository')
437 430 user_model.grant_perm(self.u1, 'hg.create.none')
438 431 user_model.revoke_perm(self.u1, 'hg.fork.repository')
439 432 user_model.grant_perm(self.u1, 'hg.fork.none')
440 433
441 434 # TODO(marcink): check branch permissions now ?
442 435
443 436 # make sure inherit flag is turned off
444 437 self.u1.inherit_default_permissions = False
445 438 Session().commit()
446 439
447 440         # this user will have non-inherited permissions from his
448 441         # explicitly set permissions
449 442 assert global_perms(self.u1) == {
450 443 'hg.create.none',
451 444 'hg.fork.none',
452 445 'hg.register.manual_activate',
453 446 'hg.password_reset.enabled',
454 447 'hg.extern_activate.auto',
455 448 'repository.read',
456 449 'group.read',
457 450 'usergroup.read',
458 451 'branch.push_force',
459 452 }
460 453
461 454 def test_non_inherited_permissions_from_default_on_user_disabled(self):
462 455 user_model = UserModel()
463 456 # disable fork and create on default user
464 457 usr = User.DEFAULT_USER
465 458 user_model.revoke_perm(usr, 'hg.create.repository')
466 459 user_model.grant_perm(usr, 'hg.create.none')
467 460 user_model.revoke_perm(usr, 'hg.fork.repository')
468 461 user_model.grant_perm(usr, 'hg.fork.none')
469 462
470 463 # enable global perms on specific user
471 464 user_model.revoke_perm(self.u1, 'hg.create.none')
472 465 user_model.grant_perm(self.u1, 'hg.create.repository')
473 466 user_model.revoke_perm(self.u1, 'hg.fork.none')
474 467 user_model.grant_perm(self.u1, 'hg.fork.repository')
475 468
476 469 # make sure inherit flag is turned off
477 470 self.u1.inherit_default_permissions = False
478 471 Session().commit()
479 472
480 473 # TODO(marcink): check branch perms
481 474
482 475         # this user will have non-inherited permissions from his
483 476         # explicitly set permissions
484 477 assert global_perms(self.u1) == {
485 478 'hg.create.repository',
486 479 'hg.fork.repository',
487 480 'hg.register.manual_activate',
488 481 'hg.password_reset.enabled',
489 482 'hg.extern_activate.auto',
490 483 'repository.read',
491 484 'group.read',
492 485 'usergroup.read',
493 486 'branch.push_force',
494 487 }
495 488
496 489 @pytest.mark.parametrize('perm, expected_perm', [
497 490 ('hg.inherit_default_perms.false', 'repository.none', ),
498 491 ('hg.inherit_default_perms.true', 'repository.read', ),
499 492 ])
500 493 def test_inherited_permissions_on_objects(self, perm, expected_perm):
501 494 _form_result = {
502 495 'default_inherit_default_permissions': perm,
503 496 }
504 497 PermissionModel().set_new_user_perms(
505 498 User.get_default_user(), _form_result)
506 499 Session().commit()
507 500
508 501 # make sure inherit flag is turned on
509 502 self.u1.inherit_default_permissions = True
510 503 Session().commit()
511 504
512 505 # TODO(marcink): check branch perms
513 506
514 507 # this user will have inherited permissions from default user
515 508 assert global_perms(self.u1) == {
516 509 'hg.create.none',
517 510 'hg.fork.none',
518 511 'hg.register.manual_activate',
519 512 'hg.password_reset.enabled',
520 513 'hg.extern_activate.auto',
521 514 'repository.read',
522 515 'group.read',
523 516 'usergroup.read',
524 517 'branch.push_force',
525 518 'hg.create.write_on_repogroup.true',
526 519 'hg.usergroup.create.false',
527 520 'hg.repogroup.create.false',
528 521 perm
529 522 }
530 523
531 524 assert set(repo_perms(self.u1).values()) == set([expected_perm])
532 525
533 526 def test_repo_owner_permissions_not_overwritten_by_group(self):
534 527 # create repo as USER,
535 528 self.test_repo = fixture.create_repo(name='myownrepo',
536 529 repo_type='hg',
537 530 cur_user=self.u1)
538 531
539 532 # he has permissions of admin as owner
540 533 assert repo_perms(self.u1)['myownrepo'] == 'repository.admin'
541 534
542 535 # set his permission as user group, he should still be admin
543 536 self.ug1 = fixture.create_user_group('G1')
544 537 UserGroupModel().add_user_to_group(self.ug1, self.u1)
545 538 RepoModel().grant_user_group_permission(
546 539 self.test_repo,
547 540 group_name=self.ug1,
548 541 perm='repository.none')
549 542 Session().commit()
550 543
551 544 assert repo_perms(self.u1)['myownrepo'] == 'repository.admin'
552 545
553 546 def test_repo_owner_permissions_not_overwritten_by_others(self):
554 547 # create repo as USER,
555 548 self.test_repo = fixture.create_repo(name='myownrepo',
556 549 repo_type='hg',
557 550 cur_user=self.u1)
558 551
559 552 # he has permissions of admin as owner
560 553 assert repo_perms(self.u1)['myownrepo'] == 'repository.admin'
561 554
562 555 # set his permission as user, he should still be admin
563 556 RepoModel().grant_user_permission(
564 557 self.test_repo, user=self.u1, perm='repository.none')
565 558 Session().commit()
566 559
567 560 assert repo_perms(self.u1)['myownrepo'] == 'repository.admin'
568 561
569 562 def test_repo_group_owner_permissions_not_overwritten_by_group(self):
570 563 # "u1" shall be owner without any special permission assigned
571 564 self.g1 = fixture.create_repo_group('test1')
572 565
573 566 # Make user group and grant a permission to user group
574 567 self.ug1 = fixture.create_user_group('G1')
575 568 UserGroupModel().add_user_to_group(self.ug1, self.u1)
576 569 RepoGroupModel().grant_user_group_permission(
577 570 repo_group=self.g1, group_name=self.ug1, perm='group.write')
578 571 Session().commit()
579 572
580 573 # Verify that user does not get any special permission if he is not
581 574 # owner
582 575 assert group_perms(self.u1) == {'test1': 'group.write'}
583 576
584 577 # Make him owner of the repo group
585 578 self.g1.user = self.u1
586 579 assert group_perms(self.u1) == {'test1': 'group.admin'}
587 580
588 581 def test_repo_group_owner_permissions_not_overwritten_by_others(self):
589 582 # "u1" shall be owner without any special permission assigned
590 583 self.g1 = fixture.create_repo_group('test1')
591 584 RepoGroupModel().grant_user_permission(
592 585 repo_group=self.g1, user=self.u1, perm='group.write')
593 586 Session().commit()
594 587
595 588 # Verify that user does not get any special permission if he is not
596 589 # owner
597 590 assert group_perms(self.u1) == {'test1': 'group.write'}
598 591
599 592 # Make him owner of the repo group
600 593 self.g1.user = self.u1
601 assert group_perms(self.u1) == {u'test1': 'group.admin'}
594 assert group_perms(self.u1) == {'test1': 'group.admin'}
602 595
603 596 def assert_user_perm_equal(
604 597 self, user, change_factor=0, compare_keys=None):
605 598 perms = UserToPerm.query().filter(UserToPerm.user == user).all()
606 599 assert len(perms) == \
607 600 len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor
608 601 if compare_keys:
609 602 assert set(
610 603 x.permissions.permission_name for x in perms) == compare_keys
611 604
612 605 def assert_def_user_group_perm_equal(
613 606 self, user_group, change_factor=0, compare_keys=None):
614 607 perms = UserGroupToPerm.query().filter(
615 608 UserGroupToPerm.users_group == user_group).all()
616 609 assert len(perms) == \
617 610 len(Permission.DEFAULT_USER_PERMISSIONS) + change_factor
618 611 if compare_keys:
619 612 assert set(
620 613 x.permissions.permission_name for x in perms) == compare_keys
621 614
622 615 def test_set_default_permissions(self):
623 616 PermissionModel().create_default_user_permissions(user=self.u1)
624 617 self.assert_user_perm_equal(user=self.u1)
625 618
626 619 def test_set_default_permissions_after_one_is_missing(self):
627 620 PermissionModel().create_default_user_permissions(user=self.u1)
628 621 self.assert_user_perm_equal(user=self.u1)
629 622 # now we delete one, it should be re-created after another call
630 623 perms = UserToPerm.query().filter(UserToPerm.user == self.u1).all()
631 624 Session().delete(perms[0])
632 625 Session().commit()
633 626
634 627 self.assert_user_perm_equal(user=self.u1, change_factor=-1)
635 628
636 629 # create missing one !
637 630 PermissionModel().create_default_user_permissions(user=self.u1)
638 631 self.assert_user_perm_equal(user=self.u1)
639 632
640 633 @pytest.mark.parametrize("perm, modify_to", [
641 634 ('repository.read', 'repository.none'),
642 635 ('group.read', 'group.none'),
643 636 ('usergroup.read', 'usergroup.none'),
644 637 ('hg.create.repository', 'hg.create.none'),
645 638 ('hg.fork.repository', 'hg.fork.none'),
646 639 ('hg.register.manual_activate', 'hg.register.auto_activate',)
647 640 ])
648 641 def test_set_default_permissions_after_modification(self, perm, modify_to):
649 642 PermissionModel().create_default_user_permissions(user=self.u1)
650 643 self.assert_user_perm_equal(user=self.u1)
651 644
652 645 old = Permission.get_by_key(perm)
653 646 new = Permission.get_by_key(modify_to)
654 647 assert old is not None
655 648 assert new is not None
656 649
657 650 # now modify permissions
658 651 p = UserToPerm.query().filter(
659 652 UserToPerm.user == self.u1).filter(
660 653 UserToPerm.permission == old).one()
661 654 p.permission = new
662 655 Session().add(p)
663 656 Session().commit()
664 657
665 658 PermissionModel().create_default_user_permissions(user=self.u1)
666 659 self.assert_user_perm_equal(user=self.u1)
667 660
668 661 def test_clear_user_perms(self):
669 662 PermissionModel().create_default_user_permissions(user=self.u1)
670 663 self.assert_user_perm_equal(user=self.u1)
671 664
672 665 # now clear permissions
673 666 cleared = PermissionModel()._clear_user_perms(self.u1.user_id)
674 667 self.assert_user_perm_equal(user=self.u1,
675 668 change_factor=len(cleared)*-1)
676 669
677 670 def test_clear_user_group_perms(self):
678 671 self.ug1 = fixture.create_user_group('G1')
679 672 PermissionModel().create_default_user_group_permissions(
680 673 user_group=self.ug1)
681 674 self.assert_def_user_group_perm_equal(user_group=self.ug1)
682 675
683 676 # now clear permissions
684 677 cleared = PermissionModel()._clear_user_group_perms(
685 678 self.ug1.users_group_id)
686 679 self.assert_def_user_group_perm_equal(user_group=self.ug1,
687 680 change_factor=len(cleared)*-1)
688 681
689 682 @pytest.mark.parametrize("form_result", [
690 683 {},
691 684 {'default_repo_create': 'hg.create.repository'},
692 685 {'default_repo_create': 'hg.create.repository',
693 686 'default_repo_perm': 'repository.read'},
694 687 {'default_repo_create': 'hg.create.none',
695 688 'default_repo_perm': 'repository.write',
696 689 'default_fork_create': 'hg.fork.none'},
697 690 ])
698 691 def test_set_new_user_permissions(self, form_result):
699 692 _form_result = {}
700 693 _form_result.update(form_result)
701 694 PermissionModel().set_new_user_perms(self.u1, _form_result)
702 695 Session().commit()
703 696 change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS)
704 697 - len(form_result.keys()))
705 698 self.assert_user_perm_equal(
706 699 self.u1, change_factor=change_factor)
707 700
708 701 @pytest.mark.parametrize("form_result", [
709 702 {},
710 703 {'default_repo_create': 'hg.create.repository'},
711 704 {'default_repo_create': 'hg.create.repository',
712 705 'default_repo_perm': 'repository.read'},
713 706 {'default_repo_create': 'hg.create.none',
714 707 'default_repo_perm': 'repository.write',
715 708 'default_fork_create': 'hg.fork.none'},
716 709 ])
717 710 def test_set_new_user_group_permissions(self, form_result):
718 711 _form_result = {}
719 712 _form_result.update(form_result)
720 713 self.ug1 = fixture.create_user_group('G1')
721 714 PermissionModel().set_new_user_group_perms(self.ug1, _form_result)
722 715 Session().commit()
723 716 change_factor = -1 * (len(Permission.DEFAULT_USER_PERMISSIONS)
724 717 - len(form_result.keys()))
725 718 self.assert_def_user_group_perm_equal(
726 719 self.ug1, change_factor=change_factor)
727 720
728 721 @pytest.mark.parametrize("group_active, expected_perm", [
729 722 (True, 'repository.admin'),
730 723 (False, 'repository.read'),
731 724 ])
732 725 def test_get_default_repo_perms_from_user_group_with_active_group(
733 726 self, backend, user_util, group_active, expected_perm):
734 727 repo = backend.create_repo()
735 728 user = user_util.create_user()
736 729 user_group = user_util.create_user_group(
737 730 members=[user], users_group_active=group_active)
738 731
739 732 user_util.grant_user_group_permission_to_repo(
740 733 repo, user_group, 'repository.admin')
741 734 permissions = repo_perms(user)
742 735 repo_permission = permissions.get(repo.repo_name)
743 736 assert repo_permission == expected_perm
744 737
745 738 @pytest.mark.parametrize("group_active, expected_perm", [
746 739 (True, 'group.admin'),
747 740 (False, 'group.read')
748 741 ])
749 742 def test_get_default_group_perms_from_user_group_with_active_group(
750 743 self, user_util, group_active, expected_perm):
751 744 user = user_util.create_user()
752 745 repo_group = user_util.create_repo_group()
753 746 user_group = user_util.create_user_group(
754 747 members=[user], users_group_active=group_active)
755 748
756 749 user_util.grant_user_group_permission_to_repo_group(
757 750 repo_group, user_group, 'group.admin')
758 751 permissions = group_perms(user)
759 752 group_permission = permissions.get(repo_group.name)
760 753 assert group_permission == expected_perm
761 754
762 755 @pytest.mark.parametrize("group_active, expected_perm", [
763 756 (True, 'usergroup.admin'),
764 757 (False, 'usergroup.read')
765 758 ])
766 759 def test_get_default_user_group_perms_from_user_group_with_active_group(
767 760 self, user_util, group_active, expected_perm):
768 761 user = user_util.create_user()
769 762 user_group = user_util.create_user_group(
770 763 members=[user], users_group_active=group_active)
771 764 target_user_group = user_util.create_user_group()
772 765
773 766 user_util.grant_user_group_permission_to_user_group(
774 767 target_user_group, user_group, 'usergroup.admin')
775 768 permissions = user_group_perms(user)
776 769 group_permission = permissions.get(target_user_group.users_group_name)
777 770 assert group_permission == expected_perm
778 771
779 772
780 773 def repo_perms(user):
781 774 auth_user = AuthUser(user_id=user.user_id)
782 775 return auth_user.permissions['repositories']
783 776
784 777
785 778 def branch_perms(user):
786 779 auth_user = AuthUser(user_id=user.user_id)
787 780 return auth_user.permissions['repository_branches']
788 781
789 782
790 783 def group_perms(user):
791 784 auth_user = AuthUser(user_id=user.user_id)
792 785 return auth_user.permissions['repositories_groups']
793 786
794 787
795 788 def user_group_perms(user):
796 789 auth_user = AuthUser(user_id=user.user_id)
797 790 return auth_user.permissions['user_groups']
798 791
799 792
800 793 def global_perms(user):
801 794 auth_user = AuthUser(user_id=user.user_id)
802 795 return auth_user.permissions['global']
803 796
804 797
805 798 def default_perms(added=None, removed=None):
806 799 expected_perms = set(Permission.DEFAULT_USER_PERMISSIONS)
807 800 if removed:
808 801 expected_perms.difference_update(removed)
809 802 if added:
810 803 expected_perms.update(added)
811 804 return expected_perms
@@ -1,897 +1,897 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows to set RhodeCode under a prefix in server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be overridden by an environment variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; control if environmental variables to be expanded into the .ini settings
75 75 rhodecode.env_expand = false
76 76
77 77 ; encryption key used to encrypt social plugin tokens,
78 78 ; remote_urls with credentials etc, if not set it defaults to
79 79 ; `beaker.session.secret`
80 80 #rhodecode.encrypted_values.secret =
81 81
82 82 ; decryption strict mode (enabled by default). It controls if decryption raises
83 83 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
84 84 #rhodecode.encrypted_values.strict = false
85 85
86 86 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
87 87 ; fernet is safer, and we strongly recommend switching to it.
88 88 ; Due to backward compatibility aes is used as default.
89 89 #rhodecode.encrypted_values.algorithm = fernet
90 90
91 91 ; Return gzipped responses from RhodeCode (static files/application)
92 92 gzip_responses = false
93 93
94 94 ; Auto-generate javascript routes file on startup
95 95 generate_js_files = false
96 96
97 97 ; System global default language.
98 98 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
99 99 lang = en
100 100
101 101 ; Perform a full repository scan and import on each server start.
102 102 ; Settings this to true could lead to very long startup time.
103 103 startup.import_repos = true
104 104
105 105 ; URL at which the application is running. This is used for Bootstrapping
106 106 ; requests in context when no web request is available. Used in ishell, or
107 107 ; SSH calls. Set this for events to receive proper url for SSH calls.
108 108 app.base_url = http://rhodecode.local
109 109
110 110 ; Host at which the Service API is running.
111 111 app.service_api.host = http://rhodecode.local:10020
112 112
113 113 ; Secret for Service API authentication.
114 114 app.service_api.token =
115 115
116 116 ; Unique application ID. Should be a random unique string for security.
117 117 app_instance_uuid = rc-production
118 118
119 119 ; Cut off limit for large diffs (size in bytes). If overall diff size on
120 120 ; commit, or pull request exceeds this limit this diff will be displayed
121 121 ; partially. E.g 512000 == 512Kb
122 122 cut_off_limit_diff = 1024000
123 123
124 124 ; Cut off limit for large files inside diffs (size in bytes). Each individual
125 125 ; file inside diff which exceeds this limit will be displayed partially.
126 126 ; E.g 128000 == 128Kb
127 127 cut_off_limit_file = 256000
128 128
129 129 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
130 130 vcs_full_cache = false
131 131
132 132 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
133 133 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
134 134 force_https = false
135 135
136 136 ; use Strict-Transport-Security headers
137 137 use_htsts = false
138 138
139 139 ; Set to true if your repos are exposed using the dumb protocol
140 140 git_update_server_info = false
141 141
142 142 ; RSS/ATOM feed options
143 143 rss_cut_off_limit = 256000
144 144 rss_items_per_page = 10
145 145 rss_include_diff = false
146 146
147 147 ; gist URL alias, used to create nicer urls for gist. This should be an
148 148 ; url that does rewrites to _admin/gists/{gistid}.
149 149 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
150 150 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
151 151 gist_alias_url =
152 152
153 153 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
154 154 ; used for access.
155 155 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
156 156 ; came from the logged in user who owns this authentication token.
157 157 ; Additionally @TOKEN syntax can be used to bind the view to a specific
158 158 ; authentication token. Such view would be only accessible when used together
159 159 ; with this authentication token
160 160 ; list of all views can be found under `/_admin/permissions/auth_token_access`
161 161 ; The list should be "," separated and on a single line.
162 162 ; Most common views to enable:
163 163
164 164 # RepoCommitsView:repo_commit_download
165 165 # RepoCommitsView:repo_commit_patch
166 166 # RepoCommitsView:repo_commit_raw
167 167 # RepoCommitsView:repo_commit_raw@TOKEN
168 168 # RepoFilesView:repo_files_diff
169 169 # RepoFilesView:repo_archivefile
170 170 # RepoFilesView:repo_file_raw
171 171 # GistView:*
172 172 api_access_controllers_whitelist =
173 173
174 174 ; Default encoding used to convert from and to unicode
175 175 ; can be also a comma separated list of encoding in case of mixed encodings
176 176 default_encoding = UTF-8
177 177
178 178 ; instance-id prefix
179 179 ; a prefix key for this instance used for cache invalidation when running
180 180 ; multiple instances of RhodeCode, make sure it's globally unique for
181 181 ; all running RhodeCode instances. Leave empty if you don't use it
182 182 instance_id =
183 183
184 184 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
185 185 ; of an authentication plugin also if it is disabled by it's settings.
186 186 ; This could be useful if you are unable to log in to the system due to broken
187 187 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
188 188 ; module to log in again and fix the settings.
189 189 ; Available builtin plugin IDs (hash is part of the ID):
190 190 ; egg:rhodecode-enterprise-ce#rhodecode
191 191 ; egg:rhodecode-enterprise-ce#pam
192 192 ; egg:rhodecode-enterprise-ce#ldap
193 193 ; egg:rhodecode-enterprise-ce#jasig_cas
194 194 ; egg:rhodecode-enterprise-ce#headers
195 195 ; egg:rhodecode-enterprise-ce#crowd
196 196
197 197 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
198 198
199 199 ; Flag to control loading of legacy plugins in py:/path format
200 200 auth_plugin.import_legacy_plugins = true
201 201
202 202 ; alternative return HTTP header for failed authentication. Default HTTP
203 203 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
204 204 ; handling that causing a series of failed authentication calls.
205 205 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
206 206 ; This will be served instead of default 401 on bad authentication
207 207 auth_ret_code =
208 208
209 209 ; use special detection method when serving auth_ret_code, instead of serving
210 210 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
211 211 ; and then serve auth_ret_code to clients
212 212 auth_ret_code_detection = false
213 213
214 214 ; locking return code. When repository is locked return this HTTP code. 2XX
215 215 ; codes don't break the transactions while 4XX codes do
216 216 lock_ret_code = 423
217 217
218 218 ; Filesystem location were repositories should be stored
219 219 repo_store.path = /var/opt/rhodecode_repo_store
220 220
221 221 ; allows to setup custom hooks in settings page
222 222 allow_custom_hooks_settings = true
223 223
224 224 ; Generated license token required for EE edition license.
225 225 ; New generated token value can be found in Admin > settings > license page.
226 226 license_token = abra-cada-bra1-rce3
227 227
228 228 ; This flag hides sensitive information on the license page such as token, and license data
229 229 license.hide_license_info = false
230 230
231 231 ; Import EE license from this license path
232 #license.import_path = %(here)s/rhodecode_enterprise.license
232 license.import_path = %(here)s/rhodecode_enterprise.license
233 233
234 234 ; import license 'if-missing' or 'force' (always override)
235 235 ; if-missing means apply license if it doesn't exist. 'force' option always overrides it
236 236 license.import_path_mode = if-missing
237 237
238 238 ; supervisor connection uri, for managing supervisor and logs.
239 239 supervisor.uri =
240 240
241 241 ; supervisord group name/id we only want this RC instance to handle
242 242 supervisor.group_id = dev
243 243
244 244 ; Display extended labs settings
245 245 labs_settings_active = true
246 246
247 247 ; Custom exception store path, defaults to TMPDIR
248 248 ; This is used to store exception from RhodeCode in shared directory
249 249 #exception_tracker.store_path =
250 250
251 251 ; Send email with exception details when it happens
252 252 #exception_tracker.send_email = false
253 253
254 254 ; Comma separated list of recipients for exception emails,
255 255 ; e.g admin@rhodecode.com,devops@rhodecode.com
256 256 ; Can be left empty, then emails will be sent to ALL super-admins
257 257 #exception_tracker.send_email_recipients =
258 258
259 259 ; optional prefix to Add to email Subject
260 260 #exception_tracker.email_prefix = [RHODECODE ERROR]
261 261
262 262 ; NOTE: this setting IS DEPRECATED:
263 263 ; file_store backend is always enabled
264 264 #file_store.enabled = true
265 265
266 266 ; NOTE: this setting IS DEPRECATED:
267 267 ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
268 268 ; Storage backend, available options are: local
269 269 #file_store.backend = local
270 270
271 271 ; NOTE: this setting IS DEPRECATED:
272 272 ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
273 273 ; path to store the uploaded binaries and artifacts
274 274 #file_store.storage_path = /var/opt/rhodecode_data/file_store
275 275
276 276 ; Artifacts file-store, is used to store comment attachments and artifacts uploads.
277 277 ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
278 278 ; filesystem_v1 is backwards compat with pre 5.1 storage changes
279 279 ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from
280 280 ; previous installations to keep the artifacts without a need of migration
281 281 file_store.backend.type = filesystem_v1
282 282
283 283 ; filesystem options...
284 284 file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store
285 285
286 286 ; filesystem_v2 options...
287 287 file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/test_artifacts_file_store_2
288 288 file_store.filesystem_v2.shards = 8
289 289
290 290 ; objectstore options...
291 291 ; url for s3 compatible storage that allows to upload artifacts
292 292 ; e.g http://minio:9000
293 293 #file_store.backend.type = objectstore
294 294 file_store.objectstore.url = http://s3-minio:9000
295 295
296 296 ; a top-level bucket to put all other shards in
297 297 ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
298 298 file_store.objectstore.bucket = rhodecode-file-store-tests
299 299
300 300 ; number of sharded buckets to create to distribute archives across
301 301 ; default is 8 shards
302 302 file_store.objectstore.bucket_shards = 8
303 303
304 304 ; key for s3 auth
305 305 file_store.objectstore.key = s3admin
306 306
307 307 ; secret for s3 auth
308 308 file_store.objectstore.secret = s3secret4
309 309
310 310 ;region for s3 storage
311 311 file_store.objectstore.region = eu-central-1
312 312
313 313 ; Redis url to acquire/check generation of archives locks
314 314 archive_cache.locking.url = redis://redis:6379/1
315 315
316 316 ; Storage backend, only 'filesystem' and 'objectstore' are available now
317 317 archive_cache.backend.type = filesystem
318 318
319 319 ; url for s3 compatible storage that allows to upload artifacts
320 320 ; e.g http://minio:9000
321 321 archive_cache.objectstore.url = http://s3-minio:9000
322 322
323 323 ; key for s3 auth
324 324 archive_cache.objectstore.key = key
325 325
326 326 ; secret for s3 auth
327 327 archive_cache.objectstore.secret = secret
328 328
329 329 ;region for s3 storage
330 330 archive_cache.objectstore.region = eu-central-1
331 331
332 332 ; number of sharded buckets to create to distribute archives across
333 333 ; default is 8 shards
334 334 archive_cache.objectstore.bucket_shards = 8
335 335
336 336 ; a top-level bucket to put all other shards in
337 337 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
338 338 archive_cache.objectstore.bucket = rhodecode-archive-cache
339 339
340 340 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
341 341 archive_cache.objectstore.retry = false
342 342
343 343 ; number of seconds to wait for next try using retry
344 344 archive_cache.objectstore.retry_backoff = 1
345 345
346 346 ; how many tries to do a retry fetch from this backend
347 347 archive_cache.objectstore.retry_attempts = 10
348 348
349 349 ; Default is $cache_dir/archive_cache if not set
350 350 ; Generated repo archives will be cached at this location
351 351 ; and served from the cache during subsequent requests for the same archive of
352 352 ; the repository. This path is important to be shared across filesystems and with
353 353 ; RhodeCode and vcsserver
354 354 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
355 355
356 356 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
357 357 archive_cache.filesystem.cache_size_gb = 2
358 358
359 359 ; Eviction policy used to clear out after cache_size_gb limit is reached
360 360 archive_cache.filesystem.eviction_policy = least-recently-stored
361 361
362 362 ; By default cache uses sharding technique, this specifies how many shards are there
363 363 ; default is 8 shards
364 364 archive_cache.filesystem.cache_shards = 8
365 365
366 366 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
367 367 archive_cache.filesystem.retry = false
368 368
369 369 ; number of seconds to wait for next try using retry
370 370 archive_cache.filesystem.retry_backoff = 1
371 371
372 372 ; how many tries to do a retry fetch from this backend
373 373 archive_cache.filesystem.retry_attempts = 10
374 374
375 375
376 376 ; #############
377 377 ; CELERY CONFIG
378 378 ; #############
379 379
380 380 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
381 381
382 382 use_celery = false
383 383
384 384 ; path to store schedule database
385 385 #celerybeat-schedule.path =
386 386
387 387 ; connection url to the message broker (default redis)
388 388 celery.broker_url = redis://redis:6379/8
389 389
390 390 ; results backend to get results for (default redis)
391 391 celery.result_backend = redis://redis:6379/8
392 392
393 393 ; rabbitmq example
394 394 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
395 395
396 396 ; maximum tasks to execute before worker restart
397 397 celery.max_tasks_per_child = 20
398 398
399 399 ; tasks will never be sent to the queue, but executed locally instead.
400 400 celery.task_always_eager = true
401 401 celery.task_store_eager_result = true
402 402
403 403 ; #############
404 404 ; DOGPILE CACHE
405 405 ; #############
406 406
407 407 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
408 408 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
409 409 cache_dir = %(here)s/rc-test-data
410 410
411 411 ; *********************************************
412 412 ; `sql_cache_short` cache for heavy SQL queries
413 413 ; Only supported backend is `memory_lru`
414 414 ; *********************************************
415 415 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
416 416 rc_cache.sql_cache_short.expiration_time = 0
417 417
418 418
419 419 ; *****************************************************
420 420 ; `cache_repo_longterm` cache for repo object instances
421 421 ; Only supported backend is `memory_lru`
422 422 ; *****************************************************
423 423 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
424 424 ; by default we use 30 Days, cache is still invalidated on push
425 425 rc_cache.cache_repo_longterm.expiration_time = 2592000
426 426 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
427 427 rc_cache.cache_repo_longterm.max_size = 10000
428 428
429 429
430 430 ; *********************************************
431 431 ; `cache_general` cache for general purpose use
432 432 ; for simplicity use rc.file_namespace backend,
433 433 ; for performance and scale use rc.redis
434 434 ; *********************************************
435 435 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
436 436 rc_cache.cache_general.expiration_time = 43200
437 437 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
438 438 rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db
439 439
440 440 ; alternative `cache_general` redis backend with distributed lock
441 441 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
442 442 #rc_cache.cache_general.expiration_time = 300
443 443
444 444 ; redis_expiration_time needs to be greater than expiration_time
445 445 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
446 446
447 447 #rc_cache.cache_general.arguments.host = localhost
448 448 #rc_cache.cache_general.arguments.port = 6379
449 449 #rc_cache.cache_general.arguments.db = 0
450 450 #rc_cache.cache_general.arguments.socket_timeout = 30
451 451 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
452 452 #rc_cache.cache_general.arguments.distributed_lock = true
453 453
454 454 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
455 455 #rc_cache.cache_general.arguments.lock_auto_renewal = true
456 456
457 457 ; *************************************************
458 458 ; `cache_perms` cache for permission tree, auth TTL
459 459 ; for simplicity use rc.file_namespace backend,
460 460 ; for performance and scale use rc.redis
461 461 ; *************************************************
462 462 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
463 463 rc_cache.cache_perms.expiration_time = 0
464 464 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
465 465 rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db
466 466
467 467 ; alternative `cache_perms` redis backend with distributed lock
468 468 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
469 469 #rc_cache.cache_perms.expiration_time = 300
470 470
471 471 ; redis_expiration_time needs to be greater than expiration_time
472 472 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
473 473
474 474 #rc_cache.cache_perms.arguments.host = localhost
475 475 #rc_cache.cache_perms.arguments.port = 6379
476 476 #rc_cache.cache_perms.arguments.db = 0
477 477 #rc_cache.cache_perms.arguments.socket_timeout = 30
478 478 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
479 479 #rc_cache.cache_perms.arguments.distributed_lock = true
480 480
481 481 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
482 482 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
483 483
484 484 ; ***************************************************
485 485 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
486 486 ; for simplicity use rc.file_namespace backend,
487 487 ; for performance and scale use rc.redis
488 488 ; ***************************************************
489 489 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
490 490 rc_cache.cache_repo.expiration_time = 2592000
491 491 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
492 492 rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db
493 493
494 494 ; alternative `cache_repo` redis backend with distributed lock
495 495 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
496 496 #rc_cache.cache_repo.expiration_time = 2592000
497 497
498 498 ; redis_expiration_time needs to be greater than expiration_time
499 499 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
500 500
501 501 #rc_cache.cache_repo.arguments.host = localhost
502 502 #rc_cache.cache_repo.arguments.port = 6379
503 503 #rc_cache.cache_repo.arguments.db = 1
504 504 #rc_cache.cache_repo.arguments.socket_timeout = 30
505 505 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
506 506 #rc_cache.cache_repo.arguments.distributed_lock = true
507 507
508 508 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
509 509 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
510 510
511 511 ; ##############
512 512 ; BEAKER SESSION
513 513 ; ##############
514 514
515 515 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
516 516 ; types are file, ext:redis, ext:database, ext:memcached
517 517 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
518 518 beaker.session.type = file
519 519 beaker.session.data_dir = %(here)s/rc-tests/data/sessions
520 520
521 521 ; Redis based sessions
522 522 #beaker.session.type = ext:redis
523 523 #beaker.session.url = redis://redis:6379/2
524 524
525 525 ; DB based session, fast, and allows easy management over logged in users
526 526 #beaker.session.type = ext:database
527 527 #beaker.session.table_name = db_session
528 528 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
529 529 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
530 530 #beaker.session.sa.pool_recycle = 3600
531 531 #beaker.session.sa.echo = false
532 532
533 533 beaker.session.key = rhodecode
534 534 beaker.session.secret = test-rc-uytcxaz
535 535 beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock
536 536
537 537 ; Secure encrypted cookie. Requires AES and AES python libraries
538 538 ; you must disable beaker.session.secret to use this
539 539 #beaker.session.encrypt_key = key_for_encryption
540 540 #beaker.session.validate_key = validation_key
541 541
542 542 ; Sets session as invalid (also logging out user) if it has not been
543 543 ; accessed for given amount of time in seconds
544 544 beaker.session.timeout = 2592000
545 545 beaker.session.httponly = true
546 546
547 547 ; Path to use for the cookie. Set to prefix if you use prefix middleware
548 548 #beaker.session.cookie_path = /custom_prefix
549 549
550 550 ; Set https secure cookie
551 551 beaker.session.secure = false
552 552
553 553 ; default cookie expiration time in seconds, set to `true` to set expire
554 554 ; at browser close
555 555 #beaker.session.cookie_expires = 3600
556 556
557 557 ; #############################
558 558 ; SEARCH INDEXING CONFIGURATION
559 559 ; #############################
560 560
561 561 ; Full text search indexer is available in rhodecode-tools under
562 562 ; `rhodecode-tools index` command
563 563
564 564 ; WHOOSH Backend, doesn't require additional services to run
565 565 ; it works good with few dozen repos
566 566 search.module = rhodecode.lib.index.whoosh
567 567 search.location = %(here)s/rc-tests/data/index
568 568
569 569 ; ####################
570 570 ; CHANNELSTREAM CONFIG
571 571 ; ####################
572 572
573 573 ; channelstream enables persistent connections and live notification
574 574 ; in the system. It's also used by the chat system
575 575
576 576 channelstream.enabled = false
577 577
578 578 ; server address for channelstream server on the backend
579 579 channelstream.server = channelstream:9800
580 580
581 581 ; location of the channelstream server from outside world
582 582 ; use ws:// for http or wss:// for https. This address needs to be handled
583 583 ; by external HTTP server such as Nginx or Apache
584 584 ; see Nginx/Apache configuration examples in our docs
585 585 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
586 586 channelstream.secret = ENV_GENERATED
587 587 channelstream.history.location = %(here)s/rc-tests/channelstream_history
588 588
589 589 ; Internal application path that Javascript uses to connect into.
590 590 ; If you use proxy-prefix the prefix should be added before /_channelstream
591 591 channelstream.proxy_path = /_channelstream
592 592
593 593
594 594 ; ##############################
595 595 ; MAIN RHODECODE DATABASE CONFIG
596 596 ; ##############################
597 597
598 598 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
599 599 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
600 600 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
601 601 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
602 602 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
603 603
604 604 sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30
605 605
606 606 ; see sqlalchemy docs for other advanced settings
607 607 ; print the sql statements to output
608 608 sqlalchemy.db1.echo = false
609 609
610 610 ; recycle the connections after this amount of seconds
611 611 sqlalchemy.db1.pool_recycle = 3600
612 612
613 613 ; the number of connections to keep open inside the connection pool.
614 614 ; 0 indicates no limit
615 615 ; the general calculus with gevent is:
616 616 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
617 617 ; then increase pool size + max overflow so that they add up to 500.
618 618 #sqlalchemy.db1.pool_size = 5
619 619
620 620 ; The number of connections to allow in connection pool "overflow", that is
621 621 ; connections that can be opened above and beyond the pool_size setting,
622 622 ; which defaults to five.
623 623 #sqlalchemy.db1.max_overflow = 10
624 624
625 625 ; Connection check ping, used to detect broken database connections
626 626 ; could be enabled to better handle cases if MySQL has gone away errors
627 627 #sqlalchemy.db1.ping_connection = true
628 628
629 629 ; ##########
630 630 ; VCS CONFIG
631 631 ; ##########
632 632 vcs.server.enable = true
633 633 vcs.server = vcsserver:10010
634 634
635 635 ; Web server connectivity protocol, responsible for web based VCS operations
636 636 ; Available protocols are:
637 637 ; `http` - use http-rpc backend (default)
638 638 vcs.server.protocol = http
639 639
640 640 ; Push/Pull operations protocol, available options are:
641 641 ; `http` - use http-rpc backend (default)
642 642 vcs.scm_app_implementation = http
643 643
644 644 ; Push/Pull operations hooks protocol, available options are:
645 645 ; `http` - use http-rpc backend (default)
646 646 ; `celery` - use celery based hooks
647 647 #DEPRECATED:vcs.hooks.protocol = http
648 vcs.hooks.protocol = http
648 vcs.hooks.protocol.v2 = celery
649 649
650 650 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
651 651 ; accessible via network.
652 652 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
653 653 vcs.hooks.host = *
654 654
655 655 ; Start VCSServer with this instance as a subprocess, useful for development
656 656 vcs.start_server = false
657 657
658 658 ; List of enabled VCS backends, available options are:
659 659 ; `hg` - mercurial
660 660 ; `git` - git
661 661 ; `svn` - subversion
662 662 vcs.backends = hg, git, svn
663 663
664 664 ; Wait this number of seconds before killing connection to the vcsserver
665 665 vcs.connection_timeout = 3600
666 666
667 667 ; Cache flag to cache vcsserver remote calls locally
668 668 ; It uses cache_region `cache_repo`
669 669 vcs.methods.cache = false
670 670
671 671 ; Filesystem location where Git lfs objects should be stored
672 672 vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store
673 673
674 674 ; Filesystem location where Mercurial largefile objects should be stored
675 675 vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store
676 676
677 677 ; ####################################################
678 678 ; Subversion proxy support (mod_dav_svn)
679 679 ; Maps RhodeCode repo groups into SVN paths for Apache
680 680 ; ####################################################
681 681
682 682 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
683 683 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
684 684 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
685 685 #vcs.svn.compatible_version = 1.8
686 686
687 687 ; Redis connection settings for svn integrations logic
688 688 ; This connection string needs to be the same on ce and vcsserver
689 689 vcs.svn.redis_conn = redis://redis:6379/0
690 690
691 691 ; Enable SVN proxy of requests over HTTP
692 692 vcs.svn.proxy.enabled = true
693 693
694 694 ; host to connect to running SVN subsystem
695 695 vcs.svn.proxy.host = http://svn:8090
696 696
697 697 ; Enable or disable the config file generation.
698 698 svn.proxy.generate_config = false
699 699
700 700 ; Generate config file with `SVNListParentPath` set to `On`.
701 701 svn.proxy.list_parent_path = true
702 702
703 703 ; Set location and file name of generated config file.
704 704 svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf
705 705
706 706 ; alternative mod_dav config template. This needs to be a valid mako template
707 707 ; Example template can be found in the source code:
708 708 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
709 709 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
710 710
711 711 ; Used as a prefix to the `Location` block in the generated config file.
712 712 ; In most cases it should be set to `/`.
713 713 svn.proxy.location_root = /
714 714
715 715 ; Command to reload the mod dav svn configuration on change.
716 716 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
717 717 ; Make sure user who runs RhodeCode process is allowed to reload Apache
718 718 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
719 719
720 720 ; If the timeout expires before the reload command finishes, the command will
721 721 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
722 722 #svn.proxy.reload_timeout = 10
723 723
724 724 ; ####################
725 725 ; SSH Support Settings
726 726 ; ####################
727 727
728 728 ; Defines if a custom authorized_keys file should be created and written on
729 729 ; any change of user SSH keys. Setting this to false also disables the possibility
730 730 ; of adding SSH keys by users from the web interface. Super admins can still
731 731 ; manage SSH Keys.
732 732 ssh.generate_authorized_keyfile = true
733 733
734 734 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
735 735 # ssh.authorized_keys_ssh_opts =
736 736
737 737 ; Path to the authorized_keys file where the generated entries are placed.
738 738 ; It is possible to have multiple key files specified in `sshd_config` e.g.
739 739 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
740 740 ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode
741 741
742 742 ; Command to execute the SSH wrapper. The binary is available in the
743 743 ; RhodeCode installation directory.
744 744 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
745 745 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
746 746 #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
747 747 ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
748 748
749 749 ; Allow shell when executing the ssh-wrapper command
750 750 ssh.wrapper_cmd_allow_shell = false
751 751
752 752 ; Enables logging, and detailed output sent back to the client during SSH
753 753 ; operations. Useful for debugging, shouldn't be used in production.
754 754 ssh.enable_debug_logging = true
755 755
756 756 ; Paths to binary executables. By default they are the plain names, but we can
757 757 ; override them if we want to use custom ones
758 758 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
759 759 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
760 760 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
761 761
762 762 ; Enables SSH key generator web interface. Disabling this still allows users
763 763 ; to add their own keys.
764 764 ssh.enable_ui_key_generator = true
765 765
766 766 ; Statsd client config, this is used to send metrics to statsd
767 767 ; We recommend setting statsd_exported and scrape them using Prometheus
768 768 #statsd.enabled = false
769 769 #statsd.statsd_host = 0.0.0.0
770 770 #statsd.statsd_port = 8125
771 771 #statsd.statsd_prefix =
772 772 #statsd.statsd_ipv6 = false
773 773
774 774 ; Configure logging automatically at server startup. Set to false
775 775 ; to use the below custom logging config.
776 776 ; RC_LOGGING_FORMATTER
777 777 ; RC_LOGGING_LEVEL
778 778 ; env variables can control the settings for logging in case of autoconfigure
779 779
780 780 logging.autoconfigure = false
781 781
782 782 ; specify your own custom logging config file to configure logging
783 783 #logging.logging_conf_file = /path/to/custom_logging.ini
784 784
785 785 ; Dummy marker to add new entries after.
786 786 ; Add any custom entries below. Please don't remove this marker.
787 787 custom.conf = 1
788 788
789 789
790 790 ; #####################
791 791 ; LOGGING CONFIGURATION
792 792 ; #####################
793 793
794 794 [loggers]
795 795 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile
796 796
797 797 [handlers]
798 798 keys = console, console_sql
799 799
800 800 [formatters]
801 801 keys = generic, json, color_formatter, color_formatter_sql
802 802
803 803 ; #######
804 804 ; LOGGERS
805 805 ; #######
806 806 [logger_root]
807 807 level = NOTSET
808 808 handlers = console
809 809
810 810 [logger_routes]
811 811 level = DEBUG
812 812 handlers =
813 813 qualname = routes.middleware
814 814 ## "level = DEBUG" logs the route matched and routing variables.
815 815 propagate = 1
816 816
817 817 [logger_sqlalchemy]
818 818 level = INFO
819 819 handlers = console_sql
820 820 qualname = sqlalchemy.engine
821 821 propagate = 0
822 822
823 823 [logger_beaker]
824 824 level = DEBUG
825 825 handlers =
826 826 qualname = beaker.container
827 827 propagate = 1
828 828
829 829 [logger_dogpile]
830 830 level = INFO
831 831 handlers = console
832 832 qualname = dogpile
833 833 propagate = 1
834 834
835 835 [logger_rhodecode]
836 836 level = DEBUG
837 837 handlers =
838 838 qualname = rhodecode
839 839 propagate = 1
840 840
841 841 [logger_ssh_wrapper]
842 842 level = DEBUG
843 843 handlers =
844 844 qualname = ssh_wrapper
845 845 propagate = 1
846 846
847 847 [logger_celery]
848 848 level = DEBUG
849 849 handlers =
850 850 qualname = celery
851 851
852 852
853 853 ; ########
854 854 ; HANDLERS
855 855 ; ########
856 856
857 857 [handler_console]
858 858 class = StreamHandler
859 859 args = (sys.stderr, )
860 860 level = DEBUG
861 861 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
862 862 ; This allows sending properly formatted logs to grafana loki or elasticsearch
863 863 formatter = generic
864 864
865 865 [handler_console_sql]
866 866 ; "level = DEBUG" logs SQL queries and results.
867 867 ; "level = INFO" logs SQL queries.
868 868 ; "level = WARN" logs neither. (Recommended for production systems.)
869 869 class = StreamHandler
870 870 args = (sys.stderr, )
871 871 level = WARN
872 872 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
873 873 ; This allows sending properly formatted logs to grafana loki or elasticsearch
874 874 formatter = generic
875 875
876 876 ; ##########
877 877 ; FORMATTERS
878 878 ; ##########
879 879
880 880 [formatter_generic]
881 881 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
882 882 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
883 883 datefmt = %Y-%m-%d %H:%M:%S
884 884
885 885 [formatter_color_formatter]
886 886 class = rhodecode.lib.logging_formatter.ColorFormatter
887 887 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
888 888 datefmt = %Y-%m-%d %H:%M:%S
889 889
890 890 [formatter_color_formatter_sql]
891 891 class = rhodecode.lib.logging_formatter.ColorFormatterSql
892 892 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
893 893 datefmt = %Y-%m-%d %H:%M:%S
894 894
895 895 [formatter_json]
896 896 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
897 897 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
General Comments 0
You need to be logged in to leave comments. Login now