@@ -1,55 +1,56 @@
 # -*- coding: utf-8 -*-
 
 # Copyright (C) 2016-2020 RhodeCode GmbH
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License, version 3
 # (only), as published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 
 import logging
 
 from rhodecode import events
 from rhodecode.lib import rc_cache
 
 log = logging.getLogger(__name__)
 
 # names of namespaces used for different permission related cached
 # during flush operation we need to take care of all those
 cache_namespaces = [
     'cache_user_auth.{}',
     'cache_user_repo_acl_ids.{}',
     'cache_user_user_group_acl_ids.{}',
     'cache_user_repo_group_acl_ids.{}'
 ]
 
 
 def trigger_user_permission_flush(event):
     """
     Subscriber to the `UserPermissionsChange`. This triggers the
     automatic flush of permission caches, so the users affected receive new permissions
     Right Away
     """
-
+    invalidate = True
     affected_user_ids = set(event.user_ids)
     for user_id in affected_user_ids:
         for cache_namespace_uid_tmpl in cache_namespaces:
             cache_namespace_uid = cache_namespace_uid_tmpl.format(user_id)
-            del_keys = rc_cache.clear_cache_namespace('cache_perms', cache_namespace_uid)
-            log.debug('Deleted %s cache keys for user_id: %s and namespace %s',
+            del_keys = rc_cache.clear_cache_namespace(
+                'cache_perms', cache_namespace_uid, invalidate=invalidate)
+            log.debug('Invalidated %s cache keys for user_id: %s and namespace %s',
                       del_keys, user_id, cache_namespace_uid)
 
 
 def includeme(config):
     config.add_subscriber(trigger_user_permission_flush, events.UserPermissionsChange)
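
The subscriber above now soft-invalidates the per-user permission namespaces instead of hard-deleting their keys (see `clear_cache_namespace` in the next file). A minimal sketch of how it gets exercised; `events.trigger` and the `UserPermissionsChange` constructor signature are assumptions inferred from how `event.user_ids` is read above, not part of this change:

    from rhodecode import events

    def change_user_permissions(user_id):
        # ... persist the permission change first ...
        # firing the event runs trigger_user_permission_flush, which flushes
        # all four per-user permission namespaces, so the next request
        # recomputes permissions instead of serving stale cached ones
        events.trigger(events.UserPermissionsChange([user_id]))
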
@@ -1,415 +1,418 @@
 # -*- coding: utf-8 -*-
 
 # Copyright (C) 2015-2020 RhodeCode GmbH
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License, version 3
 # (only), as published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 import os
 import time
 import logging
 import functools
 import threading
 
 from dogpile.cache import CacheRegion
 from dogpile.cache.util import compat
 
 import rhodecode
 from rhodecode.lib.utils import safe_str, sha1
 from rhodecode.lib.utils2 import safe_unicode, str2bool
 from rhodecode.model.db import Session, CacheKey, IntegrityError
 
 from rhodecode.lib.rc_cache import cache_key_meta
 from rhodecode.lib.rc_cache import region_meta
 
 log = logging.getLogger(__name__)
 
 
 def isCython(func):
     """
     Private helper that checks if a function is a cython function.
     """
     return func.__class__.__name__ == 'cython_function_or_method'
 
 
 class RhodeCodeCacheRegion(CacheRegion):
 
     def conditional_cache_on_arguments(
             self, namespace=None,
             expiration_time=None,
             should_cache_fn=None,
             to_str=compat.string_type,
             function_key_generator=None,
             condition=True):
         """
         Custom conditional decorator, that will not touch any dogpile internals if
         condition isn't meet. This works a bit different than should_cache_fn
         And it's faster in cases we don't ever want to compute cached values
         """
         expiration_time_is_callable = compat.callable(expiration_time)
 
         if function_key_generator is None:
             function_key_generator = self.function_key_generator
 
         # workaround for py2 and cython problems, this block should be removed
         # once we've migrated to py3
         if 'cython' == 'cython':
             def decorator(fn):
                 if to_str is compat.string_type:
                     # backwards compatible
                     key_generator = function_key_generator(namespace, fn)
                 else:
                     key_generator = function_key_generator(namespace, fn, to_str=to_str)
 
                 @functools.wraps(fn)
                 def decorate(*arg, **kw):
                     key = key_generator(*arg, **kw)
 
                     @functools.wraps(fn)
                     def creator():
                         return fn(*arg, **kw)
 
                     if not condition:
                         return creator()
 
                     timeout = expiration_time() if expiration_time_is_callable \
                         else expiration_time
 
                     return self.get_or_create(key, creator, timeout, should_cache_fn)
 
                 def invalidate(*arg, **kw):
                     key = key_generator(*arg, **kw)
                     self.delete(key)
 
                 def set_(value, *arg, **kw):
                     key = key_generator(*arg, **kw)
                     self.set(key, value)
 
                 def get(*arg, **kw):
                     key = key_generator(*arg, **kw)
                     return self.get(key)
 
                 def refresh(*arg, **kw):
                     key = key_generator(*arg, **kw)
                     value = fn(*arg, **kw)
                     self.set(key, value)
                     return value
 
                 decorate.set = set_
                 decorate.invalidate = invalidate
                 decorate.refresh = refresh
                 decorate.get = get
                 decorate.original = fn
                 decorate.key_generator = key_generator
                 decorate.__wrapped__ = fn
 
                 return decorate
             return decorator
 
         def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
 
             if not condition:
                 log.debug('Calling un-cached func:%s', user_func.func_name)
                 return user_func(*arg, **kw)
 
             key = key_generator(*arg, **kw)
 
             timeout = expiration_time() if expiration_time_is_callable \
                 else expiration_time
 
             log.debug('Calling cached fn:%s', user_func.func_name)
             return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))
 
         def cache_decorator(user_func):
             if to_str is compat.string_type:
                 # backwards compatible
                 key_generator = function_key_generator(namespace, user_func)
             else:
                 key_generator = function_key_generator(namespace, user_func, to_str=to_str)
 
             def refresh(*arg, **kw):
                 """
                 Like invalidate, but regenerates the value instead
                 """
                 key = key_generator(*arg, **kw)
                 value = user_func(*arg, **kw)
                 self.set(key, value)
                 return value
 
             def invalidate(*arg, **kw):
                 key = key_generator(*arg, **kw)
                 self.delete(key)
 
             def set_(value, *arg, **kw):
                 key = key_generator(*arg, **kw)
                 self.set(key, value)
 
             def get(*arg, **kw):
                 key = key_generator(*arg, **kw)
                 return self.get(key)
 
             user_func.set = set_
             user_func.invalidate = invalidate
             user_func.get = get
             user_func.refresh = refresh
             user_func.key_generator = key_generator
             user_func.original = user_func
 
             # Use `decorate` to preserve the signature of :param:`user_func`.
             return decorator.decorate(user_func, functools.partial(
                 get_or_create_for_user_func, key_generator))
 
         return cache_decorator
 
 
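`conditional_cache_on_arguments` differs from plain dogpile `cache_on_arguments` in that `condition=False` bypasses dogpile entirely and calls the function directly, which is cheaper than caching a value only to discard it. A hedged usage sketch; the region and namespace names and `expensive_permission_calculation` are illustrative only:

    region = get_or_create_region('cache_perms', 'cache_user_auth.1')

    @region.conditional_cache_on_arguments(namespace='cache_user_auth.1',
                                           condition=True)
    def compute_perm_tree(cache_name, user_id):
        return expensive_permission_calculation(user_id)  # hypothetical helper

    result = compute_perm_tree('perm_tree', 1)          # computed once, then cached
    compute_perm_tree.invalidate('perm_tree', 1)        # delete this one key
    result = compute_perm_tree.refresh('perm_tree', 1)  # force recompute and store

The `invalidate`, `refresh`, `set` and `get` helpers used here are the attributes attached to the decorated function in the class above.
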
 def make_region(*arg, **kw):
     return RhodeCodeCacheRegion(*arg, **kw)
 
 
 def get_default_cache_settings(settings, prefixes=None):
     prefixes = prefixes or []
     cache_settings = {}
     for key in settings.keys():
         for prefix in prefixes:
             if key.startswith(prefix):
                 name = key.split(prefix)[1].strip()
                 val = settings[key]
                 if isinstance(val, compat.string_types):
                     val = val.strip()
                 cache_settings[name] = val
     return cache_settings
 
 
 def compute_key_from_params(*args):
     """
     Helper to compute key from given params to be used in cache manager
     """
     return sha1("_".join(map(safe_str, args)))
 
 
 def backend_key_generator(backend):
     """
     Special wrapper that also sends over the backend to the key generator
     """
     def wrapper(namespace, fn):
         return key_generator(backend, namespace, fn)
     return wrapper
 
 
 def key_generator(backend, namespace, fn):
     fname = fn.__name__
 
     def generate_key(*args):
         backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
         namespace_pref = namespace or 'default_namespace'
         arg_key = compute_key_from_params(*args)
         final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)
 
         return final_key
 
     return generate_key
 
 
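`generate_key` gives every cached value a key of the shape `<backend_prefix>:<namespace>:<function_name>_<sha1-of-args>`; the namespace segment is what lets `clear_cache_namespace` below enumerate all keys of one user or repo with a prefix match. A worked illustration (the prefix, namespace and function name are hypothetical):

    args = ('perm_tree', 1)
    arg_key = compute_key_from_params(*args)  # sha1('perm_tree_1')
    key = "{}:{}:{}_{}".format('file_namespace_backend', 'cache_user_auth.1',
                               'compute_perm_tree', arg_key)
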
 def get_or_create_region(region_name, region_namespace=None):
     from rhodecode.lib.rc_cache.backends import FileNamespaceBackend
     region_obj = region_meta.dogpile_cache_regions.get(region_name)
     if not region_obj:
         raise EnvironmentError(
             'Region `{}` not in configured: {}.'.format(
                 region_name, region_meta.dogpile_cache_regions.keys()))
 
     region_uid_name = '{}:{}'.format(region_name, region_namespace)
     if isinstance(region_obj.actual_backend, FileNamespaceBackend):
         region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
         if region_exist:
             log.debug('Using already configured region: %s', region_namespace)
             return region_exist
         cache_dir = region_meta.dogpile_config_defaults['cache_dir']
         expiration_time = region_obj.expiration_time
 
         if not os.path.isdir(cache_dir):
             os.makedirs(cache_dir)
         new_region = make_region(
             name=region_uid_name,
             function_key_generator=backend_key_generator(region_obj.actual_backend)
         )
         namespace_filename = os.path.join(
             cache_dir, "{}.cache.dbm".format(region_namespace))
         # special type that allows 1db per namespace
         new_region.configure(
             backend='dogpile.cache.rc.file_namespace',
             expiration_time=expiration_time,
             arguments={"filename": namespace_filename}
         )
 
         # create and save in region caches
         log.debug('configuring new region: %s', region_uid_name)
         region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region
 
     return region_obj
 
 
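`get_or_create_region` only resolves regions that were configured at startup (otherwise it raises `EnvironmentError`); for file-backed regions it lazily creates one dbm file per namespace under the configured `cache_dir`. A hedged sketch, assuming a configured `cache_repo` region:

    # returns the pre-configured region or, for a file-based backend, a
    # per-namespace clone backed by <cache_dir>/cache_repo.42.cache.dbm
    region = get_or_create_region('cache_repo', 'cache_repo.42')
    value = region.get('some_key')  # standard dogpile CacheRegion API from here
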
-def clear_cache_namespace(cache_region, cache_namespace_uid):
+def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
     region = get_or_create_region(cache_region, cache_namespace_uid)
     cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
     num_delete_keys = len(cache_keys)
-    if num_delete_keys:
-        region.delete_multi(cache_keys)
+    if invalidate:
+        region.invalidate(hard=False)
+    else:
+        if num_delete_keys:
+            region.delete_multi(cache_keys)
     return num_delete_keys
 
 
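The new `invalidate` flag selects between the two flush strategies: the default (`invalidate=False`) keeps the old behaviour of physically deleting the namespace keys, while `invalidate=True` calls dogpile's `region.invalidate(hard=False)`. Soft invalidation marks existing values as expired but lets concurrent readers keep serving the old value while a single writer regenerates it, so a flush no longer stampedes the backend. Both paths, sketched with a hypothetical namespace:

    # hard flush: enumerate and physically delete the keys
    removed = clear_cache_namespace('cache_repo', 'cache_repo.42')

    # soft flush: keys stay in the backend, but the region treats anything
    # written before this point as expired
    removed = clear_cache_namespace('cache_repo', 'cache_repo.42', invalidate=True)

Note that the returned count is the number of keys found in the namespace in either case.
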
 class ActiveRegionCache(object):
     def __init__(self, context, cache_data):
         self.context = context
         self.cache_data = cache_data
 
     def should_invalidate(self):
         return False
 
 
 class FreshRegionCache(object):
     def __init__(self, context, cache_data):
         self.context = context
         self.cache_data = cache_data
 
     def should_invalidate(self):
         return True
 
 
 class InvalidationContext(object):
     """
     usage::
 
         from rhodecode.lib import rc_cache
 
         cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1)
         region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid)
 
         @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True)
         def heavy_compute(cache_name, param1, param2):
             print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2))
 
         # invalidation namespace is shared namespace key for all process caches
         # we use it to send a global signal
         invalidation_namespace = 'repo_cache:1'
 
         inv_context_manager = rc_cache.InvalidationContext(
             uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace)
         with inv_context_manager as invalidation_context:
             args = ('one', 'two')
             # re-compute and store cache if we get invalidate signal
             if invalidation_context.should_invalidate():
                 result = heavy_compute.refresh(*args)
             else:
                 result = heavy_compute(*args)
 
             compute_time = inv_context_manager.compute_time
             log.debug('result computed in %.4fs', compute_time)
 
         # To send global invalidation signal, simply run
         CacheKey.set_invalidate(invalidation_namespace)
 
     """
 
     def __repr__(self):
         return '<InvalidationContext:{}[{}]>'.format(
             safe_str(self.cache_key), safe_str(self.uid))
 
     def __init__(self, uid, invalidation_namespace='',
                  raise_exception=False, thread_scoped=None):
         self.uid = uid
         self.invalidation_namespace = invalidation_namespace
         self.raise_exception = raise_exception
         self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT')
         self.thread_id = 'global'
 
         if thread_scoped is None:
             # if we set "default" we can override this via .ini settings
             thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped'))
 
         # Append the thread id to the cache key if this invalidation context
         # should be scoped to the current thread.
         if thread_scoped is True:
             self.thread_id = threading.current_thread().ident
 
         self.cache_key = compute_key_from_params(uid)
         self.cache_key = 'proc:{}|thread:{}|params:{}'.format(
             self.proc_id, self.thread_id, self.cache_key)
         self.compute_time = 0
 
     def get_or_create_cache_obj(self, cache_type, invalidation_namespace=''):
         invalidation_namespace = invalidation_namespace or self.invalidation_namespace
         # fetch all cache keys for this namespace and convert them to a map to find if we
         # have specific cache_key object registered. We do this because we want to have
         # all consistent cache_state_uid for newly registered objects
         cache_obj_map = CacheKey.get_namespace_map(invalidation_namespace)
         cache_obj = cache_obj_map.get(self.cache_key)
         log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key)
         if not cache_obj:
             new_cache_args = invalidation_namespace
             first_cache_obj = next(cache_obj_map.itervalues()) if cache_obj_map else None
             cache_state_uid = None
             if first_cache_obj:
                 cache_state_uid = first_cache_obj.cache_state_uid
             cache_obj = CacheKey(self.cache_key, cache_args=new_cache_args,
                                  cache_state_uid=cache_state_uid)
             cache_key_meta.cache_keys_by_pid.append(self.cache_key)
 
         return cache_obj
 
     def __enter__(self):
         """
         Test if current object is valid, and return CacheRegion function
         that does invalidation and calculation
         """
         log.debug('Entering cache invalidation check context: %s', self.invalidation_namespace)
         # register or get a new key based on uid
         self.cache_obj = self.get_or_create_cache_obj(cache_type=self.uid)
         cache_data = self.cache_obj.get_dict()
         self._start_time = time.time()
         if self.cache_obj.cache_active:
             # means our cache obj is existing and marked as it's
             # cache is not outdated, we return ActiveRegionCache
             self.skip_cache_active_change = True
 
             return ActiveRegionCache(context=self, cache_data=cache_data)
 
         # the key is either not existing or set to False, we return
         # the real invalidator which re-computes value. We additionally set
         # the flag to actually update the Database objects
         self.skip_cache_active_change = False
         return FreshRegionCache(context=self, cache_data=cache_data)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         # save compute time
         self.compute_time = time.time() - self._start_time
 
         if self.skip_cache_active_change:
             return
 
         try:
             self.cache_obj.cache_active = True
             Session().add(self.cache_obj)
             Session().commit()
         except IntegrityError:
             # if we catch integrity error, it means we inserted this object
             # assumption is that's really an edge race-condition case and
             # it's safe is to skip it
             Session().rollback()
         except Exception:
             log.exception('Failed to commit on cache key update')
             Session().rollback()
             if self.raise_exception:
                 raise
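
Taken together with the first file, the change routes a user-permission flush through soft invalidation: `trigger_user_permission_flush` passes `invalidate=True`, which lands in the `region.invalidate(hard=False)` branch above rather than in `delete_multi`. Roughly, the call chain per affected user amounts to this sketch (not a new API; `cache_namespaces` here stands for the four per-user templates defined in the first file):

    def flush_user_permission_namespaces(user_id, cache_namespaces):
        # what the subscriber in the first file now effectively does
        for ns_tmpl in cache_namespaces:
            clear_cache_namespace('cache_perms', ns_tmpl.format(user_id),
                                  invalidate=True)
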
@@ -1,1024 +1,1025 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright (C) 2010-2020 RhodeCode GmbH |
|
3 | # Copyright (C) 2010-2020 RhodeCode GmbH | |
4 | # |
|
4 | # | |
5 | # This program is free software: you can redistribute it and/or modify |
|
5 | # This program is free software: you can redistribute it and/or modify | |
6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
6 | # it under the terms of the GNU Affero General Public License, version 3 | |
7 | # (only), as published by the Free Software Foundation. |
|
7 | # (only), as published by the Free Software Foundation. | |
8 | # |
|
8 | # | |
9 | # This program is distributed in the hope that it will be useful, |
|
9 | # This program is distributed in the hope that it will be useful, | |
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | # GNU General Public License for more details. |
|
12 | # GNU General Public License for more details. | |
13 | # |
|
13 | # | |
14 | # You should have received a copy of the GNU Affero General Public License |
|
14 | # You should have received a copy of the GNU Affero General Public License | |
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | # |
|
16 | # | |
17 | # This program is dual-licensed. If you wish to learn more about the |
|
17 | # This program is dual-licensed. If you wish to learn more about the | |
18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
18 | # RhodeCode Enterprise Edition, including its added features, Support services, | |
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ | |
20 |
|
20 | |||
21 | """ |
|
21 | """ | |
22 | Scm model for RhodeCode |
|
22 | Scm model for RhodeCode | |
23 | """ |
|
23 | """ | |
24 |
|
24 | |||
25 | import os.path |
|
25 | import os.path | |
26 | import traceback |
|
26 | import traceback | |
27 | import logging |
|
27 | import logging | |
28 | import cStringIO |
|
28 | import cStringIO | |
29 |
|
29 | |||
30 | from sqlalchemy import func |
|
30 | from sqlalchemy import func | |
31 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
31 | from zope.cachedescriptors.property import Lazy as LazyProperty | |
32 |
|
32 | |||
33 | import rhodecode |
|
33 | import rhodecode | |
34 | from rhodecode.lib.vcs import get_backend |
|
34 | from rhodecode.lib.vcs import get_backend | |
35 | from rhodecode.lib.vcs.exceptions import RepositoryError, NodeNotChangedError |
|
35 | from rhodecode.lib.vcs.exceptions import RepositoryError, NodeNotChangedError | |
36 | from rhodecode.lib.vcs.nodes import FileNode |
|
36 | from rhodecode.lib.vcs.nodes import FileNode | |
37 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
37 | from rhodecode.lib.vcs.backends.base import EmptyCommit | |
38 | from rhodecode.lib import helpers as h, rc_cache |
|
38 | from rhodecode.lib import helpers as h, rc_cache | |
39 | from rhodecode.lib.auth import ( |
|
39 | from rhodecode.lib.auth import ( | |
40 | HasRepoPermissionAny, HasRepoGroupPermissionAny, |
|
40 | HasRepoPermissionAny, HasRepoGroupPermissionAny, | |
41 | HasUserGroupPermissionAny) |
|
41 | HasUserGroupPermissionAny) | |
42 | from rhodecode.lib.exceptions import NonRelativePathError, IMCCommitError |
|
42 | from rhodecode.lib.exceptions import NonRelativePathError, IMCCommitError | |
43 | from rhodecode.lib import hooks_utils |
|
43 | from rhodecode.lib import hooks_utils | |
44 | from rhodecode.lib.utils import ( |
|
44 | from rhodecode.lib.utils import ( | |
45 | get_filesystem_repos, make_db_config) |
|
45 | get_filesystem_repos, make_db_config) | |
46 | from rhodecode.lib.utils2 import (safe_str, safe_unicode) |
|
46 | from rhodecode.lib.utils2 import (safe_str, safe_unicode) | |
47 | from rhodecode.lib.system_info import get_system_info |
|
47 | from rhodecode.lib.system_info import get_system_info | |
48 | from rhodecode.model import BaseModel |
|
48 | from rhodecode.model import BaseModel | |
49 | from rhodecode.model.db import ( |
|
49 | from rhodecode.model.db import ( | |
50 | or_, false, |
|
50 | or_, false, | |
51 | Repository, CacheKey, UserFollowing, UserLog, User, RepoGroup, |
|
51 | Repository, CacheKey, UserFollowing, UserLog, User, RepoGroup, | |
52 | PullRequest, FileStore) |
|
52 | PullRequest, FileStore) | |
53 | from rhodecode.model.settings import VcsSettingsModel |
|
53 | from rhodecode.model.settings import VcsSettingsModel | |
54 | from rhodecode.model.validation_schema.validators import url_validator, InvalidCloneUrl |
|
54 | from rhodecode.model.validation_schema.validators import url_validator, InvalidCloneUrl | |
55 |
|
55 | |||
56 | log = logging.getLogger(__name__) |
|
56 | log = logging.getLogger(__name__) | |
57 |
|
57 | |||
58 |
|
58 | |||
59 | class UserTemp(object): |
|
59 | class UserTemp(object): | |
60 | def __init__(self, user_id): |
|
60 | def __init__(self, user_id): | |
61 | self.user_id = user_id |
|
61 | self.user_id = user_id | |
62 |
|
62 | |||
63 | def __repr__(self): |
|
63 | def __repr__(self): | |
64 | return "<%s('id:%s')>" % (self.__class__.__name__, self.user_id) |
|
64 | return "<%s('id:%s')>" % (self.__class__.__name__, self.user_id) | |
65 |
|
65 | |||
66 |
|
66 | |||
67 | class RepoTemp(object): |
|
67 | class RepoTemp(object): | |
68 | def __init__(self, repo_id): |
|
68 | def __init__(self, repo_id): | |
69 | self.repo_id = repo_id |
|
69 | self.repo_id = repo_id | |
70 |
|
70 | |||
71 | def __repr__(self): |
|
71 | def __repr__(self): | |
72 | return "<%s('id:%s')>" % (self.__class__.__name__, self.repo_id) |
|
72 | return "<%s('id:%s')>" % (self.__class__.__name__, self.repo_id) | |
73 |
|
73 | |||
74 |
|
74 | |||
75 | class SimpleCachedRepoList(object): |
|
75 | class SimpleCachedRepoList(object): | |
76 | """ |
|
76 | """ | |
77 | Lighter version of of iteration of repos without the scm initialisation, |
|
77 | Lighter version of of iteration of repos without the scm initialisation, | |
78 | and with cache usage |
|
78 | and with cache usage | |
79 | """ |
|
79 | """ | |
80 | def __init__(self, db_repo_list, repos_path, order_by=None, perm_set=None): |
|
80 | def __init__(self, db_repo_list, repos_path, order_by=None, perm_set=None): | |
81 | self.db_repo_list = db_repo_list |
|
81 | self.db_repo_list = db_repo_list | |
82 | self.repos_path = repos_path |
|
82 | self.repos_path = repos_path | |
83 | self.order_by = order_by |
|
83 | self.order_by = order_by | |
84 | self.reversed = (order_by or '').startswith('-') |
|
84 | self.reversed = (order_by or '').startswith('-') | |
85 | if not perm_set: |
|
85 | if not perm_set: | |
86 | perm_set = ['repository.read', 'repository.write', |
|
86 | perm_set = ['repository.read', 'repository.write', | |
87 | 'repository.admin'] |
|
87 | 'repository.admin'] | |
88 | self.perm_set = perm_set |
|
88 | self.perm_set = perm_set | |
89 |
|
89 | |||
90 | def __len__(self): |
|
90 | def __len__(self): | |
91 | return len(self.db_repo_list) |
|
91 | return len(self.db_repo_list) | |
92 |
|
92 | |||
93 | def __repr__(self): |
|
93 | def __repr__(self): | |
94 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) |
|
94 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) | |
95 |
|
95 | |||
96 | def __iter__(self): |
|
96 | def __iter__(self): | |
97 | for dbr in self.db_repo_list: |
|
97 | for dbr in self.db_repo_list: | |
98 | # check permission at this level |
|
98 | # check permission at this level | |
99 | has_perm = HasRepoPermissionAny(*self.perm_set)( |
|
99 | has_perm = HasRepoPermissionAny(*self.perm_set)( | |
100 | dbr.repo_name, 'SimpleCachedRepoList check') |
|
100 | dbr.repo_name, 'SimpleCachedRepoList check') | |
101 | if not has_perm: |
|
101 | if not has_perm: | |
102 | continue |
|
102 | continue | |
103 |
|
103 | |||
104 | tmp_d = { |
|
104 | tmp_d = { | |
105 | 'name': dbr.repo_name, |
|
105 | 'name': dbr.repo_name, | |
106 | 'dbrepo': dbr.get_dict(), |
|
106 | 'dbrepo': dbr.get_dict(), | |
107 | 'dbrepo_fork': dbr.fork.get_dict() if dbr.fork else {} |
|
107 | 'dbrepo_fork': dbr.fork.get_dict() if dbr.fork else {} | |
108 | } |
|
108 | } | |
109 | yield tmp_d |
|
109 | yield tmp_d | |
110 |
|
110 | |||
111 |
|
111 | |||
112 | class _PermCheckIterator(object): |
|
112 | class _PermCheckIterator(object): | |
113 |
|
113 | |||
114 | def __init__( |
|
114 | def __init__( | |
115 | self, obj_list, obj_attr, perm_set, perm_checker, |
|
115 | self, obj_list, obj_attr, perm_set, perm_checker, | |
116 | extra_kwargs=None): |
|
116 | extra_kwargs=None): | |
117 | """ |
|
117 | """ | |
118 | Creates iterator from given list of objects, additionally |
|
118 | Creates iterator from given list of objects, additionally | |
119 | checking permission for them from perm_set var |
|
119 | checking permission for them from perm_set var | |
120 |
|
120 | |||
121 | :param obj_list: list of db objects |
|
121 | :param obj_list: list of db objects | |
122 | :param obj_attr: attribute of object to pass into perm_checker |
|
122 | :param obj_attr: attribute of object to pass into perm_checker | |
123 | :param perm_set: list of permissions to check |
|
123 | :param perm_set: list of permissions to check | |
124 | :param perm_checker: callable to check permissions against |
|
124 | :param perm_checker: callable to check permissions against | |
125 | """ |
|
125 | """ | |
126 | self.obj_list = obj_list |
|
126 | self.obj_list = obj_list | |
127 | self.obj_attr = obj_attr |
|
127 | self.obj_attr = obj_attr | |
128 | self.perm_set = perm_set |
|
128 | self.perm_set = perm_set | |
129 | self.perm_checker = perm_checker(*self.perm_set) |
|
129 | self.perm_checker = perm_checker(*self.perm_set) | |
130 | self.extra_kwargs = extra_kwargs or {} |
|
130 | self.extra_kwargs = extra_kwargs or {} | |
131 |
|
131 | |||
132 | def __len__(self): |
|
132 | def __len__(self): | |
133 | return len(self.obj_list) |
|
133 | return len(self.obj_list) | |
134 |
|
134 | |||
135 | def __repr__(self): |
|
135 | def __repr__(self): | |
136 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) |
|
136 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) | |
137 |
|
137 | |||
138 | def __iter__(self): |
|
138 | def __iter__(self): | |
139 | for db_obj in self.obj_list: |
|
139 | for db_obj in self.obj_list: | |
140 | # check permission at this level |
|
140 | # check permission at this level | |
141 | # NOTE(marcink): the __dict__.get() is ~4x faster then getattr() |
|
141 | # NOTE(marcink): the __dict__.get() is ~4x faster then getattr() | |
142 | name = db_obj.__dict__.get(self.obj_attr, None) |
|
142 | name = db_obj.__dict__.get(self.obj_attr, None) | |
143 | if not self.perm_checker(name, self.__class__.__name__, **self.extra_kwargs): |
|
143 | if not self.perm_checker(name, self.__class__.__name__, **self.extra_kwargs): | |
144 | continue |
|
144 | continue | |
145 |
|
145 | |||
146 | yield db_obj |
|
146 | yield db_obj | |
147 |
|
147 | |||
148 |
|
148 | |||
149 | class RepoList(_PermCheckIterator): |
|
149 | class RepoList(_PermCheckIterator): | |
150 |
|
150 | |||
151 | def __init__(self, db_repo_list, perm_set=None, extra_kwargs=None): |
|
151 | def __init__(self, db_repo_list, perm_set=None, extra_kwargs=None): | |
152 | if not perm_set: |
|
152 | if not perm_set: | |
153 | perm_set = ['repository.read', 'repository.write', 'repository.admin'] |
|
153 | perm_set = ['repository.read', 'repository.write', 'repository.admin'] | |
154 |
|
154 | |||
155 | super(RepoList, self).__init__( |
|
155 | super(RepoList, self).__init__( | |
156 | obj_list=db_repo_list, |
|
156 | obj_list=db_repo_list, | |
157 | obj_attr='_repo_name', perm_set=perm_set, |
|
157 | obj_attr='_repo_name', perm_set=perm_set, | |
158 | perm_checker=HasRepoPermissionAny, |
|
158 | perm_checker=HasRepoPermissionAny, | |
159 | extra_kwargs=extra_kwargs) |
|
159 | extra_kwargs=extra_kwargs) | |
160 |
|
160 | |||
161 |
|
161 | |||
162 | class RepoGroupList(_PermCheckIterator): |
|
162 | class RepoGroupList(_PermCheckIterator): | |
163 |
|
163 | |||
164 | def __init__(self, db_repo_group_list, perm_set=None, extra_kwargs=None): |
|
164 | def __init__(self, db_repo_group_list, perm_set=None, extra_kwargs=None): | |
165 | if not perm_set: |
|
165 | if not perm_set: | |
166 | perm_set = ['group.read', 'group.write', 'group.admin'] |
|
166 | perm_set = ['group.read', 'group.write', 'group.admin'] | |
167 |
|
167 | |||
168 | super(RepoGroupList, self).__init__( |
|
168 | super(RepoGroupList, self).__init__( | |
169 | obj_list=db_repo_group_list, |
|
169 | obj_list=db_repo_group_list, | |
170 | obj_attr='_group_name', perm_set=perm_set, |
|
170 | obj_attr='_group_name', perm_set=perm_set, | |
171 | perm_checker=HasRepoGroupPermissionAny, |
|
171 | perm_checker=HasRepoGroupPermissionAny, | |
172 | extra_kwargs=extra_kwargs) |
|
172 | extra_kwargs=extra_kwargs) | |
173 |
|
173 | |||
174 |
|
174 | |||
175 | class UserGroupList(_PermCheckIterator): |
|
175 | class UserGroupList(_PermCheckIterator): | |
176 |
|
176 | |||
177 | def __init__(self, db_user_group_list, perm_set=None, extra_kwargs=None): |
|
177 | def __init__(self, db_user_group_list, perm_set=None, extra_kwargs=None): | |
178 | if not perm_set: |
|
178 | if not perm_set: | |
179 | perm_set = ['usergroup.read', 'usergroup.write', 'usergroup.admin'] |
|
179 | perm_set = ['usergroup.read', 'usergroup.write', 'usergroup.admin'] | |
180 |
|
180 | |||
181 | super(UserGroupList, self).__init__( |
|
181 | super(UserGroupList, self).__init__( | |
182 | obj_list=db_user_group_list, |
|
182 | obj_list=db_user_group_list, | |
183 | obj_attr='users_group_name', perm_set=perm_set, |
|
183 | obj_attr='users_group_name', perm_set=perm_set, | |
184 | perm_checker=HasUserGroupPermissionAny, |
|
184 | perm_checker=HasUserGroupPermissionAny, | |
185 | extra_kwargs=extra_kwargs) |
|
185 | extra_kwargs=extra_kwargs) | |
186 |
|
186 | |||
187 |
|
187 | |||
188 | class ScmModel(BaseModel): |
|
188 | class ScmModel(BaseModel): | |
189 | """ |
|
189 | """ | |
190 | Generic Scm Model |
|
190 | Generic Scm Model | |
191 | """ |
|
191 | """ | |
192 |
|
192 | |||
193 | @LazyProperty |
|
193 | @LazyProperty | |
194 | def repos_path(self): |
|
194 | def repos_path(self): | |
195 | """ |
|
195 | """ | |
196 | Gets the repositories root path from database |
|
196 | Gets the repositories root path from database | |
197 | """ |
|
197 | """ | |
198 |
|
198 | |||
199 | settings_model = VcsSettingsModel(sa=self.sa) |
|
199 | settings_model = VcsSettingsModel(sa=self.sa) | |
200 | return settings_model.get_repos_location() |
|
200 | return settings_model.get_repos_location() | |
201 |
|
201 | |||
202 | def repo_scan(self, repos_path=None): |
|
202 | def repo_scan(self, repos_path=None): | |
203 | """ |
|
203 | """ | |
204 | Listing of repositories in given path. This path should not be a |
|
204 | Listing of repositories in given path. This path should not be a | |
205 | repository itself. Return a dictionary of repository objects |
|
205 | repository itself. Return a dictionary of repository objects | |
206 |
|
206 | |||
207 | :param repos_path: path to directory containing repositories |
|
207 | :param repos_path: path to directory containing repositories | |
208 | """ |
|
208 | """ | |
209 |
|
209 | |||
210 | if repos_path is None: |
|
210 | if repos_path is None: | |
211 | repos_path = self.repos_path |
|
211 | repos_path = self.repos_path | |
212 |
|
212 | |||
213 | log.info('scanning for repositories in %s', repos_path) |
|
213 | log.info('scanning for repositories in %s', repos_path) | |
214 |
|
214 | |||
215 | config = make_db_config() |
|
215 | config = make_db_config() | |
216 | config.set('extensions', 'largefiles', '') |
|
216 | config.set('extensions', 'largefiles', '') | |
217 | repos = {} |
|
217 | repos = {} | |
218 |
|
218 | |||
219 | for name, path in get_filesystem_repos(repos_path, recursive=True): |
|
219 | for name, path in get_filesystem_repos(repos_path, recursive=True): | |
220 | # name need to be decomposed and put back together using the / |
|
220 | # name need to be decomposed and put back together using the / | |
221 | # since this is internal storage separator for rhodecode |
|
221 | # since this is internal storage separator for rhodecode | |
222 | name = Repository.normalize_repo_name(name) |
|
222 | name = Repository.normalize_repo_name(name) | |
223 |
|
223 | |||
224 | try: |
|
224 | try: | |
225 | if name in repos: |
|
225 | if name in repos: | |
226 | raise RepositoryError('Duplicate repository name %s ' |
|
226 | raise RepositoryError('Duplicate repository name %s ' | |
227 | 'found in %s' % (name, path)) |
|
227 | 'found in %s' % (name, path)) | |
228 | elif path[0] in rhodecode.BACKENDS: |
|
228 | elif path[0] in rhodecode.BACKENDS: | |
229 | backend = get_backend(path[0]) |
|
229 | backend = get_backend(path[0]) | |
230 | repos[name] = backend(path[1], config=config, |
|
230 | repos[name] = backend(path[1], config=config, | |
231 | with_wire={"cache": False}) |
|
231 | with_wire={"cache": False}) | |
232 | except OSError: |
|
232 | except OSError: | |
233 | continue |
|
233 | continue | |
234 | except RepositoryError: |
|
234 | except RepositoryError: | |
235 | log.exception('Failed to create a repo') |
|
235 | log.exception('Failed to create a repo') | |
236 | continue |
|
236 | continue | |
237 |
|
237 | |||
238 | log.debug('found %s paths with repositories', len(repos)) |
|
238 | log.debug('found %s paths with repositories', len(repos)) | |
239 | return repos |
|
239 | return repos | |
240 |
|
240 | |||
    def get_repos(self, all_repos=None, sort_key=None):
        """
        Get all repositories from the db, and for each repo create its
        backend instance and fill that backend with information from the
        database

        :param all_repos: list of repository names as strings
            give specific repositories list, good for filtering

        :param sort_key: initial sorting of repositories
        """
        if all_repos is None:
            all_repos = self.sa.query(Repository)\
                .filter(Repository.group_id == None)\
                .order_by(func.lower(Repository.repo_name)).all()
        repo_iter = SimpleCachedRepoList(
            all_repos, repos_path=self.repos_path, order_by=sort_key)
        return repo_iter

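    # Usage sketch for get_repos (hedged: `scm_model` is an assumed instance
    # of this model; sort_key is handed to SimpleCachedRepoList as order_by):
    #
    #   >>> repo_iter = scm_model.get_repos(sort_key='name')
    #   >>> for repo in repo_iter:   # lazily yields enriched repo entries
    #   ...     pass
    #
    # With all_repos=None, every top-level repository (group_id IS NULL) is
    # pulled from the database, ordered case-insensitively by name.
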
    def get_repo_groups(self, all_groups=None):
        if all_groups is None:
            all_groups = RepoGroup.query()\
                .filter(RepoGroup.group_parent_id == None).all()
        return [x for x in RepoGroupList(all_groups)]

    def mark_for_invalidation(self, repo_name, delete=False):
        """
        Mark caches of this repo as invalid in the database. The `delete`
        flag removes the cache entries instead.

        :param repo_name: the repo_name for which caches should be marked
            invalid, or deleted
        :param delete: delete the entry keys instead of setting a bool
            flag on them, and also purge the caches used by dogpile
        """
        repo = Repository.get_by_repo_name(repo_name)

        if repo:
            invalidation_namespace = CacheKey.REPO_INVALIDATION_NAMESPACE.format(
                repo_id=repo.repo_id)
            CacheKey.set_invalidate(invalidation_namespace, delete=delete)

            repo_id = repo.repo_id
            config = repo._config
            config.set('extensions', 'largefiles', '')
            repo.update_commit_cache(config=config, cs_cache=None)
            if delete:
                cache_namespace_uid = 'cache_repo.{}'.format(repo_id)
                rc_cache.clear_cache_namespace(
                    'cache_repo', cache_namespace_uid, invalidate=True)

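    # Usage sketch for mark_for_invalidation (hedged; `scm_model` is an
    # assumed instance of this model, the repo name illustrative):
    #
    #   >>> scm_model.mark_for_invalidation('some/repo')               # flag stale
    #   >>> scm_model.mark_for_invalidation('some/repo', delete=True)  # purge
    #
    # The first form only marks the CacheKey rows as invalid; the second also
    # clears the dogpile 'cache_repo.<repo_id>' namespace.
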
    def toggle_following_repo(self, follow_repo_id, user_id):

        f = self.sa.query(UserFollowing)\
            .filter(UserFollowing.follows_repo_id == follow_repo_id)\
            .filter(UserFollowing.user_id == user_id).scalar()

        if f is not None:
            try:
                self.sa.delete(f)
                return
            except Exception:
                log.error(traceback.format_exc())
                raise

        try:
            f = UserFollowing()
            f.user_id = user_id
            f.follows_repo_id = follow_repo_id
            self.sa.add(f)
        except Exception:
            log.error(traceback.format_exc())
            raise

    def toggle_following_user(self, follow_user_id, user_id):
        f = self.sa.query(UserFollowing)\
            .filter(UserFollowing.follows_user_id == follow_user_id)\
            .filter(UserFollowing.user_id == user_id).scalar()

        if f is not None:
            try:
                self.sa.delete(f)
                return
            except Exception:
                log.error(traceback.format_exc())
                raise

        try:
            f = UserFollowing()
            f.user_id = user_id
            f.follows_user_id = follow_user_id
            self.sa.add(f)
        except Exception:
            log.error(traceback.format_exc())
            raise

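    # Usage sketch for the toggle helpers (hedged; the ids are illustrative).
    # Each call flips the state: it deletes an existing UserFollowing row, or
    # creates one when none exists. Note that nothing here commits the
    # session; callers are expected to commit or roll back themselves.
    #
    #   >>> scm_model.toggle_following_repo(follow_repo_id=42, user_id=7)
    #   >>> scm_model.toggle_following_user(follow_user_id=3, user_id=7)
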
    def is_following_repo(self, repo_name, user_id, cache=False):
        r = self.sa.query(Repository)\
            .filter(Repository.repo_name == repo_name).scalar()

        f = self.sa.query(UserFollowing)\
            .filter(UserFollowing.follows_repository == r)\
            .filter(UserFollowing.user_id == user_id).scalar()

        return f is not None

    def is_following_user(self, username, user_id, cache=False):
        u = User.get_by_username(username)

        f = self.sa.query(UserFollowing)\
            .filter(UserFollowing.follows_user == u)\
            .filter(UserFollowing.user_id == user_id).scalar()

        return f is not None

    def get_followers(self, repo):
        repo = self._get_repo(repo)

        return self.sa.query(UserFollowing)\
            .filter(UserFollowing.follows_repository == repo).count()

    def get_forks(self, repo):
        repo = self._get_repo(repo)
        return self.sa.query(Repository)\
            .filter(Repository.fork == repo).count()

    def get_pull_requests(self, repo):
        repo = self._get_repo(repo)
        return self.sa.query(PullRequest)\
            .filter(PullRequest.target_repo == repo)\
            .filter(PullRequest.status != PullRequest.STATUS_CLOSED).count()

    def get_artifacts(self, repo):
        repo = self._get_repo(repo)
        return self.sa.query(FileStore)\
            .filter(FileStore.repo == repo)\
            .filter(or_(FileStore.hidden == None, FileStore.hidden == false())).count()

    def mark_as_fork(self, repo, fork, user):
        repo = self._get_repo(repo)
        fork = self._get_repo(fork)
        if fork and repo.repo_id == fork.repo_id:
            raise Exception("Cannot set repository as fork of itself")

        if fork and repo.repo_type != fork.repo_type:
            raise RepositoryError(
                "Cannot set repository as fork of a repository of another type")

        repo.fork = fork
        self.sa.add(repo)
        return repo

    def pull_changes(self, repo, username, remote_uri=None, validate_uri=True):
        dbrepo = self._get_repo(repo)
        remote_uri = remote_uri or dbrepo.clone_uri
        if not remote_uri:
            raise Exception("This repository doesn't have a clone uri")

        repo = dbrepo.scm_instance(cache=False)
        repo.config.clear_section('hooks')

        try:
            # NOTE(marcink): add extra validation so we skip invalid urls;
            # this is needed because these tasks can be executed via the
            # scheduler without proper validation of the remote_uri
            if validate_uri:
                config = make_db_config(clear_session=False)
                url_validator(remote_uri, dbrepo.repo_type, config)
        except InvalidCloneUrl:
            raise

        repo_name = dbrepo.repo_name
        try:
            # TODO: we need to make sure those operations call proper hooks!
            repo.fetch(remote_uri)

            self.mark_for_invalidation(repo_name)
        except Exception:
            log.error(traceback.format_exc())
            raise

    def push_changes(self, repo, username, remote_uri=None, validate_uri=True):
        dbrepo = self._get_repo(repo)
        remote_uri = remote_uri or dbrepo.push_uri
        if not remote_uri:
            raise Exception("This repository doesn't have a push uri")

        repo = dbrepo.scm_instance(cache=False)
        repo.config.clear_section('hooks')

        try:
            # NOTE(marcink): add extra validation so we skip invalid urls;
            # this is needed because these tasks can be executed via the
            # scheduler without proper validation of the remote_uri
            if validate_uri:
                config = make_db_config(clear_session=False)
                url_validator(remote_uri, dbrepo.repo_type, config)
        except InvalidCloneUrl:
            raise

        try:
            repo.push(remote_uri)
        except Exception:
            log.error(traceback.format_exc())
            raise

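    # Usage sketch for pull_changes/push_changes (hedged; the uri is
    # illustrative). Both validate the remote uri first (unless
    # validate_uri=False) and run the fetch/push with hooks cleared:
    #
    #   >>> scm_model.pull_changes('some/repo', 'admin',
    #   ...                        remote_uri='https://example.com/repo')
    #   >>> scm_model.push_changes('some/repo', 'admin')   # uses dbrepo.push_uri
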
    def commit_change(self, repo, repo_name, commit, user, author, message,
                      content, f_path):
        """
        Commits changes

        :param repo: SCM instance

        """
        user = self._get_user(user)

        # decoding here will ensure we have properly encoded values;
        # in any other case this will throw exceptions and deny the commit
        content = safe_str(content)
        path = safe_str(f_path)
        # message and author need to be unicode; the proper backend should
        # then translate that into the required type
        message = safe_unicode(message)
        author = safe_unicode(author)
        imc = repo.in_memory_commit
        imc.change(FileNode(path, content, mode=commit.get_file_mode(f_path)))
        try:
            # TODO: handle pre-push action !
            tip = imc.commit(
                message=message, author=author, parents=[commit],
                branch=commit.branch)
        except Exception as e:
            log.error(traceback.format_exc())
            raise IMCCommitError(str(e))
        finally:
            # always clear caches; if the commit fails we want a fresh object too
            self.mark_for_invalidation(repo_name)

        # We trigger the post-push action
        hooks_utils.trigger_post_push_hook(
            username=user.username, action='push_local', hook_type='post_push',
            repo_name=repo_name, repo_type=repo.alias, commit_ids=[tip.raw_id])
        return tip

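    # Usage sketch for commit_change (hedged: the names scm_model,
    # scm_instance, parent and request_user are placeholders, not APIs):
    #
    #   >>> tip = scm_model.commit_change(
    #   ...     repo=scm_instance, repo_name='some/repo', commit=parent,
    #   ...     user=request_user, author=u'J. Doe <jd@example.com>',
    #   ...     message=u'Edit README', content='new text', f_path='README.rst')
    #
    # The edit is staged on the backend's in-memory commit and committed on
    # top of `parent`, on the same branch.
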
    def _sanitize_path(self, f_path):
        if f_path.startswith('/') or f_path.startswith('./') or '../' in f_path:
            raise NonRelativePathError('%s is not a relative path' % f_path)
        if f_path:
            f_path = os.path.normpath(f_path)
        return f_path

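    # Behaviour sketch for _sanitize_path (doctest-style, illustrative):
    #
    #   >>> scm_model._sanitize_path('docs/readme.rst')   # ok, gets normalized
    #   >>> scm_model._sanitize_path('/etc/passwd')       # NonRelativePathError
    #   >>> scm_model._sanitize_path('a/../../b')         # NonRelativePathError
    #
    # Anything absolute, starting with './', or containing '../' is rejected
    # before os.path.normpath is applied.
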
    def get_dirnode_metadata(self, request, commit, dir_node):
        if not dir_node.is_dir():
            return []

        data = []
        for node in dir_node:
            if not node.is_file():
                # we only collect file nodes, skip everything else
                continue

            last_commit = node.last_commit
            last_commit_date = last_commit.date
            data.append({
                'name': node.name,
                'size': h.format_byte_size_binary(node.size),
                'modified_at': h.format_date(last_commit_date),
                'modified_ts': last_commit_date.isoformat(),
                'revision': last_commit.revision,
                'short_id': last_commit.short_id,
                'message': h.escape(last_commit.message),
                'author': h.escape(last_commit.author),
                'user_profile': h.gravatar_with_user(
                    request, last_commit.author),
            })

        return data

    def get_nodes(self, repo_name, commit_id, root_path='/', flat=True,
                  extended_info=False, content=False, max_file_bytes=None):
        """
        Recursively walk the root dir and return a set of all paths found
        there, based on the repository walk function

        :param repo_name: name of repository
        :param commit_id: commit id for which to list nodes
        :param root_path: root path to list
        :param flat: return as a list, if False returns a dict with description
        :param extended_info: show additional info such as md5, binary, size etc
        :param content: add nodes content to the return data
        :param max_file_bytes: will not return file contents over this limit

        """
        _files = list()
        _dirs = list()
        try:
            _repo = self._get_repo(repo_name)
            commit = _repo.scm_instance().get_commit(commit_id=commit_id)
            root_path = root_path.lstrip('/')
            for __, dirs, files in commit.walk(root_path):

                for f in files:
                    _content = None
                    _data = f_name = f.unicode_path

                    if not flat:
                        _data = {
                            "name": h.escape(f_name),
                            "type": "file",
                        }
                        if extended_info:
                            _data.update({
                                "md5": f.md5,
                                "binary": f.is_binary,
                                "size": f.size,
                                "extension": f.extension,
                                "mimetype": f.mimetype,
                                "lines": f.lines()[0]
                            })

                        if content:
                            over_size_limit = (max_file_bytes is not None
                                               and f.size > max_file_bytes)
                            full_content = None
                            if not f.is_binary and not over_size_limit:
                                full_content = safe_str(f.content)

                            _data.update({
                                "content": full_content,
                            })
                    _files.append(_data)

                for d in dirs:
                    _data = d_name = d.unicode_path
                    if not flat:
                        _data = {
                            "name": h.escape(d_name),
                            "type": "dir",
                        }
                        if extended_info:
                            _data.update({
                                "md5": None,
                                "binary": None,
                                "size": None,
                                "extension": None,
                            })
                        if content:
                            _data.update({
                                "content": None
                            })
                    _dirs.append(_data)
        except RepositoryError:
            log.exception("Exception in get_nodes")
            raise

        return _dirs, _files

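    # Usage sketch for get_nodes (hedged; the commit id is illustrative):
    #
    #   >>> dirs, files = scm_model.get_nodes(
    #   ...     'some/repo', commit_id='tip', root_path='/',
    #   ...     flat=False, extended_info=True, max_file_bytes=1024 * 1024)
    #
    # With flat=True both lists contain plain paths; with flat=False each
    # entry is a dict with at least 'name' and 'type', plus the extended
    # metadata and, when requested, size-capped content.
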
    def get_quick_filter_nodes(self, repo_name, commit_id, root_path='/'):
        """
        Generate the list of files for the quick filter in the files view
        """

        _files = list()
        _dirs = list()
        try:
            _repo = self._get_repo(repo_name)
            commit = _repo.scm_instance().get_commit(commit_id=commit_id)
            root_path = root_path.lstrip('/')
            for __, dirs, files in commit.walk(root_path):

                for f in files:

                    _data = {
                        "name": h.escape(f.unicode_path),
                        "type": "file",
                    }

                    _files.append(_data)

                for d in dirs:

                    _data = {
                        "name": h.escape(d.unicode_path),
                        "type": "dir",
                    }

                    _dirs.append(_data)
        except RepositoryError:
            log.exception("Exception in get_quick_filter_nodes")
            raise

        return _dirs, _files

    def get_node(self, repo_name, commit_id, file_path,
                 extended_info=False, content=False, max_file_bytes=None, cache=True):
        """
        Retrieve a single node from the given commit.
        """
        try:

            _repo = self._get_repo(repo_name)
            commit = _repo.scm_instance().get_commit(commit_id=commit_id)

            file_node = commit.get_node(file_path)
            if file_node.is_dir():
                raise RepositoryError('The given path is a directory')

            _content = None
            f_name = file_node.unicode_path

            file_data = {
                "name": h.escape(f_name),
                "type": "file",
            }

            if extended_info:
                file_data.update({
                    "extension": file_node.extension,
                    "mimetype": file_node.mimetype,
                })

            if cache:
                md5 = file_node.md5
                is_binary = file_node.is_binary
                size = file_node.size
            else:
                is_binary, md5, size, _content = file_node.metadata_uncached()

            file_data.update({
                "md5": md5,
                "binary": is_binary,
                "size": size,
            })

            if content and cache:
                # get content + cache
                size = file_node.size
                over_size_limit = (max_file_bytes is not None and size > max_file_bytes)
                full_content = None
                all_lines = 0
                if not file_node.is_binary and not over_size_limit:
                    full_content = safe_unicode(file_node.content)
                    all_lines, empty_lines = file_node.count_lines(full_content)

                file_data.update({
                    "content": full_content,
                    "lines": all_lines
                })
            elif content:
                # get content *without* cache
                if _content is None:
                    is_binary, md5, size, _content = file_node.metadata_uncached()

                over_size_limit = (max_file_bytes is not None and size > max_file_bytes)
                full_content = None
                all_lines = 0
                if not is_binary and not over_size_limit:
                    full_content = safe_unicode(_content)
                    all_lines, empty_lines = file_node.count_lines(full_content)

                file_data.update({
                    "content": full_content,
                    "lines": all_lines
                })

        except RepositoryError:
            log.exception("Exception in get_node")
            raise

        return file_data

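    # Usage sketch for get_node (hedged values). cache=False forces a
    # metadata_uncached() round trip instead of the cached node properties:
    #
    #   >>> file_data = scm_model.get_node(
    #   ...     'some/repo', commit_id='tip', file_path='README.rst',
    #   ...     extended_info=True, content=True, cache=False)
    #   >>> file_data['binary'], file_data['size'], file_data['lines']
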
    def get_fts_data(self, repo_name, commit_id, root_path='/'):
        """
        Fetch the node tree for usage in full text search
        """

        tree_info = list()

        try:
            _repo = self._get_repo(repo_name)
            commit = _repo.scm_instance().get_commit(commit_id=commit_id)
            root_path = root_path.lstrip('/')
            for __, dirs, files in commit.walk(root_path):

                for f in files:
                    is_binary, md5, size, _content = f.metadata_uncached()
                    _data = {
                        "name": f.unicode_path,
                        "md5": md5,
                        "extension": f.extension,
                        "binary": is_binary,
                        "size": size
                    }

                    tree_info.append(_data)

        except RepositoryError:
            log.exception("Exception in get_fts_data")
            raise

        return tree_info

    def create_nodes(self, user, repo, message, nodes, parent_commit=None,
                     author=None, trigger_push_hook=True):
        """
        Commits the given multiple nodes into the repo

        :param user: RhodeCode User object or user_id, the committer
        :param repo: RhodeCode Repository object
        :param message: commit message
        :param nodes: mapping {filename:{'content':content},...}
        :param parent_commit: parent commit, can be empty, then it's the
            initial commit
        :param author: author of the commit, can be different than the
            committer, only for git
        :param trigger_push_hook: trigger push hooks

        :returns: the new committed commit
        """

        user = self._get_user(user)
        scm_instance = repo.scm_instance(cache=False)

        processed_nodes = []
        for f_path in nodes:
            f_path = self._sanitize_path(f_path)
            content = nodes[f_path]['content']
            f_path = safe_str(f_path)
            # decoding here will ensure we have properly encoded values;
            # in any other case this will throw exceptions and deny the commit
            if isinstance(content, (basestring,)):
                content = safe_str(content)
            elif isinstance(content, (file, cStringIO.OutputType,)):
                content = content.read()
            else:
                raise Exception('Content is of unrecognized type %s' % (
                    type(content)
                ))
            processed_nodes.append((f_path, content))

        message = safe_unicode(message)
        commiter = user.full_contact
        author = safe_unicode(author) if author else commiter

        imc = scm_instance.in_memory_commit

        if not parent_commit:
            parent_commit = EmptyCommit(alias=scm_instance.alias)

        if isinstance(parent_commit, EmptyCommit):
            # EmptyCommit means we're editing an empty repository
            parents = None
        else:
            parents = [parent_commit]
        # add multiple nodes
        for path, content in processed_nodes:
            imc.add(FileNode(path, content=content))
        # TODO: handle pre push scenario
        tip = imc.commit(message=message,
                         author=author,
                         parents=parents,
                         branch=parent_commit.branch)

        self.mark_for_invalidation(repo.repo_name)
        if trigger_push_hook:
            hooks_utils.trigger_post_push_hook(
                username=user.username, action='push_local',
                repo_name=repo.repo_name, repo_type=scm_instance.alias,
                hook_type='post_push',
                commit_ids=[tip.raw_id])
        return tip

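    # Usage sketch for create_nodes (hedged: request_user and db_repo are
    # placeholders). The nodes mapping mirrors the docstring shape,
    # {filename: {'content': ...}}:
    #
    #   >>> tip = scm_model.create_nodes(
    #   ...     user=request_user, repo=db_repo, message=u'Add docs',
    #   ...     nodes={'docs/index.rst': {'content': 'Welcome'}},
    #   ...     parent_commit=None)   # None means an initial/empty-repo commit
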
    def update_nodes(self, user, repo, message, nodes, parent_commit=None,
                     author=None, trigger_push_hook=True):
        user = self._get_user(user)
        scm_instance = repo.scm_instance(cache=False)

        message = safe_unicode(message)
        commiter = user.full_contact
        author = safe_unicode(author) if author else commiter

        imc = scm_instance.in_memory_commit

        if not parent_commit:
            parent_commit = EmptyCommit(alias=scm_instance.alias)

        if isinstance(parent_commit, EmptyCommit):
            # EmptyCommit means we're editing an empty repository
            parents = None
        else:
            parents = [parent_commit]

        # add multiple nodes
        for _filename, data in nodes.items():
            # the new filename, which can be a rename of the old one; also
            # sanitize the paths against tricks with relative paths like ../../
            filename = self._sanitize_path(data['filename'])
            old_filename = self._sanitize_path(_filename)
            content = data['content']
            file_mode = data.get('mode')
            filenode = FileNode(old_filename, content=content, mode=file_mode)
            op = data['op']
            if op == 'add':
                imc.add(filenode)
            elif op == 'del':
                imc.remove(filenode)
            elif op == 'mod':
                if filename != old_filename:
                    # TODO: handle renames more efficiently, needs vcs lib changes
                    imc.remove(filenode)
                    imc.add(FileNode(filename, content=content, mode=file_mode))
                else:
                    imc.change(filenode)

        try:
            # TODO: handle pre push scenario commit changes
            tip = imc.commit(message=message,
                             author=author,
                             parents=parents,
                             branch=parent_commit.branch)
        except NodeNotChangedError:
            raise
        except Exception as e:
            log.exception("Unexpected exception during call to imc.commit")
            raise IMCCommitError(str(e))
        finally:
            # always clear caches; if the commit fails we want a fresh object too
            self.mark_for_invalidation(repo.repo_name)

        if trigger_push_hook:
            hooks_utils.trigger_post_push_hook(
                username=user.username, action='push_local', hook_type='post_push',
                repo_name=repo.repo_name, repo_type=scm_instance.alias,
                commit_ids=[tip.raw_id])

        return tip

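    # Usage sketch for update_nodes (hedged: request_user, db_repo and src
    # are placeholders). Each key is the old path; 'op' is one of 'add',
    # 'mod' or 'del', and a differing 'filename' on a 'mod' op is treated
    # as a rename (remove + add):
    #
    #   >>> tip = scm_model.update_nodes(
    #   ...     user=request_user, repo=db_repo, message=u'Rename module',
    #   ...     nodes={'old.py': {'filename': 'new.py', 'content': src,
    #   ...                       'op': 'mod'}},
    #   ...     parent_commit=db_repo.scm_instance().get_commit())
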
    def delete_nodes(self, user, repo, message, nodes, parent_commit=None,
                     author=None, trigger_push_hook=True):
        """
        Deletes the given multiple nodes from `repo`

        :param user: RhodeCode User object or user_id, the committer
        :param repo: RhodeCode Repository object
        :param message: commit message
        :param nodes: mapping {filename:{'content':content},...}
        :param parent_commit: parent commit, can be empty, then it's the
            initial commit
        :param author: author of the commit, can be different than the
            committer, only for git
        :param trigger_push_hook: trigger push hooks

        :returns: the new commit after deletion
        """

        user = self._get_user(user)
        scm_instance = repo.scm_instance(cache=False)

        processed_nodes = []
        for f_path in nodes:
            f_path = self._sanitize_path(f_path)
            # content can be empty, but for compatibility it allows the same
            # dict structure as add_nodes
            content = nodes[f_path].get('content')
            processed_nodes.append((f_path, content))

        message = safe_unicode(message)
        commiter = user.full_contact
        author = safe_unicode(author) if author else commiter

        imc = scm_instance.in_memory_commit

        if not parent_commit:
            parent_commit = EmptyCommit(alias=scm_instance.alias)

        if isinstance(parent_commit, EmptyCommit):
            # EmptyCommit means we're editing an empty repository
            parents = None
        else:
            parents = [parent_commit]
        # remove multiple nodes
        for path, content in processed_nodes:
            imc.remove(FileNode(path, content=content))

        # TODO: handle pre push scenario
        tip = imc.commit(message=message,
                         author=author,
                         parents=parents,
                         branch=parent_commit.branch)

        self.mark_for_invalidation(repo.repo_name)
        if trigger_push_hook:
            hooks_utils.trigger_post_push_hook(
                username=user.username, action='push_local', hook_type='post_push',
                repo_name=repo.repo_name, repo_type=scm_instance.alias,
                commit_ids=[tip.raw_id])
        return tip

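    # Usage sketch for delete_nodes (hedged: request_user and db_repo are
    # placeholders). Content may be omitted; only the filenames matter, the
    # dict shape simply mirrors create_nodes:
    #
    #   >>> tip = scm_model.delete_nodes(
    #   ...     user=request_user, repo=db_repo, message=u'Drop old docs',
    #   ...     nodes={'docs/old.rst': {}})
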
    def strip(self, repo, commit_id, branch):
        scm_instance = repo.scm_instance(cache=False)
        scm_instance.config.clear_section('hooks')
        scm_instance.strip(commit_id, branch)
        self.mark_for_invalidation(repo.repo_name)

    def get_unread_journal(self):
        return self.sa.query(UserLog).count()

    @classmethod
    def backend_landing_ref(cls, repo_type):
        """
        Return a default landing ref based on a repository type.
        """

        landing_ref = {
            'hg': ('branch:default', 'default'),
            'git': ('branch:master', 'master'),
            'svn': ('rev:tip', 'latest tip'),
            'default': ('rev:tip', 'latest tip'),
        }

        return landing_ref.get(repo_type) or landing_ref['default']

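    # Behaviour sketch for backend_landing_ref (assuming the enclosing class
    # is exposed as ScmModel; the values come straight from the mapping
    # above, with unknown types falling back to the 'default' entry):
    #
    #   >>> ScmModel.backend_landing_ref('hg')
    #   ('branch:default', 'default')
    #   >>> ScmModel.backend_landing_ref('unknown')
    #   ('rev:tip', 'latest tip')
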
    def get_repo_landing_revs(self, translator, repo=None):
        """
        Generates select options with tags, branches and bookmarks (the
        latter for hg only), grouped by type

        :param repo:
        """
        _ = translator
        repo = self._get_repo(repo)

        if repo:
            repo_type = repo.repo_type
        else:
            repo_type = 'default'

        default_landing_ref, landing_ref_lbl = self.backend_landing_ref(repo_type)

        default_ref_options = [
            [default_landing_ref, landing_ref_lbl]
        ]
        default_choices = [
            default_landing_ref
        ]

        if not repo:
            return default_choices, default_ref_options

        repo = repo.scm_instance()

        ref_options = [('rev:tip', 'latest tip')]
        choices = ['rev:tip']

        # branches
        branch_group = [(u'branch:%s' % safe_unicode(b), safe_unicode(b)) for b in repo.branches]
        if not branch_group:
            # a new repo, or maybe one without any branch yet?
            branch_group = default_ref_options

        branches_group = (branch_group, _("Branches"))
        ref_options.append(branches_group)
        choices.extend([x[0] for x in branches_group[0]])

        # bookmarks for HG
        if repo.alias == 'hg':
            bookmarks_group = (
                [(u'book:%s' % safe_unicode(b), safe_unicode(b))
                 for b in repo.bookmarks],
                _("Bookmarks"))
            ref_options.append(bookmarks_group)
            choices.extend([x[0] for x in bookmarks_group[0]])

        # tags
        tags_group = (
            [(u'tag:%s' % safe_unicode(t), safe_unicode(t))
             for t in repo.tags],
            _("Tags"))
        ref_options.append(tags_group)
        choices.extend([x[0] for x in tags_group[0]])

        return choices, ref_options

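    # Shape sketch for get_repo_landing_revs (hedged: `_` is the translator
    # and db_repo a placeholder; the exact entries depend on the repo). The
    # first return value is a flat list of choice ids, the second a grouped
    # structure for a select widget, roughly:
    #
    #   >>> choices, ref_options = scm_model.get_repo_landing_revs(_, repo=db_repo)
    #   >>> ref_options[0]
    #   ('rev:tip', 'latest tip')
    #   >>> choices[0]
    #   'rev:tip'
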
    def get_server_info(self, environ=None):
        server_info = get_system_info(environ)
        return server_info