@@ -1,55 +1,56 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import logging |
|
22 | 22 | |
|
23 | 23 | from rhodecode import events |
|
24 | 24 | from rhodecode.lib import rc_cache |
|
25 | 25 | |
|
26 | 26 | log = logging.getLogger(__name__) |
|
27 | 27 | |
|
28 | 28 | # names of namespaces used for different permission-related caches; |
|
29 | 29 | # during a flush operation we need to take care of all of those |
|
30 | 30 | cache_namespaces = [ |
|
31 | 31 | 'cache_user_auth.{}', |
|
32 | 32 | 'cache_user_repo_acl_ids.{}', |
|
33 | 33 | 'cache_user_user_group_acl_ids.{}', |
|
34 | 34 | 'cache_user_repo_group_acl_ids.{}' |
|
35 | 35 | ] |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | def trigger_user_permission_flush(event): |
|
39 | 39 | """ |
|
40 | 40 | Subscriber to the `UserPermissionsChange` event. This triggers the |
|
41 | 41 | automatic flush of permission caches, so the affected users receive new permissions |
|
42 | 42 | right away |
|
43 | 43 | """ |
|
44 | ||
|
44 | invalidate = True | |
|
45 | 45 | affected_user_ids = set(event.user_ids) |
|
46 | 46 | for user_id in affected_user_ids: |
|
47 | 47 | for cache_namespace_uid_tmpl in cache_namespaces: |
|
48 | 48 | cache_namespace_uid = cache_namespace_uid_tmpl.format(user_id) |
|
49 | del_keys = rc_cache.clear_cache_namespace('cache_perms', cache_namespace_uid) |
|
50 | log.debug('Deleted %s cache keys for user_id: %s and namespace %s', |
|
49 | del_keys = rc_cache.clear_cache_namespace( |
|
50 | 'cache_perms', cache_namespace_uid, invalidate=invalidate) |
|
51 | log.debug('Invalidated %s cache keys for user_id: %s and namespace %s', |
|
51 | 52 | del_keys, user_id, cache_namespace_uid) |
|
52 | 53 | |
|
53 | 54 | |
|
54 | 55 | def includeme(config): |
|
55 | 56 | config.add_subscriber(trigger_user_permission_flush, events.UserPermissionsChange) |
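Note: the change above threads a new `invalidate = True` flag through the flush loop, so each per-user namespace is soft-invalidated rather than having its keys deleted one by one. A minimal sketch of how the subscriber is exercised, assuming `UserPermissionsChange` accepts the affected user ids (the handler itself only relies on `event.user_ids` being iterable):

    from rhodecode import events

    # Hypothetical event construction -- the subscriber only reads
    # `event.user_ids`, the ids of the users whose permissions changed.
    event = events.UserPermissionsChange(user_ids=[2, 3])

    # Flushes every namespace in `cache_namespaces` for users 2 and 3; with
    # invalidate=True the keys are marked stale instead of being deleted.
    trigger_user_permission_flush(event)

In a running Pyramid app the same handler is registered by `includeme()` via `config.add_subscriber(...)`, so the flush happens automatically whenever the event fires.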
@@ -1,415 +1,418 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2015-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | import os |
|
21 | 21 | import time |
|
22 | 22 | import logging |
|
23 | 23 | import functools |
|
24 | 24 | import threading |
|
25 | 25 | |
|
26 | 26 | from dogpile.cache import CacheRegion |
|
27 | 27 | from dogpile.cache.util import compat |
|
28 | 28 | |
|
29 | 29 | import rhodecode |
|
30 | 30 | from rhodecode.lib.utils import safe_str, sha1 |
|
31 | 31 | from rhodecode.lib.utils2 import safe_unicode, str2bool |
|
32 | 32 | from rhodecode.model.db import Session, CacheKey, IntegrityError |
|
33 | 33 | |
|
34 | 34 | from rhodecode.lib.rc_cache import cache_key_meta |
|
35 | 35 | from rhodecode.lib.rc_cache import region_meta |
|
36 | 36 | |
|
37 | 37 | log = logging.getLogger(__name__) |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | def isCython(func): |
|
41 | 41 | """ |
|
42 | 42 | Private helper that checks if a function is a cython function. |
|
43 | 43 | """ |
|
44 | 44 | return func.__class__.__name__ == 'cython_function_or_method' |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | class RhodeCodeCacheRegion(CacheRegion): |
|
48 | 48 | |
|
49 | 49 | def conditional_cache_on_arguments( |
|
50 | 50 | self, namespace=None, |
|
51 | 51 | expiration_time=None, |
|
52 | 52 | should_cache_fn=None, |
|
53 | 53 | to_str=compat.string_type, |
|
54 | 54 | function_key_generator=None, |
|
55 | 55 | condition=True): |
|
56 | 56 | """ |
|
57 | 57 | Custom conditional decorator that will not touch any dogpile internals if |
|
58 | 58 | the condition isn't met. This works a bit differently than should_cache_fn, |
|
59 | 59 | and it's faster in cases where we never want to compute cached values |
|
60 | 60 | """ |
|
61 | 61 | expiration_time_is_callable = compat.callable(expiration_time) |
|
62 | 62 | |
|
63 | 63 | if function_key_generator is None: |
|
64 | 64 | function_key_generator = self.function_key_generator |
|
65 | 65 | |
|
66 | 66 | # workaround for py2 and cython problems, this block should be removed |
|
67 | 67 | # once we've migrated to py3 |
|
68 | 68 | if 'cython' == 'cython': |
|
69 | 69 | def decorator(fn): |
|
70 | 70 | if to_str is compat.string_type: |
|
71 | 71 | # backwards compatible |
|
72 | 72 | key_generator = function_key_generator(namespace, fn) |
|
73 | 73 | else: |
|
74 | 74 | key_generator = function_key_generator(namespace, fn, to_str=to_str) |
|
75 | 75 | |
|
76 | 76 | @functools.wraps(fn) |
|
77 | 77 | def decorate(*arg, **kw): |
|
78 | 78 | key = key_generator(*arg, **kw) |
|
79 | 79 | |
|
80 | 80 | @functools.wraps(fn) |
|
81 | 81 | def creator(): |
|
82 | 82 | return fn(*arg, **kw) |
|
83 | 83 | |
|
84 | 84 | if not condition: |
|
85 | 85 | return creator() |
|
86 | 86 | |
|
87 | 87 | timeout = expiration_time() if expiration_time_is_callable \ |
|
88 | 88 | else expiration_time |
|
89 | 89 | |
|
90 | 90 | return self.get_or_create(key, creator, timeout, should_cache_fn) |
|
91 | 91 | |
|
92 | 92 | def invalidate(*arg, **kw): |
|
93 | 93 | key = key_generator(*arg, **kw) |
|
94 | 94 | self.delete(key) |
|
95 | 95 | |
|
96 | 96 | def set_(value, *arg, **kw): |
|
97 | 97 | key = key_generator(*arg, **kw) |
|
98 | 98 | self.set(key, value) |
|
99 | 99 | |
|
100 | 100 | def get(*arg, **kw): |
|
101 | 101 | key = key_generator(*arg, **kw) |
|
102 | 102 | return self.get(key) |
|
103 | 103 | |
|
104 | 104 | def refresh(*arg, **kw): |
|
105 | 105 | key = key_generator(*arg, **kw) |
|
106 | 106 | value = fn(*arg, **kw) |
|
107 | 107 | self.set(key, value) |
|
108 | 108 | return value |
|
109 | 109 | |
|
110 | 110 | decorate.set = set_ |
|
111 | 111 | decorate.invalidate = invalidate |
|
112 | 112 | decorate.refresh = refresh |
|
113 | 113 | decorate.get = get |
|
114 | 114 | decorate.original = fn |
|
115 | 115 | decorate.key_generator = key_generator |
|
116 | 116 | decorate.__wrapped__ = fn |
|
117 | 117 | |
|
118 | 118 | return decorate |
|
119 | 119 | return decorator |
|
120 | 120 | |
|
121 | 121 | def get_or_create_for_user_func(key_generator, user_func, *arg, **kw): |
|
122 | 122 | |
|
123 | 123 | if not condition: |
|
124 | 124 | log.debug('Calling un-cached func:%s', user_func.func_name) |
|
125 | 125 | return user_func(*arg, **kw) |
|
126 | 126 | |
|
127 | 127 | key = key_generator(*arg, **kw) |
|
128 | 128 | |
|
129 | 129 | timeout = expiration_time() if expiration_time_is_callable \ |
|
130 | 130 | else expiration_time |
|
131 | 131 | |
|
132 | 132 | log.debug('Calling cached fn:%s', user_func.func_name) |
|
133 | 133 | return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw)) |
|
134 | 134 | |
|
135 | 135 | def cache_decorator(user_func): |
|
136 | 136 | if to_str is compat.string_type: |
|
137 | 137 | # backwards compatible |
|
138 | 138 | key_generator = function_key_generator(namespace, user_func) |
|
139 | 139 | else: |
|
140 | 140 | key_generator = function_key_generator(namespace, user_func, to_str=to_str) |
|
141 | 141 | |
|
142 | 142 | def refresh(*arg, **kw): |
|
143 | 143 | """ |
|
144 | 144 | Like invalidate, but regenerates the value instead |
|
145 | 145 | """ |
|
146 | 146 | key = key_generator(*arg, **kw) |
|
147 | 147 | value = user_func(*arg, **kw) |
|
148 | 148 | self.set(key, value) |
|
149 | 149 | return value |
|
150 | 150 | |
|
151 | 151 | def invalidate(*arg, **kw): |
|
152 | 152 | key = key_generator(*arg, **kw) |
|
153 | 153 | self.delete(key) |
|
154 | 154 | |
|
155 | 155 | def set_(value, *arg, **kw): |
|
156 | 156 | key = key_generator(*arg, **kw) |
|
157 | 157 | self.set(key, value) |
|
158 | 158 | |
|
159 | 159 | def get(*arg, **kw): |
|
160 | 160 | key = key_generator(*arg, **kw) |
|
161 | 161 | return self.get(key) |
|
162 | 162 | |
|
163 | 163 | user_func.set = set_ |
|
164 | 164 | user_func.invalidate = invalidate |
|
165 | 165 | user_func.get = get |
|
166 | 166 | user_func.refresh = refresh |
|
167 | 167 | user_func.key_generator = key_generator |
|
168 | 168 | user_func.original = user_func |
|
169 | 169 | |
|
170 | 170 | # Use `decorate` to preserve the signature of :param:`user_func`. |
|
171 | 171 | return decorator.decorate(user_func, functools.partial( |
|
172 | 172 | get_or_create_for_user_func, key_generator)) |
|
173 | 173 | |
|
174 | 174 | return cache_decorator |
|
175 | 175 | |
|
176 | 176 | |
|
177 | 177 | def make_region(*arg, **kw): |
|
178 | 178 | return RhodeCodeCacheRegion(*arg, **kw) |
|
179 | 179 | |
|
180 | 180 | |
|
181 | 181 | def get_default_cache_settings(settings, prefixes=None): |
|
182 | 182 | prefixes = prefixes or [] |
|
183 | 183 | cache_settings = {} |
|
184 | 184 | for key in settings.keys(): |
|
185 | 185 | for prefix in prefixes: |
|
186 | 186 | if key.startswith(prefix): |
|
187 | 187 | name = key.split(prefix)[1].strip() |
|
188 | 188 | val = settings[key] |
|
189 | 189 | if isinstance(val, compat.string_types): |
|
190 | 190 | val = val.strip() |
|
191 | 191 | cache_settings[name] = val |
|
192 | 192 | return cache_settings |
|
193 | 193 | |
|
194 | 194 | |
|
195 | 195 | def compute_key_from_params(*args): |
|
196 | 196 | """ |
|
197 | 197 | Helper to compute key from given params to be used in cache manager |
|
198 | 198 | """ |
|
199 | 199 | return sha1("_".join(map(safe_str, args))) |
|
200 | 200 | |
|
201 | 201 | |
|
202 | 202 | def backend_key_generator(backend): |
|
203 | 203 | """ |
|
204 | 204 | Special wrapper that also sends over the backend to the key generator |
|
205 | 205 | """ |
|
206 | 206 | def wrapper(namespace, fn): |
|
207 | 207 | return key_generator(backend, namespace, fn) |
|
208 | 208 | return wrapper |
|
209 | 209 | |
|
210 | 210 | |
|
211 | 211 | def key_generator(backend, namespace, fn): |
|
212 | 212 | fname = fn.__name__ |
|
213 | 213 | |
|
214 | 214 | def generate_key(*args): |
|
215 | 215 | backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix' |
|
216 | 216 | namespace_pref = namespace or 'default_namespace' |
|
217 | 217 | arg_key = compute_key_from_params(*args) |
|
218 | 218 | final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key) |
|
219 | 219 | |
|
220 | 220 | return final_key |
|
221 | 221 | |
|
222 | 222 | return generate_key |
|
223 | 223 | |
|
224 | 224 | |
|
225 | 225 | def get_or_create_region(region_name, region_namespace=None): |
|
226 | 226 | from rhodecode.lib.rc_cache.backends import FileNamespaceBackend |
|
227 | 227 | region_obj = region_meta.dogpile_cache_regions.get(region_name) |
|
228 | 228 | if not region_obj: |
|
229 | 229 | raise EnvironmentError( |
|
230 | 230 | 'Region `{}` is not configured: {}.'.format( |
|
231 | 231 | region_name, region_meta.dogpile_cache_regions.keys())) |
|
232 | 232 | |
|
233 | 233 | region_uid_name = '{}:{}'.format(region_name, region_namespace) |
|
234 | 234 | if isinstance(region_obj.actual_backend, FileNamespaceBackend): |
|
235 | 235 | region_exist = region_meta.dogpile_cache_regions.get(region_namespace) |
|
236 | 236 | if region_exist: |
|
237 | 237 | log.debug('Using already configured region: %s', region_namespace) |
|
238 | 238 | return region_exist |
|
239 | 239 | cache_dir = region_meta.dogpile_config_defaults['cache_dir'] |
|
240 | 240 | expiration_time = region_obj.expiration_time |
|
241 | 241 | |
|
242 | 242 | if not os.path.isdir(cache_dir): |
|
243 | 243 | os.makedirs(cache_dir) |
|
244 | 244 | new_region = make_region( |
|
245 | 245 | name=region_uid_name, |
|
246 | 246 | function_key_generator=backend_key_generator(region_obj.actual_backend) |
|
247 | 247 | ) |
|
248 | 248 | namespace_filename = os.path.join( |
|
249 | 249 | cache_dir, "{}.cache.dbm".format(region_namespace)) |
|
250 | 250 | # special type that allows one db file per namespace |
|
251 | 251 | new_region.configure( |
|
252 | 252 | backend='dogpile.cache.rc.file_namespace', |
|
253 | 253 | expiration_time=expiration_time, |
|
254 | 254 | arguments={"filename": namespace_filename} |
|
255 | 255 | ) |
|
256 | 256 | |
|
257 | 257 | # create and save in region caches |
|
258 | 258 | log.debug('configuring new region: %s', region_uid_name) |
|
259 | 259 | region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region |
|
260 | 260 | |
|
261 | 261 | return region_obj |
|
262 | 262 | |
|
263 | 263 | |
|
264 | def clear_cache_namespace(cache_region, cache_namespace_uid): | |
|
264 | def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False): | |
|
265 | 265 | region = get_or_create_region(cache_region, cache_namespace_uid) |
|
266 | 266 | cache_keys = region.backend.list_keys(prefix=cache_namespace_uid) |
|
267 | 267 | num_delete_keys = len(cache_keys) |
|
268 | if invalidate: | |
|
269 | region.invalidate(hard=False) | |
|
270 | else: | |
|
268 | 271 | if num_delete_keys: |
|
269 | 272 | region.delete_multi(cache_keys) |
|
270 | 273 | return num_delete_keys |
|
271 | 274 | |
|
272 | 275 | |
|
273 | 276 | class ActiveRegionCache(object): |
|
274 | 277 | def __init__(self, context, cache_data): |
|
275 | 278 | self.context = context |
|
276 | 279 | self.cache_data = cache_data |
|
277 | 280 | |
|
278 | 281 | def should_invalidate(self): |
|
279 | 282 | return False |
|
280 | 283 | |
|
281 | 284 | |
|
282 | 285 | class FreshRegionCache(object): |
|
283 | 286 | def __init__(self, context, cache_data): |
|
284 | 287 | self.context = context |
|
285 | 288 | self.cache_data = cache_data |
|
286 | 289 | |
|
287 | 290 | def should_invalidate(self): |
|
288 | 291 | return True |
|
289 | 292 | |
|
290 | 293 | |
|
291 | 294 | class InvalidationContext(object): |
|
292 | 295 | """ |
|
293 | 296 | usage:: |
|
294 | 297 | |
|
295 | 298 | from rhodecode.lib import rc_cache |
|
296 | 299 | |
|
297 | 300 | cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1) |
|
298 | 301 | region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid) |
|
299 | 302 | |
|
300 | 303 | @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True) |
|
301 | 304 | def heavy_compute(cache_name, param1, param2): |
|
302 | 305 | print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2)) |
|
303 | 306 | |
|
304 | 307 | # invalidation namespace is shared namespace key for all process caches |
|
305 | 308 | # we use it to send a global signal |
|
306 | 309 | invalidation_namespace = 'repo_cache:1' |
|
307 | 310 | |
|
308 | 311 | inv_context_manager = rc_cache.InvalidationContext( |
|
309 | 312 | uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace) |
|
310 | 313 | with inv_context_manager as invalidation_context: |
|
311 | 314 | args = ('one', 'two') |
|
312 | 315 | # re-compute and store cache if we get invalidate signal |
|
313 | 316 | if invalidation_context.should_invalidate(): |
|
314 | 317 | result = heavy_compute.refresh(*args) |
|
315 | 318 | else: |
|
316 | 319 | result = heavy_compute(*args) |
|
317 | 320 | |
|
318 | 321 | compute_time = inv_context_manager.compute_time |
|
319 | 322 | log.debug('result computed in %.4fs', compute_time) |
|
320 | 323 | |
|
321 | 324 | # To send global invalidation signal, simply run |
|
322 | 325 | CacheKey.set_invalidate(invalidation_namespace) |
|
323 | 326 | |
|
324 | 327 | """ |
|
325 | 328 | |
|
326 | 329 | def __repr__(self): |
|
327 | 330 | return '<InvalidationContext:{}[{}]>'.format( |
|
328 | 331 | safe_str(self.cache_key), safe_str(self.uid)) |
|
329 | 332 | |
|
330 | 333 | def __init__(self, uid, invalidation_namespace='', |
|
331 | 334 | raise_exception=False, thread_scoped=None): |
|
332 | 335 | self.uid = uid |
|
333 | 336 | self.invalidation_namespace = invalidation_namespace |
|
334 | 337 | self.raise_exception = raise_exception |
|
335 | 338 | self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT') |
|
336 | 339 | self.thread_id = 'global' |
|
337 | 340 | |
|
338 | 341 | if thread_scoped is None: |
|
339 | 342 | # if we set "default" we can override this via .ini settings |
|
340 | 343 | thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped')) |
|
341 | 344 | |
|
342 | 345 | # Append the thread id to the cache key if this invalidation context |
|
343 | 346 | # should be scoped to the current thread. |
|
344 | 347 | if thread_scoped is True: |
|
345 | 348 | self.thread_id = threading.current_thread().ident |
|
346 | 349 | |
|
347 | 350 | self.cache_key = compute_key_from_params(uid) |
|
348 | 351 | self.cache_key = 'proc:{}|thread:{}|params:{}'.format( |
|
349 | 352 | self.proc_id, self.thread_id, self.cache_key) |
|
350 | 353 | self.compute_time = 0 |
|
351 | 354 | |
|
352 | 355 | def get_or_create_cache_obj(self, cache_type, invalidation_namespace=''): |
|
353 | 356 | invalidation_namespace = invalidation_namespace or self.invalidation_namespace |
|
354 | 357 | # fetch all cache keys for this namespace and convert them to a map to find if we |
|
355 | 358 | # have a specific cache_key object registered. We do this because we want to have |
|
356 | 359 | # a consistent cache_state_uid for all newly registered objects |
|
357 | 360 | cache_obj_map = CacheKey.get_namespace_map(invalidation_namespace) |
|
358 | 361 | cache_obj = cache_obj_map.get(self.cache_key) |
|
359 | 362 | log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key) |
|
360 | 363 | if not cache_obj: |
|
361 | 364 | new_cache_args = invalidation_namespace |
|
362 | 365 | first_cache_obj = next(cache_obj_map.itervalues()) if cache_obj_map else None |
|
363 | 366 | cache_state_uid = None |
|
364 | 367 | if first_cache_obj: |
|
365 | 368 | cache_state_uid = first_cache_obj.cache_state_uid |
|
366 | 369 | cache_obj = CacheKey(self.cache_key, cache_args=new_cache_args, |
|
367 | 370 | cache_state_uid=cache_state_uid) |
|
368 | 371 | cache_key_meta.cache_keys_by_pid.append(self.cache_key) |
|
369 | 372 | |
|
370 | 373 | return cache_obj |
|
371 | 374 | |
|
372 | 375 | def __enter__(self): |
|
373 | 376 | """ |
|
374 | 377 | Test if current object is valid, and return CacheRegion function |
|
375 | 378 | that does invalidation and calculation |
|
376 | 379 | """ |
|
377 | 380 | log.debug('Entering cache invalidation check context: %s', self.invalidation_namespace) |
|
378 | 381 | # register or get a new key based on uid |
|
379 | 382 | self.cache_obj = self.get_or_create_cache_obj(cache_type=self.uid) |
|
380 | 383 | cache_data = self.cache_obj.get_dict() |
|
381 | 384 | self._start_time = time.time() |
|
382 | 385 | if self.cache_obj.cache_active: |
|
383 | 386 | # means our cache obj exists and is marked as active, i.e. its |
|
384 | 387 | # cache is not outdated, so we return ActiveRegionCache |
|
385 | 388 | self.skip_cache_active_change = True |
|
386 | 389 | |
|
387 | 390 | return ActiveRegionCache(context=self, cache_data=cache_data) |
|
388 | 391 | |
|
389 | 392 | # the key either does not exist or is set to False; we return |
|
390 | 393 | # the real invalidator which re-computes the value. We additionally set |
|
391 | 394 | # the flag to actually update the Database objects |
|
392 | 395 | self.skip_cache_active_change = False |
|
393 | 396 | return FreshRegionCache(context=self, cache_data=cache_data) |
|
394 | 397 | |
|
395 | 398 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
396 | 399 | # save compute time |
|
397 | 400 | self.compute_time = time.time() - self._start_time |
|
398 | 401 | |
|
399 | 402 | if self.skip_cache_active_change: |
|
400 | 403 | return |
|
401 | 404 | |
|
402 | 405 | try: |
|
403 | 406 | self.cache_obj.cache_active = True |
|
404 | 407 | Session().add(self.cache_obj) |
|
405 | 408 | Session().commit() |
|
406 | 409 | except IntegrityError: |
|
407 | 410 | # if we catch an integrity error, it means this object was already inserted; |
|
408 | 411 | # the assumption is that this is really an edge race-condition case and |
|
409 | 412 | # it's safe to skip it |
|
410 | 413 | Session().rollback() |
|
411 | 414 | except Exception: |
|
412 | 415 | log.exception('Failed to commit on cache key update') |
|
413 | 416 | Session().rollback() |
|
414 | 417 | if self.raise_exception: |
|
415 | 418 | raise |
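The hunk above is the core of the change: `clear_cache_namespace()` grows an `invalidate=False` keyword. A hedged usage sketch of both code paths, using only names that appear in this file (the namespace value and the `expensive()` helper are illustrative):

    from rhodecode.lib import rc_cache

    cache_namespace_uid = 'cache_user_auth.{}'.format(2)
    region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid)

    @region.conditional_cache_on_arguments(namespace=cache_namespace_uid)
    def heavy_compute(key):
        return expensive(key)  # placeholder for the real computation

    # Default path (unchanged behaviour): physically delete every key found
    # under the namespace prefix.
    rc_cache.clear_cache_namespace('cache_perms', cache_namespace_uid)

    # New path: region.invalidate(hard=False) soft-invalidates the region, so
    # dogpile lazily recomputes values on next access instead of serving the
    # stale ones; nothing is deleted up front.
    rc_cache.clear_cache_namespace(
        'cache_perms', cache_namespace_uid, invalidate=True)

Either way the function still returns `num_delete_keys`, the number of keys found under the namespace, which is what the subscriber in the first file logs.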
@@ -1,1024 +1,1025 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Scm model for RhodeCode |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import os.path |
|
26 | 26 | import traceback |
|
27 | 27 | import logging |
|
28 | 28 | import cStringIO |
|
29 | 29 | |
|
30 | 30 | from sqlalchemy import func |
|
31 | 31 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
32 | 32 | |
|
33 | 33 | import rhodecode |
|
34 | 34 | from rhodecode.lib.vcs import get_backend |
|
35 | 35 | from rhodecode.lib.vcs.exceptions import RepositoryError, NodeNotChangedError |
|
36 | 36 | from rhodecode.lib.vcs.nodes import FileNode |
|
37 | 37 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
38 | 38 | from rhodecode.lib import helpers as h, rc_cache |
|
39 | 39 | from rhodecode.lib.auth import ( |
|
40 | 40 | HasRepoPermissionAny, HasRepoGroupPermissionAny, |
|
41 | 41 | HasUserGroupPermissionAny) |
|
42 | 42 | from rhodecode.lib.exceptions import NonRelativePathError, IMCCommitError |
|
43 | 43 | from rhodecode.lib import hooks_utils |
|
44 | 44 | from rhodecode.lib.utils import ( |
|
45 | 45 | get_filesystem_repos, make_db_config) |
|
46 | 46 | from rhodecode.lib.utils2 import (safe_str, safe_unicode) |
|
47 | 47 | from rhodecode.lib.system_info import get_system_info |
|
48 | 48 | from rhodecode.model import BaseModel |
|
49 | 49 | from rhodecode.model.db import ( |
|
50 | 50 | or_, false, |
|
51 | 51 | Repository, CacheKey, UserFollowing, UserLog, User, RepoGroup, |
|
52 | 52 | PullRequest, FileStore) |
|
53 | 53 | from rhodecode.model.settings import VcsSettingsModel |
|
54 | 54 | from rhodecode.model.validation_schema.validators import url_validator, InvalidCloneUrl |
|
55 | 55 | |
|
56 | 56 | log = logging.getLogger(__name__) |
|
57 | 57 | |
|
58 | 58 | |
|
59 | 59 | class UserTemp(object): |
|
60 | 60 | def __init__(self, user_id): |
|
61 | 61 | self.user_id = user_id |
|
62 | 62 | |
|
63 | 63 | def __repr__(self): |
|
64 | 64 | return "<%s('id:%s')>" % (self.__class__.__name__, self.user_id) |
|
65 | 65 | |
|
66 | 66 | |
|
67 | 67 | class RepoTemp(object): |
|
68 | 68 | def __init__(self, repo_id): |
|
69 | 69 | self.repo_id = repo_id |
|
70 | 70 | |
|
71 | 71 | def __repr__(self): |
|
72 | 72 | return "<%s('id:%s')>" % (self.__class__.__name__, self.repo_id) |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | class SimpleCachedRepoList(object): |
|
76 | 76 | """ |
|
77 | 77 | Lighter version of repo iteration without the scm initialisation, |
|
78 | 78 | and with cache usage |
|
79 | 79 | """ |
|
80 | 80 | def __init__(self, db_repo_list, repos_path, order_by=None, perm_set=None): |
|
81 | 81 | self.db_repo_list = db_repo_list |
|
82 | 82 | self.repos_path = repos_path |
|
83 | 83 | self.order_by = order_by |
|
84 | 84 | self.reversed = (order_by or '').startswith('-') |
|
85 | 85 | if not perm_set: |
|
86 | 86 | perm_set = ['repository.read', 'repository.write', |
|
87 | 87 | 'repository.admin'] |
|
88 | 88 | self.perm_set = perm_set |
|
89 | 89 | |
|
90 | 90 | def __len__(self): |
|
91 | 91 | return len(self.db_repo_list) |
|
92 | 92 | |
|
93 | 93 | def __repr__(self): |
|
94 | 94 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) |
|
95 | 95 | |
|
96 | 96 | def __iter__(self): |
|
97 | 97 | for dbr in self.db_repo_list: |
|
98 | 98 | # check permission at this level |
|
99 | 99 | has_perm = HasRepoPermissionAny(*self.perm_set)( |
|
100 | 100 | dbr.repo_name, 'SimpleCachedRepoList check') |
|
101 | 101 | if not has_perm: |
|
102 | 102 | continue |
|
103 | 103 | |
|
104 | 104 | tmp_d = { |
|
105 | 105 | 'name': dbr.repo_name, |
|
106 | 106 | 'dbrepo': dbr.get_dict(), |
|
107 | 107 | 'dbrepo_fork': dbr.fork.get_dict() if dbr.fork else {} |
|
108 | 108 | } |
|
109 | 109 | yield tmp_d |
|
110 | 110 | |
|
111 | 111 | |
|
112 | 112 | class _PermCheckIterator(object): |
|
113 | 113 | |
|
114 | 114 | def __init__( |
|
115 | 115 | self, obj_list, obj_attr, perm_set, perm_checker, |
|
116 | 116 | extra_kwargs=None): |
|
117 | 117 | """ |
|
118 | 118 | Creates an iterator from the given list of objects, additionally |
|
119 | 119 | checking permissions for them against the perm_set var |
|
120 | 120 | |
|
121 | 121 | :param obj_list: list of db objects |
|
122 | 122 | :param obj_attr: attribute of object to pass into perm_checker |
|
123 | 123 | :param perm_set: list of permissions to check |
|
124 | 124 | :param perm_checker: callable to check permissions against |
|
125 | 125 | """ |
|
126 | 126 | self.obj_list = obj_list |
|
127 | 127 | self.obj_attr = obj_attr |
|
128 | 128 | self.perm_set = perm_set |
|
129 | 129 | self.perm_checker = perm_checker(*self.perm_set) |
|
130 | 130 | self.extra_kwargs = extra_kwargs or {} |
|
131 | 131 | |
|
132 | 132 | def __len__(self): |
|
133 | 133 | return len(self.obj_list) |
|
134 | 134 | |
|
135 | 135 | def __repr__(self): |
|
136 | 136 | return '<%s (%s)>' % (self.__class__.__name__, self.__len__()) |
|
137 | 137 | |
|
138 | 138 | def __iter__(self): |
|
139 | 139 | for db_obj in self.obj_list: |
|
140 | 140 | # check permission at this level |
|
141 | 141 | # NOTE(marcink): the __dict__.get() is ~4x faster than getattr() |
|
142 | 142 | name = db_obj.__dict__.get(self.obj_attr, None) |
|
143 | 143 | if not self.perm_checker(name, self.__class__.__name__, **self.extra_kwargs): |
|
144 | 144 | continue |
|
145 | 145 | |
|
146 | 146 | yield db_obj |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | class RepoList(_PermCheckIterator): |
|
150 | 150 | |
|
151 | 151 | def __init__(self, db_repo_list, perm_set=None, extra_kwargs=None): |
|
152 | 152 | if not perm_set: |
|
153 | 153 | perm_set = ['repository.read', 'repository.write', 'repository.admin'] |
|
154 | 154 | |
|
155 | 155 | super(RepoList, self).__init__( |
|
156 | 156 | obj_list=db_repo_list, |
|
157 | 157 | obj_attr='_repo_name', perm_set=perm_set, |
|
158 | 158 | perm_checker=HasRepoPermissionAny, |
|
159 | 159 | extra_kwargs=extra_kwargs) |
|
160 | 160 | |
|
161 | 161 | |
|
162 | 162 | class RepoGroupList(_PermCheckIterator): |
|
163 | 163 | |
|
164 | 164 | def __init__(self, db_repo_group_list, perm_set=None, extra_kwargs=None): |
|
165 | 165 | if not perm_set: |
|
166 | 166 | perm_set = ['group.read', 'group.write', 'group.admin'] |
|
167 | 167 | |
|
168 | 168 | super(RepoGroupList, self).__init__( |
|
169 | 169 | obj_list=db_repo_group_list, |
|
170 | 170 | obj_attr='_group_name', perm_set=perm_set, |
|
171 | 171 | perm_checker=HasRepoGroupPermissionAny, |
|
172 | 172 | extra_kwargs=extra_kwargs) |
|
173 | 173 | |
|
174 | 174 | |
|
175 | 175 | class UserGroupList(_PermCheckIterator): |
|
176 | 176 | |
|
177 | 177 | def __init__(self, db_user_group_list, perm_set=None, extra_kwargs=None): |
|
178 | 178 | if not perm_set: |
|
179 | 179 | perm_set = ['usergroup.read', 'usergroup.write', 'usergroup.admin'] |
|
180 | 180 | |
|
181 | 181 | super(UserGroupList, self).__init__( |
|
182 | 182 | obj_list=db_user_group_list, |
|
183 | 183 | obj_attr='users_group_name', perm_set=perm_set, |
|
184 | 184 | perm_checker=HasUserGroupPermissionAny, |
|
185 | 185 | extra_kwargs=extra_kwargs) |
|
186 | 186 | |
|
187 | 187 | |
|
188 | 188 | class ScmModel(BaseModel): |
|
189 | 189 | """ |
|
190 | 190 | Generic Scm Model |
|
191 | 191 | """ |
|
192 | 192 | |
|
193 | 193 | @LazyProperty |
|
194 | 194 | def repos_path(self): |
|
195 | 195 | """ |
|
196 | 196 | Gets the repositories root path from database |
|
197 | 197 | """ |
|
198 | 198 | |
|
199 | 199 | settings_model = VcsSettingsModel(sa=self.sa) |
|
200 | 200 | return settings_model.get_repos_location() |
|
201 | 201 | |
|
202 | 202 | def repo_scan(self, repos_path=None): |
|
203 | 203 | """ |
|
204 | 204 | Listing of repositories in given path. This path should not be a |
|
205 | 205 | repository itself. Return a dictionary of repository objects |
|
206 | 206 | |
|
207 | 207 | :param repos_path: path to directory containing repositories |
|
208 | 208 | """ |
|
209 | 209 | |
|
210 | 210 | if repos_path is None: |
|
211 | 211 | repos_path = self.repos_path |
|
212 | 212 | |
|
213 | 213 | log.info('scanning for repositories in %s', repos_path) |
|
214 | 214 | |
|
215 | 215 | config = make_db_config() |
|
216 | 216 | config.set('extensions', 'largefiles', '') |
|
217 | 217 | repos = {} |
|
218 | 218 | |
|
219 | 219 | for name, path in get_filesystem_repos(repos_path, recursive=True): |
|
220 | 220 | # name needs to be decomposed and put back together using the / |
|
221 | 221 | # since this is the internal storage separator for rhodecode |
|
222 | 222 | name = Repository.normalize_repo_name(name) |
|
223 | 223 | |
|
224 | 224 | try: |
|
225 | 225 | if name in repos: |
|
226 | 226 | raise RepositoryError('Duplicate repository name %s ' |
|
227 | 227 | 'found in %s' % (name, path)) |
|
228 | 228 | elif path[0] in rhodecode.BACKENDS: |
|
229 | 229 | backend = get_backend(path[0]) |
|
230 | 230 | repos[name] = backend(path[1], config=config, |
|
231 | 231 | with_wire={"cache": False}) |
|
232 | 232 | except OSError: |
|
233 | 233 | continue |
|
234 | 234 | except RepositoryError: |
|
235 | 235 | log.exception('Failed to create a repo') |
|
236 | 236 | continue |
|
237 | 237 | |
|
238 | 238 | log.debug('found %s paths with repositories', len(repos)) |
|
239 | 239 | return repos |
|
240 | 240 | |
|
241 | 241 | def get_repos(self, all_repos=None, sort_key=None): |
|
242 | 242 | """ |
|
243 | 243 | Get all repositories from db and for each repo create its |
|
244 | 244 | backend instance and fill that backend with information from the database |
|
245 | 245 | |
|
246 | 246 | :param all_repos: list of repository names as strings |
|
247 | 247 | give specific repositories list, good for filtering |
|
248 | 248 | |
|
249 | 249 | :param sort_key: initial sorting of repositories |
|
250 | 250 | """ |
|
251 | 251 | if all_repos is None: |
|
252 | 252 | all_repos = self.sa.query(Repository)\ |
|
253 | 253 | .filter(Repository.group_id == None)\ |
|
254 | 254 | .order_by(func.lower(Repository.repo_name)).all() |
|
255 | 255 | repo_iter = SimpleCachedRepoList( |
|
256 | 256 | all_repos, repos_path=self.repos_path, order_by=sort_key) |
|
257 | 257 | return repo_iter |
|
258 | 258 | |
|
259 | 259 | def get_repo_groups(self, all_groups=None): |
|
260 | 260 | if all_groups is None: |
|
261 | 261 | all_groups = RepoGroup.query()\ |
|
262 | 262 | .filter(RepoGroup.group_parent_id == None).all() |
|
263 | 263 | return [x for x in RepoGroupList(all_groups)] |
|
264 | 264 | |
|
265 | 265 | def mark_for_invalidation(self, repo_name, delete=False): |
|
266 | 266 | """ |
|
267 | 267 | Mark caches of this repo invalid in the database. `delete` flag |
|
268 | 268 | removes the cache entries |
|
269 | 269 | |
|
270 | 270 | :param repo_name: the repo_name for which caches should be marked |
|
271 | 271 | invalid, or deleted |
|
272 | 272 | :param delete: delete the entry keys instead of setting bool |
|
273 | 273 | flag on them, and also purge the caches used by dogpile |
|
274 | 274 | """ |
|
275 | 275 | repo = Repository.get_by_repo_name(repo_name) |
|
276 | 276 | |
|
277 | 277 | if repo: |
|
278 | 278 | invalidation_namespace = CacheKey.REPO_INVALIDATION_NAMESPACE.format( |
|
279 | 279 | repo_id=repo.repo_id) |
|
280 | 280 | CacheKey.set_invalidate(invalidation_namespace, delete=delete) |
|
281 | 281 | |
|
282 | 282 | repo_id = repo.repo_id |
|
283 | 283 | config = repo._config |
|
284 | 284 | config.set('extensions', 'largefiles', '') |
|
285 | 285 | repo.update_commit_cache(config=config, cs_cache=None) |
|
286 | 286 | if delete: |
|
287 | 287 | cache_namespace_uid = 'cache_repo.{}'.format(repo_id) |
|
288 | rc_cache.clear_cache_namespace('cache_repo', cache_namespace_uid) |
|
288 | rc_cache.clear_cache_namespace( |
|
289 | 'cache_repo', cache_namespace_uid, invalidate=True) |
|
289 | 290 | |
|
290 | 291 | def toggle_following_repo(self, follow_repo_id, user_id): |
|
291 | 292 | |
|
292 | 293 | f = self.sa.query(UserFollowing)\ |
|
293 | 294 | .filter(UserFollowing.follows_repo_id == follow_repo_id)\ |
|
294 | 295 | .filter(UserFollowing.user_id == user_id).scalar() |
|
295 | 296 | |
|
296 | 297 | if f is not None: |
|
297 | 298 | try: |
|
298 | 299 | self.sa.delete(f) |
|
299 | 300 | return |
|
300 | 301 | except Exception: |
|
301 | 302 | log.error(traceback.format_exc()) |
|
302 | 303 | raise |
|
303 | 304 | |
|
304 | 305 | try: |
|
305 | 306 | f = UserFollowing() |
|
306 | 307 | f.user_id = user_id |
|
307 | 308 | f.follows_repo_id = follow_repo_id |
|
308 | 309 | self.sa.add(f) |
|
309 | 310 | except Exception: |
|
310 | 311 | log.error(traceback.format_exc()) |
|
311 | 312 | raise |
|
312 | 313 | |
|
313 | 314 | def toggle_following_user(self, follow_user_id, user_id): |
|
314 | 315 | f = self.sa.query(UserFollowing)\ |
|
315 | 316 | .filter(UserFollowing.follows_user_id == follow_user_id)\ |
|
316 | 317 | .filter(UserFollowing.user_id == user_id).scalar() |
|
317 | 318 | |
|
318 | 319 | if f is not None: |
|
319 | 320 | try: |
|
320 | 321 | self.sa.delete(f) |
|
321 | 322 | return |
|
322 | 323 | except Exception: |
|
323 | 324 | log.error(traceback.format_exc()) |
|
324 | 325 | raise |
|
325 | 326 | |
|
326 | 327 | try: |
|
327 | 328 | f = UserFollowing() |
|
328 | 329 | f.user_id = user_id |
|
329 | 330 | f.follows_user_id = follow_user_id |
|
330 | 331 | self.sa.add(f) |
|
331 | 332 | except Exception: |
|
332 | 333 | log.error(traceback.format_exc()) |
|
333 | 334 | raise |
|
334 | 335 | |
|
335 | 336 | def is_following_repo(self, repo_name, user_id, cache=False): |
|
336 | 337 | r = self.sa.query(Repository)\ |
|
337 | 338 | .filter(Repository.repo_name == repo_name).scalar() |
|
338 | 339 | |
|
339 | 340 | f = self.sa.query(UserFollowing)\ |
|
340 | 341 | .filter(UserFollowing.follows_repository == r)\ |
|
341 | 342 | .filter(UserFollowing.user_id == user_id).scalar() |
|
342 | 343 | |
|
343 | 344 | return f is not None |
|
344 | 345 | |
|
345 | 346 | def is_following_user(self, username, user_id, cache=False): |
|
346 | 347 | u = User.get_by_username(username) |
|
347 | 348 | |
|
348 | 349 | f = self.sa.query(UserFollowing)\ |
|
349 | 350 | .filter(UserFollowing.follows_user == u)\ |
|
350 | 351 | .filter(UserFollowing.user_id == user_id).scalar() |
|
351 | 352 | |
|
352 | 353 | return f is not None |
|
353 | 354 | |
|
354 | 355 | def get_followers(self, repo): |
|
355 | 356 | repo = self._get_repo(repo) |
|
356 | 357 | |
|
357 | 358 | return self.sa.query(UserFollowing)\ |
|
358 | 359 | .filter(UserFollowing.follows_repository == repo).count() |
|
359 | 360 | |
|
360 | 361 | def get_forks(self, repo): |
|
361 | 362 | repo = self._get_repo(repo) |
|
362 | 363 | return self.sa.query(Repository)\ |
|
363 | 364 | .filter(Repository.fork == repo).count() |
|
364 | 365 | |
|
365 | 366 | def get_pull_requests(self, repo): |
|
366 | 367 | repo = self._get_repo(repo) |
|
367 | 368 | return self.sa.query(PullRequest)\ |
|
368 | 369 | .filter(PullRequest.target_repo == repo)\ |
|
369 | 370 | .filter(PullRequest.status != PullRequest.STATUS_CLOSED).count() |
|
370 | 371 | |
|
371 | 372 | def get_artifacts(self, repo): |
|
372 | 373 | repo = self._get_repo(repo) |
|
373 | 374 | return self.sa.query(FileStore)\ |
|
374 | 375 | .filter(FileStore.repo == repo)\ |
|
375 | 376 | .filter(or_(FileStore.hidden == None, FileStore.hidden == false())).count() |
|
376 | 377 | |
|
377 | 378 | def mark_as_fork(self, repo, fork, user): |
|
378 | 379 | repo = self._get_repo(repo) |
|
379 | 380 | fork = self._get_repo(fork) |
|
380 | 381 | if fork and repo.repo_id == fork.repo_id: |
|
381 | 382 | raise Exception("Cannot set repository as fork of itself") |
|
382 | 383 | |
|
383 | 384 | if fork and repo.repo_type != fork.repo_type: |
|
384 | 385 | raise RepositoryError( |
|
385 | 386 | "Cannot set repository as fork of repository with other type") |
|
386 | 387 | |
|
387 | 388 | repo.fork = fork |
|
388 | 389 | self.sa.add(repo) |
|
389 | 390 | return repo |
|
390 | 391 | |
|
391 | 392 | def pull_changes(self, repo, username, remote_uri=None, validate_uri=True): |
|
392 | 393 | dbrepo = self._get_repo(repo) |
|
393 | 394 | remote_uri = remote_uri or dbrepo.clone_uri |
|
394 | 395 | if not remote_uri: |
|
395 | 396 | raise Exception("This repository doesn't have a clone uri") |
|
396 | 397 | |
|
397 | 398 | repo = dbrepo.scm_instance(cache=False) |
|
398 | 399 | repo.config.clear_section('hooks') |
|
399 | 400 | |
|
400 | 401 | try: |
|
401 | 402 | # NOTE(marcink): add extra validation so we skip invalid urls |
|
402 | 403 | # this is because these tasks can be executed via the scheduler without |
|
403 | 404 | # proper validation of remote_uri |
|
404 | 405 | if validate_uri: |
|
405 | 406 | config = make_db_config(clear_session=False) |
|
406 | 407 | url_validator(remote_uri, dbrepo.repo_type, config) |
|
407 | 408 | except InvalidCloneUrl: |
|
408 | 409 | raise |
|
409 | 410 | |
|
410 | 411 | repo_name = dbrepo.repo_name |
|
411 | 412 | try: |
|
412 | 413 | # TODO: we need to make sure those operations call proper hooks ! |
|
413 | 414 | repo.fetch(remote_uri) |
|
414 | 415 | |
|
415 | 416 | self.mark_for_invalidation(repo_name) |
|
416 | 417 | except Exception: |
|
417 | 418 | log.error(traceback.format_exc()) |
|
418 | 419 | raise |
|
419 | 420 | |
|
420 | 421 | def push_changes(self, repo, username, remote_uri=None, validate_uri=True): |
|
421 | 422 | dbrepo = self._get_repo(repo) |
|
422 | 423 | remote_uri = remote_uri or dbrepo.push_uri |
|
423 | 424 | if not remote_uri: |
|
424 | 425 | raise Exception("This repository doesn't have a clone uri") |
|
425 | 426 | |
|
426 | 427 | repo = dbrepo.scm_instance(cache=False) |
|
427 | 428 | repo.config.clear_section('hooks') |
|
428 | 429 | |
|
429 | 430 | try: |
|
430 | 431 | # NOTE(marcink): add extra validation so we skip invalid urls |
|
431 | 432 | # this is because these tasks can be executed via the scheduler without |
|
432 | 433 | # proper validation of remote_uri |
|
433 | 434 | if validate_uri: |
|
434 | 435 | config = make_db_config(clear_session=False) |
|
435 | 436 | url_validator(remote_uri, dbrepo.repo_type, config) |
|
436 | 437 | except InvalidCloneUrl: |
|
437 | 438 | raise |
|
438 | 439 | |
|
439 | 440 | try: |
|
440 | 441 | repo.push(remote_uri) |
|
441 | 442 | except Exception: |
|
442 | 443 | log.error(traceback.format_exc()) |
|
443 | 444 | raise |
|
444 | 445 | |
|
445 | 446 | def commit_change(self, repo, repo_name, commit, user, author, message, |
|
446 | 447 | content, f_path): |
|
447 | 448 | """ |
|
448 | 449 | Commits changes |
|
449 | 450 | |
|
450 | 451 | :param repo: SCM instance |
|
451 | 452 | |
|
452 | 453 | """ |
|
453 | 454 | user = self._get_user(user) |
|
454 | 455 | |
|
455 | 456 | # decoding here will ensure that we have properly encoded values; |
|
456 | 457 | # in any other case this will throw exceptions and deny the commit |
|
457 | 458 | content = safe_str(content) |
|
458 | 459 | path = safe_str(f_path) |
|
459 | 460 | # message and author need to be unicode |
|
460 | 461 | # proper backend should then translate that into required type |
|
461 | 462 | message = safe_unicode(message) |
|
462 | 463 | author = safe_unicode(author) |
|
463 | 464 | imc = repo.in_memory_commit |
|
464 | 465 | imc.change(FileNode(path, content, mode=commit.get_file_mode(f_path))) |
|
465 | 466 | try: |
|
466 | 467 | # TODO: handle pre-push action ! |
|
467 | 468 | tip = imc.commit( |
|
468 | 469 | message=message, author=author, parents=[commit], |
|
469 | 470 | branch=commit.branch) |
|
470 | 471 | except Exception as e: |
|
471 | 472 | log.error(traceback.format_exc()) |
|
472 | 473 | raise IMCCommitError(str(e)) |
|
473 | 474 | finally: |
|
474 | 475 | # always clear caches; if the commit fails we want a fresh object as well |
|
475 | 476 | self.mark_for_invalidation(repo_name) |
|
476 | 477 | |
|
477 | 478 | # We trigger the post-push action |
|
478 | 479 | hooks_utils.trigger_post_push_hook( |
|
479 | 480 | username=user.username, action='push_local', hook_type='post_push', |
|
480 | 481 | repo_name=repo_name, repo_type=repo.alias, commit_ids=[tip.raw_id]) |
|
481 | 482 | return tip |
|
482 | 483 | |
|
483 | 484 | def _sanitize_path(self, f_path): |
|
484 | 485 | if f_path.startswith('/') or f_path.startswith('./') or '../' in f_path: |
|
485 | 486 | raise NonRelativePathError('%s is not a relative path' % f_path) |
|
486 | 487 | if f_path: |
|
487 | 488 | f_path = os.path.normpath(f_path) |
|
488 | 489 | return f_path |
|
489 | 490 | |
|
490 | 491 | def get_dirnode_metadata(self, request, commit, dir_node): |
|
491 | 492 | if not dir_node.is_dir(): |
|
492 | 493 | return [] |
|
493 | 494 | |
|
494 | 495 | data = [] |
|
495 | 496 | for node in dir_node: |
|
496 | 497 | if not node.is_file(): |
|
497 | 498 | # we skip file-nodes |
|
498 | 499 | continue |
|
499 | 500 | |
|
500 | 501 | last_commit = node.last_commit |
|
501 | 502 | last_commit_date = last_commit.date |
|
502 | 503 | data.append({ |
|
503 | 504 | 'name': node.name, |
|
504 | 505 | 'size': h.format_byte_size_binary(node.size), |
|
505 | 506 | 'modified_at': h.format_date(last_commit_date), |
|
506 | 507 | 'modified_ts': last_commit_date.isoformat(), |
|
507 | 508 | 'revision': last_commit.revision, |
|
508 | 509 | 'short_id': last_commit.short_id, |
|
509 | 510 | 'message': h.escape(last_commit.message), |
|
510 | 511 | 'author': h.escape(last_commit.author), |
|
511 | 512 | 'user_profile': h.gravatar_with_user( |
|
512 | 513 | request, last_commit.author), |
|
513 | 514 | }) |
|
514 | 515 | |
|
515 | 516 | return data |
|
516 | 517 | |
|
517 | 518 | def get_nodes(self, repo_name, commit_id, root_path='/', flat=True, |
|
518 | 519 | extended_info=False, content=False, max_file_bytes=None): |
|
519 | 520 | """ |
|
520 | 521 | recursive walk in root dir and return a set of all paths in that dir |
|
521 | 522 | based on repository walk function |
|
522 | 523 | |
|
523 | 524 | :param repo_name: name of repository |
|
524 | 525 | :param commit_id: commit id for which to list nodes |
|
525 | 526 | :param root_path: root path to list |
|
526 | 527 | :param flat: return as a list, if False returns a dict with description |
|
527 | 528 | :param extended_info: show additional info such as md5, binary, size etc |
|
528 | 529 | :param content: add nodes content to the return data |
|
529 | 530 | :param max_file_bytes: will not return file contents over this limit |
|
530 | 531 | |
|
531 | 532 | """ |
|
532 | 533 | _files = list() |
|
533 | 534 | _dirs = list() |
|
534 | 535 | try: |
|
535 | 536 | _repo = self._get_repo(repo_name) |
|
536 | 537 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
537 | 538 | root_path = root_path.lstrip('/') |
|
538 | 539 | for __, dirs, files in commit.walk(root_path): |
|
539 | 540 | |
|
540 | 541 | for f in files: |
|
541 | 542 | _content = None |
|
542 | 543 | _data = f_name = f.unicode_path |
|
543 | 544 | |
|
544 | 545 | if not flat: |
|
545 | 546 | _data = { |
|
546 | 547 | "name": h.escape(f_name), |
|
547 | 548 | "type": "file", |
|
548 | 549 | } |
|
549 | 550 | if extended_info: |
|
550 | 551 | _data.update({ |
|
551 | 552 | "md5": f.md5, |
|
552 | 553 | "binary": f.is_binary, |
|
553 | 554 | "size": f.size, |
|
554 | 555 | "extension": f.extension, |
|
555 | 556 | "mimetype": f.mimetype, |
|
556 | 557 | "lines": f.lines()[0] |
|
557 | 558 | }) |
|
558 | 559 | |
|
559 | 560 | if content: |
|
560 | 561 | over_size_limit = (max_file_bytes is not None |
|
561 | 562 | and f.size > max_file_bytes) |
|
562 | 563 | full_content = None |
|
563 | 564 | if not f.is_binary and not over_size_limit: |
|
564 | 565 | full_content = safe_str(f.content) |
|
565 | 566 | |
|
566 | 567 | _data.update({ |
|
567 | 568 | "content": full_content, |
|
568 | 569 | }) |
|
569 | 570 | _files.append(_data) |
|
570 | 571 | |
|
571 | 572 | for d in dirs: |
|
572 | 573 | _data = d_name = d.unicode_path |
|
573 | 574 | if not flat: |
|
574 | 575 | _data = { |
|
575 | 576 | "name": h.escape(d_name), |
|
576 | 577 | "type": "dir", |
|
577 | 578 | } |
|
578 | 579 | if extended_info: |
|
579 | 580 | _data.update({ |
|
580 | 581 | "md5": None, |
|
581 | 582 | "binary": None, |
|
582 | 583 | "size": None, |
|
583 | 584 | "extension": None, |
|
584 | 585 | }) |
|
585 | 586 | if content: |
|
586 | 587 | _data.update({ |
|
587 | 588 | "content": None |
|
588 | 589 | }) |
|
589 | 590 | _dirs.append(_data) |
|
590 | 591 | except RepositoryError: |
|
591 | 592 | log.exception("Exception in get_nodes") |
|
592 | 593 | raise |
|
593 | 594 | |
|
594 | 595 | return _dirs, _files |
|
595 | 596 | |
|
596 | 597 | def get_quick_filter_nodes(self, repo_name, commit_id, root_path='/'): |
|
597 | 598 | """ |
|
598 | 599 | Generate files for quick filter in files view |
|
599 | 600 | """ |
|
600 | 601 | |
|
601 | 602 | _files = list() |
|
602 | 603 | _dirs = list() |
|
603 | 604 | try: |
|
604 | 605 | _repo = self._get_repo(repo_name) |
|
605 | 606 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
606 | 607 | root_path = root_path.lstrip('/') |
|
607 | 608 | for __, dirs, files in commit.walk(root_path): |
|
608 | 609 | |
|
609 | 610 | for f in files: |
|
610 | 611 | |
|
611 | 612 | _data = { |
|
612 | 613 | "name": h.escape(f.unicode_path), |
|
613 | 614 | "type": "file", |
|
614 | 615 | } |
|
615 | 616 | |
|
616 | 617 | _files.append(_data) |
|
617 | 618 | |
|
618 | 619 | for d in dirs: |
|
619 | 620 | |
|
620 | 621 | _data = { |
|
621 | 622 | "name": h.escape(d.unicode_path), |
|
622 | 623 | "type": "dir", |
|
623 | 624 | } |
|
624 | 625 | |
|
625 | 626 | _dirs.append(_data) |
|
626 | 627 | except RepositoryError: |
|
627 | 628 | log.exception("Exception in get_quick_filter_nodes") |
|
628 | 629 | raise |
|
629 | 630 | |
|
630 | 631 | return _dirs, _files |
|
631 | 632 | |
|
632 | 633 | def get_node(self, repo_name, commit_id, file_path, |
|
633 | 634 | extended_info=False, content=False, max_file_bytes=None, cache=True): |
|
634 | 635 | """ |
|
635 | 636 | retrieve single node from commit |
|
636 | 637 | """ |
|
637 | 638 | try: |
|
638 | 639 | |
|
639 | 640 | _repo = self._get_repo(repo_name) |
|
640 | 641 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
641 | 642 | |
|
642 | 643 | file_node = commit.get_node(file_path) |
|
643 | 644 | if file_node.is_dir(): |
|
644 | 645 | raise RepositoryError('The given path is a directory') |
|
645 | 646 | |
|
646 | 647 | _content = None |
|
647 | 648 | f_name = file_node.unicode_path |
|
648 | 649 | |
|
649 | 650 | file_data = { |
|
650 | 651 | "name": h.escape(f_name), |
|
651 | 652 | "type": "file", |
|
652 | 653 | } |
|
653 | 654 | |
|
654 | 655 | if extended_info: |
|
655 | 656 | file_data.update({ |
|
656 | 657 | "extension": file_node.extension, |
|
657 | 658 | "mimetype": file_node.mimetype, |
|
658 | 659 | }) |
|
659 | 660 | |
|
660 | 661 | if cache: |
|
661 | 662 | md5 = file_node.md5 |
|
662 | 663 | is_binary = file_node.is_binary |
|
663 | 664 | size = file_node.size |
|
664 | 665 | else: |
|
665 | 666 | is_binary, md5, size, _content = file_node.metadata_uncached() |
|
666 | 667 | |
|
667 | 668 | file_data.update({ |
|
668 | 669 | "md5": md5, |
|
669 | 670 | "binary": is_binary, |
|
670 | 671 | "size": size, |
|
671 | 672 | }) |
|
672 | 673 | |
|
673 | 674 | if content and cache: |
|
674 | 675 | # get content + cache |
|
675 | 676 | size = file_node.size |
|
676 | 677 | over_size_limit = (max_file_bytes is not None and size > max_file_bytes) |
|
677 | 678 | full_content = None |
|
678 | 679 | all_lines = 0 |
|
679 | 680 | if not file_node.is_binary and not over_size_limit: |
|
680 | 681 | full_content = safe_unicode(file_node.content) |
|
681 | 682 | all_lines, empty_lines = file_node.count_lines(full_content) |
|
682 | 683 | |
|
683 | 684 | file_data.update({ |
|
684 | 685 | "content": full_content, |
|
685 | 686 | "lines": all_lines |
|
686 | 687 | }) |
|
687 | 688 | elif content: |
|
688 | 689 | # get content *without* cache |
|
689 | 690 | if _content is None: |
|
690 | 691 | is_binary, md5, size, _content = file_node.metadata_uncached() |
|
691 | 692 | |
|
692 | 693 | over_size_limit = (max_file_bytes is not None and size > max_file_bytes) |
|
693 | 694 | full_content = None |
|
694 | 695 | all_lines = 0 |
|
695 | 696 | if not is_binary and not over_size_limit: |
|
696 | 697 | full_content = safe_unicode(_content) |
|
697 | 698 | all_lines, empty_lines = file_node.count_lines(full_content) |
|
698 | 699 | |
|
699 | 700 | file_data.update({ |
|
700 | 701 | "content": full_content, |
|
701 | 702 | "lines": all_lines |
|
702 | 703 | }) |
|
703 | 704 | |
|
704 | 705 | except RepositoryError: |
|
705 | 706 | log.exception("Exception in get_node") |
|
706 | 707 | raise |
|
707 | 708 | |
|
708 | 709 | return file_data |
|
709 | 710 | |
|
710 | 711 | def get_fts_data(self, repo_name, commit_id, root_path='/'): |
|
711 | 712 | """ |
|
712 | 713 | Fetch node tree for usage in full text search |
|
713 | 714 | """ |
|
714 | 715 | |
|
715 | 716 | tree_info = list() |
|
716 | 717 | |
|
717 | 718 | try: |
|
718 | 719 | _repo = self._get_repo(repo_name) |
|
719 | 720 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
720 | 721 | root_path = root_path.lstrip('/') |
|
721 | 722 | for __, dirs, files in commit.walk(root_path): |
|
722 | 723 | |
|
723 | 724 | for f in files: |
|
724 | 725 | is_binary, md5, size, _content = f.metadata_uncached() |
|
725 | 726 | _data = { |
|
726 | 727 | "name": f.unicode_path, |
|
727 | 728 | "md5": md5, |
|
728 | 729 | "extension": f.extension, |
|
729 | 730 | "binary": is_binary, |
|
730 | 731 | "size": size |
|
731 | 732 | } |
|
732 | 733 | |
|
733 | 734 | tree_info.append(_data) |
|
734 | 735 | |
|
735 | 736 | except RepositoryError: |
|
736 | 737 | log.exception("Exception in get_nodes") |
|
737 | 738 | raise |
|
738 | 739 | |
|
739 | 740 | return tree_info |
|
740 | 741 | |
|
741 | 742 | def create_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
742 | 743 | author=None, trigger_push_hook=True): |
|
743 | 744 | """ |
|
744 | 745 | Commits given multiple nodes into repo |
|
745 | 746 | |
|
746 | 747 | :param user: RhodeCode User object or user_id, the committer |
|
747 | 748 | :param repo: RhodeCode Repository object |
|
748 | 749 | :param message: commit message |
|
749 | 750 | :param nodes: mapping {filename:{'content':content},...} |
|
750 | 751 | :param parent_commit: parent commit; can be empty, then it's the |
|
751 | 752 | initial commit |
|
752 | 753 | :param author: author of the commit, can be different from the committer, |
|
753 | 754 | but only for git |
|
754 | 755 | :param trigger_push_hook: trigger push hooks |
|
755 | 756 | |
|
756 | 757 | :returns: the newly committed commit |
|
757 | 758 | """ |
|
758 | 759 | |
|
759 | 760 | user = self._get_user(user) |
|
760 | 761 | scm_instance = repo.scm_instance(cache=False) |
|
761 | 762 | |
|
762 | 763 | processed_nodes = [] |
|
763 | 764 | for f_path in nodes: |
|
764 | 765 | f_path = self._sanitize_path(f_path) |
|
765 | 766 | content = nodes[f_path]['content'] |
|
766 | 767 | f_path = safe_str(f_path) |
|
767 | 768 | # decoding here will ensure that we have properly encoded values; |
|
768 | 769 | # in any other case this will throw exceptions and deny the commit |
|
769 | 770 | if isinstance(content, (basestring,)): |
|
770 | 771 | content = safe_str(content) |
|
771 | 772 | elif isinstance(content, (file, cStringIO.OutputType,)): |
|
772 | 773 | content = content.read() |
|
773 | 774 | else: |
|
774 | 775 | raise Exception('Content is of unrecognized type %s' % ( |
|
775 | 776 | type(content) |
|
776 | 777 | )) |
|
777 | 778 | processed_nodes.append((f_path, content)) |
|
778 | 779 | |
|
779 | 780 | message = safe_unicode(message) |
|
780 | 781 | commiter = user.full_contact |
|
781 | 782 | author = safe_unicode(author) if author else commiter |
|
782 | 783 | |
|
783 | 784 | imc = scm_instance.in_memory_commit |
|
784 | 785 | |
|
785 | 786 | if not parent_commit: |
|
786 | 787 | parent_commit = EmptyCommit(alias=scm_instance.alias) |
|
787 | 788 | |
|
788 | 789 | if isinstance(parent_commit, EmptyCommit): |
|
789 | 790 | # EmptyCommit means we're editing an empty repository |
|
790 | 791 | parents = None |
|
791 | 792 | else: |
|
792 | 793 | parents = [parent_commit] |
|
793 | 794 | # add multiple nodes |
|
794 | 795 | for path, content in processed_nodes: |
|
795 | 796 | imc.add(FileNode(path, content=content)) |
|
796 | 797 | # TODO: handle pre push scenario |
|
797 | 798 | tip = imc.commit(message=message, |
|
798 | 799 | author=author, |
|
799 | 800 | parents=parents, |
|
800 | 801 | branch=parent_commit.branch) |
|
801 | 802 | |
|
802 | 803 | self.mark_for_invalidation(repo.repo_name) |
|
803 | 804 | if trigger_push_hook: |
|
804 | 805 | hooks_utils.trigger_post_push_hook( |
|
805 | 806 | username=user.username, action='push_local', |
|
806 | 807 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
807 | 808 | hook_type='post_push', |
|
808 | 809 | commit_ids=[tip.raw_id]) |
|
809 | 810 | return tip |
|
810 | 811 | |
|
811 | 812 | def update_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
812 | 813 | author=None, trigger_push_hook=True): |
|
813 | 814 | user = self._get_user(user) |
|
814 | 815 | scm_instance = repo.scm_instance(cache=False) |
|
815 | 816 | |
|
816 | 817 | message = safe_unicode(message) |
|
817 | 818 | commiter = user.full_contact |
|
818 | 819 | author = safe_unicode(author) if author else commiter |
|
819 | 820 | |
|
820 | 821 | imc = scm_instance.in_memory_commit |
|
821 | 822 | |
|
822 | 823 | if not parent_commit: |
|
823 | 824 | parent_commit = EmptyCommit(alias=scm_instance.alias) |
|
824 | 825 | |
|
825 | 826 | if isinstance(parent_commit, EmptyCommit): |
|
826 | 827 | # EmptyCommit means we're editing an empty repository |
|
827 | 828 | parents = None |
|
828 | 829 | else: |
|
829 | 830 | parents = [parent_commit] |
|
830 | 831 | |
|
831 | 832 | # add multiple nodes |
|
832 | 833 | for _filename, data in nodes.items(): |
|
833 | 834 | # new filename; it can be a rename of the old one. Also sanitize
|
834 | 835 | # the path to guard against relative-path tricks like ../../ etc.
|
835 | 836 | filename = self._sanitize_path(data['filename']) |
|
836 | 837 | old_filename = self._sanitize_path(_filename) |
|
837 | 838 | content = data['content'] |
|
838 | 839 | file_mode = data.get('mode') |
|
839 | 840 | filenode = FileNode(old_filename, content=content, mode=file_mode) |
|
840 | 841 | op = data['op'] |
|
841 | 842 | if op == 'add': |
|
842 | 843 | imc.add(filenode) |
|
843 | 844 | elif op == 'del': |
|
844 | 845 | imc.remove(filenode) |
|
845 | 846 | elif op == 'mod': |
|
846 | 847 | if filename != old_filename: |
|
847 | 848 | # TODO: handle renames more efficiently, needs vcs lib changes
|
848 | 849 | imc.remove(filenode) |
|
849 | 850 | imc.add(FileNode(filename, content=content, mode=file_mode)) |
|
850 | 851 | else: |
|
851 | 852 | imc.change(filenode) |
|
852 | 853 | |
|
853 | 854 | try: |
|
854 | 855 | # TODO: handle pre push scenario commit changes |
|
855 | 856 | tip = imc.commit(message=message, |
|
856 | 857 | author=author, |
|
857 | 858 | parents=parents, |
|
858 | 859 | branch=parent_commit.branch) |
|
859 | 860 | except NodeNotChangedError: |
|
860 | 861 | raise |
|
861 | 862 | except Exception as e: |
|
862 | 863 | log.exception("Unexpected exception during call to imc.commit") |
|
863 | 864 | raise IMCCommitError(str(e)) |
|
864 | 865 | finally: |
|
865 | 866 | # always clear caches; even if the commit fails we want a fresh object
|
866 | 867 | self.mark_for_invalidation(repo.repo_name) |
|
867 | 868 | |
|
868 | 869 | if trigger_push_hook: |
|
869 | 870 | hooks_utils.trigger_post_push_hook( |
|
870 | 871 | username=user.username, action='push_local', hook_type='post_push', |
|
871 | 872 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
872 | 873 | commit_ids=[tip.raw_id]) |
|
873 | 874 | |
|
874 | 875 | return tip |
|
875 | 876 | |
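# Editor's sketch (illustration only): the `nodes` mapping consumed by
# update_nodes above. Keys are the *old* filenames; 'op' picks the in-memory
# commit action, and an op='mod' entry whose 'filename' differs performs a
# remove+add rename. `model`, `db_user` and `db_repo` are assumed fixtures.
nodes = {
    'README.rst': {'op': 'mod',                     # changed name == rename
                   'filename': 'docs/README.rst',
                   'content': 'relocated content',
                   'mode': 0100644},                # optional file mode
    'settings.cfg': {'op': 'del', 'filename': 'settings.cfg', 'content': ''},
    'settings.ini': {'op': 'add', 'filename': 'settings.ini', 'content': '[app]\n'},
}
tip = model.update_nodes(db_user, db_repo, u'Reorganize files', nodes)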
|
876 | 877 | def delete_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
877 | 878 | author=None, trigger_push_hook=True): |
|
878 | 879 | """ |
|
879 | 880 | Deletes the given nodes from `repo`
|
880 | 881 | |
|
881 | 882 | :param user: RhodeCode User object or user_id, the committer |
|
882 | 883 | :param repo: RhodeCode Repository object |
|
883 | 884 | :param message: commit message |
|
884 | 885 | :param nodes: mapping {filename:{'content':content},...} |
|
885 | 886 | :param parent_commit: parent commit; can be empty, in which case
|
886 | 887 | this is the initial commit
|
887 | 888 | :param author: author of the commit; can differ from the committer,
|
888 | 889 | but only for git
|
889 | 890 | :param trigger_push_hook: whether to trigger post-push hooks
|
890 | 891 | |
|
891 | 892 | :returns: new commit after deletion |
|
892 | 893 | """ |
|
893 | 894 | |
|
894 | 895 | user = self._get_user(user) |
|
895 | 896 | scm_instance = repo.scm_instance(cache=False) |
|
896 | 897 | |
|
897 | 898 | processed_nodes = [] |
|
898 | 899 | for f_path in nodes: |
|
899 | 900 | f_path = self._sanitize_path(f_path) |
|
900 | 901 | # content can be empty; for compatibility it allows the same dict
|
901 | 902 | # structure as add_nodes
|
902 | 903 | content = nodes[f_path].get('content') |
|
903 | 904 | processed_nodes.append((f_path, content)) |
|
904 | 905 | |
|
905 | 906 | message = safe_unicode(message) |
|
906 | 907 | committer = user.full_contact
|
907 | 908 | author = safe_unicode(author) if author else committer
|
908 | 909 | |
|
909 | 910 | imc = scm_instance.in_memory_commit |
|
910 | 911 | |
|
911 | 912 | if not parent_commit: |
|
912 | 913 | parent_commit = EmptyCommit(alias=scm_instance.alias) |
|
913 | 914 | |
|
914 | 915 | if isinstance(parent_commit, EmptyCommit): |
|
915 | 916 | # EmptyCommit means we're editing an empty repository
|
916 | 917 | parents = None |
|
917 | 918 | else: |
|
918 | 919 | parents = [parent_commit] |
|
919 | 920 | # remove multiple nodes
|
920 | 921 | for path, content in processed_nodes: |
|
921 | 922 | imc.remove(FileNode(path, content=content)) |
|
922 | 923 | |
|
923 | 924 | # TODO: handle pre push scenario |
|
924 | 925 | tip = imc.commit(message=message, |
|
925 | 926 | author=author, |
|
926 | 927 | parents=parents, |
|
927 | 928 | branch=parent_commit.branch) |
|
928 | 929 | |
|
929 | 930 | self.mark_for_invalidation(repo.repo_name) |
|
930 | 931 | if trigger_push_hook: |
|
931 | 932 | hooks_utils.trigger_post_push_hook( |
|
932 | 933 | username=user.username, action='push_local', hook_type='post_push', |
|
933 | 934 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
934 | 935 | commit_ids=[tip.raw_id]) |
|
935 | 936 | return tip |
|
936 | 937 | |
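# Editor's sketch (illustration only): removing files via delete_nodes above.
# 'content' may be omitted entirely; only the (sanitized) paths matter.
# `model`, `db_user` and `db_repo` are assumed fixtures.
tip = model.delete_nodes(
    db_user, db_repo,
    message=u'Drop obsolete config files',
    nodes={'conf/old.ini': {}, 'conf/legacy.ini': {}},
    trigger_push_hook=False,
)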
|
937 | 938 | def strip(self, repo, commit_id, branch): |
|
938 | 939 | scm_instance = repo.scm_instance(cache=False) |
|
939 | 940 | scm_instance.config.clear_section('hooks') |
|
940 | 941 | scm_instance.strip(commit_id, branch) |
|
941 | 942 | self.mark_for_invalidation(repo.repo_name) |
|
942 | 943 | |
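# Editor's sketch (illustration only): strip() above first clears the 'hooks'
# config section so repository hooks do not fire during the history rewrite,
# then delegates to the vcs backend and invalidates the repo caches.
model.strip(db_repo, commit_id='deadbeef' * 5, branch='default')  # assumed values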
|
943 | 944 | def get_unread_journal(self): |
|
944 | 945 | return self.sa.query(UserLog).count() |
|
945 | 946 | |
|
946 | 947 | @classmethod |
|
947 | 948 | def backend_landing_ref(cls, repo_type): |
|
948 | 949 | """ |
|
949 | 950 | Return a default landing ref based on a repository type. |
|
950 | 951 | """ |
|
951 | 952 | |
|
952 | 953 | landing_ref = { |
|
953 | 954 | 'hg': ('branch:default', 'default'), |
|
954 | 955 | 'git': ('branch:master', 'master'), |
|
955 | 956 | 'svn': ('rev:tip', 'latest tip'), |
|
956 | 957 | 'default': ('rev:tip', 'latest tip'), |
|
957 | 958 | } |
|
958 | 959 | |
|
959 | 960 | return landing_ref.get(repo_type) or landing_ref['default'] |
|
960 | 961 | |
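# Editor's sketch (illustration only): backend_landing_ref is a classmethod
# returning a (ref, label) pair, with an svn-style fallback for unknown
# backends. The class name ScmModel is assumed here.
assert ScmModel.backend_landing_ref('git') == ('branch:master', 'master')
assert ScmModel.backend_landing_ref('unknown') == ('rev:tip', 'latest tip')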
|
961 | 962 | def get_repo_landing_revs(self, translator, repo=None): |
|
962 | 963 | """ |
|
963 | 964 | Generates select options with tags, branches and bookmarks
|
964 | 965 | (bookmarks for hg only), grouped by type
|
965 | 966 | |
|
966 | 967 | :param repo: repository to generate the options for (optional)
|
967 | 968 | """ |
|
968 | 969 | _ = translator |
|
969 | 970 | repo = self._get_repo(repo) |
|
970 | 971 | |
|
971 | 972 | if repo: |
|
972 | 973 | repo_type = repo.repo_type |
|
973 | 974 | else: |
|
974 | 975 | repo_type = 'default' |
|
975 | 976 | |
|
976 | 977 | default_landing_ref, landing_ref_lbl = self.backend_landing_ref(repo_type) |
|
977 | 978 | |
|
978 | 979 | default_ref_options = [ |
|
979 | 980 | [default_landing_ref, landing_ref_lbl] |
|
980 | 981 | ] |
|
981 | 982 | default_choices = [ |
|
982 | 983 | default_landing_ref |
|
983 | 984 | ] |
|
984 | 985 | |
|
985 | 986 | if not repo: |
|
986 | 987 | return default_choices, default_ref_options |
|
987 | 988 | |
|
988 | 989 | repo = repo.scm_instance() |
|
989 | 990 | |
|
990 | 991 | ref_options = [('rev:tip', 'latest tip')] |
|
991 | 992 | choices = ['rev:tip'] |
|
992 | 993 | |
|
993 | 994 | # branches |
|
994 | 995 | branch_group = [(u'branch:%s' % safe_unicode(b), safe_unicode(b)) for b in repo.branches] |
|
995 | 996 | if not branch_group: |
|
996 | 997 | # new repo, or one without any branches yet
|
997 | 998 | branch_group = default_ref_options |
|
998 | 999 | |
|
999 | 1000 | branches_group = (branch_group, _("Branches")) |
|
1000 | 1001 | ref_options.append(branches_group) |
|
1001 | 1002 | choices.extend([x[0] for x in branches_group[0]]) |
|
1002 | 1003 | |
|
1003 | 1004 | # bookmarks for HG |
|
1004 | 1005 | if repo.alias == 'hg': |
|
1005 | 1006 | bookmarks_group = ( |
|
1006 | 1007 | [(u'book:%s' % safe_unicode(b), safe_unicode(b)) |
|
1007 | 1008 | for b in repo.bookmarks], |
|
1008 | 1009 | _("Bookmarks")) |
|
1009 | 1010 | ref_options.append(bookmarks_group) |
|
1010 | 1011 | choices.extend([x[0] for x in bookmarks_group[0]]) |
|
1011 | 1012 | |
|
1012 | 1013 | # tags |
|
1013 | 1014 | tags_group = ( |
|
1014 | 1015 | [(u'tag:%s' % safe_unicode(t), safe_unicode(t)) |
|
1015 | 1016 | for t in repo.tags], |
|
1016 | 1017 | _("Tags")) |
|
1017 | 1018 | ref_options.append(tags_group) |
|
1018 | 1019 | choices.extend([x[0] for x in tags_group[0]]) |
|
1019 | 1020 | |
|
1020 | 1021 | return choices, ref_options |
|
1021 | 1022 | |
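# Editor's sketch (illustration only): shape of the two values returned by
# get_repo_landing_revs above for a git repo with one branch and one tag.
# `choices` is the flat list of valid values; `ref_options` mixes the plain
# tip entry with (entries, translated-label) groups.
choices = ['rev:tip', u'branch:master', u'tag:v1.0']
ref_options = [
    ('rev:tip', 'latest tip'),
    ([(u'branch:master', u'master')], u'Branches'),
    ([(u'tag:v1.0', u'v1.0')], u'Tags'),
]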
|
1022 | 1023 | def get_server_info(self, environ=None): |
|
1023 | 1024 | server_info = get_system_info(environ) |
|
1024 | 1025 | return server_info |