@@ -1,364 +1,364 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2015-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import time |
|
22 | 22 | import errno |
|
23 | 23 | import logging |
|
24 | 24 | |
|
25 | 25 | import msgpack |
|
26 | 26 | import gevent |
|
27 | 27 | import redis |
|
28 | 28 | |
|
29 | 29 | from dogpile.cache.api import CachedValue |
|
30 | 30 | from dogpile.cache.backends import memory as memory_backend |
|
31 | 31 | from dogpile.cache.backends import file as file_backend |
|
32 | 32 | from dogpile.cache.backends import redis as redis_backend |
|
33 | 33 | from dogpile.cache.backends.file import NO_VALUE, compat, FileLock |
|
34 | 34 | from dogpile.cache.util import memoized_property |
|
35 | 35 | |
|
36 | 36 | from pyramid.settings import asbool |
|
37 | 37 | |
|
38 | 38 | from rhodecode.lib.memory_lru_dict import LRUDict, LRUDictDebug |
|
39 | 39 | from rhodecode.lib.utils import safe_str |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | _default_max_size = 1024 |
|
43 | 43 | |
|
44 | 44 | log = logging.getLogger(__name__) |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | class LRUMemoryBackend(memory_backend.MemoryBackend): |
|
48 | 48 | key_prefix = 'lru_mem_backend' |
|
49 | 49 | pickle_values = False |
|
50 | 50 | |
|
51 | 51 | def __init__(self, arguments): |
|
52 | 52 | max_size = arguments.pop('max_size', _default_max_size) |
|
53 | 53 | |
|
54 | 54 | LRUDictClass = LRUDict |
|
55 | 55 | if arguments.pop('log_key_count', None): |
|
56 | 56 | LRUDictClass = LRUDictDebug |
|
57 | 57 | |
|
58 | 58 | arguments['cache_dict'] = LRUDictClass(max_size) |
|
59 | 59 | super(LRUMemoryBackend, self).__init__(arguments) |
|
60 | 60 | |
|
61 | 61 | def delete(self, key): |
|
62 | 62 | try: |
|
63 | 63 | del self._cache[key] |
|
64 | 64 | except KeyError: |
|
65 | 65 | # we don't care if the key isn't there at deletion |
|
66 | 66 | pass |
|
67 | 67 | |
|
68 | 68 | def delete_multi(self, keys): |
|
69 | 69 | for key in keys: |
|
70 | 70 | self.delete(key) |
|
71 | 71 | |
|
72 | 72 | |
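A quick behavioural sketch of the tolerant delete above (illustrative only; it assumes this module is importable and uses the class exactly as defined in the diff):

    # deletes stay silent when the LRU has already evicted the key, so
    # invalidation code never needs to guard against KeyError itself
    backend = LRUMemoryBackend({'max_size': 2})   # max_size is popped by __init__
    backend.set('a', 1)
    backend.set('b', 2)
    backend.set('c', 3)                 # 'a' may already be evicted here
    backend.delete('a')                 # silent no-op if the key is gone
    backend.delete_multi(['a', 'b'])    # bulk form reuses the same delete()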
|
73 | 73 | class PickleSerializer(object): |
|
74 | 74 | |
|
75 | 75 | def _dumps(self, value, safe=False): |
|
76 | 76 | try: |
|
77 | 77 | return compat.pickle.dumps(value) |
|
78 | 78 | except Exception: |
|
79 | 79 | if safe: |
|
80 | 80 | return NO_VALUE |
|
81 | 81 | else: |
|
82 | 82 | raise |
|
83 | 83 | |
|
84 | 84 | def _loads(self, value, safe=True): |
|
85 | 85 | try: |
|
86 | 86 | return compat.pickle.loads(value) |
|
87 | 87 | except Exception: |
|
88 | 88 | if safe: |
|
89 | 89 | return NO_VALUE |
|
90 | 90 | else: |
|
91 | 91 | raise |
|
92 | 92 | |
|
93 | 93 | |
|
94 | 94 | class MsgPackSerializer(object): |
|
95 | 95 | |
|
96 | 96 | def _dumps(self, value, safe=False): |
|
97 | 97 | try: |
|
98 | 98 | return msgpack.packb(value) |
|
99 | 99 | except Exception: |
|
100 | 100 | if safe: |
|
101 | 101 | return NO_VALUE |
|
102 | 102 | else: |
|
103 | 103 | raise |
|
104 | 104 | |
|
105 | 105 | def _loads(self, value, safe=True): |
|
106 | 106 | """ |
|
107 | 107 | pickle maintained the `CachedValue` wrapper of the tuple |
|
108 | 108 | msgpack does not, so it must be added back in. |
|
109 | 109 | """ |
|
110 | 110 | try: |
|
111 | 111 | value = msgpack.unpackb(value, use_list=False) |
|
112 | 112 | return CachedValue(*value) |
|
113 | 113 | except Exception: |
|
114 | 114 | if safe: |
|
115 | 115 | return NO_VALUE |
|
116 | 116 | else: |
|
117 | 117 | raise |
|
118 | 118 | |
|
119 | 119 | |
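A stand-alone illustration of the docstring above (assumes msgpack and dogpile.cache are installed): pickle preserves the CachedValue tuple subclass, msgpack flattens it to a plain tuple, so _loads() has to rebuild the wrapper:

    import time
    import msgpack
    from dogpile.cache.api import CachedValue

    original = CachedValue('payload', {'ct': time.time(), 'v': 1})
    packed = msgpack.packb(original)                    # stored as a 2-element array
    restored = msgpack.unpackb(packed, use_list=False)  # comes back as a plain tuple
    print(isinstance(restored, CachedValue))            # False - wrapper type is lost
    print(isinstance(CachedValue(*restored), CachedValue))  # True - re-wrapped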
|
120 | 120 | import fcntl |
|
121 | 121 | flock_org = fcntl.flock |
|
122 | 122 | |
|
123 | 123 | |
|
124 | 124 | class CustomLockFactory(FileLock): |
|
125 | 125 | |
|
126 | 126 | @memoized_property |
|
127 | 127 | def _module(self): |
|
128 | 128 | |
|
129 | 129 | def gevent_flock(fd, operation): |
|
130 | 130 | """ |
|
131 | 131 | Gevent compatible flock |
|
132 | 132 | """ |
|
133 | 133 | # set non-blocking; this will cause an exception if we cannot acquire the lock |
|
134 | 134 | operation |= fcntl.LOCK_NB |
|
135 | 135 | start_lock_time = time.time() |
|
136 | 136 | timeout = 60 * 15 # 15min |
|
137 | 137 | while True: |
|
138 | 138 | try: |
|
139 | 139 | flock_org(fd, operation) |
|
140 | 140 | # lock has been acquired |
|
141 | 141 | break |
|
142 | 142 | except (OSError, IOError) as e: |
|
143 | 143 | # raise on errors other than 'Resource temporarily unavailable' |
|
144 | 144 | if e.errno != errno.EAGAIN: |
|
145 | 145 | raise |
|
146 | 146 | elif (time.time() - start_lock_time) > timeout: |
|
147 | 147 | # waited too much time on a lock, better fail than loop forever |
|
148 | 148 | log.error('Failed to acquire lock on `%s` after waiting %ss', |
|
149 | 149 | self.filename, timeout) |
|
150 | 150 | raise |
|
151 | 151 | wait_timeout = 0.03 |
|
152 | 152 | log.debug('Failed to acquire lock on `%s`, retry in %ss', |
|
153 | 153 | self.filename, wait_timeout) |
|
154 | 154 | gevent.sleep(wait_timeout) |
|
155 | 155 | |
|
156 | 156 | fcntl.flock = gevent_flock |
|
157 | 157 | return fcntl |
|
158 | 158 | |
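A minimal demonstration of the primitive that gevent_flock() polls on (hypothetical lock-file path; run two copies concurrently to hit the retry path):

    import errno
    import fcntl

    fd = open('/tmp/example.lock', 'w')
    try:
        # the non-blocking attempt raises EAGAIN instead of parking the process
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        print('lock acquired')
    except (OSError, IOError) as e:
        if e.errno != errno.EAGAIN:
            raise                 # a real error, exactly as in the loop above
        print('held elsewhere - would gevent.sleep() and retry')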
|
159 | 159 | |
|
160 | 160 | class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend): |
|
161 | 161 | key_prefix = 'file_backend' |
|
162 | 162 | |
|
163 | 163 | def __init__(self, arguments): |
|
164 | 164 | arguments['lock_factory'] = CustomLockFactory |
|
165 | 165 | db_file = arguments.get('filename') |
|
166 | 166 | |
|
167 | 167 | log.debug('initializing %s DB in %s', self.__class__.__name__, db_file) |
|
168 | 168 | try: |
|
169 | 169 | super(FileNamespaceBackend, self).__init__(arguments) |
|
170 | 170 | except Exception: |
|
171 | log.error('Failed to initialize db at: %s', db_file) | |
|
171 | log.exception('Failed to initialize db at: %s', db_file) | |
|
172 | 172 | raise |
|
173 | 173 | |
|
174 | 174 | def __repr__(self): |
|
175 | 175 | return '{} `{}`'.format(self.__class__, self.filename) |
|
176 | 176 | |
|
177 | 177 | def list_keys(self, prefix=''): |
|
178 | 178 | prefix = '{}:{}'.format(self.key_prefix, prefix) |
|
179 | 179 | |
|
180 | 180 | def cond(v): |
|
181 | 181 | if not prefix: |
|
182 | 182 | return True |
|
183 | 183 | |
|
184 | 184 | if v.startswith(prefix): |
|
185 | 185 | return True |
|
186 | 186 | return False |
|
187 | 187 | |
|
188 | 188 | with self._dbm_file(True) as dbm: |
|
189 | 189 | try: |
|
190 | 190 | return filter(cond, dbm.keys()) |
|
191 | 191 | except Exception: |
|
192 | 192 | log.error('Failed to fetch DBM keys from DB: %s', self.get_store()) |
|
193 | 193 | raise |
|
194 | 194 | |
|
195 | 195 | def get_store(self): |
|
196 | 196 | return self.filename |
|
197 | 197 | |
|
198 | 198 | def _dbm_get(self, key): |
|
199 | 199 | with self._dbm_file(False) as dbm: |
|
200 | 200 | if hasattr(dbm, 'get'): |
|
201 | 201 | value = dbm.get(key, NO_VALUE) |
|
202 | 202 | else: |
|
203 | 203 | # gdbm objects lack a .get method |
|
204 | 204 | try: |
|
205 | 205 | value = dbm[key] |
|
206 | 206 | except KeyError: |
|
207 | 207 | value = NO_VALUE |
|
208 | 208 | if value is not NO_VALUE: |
|
209 | 209 | value = self._loads(value) |
|
210 | 210 | return value |
|
211 | 211 | |
|
212 | 212 | def get(self, key): |
|
213 | 213 | try: |
|
214 | 214 | return self._dbm_get(key) |
|
215 | 215 | except Exception: |
|
216 | 216 | log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store()) |
|
217 | 217 | raise |
|
218 | 218 | |
|
219 | 219 | def set(self, key, value): |
|
220 | 220 | with self._dbm_file(True) as dbm: |
|
221 | 221 | dbm[key] = self._dumps(value) |
|
222 | 222 | |
|
223 | 223 | def set_multi(self, mapping): |
|
224 | 224 | with self._dbm_file(True) as dbm: |
|
225 | 225 | for key, value in mapping.items(): |
|
226 | 226 | dbm[key] = self._dumps(value) |
|
227 | 227 | |
|
228 | 228 | |
|
229 | 229 | class BaseRedisBackend(redis_backend.RedisBackend): |
|
230 | 230 | key_prefix = '' |
|
231 | 231 | |
|
232 | 232 | def __init__(self, arguments): |
|
233 | 233 | super(BaseRedisBackend, self).__init__(arguments) |
|
234 | 234 | self._lock_timeout = self.lock_timeout |
|
235 | 235 | self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True)) |
|
236 | 236 | |
|
237 | 237 | if self._lock_auto_renewal and not self._lock_timeout: |
|
238 | 238 | # set default timeout for auto_renewal |
|
239 | 239 | self._lock_timeout = 30 |
|
240 | 240 | |
|
241 | 241 | def _create_client(self): |
|
242 | 242 | args = {} |
|
243 | 243 | |
|
244 | 244 | if self.url is not None: |
|
245 | 245 | args.update(url=self.url) |
|
246 | 246 | |
|
247 | 247 | else: |
|
248 | 248 | args.update( |
|
249 | 249 | host=self.host, password=self.password, |
|
250 | 250 | port=self.port, db=self.db |
|
251 | 251 | ) |
|
252 | 252 | |
|
253 | 253 | connection_pool = redis.ConnectionPool(**args) |
|
254 | 254 | |
|
255 | 255 | return redis.StrictRedis(connection_pool=connection_pool) |
|
256 | 256 | |
|
257 | 257 | def list_keys(self, prefix=''): |
|
258 | 258 | prefix = '{}:{}*'.format(self.key_prefix, prefix) |
|
259 | 259 | return self.client.keys(prefix) |
|
260 | 260 | |
|
261 | 261 | def get_store(self): |
|
262 | 262 | return self.client.connection_pool |
|
263 | 263 | |
|
264 | 264 | def get(self, key): |
|
265 | 265 | value = self.client.get(key) |
|
266 | 266 | if value is None: |
|
267 | 267 | return NO_VALUE |
|
268 | 268 | return self._loads(value) |
|
269 | 269 | |
|
270 | 270 | def get_multi(self, keys): |
|
271 | 271 | if not keys: |
|
272 | 272 | return [] |
|
273 | 273 | values = self.client.mget(keys) |
|
274 | 274 | loads = self._loads |
|
275 | 275 | return [ |
|
276 | 276 | loads(v) if v is not None else NO_VALUE |
|
277 | 277 | for v in values] |
|
278 | 278 | |
|
279 | 279 | def set(self, key, value): |
|
280 | 280 | if self.redis_expiration_time: |
|
281 | 281 | self.client.setex(key, self.redis_expiration_time, |
|
282 | 282 | self._dumps(value)) |
|
283 | 283 | else: |
|
284 | 284 | self.client.set(key, self._dumps(value)) |
|
285 | 285 | |
|
286 | 286 | def set_multi(self, mapping): |
|
287 | 287 | dumps = self._dumps |
|
288 | 288 | mapping = dict( |
|
289 | 289 | (k, dumps(v)) |
|
290 | 290 | for k, v in mapping.items() |
|
291 | 291 | ) |
|
292 | 292 | |
|
293 | 293 | if not self.redis_expiration_time: |
|
294 | 294 | self.client.mset(mapping) |
|
295 | 295 | else: |
|
296 | 296 | pipe = self.client.pipeline() |
|
297 | 297 | for key, value in mapping.items(): |
|
298 | 298 | pipe.setex(key, self.redis_expiration_time, value) |
|
299 | 299 | pipe.execute() |
|
300 | 300 | |
|
301 | 301 | def get_mutex(self, key): |
|
302 | 302 | if self.distributed_lock: |
|
303 | 303 | lock_key = redis_backend.u('_lock_{0}').format(safe_str(key)) |
|
304 | 304 | return get_mutex_lock(self.client, lock_key, self._lock_timeout, |
|
305 | 305 | auto_renewal=self._lock_auto_renewal) |
|
306 | 306 | else: |
|
307 | 307 | return None |
|
308 | 308 | |
|
309 | 309 | |
|
310 | 310 | class RedisPickleBackend(PickleSerializer, BaseRedisBackend): |
|
311 | 311 | key_prefix = 'redis_pickle_backend' |
|
312 | 312 | pass |
|
313 | 313 | |
|
314 | 314 | |
|
315 | 315 | class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend): |
|
316 | 316 | key_prefix = 'redis_msgpack_backend' |
|
317 | 317 | pass |
|
318 | 318 | |
|
319 | 319 | |
|
320 | 320 | def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False): |
|
321 | 321 | import redis_lock |
|
322 | 322 | |
|
323 | 323 | class _RedisLockWrapper(object): |
|
324 | 324 | """LockWrapper for redis_lock""" |
|
325 | 325 | |
|
326 | 326 | @classmethod |
|
327 | 327 | def get_lock(cls): |
|
328 | 328 | return redis_lock.Lock( |
|
329 | 329 | redis_client=client, |
|
330 | 330 | name=lock_key, |
|
331 | 331 | expire=lock_timeout, |
|
332 | 332 | auto_renewal=auto_renewal, |
|
333 | 333 | strict=True, |
|
334 | 334 | ) |
|
335 | 335 | |
|
336 | 336 | def __repr__(self): |
|
337 | 337 | return "{}:{}".format(self.__class__.__name__, lock_key) |
|
338 | 338 | |
|
339 | 339 | def __str__(self): |
|
340 | 340 | return "{}:{}".format(self.__class__.__name__, lock_key) |
|
341 | 341 | |
|
342 | 342 | def __init__(self): |
|
343 | 343 | self.lock = self.get_lock() |
|
344 | 344 | self.lock_key = lock_key |
|
345 | 345 | |
|
346 | 346 | def acquire(self, wait=True): |
|
347 | 347 | log.debug('Trying to acquire Redis lock for key %s', self.lock_key) |
|
348 | 348 | try: |
|
349 | 349 | acquired = self.lock.acquire(wait) |
|
350 | 350 | log.debug('Got lock for key %s, %s', self.lock_key, acquired) |
|
351 | 351 | return acquired |
|
352 | 352 | except redis_lock.AlreadyAcquired: |
|
353 | 353 | return False |
|
354 | 354 | except redis_lock.AlreadyStarted: |
|
355 | 355 | # refresh thread exists, but it also means we acquired the lock |
|
356 | 356 | return True |
|
357 | 357 | |
|
358 | 358 | def release(self): |
|
359 | 359 | try: |
|
360 | 360 | self.lock.release() |
|
361 | 361 | except redis_lock.NotAcquired: |
|
362 | 362 | pass |
|
363 | 363 | |
|
364 | 364 | return _RedisLockWrapper() |
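A hedged usage sketch for the wrapper returned above (assumes python-redis-lock and a reachable Redis instance, which this backend already requires):

    import redis

    client = redis.StrictRedis()   # hypothetical local Redis
    mutex = get_mutex_lock(client, '_lock_demo', lock_timeout=30, auto_renewal=True)
    if mutex.acquire(wait=True):
        try:
            pass   # cross-process critical section; redis_lock renews the TTL in the background
        finally:
            mutex.release()        # NotAcquired is swallowed by the wrapper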
@@ -1,378 +1,381 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Client for the VCSServer implemented based on HTTP. |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import copy |
|
26 | 26 | import logging |
|
27 | 27 | import threading |
|
28 | 28 | import time |
|
29 | 29 | import urllib2 |
|
30 | 30 | import urlparse |
|
31 | 31 | import uuid |
|
32 | 32 | import traceback |
|
33 | 33 | |
|
34 | 34 | import pycurl |
|
35 | 35 | import msgpack |
|
36 | 36 | import requests |
|
37 | 37 | from requests.packages.urllib3.util.retry import Retry |
|
38 | 38 | |
|
39 | 39 | import rhodecode |
|
40 | 40 | from rhodecode.lib import rc_cache |
|
41 | 41 | from rhodecode.lib.rc_cache.utils import compute_key_from_params |
|
42 | 42 | from rhodecode.lib.system_info import get_cert_path |
|
43 | 43 | from rhodecode.lib.vcs import exceptions, CurlSession |
|
44 | 44 | |
|
45 | 45 | log = logging.getLogger(__name__) |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | # TODO: mikhail: Keep it in sync with vcsserver's |
|
49 | 49 | # HTTPApplication.ALLOWED_EXCEPTIONS |
|
50 | 50 | EXCEPTIONS_MAP = { |
|
51 | 51 | 'KeyError': KeyError, |
|
52 | 52 | 'URLError': urllib2.URLError, |
|
53 | 53 | } |
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | def _remote_call(url, payload, exceptions_map, session): |
|
57 | 57 | try: |
|
58 | 58 | response = session.post(url, data=msgpack.packb(payload)) |
|
59 | 59 | except pycurl.error as e: |
|
60 | 60 | msg = '{}. \npycurl traceback: {}'.format(e, traceback.format_exc()) |
|
61 | 61 | raise exceptions.HttpVCSCommunicationError(msg) |
|
62 | 62 | except Exception as e: |
|
63 | 63 | message = getattr(e, 'message', '') |
|
64 | 64 | if 'Failed to connect' in message: |
|
65 | 65 | # gevent doesn't return proper pycurl errors |
|
66 | 66 | raise exceptions.HttpVCSCommunicationError(e) |
|
67 | 67 | else: |
|
68 | 68 | raise |
|
69 | 69 | |
|
70 | 70 | if response.status_code >= 400: |
|
71 | 71 | log.error('Call to %s returned non 200 HTTP code: %s', |
|
72 | 72 | url, response.status_code) |
|
73 | 73 | raise exceptions.HttpVCSCommunicationError(repr(response.content)) |
|
74 | 74 | |
|
75 | 75 | try: |
|
76 | 76 | response = msgpack.unpackb(response.content) |
|
77 | 77 | except Exception: |
|
78 | 78 | log.exception('Failed to decode response %r', response.content) |
|
79 | 79 | raise |
|
80 | 80 | |
|
81 | 81 | error = response.get('error') |
|
82 | 82 | if error: |
|
83 | 83 | type_ = error.get('type', 'Exception') |
|
84 | 84 | exc = exceptions_map.get(type_, Exception) |
|
85 | 85 | exc = exc(error.get('message')) |
|
86 | 86 | try: |
|
87 | 87 | exc._vcs_kind = error['_vcs_kind'] |
|
88 | 88 | except KeyError: |
|
89 | 89 | pass |
|
90 | 90 | |
|
91 | 91 | try: |
|
92 | 92 | exc._vcs_server_traceback = error['traceback'] |
|
93 | 93 | exc._vcs_server_org_exc_name = error['org_exc'] |
|
94 | 94 | exc._vcs_server_org_exc_tb = error['org_exc_tb'] |
|
95 | 95 | except KeyError: |
|
96 | 96 | pass |
|
97 | 97 | |
|
98 | 98 | raise exc |
|
99 | 99 | return response.get('result') |
|
100 | 100 | |
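Illustrative only: the msgpack envelope that _remote_call() posts to the VCSServer ('pull' here is a hypothetical method name):

    import uuid
    import msgpack

    payload = {
        'id': str(uuid.uuid4()),                 # correlation id
        'method': 'pull',
        'params': {'args': (), 'kwargs': {}},
    }
    body = msgpack.packb(payload)   # the request body sent by session.post()
    # the reply is msgpack as well; an 'error' key is re-raised locally,
    # otherwise the 'result' key is handed back to the caller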
|
101 | 101 | |
|
102 | 102 | def _streaming_remote_call(url, payload, exceptions_map, session, chunk_size): |
|
103 | 103 | try: |
|
104 | 104 | response = session.post(url, data=msgpack.packb(payload)) |
|
105 | 105 | except pycurl.error as e: |
|
106 | 106 | msg = '{}. \npycurl traceback: {}'.format(e, traceback.format_exc()) |
|
107 | 107 | raise exceptions.HttpVCSCommunicationError(msg) |
|
108 | 108 | except Exception as e: |
|
109 | 109 | message = getattr(e, 'message', '') |
|
110 | 110 | if 'Failed to connect' in message: |
|
111 | 111 | # gevent doesn't return proper pycurl errors |
|
112 | 112 | raise exceptions.HttpVCSCommunicationError(e) |
|
113 | 113 | else: |
|
114 | 114 | raise |
|
115 | 115 | |
|
116 | 116 | if response.status_code >= 400: |
|
117 | 117 | log.error('Call to %s returned non 200 HTTP code: %s', |
|
118 | 118 | url, response.status_code) |
|
119 | 119 | raise exceptions.HttpVCSCommunicationError(repr(response.content)) |
|
120 | 120 | |
|
121 | 121 | return response.iter_content(chunk_size=chunk_size) |
|
122 | 122 | |
|
123 | 123 | |
|
124 | 124 | class ServiceConnection(object): |
|
125 | 125 | def __init__(self, server_and_port, backend_endpoint, session_factory): |
|
126 | 126 | self.url = urlparse.urljoin('http://%s' % server_and_port, backend_endpoint) |
|
127 | 127 | self._session_factory = session_factory |
|
128 | 128 | |
|
129 | 129 | def __getattr__(self, name): |
|
130 | 130 | def f(*args, **kwargs): |
|
131 | 131 | return self._call(name, *args, **kwargs) |
|
132 | 132 | return f |
|
133 | 133 | |
|
134 | 134 | @exceptions.map_vcs_exceptions |
|
135 | 135 | def _call(self, name, *args, **kwargs): |
|
136 | 136 | payload = { |
|
137 | 137 | 'id': str(uuid.uuid4()), |
|
138 | 138 | 'method': name, |
|
139 | 139 | 'params': {'args': args, 'kwargs': kwargs} |
|
140 | 140 | } |
|
141 | 141 | return _remote_call( |
|
142 | 142 | self.url, payload, EXCEPTIONS_MAP, self._session_factory()) |
|
143 | 143 | |
|
144 | 144 | |
|
145 | 145 | class RemoteVCSMaker(object): |
|
146 | 146 | |
|
147 | 147 | def __init__(self, server_and_port, backend_endpoint, backend_type, session_factory): |
|
148 | 148 | self.url = urlparse.urljoin('http://%s' % server_and_port, backend_endpoint) |
|
149 | 149 | self.stream_url = urlparse.urljoin('http://%s' % server_and_port, backend_endpoint+'/stream') |
|
150 | 150 | |
|
151 | 151 | self._session_factory = session_factory |
|
152 | 152 | self.backend_type = backend_type |
|
153 | 153 | |
|
154 | 154 | @classmethod |
|
155 | 155 | def init_cache_region(cls, repo_id): |
|
156 | 156 | cache_namespace_uid = 'cache_repo.{}'.format(repo_id) |
|
157 | 157 | region = rc_cache.get_or_create_region('cache_repo', cache_namespace_uid) |
|
158 | 158 | return region, cache_namespace_uid |
|
159 | 159 | |
|
160 | 160 | def __call__(self, path, repo_id, config, with_wire=None): |
|
161 | 161 | log.debug('%s RepoMaker call on %s', self.backend_type.upper(), path) |
|
162 | 162 | return RemoteRepo(path, repo_id, config, self, with_wire=with_wire) |
|
163 | 163 | |
|
164 | 164 | def __getattr__(self, name): |
|
165 | 165 | def remote_attr(*args, **kwargs): |
|
166 | 166 | return self._call(name, *args, **kwargs) |
|
167 | 167 | return remote_attr |
|
168 | 168 | |
|
169 | 169 | @exceptions.map_vcs_exceptions |
|
170 | 170 | def _call(self, func_name, *args, **kwargs): |
|
171 | 171 | payload = { |
|
172 | 172 | 'id': str(uuid.uuid4()), |
|
173 | 173 | 'method': func_name, |
|
174 | 174 | 'backend': self.backend_type, |
|
175 | 175 | 'params': {'args': args, 'kwargs': kwargs} |
|
176 | 176 | } |
|
177 | 177 | url = self.url |
|
178 | 178 | return _remote_call(url, payload, EXCEPTIONS_MAP, self._session_factory()) |
|
179 | 179 | |
|
180 | 180 | |
|
181 | 181 | class RemoteRepo(object): |
|
182 | 182 | CHUNK_SIZE = 16384 |
|
183 | 183 | |
|
184 | 184 | def __init__(self, path, repo_id, config, remote_maker, with_wire=None): |
|
185 | 185 | self.url = remote_maker.url |
|
186 | 186 | self.stream_url = remote_maker.stream_url |
|
187 | 187 | self._session = remote_maker._session_factory() |
|
188 | 188 | self._cache_region, self._cache_namespace = \ |
|
189 | remote_maker.init_cache_region(repo_id) | |
|
189 | remote_maker.init_cache_region(self._repo_id_sanitizer(repo_id)) | |
|
190 | 190 | |
|
191 | 191 | with_wire = with_wire or {} |
|
192 | 192 | |
|
193 | 193 | repo_state_uid = with_wire.get('repo_state_uid') or 'state' |
|
194 | 194 | self._wire = { |
|
195 | 195 | "path": path, # repo path |
|
196 | 196 | "repo_id": repo_id, |
|
197 | 197 | "config": config, |
|
198 | 198 | "repo_state_uid": repo_state_uid, |
|
199 | 199 | "context": self._create_vcs_cache_context(path, repo_state_uid) |
|
200 | 200 | } |
|
201 | 201 | |
|
202 | 202 | if with_wire: |
|
203 | 203 | self._wire.update(with_wire) |
|
204 | 204 | |
|
205 | 205 | # NOTE(johbo): Trading complexity for performance. Avoiding the call to |
|
206 | 206 | # log.debug brings a few percent gain even if it is not active. |
|
207 | 207 | if log.isEnabledFor(logging.DEBUG): |
|
208 | 208 | self._call_with_logging = True |
|
209 | 209 | |
|
210 | 210 | self.cert_dir = get_cert_path(rhodecode.CONFIG.get('__file__')) |
|
211 | 211 | |
|
212 | def _repo_id_sanitizer(self, repo_id): | |
|
213 | return repo_id.replace('/', '__') | |
|
214 | ||
|
212 | 215 | def __getattr__(self, name): |
|
213 | 216 | |
|
214 | 217 | if name.startswith('stream:'): |
|
215 | 218 | def repo_remote_attr(*args, **kwargs): |
|
216 | 219 | return self._call_stream(name, *args, **kwargs) |
|
217 | 220 | else: |
|
218 | 221 | def repo_remote_attr(*args, **kwargs): |
|
219 | 222 | return self._call(name, *args, **kwargs) |
|
220 | 223 | |
|
221 | 224 | return repo_remote_attr |
|
222 | 225 | |
|
223 | 226 | def _base_call(self, name, *args, **kwargs): |
|
224 | 227 | # TODO: oliver: This is currently necessary pre-call since the |
|
225 | 228 | # config object is being changed for hooking scenarios |
|
226 | 229 | wire = copy.deepcopy(self._wire) |
|
227 | 230 | wire["config"] = wire["config"].serialize() |
|
228 | 231 | wire["config"].append(('vcs', 'ssl_dir', self.cert_dir)) |
|
229 | 232 | |
|
230 | 233 | payload = { |
|
231 | 234 | 'id': str(uuid.uuid4()), |
|
232 | 235 | 'method': name, |
|
233 | 236 | 'params': {'wire': wire, 'args': args, 'kwargs': kwargs} |
|
234 | 237 | } |
|
235 | 238 | |
|
236 | 239 | context_uid = wire.get('context') |
|
237 | 240 | return context_uid, payload |
|
238 | 241 | |
|
239 | 242 | @exceptions.map_vcs_exceptions |
|
240 | 243 | def _call(self, name, *args, **kwargs): |
|
241 | 244 | context_uid, payload = self._base_call(name, *args, **kwargs) |
|
242 | 245 | url = self.url |
|
243 | 246 | |
|
244 | 247 | start = time.time() |
|
245 | 248 | |
|
246 | 249 | cache_on = False |
|
247 | 250 | cache_key = '' |
|
248 | 251 | local_cache_on = rhodecode.CONFIG.get('vcs.methods.cache') |
|
249 | 252 | |
|
250 | 253 | cache_methods = [ |
|
251 | 254 | 'branches', 'tags', 'bookmarks', |
|
252 | 255 | 'is_large_file', 'is_binary', 'fctx_size', 'node_history', 'blob_raw_length', |
|
253 | 256 | 'revision', 'tree_items', |
|
254 | 257 | 'ctx_list', |
|
255 | 258 | 'bulk_request', |
|
256 | 259 | ] |
|
257 | 260 | |
|
258 | 261 | if local_cache_on and name in cache_methods: |
|
259 | 262 | cache_on = True |
|
260 | 263 | repo_state_uid = self._wire['repo_state_uid'] |
|
261 | 264 | call_args = [a for a in args] |
|
262 | 265 | cache_key = compute_key_from_params(repo_state_uid, name, *call_args) |
|
263 | 266 | |
|
264 | 267 | @self._cache_region.conditional_cache_on_arguments( |
|
265 | 268 | namespace=self._cache_namespace, condition=cache_on and cache_key) |
|
266 | 269 | def remote_call(_cache_key): |
|
267 | 270 | if self._call_with_logging: |
|
268 | 271 | log.debug('Calling %s@%s with args:%.10240r. wire_context: %s cache_on: %s', |
|
269 | 272 | url, name, args, context_uid, cache_on) |
|
270 | 273 | return _remote_call(url, payload, EXCEPTIONS_MAP, self._session) |
|
271 | 274 | |
|
272 | 275 | result = remote_call(cache_key) |
|
273 | 276 | if self._call_with_logging: |
|
274 | 277 | log.debug('Call %s@%s took: %.4fs. wire_context: %s', |
|
275 | 278 | url, name, time.time()-start, context_uid) |
|
276 | 279 | return result |
|
277 | 280 | |
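Why keying on repo_state_uid works, as a sketch (it assumes compute_key_from_params is a deterministic hash over its positional arguments, which is how it is used above):

    # same repo state + same call -> same key -> possible cache hit
    key_a = compute_key_from_params('state-1', 'branches')
    key_b = compute_key_from_params('state-1', 'branches')
    # a push bumps repo_state_uid, so every method maps to fresh keys and
    # the stale entries are simply never read again
    key_c = compute_key_from_params('state-2', 'branches')
    assert key_a == key_b and key_a != key_c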
|
278 | 281 | @exceptions.map_vcs_exceptions |
|
279 | 282 | def _call_stream(self, name, *args, **kwargs): |
|
280 | 283 | context_uid, payload = self._base_call(name, *args, **kwargs) |
|
281 | 284 | payload['chunk_size'] = self.CHUNK_SIZE |
|
282 | 285 | url = self.stream_url |
|
283 | 286 | |
|
284 | 287 | start = time.time() |
|
285 | 288 | if self._call_with_logging: |
|
286 | 289 | log.debug('Calling %s@%s with args:%.10240r. wire_context: %s', |
|
287 | 290 | url, name, args, context_uid) |
|
288 | 291 | |
|
289 | 292 | result = _streaming_remote_call(url, payload, EXCEPTIONS_MAP, self._session, |
|
290 | 293 | self.CHUNK_SIZE) |
|
291 | 294 | |
|
292 | 295 | if self._call_with_logging: |
|
293 | 296 | log.debug('Call %s@%s took: %.4fs. wire_context: %s', |
|
294 | 297 | url, name, time.time()-start, context_uid) |
|
295 | 298 | return result |
|
296 | 299 | |
|
297 | 300 | def __getitem__(self, key): |
|
298 | 301 | return self.revision(key) |
|
299 | 302 | |
|
300 | 303 | def _create_vcs_cache_context(self, *args): |
|
301 | 304 | """ |
|
302 | 305 | Creates a unique string which is passed to the VCSServer on every |
|
303 | 306 | remote call. It is used as cache key in the VCSServer. |
|
304 | 307 | """ |
|
305 | 308 | hash_key = '-'.join(map(str, args)) |
|
306 | 309 | return str(uuid.uuid5(uuid.NAMESPACE_URL, hash_key)) |
|
307 | 310 | |
|
308 | 311 | def invalidate_vcs_cache(self): |
|
309 | 312 | """ |
|
310 | 313 | This invalidates the context which is sent to the VCSServer on every |
|
311 | 314 | call to a remote method. It forces the VCSServer to create a fresh |
|
312 | 315 | repository instance on the next call to a remote method. |
|
313 | 316 | """ |
|
314 | 317 | self._wire['context'] = str(uuid.uuid4()) |
|
315 | 318 | |
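A sketch of the context-id life cycle: uuid5 over (path, repo_state_uid) is deterministic, so every RemoteRepo pointing at the same repo state shares one server-side cache context until invalidate_vcs_cache() swaps in a random uuid4 (the path below is hypothetical):

    import uuid

    hash_key = '-'.join(map(str, ('/srv/repos/demo', 'state')))
    ctx_a = str(uuid.uuid5(uuid.NAMESPACE_URL, hash_key))
    ctx_b = str(uuid.uuid5(uuid.NAMESPACE_URL, hash_key))
    assert ctx_a == ctx_b                  # stable across calls and processes
    assert ctx_a != str(uuid.uuid4())      # invalidation forces a fresh context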
|
316 | 319 | |
|
317 | 320 | class VcsHttpProxy(object): |
|
318 | 321 | |
|
319 | 322 | CHUNK_SIZE = 16384 |
|
320 | 323 | |
|
321 | 324 | def __init__(self, server_and_port, backend_endpoint): |
|
322 | 325 | retries = Retry(total=5, connect=None, read=None, redirect=None) |
|
323 | 326 | |
|
324 | 327 | adapter = requests.adapters.HTTPAdapter(max_retries=retries) |
|
325 | 328 | self.base_url = urlparse.urljoin('http://%s' % server_and_port, backend_endpoint) |
|
326 | 329 | self.session = requests.Session() |
|
327 | 330 | self.session.mount('http://', adapter) |
|
328 | 331 | |
|
329 | 332 | def handle(self, environment, input_data, *args, **kwargs): |
|
330 | 333 | data = { |
|
331 | 334 | 'environment': environment, |
|
332 | 335 | 'input_data': input_data, |
|
333 | 336 | 'args': args, |
|
334 | 337 | 'kwargs': kwargs |
|
335 | 338 | } |
|
336 | 339 | result = self.session.post( |
|
337 | 340 | self.base_url, msgpack.packb(data), stream=True) |
|
338 | 341 | return self._get_result(result) |
|
339 | 342 | |
|
340 | 343 | def _deserialize_and_raise(self, error): |
|
341 | 344 | exception = Exception(error['message']) |
|
342 | 345 | try: |
|
343 | 346 | exception._vcs_kind = error['_vcs_kind'] |
|
344 | 347 | except KeyError: |
|
345 | 348 | pass |
|
346 | 349 | raise exception |
|
347 | 350 | |
|
348 | 351 | def _iterate(self, result): |
|
349 | 352 | unpacker = msgpack.Unpacker() |
|
350 | 353 | for line in result.iter_content(chunk_size=self.CHUNK_SIZE): |
|
351 | 354 | unpacker.feed(line) |
|
352 | 355 | for chunk in unpacker: |
|
353 | 356 | yield chunk |
|
354 | 357 | |
|
355 | 358 | def _get_result(self, result): |
|
356 | 359 | iterator = self._iterate(result) |
|
357 | 360 | error = iterator.next() |
|
358 | 361 | if error: |
|
359 | 362 | self._deserialize_and_raise(error) |
|
360 | 363 | |
|
361 | 364 | status = iterator.next() |
|
362 | 365 | headers = iterator.next() |
|
363 | 366 | |
|
364 | 367 | return iterator, status, headers |
|
365 | 368 | |
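A stand-alone sketch of the incremental decode in _iterate(): arbitrary byte chunks are fed into an Unpacker, which yields each msgpack object only once it is fully buffered (here: error, status, headers, in the order _get_result() reads them):

    import msgpack

    stream = (msgpack.packb(None)            # no error
              + msgpack.packb('200 OK')      # status
              + msgpack.packb({'k': 'v'}))   # headers
    unpacker = msgpack.Unpacker()
    for i in range(0, len(stream), 3):       # simulate 3-byte network reads
        unpacker.feed(stream[i:i + 3])
        for message in unpacker:             # only complete objects come out
            print(message)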
|
366 | 369 | |
|
367 | 370 | class ThreadlocalSessionFactory(object): |
|
368 | 371 | """ |
|
369 | 372 | Creates one CurlSession per thread on demand. |
|
370 | 373 | """ |
|
371 | 374 | |
|
372 | 375 | def __init__(self): |
|
373 | 376 | self._thread_local = threading.local() |
|
374 | 377 | |
|
375 | 378 | def __call__(self): |
|
376 | 379 | if not hasattr(self._thread_local, 'curl_session'): |
|
377 | 380 | self._thread_local.curl_session = CurlSession() |
|
378 | 381 | return self._thread_local.curl_session |
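A behavioural sketch of the factory above: one CurlSession per thread, created lazily and then reused, so pycurl handles never cross thread boundaries:

    factory = ThreadlocalSessionFactory()
    session_1 = factory()      # first call in this thread creates the session
    session_2 = factory()      # later calls in the same thread reuse it
    assert session_1 is session_2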
@@ -1,1194 +1,1195 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import shutil |
|
24 | 24 | import time |
|
25 | 25 | import logging |
|
26 | 26 | import traceback |
|
27 | 27 | import datetime |
|
28 | 28 | |
|
29 | 29 | from pyramid.threadlocal import get_current_request |
|
30 | 30 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
31 | 31 | |
|
32 | 32 | from rhodecode import events |
|
33 | 33 | from rhodecode.lib.auth import HasUserGroupPermissionAny |
|
34 | 34 | from rhodecode.lib.caching_query import FromCache |
|
35 | 35 | from rhodecode.lib.exceptions import AttachedForksError, AttachedPullRequestsError |
|
36 | 36 | from rhodecode.lib import hooks_base |
|
37 | 37 | from rhodecode.lib.user_log_filter import user_log_filter |
|
38 | 38 | from rhodecode.lib.utils import make_db_config |
|
39 | 39 | from rhodecode.lib.utils2 import ( |
|
40 | 40 | safe_str, safe_unicode, remove_prefix, obfuscate_url_pw, |
|
41 | 41 | get_current_rhodecode_user, safe_int, action_logger_generic) |
|
42 | 42 | from rhodecode.lib.vcs.backends import get_backend |
|
43 | 43 | from rhodecode.model import BaseModel |
|
44 | 44 | from rhodecode.model.db import ( |
|
45 | 45 | _hash_key, func, case, joinedload, or_, in_filter_generator, |
|
46 | 46 | Session, Repository, UserRepoToPerm, UserGroupRepoToPerm, |
|
47 | 47 | UserRepoGroupToPerm, UserGroupRepoGroupToPerm, User, Permission, |
|
48 | 48 | Statistics, UserGroup, RepoGroup, RepositoryField, UserLog) |
|
49 | 49 | from rhodecode.model.permission import PermissionModel |
|
50 | 50 | from rhodecode.model.settings import VcsSettingsModel |
|
51 | 51 | |
|
52 | 52 | log = logging.getLogger(__name__) |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | class RepoModel(BaseModel): |
|
56 | 56 | |
|
57 | 57 | cls = Repository |
|
58 | 58 | |
|
59 | 59 | def _get_user_group(self, users_group): |
|
60 | 60 | return self._get_instance(UserGroup, users_group, |
|
61 | 61 | callback=UserGroup.get_by_group_name) |
|
62 | 62 | |
|
63 | 63 | def _get_repo_group(self, repo_group): |
|
64 | 64 | return self._get_instance(RepoGroup, repo_group, |
|
65 | 65 | callback=RepoGroup.get_by_group_name) |
|
66 | 66 | |
|
67 | 67 | def _create_default_perms(self, repository, private): |
|
68 | 68 | # create default permission |
|
69 | 69 | default = 'repository.read' |
|
70 | 70 | def_user = User.get_default_user() |
|
71 | 71 | for p in def_user.user_perms: |
|
72 | 72 | if p.permission.permission_name.startswith('repository.'): |
|
73 | 73 | default = p.permission.permission_name |
|
74 | 74 | break |
|
75 | 75 | |
|
76 | 76 | default_perm = 'repository.none' if private else default |
|
77 | 77 | |
|
78 | 78 | repo_to_perm = UserRepoToPerm() |
|
79 | 79 | repo_to_perm.permission = Permission.get_by_key(default_perm) |
|
80 | 80 | |
|
81 | 81 | repo_to_perm.repository = repository |
|
82 | 82 | repo_to_perm.user_id = def_user.user_id |
|
83 | 83 | |
|
84 | 84 | return repo_to_perm |
|
85 | 85 | |
|
86 | 86 | @LazyProperty |
|
87 | 87 | def repos_path(self): |
|
88 | 88 | """ |
|
89 | 89 | Gets the repositories root path from database |
|
90 | 90 | """ |
|
91 | 91 | settings_model = VcsSettingsModel(sa=self.sa) |
|
92 | 92 | return settings_model.get_repos_location() |
|
93 | 93 | |
|
94 | 94 | def get(self, repo_id): |
|
95 | 95 | repo = self.sa.query(Repository) \ |
|
96 | 96 | .filter(Repository.repo_id == repo_id) |
|
97 | 97 | |
|
98 | 98 | return repo.scalar() |
|
99 | 99 | |
|
100 | 100 | def get_repo(self, repository): |
|
101 | 101 | return self._get_repo(repository) |
|
102 | 102 | |
|
103 | 103 | def get_by_repo_name(self, repo_name, cache=False): |
|
104 | 104 | repo = self.sa.query(Repository) \ |
|
105 | 105 | .filter(Repository.repo_name == repo_name) |
|
106 | 106 | |
|
107 | 107 | if cache: |
|
108 | 108 | name_key = _hash_key(repo_name) |
|
109 | 109 | repo = repo.options( |
|
110 | 110 | FromCache("sql_cache_short", "get_repo_%s" % name_key)) |
|
111 | 111 | return repo.scalar() |
|
112 | 112 | |
|
113 | 113 | def _extract_id_from_repo_name(self, repo_name): |
|
114 | 114 | if repo_name.startswith('/'): |
|
115 | 115 | repo_name = repo_name.lstrip('/') |
|
116 | 116 | by_id_match = re.match(r'^_(\d{1,})', repo_name) |
|
117 | 117 | if by_id_match: |
|
118 | 118 | return by_id_match.groups()[0] |
|
119 | 119 | |
|
120 | 120 | def get_repo_by_id(self, repo_name): |
|
121 | 121 | """ |
|
122 | 122 | Extracts repo_name by id from special urls. |
|
123 | 123 | Example url is _11/repo_name |
|
124 | 124 | |
|
125 | 125 | :param repo_name: |
|
126 | 126 | :return: repo object if matched else None |
|
127 | 127 | """ |
|
128 | 128 | _repo_id = None |
|
129 | 129 | try: |
|
130 | 130 | _repo_id = self._extract_id_from_repo_name(repo_name) |
|
131 | 131 | if _repo_id: |
|
132 | 132 | return self.get(_repo_id) |
|
133 | 133 | except Exception: |
|
134 | 134 | log.exception('Failed to extract repo_name from URL') |
|
135 | 135 | if _repo_id: |
|
136 | 136 | Session().rollback() |
|
137 | 137 | |
|
138 | 138 | return None |
|
139 | 139 | |
|
140 | 140 | def get_repos_for_root(self, root, traverse=False): |
|
141 | 141 | if traverse: |
|
142 | 142 | like_expression = u'{}%'.format(safe_unicode(root)) |
|
143 | 143 | repos = Repository.query().filter( |
|
144 | 144 | Repository.repo_name.like(like_expression)).all() |
|
145 | 145 | else: |
|
146 | 146 | if root and not isinstance(root, RepoGroup): |
|
147 | 147 | raise ValueError( |
|
148 | 148 | 'Root must be an instance ' |
|
149 | 149 | 'of RepoGroup, got:{} instead'.format(type(root))) |
|
150 | 150 | repos = Repository.query().filter(Repository.group == root).all() |
|
151 | 151 | return repos |
|
152 | 152 | |
|
153 | 153 | def get_url(self, repo, request=None, permalink=False): |
|
154 | 154 | if not request: |
|
155 | 155 | request = get_current_request() |
|
156 | 156 | |
|
157 | 157 | if not request: |
|
158 | 158 | return |
|
159 | 159 | |
|
160 | 160 | if permalink: |
|
161 | 161 | return request.route_url( |
|
162 | 162 | 'repo_summary', repo_name='_{}'.format(safe_str(repo.repo_id))) |
|
163 | 163 | else: |
|
164 | 164 | return request.route_url( |
|
165 | 165 | 'repo_summary', repo_name=safe_str(repo.repo_name)) |
|
166 | 166 | |
|
167 | 167 | def get_commit_url(self, repo, commit_id, request=None, permalink=False): |
|
168 | 168 | if not request: |
|
169 | 169 | request = get_current_request() |
|
170 | 170 | |
|
171 | 171 | if not request: |
|
172 | 172 | return |
|
173 | 173 | |
|
174 | 174 | if permalink: |
|
175 | 175 | return request.route_url( |
|
176 | 176 | 'repo_commit', repo_name=safe_str(repo.repo_id), |
|
177 | 177 | commit_id=commit_id) |
|
178 | 178 | |
|
179 | 179 | else: |
|
180 | 180 | return request.route_url( |
|
181 | 181 | 'repo_commit', repo_name=safe_str(repo.repo_name), |
|
182 | 182 | commit_id=commit_id) |
|
183 | 183 | |
|
184 | 184 | def get_repo_log(self, repo, filter_term): |
|
185 | 185 | repo_log = UserLog.query()\ |
|
186 | 186 | .filter(or_(UserLog.repository_id == repo.repo_id, |
|
187 | 187 | UserLog.repository_name == repo.repo_name))\ |
|
188 | 188 | .options(joinedload(UserLog.user))\ |
|
189 | 189 | .options(joinedload(UserLog.repository))\ |
|
190 | 190 | .order_by(UserLog.action_date.desc()) |
|
191 | 191 | |
|
192 | 192 | repo_log = user_log_filter(repo_log, filter_term) |
|
193 | 193 | return repo_log |
|
194 | 194 | |
|
195 | 195 | @classmethod |
|
196 | 196 | def update_commit_cache(cls, repositories=None): |
|
197 | 197 | if not repositories: |
|
198 | 198 | repositories = Repository.getAll() |
|
199 | 199 | for repo in repositories: |
|
200 | 200 | repo.update_commit_cache() |
|
201 | 201 | |
|
202 | 202 | def get_repos_as_dict(self, repo_list=None, admin=False, |
|
203 | 203 | super_user_actions=False, short_name=None): |
|
204 | 204 | |
|
205 | 205 | _render = get_current_request().get_partial_renderer( |
|
206 | 206 | 'rhodecode:templates/data_table/_dt_elements.mako') |
|
207 | 207 | c = _render.get_call_context() |
|
208 | 208 | h = _render.get_helpers() |
|
209 | 209 | |
|
210 | 210 | def quick_menu(repo_name): |
|
211 | 211 | return _render('quick_menu', repo_name) |
|
212 | 212 | |
|
213 | 213 | def repo_lnk(name, rtype, rstate, private, archived, fork_of): |
|
214 | 214 | if short_name is not None: |
|
215 | 215 | short_name_var = short_name |
|
216 | 216 | else: |
|
217 | 217 | short_name_var = not admin |
|
218 | 218 | return _render('repo_name', name, rtype, rstate, private, archived, fork_of, |
|
219 | 219 | short_name=short_name_var, admin=False) |
|
220 | 220 | |
|
221 | 221 | def last_change(last_change): |
|
222 | 222 | if admin and isinstance(last_change, datetime.datetime) and not last_change.tzinfo: |
|
223 | 223 | ts = time.time() |
|
224 | 224 | utc_offset = (datetime.datetime.fromtimestamp(ts) |
|
225 | 225 | - datetime.datetime.utcfromtimestamp(ts)).total_seconds() |
|
226 | 226 | last_change = last_change + datetime.timedelta(seconds=utc_offset) |
|
227 | 227 | |
|
228 | 228 | return _render("last_change", last_change) |
|
229 | 229 | |
|
230 | 230 | def rss_lnk(repo_name): |
|
231 | 231 | return _render("rss", repo_name) |
|
232 | 232 | |
|
233 | 233 | def atom_lnk(repo_name): |
|
234 | 234 | return _render("atom", repo_name) |
|
235 | 235 | |
|
236 | 236 | def last_rev(repo_name, cs_cache): |
|
237 | 237 | return _render('revision', repo_name, cs_cache.get('revision'), |
|
238 | 238 | cs_cache.get('raw_id'), cs_cache.get('author'), |
|
239 | 239 | cs_cache.get('message'), cs_cache.get('date')) |
|
240 | 240 | |
|
241 | 241 | def desc(desc): |
|
242 | 242 | return _render('repo_desc', desc, c.visual.stylify_metatags) |
|
243 | 243 | |
|
244 | 244 | def state(repo_state): |
|
245 | 245 | return _render("repo_state", repo_state) |
|
246 | 246 | |
|
247 | 247 | def repo_actions(repo_name): |
|
248 | 248 | return _render('repo_actions', repo_name, super_user_actions) |
|
249 | 249 | |
|
250 | 250 | def user_profile(username): |
|
251 | 251 | return _render('user_profile', username) |
|
252 | 252 | |
|
253 | 253 | repos_data = [] |
|
254 | 254 | for repo in repo_list: |
|
255 | 255 | # NOTE(marcink): because we use only raw column we need to load it like that |
|
256 | 256 | changeset_cache = Repository._load_changeset_cache( |
|
257 | 257 | repo.repo_id, repo._changeset_cache) |
|
258 | 258 | |
|
259 | 259 | row = { |
|
260 | 260 | "menu": quick_menu(repo.repo_name), |
|
261 | 261 | |
|
262 | 262 | "name": repo_lnk(repo.repo_name, repo.repo_type, repo.repo_state, |
|
263 | 263 | repo.private, repo.archived, repo.fork), |
|
264 | 264 | |
|
265 | 265 | "desc": desc(h.escape(repo.description)), |
|
266 | 266 | |
|
267 | 267 | "last_change": last_change(repo.updated_on), |
|
268 | 268 | |
|
269 | 269 | "last_changeset": last_rev(repo.repo_name, changeset_cache), |
|
270 | 270 | "last_changeset_raw": changeset_cache.get('revision'), |
|
271 | 271 | |
|
272 | 272 | "owner": user_profile(repo.User.username), |
|
273 | 273 | |
|
274 | 274 | "state": state(repo.repo_state), |
|
275 | 275 | "rss": rss_lnk(repo.repo_name), |
|
276 | 276 | "atom": atom_lnk(repo.repo_name), |
|
277 | 277 | } |
|
278 | 278 | if admin: |
|
279 | 279 | row.update({ |
|
280 | 280 | "action": repo_actions(repo.repo_name), |
|
281 | 281 | }) |
|
282 | 282 | repos_data.append(row) |
|
283 | 283 | |
|
284 | 284 | return repos_data |
|
285 | 285 | |
|
286 | 286 | def get_repos_data_table( |
|
287 | 287 | self, draw, start, limit, |
|
288 | 288 | search_q, order_by, order_dir, |
|
289 | 289 | auth_user, repo_group_id): |
|
290 | 290 | from rhodecode.model.scm import RepoList |
|
291 | 291 | |
|
292 | 292 | _perms = ['repository.read', 'repository.write', 'repository.admin'] |
|
293 | 293 | |
|
294 | 294 | repos = Repository.query() \ |
|
295 | 295 | .filter(Repository.group_id == repo_group_id) \ |
|
296 | 296 | .all() |
|
297 | 297 | auth_repo_list = RepoList( |
|
298 | 298 | repos, perm_set=_perms, |
|
299 | 299 | extra_kwargs=dict(user=auth_user)) |
|
300 | 300 | |
|
301 | 301 | allowed_ids = [-1] |
|
302 | 302 | for repo in auth_repo_list: |
|
303 | 303 | allowed_ids.append(repo.repo_id) |
|
304 | 304 | |
|
305 | 305 | repos_data_total_count = Repository.query() \ |
|
306 | 306 | .filter(Repository.group_id == repo_group_id) \ |
|
307 | 307 | .filter(or_( |
|
308 | 308 | # generate multiple IN to fix limitation problems |
|
309 | 309 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
310 | 310 | ) \ |
|
311 | 311 | .count() |
|
312 | 312 | |
|
313 | 313 | base_q = Session.query( |
|
314 | 314 | Repository.repo_id, |
|
315 | 315 | Repository.repo_name, |
|
316 | 316 | Repository.description, |
|
317 | 317 | Repository.repo_type, |
|
318 | 318 | Repository.repo_state, |
|
319 | 319 | Repository.private, |
|
320 | 320 | Repository.archived, |
|
321 | 321 | Repository.fork, |
|
322 | 322 | Repository.updated_on, |
|
323 | 323 | Repository._changeset_cache, |
|
324 | 324 | User, |
|
325 | 325 | ) \ |
|
326 | 326 | .filter(Repository.group_id == repo_group_id) \ |
|
327 | 327 | .filter(or_( |
|
328 | 328 | # generate multiple IN to fix limitation problems |
|
329 | 329 | *in_filter_generator(Repository.repo_id, allowed_ids)) |
|
330 | 330 | ) \ |
|
331 | 331 | .join(User, User.user_id == Repository.user_id) \ |
|
332 | 332 | .group_by(Repository, User) |
|
333 | 333 | |
|
334 | 334 | repos_data_total_filtered_count = base_q.count() |
|
335 | 335 | |
|
336 | 336 | sort_defined = False |
|
337 | 337 | if order_by == 'repo_name': |
|
338 | 338 | sort_col = func.lower(Repository.repo_name) |
|
339 | 339 | sort_defined = True |
|
340 | 340 | elif order_by == 'user_username': |
|
341 | 341 | sort_col = User.username |
|
342 | 342 | else: |
|
343 | 343 | sort_col = getattr(Repository, order_by, None) |
|
344 | 344 | |
|
345 | 345 | if sort_defined or sort_col: |
|
346 | 346 | if order_dir == 'asc': |
|
347 | 347 | sort_col = sort_col.asc() |
|
348 | 348 | else: |
|
349 | 349 | sort_col = sort_col.desc() |
|
350 | 350 | |
|
351 | 351 | base_q = base_q.order_by(sort_col) |
|
352 | 352 | base_q = base_q.offset(start).limit(limit) |
|
353 | 353 | |
|
354 | 354 | repos_list = base_q.all() |
|
355 | 355 | |
|
356 | 356 | repos_data = RepoModel().get_repos_as_dict( |
|
357 | 357 | repo_list=repos_list, admin=False) |
|
358 | 358 | |
|
359 | 359 | data = ({ |
|
360 | 360 | 'draw': draw, |
|
361 | 361 | 'data': repos_data, |
|
362 | 362 | 'recordsTotal': repos_data_total_count, |
|
363 | 363 | 'recordsFiltered': repos_data_total_filtered_count, |
|
364 | 364 | }) |
|
365 | 365 | return data |
|
366 | 366 | |
|
367 | 367 | def _get_defaults(self, repo_name): |
|
368 | 368 | """ |
|
369 | 369 | Gets information about repository, and returns a dict for |
|
370 | 370 | usage in forms |
|
371 | 371 | |
|
372 | 372 | :param repo_name: |
|
373 | 373 | """ |
|
374 | 374 | |
|
375 | 375 | repo_info = Repository.get_by_repo_name(repo_name) |
|
376 | 376 | |
|
377 | 377 | if repo_info is None: |
|
378 | 378 | return None |
|
379 | 379 | |
|
380 | 380 | defaults = repo_info.get_dict() |
|
381 | 381 | defaults['repo_name'] = repo_info.just_name |
|
382 | 382 | |
|
383 | 383 | groups = repo_info.groups_with_parents |
|
384 | 384 | parent_group = groups[-1] if groups else None |
|
385 | 385 | |
|
386 | 386 | # we use -1 as this is how we mark an empty group in HTML |
|
387 | 387 | defaults['repo_group'] = getattr(parent_group, 'group_id', -1) |
|
388 | 388 | |
|
389 | 389 | keys_to_process = ( |
|
390 | 390 | {'k': 'repo_type', 'strip': False}, |
|
391 | 391 | {'k': 'repo_enable_downloads', 'strip': True}, |
|
392 | 392 | {'k': 'repo_description', 'strip': True}, |
|
393 | 393 | {'k': 'repo_enable_locking', 'strip': True}, |
|
394 | 394 | {'k': 'repo_landing_rev', 'strip': True}, |
|
395 | 395 | {'k': 'clone_uri', 'strip': False}, |
|
396 | 396 | {'k': 'push_uri', 'strip': False}, |
|
397 | 397 | {'k': 'repo_private', 'strip': True}, |
|
398 | 398 | {'k': 'repo_enable_statistics', 'strip': True} |
|
399 | 399 | ) |
|
400 | 400 | |
|
401 | 401 | for item in keys_to_process: |
|
402 | 402 | attr = item['k'] |
|
403 | 403 | if item['strip']: |
|
404 | 404 | attr = remove_prefix(item['k'], 'repo_') |
|
405 | 405 | |
|
406 | 406 | val = defaults[attr] |
|
407 | 407 | if item['k'] == 'repo_landing_rev': |
|
408 | 408 | val = ':'.join(defaults[attr]) |
|
409 | 409 | defaults[item['k']] = val |
|
410 | 410 | if item['k'] == 'clone_uri': |
|
411 | 411 | defaults['clone_uri_hidden'] = repo_info.clone_uri_hidden |
|
412 | 412 | if item['k'] == 'push_uri': |
|
413 | 413 | defaults['push_uri_hidden'] = repo_info.push_uri_hidden |
|
414 | 414 | |
|
415 | 415 | # fill owner |
|
416 | 416 | if repo_info.user: |
|
417 | 417 | defaults.update({'user': repo_info.user.username}) |
|
418 | 418 | else: |
|
419 | 419 | replacement_user = User.get_first_super_admin().username |
|
420 | 420 | defaults.update({'user': replacement_user}) |
|
421 | 421 | |
|
422 | 422 | return defaults |
|
423 | 423 | |
|
424 | 424 | def update(self, repo, **kwargs): |
|
425 | 425 | try: |
|
426 | 426 | cur_repo = self._get_repo(repo) |
|
427 | 427 | source_repo_name = cur_repo.repo_name |
|
428 | 428 | |
|
429 | 429 | affected_user_ids = [] |
|
430 | 430 | if 'user' in kwargs: |
|
431 | 431 | old_owner_id = cur_repo.user.user_id |
|
432 | 432 | new_owner = User.get_by_username(kwargs['user']) |
|
433 | 433 | cur_repo.user = new_owner |
|
434 | 434 | |
|
435 | 435 | if old_owner_id != new_owner.user_id: |
|
436 | 436 | affected_user_ids = [new_owner.user_id, old_owner_id] |
|
437 | 437 | |
|
438 | 438 | if 'repo_group' in kwargs: |
|
439 | 439 | cur_repo.group = RepoGroup.get(kwargs['repo_group']) |
|
440 | 440 | log.debug('Updating repo %s with params:%s', cur_repo, kwargs) |
|
441 | 441 | |
|
442 | 442 | update_keys = [ |
|
443 | 443 | (1, 'repo_description'), |
|
444 | 444 | (1, 'repo_landing_rev'), |
|
445 | 445 | (1, 'repo_private'), |
|
446 | 446 | (1, 'repo_enable_downloads'), |
|
447 | 447 | (1, 'repo_enable_locking'), |
|
448 | 448 | (1, 'repo_enable_statistics'), |
|
449 | 449 | (0, 'clone_uri'), |
|
450 | 450 | (0, 'push_uri'), |
|
451 | 451 | (0, 'fork_id') |
|
452 | 452 | ] |
|
453 | 453 | for strip, k in update_keys: |
|
454 | 454 | if k in kwargs: |
|
455 | 455 | val = kwargs[k] |
|
456 | 456 | if strip: |
|
457 | 457 | k = remove_prefix(k, 'repo_') |
|
458 | 458 | |
|
459 | 459 | setattr(cur_repo, k, val) |
|
460 | 460 | |
|
461 | 461 | new_name = cur_repo.get_new_name(kwargs['repo_name']) |
|
462 | 462 | cur_repo.repo_name = new_name |
|
463 | 463 | |
|
464 | 464 | # if private flag is set, reset default permission to NONE |
|
465 | 465 | if kwargs.get('repo_private'): |
|
466 | 466 | EMPTY_PERM = 'repository.none' |
|
467 | 467 | RepoModel().grant_user_permission( |
|
468 | 468 | repo=cur_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM |
|
469 | 469 | ) |
|
470 | 470 | if kwargs.get('repo_landing_rev'): |
|
471 | 471 | landing_rev_val = kwargs['repo_landing_rev'] |
|
472 | 472 | RepoModel().set_landing_rev(cur_repo, landing_rev_val) |
|
473 | 473 | |
|
474 | 474 | # handle extra fields |
|
475 | 475 | for field in filter(lambda k: k.startswith(RepositoryField.PREFIX), kwargs): |
|
476 | 476 | k = RepositoryField.un_prefix_key(field) |
|
477 | 477 | ex_field = RepositoryField.get_by_key_name( |
|
478 | 478 | key=k, repo=cur_repo) |
|
479 | 479 | if ex_field: |
|
480 | 480 | ex_field.field_value = kwargs[field] |
|
481 | 481 | self.sa.add(ex_field) |
|
482 | 482 | |
|
483 | 483 | self.sa.add(cur_repo) |
|
484 | 484 | |
|
485 | 485 | if source_repo_name != new_name: |
|
486 | 486 | # rename repository |
|
487 | 487 | self._rename_filesystem_repo( |
|
488 | 488 | old=source_repo_name, new=new_name) |
|
489 | 489 | |
|
490 | 490 | if affected_user_ids: |
|
491 | 491 | PermissionModel().trigger_permission_flush(affected_user_ids) |
|
492 | 492 | |
|
493 | 493 | return cur_repo |
|
494 | 494 | except Exception: |
|
495 | 495 | log.error(traceback.format_exc()) |
|
496 | 496 | raise |
|
497 | 497 | |
|
498 | 498 | def _create_repo(self, repo_name, repo_type, description, owner, |
|
499 | 499 | private=False, clone_uri=None, repo_group=None, |
|
500 | 500 | landing_rev='rev:tip', fork_of=None, |
|
501 | 501 | copy_fork_permissions=False, enable_statistics=False, |
|
502 | 502 | enable_locking=False, enable_downloads=False, |
|
503 | 503 | copy_group_permissions=False, |
|
504 | 504 | state=Repository.STATE_PENDING): |
|
505 | 505 | """ |
|
506 | 506 | Create a repository inside the database with PENDING state; this should |
|
507 | 507 | only be executed by create() repo, with the exception of importing |
|
508 | 508 | existing repos. |
|
509 | 509 | """ |
|
510 | 510 | from rhodecode.model.scm import ScmModel |
|
511 | 511 | |
|
512 | 512 | owner = self._get_user(owner) |
|
513 | 513 | fork_of = self._get_repo(fork_of) |
|
514 | 514 | repo_group = self._get_repo_group(safe_int(repo_group)) |
|
515 | 515 | |
|
516 | 516 | try: |
|
517 | 517 | repo_name = safe_unicode(repo_name) |
|
518 | 518 | description = safe_unicode(description) |
|
519 | 519 | # repo name is just a name of repository |
|
520 | 520 | # while repo_name_full is a full qualified name that is combined |
|
521 | 521 | # with name and path of group |
|
522 | 522 | repo_name_full = repo_name |
|
523 | 523 | repo_name = repo_name.split(Repository.NAME_SEP)[-1] |
|
524 | 524 | |
|
525 | 525 | new_repo = Repository() |
|
526 | 526 | new_repo.repo_state = state |
|
527 | 527 | new_repo.enable_statistics = False |
|
528 | 528 | new_repo.repo_name = repo_name_full |
|
529 | 529 | new_repo.repo_type = repo_type |
|
530 | 530 | new_repo.user = owner |
|
531 | 531 | new_repo.group = repo_group |
|
532 | 532 | new_repo.description = description or repo_name |
|
533 | 533 | new_repo.private = private |
|
534 | 534 | new_repo.archived = False |
|
535 | 535 | new_repo.clone_uri = clone_uri |
|
536 | 536 | new_repo.landing_rev = landing_rev |
|
537 | 537 | |
|
538 | 538 | new_repo.enable_statistics = enable_statistics |
|
539 | 539 | new_repo.enable_locking = enable_locking |
|
540 | 540 | new_repo.enable_downloads = enable_downloads |
|
541 | 541 | |
|
542 | 542 | if repo_group: |
|
543 | 543 | new_repo.enable_locking = repo_group.enable_locking |
|
544 | 544 | |
|
545 | 545 | if fork_of: |
|
546 | 546 | parent_repo = fork_of |
|
547 | 547 | new_repo.fork = parent_repo |
|
548 | 548 | |
|
549 | 549 | events.trigger(events.RepoPreCreateEvent(new_repo)) |
|
550 | 550 | |
|
551 | 551 | self.sa.add(new_repo) |
|
552 | 552 | |
|
553 | 553 | EMPTY_PERM = 'repository.none' |
|
554 | 554 | if fork_of and copy_fork_permissions: |
|
555 | 555 | repo = fork_of |
|
556 | 556 | user_perms = UserRepoToPerm.query() \ |
|
557 | 557 | .filter(UserRepoToPerm.repository == repo).all() |
|
558 | 558 | group_perms = UserGroupRepoToPerm.query() \ |
|
559 | 559 | .filter(UserGroupRepoToPerm.repository == repo).all() |
|
560 | 560 | |
|
561 | 561 | for perm in user_perms: |
|
562 | 562 | UserRepoToPerm.create( |
|
563 | 563 | perm.user, new_repo, perm.permission) |
|
564 | 564 | |
|
565 | 565 | for perm in group_perms: |
|
566 | 566 | UserGroupRepoToPerm.create( |
|
567 | 567 | perm.users_group, new_repo, perm.permission) |
|
568 | 568 | # in case we copy permissions and also set this repo to private |
|
569 | 569 | # override the default user permission to make it a private repo |
|
570 | 570 | if private: |
|
571 | 571 | RepoModel(self.sa).grant_user_permission( |
|
572 | 572 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
573 | 573 | |
|
574 | 574 | elif repo_group and copy_group_permissions: |
|
575 | 575 | user_perms = UserRepoGroupToPerm.query() \ |
|
576 | 576 | .filter(UserRepoGroupToPerm.group == repo_group).all() |
|
577 | 577 | |
|
578 | 578 | group_perms = UserGroupRepoGroupToPerm.query() \ |
|
579 | 579 | .filter(UserGroupRepoGroupToPerm.group == repo_group).all() |
|
580 | 580 | |
|
581 | 581 | for perm in user_perms: |
|
582 | 582 | perm_name = perm.permission.permission_name.replace( |
|
583 | 583 | 'group.', 'repository.') |
|
584 | 584 | perm_obj = Permission.get_by_key(perm_name) |
|
585 | 585 | UserRepoToPerm.create(perm.user, new_repo, perm_obj) |
|
586 | 586 | |
|
587 | 587 | for perm in group_perms: |
|
588 | 588 | perm_name = perm.permission.permission_name.replace( |
|
589 | 589 | 'group.', 'repository.') |
|
590 | 590 | perm_obj = Permission.get_by_key(perm_name) |
|
591 | 591 | UserGroupRepoToPerm.create(perm.users_group, new_repo, perm_obj) |
|
592 | 592 | |
|
593 | 593 | if private: |
|
594 | 594 | RepoModel(self.sa).grant_user_permission( |
|
595 | 595 | repo=new_repo, user=User.DEFAULT_USER, perm=EMPTY_PERM) |
|
596 | 596 | |
|
597 | 597 | else: |
|
598 | 598 | perm_obj = self._create_default_perms(new_repo, private) |
|
599 | 599 | self.sa.add(perm_obj) |
|
600 | 600 | |
|
601 | 601 | # now automatically start following this repository as owner |
|
602 | 602 | ScmModel(self.sa).toggle_following_repo(new_repo.repo_id, owner.user_id) |
|
603 | 603 | |
|
604 | 604 | # we need to flush here to check that the database won't

605 | 605 | # throw any exceptions; filesystem dirs are created at the very end
|
606 | 606 | self.sa.flush() |
|
607 | 607 | events.trigger(events.RepoCreateEvent(new_repo)) |
|
608 | 608 | return new_repo |
|
609 | 609 | |
|
610 | 610 | except Exception: |
|
611 | 611 | log.error(traceback.format_exc()) |
|
612 | 612 | raise |
|
613 | 613 | |
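When fork or group permissions are copied above, repo-group permission names are translated to their repository counterparts by a plain string substitution. A minimal runnable sketch of that mapping (the helper name is illustrative, not part of RepoModel):

    def group_perm_to_repo_perm(perm_name):
        # 'group.read' -> 'repository.read', 'group.admin' -> 'repository.admin'
        return perm_name.replace('group.', 'repository.')

    assert group_perm_to_repo_perm('group.write') == 'repository.write'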
|
614 | 614 | def create(self, form_data, cur_user): |
|
615 | 615 | """ |
|
616 | 616 | Create a repository using celery tasks.

617 | 617 |

618 | 618 | :param form_data: repository creation form data

619 | 619 | :param cur_user: user performing the action
|
620 | 620 | """ |
|
621 | 621 | from rhodecode.lib.celerylib import tasks, run_task |
|
622 | 622 | return run_task(tasks.create_repo, form_data, cur_user) |
|
623 | 623 | |
|
624 | 624 | def update_permissions(self, repo, perm_additions=None, perm_updates=None, |
|
625 | 625 | perm_deletions=None, check_perms=True, |
|
626 | 626 | cur_user=None): |
|
627 | 627 | if not perm_additions: |
|
628 | 628 | perm_additions = [] |
|
629 | 629 | if not perm_updates: |
|
630 | 630 | perm_updates = [] |
|
631 | 631 | if not perm_deletions: |
|
632 | 632 | perm_deletions = [] |
|
633 | 633 | |
|
634 | 634 | req_perms = ('usergroup.read', 'usergroup.write', 'usergroup.admin') |
|
635 | 635 | |
|
636 | 636 | changes = { |
|
637 | 637 | 'added': [], |
|
638 | 638 | 'updated': [], |
|
639 | 639 | 'deleted': [], |
|
640 | 640 | 'default_user_changed': None |
|
641 | 641 | } |
|
642 | 642 | |
|
643 | 643 | repo = self._get_repo(repo) |
|
644 | 644 | |
|
645 | 645 | # update permissions |
|
646 | 646 | for member_id, perm, member_type in perm_updates: |
|
647 | 647 | member_id = int(member_id) |
|
648 | 648 | if member_type == 'user': |
|
649 | 649 | member_name = User.get(member_id).username |
|
650 | 650 | if member_name == User.DEFAULT_USER: |
|
651 | 651 | # NOTE(dan): detect if we changed permissions for default user |
|
652 | 652 | perm_obj = self.sa.query(UserRepoToPerm) \ |
|
653 | 653 | .filter(UserRepoToPerm.user_id == member_id) \ |
|
654 | 654 | .filter(UserRepoToPerm.repository == repo) \ |
|
655 | 655 | .scalar() |
|
656 | 656 | if perm_obj and perm_obj.permission.permission_name != perm: |
|
657 | 657 | changes['default_user_changed'] = True |
|
658 | 658 | |
|
659 | 659 | # this also updates the current one if found
|
660 | 660 | self.grant_user_permission( |
|
661 | 661 | repo=repo, user=member_id, perm=perm) |
|
662 | 662 | elif member_type == 'user_group': |
|
663 | 663 | # check if we have permissions to alter this usergroup |
|
664 | 664 | member_name = UserGroup.get(member_id).users_group_name |
|
665 | 665 | if not check_perms or HasUserGroupPermissionAny( |
|
666 | 666 | *req_perms)(member_name, user=cur_user): |
|
667 | 667 | self.grant_user_group_permission( |
|
668 | 668 | repo=repo, group_name=member_id, perm=perm) |
|
669 | 669 | else: |
|
670 | 670 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
671 | 671 | "got {} instead".format(member_type)) |
|
672 | 672 | changes['updated'].append({'type': member_type, 'id': member_id, |
|
673 | 673 | 'name': member_name, 'new_perm': perm}) |
|
674 | 674 | |
|
675 | 675 | # set new permissions |
|
676 | 676 | for member_id, perm, member_type in perm_additions: |
|
677 | 677 | member_id = int(member_id) |
|
678 | 678 | if member_type == 'user': |
|
679 | 679 | member_name = User.get(member_id).username |
|
680 | 680 | self.grant_user_permission( |
|
681 | 681 | repo=repo, user=member_id, perm=perm) |
|
682 | 682 | elif member_type == 'user_group': |
|
683 | 683 | # check if we have permissions to alter this usergroup |
|
684 | 684 | member_name = UserGroup.get(member_id).users_group_name |
|
685 | 685 | if not check_perms or HasUserGroupPermissionAny( |
|
686 | 686 | *req_perms)(member_name, user=cur_user): |
|
687 | 687 | self.grant_user_group_permission( |
|
688 | 688 | repo=repo, group_name=member_id, perm=perm) |
|
689 | 689 | else: |
|
690 | 690 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
691 | 691 | "got {} instead".format(member_type)) |
|
692 | 692 | |
|
693 | 693 | changes['added'].append({'type': member_type, 'id': member_id, |
|
694 | 694 | 'name': member_name, 'new_perm': perm}) |
|
695 | 695 | # delete permissions |
|
696 | 696 | for member_id, perm, member_type in perm_deletions: |
|
697 | 697 | member_id = int(member_id) |
|
698 | 698 | if member_type == 'user': |
|
699 | 699 | member_name = User.get(member_id).username |
|
700 | 700 | self.revoke_user_permission(repo=repo, user=member_id) |
|
701 | 701 | elif member_type == 'user_group': |
|
702 | 702 | # check if we have permissions to alter this usergroup |
|
703 | 703 | member_name = UserGroup.get(member_id).users_group_name |
|
704 | 704 | if not check_perms or HasUserGroupPermissionAny( |
|
705 | 705 | *req_perms)(member_name, user=cur_user): |
|
706 | 706 | self.revoke_user_group_permission( |
|
707 | 707 | repo=repo, group_name=member_id) |
|
708 | 708 | else: |
|
709 | 709 | raise ValueError("member_type must be 'user' or 'user_group' " |
|
710 | 710 | "got {} instead".format(member_type)) |
|
711 | 711 | |
|
712 | 712 | changes['deleted'].append({'type': member_type, 'id': member_id, |
|
713 | 713 | 'name': member_name, 'new_perm': perm}) |
|
714 | 714 | return changes |
|
715 | 715 | |
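For reference, the permission tuples consumed by update_permissions and the returned changes audit dict have the shapes below; a pure-Python sketch with invented IDs and a placeholder resolved name, no database involved:

    # each entry is (member_id, permission_name, member_type)
    perm_updates = [('2', 'repository.write', 'user'),
                    ('7', 'repository.read', 'user_group')]

    changes = {'added': [], 'updated': [], 'deleted': [],
               'default_user_changed': None}
    for member_id, perm, member_type in perm_updates:
        changes['updated'].append({'type': member_type, 'id': int(member_id),
                                   'name': '<resolved name>', 'new_perm': perm})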
|
716 | 716 | def create_fork(self, form_data, cur_user): |
|
717 | 717 | """ |
|
718 | 718 | Simple wrapper that executes the celery task for fork creation.

719 | 719 |

720 | 720 | :param form_data: fork creation form data

721 | 721 | :param cur_user: user performing the action
|
722 | 722 | """ |
|
723 | 723 | from rhodecode.lib.celerylib import tasks, run_task |
|
724 | 724 | return run_task(tasks.create_repo_fork, form_data, cur_user) |
|
725 | 725 | |
|
726 | 726 | def archive(self, repo): |
|
727 | 727 | """ |
|
728 | 728 | Archive the given repository by setting its archived flag.

729 | 729 |

730 | 730 | :param repo: Instance of Repository, repository_id, or repository name
|
731 | 731 | """ |
|
732 | 732 | repo = self._get_repo(repo) |
|
733 | 733 | if repo: |
|
734 | 734 | |
|
735 | 735 | try: |
|
736 | 736 | repo.archived = True |
|
737 | 737 | self.sa.add(repo) |
|
738 | 738 | self.sa.commit() |
|
739 | 739 | except Exception: |
|
740 | 740 | log.error(traceback.format_exc()) |
|
741 | 741 | raise |
|
742 | 742 | |
|
743 | 743 | def delete(self, repo, forks=None, pull_requests=None, fs_remove=True, cur_user=None): |
|
744 | 744 | """ |
|
745 | 745 | Delete the given repository; the forks parameter defines what to do with

746 | 746 | attached forks. Throws AttachedForksError if the deleted repo has

747 | 747 | attached forks.
|
748 | 748 | |
|
749 | 749 | :param repo: Instance of Repository, repository_id, or repository name

750 | 750 | :param forks: str 'delete' or 'detach'

751 | 751 | :param pull_requests: str 'delete' or None

752 | 752 | :param fs_remove: remove (archive) the repo from the filesystem
|
753 | 753 | """ |
|
754 | 754 | if not cur_user: |
|
755 | 755 | cur_user = getattr(get_current_rhodecode_user(), 'username', None) |
|
756 | 756 | repo = self._get_repo(repo) |
|
757 | 757 | if repo: |
|
758 | 758 | if forks == 'detach': |
|
759 | 759 | for r in repo.forks: |
|
760 | 760 | r.fork = None |
|
761 | 761 | self.sa.add(r) |
|
762 | 762 | elif forks == 'delete': |
|
763 | 763 | for r in repo.forks: |
|
764 | 764 | self.delete(r, forks='delete') |
|
765 | 765 | elif [f for f in repo.forks]: |
|
766 | 766 | raise AttachedForksError() |
|
767 | 767 | |
|
768 | 768 | # check for pull requests |
|
769 | 769 | pr_sources = repo.pull_requests_source |
|
770 | 770 | pr_targets = repo.pull_requests_target |
|
771 | 771 | if pull_requests != 'delete' and (pr_sources or pr_targets): |
|
772 | 772 | raise AttachedPullRequestsError() |
|
773 | 773 | |
|
774 | 774 | old_repo_dict = repo.get_dict() |
|
775 | 775 | events.trigger(events.RepoPreDeleteEvent(repo)) |
|
776 | 776 | try: |
|
777 | 777 | self.sa.delete(repo) |
|
778 | 778 | if fs_remove: |
|
779 | 779 | self._delete_filesystem_repo(repo) |
|
780 | 780 | else: |
|
781 | 781 | log.debug('skipping removal from filesystem') |
|
782 | 782 | old_repo_dict.update({ |
|
783 | 783 | 'deleted_by': cur_user, |
|
784 | 784 | 'deleted_on': time.time(), |
|
785 | 785 | }) |
|
786 | 786 | hooks_base.delete_repository(**old_repo_dict) |
|
787 | 787 | events.trigger(events.RepoDeleteEvent(repo)) |
|
788 | 788 | except Exception: |
|
789 | 789 | log.error(traceback.format_exc()) |
|
790 | 790 | raise |
|
791 | 791 | |
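The forks argument of delete() drives a small decision tree; a standalone sketch of that branching (the exception class here is a stand-in for rhodecode's AttachedForksError):

    class AttachedForksError(Exception):  # stand-in for the real exception
        pass

    def resolve_fork_action(forks, fork_count):
        # mirrors the branching at the top of delete() above
        if forks == 'detach':
            return 'unlink forks from the deleted parent'
        elif forks == 'delete':
            return 'recursively delete attached forks first'
        elif fork_count:
            raise AttachedForksError()
        return 'no forks attached, proceed'

    assert resolve_fork_action('detach', 2).startswith('unlink')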
|
792 | 792 | def grant_user_permission(self, repo, user, perm): |
|
793 | 793 | """ |
|
794 | 794 | Grant permission for user on given repository, or update existing one |
|
795 | 795 | if found |
|
796 | 796 | |
|
797 | 797 | :param repo: Instance of Repository, repository_id, or repository name |
|
798 | 798 | :param user: Instance of User, user_id or username |
|
799 | 799 | :param perm: Instance of Permission, or permission_name |
|
800 | 800 | """ |
|
801 | 801 | user = self._get_user(user) |
|
802 | 802 | repo = self._get_repo(repo) |
|
803 | 803 | permission = self._get_perm(perm) |
|
804 | 804 | |
|
805 | 805 | # check if we have that permission already |
|
806 | 806 | obj = self.sa.query(UserRepoToPerm) \ |
|
807 | 807 | .filter(UserRepoToPerm.user == user) \ |
|
808 | 808 | .filter(UserRepoToPerm.repository == repo) \ |
|
809 | 809 | .scalar() |
|
810 | 810 | if obj is None: |
|
811 | 811 | # create new ! |
|
812 | 812 | obj = UserRepoToPerm() |
|
813 | 813 | obj.repository = repo |
|
814 | 814 | obj.user = user |
|
815 | 815 | obj.permission = permission |
|
816 | 816 | self.sa.add(obj) |
|
817 | 817 | log.debug('Granted perm %s to %s on %s', perm, user, repo) |
|
818 | 818 | action_logger_generic( |
|
819 | 819 | 'granted permission: {} to user: {} on repo: {}'.format( |
|
820 | 820 | perm, user, repo), namespace='security.repo') |
|
821 | 821 | return obj |
|
822 | 822 | |
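grant_user_permission above is a get-or-create upsert on the UserRepoToPerm association; schematically (a generic sketch under assumed callables, not rhodecode API):

    def upsert(session, query_one, build_new, permission):
        obj = query_one()        # existing association row, or None
        if obj is None:
            obj = build_new()    # fresh association object
        obj.permission = permission  # covers both create and update
        session.add(obj)
        return obj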
|
823 | 823 | def revoke_user_permission(self, repo, user): |
|
824 | 824 | """ |
|
825 | 825 | Revoke permission for user on given repository |
|
826 | 826 | |
|
827 | 827 | :param repo: Instance of Repository, repository_id, or repository name |
|
828 | 828 | :param user: Instance of User, user_id or username |
|
829 | 829 | """ |
|
830 | 830 | |
|
831 | 831 | user = self._get_user(user) |
|
832 | 832 | repo = self._get_repo(repo) |
|
833 | 833 | |
|
834 | 834 | obj = self.sa.query(UserRepoToPerm) \ |
|
835 | 835 | .filter(UserRepoToPerm.repository == repo) \ |
|
836 | 836 | .filter(UserRepoToPerm.user == user) \ |
|
837 | 837 | .scalar() |
|
838 | 838 | if obj: |
|
839 | 839 | self.sa.delete(obj) |
|
840 | 840 | log.debug('Revoked perm from %s on %s', user, repo)
|
841 | 841 | action_logger_generic( |
|
842 | 842 | 'revoked permission from user: {} on repo: {}'.format( |
|
843 | 843 | user, repo), namespace='security.repo') |
|
844 | 844 | |
|
845 | 845 | def grant_user_group_permission(self, repo, group_name, perm): |
|
846 | 846 | """ |
|
847 | 847 | Grant permission for user group on given repository, or update |
|
848 | 848 | existing one if found |
|
849 | 849 | |
|
850 | 850 | :param repo: Instance of Repository, repository_id, or repository name |
|
851 | 851 | :param group_name: Instance of UserGroup, users_group_id, |
|
852 | 852 | or user group name |
|
853 | 853 | :param perm: Instance of Permission, or permission_name |
|
854 | 854 | """ |
|
855 | 855 | repo = self._get_repo(repo) |
|
856 | 856 | group_name = self._get_user_group(group_name) |
|
857 | 857 | permission = self._get_perm(perm) |
|
858 | 858 | |
|
859 | 859 | # check if we have that permission already |
|
860 | 860 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
861 | 861 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
862 | 862 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
863 | 863 | .scalar() |
|
864 | 864 | |
|
865 | 865 | if obj is None: |
|
866 | 866 | # create new |
|
867 | 867 | obj = UserGroupRepoToPerm() |
|
868 | 868 | |
|
869 | 869 | obj.repository = repo |
|
870 | 870 | obj.users_group = group_name |
|
871 | 871 | obj.permission = permission |
|
872 | 872 | self.sa.add(obj) |
|
873 | 873 | log.debug('Granted perm %s to %s on %s', perm, group_name, repo) |
|
874 | 874 | action_logger_generic( |
|
875 | 875 | 'granted permission: {} to usergroup: {} on repo: {}'.format( |
|
876 | 876 | perm, group_name, repo), namespace='security.repo') |
|
877 | 877 | |
|
878 | 878 | return obj |
|
879 | 879 | |
|
880 | 880 | def revoke_user_group_permission(self, repo, group_name): |
|
881 | 881 | """ |
|
882 | 882 | Revoke permission for user group on given repository |
|
883 | 883 | |
|
884 | 884 | :param repo: Instance of Repository, repository_id, or repository name |
|
885 | 885 | :param group_name: Instance of UserGroup, users_group_id, |
|
886 | 886 | or user group name |
|
887 | 887 | """ |
|
888 | 888 | repo = self._get_repo(repo) |
|
889 | 889 | group_name = self._get_user_group(group_name) |
|
890 | 890 | |
|
891 | 891 | obj = self.sa.query(UserGroupRepoToPerm) \ |
|
892 | 892 | .filter(UserGroupRepoToPerm.repository == repo) \ |
|
893 | 893 | .filter(UserGroupRepoToPerm.users_group == group_name) \ |
|
894 | 894 | .scalar() |
|
895 | 895 | if obj: |
|
896 | 896 | self.sa.delete(obj) |
|
897 | 897 | log.debug('Revoked perm from %s on %s', group_name, repo)
|
898 | 898 | action_logger_generic( |
|
899 | 899 | 'revoked permission from usergroup: {} on repo: {}'.format( |
|
900 | 900 | group_name, repo), namespace='security.repo') |
|
901 | 901 | |
|
902 | 902 | def delete_stats(self, repo_name): |
|
903 | 903 | """ |
|
904 | 904 | Removes stats for the given repo.
|
905 | 905 | |
|
906 | 906 | :param repo_name: |
|
907 | 907 | """ |
|
908 | 908 | repo = self._get_repo(repo_name) |
|
909 | 909 | try: |
|
910 | 910 | obj = self.sa.query(Statistics) \ |
|
911 | 911 | .filter(Statistics.repository == repo).scalar() |
|
912 | 912 | if obj: |
|
913 | 913 | self.sa.delete(obj) |
|
914 | 914 | except Exception: |
|
915 | 915 | log.error(traceback.format_exc()) |
|
916 | 916 | raise |
|
917 | 917 | |
|
918 | 918 | def add_repo_field(self, repo_name, field_key, field_label, field_value='', |
|
919 | 919 | field_type='str', field_desc=''): |
|
920 | 920 | |
|
921 | 921 | repo = self._get_repo(repo_name) |
|
922 | 922 | |
|
923 | 923 | new_field = RepositoryField() |
|
924 | 924 | new_field.repository = repo |
|
925 | 925 | new_field.field_key = field_key |
|
926 | 926 | new_field.field_type = field_type # python type |
|
927 | 927 | new_field.field_value = field_value |
|
928 | 928 | new_field.field_desc = field_desc |
|
929 | 929 | new_field.field_label = field_label |
|
930 | 930 | self.sa.add(new_field) |
|
931 | 931 | return new_field |
|
932 | 932 | |
|
933 | 933 | def delete_repo_field(self, repo_name, field_key): |
|
934 | 934 | repo = self._get_repo(repo_name) |
|
935 | 935 | field = RepositoryField.get_by_key_name(field_key, repo) |
|
936 | 936 | if field: |
|
937 | 937 | self.sa.delete(field) |
|
938 | 938 | |
|
939 | 939 | def set_landing_rev(self, repo, landing_rev_name): |
|
940 | 940 | if landing_rev_name.startswith('branch:'): |
|
941 | 941 | landing_rev_name = landing_rev_name.split('branch:')[-1] |
|
942 | 942 | scm_instance = repo.scm_instance() |
|
| 943 | if scm_instance:
|
943 | 944 | return scm_instance._remote.set_head_ref(landing_rev_name) |
|
944 | 945 | |
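The 'branch:' prefix handling in set_landing_rev reduces to plain string work; for example:

    assert 'branch:master'.split('branch:')[-1] == 'master'
    assert 'master'.split('branch:')[-1] == 'master'  # no prefix: unchanged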
|
945 | 946 | def _create_filesystem_repo(self, repo_name, repo_type, repo_group, |
|
946 | 947 | clone_uri=None, repo_store_location=None, |
|
947 | 948 | use_global_config=False, install_hooks=True): |
|
948 | 949 | """ |
|
949 | 950 | Makes a repository on the filesystem. It is group aware, meaning it

950 | 951 | will create the repository within a group and alter the paths

951 | 952 | according to the group location.

952 | 953 |

953 | 954 | :param repo_name:

954 | 955 | :param repo_type:

955 | 956 | :param repo_group:

956 | 957 | :param clone_uri:

957 | 958 | :param repo_store_location:
|
958 | 959 | """ |
|
959 | 960 | from rhodecode.lib.utils import is_valid_repo, is_valid_repo_group |
|
960 | 961 | from rhodecode.model.scm import ScmModel |
|
961 | 962 | |
|
962 | 963 | if Repository.NAME_SEP in repo_name: |
|
963 | 964 | raise ValueError( |
|
964 | 965 | 'repo_name must not contain groups got `%s`' % repo_name) |
|
965 | 966 | |
|
966 | 967 | if isinstance(repo_group, RepoGroup): |
|
967 | 968 | new_parent_path = os.sep.join(repo_group.full_path_splitted) |
|
968 | 969 | else: |
|
969 | 970 | new_parent_path = repo_group or '' |
|
970 | 971 | |
|
971 | 972 | if repo_store_location: |
|
972 | 973 | _paths = [repo_store_location] |
|
973 | 974 | else: |
|
974 | 975 | _paths = [self.repos_path, new_parent_path, repo_name] |
|
975 | 976 | # we need to make it str for mercurial |
|
976 | 977 | repo_path = os.path.join(*map(safe_str, _paths))
|
977 | 978 | |
|
978 | 979 | # check that this path is not already a repository

979 | 980 | if is_valid_repo(repo_path, self.repos_path):

980 | 981 | raise Exception('This path %s is already a valid repository' % repo_path)
|
981 | 982 | |
|
982 | 983 | # check that this path is not already a repo group

983 | 984 | if is_valid_repo_group(repo_path, self.repos_path):

984 | 985 | raise Exception('This path %s is already a valid repo group' % repo_path)
|
985 | 986 | |
|
986 | 987 | log.info('creating repo %s in %s from url: `%s`', |
|
987 | 988 | repo_name, safe_unicode(repo_path), |
|
988 | 989 | obfuscate_url_pw(clone_uri)) |
|
989 | 990 | |
|
990 | 991 | backend = get_backend(repo_type) |
|
991 | 992 | |
|
992 | 993 | config_repo = None if use_global_config else repo_name |
|
993 | 994 | if config_repo and new_parent_path: |
|
994 | 995 | config_repo = Repository.NAME_SEP.join( |
|
995 | 996 | (new_parent_path, config_repo)) |
|
996 | 997 | config = make_db_config(clear_session=False, repo=config_repo) |
|
997 | 998 | config.set('extensions', 'largefiles', '') |
|
998 | 999 | |
|
999 | 1000 | # patch and reset hooks section of UI config to not run any |
|
1000 | 1001 | # hooks on creating remote repo |
|
1001 | 1002 | config.clear_section('hooks') |
|
1002 | 1003 | |
|
1003 | 1004 | # TODO: johbo: Unify this, hardcoded "bare=True" does not look nice |
|
1004 | 1005 | if repo_type == 'git': |
|
1005 | 1006 | repo = backend( |
|
1006 | 1007 | repo_path, config=config, create=True, src_url=clone_uri, bare=True, |
|
1007 | 1008 | with_wire={"cache": False}) |
|
1008 | 1009 | else: |
|
1009 | 1010 | repo = backend( |
|
1010 | 1011 | repo_path, config=config, create=True, src_url=clone_uri, |
|
1011 | 1012 | with_wire={"cache": False}) |
|
1012 | 1013 | |
|
1013 | 1014 | if install_hooks: |
|
1014 | 1015 | repo.install_hooks() |
|
1015 | 1016 | |
|
1016 | 1017 | log.debug('Created repo %s with %s backend', |
|
1017 | 1018 | safe_unicode(repo_name), safe_unicode(repo_type)) |
|
1018 | 1019 | return repo |
|
1019 | 1020 | |
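The per-repo config scope above is the parent group path joined to the repo name with the repository name separator; e.g., assuming Repository.NAME_SEP is '/':

    NAME_SEP = '/'  # assumed value of Repository.NAME_SEP
    config_repo = NAME_SEP.join(('mygroup/subgroup', 'myrepo'))
    assert config_repo == 'mygroup/subgroup/myrepo'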
|
1020 | 1021 | def _rename_filesystem_repo(self, old, new): |
|
1021 | 1022 | """ |
|
1022 | 1023 | Renames a repository on the filesystem.
|
1023 | 1024 | |
|
1024 | 1025 | :param old: old name |
|
1025 | 1026 | :param new: new name |
|
1026 | 1027 | """ |
|
1027 | 1028 | log.info('renaming repo from %s to %s', old, new) |
|
1028 | 1029 | |
|
1029 | 1030 | old_path = os.path.join(self.repos_path, old) |
|
1030 | 1031 | new_path = os.path.join(self.repos_path, new) |
|
1031 | 1032 | if os.path.isdir(new_path): |
|
1032 | 1033 | raise Exception( |
|
1033 | 1034 | 'Was trying to rename to already existing dir %s' % new_path |
|
1034 | 1035 | ) |
|
1035 | 1036 | shutil.move(old_path, new_path) |
|
1036 | 1037 | |
|
1037 | 1038 | def _delete_filesystem_repo(self, repo): |
|
1038 | 1039 | """ |
|
1039 | 1040 | Removes the repo from the filesystem. The removal is actually made by

1040 | 1041 | adding an rm__ prefix to the dir and renaming the internal .hg/.git

1041 | 1042 | dirs, so the repository is no longer valid for rhodecode; it can be

1042 | 1043 | undeleted later by reverting the renames on this repository.
|
1043 | 1044 | |
|
1044 | 1045 | :param repo: repo object |
|
1045 | 1046 | """ |
|
1046 | 1047 | rm_path = os.path.join(self.repos_path, repo.repo_name) |
|
1047 | 1048 | repo_group = repo.group |
|
1048 | 1049 | log.info("Removing repository %s", rm_path) |
|
1049 | 1050 | # disable hg/git internals so the dir doesn't get detected as a repo
|
1050 | 1051 | alias = repo.repo_type |
|
1051 | 1052 | |
|
1052 | 1053 | config = make_db_config(clear_session=False) |
|
1053 | 1054 | config.set('extensions', 'largefiles', '') |
|
1054 | 1055 | bare = getattr(repo.scm_instance(config=config), 'bare', False) |
|
1055 | 1056 | |
|
1056 | 1057 | # skip this for bare git repos |
|
1057 | 1058 | if not bare: |
|
1058 | 1059 | # disable VCS repo |
|
1059 | 1060 | vcs_path = os.path.join(rm_path, '.%s' % alias) |
|
1060 | 1061 | if os.path.exists(vcs_path): |
|
1061 | 1062 | shutil.move(vcs_path, os.path.join(rm_path, 'rm__.%s' % alias)) |
|
1062 | 1063 | |
|
1063 | 1064 | _now = datetime.datetime.now() |
|
1064 | 1065 | _ms = str(_now.microsecond).rjust(6, '0') |
|
1065 | 1066 | _d = 'rm__%s__%s' % (_now.strftime('%Y%m%d_%H%M%S_' + _ms), |
|
1066 | 1067 | repo.just_name) |
|
1067 | 1068 | if repo_group: |
|
1068 | 1069 | # if repository is in group, prefix the removal path with the group |
|
1069 | 1070 | args = repo_group.full_path_splitted + [_d] |
|
1070 | 1071 | _d = os.path.join(*args) |
|
1071 | 1072 | |
|
1072 | 1073 | if os.path.isdir(rm_path): |
|
1073 | 1074 | shutil.move(rm_path, os.path.join(self.repos_path, _d)) |
|
1074 | 1075 | |
|
1075 | 1076 | # finally cleanup diff-cache if it exists |
|
1076 | 1077 | cached_diffs_dir = repo.cached_diffs_dir |
|
1077 | 1078 | if os.path.isdir(cached_diffs_dir): |
|
1078 | 1079 | shutil.rmtree(cached_diffs_dir) |
|
1079 | 1080 | |
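The rm__ naming scheme used for soft removal can be reproduced standalone; a runnable sketch of the timestamped removal dir name (the function name is illustrative):

    import datetime

    def removal_dir_name(just_name, now=None):
        # mirrors rm__<YYYYmmdd_HHMMSS_microseconds>__<name> from above
        now = now or datetime.datetime.now()
        ms = str(now.microsecond).rjust(6, '0')
        return 'rm__%s__%s' % (now.strftime('%Y%m%d_%H%M%S_' + ms), just_name)

    print(removal_dir_name('my-repo'))  # e.g. rm__20200101_120000_123456__my-repo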
|
1080 | 1081 | |
|
1081 | 1082 | class ReadmeFinder: |
|
1082 | 1083 | """ |
|
1083 | 1084 | Utility which knows how to find a readme for a specific commit. |
|
1084 | 1085 | |
|
1085 | 1086 | The main idea is that this is a configurable algorithm. When creating an |
|
1086 | 1087 | instance you can define parameters, currently only the `default_renderer`. |
|
1087 | 1088 | Based on this configuration the method :meth:`search` behaves slightly

1088 | 1089 | differently.
|
1089 | 1090 | """ |
|
1090 | 1091 | |
|
1091 | 1092 | readme_re = re.compile(r'^readme(\.[^\.]+)?$', re.IGNORECASE) |
|
1092 | 1093 | path_re = re.compile(r'^docs?', re.IGNORECASE) |
|
1093 | 1094 | |
|
1094 | 1095 | default_priorities = { |
|
1095 | 1096 | None: 0, |
|
1096 | 1097 | '.text': 2, |
|
1097 | 1098 | '.txt': 3, |
|
1098 | 1099 | '.rst': 1, |
|
1099 | 1100 | '.rest': 2, |
|
1100 | 1101 | '.md': 1, |
|
1101 | 1102 | '.mkdn': 2, |
|
1102 | 1103 | '.mdown': 3, |
|
1103 | 1104 | '.markdown': 4, |
|
1104 | 1105 | } |
|
1105 | 1106 | |
|
1106 | 1107 | path_priority = { |
|
1107 | 1108 | 'doc': 0, |
|
1108 | 1109 | 'docs': 1, |
|
1109 | 1110 | } |
|
1110 | 1111 | |
|
1111 | 1112 | FALLBACK_PRIORITY = 99 |
|
1112 | 1113 | |
|
1113 | 1114 | RENDERER_TO_EXTENSION = { |
|
1114 | 1115 | 'rst': ['.rst', '.rest'], |
|
1115 | 1116 | 'markdown': ['.md', '.mkdn', '.mdown', '.markdown'],
|
1116 | 1117 | } |
|
1117 | 1118 | |
|
1118 | 1119 | def __init__(self, default_renderer=None): |
|
1119 | 1120 | self._default_renderer = default_renderer |
|
1120 | 1121 | self._renderer_extensions = self.RENDERER_TO_EXTENSION.get( |
|
1121 | 1122 | default_renderer, []) |
|
1122 | 1123 | |
|
1123 | 1124 | def search(self, commit, path=u'/'): |
|
1124 | 1125 | """ |
|
1125 | 1126 | Find a readme in the given `commit`. |
|
1126 | 1127 | """ |
|
1127 | 1128 | nodes = commit.get_nodes(path) |
|
1128 | 1129 | matches = self._match_readmes(nodes) |
|
1129 | 1130 | matches = self._sort_according_to_priority(matches) |
|
1130 | 1131 | if matches: |
|
1131 | 1132 | return matches[0].node |
|
1132 | 1133 | |
|
1133 | 1134 | paths = self._match_paths(nodes) |
|
1134 | 1135 | paths = self._sort_paths_according_to_priority(paths) |
|
1135 | 1136 | for path in paths: |
|
1136 | 1137 | match = self.search(commit, path=path) |
|
1137 | 1138 | if match: |
|
1138 | 1139 | return match |
|
1139 | 1140 | |
|
1140 | 1141 | return None |
|
1141 | 1142 | |
|
1142 | 1143 | def _match_readmes(self, nodes): |
|
1143 | 1144 | for node in nodes: |
|
1144 | 1145 | if not node.is_file(): |
|
1145 | 1146 | continue |
|
1146 | 1147 | path = node.path.rsplit('/', 1)[-1] |
|
1147 | 1148 | match = self.readme_re.match(path) |
|
1148 | 1149 | if match: |
|
1149 | 1150 | extension = match.group(1) |
|
1150 | 1151 | yield ReadmeMatch(node, match, self._priority(extension)) |
|
1151 | 1152 | |
|
1152 | 1153 | def _match_paths(self, nodes): |
|
1153 | 1154 | for node in nodes: |
|
1154 | 1155 | if not node.is_dir(): |
|
1155 | 1156 | continue |
|
1156 | 1157 | match = self.path_re.match(node.path) |
|
1157 | 1158 | if match: |
|
1158 | 1159 | yield node.path |
|
1159 | 1160 | |
|
1160 | 1161 | def _priority(self, extension): |
|
1161 | 1162 | renderer_priority = ( |
|
1162 | 1163 | 0 if extension in self._renderer_extensions else 1) |
|
1163 | 1164 | extension_priority = self.default_priorities.get( |
|
1164 | 1165 | extension, self.FALLBACK_PRIORITY) |
|
1165 | 1166 | return (renderer_priority, extension_priority) |
|
1166 | 1167 | |
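The two-level priority tuples returned by _priority sort lexicographically: a renderer match (0) beats a non-match (1), and the extension priority breaks ties. A pure-Python illustration with made-up candidates:

    candidates = [
        ('README.txt',      (1, 3)),   # not an extension of the default renderer
        ('README.md',       (0, 1)),   # matches default renderer 'markdown'
        ('README.markdown', (0, 4)),
    ]
    best = sorted(candidates, key=lambda c: c[1])[0]
    assert best[0] == 'README.md'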
|
1167 | 1168 | def _sort_according_to_priority(self, matches): |
|
1168 | 1169 | |
|
1169 | 1170 | def priority_and_path(match): |
|
1170 | 1171 | return (match.priority, match.path) |
|
1171 | 1172 | |
|
1172 | 1173 | return sorted(matches, key=priority_and_path) |
|
1173 | 1174 | |
|
1174 | 1175 | def _sort_paths_according_to_priority(self, paths): |
|
1175 | 1176 | |
|
1176 | 1177 | def priority_and_path(path): |
|
1177 | 1178 | return (self.path_priority.get(path, self.FALLBACK_PRIORITY), path) |
|
1178 | 1179 | |
|
1179 | 1180 | return sorted(paths, key=priority_and_path) |
|
1180 | 1181 | |
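The readme_re pattern above accepts bare 'readme' names, case-insensitively, and captures the extension used for prioritization; a runnable check:

    import re

    readme_re = re.compile(r'^readme(\.[^\.]+)?$', re.IGNORECASE)
    for name in ['README', 'readme.md', 'Readme.rst', 'NOTREADME.md']:
        m = readme_re.match(name)
        print(name, '->', m.group(1) if m else 'no match')
    # README -> None, readme.md -> .md, Readme.rst -> .rst, NOTREADME.md -> no match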
|
1181 | 1182 | |
|
1182 | 1183 | class ReadmeMatch: |
|
1183 | 1184 | |
|
1184 | 1185 | def __init__(self, node, match, priority): |
|
1185 | 1186 | self.node = node |
|
1186 | 1187 | self._match = match |
|
1187 | 1188 | self.priority = priority |
|
1188 | 1189 | |
|
1189 | 1190 | @property |
|
1190 | 1191 | def path(self): |
|
1191 | 1192 | return self.node.path |
|
1192 | 1193 | |
|
1193 | 1194 | def __repr__(self): |
|
1194 | 1195 | return '<ReadmeMatch {} priority={}>'.format(self.path, self.priority)