@@ -1,430 +1,433 @@
|
1 | 1 | # Copyright (C) 2016-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | """ |
|
20 | 20 | HTTP-based client for the VCSServer. |
|
21 | 21 | """ |
|
22 | 22 | |
|
23 | 23 | import copy |
|
24 | 24 | import logging |
|
25 | 25 | import threading |
|
26 | 26 | import time |
|
27 | 27 | import urllib.request |
|
28 | 28 | import urllib.error |
|
29 | 29 | import urllib.parse |
|
31 | 31 | import uuid |
|
32 | 32 | import traceback |
|
33 | 33 | |
|
34 | 34 | import pycurl |
|
35 | 35 | import msgpack |
|
36 | 36 | import requests |
|
37 | 37 | from requests.packages.urllib3.util.retry import Retry |
|
38 | 38 | |
|
39 | 39 | import rhodecode |
|
40 | 40 | from rhodecode.lib import rc_cache |
|
41 | 41 | from rhodecode.lib.rc_cache.utils import compute_key_from_params |
|
42 | 42 | from rhodecode.lib.system_info import get_cert_path |
|
43 | 43 | from rhodecode.lib.vcs import exceptions, CurlSession |
|
44 | 44 | from rhodecode.lib.utils2 import str2bool |
|
45 | 45 | |
|
46 | 46 | log = logging.getLogger(__name__) |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | # TODO: mikhail: Keep it in sync with vcsserver's |
|
50 | 50 | # HTTPApplication.ALLOWED_EXCEPTIONS |
|
51 | 51 | EXCEPTIONS_MAP = { |
|
52 | 52 | 'KeyError': KeyError, |
|
53 | 53 | 'URLError': urllib.error.URLError, |
|
54 | 54 | } |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | def _remote_call(url, payload, exceptions_map, session, retries=3): |
|
58 | 58 | |
|
59 | 59 | for attempt in range(retries): |
|
60 | 60 | try: |
|
61 | 61 | response = session.post(url, data=msgpack.packb(payload)) |
|
62 | 62 | break |
|
63 | 63 | except pycurl.error as e: |
|
64 | 64 | error_code, error_message = e.args |
|
65 | 65 | if error_code == pycurl.E_RECV_ERROR: |
|
66 | 66 | log.warning(f'Received a "Connection reset by peer" error. ' |
|
67 | 67 | f'Retrying... ({attempt + 1}/{retries})') |
|
68 | 68 | continue # Retry if connection reset error. |
|
69 | 69 | msg = f'{e}. \npycurl traceback: {traceback.format_exc()}' |
|
70 | 70 | raise exceptions.HttpVCSCommunicationError(msg) |
|
71 | 71 | except Exception as e: |
|
72 | 72 | message = getattr(e, 'message', '') |
|
73 | 73 | if 'Failed to connect' in message: |
|
74 | 74 | # gevent doesn't return proper pycurl errors |
|
75 | 75 | raise exceptions.HttpVCSCommunicationError(e) |
|
76 | 76 | else: |
|
77 | 77 | raise |
|
78 | 78 | |
|
79 | 79 | if response.status_code >= 400: |
|
80 | 80 | content_type = response.content_type |
|
81 | 81 | log.error('Call to %s returned non-200 HTTP code: %s [%s]', |
|
82 | 82 | url, response.status_code, content_type) |
|
83 | 83 | raise exceptions.HttpVCSCommunicationError(repr(response.content)) |
|
84 | 84 | |
|
85 | 85 | try: |
|
86 | 86 | response = msgpack.unpackb(response.content) |
|
87 | 87 | except Exception: |
|
88 | 88 | log.exception('Failed to decode response from msgpack') |
|
89 | 89 | raise |
|
90 | 90 | |
|
91 | 91 | error = response.get('error') |
|
92 | 92 | if error: |
|
93 | 93 | type_ = error.get('type', 'Exception') |
|
94 | 94 | exc = exceptions_map.get(type_, Exception) |
|
95 | 95 | exc = exc(error.get('message')) |
|
96 | 96 | try: |
|
97 | 97 | exc._vcs_kind = error['_vcs_kind'] |
|
98 | 98 | except KeyError: |
|
99 | 99 | pass |
|
100 | 100 | |
|
101 | 101 | try: |
|
102 | 102 | exc._vcs_server_traceback = error['traceback'] |
|
103 | 103 | exc._vcs_server_org_exc_name = error['org_exc'] |
|
104 | 104 | exc._vcs_server_org_exc_tb = error['org_exc_tb'] |
|
105 | 105 | except KeyError: |
|
106 | 106 | pass |
|
107 | 107 | |
|
108 | 108 | exc.add_note(attach_exc_details(error)) |
|
109 | 109 | raise exc # raising the org exception from vcsserver |
|
110 | 110 | return response.get('result') |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | def attach_exc_details(error): |
|
114 | 114 | note = '-- EXC NOTE -- :\n' |
|
115 | 115 | note += f'vcs_kind: {error.get("_vcs_kind")}\n' |
|
116 | 116 | note += f'org_exc: {error.get("org_exc")}\n' |
|
117 | 117 | note += f'tb: {error.get("traceback")}\n' |
|
118 | 118 | note += '-- END EXC NOTE --' |
|
119 | 119 | return note |
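
For context, a minimal sketch of how this note surfaces on the caller side. exc.add_note() requires Python 3.11+; the error dict shape mirrors the handler in _remote_call above, and the concrete values here are made up for illustration:

    # hypothetical error payload, shaped like the VCSServer error dict above
    error = {'_vcs_kind': 'lookup', 'org_exc': 'LookupError', 'traceback': '...'}
    exc = KeyError('missing revision')
    exc.add_note(attach_exc_details(error))  # note is appended to the traceback output
    raise exc
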
|
120 | 120 | |
|
121 | 121 | |
|
122 | 122 | def _streaming_remote_call(url, payload, exceptions_map, session, chunk_size): |
|
123 | 123 | try: |
|
124 | 124 | headers = { |
|
125 | 125 | 'X-RC-Method': payload.get('method'), |
|
126 | 126 | 'X-RC-Repo-Name': payload.get('_repo_name') |
|
127 | 127 | } |
|
128 | 128 | response = session.post(url, data=msgpack.packb(payload), headers=headers) |
|
129 | 129 | except pycurl.error as e: |
|
130 | 130 | error_code, error_message = e.args |
|
131 | 131 | msg = f'{e}. \npycurl traceback: {traceback.format_exc()}' |
|
132 | 132 | raise exceptions.HttpVCSCommunicationError(msg) |
|
133 | 133 | except Exception as e: |
|
134 | 134 | message = getattr(e, 'message', '') |
|
135 | 135 | if 'Failed to connect' in message: |
|
136 | 136 | # gevent doesn't return proper pycurl errors |
|
137 | 137 | raise exceptions.HttpVCSCommunicationError(e) |
|
138 | 138 | else: |
|
139 | 139 | raise |
|
140 | 140 | |
|
141 | 141 | if response.status_code >= 400: |
|
142 | 142 | log.error('Call to %s returned non-200 HTTP code: %s', |
|
143 | 143 | url, response.status_code) |
|
144 | 144 | raise exceptions.HttpVCSCommunicationError(repr(response.content)) |
|
145 | 145 | |
|
146 | 146 | return response.iter_content(chunk_size=chunk_size) |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | class ServiceConnection(object): |
|
150 | 150 | def __init__(self, server_and_port, backend_endpoint, session_factory): |
|
151 | 151 | self.url = urllib.parse.urljoin(f'http://{server_and_port}', backend_endpoint) |
|
152 | 152 | self._session_factory = session_factory |
|
153 | 153 | |
|
154 | 154 | def __getattr__(self, name): |
|
155 | 155 | def f(*args, **kwargs): |
|
156 | 156 | return self._call(name, *args, **kwargs) |
|
157 | 157 | return f |
|
158 | 158 | |
|
159 | 159 | @exceptions.map_vcs_exceptions |
|
160 | 160 | def _call(self, name, *args, **kwargs): |
|
161 | 161 | payload = { |
|
162 | 162 | 'id': str(uuid.uuid4()), |
|
163 | 163 | 'method': name, |
|
164 | 164 | 'params': {'args': args, 'kwargs': kwargs} |
|
165 | 165 | } |
|
166 | 166 | return _remote_call( |
|
167 | 167 | self.url, payload, EXCEPTIONS_MAP, self._session_factory()) |
|
168 | 168 | |
|
169 | 169 | |
|
170 | 170 | class RemoteVCSMaker(object): |
|
171 | 171 | |
|
172 | 172 | def __init__(self, server_and_port, backend_endpoint, backend_type, session_factory): |
|
173 | 173 | self.url = urllib.parse.urljoin(f'http://{server_and_port}', backend_endpoint) |
|
174 | 174 | self.stream_url = urllib.parse.urljoin(f'http://{server_and_port}', backend_endpoint+'/stream') |
|
175 | 175 | |
|
176 | 176 | self._session_factory = session_factory |
|
177 | 177 | self.backend_type = backend_type |
|
178 | 178 | |
|
179 | 179 | @classmethod |
|
180 | 180 | def init_cache_region(cls, repo_id): |
|
181 | 181 | cache_namespace_uid = f'repo.{repo_id}' |
|
182 | 182 | region = rc_cache.get_or_create_region('cache_repo', cache_namespace_uid) |
|
183 | 183 | return region, cache_namespace_uid |
|
184 | 184 | |
|
185 | 185 | def __call__(self, path, repo_id, config, with_wire=None): |
|
186 | 186 | log.debug('%s RepoMaker call on %s', self.backend_type.upper(), path) |
|
187 | 187 | return RemoteRepo(path, repo_id, config, self, with_wire=with_wire) |
|
188 | 188 | |
|
189 | 189 | def __getattr__(self, name): |
|
190 | 190 | def remote_attr(*args, **kwargs): |
|
191 | 191 | return self._call(name, *args, **kwargs) |
|
192 | 192 | return remote_attr |
|
193 | 193 | |
|
194 | 194 | @exceptions.map_vcs_exceptions |
|
195 | 195 | def _call(self, func_name, *args, **kwargs): |
|
196 | 196 | payload = { |
|
197 | 197 | 'id': str(uuid.uuid4()), |
|
198 | 198 | 'method': func_name, |
|
199 | 199 | 'backend': self.backend_type, |
|
200 | 200 | 'params': {'args': args, 'kwargs': kwargs} |
|
201 | 201 | } |
|
202 | 202 | url = self.url |
|
203 | 203 | return _remote_call(url, payload, EXCEPTIONS_MAP, self._session_factory()) |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | class RemoteRepo(object): |
|
207 | 207 | CHUNK_SIZE = 16384 |
|
208 | 208 | |
|
209 | 209 | def __init__(self, path, repo_id, config, remote_maker, with_wire=None): |
|
210 | 210 | self.url = remote_maker.url |
|
211 | 211 | self.stream_url = remote_maker.stream_url |
|
212 | 212 | self._session = remote_maker._session_factory() |
|
213 | 213 | |
|
214 | 214 | cache_repo_id = self._repo_id_sanitizer(repo_id) |
|
215 | 215 | _repo_name = self._get_repo_name(config, path) |
|
216 | 216 | self._cache_region, self._cache_namespace = \ |
|
217 | 217 | remote_maker.init_cache_region(cache_repo_id) |
|
218 | 218 | |
|
219 | 219 | with_wire = with_wire or {} |
|
220 | 220 | |
|
221 | 221 | repo_state_uid = with_wire.get('repo_state_uid') or 'state' |
|
222 | 222 | |
|
223 | 223 | self._wire = { |
|
224 | 224 | "_repo_name": _repo_name, |
|
225 | 225 | "path": path, # repo path |
|
226 | 226 | "repo_id": repo_id, |
|
227 | 227 | "cache_repo_id": cache_repo_id, |
|
228 | 228 | "config": config, |
|
229 | 229 | "repo_state_uid": repo_state_uid, |
|
230 | 230 | "context": self._create_vcs_cache_context(path, repo_state_uid) |
|
231 | 231 | } |
|
232 | 232 | |
|
233 | 233 | if with_wire: |
|
234 | 234 | self._wire.update(with_wire) |
|
235 | 235 | |
|
236 | 236 | # NOTE(johbo): Trading complexity for performance. Avoiding the call to |
|
237 | 237 | # log.debug brings a few percent gain even if it is not active. |
|
238 | 238 | self._call_with_logging = log.isEnabledFor(logging.DEBUG) |
|
240 | 240 | |
|
241 | 241 | self.cert_dir = get_cert_path(rhodecode.CONFIG.get('__file__')) |
|
242 | 242 | |
|
243 | 243 | def _get_repo_name(self, config, path): |
|
244 | 244 | repo_store = config.get('paths', '/') |
|
245 | 245 | return path.split(repo_store)[-1].lstrip('/') |
|
246 | 246 | |
|
247 | 247 | def _repo_id_sanitizer(self, repo_id): |
|
248 | 248 | pathless = repo_id.replace('/', '__').replace('-', '_') |
|
249 | 249 | return ''.join(char if ord(char) < 128 else '_{}_'.format(ord(char)) for char in pathless) |
|
250 | 250 | |
|
251 | 251 | def __getattr__(self, name): |
|
252 | 252 | |
|
253 | 253 | if name.startswith('stream:'): |
|
254 | 254 | def repo_remote_attr(*args, **kwargs): |
|
255 | 255 | return self._call_stream(name, *args, **kwargs) |
|
256 | 256 | else: |
|
257 | 257 | def repo_remote_attr(*args, **kwargs): |
|
258 | 258 | return self._call(name, *args, **kwargs) |
|
259 | 259 | |
|
260 | 260 | return repo_remote_attr |
|
261 | 261 | |
|
262 | 262 | def _base_call(self, name, *args, **kwargs): |
|
263 | 263 | # TODO: oliver: This pre-call step is currently necessary because the |
|
264 | 264 | # config object is modified for hooking scenarios |
|
265 | 265 | wire = copy.deepcopy(self._wire) |
|
266 | 266 | wire["config"] = wire["config"].serialize() |
|
267 | 267 | wire["config"].append(('vcs', 'ssl_dir', self.cert_dir)) |
|
268 | 268 | |
|
269 | 269 | payload = { |
|
270 | 270 | 'id': str(uuid.uuid4()), |
|
271 | 271 | 'method': name, |
|
272 | 272 | "_repo_name": wire['_repo_name'], |
|
273 | 273 | 'params': {'wire': wire, 'args': args, 'kwargs': kwargs} |
|
274 | 274 | } |
|
275 | 275 | |
|
276 | 276 | context_uid = wire.get('context') |
|
277 | 277 | return context_uid, payload |
|
278 | 278 | |
|
279 | 279 | def get_local_cache(self, name, args): |
|
280 | 280 | cache_on = False |
|
281 | 281 | cache_key = '' |
|
282 | 282 | local_cache_on = rhodecode.ConfigGet().get_bool('vcs.methods.cache') |
|
283 | 283 | |
|
284 | 284 | cache_methods = [ |
|
285 | 285 | 'branches', 'tags', 'bookmarks', |
|
286 | 286 | 'is_large_file', 'is_binary', |
|
287 | 287 | 'fctx_size', 'stream:fctx_node_data', 'blob_raw_length', |
|
288 | 288 | 'node_history', |
|
289 | 289 | 'revision', 'tree_items', |
|
290 | 290 | 'ctx_list', 'ctx_branch', 'ctx_description', |
|
291 | 291 | 'bulk_request', |
|
292 | 292 | 'assert_correct_path', |
|
293 | 293 | 'is_path_valid_repository', |
|
294 | 294 | ] |
|
295 | 295 | |
|
296 | if local_cache_on and name in cache_methods: | |
|
296 | wire_cache = self._wire['cache'] | |
|
297 | ||
|
298 | if local_cache_on and wire_cache and name in cache_methods: | |
|
297 | 299 | cache_on = True |
|
298 | 300 | repo_state_uid = self._wire['repo_state_uid'] |
|
299 | 301 | call_args = [a for a in args] |
|
300 | 302 | cache_key = compute_key_from_params(repo_state_uid, name, *call_args) |
|
301 | 303 | |
|
302 | 304 | return cache_on, cache_key |
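
Conceptually, the cache key ties a method call to the repository's state. A hedged sketch of what compute_key_from_params reduces to (the real helper lives in rhodecode.lib.rc_cache.utils; the sha1 hashing shown here is an assumption):

    import hashlib

    def compute_key_from_params_sketch(*args):
        # join all params and hash them, so ('state', 'branches') and
        # ('state2', 'branches') land in different cache slots
        return hashlib.sha1('_'.join(map(str, args)).encode('utf8')).hexdigest()

    cache_key = compute_key_from_params_sketch('state', 'branches')

Once repo_state_uid changes, every previously cached method result misses and is recomputed.
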
|
303 | 305 | |
|
304 | 306 | @exceptions.map_vcs_exceptions |
|
305 | 307 | def _call(self, name, *args, **kwargs): |
|
308 | ||
|
306 | 309 | context_uid, payload = self._base_call(name, *args, **kwargs) |
|
307 | 310 | url = self.url |
|
308 | 311 | |
|
309 | 312 | start = time.time() |
|
310 | 313 | cache_on, cache_key = self.get_local_cache(name, args) |
|
311 | 314 | |
|
312 | 315 | @self._cache_region.conditional_cache_on_arguments( |
|
313 | 316 | namespace=self._cache_namespace, condition=cache_on and cache_key) |
|
314 | 317 | def remote_call(_cache_key): |
|
315 | 318 | if self._call_with_logging: |
|
316 | 319 | args_repr = f'ARG: {str(args):.512}|KW: {str(kwargs):.512}' |
|
317 | 320 | log.debug('Calling %s@%s with args:%r. wire_context: %s cache_on: %s', |
|
318 | 321 | url, name, args_repr, context_uid, cache_on) |
|
319 | 322 | return _remote_call(url, payload, EXCEPTIONS_MAP, self._session) |
|
320 | 323 | |
|
321 | 324 | result = remote_call(cache_key) |
|
322 | 325 | if self._call_with_logging: |
|
323 | 326 | log.debug('Call %s@%s took: %.4fs. wire_context: %s', |
|
324 | 327 | url, name, time.time()-start, context_uid) |
|
325 | 328 | return result |
|
326 | 329 | |
|
327 | 330 | @exceptions.map_vcs_exceptions |
|
328 | 331 | def _call_stream(self, name, *args, **kwargs): |
|
329 | 332 | context_uid, payload = self._base_call(name, *args, **kwargs) |
|
330 | 333 | payload['chunk_size'] = self.CHUNK_SIZE |
|
331 | 334 | url = self.stream_url |
|
332 | 335 | |
|
333 | 336 | start = time.time() |
|
334 | 337 | cache_on, cache_key = self.get_local_cache(name, args) |
|
335 | 338 | |
|
336 | 339 | # streaming responses cannot be cached locally, so the call is made directly |
|
337 | 340 | def streaming_remote_call(_cache_key): |
|
338 | 341 | if self._call_with_logging: |
|
339 | 342 | args_repr = f'ARG: {str(args):.512}|KW: {str(kwargs):.512}' |
|
340 | 343 | log.debug('Calling %s@%s with args:%r. wire_context: %s cache_on: %s', |
|
341 | 344 | url, name, args_repr, context_uid, cache_on) |
|
342 | 345 | return _streaming_remote_call(url, payload, EXCEPTIONS_MAP, self._session, self.CHUNK_SIZE) |
|
343 | 346 | |
|
344 | 347 | result = streaming_remote_call(cache_key) |
|
345 | 348 | if self._call_with_logging: |
|
346 | 349 | log.debug('Call %s@%s took: %.4fs. wire_context: %s', |
|
347 | 350 | url, name, time.time()-start, context_uid) |
|
348 | 351 | return result |
|
349 | 352 | |
|
350 | 353 | def __getitem__(self, key): |
|
351 | 354 | return self.revision(key) |
|
352 | 355 | |
|
353 | 356 | def _create_vcs_cache_context(self, *args): |
|
354 | 357 | """ |
|
355 | 358 | Creates a unique string which is passed to the VCSServer on every |
|
356 | 359 | remote call. It is used as cache key in the VCSServer. |
|
357 | 360 | """ |
|
358 | 361 | hash_key = '-'.join(map(str, args)) |
|
359 | 362 | return str(uuid.uuid5(uuid.NAMESPACE_URL, hash_key)) |
|
360 | 363 | |
|
361 | 364 | def invalidate_vcs_cache(self): |
|
362 | 365 | """ |
|
363 | 366 | This invalidates the context which is sent to the VCSServer on every |
|
364 | 367 | call to a remote method. It forces the VCSServer to create a fresh |
|
365 | 368 | repository instance on the next call to a remote method. |
|
366 | 369 | """ |
|
367 | 370 | self._wire['context'] = str(uuid.uuid4()) |
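
The context uid is deterministic by design: uuid5 over NAMESPACE_URL always maps the same path/repo_state_uid pair to the same VCSServer-side cache slot, until invalidate_vcs_cache() swaps in a random uuid4. A quick demonstration:

    import uuid

    ctx_a = uuid.uuid5(uuid.NAMESPACE_URL, '/repos/foo-state')
    ctx_b = uuid.uuid5(uuid.NAMESPACE_URL, '/repos/foo-state')
    assert ctx_a == ctx_b    # stable across calls and processes
    fresh = uuid.uuid4()     # invalidation: effectively never equal to the above
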
|
368 | 371 | |
|
369 | 372 | |
|
370 | 373 | class VcsHttpProxy(object): |
|
371 | 374 | |
|
372 | 375 | CHUNK_SIZE = 16384 |
|
373 | 376 | |
|
374 | 377 | def __init__(self, server_and_port, backend_endpoint): |
|
375 | 378 | retries = Retry(total=5, connect=None, read=None, redirect=None) |
|
376 | 379 | |
|
377 | 380 | adapter = requests.adapters.HTTPAdapter(max_retries=retries) |
|
378 | 381 | self.base_url = urllib.parse.urljoin(f'http://{server_and_port}', backend_endpoint) |
|
379 | 382 | self.session = requests.Session() |
|
380 | 383 | self.session.mount('http://', adapter) |
|
381 | 384 | |
|
382 | 385 | def handle(self, environment, input_data, *args, **kwargs): |
|
383 | 386 | data = { |
|
384 | 387 | 'environment': environment, |
|
385 | 388 | 'input_data': input_data, |
|
386 | 389 | 'args': args, |
|
387 | 390 | 'kwargs': kwargs |
|
388 | 391 | } |
|
389 | 392 | result = self.session.post( |
|
390 | 393 | self.base_url, msgpack.packb(data), stream=True) |
|
391 | 394 | return self._get_result(result) |
|
392 | 395 | |
|
393 | 396 | def _deserialize_and_raise(self, error): |
|
394 | 397 | exception = Exception(error['message']) |
|
395 | 398 | try: |
|
396 | 399 | exception._vcs_kind = error['_vcs_kind'] |
|
397 | 400 | except KeyError: |
|
398 | 401 | pass |
|
399 | 402 | raise exception |
|
400 | 403 | |
|
401 | 404 | def _iterate(self, result): |
|
402 | 405 | unpacker = msgpack.Unpacker() |
|
403 | 406 | for line in result.iter_content(chunk_size=self.CHUNK_SIZE): |
|
404 | 407 | unpacker.feed(line) |
|
405 | 408 | yield from unpacker |
|
406 | 409 | |
|
407 | 410 | def _get_result(self, result): |
|
408 | 411 | iterator = self._iterate(result) |
|
409 | 412 | error = next(iterator) |
|
410 | 413 | if error: |
|
411 | 414 | self._deserialize_and_raise(error) |
|
412 | 415 | |
|
413 | 416 | status = next(iterator) |
|
414 | 417 | headers = next(iterator) |
|
415 | 418 | |
|
416 | 419 | return iterator, status, headers |
|
417 | 420 | |
|
418 | 421 | |
|
419 | 422 | class ThreadlocalSessionFactory(object): |
|
420 | 423 | """ |
|
421 | 424 | Creates one CurlSession per thread on demand. |
|
422 | 425 | """ |
|
423 | 426 | |
|
424 | 427 | def __init__(self): |
|
425 | 428 | self._thread_local = threading.local() |
|
426 | 429 | |
|
427 | 430 | def __call__(self): |
|
428 | 431 | if not hasattr(self._thread_local, 'curl_session'): |
|
429 | 432 | self._thread_local.curl_session = CurlSession() |
|
430 | 433 | return self._thread_local.curl_session |
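
For orientation, a minimal sketch of how the pieces above compose. The host/port, the '/git' endpoint path, the 'branches' method and the import path are assumptions; adjust to your deployment:

    # assumes this module is importable as rhodecode.lib.vcs.client_http
    from rhodecode.lib.vcs.client_http import RemoteVCSMaker, ThreadlocalSessionFactory

    session_factory = ThreadlocalSessionFactory()   # one CurlSession per thread
    maker = RemoteVCSMaker('localhost:9900', '/git', 'git', session_factory)

    # maker(path, repo_id, config) returns a RemoteRepo; attribute access on it
    # is proxied as a msgpack-encoded HTTP POST to the VCSServer:
    # repo = maker('/repos/my-repo', repo_id=1, config=config)
    # branches = repo.branches()  # may be served from the local dogpile cache
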
@@ -1,1053 +1,1051 @@
|
1 | 1 | # Copyright (C) 2010-2023 RhodeCode GmbH |
|
2 | 2 | # |
|
3 | 3 | # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | # (only), as published by the Free Software Foundation. |
|
6 | 6 | # |
|
7 | 7 | # This program is distributed in the hope that it will be useful, |
|
8 | 8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | # GNU General Public License for more details. |
|
11 | 11 | # |
|
12 | 12 | # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | # |
|
15 | 15 | # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | """ |
|
20 | 20 | Scm model for RhodeCode |
|
21 | 21 | """ |
|
22 | 22 | |
|
23 | 23 | import os.path |
|
24 | 24 | import traceback |
|
25 | 25 | import logging |
|
26 | 26 | import io |
|
27 | 27 | |
|
28 | 28 | from sqlalchemy import func |
|
29 | 29 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
30 | 30 | |
|
31 | 31 | import rhodecode |
|
32 | 32 | from rhodecode.lib.str_utils import safe_bytes |
|
33 | 33 | from rhodecode.lib.vcs import get_backend |
|
34 | 34 | from rhodecode.lib.vcs.exceptions import RepositoryError, NodeNotChangedError |
|
35 | 35 | from rhodecode.lib.vcs.nodes import FileNode |
|
36 | 36 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
37 | 37 | from rhodecode.lib import helpers as h, rc_cache |
|
38 | 38 | from rhodecode.lib.auth import ( |
|
39 | 39 | HasRepoPermissionAny, HasRepoGroupPermissionAny, |
|
40 | 40 | HasUserGroupPermissionAny) |
|
41 | 41 | from rhodecode.lib.exceptions import NonRelativePathError, IMCCommitError |
|
42 | 42 | from rhodecode.lib import hooks_utils |
|
43 | 43 | from rhodecode.lib.utils import ( |
|
44 | 44 | get_filesystem_repos, make_db_config) |
|
45 | 45 | from rhodecode.lib.str_utils import safe_str |
|
46 | 46 | from rhodecode.lib.system_info import get_system_info |
|
47 | 47 | from rhodecode.model import BaseModel |
|
48 | 48 | from rhodecode.model.db import ( |
|
49 | 49 | or_, false, null, |
|
50 | 50 | Repository, CacheKey, UserFollowing, UserLog, User, RepoGroup, |
|
51 | 51 | PullRequest, FileStore) |
|
52 | 52 | from rhodecode.model.settings import VcsSettingsModel |
|
53 | 53 | from rhodecode.model.validation_schema.validators import url_validator, InvalidCloneUrl |
|
54 | 54 | |
|
55 | 55 | log = logging.getLogger(__name__) |
|
56 | 56 | |
|
57 | 57 | |
|
58 | 58 | class UserTemp(object): |
|
59 | 59 | def __init__(self, user_id): |
|
60 | 60 | self.user_id = user_id |
|
61 | 61 | |
|
62 | 62 | def __repr__(self): |
|
63 | 63 | return "<{}('id:{}')>".format(self.__class__.__name__, self.user_id) |
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | class RepoTemp(object): |
|
67 | 67 | def __init__(self, repo_id): |
|
68 | 68 | self.repo_id = repo_id |
|
69 | 69 | |
|
70 | 70 | def __repr__(self): |
|
71 | 71 | return "<{}('id:{}')>".format(self.__class__.__name__, self.repo_id) |
|
72 | 72 | |
|
73 | 73 | |
|
74 | 74 | class SimpleCachedRepoList(object): |
|
75 | 75 | """ |
|
76 | 76 | Lighter version of repository iteration, without the scm initialisation, |
|
77 | 77 | and with cache usage |
|
78 | 78 | """ |
|
79 | 79 | def __init__(self, db_repo_list, repos_path, order_by=None, perm_set=None): |
|
80 | 80 | self.db_repo_list = db_repo_list |
|
81 | 81 | self.repos_path = repos_path |
|
82 | 82 | self.order_by = order_by |
|
83 | 83 | self.reversed = (order_by or '').startswith('-') |
|
84 | 84 | if not perm_set: |
|
85 | 85 | perm_set = ['repository.read', 'repository.write', |
|
86 | 86 | 'repository.admin'] |
|
87 | 87 | self.perm_set = perm_set |
|
88 | 88 | |
|
89 | 89 | def __len__(self): |
|
90 | 90 | return len(self.db_repo_list) |
|
91 | 91 | |
|
92 | 92 | def __repr__(self): |
|
93 | 93 | return '<{} ({})>'.format(self.__class__.__name__, self.__len__()) |
|
94 | 94 | |
|
95 | 95 | def __iter__(self): |
|
96 | 96 | for dbr in self.db_repo_list: |
|
97 | 97 | # check permission at this level |
|
98 | 98 | has_perm = HasRepoPermissionAny(*self.perm_set)( |
|
99 | 99 | dbr.repo_name, 'SimpleCachedRepoList check') |
|
100 | 100 | if not has_perm: |
|
101 | 101 | continue |
|
102 | 102 | |
|
103 | 103 | tmp_d = { |
|
104 | 104 | 'name': dbr.repo_name, |
|
105 | 105 | 'dbrepo': dbr.get_dict(), |
|
106 | 106 | 'dbrepo_fork': dbr.fork.get_dict() if dbr.fork else {} |
|
107 | 107 | } |
|
108 | 108 | yield tmp_d |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | class _PermCheckIterator(object): |
|
112 | 112 | |
|
113 | 113 | def __init__( |
|
114 | 114 | self, obj_list, obj_attr, perm_set, perm_checker, |
|
115 | 115 | extra_kwargs=None): |
|
116 | 116 | """ |
|
117 | 117 | Creates an iterator from the given list of objects, additionally |
|
118 | 118 | checking permissions for each of them against the perm_set var |
|
119 | 119 | |
|
120 | 120 | :param obj_list: list of db objects |
|
121 | 121 | :param obj_attr: attribute of object to pass into perm_checker |
|
122 | 122 | :param perm_set: list of permissions to check |
|
123 | 123 | :param perm_checker: callable to check permissions against |
|
124 | 124 | """ |
|
125 | 125 | self.obj_list = obj_list |
|
126 | 126 | self.obj_attr = obj_attr |
|
127 | 127 | self.perm_set = perm_set |
|
128 | 128 | self.perm_checker = perm_checker(*self.perm_set) |
|
129 | 129 | self.extra_kwargs = extra_kwargs or {} |
|
130 | 130 | |
|
131 | 131 | def __len__(self): |
|
132 | 132 | return len(self.obj_list) |
|
133 | 133 | |
|
134 | 134 | def __repr__(self): |
|
135 | 135 | return '<{} ({})>'.format(self.__class__.__name__, self.__len__()) |
|
136 | 136 | |
|
137 | 137 | def __iter__(self): |
|
138 | 138 | for db_obj in self.obj_list: |
|
139 | 139 | # check permission at this level |
|
140 | 140 | # NOTE(marcink): the __dict__.get() is ~4x faster than getattr() |
|
141 | 141 | name = db_obj.__dict__.get(self.obj_attr, None) |
|
142 | 142 | if not self.perm_checker(name, self.__class__.__name__, **self.extra_kwargs): |
|
143 | 143 | continue |
|
144 | 144 | |
|
145 | 145 | yield db_obj |
|
146 | 146 | |
|
147 | 147 | |
|
148 | 148 | class RepoList(_PermCheckIterator): |
|
149 | 149 | |
|
150 | 150 | def __init__(self, db_repo_list, perm_set=None, extra_kwargs=None): |
|
151 | 151 | if not perm_set: |
|
152 | 152 | perm_set = ['repository.read', 'repository.write', 'repository.admin'] |
|
153 | 153 | |
|
154 | 154 | super().__init__( |
|
155 | 155 | obj_list=db_repo_list, |
|
156 | 156 | obj_attr='_repo_name', perm_set=perm_set, |
|
157 | 157 | perm_checker=HasRepoPermissionAny, |
|
158 | 158 | extra_kwargs=extra_kwargs) |
|
159 | 159 | |
|
160 | 160 | |
|
161 | 161 | class RepoGroupList(_PermCheckIterator): |
|
162 | 162 | |
|
163 | 163 | def __init__(self, db_repo_group_list, perm_set=None, extra_kwargs=None): |
|
164 | 164 | if not perm_set: |
|
165 | 165 | perm_set = ['group.read', 'group.write', 'group.admin'] |
|
166 | 166 | |
|
167 | 167 | super().__init__( |
|
168 | 168 | obj_list=db_repo_group_list, |
|
169 | 169 | obj_attr='_group_name', perm_set=perm_set, |
|
170 | 170 | perm_checker=HasRepoGroupPermissionAny, |
|
171 | 171 | extra_kwargs=extra_kwargs) |
|
172 | 172 | |
|
173 | 173 | |
|
174 | 174 | class UserGroupList(_PermCheckIterator): |
|
175 | 175 | |
|
176 | 176 | def __init__(self, db_user_group_list, perm_set=None, extra_kwargs=None): |
|
177 | 177 | if not perm_set: |
|
178 | 178 | perm_set = ['usergroup.read', 'usergroup.write', 'usergroup.admin'] |
|
179 | 179 | |
|
180 | 180 | super().__init__( |
|
181 | 181 | obj_list=db_user_group_list, |
|
182 | 182 | obj_attr='users_group_name', perm_set=perm_set, |
|
183 | 183 | perm_checker=HasUserGroupPermissionAny, |
|
184 | 184 | extra_kwargs=extra_kwargs) |
|
185 | 185 | |
|
186 | 186 | |
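
To illustrate the permission-checking iterators above, a hedged usage sketch. Repository.query() mirrors the RepoGroup.query() call used later in this module; treat the exact API as an assumption:

    from rhodecode.model.db import Repository

    all_repos = Repository.query().all()
    readable = RepoList(all_repos, perm_set=['repository.read'])
    for repo in readable:
        # the permission check runs lazily, per object, during iteration
        print(repo.repo_name)
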
|
187 | 187 | class ScmModel(BaseModel): |
|
188 | 188 | """ |
|
189 | 189 | Generic Scm Model |
|
190 | 190 | """ |
|
191 | 191 | |
|
192 | 192 | def repo_scan(self, repos_path=None): |
|
193 | 193 | """ |
|
194 | 194 | Listing of repositories in the given path. This path should not be a |
|
195 | 195 | repository itself. Returns a dictionary of repository objects |
|
196 | 196 | |
|
197 | 197 | :param repos_path: path to directory containing repositories |
|
198 | 198 | """ |
|
199 | 199 | |
|
200 | 200 | if repos_path is None: |
|
201 | 201 | repos_path = self.repos_path |
|
202 | 202 | |
|
203 | 203 | log.info('scanning for repositories in %s', repos_path) |
|
204 | 204 | |
|
205 | 205 | config = make_db_config() |
|
206 | 206 | config.set('extensions', 'largefiles', '') |
|
207 | 207 | repos = {} |
|
208 | 208 | |
|
209 | 209 | for name, path in get_filesystem_repos(repos_path, recursive=True): |
|
210 | 210 | # name needs to be decomposed and put back together using the / |
|
211 | 211 | # since this is the internal storage separator for rhodecode |
|
212 | 212 | name = Repository.normalize_repo_name(name) |
|
213 | 213 | |
|
214 | 214 | try: |
|
215 | 215 | if name in repos: |
|
216 | raise RepositoryError('Duplicate repository name %s ' | |
|
217 | 'found in %s' % (name, path)) | |
|
216 | raise RepositoryError(f'Duplicate repository name {name} found in {path}') | |
|
218 | 217 | elif path[0] in rhodecode.BACKENDS: |
|
219 | 218 | backend = get_backend(path[0]) |
|
220 | repos[name] = backend(path[1], config=config, | |
|
221 | with_wire={"cache": False}) | |
|
219 | repos[name] = backend(path[1], config=config, with_wire={"cache": False}) | |
|
222 | 220 | except OSError: |
|
223 | 221 | continue |
|
224 | 222 | except RepositoryError: |
|
225 | 223 | log.exception('Failed to create a repo') |
|
226 | 224 | continue |
|
227 | 225 | |
|
228 | 226 | log.debug('found %s paths with repositories', len(repos)) |
|
229 | 227 | return repos |
|
230 | 228 | |
|
231 | 229 | def get_repos(self, all_repos=None, sort_key=None): |
|
232 | 230 | """ |
|
233 | 231 | Get all repositories from the db and for each repo create its |
|
234 | 232 | backend instance and fill that backend with information from the database |
|
235 | 233 | |
|
236 | 234 | :param all_repos: list of repository names as strings |
|
237 | 235 | pass a specific list of repositories, useful for filtering |
|
238 | 236 | |
|
239 | 237 | :param sort_key: initial sorting of repositories |
|
240 | 238 | """ |
|
241 | 239 | if all_repos is None: |
|
242 | 240 | all_repos = self.sa.query(Repository)\ |
|
243 | 241 | .filter(Repository.group_id == null())\ |
|
244 | 242 | .order_by(func.lower(Repository.repo_name)).all() |
|
245 | 243 | repo_iter = SimpleCachedRepoList( |
|
246 | 244 | all_repos, repos_path=self.repos_path, order_by=sort_key) |
|
247 | 245 | return repo_iter |
|
248 | 246 | |
|
249 | 247 | @staticmethod |
|
250 | 248 | def get_parent_commits(parent_commit, scm_instance): |
|
251 | 249 | if not parent_commit: |
|
252 | 250 | parent_commit = EmptyCommit(alias=scm_instance.alias) |
|
253 | 251 | |
|
254 | 252 | if isinstance(parent_commit, EmptyCommit): |
|
255 | 253 | # EmptyCommit means we're editing empty repository |
|
256 | 254 | parents = None |
|
257 | 255 | else: |
|
258 | 256 | parents = [parent_commit] |
|
259 | 257 | return parent_commit, parents |
|
260 | 258 | |
|
261 | 259 | def initialize_inmemory_vars(self, user, repo, message, author): |
|
262 | 260 | """ |
|
263 | 261 | Initialize node specific objects for further usage |
|
264 | 262 | """ |
|
265 | 263 | user = self._get_user(user) |
|
266 | 264 | scm_instance = repo.scm_instance(cache=False) |
|
267 | 265 | message = safe_str(message) |
|
268 | 266 | commiter = user.full_contact |
|
269 | 267 | author = safe_str(author) if author else commiter |
|
270 | 268 | imc = scm_instance.in_memory_commit |
|
271 | 269 | |
|
272 | 270 | return user, scm_instance, message, commiter, author, imc |
|
273 | 271 | |
|
274 | 272 | def get_repo_groups(self, all_groups=None): |
|
275 | 273 | if all_groups is None: |
|
276 | 274 | all_groups = RepoGroup.query()\ |
|
277 | 275 | .filter(RepoGroup.group_parent_id == null()).all() |
|
278 | 276 | return [x for x in RepoGroupList(all_groups)] |
|
279 | 277 | |
|
280 | 278 | def mark_for_invalidation(self, repo_name, delete=False): |
|
281 | 279 | """ |
|
282 | 280 | Mark caches of this repo invalid in the database. `delete` flag |
|
283 | 281 | removes the cache entries |
|
284 | 282 | |
|
285 | 283 | :param repo_name: the repo_name for which caches should be marked |
|
286 | 284 | invalid, or deleted |
|
287 | 285 | :param delete: delete the entry keys instead of setting bool |
|
288 | 286 | flag on them, and also purge caches used by the dogpile |
|
289 | 287 | """ |
|
290 | 288 | repo = Repository.get_by_repo_name(repo_name) |
|
291 | 289 | |
|
292 | 290 | if repo: |
|
293 | 291 | repo_namespace_key = CacheKey.REPO_INVALIDATION_NAMESPACE.format(repo_id=repo.repo_id) |
|
294 | 292 | CacheKey.set_invalidate(repo_namespace_key, delete=delete) |
|
295 | 293 | |
|
296 | 294 | repo_id = repo.repo_id |
|
297 | 295 | config = repo._config |
|
298 | 296 | config.set('extensions', 'largefiles', '') |
|
299 | 297 | repo.update_commit_cache(config=config, cs_cache=None) |
|
300 | 298 | if delete: |
|
301 | 299 | cache_namespace_uid = f'cache_repo.{repo_id}' |
|
302 | 300 | rc_cache.clear_cache_namespace('cache_repo', cache_namespace_uid, method=rc_cache.CLEAR_INVALIDATE) |
|
303 | 301 | |
|
304 | 302 | def toggle_following_repo(self, follow_repo_id, user_id): |
|
305 | 303 | |
|
306 | 304 | f = self.sa.query(UserFollowing)\ |
|
307 | 305 | .filter(UserFollowing.follows_repo_id == follow_repo_id)\ |
|
308 | 306 | .filter(UserFollowing.user_id == user_id).scalar() |
|
309 | 307 | |
|
310 | 308 | if f is not None: |
|
311 | 309 | try: |
|
312 | 310 | self.sa.delete(f) |
|
313 | 311 | return |
|
314 | 312 | except Exception: |
|
315 | 313 | log.error(traceback.format_exc()) |
|
316 | 314 | raise |
|
317 | 315 | |
|
318 | 316 | try: |
|
319 | 317 | f = UserFollowing() |
|
320 | 318 | f.user_id = user_id |
|
321 | 319 | f.follows_repo_id = follow_repo_id |
|
322 | 320 | self.sa.add(f) |
|
323 | 321 | except Exception: |
|
324 | 322 | log.error(traceback.format_exc()) |
|
325 | 323 | raise |
|
326 | 324 | |
|
327 | 325 | def toggle_following_user(self, follow_user_id, user_id): |
|
328 | 326 | f = self.sa.query(UserFollowing)\ |
|
329 | 327 | .filter(UserFollowing.follows_user_id == follow_user_id)\ |
|
330 | 328 | .filter(UserFollowing.user_id == user_id).scalar() |
|
331 | 329 | |
|
332 | 330 | if f is not None: |
|
333 | 331 | try: |
|
334 | 332 | self.sa.delete(f) |
|
335 | 333 | return |
|
336 | 334 | except Exception: |
|
337 | 335 | log.error(traceback.format_exc()) |
|
338 | 336 | raise |
|
339 | 337 | |
|
340 | 338 | try: |
|
341 | 339 | f = UserFollowing() |
|
342 | 340 | f.user_id = user_id |
|
343 | 341 | f.follows_user_id = follow_user_id |
|
344 | 342 | self.sa.add(f) |
|
345 | 343 | except Exception: |
|
346 | 344 | log.error(traceback.format_exc()) |
|
347 | 345 | raise |
|
348 | 346 | |
|
349 | 347 | def is_following_repo(self, repo_name, user_id, cache=False): |
|
350 | 348 | r = self.sa.query(Repository)\ |
|
351 | 349 | .filter(Repository.repo_name == repo_name).scalar() |
|
352 | 350 | |
|
353 | 351 | f = self.sa.query(UserFollowing)\ |
|
354 | 352 | .filter(UserFollowing.follows_repository == r)\ |
|
355 | 353 | .filter(UserFollowing.user_id == user_id).scalar() |
|
356 | 354 | |
|
357 | 355 | return f is not None |
|
358 | 356 | |
|
359 | 357 | def is_following_user(self, username, user_id, cache=False): |
|
360 | 358 | u = User.get_by_username(username) |
|
361 | 359 | |
|
362 | 360 | f = self.sa.query(UserFollowing)\ |
|
363 | 361 | .filter(UserFollowing.follows_user == u)\ |
|
364 | 362 | .filter(UserFollowing.user_id == user_id).scalar() |
|
365 | 363 | |
|
366 | 364 | return f is not None |
|
367 | 365 | |
|
368 | 366 | def get_followers(self, repo): |
|
369 | 367 | repo = self._get_repo(repo) |
|
370 | 368 | |
|
371 | 369 | return self.sa.query(UserFollowing)\ |
|
372 | 370 | .filter(UserFollowing.follows_repository == repo).count() |
|
373 | 371 | |
|
374 | 372 | def get_forks(self, repo): |
|
375 | 373 | repo = self._get_repo(repo) |
|
376 | 374 | return self.sa.query(Repository)\ |
|
377 | 375 | .filter(Repository.fork == repo).count() |
|
378 | 376 | |
|
379 | 377 | def get_pull_requests(self, repo): |
|
380 | 378 | repo = self._get_repo(repo) |
|
381 | 379 | return self.sa.query(PullRequest)\ |
|
382 | 380 | .filter(PullRequest.target_repo == repo)\ |
|
383 | 381 | .filter(PullRequest.status != PullRequest.STATUS_CLOSED).count() |
|
384 | 382 | |
|
385 | 383 | def get_artifacts(self, repo): |
|
386 | 384 | repo = self._get_repo(repo) |
|
387 | 385 | return self.sa.query(FileStore)\ |
|
388 | 386 | .filter(FileStore.repo == repo)\ |
|
389 | 387 | .filter(or_(FileStore.hidden == null(), FileStore.hidden == false())).count() |
|
390 | 388 | |
|
391 | 389 | def mark_as_fork(self, repo, fork, user): |
|
392 | 390 | repo = self._get_repo(repo) |
|
393 | 391 | fork = self._get_repo(fork) |
|
394 | 392 | if fork and repo.repo_id == fork.repo_id: |
|
395 | 393 | raise Exception("Cannot set repository as fork of itself") |
|
396 | 394 | |
|
397 | 395 | if fork and repo.repo_type != fork.repo_type: |
|
398 | 396 | raise RepositoryError( |
|
399 | 397 | "Cannot set repository as fork of repository with other type") |
|
400 | 398 | |
|
401 | 399 | repo.fork = fork |
|
402 | 400 | self.sa.add(repo) |
|
403 | 401 | return repo |
|
404 | 402 | |
|
405 | 403 | def pull_changes(self, repo, username, remote_uri=None, validate_uri=True, **kwargs): |
|
406 | 404 | dbrepo = self._get_repo(repo) |
|
407 | 405 | remote_uri = remote_uri or dbrepo.clone_uri |
|
408 | 406 | if not remote_uri: |
|
409 | 407 | raise Exception("This repository doesn't have a clone uri") |
|
410 | 408 | |
|
411 | 409 | repo = dbrepo.scm_instance(cache=False) |
|
412 | 410 | repo.config.clear_section('hooks') |
|
413 | 411 | |
|
414 | 412 | try: |
|
415 | 413 | # NOTE(marcink): add extra validation so we skip invalid urls |
|
416 | 414 | # this is because these tasks can be executed via the scheduler without |
|
417 | 415 | # proper validation of remote_uri |
|
418 | 416 | if validate_uri: |
|
419 | 417 | config = make_db_config(clear_session=False) |
|
420 | 418 | url_validator(remote_uri, dbrepo.repo_type, config) |
|
421 | 419 | except InvalidCloneUrl: |
|
422 | 420 | raise |
|
423 | 421 | |
|
424 | 422 | repo_name = dbrepo.repo_name |
|
425 | 423 | try: |
|
426 | 424 | # TODO: we need to make sure those operations call proper hooks ! |
|
427 | 425 | repo.fetch(remote_uri, **kwargs) |
|
428 | 426 | |
|
429 | 427 | self.mark_for_invalidation(repo_name) |
|
430 | 428 | except Exception: |
|
431 | 429 | log.error(traceback.format_exc()) |
|
432 | 430 | raise |
|
433 | 431 | |
|
434 | 432 | def push_changes(self, repo, username, remote_uri=None, validate_uri=True, **kwargs): |
|
435 | 433 | dbrepo = self._get_repo(repo) |
|
436 | 434 | remote_uri = remote_uri or dbrepo.push_uri |
|
437 | 435 | if not remote_uri: |
|
438 | 436 | raise Exception("This repository doesn't have a push uri") |
|
439 | 437 | |
|
440 | 438 | repo = dbrepo.scm_instance(cache=False) |
|
441 | 439 | repo.config.clear_section('hooks') |
|
442 | 440 | |
|
443 | 441 | try: |
|
444 | 442 | # NOTE(marcink): add extra validation so we skip invalid urls |
|
445 | 443 | # this is because these tasks can be executed via the scheduler without |
|
446 | 444 | # proper validation of remote_uri |
|
447 | 445 | if validate_uri: |
|
448 | 446 | config = make_db_config(clear_session=False) |
|
449 | 447 | url_validator(remote_uri, dbrepo.repo_type, config) |
|
450 | 448 | except InvalidCloneUrl: |
|
451 | 449 | raise |
|
452 | 450 | |
|
453 | 451 | try: |
|
454 | 452 | repo.push(remote_uri, **kwargs) |
|
455 | 453 | except Exception: |
|
456 | 454 | log.error(traceback.format_exc()) |
|
457 | 455 | raise |
|
458 | 456 | |
|
459 | 457 | def commit_change(self, repo, repo_name, commit, user, author, message, |
|
460 | 458 | content: bytes, f_path: bytes, branch: str = None): |
|
461 | 459 | """ |
|
462 | 460 | Commits changes |
|
463 | 461 | """ |
|
464 | 462 | user = self._get_user(user) |
|
465 | 463 | |
|
466 | 464 | # message and author need to be unicode |
|
467 | 465 | # the proper backend should then translate that into the required type |
|
468 | 466 | message = safe_str(message) |
|
469 | 467 | author = safe_str(author) |
|
470 | 468 | imc = repo.in_memory_commit |
|
471 | 469 | imc.change(FileNode(f_path, content, mode=commit.get_file_mode(f_path))) |
|
472 | 470 | try: |
|
473 | 471 | # TODO: handle pre-push action ! |
|
474 | 472 | tip = imc.commit( |
|
475 | 473 | message=message, author=author, parents=[commit], |
|
476 | 474 | branch=branch or commit.branch) |
|
477 | 475 | except Exception as e: |
|
478 | 476 | log.error(traceback.format_exc()) |
|
479 | 477 | raise IMCCommitError(str(e)) |
|
480 | 478 | finally: |
|
481 | 479 | # always clear caches; if the commit fails we still want a fresh object |
|
482 | 480 | self.mark_for_invalidation(repo_name) |
|
483 | 481 | |
|
484 | 482 | # We trigger the post-push action |
|
485 | 483 | hooks_utils.trigger_post_push_hook( |
|
486 | 484 | username=user.username, action='push_local', hook_type='post_push', |
|
487 | 485 | repo_name=repo_name, repo_type=repo.alias, commit_ids=[tip.raw_id]) |
|
488 | 486 | return tip |
|
489 | 487 | |
|
490 | 488 | def _sanitize_path(self, f_path: bytes): |
|
491 | 489 | if f_path.startswith(b'/') or f_path.startswith(b'./') or b'../' in f_path: |
|
492 | 490 | raise NonRelativePathError(b'%b is not a relative path' % f_path) |
|
493 | 491 | if f_path: |
|
494 | 492 | f_path = os.path.normpath(f_path) |
|
495 | 493 | return f_path |
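
A standalone sketch of the traversal guard above, with illustrative inputs:

    import os
    from rhodecode.lib.exceptions import NonRelativePathError

    def sanitize_path_sketch(f_path: bytes) -> bytes:
        # mirrors ScmModel._sanitize_path: reject absolute paths and ../ tricks
        if f_path.startswith(b'/') or f_path.startswith(b'./') or b'../' in f_path:
            raise NonRelativePathError(b'%b is not a relative path' % f_path)
        return os.path.normpath(f_path) if f_path else f_path

    assert sanitize_path_sketch(b'docs/readme.rst') == b'docs/readme.rst'
    # sanitize_path_sketch(b'../etc/passwd') raises NonRelativePathError
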
|
496 | 494 | |
|
497 | 495 | def get_dirnode_metadata(self, request, commit, dir_node): |
|
498 | 496 | if not dir_node.is_dir(): |
|
499 | 497 | return [] |
|
500 | 498 | |
|
501 | 499 | data = [] |
|
502 | 500 | for node in dir_node: |
|
503 | 501 | if not node.is_file(): |
|
504 | 502 | # we skip file-nodes |
|
505 | 503 | continue |
|
506 | 504 | |
|
507 | 505 | last_commit = node.last_commit |
|
508 | 506 | last_commit_date = last_commit.date |
|
509 | 507 | data.append({ |
|
510 | 508 | 'name': node.name, |
|
511 | 509 | 'size': h.format_byte_size_binary(node.size), |
|
512 | 510 | 'modified_at': h.format_date(last_commit_date), |
|
513 | 511 | 'modified_ts': last_commit_date.isoformat(), |
|
514 | 512 | 'revision': last_commit.revision, |
|
515 | 513 | 'short_id': last_commit.short_id, |
|
516 | 514 | 'message': h.escape(last_commit.message), |
|
517 | 515 | 'author': h.escape(last_commit.author), |
|
518 | 516 | 'user_profile': h.gravatar_with_user( |
|
519 | 517 | request, last_commit.author), |
|
520 | 518 | }) |
|
521 | 519 | |
|
522 | 520 | return data |
|
523 | 521 | |
|
524 | 522 | def get_nodes(self, repo_name, commit_id, root_path='/', flat=True, |
|
525 | 523 | extended_info=False, content=False, max_file_bytes=None): |
|
526 | 524 | """ |
|
527 | 525 | recursively walk the root dir and return a set of all paths in that dir, |
|
528 | 526 | based on the repository walk function |
|
529 | 527 | |
|
530 | 528 | :param repo_name: name of repository |
|
531 | 529 | :param commit_id: commit id for which to list nodes |
|
532 | 530 | :param root_path: root path to list |
|
533 | 531 | :param flat: return as a list, if False returns a dict with description |
|
534 | 532 | :param extended_info: show additional info such as md5, binary, size etc |
|
535 | 533 | :param content: add nodes content to the return data |
|
536 | 534 | :param max_file_bytes: will not return file contents over this limit |
|
537 | 535 | |
|
538 | 536 | """ |
|
539 | 537 | _files = list() |
|
540 | 538 | _dirs = list() |
|
541 | 539 | |
|
542 | 540 | try: |
|
543 | 541 | _repo = self._get_repo(repo_name) |
|
544 | 542 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
545 | 543 | root_path = root_path.lstrip('/') |
|
546 | 544 | |
|
547 | 545 | # get RootNode, inject pre-load options before walking |
|
548 | 546 | top_node = commit.get_node(root_path) |
|
549 | 547 | extended_info_pre_load = [] |
|
550 | 548 | if extended_info: |
|
551 | 549 | extended_info_pre_load += ['md5'] |
|
552 | 550 | top_node.default_pre_load = ['is_binary', 'size'] + extended_info_pre_load |
|
553 | 551 | |
|
554 | 552 | for __, dirs, files in commit.walk(top_node): |
|
555 | 553 | |
|
556 | 554 | for f in files: |
|
557 | 555 | _content = None |
|
558 | 556 | _data = f_name = f.str_path |
|
559 | 557 | |
|
560 | 558 | if not flat: |
|
561 | 559 | _data = { |
|
562 | 560 | "name": h.escape(f_name), |
|
563 | 561 | "type": "file", |
|
564 | 562 | } |
|
565 | 563 | if extended_info: |
|
566 | 564 | _data.update({ |
|
567 | 565 | "md5": f.md5, |
|
568 | 566 | "binary": f.is_binary, |
|
569 | 567 | "size": f.size, |
|
570 | 568 | "extension": f.extension, |
|
571 | 569 | "mimetype": f.mimetype, |
|
572 | 570 | "lines": f.lines()[0] |
|
573 | 571 | }) |
|
574 | 572 | |
|
575 | 573 | if content: |
|
576 | 574 | over_size_limit = (max_file_bytes is not None |
|
577 | 575 | and f.size > max_file_bytes) |
|
578 | 576 | full_content = None |
|
579 | 577 | if not f.is_binary and not over_size_limit: |
|
580 | 578 | full_content = f.str_content |
|
581 | 579 | |
|
582 | 580 | _data.update({ |
|
583 | 581 | "content": full_content, |
|
584 | 582 | }) |
|
585 | 583 | _files.append(_data) |
|
586 | 584 | |
|
587 | 585 | for d in dirs: |
|
588 | 586 | _data = d_name = d.str_path |
|
589 | 587 | if not flat: |
|
590 | 588 | _data = { |
|
591 | 589 | "name": h.escape(d_name), |
|
592 | 590 | "type": "dir", |
|
593 | 591 | } |
|
594 | 592 | if extended_info: |
|
595 | 593 | _data.update({ |
|
596 | 594 | "md5": "", |
|
597 | 595 | "binary": False, |
|
598 | 596 | "size": 0, |
|
599 | 597 | "extension": "", |
|
600 | 598 | }) |
|
601 | 599 | if content: |
|
602 | 600 | _data.update({ |
|
603 | 601 | "content": None |
|
604 | 602 | }) |
|
605 | 603 | _dirs.append(_data) |
|
606 | 604 | except RepositoryError: |
|
607 | 605 | log.exception("Exception in get_nodes") |
|
608 | 606 | raise |
|
609 | 607 | |
|
610 | 608 | return _dirs, _files |
|
611 | 609 | |
|
612 | 610 | def get_quick_filter_nodes(self, repo_name, commit_id, root_path='/'): |
|
613 | 611 | """ |
|
614 | 612 | Generate files for quick filter in files view |
|
615 | 613 | """ |
|
616 | 614 | |
|
617 | 615 | _files = list() |
|
618 | 616 | _dirs = list() |
|
619 | 617 | try: |
|
620 | 618 | _repo = self._get_repo(repo_name) |
|
621 | 619 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
622 | 620 | root_path = root_path.lstrip('/') |
|
623 | 621 | |
|
624 | 622 | top_node = commit.get_node(root_path) |
|
625 | 623 | top_node.default_pre_load = [] |
|
626 | 624 | |
|
627 | 625 | for __, dirs, files in commit.walk(top_node): |
|
628 | 626 | for f in files: |
|
629 | 627 | |
|
630 | 628 | _data = { |
|
631 | 629 | "name": h.escape(f.str_path), |
|
632 | 630 | "type": "file", |
|
633 | 631 | } |
|
634 | 632 | |
|
635 | 633 | _files.append(_data) |
|
636 | 634 | |
|
637 | 635 | for d in dirs: |
|
638 | 636 | |
|
639 | 637 | _data = { |
|
640 | 638 | "name": h.escape(d.str_path), |
|
641 | 639 | "type": "dir", |
|
642 | 640 | } |
|
643 | 641 | |
|
644 | 642 | _dirs.append(_data) |
|
645 | 643 | except RepositoryError: |
|
646 | 644 | log.exception("Exception in get_quick_filter_nodes") |
|
647 | 645 | raise |
|
648 | 646 | |
|
649 | 647 | return _dirs, _files |
|
650 | 648 | |
|
651 | 649 | def get_node(self, repo_name, commit_id, file_path, |
|
652 | 650 | extended_info=False, content=False, max_file_bytes=None, cache=True): |
|
653 | 651 | """ |
|
654 | 652 | retrieve single node from commit |
|
655 | 653 | """ |
|
656 | 654 | |
|
657 | 655 | try: |
|
658 | 656 | |
|
659 | 657 | _repo = self._get_repo(repo_name) |
|
660 | 658 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
661 | 659 | |
|
662 | 660 | file_node = commit.get_node(file_path) |
|
663 | 661 | if file_node.is_dir(): |
|
664 | 662 | raise RepositoryError('The given path is a directory') |
|
665 | 663 | |
|
666 | 664 | _content = None |
|
667 | 665 | f_name = file_node.str_path |
|
668 | 666 | |
|
669 | 667 | file_data = { |
|
670 | 668 | "name": h.escape(f_name), |
|
671 | 669 | "type": "file", |
|
672 | 670 | } |
|
673 | 671 | |
|
674 | 672 | if extended_info: |
|
675 | 673 | file_data.update({ |
|
676 | 674 | "extension": file_node.extension, |
|
677 | 675 | "mimetype": file_node.mimetype, |
|
678 | 676 | }) |
|
679 | 677 | |
|
680 | 678 | if cache: |
|
681 | 679 | md5 = file_node.md5 |
|
682 | 680 | is_binary = file_node.is_binary |
|
683 | 681 | size = file_node.size |
|
684 | 682 | else: |
|
685 | 683 | is_binary, md5, size, _content = file_node.metadata_uncached() |
|
686 | 684 | |
|
687 | 685 | file_data.update({ |
|
688 | 686 | "md5": md5, |
|
689 | 687 | "binary": is_binary, |
|
690 | 688 | "size": size, |
|
691 | 689 | }) |
|
692 | 690 | |
|
693 | 691 | if content and cache: |
|
694 | 692 | # get content + cache |
|
695 | 693 | size = file_node.size |
|
696 | 694 | over_size_limit = (max_file_bytes is not None and size > max_file_bytes) |
|
697 | 695 | full_content = None |
|
698 | 696 | all_lines = 0 |
|
699 | 697 | if not file_node.is_binary and not over_size_limit: |
|
700 | 698 | full_content = safe_str(file_node.content) |
|
701 | 699 | all_lines, empty_lines = file_node.count_lines(full_content) |
|
702 | 700 | |
|
703 | 701 | file_data.update({ |
|
704 | 702 | "content": full_content, |
|
705 | 703 | "lines": all_lines |
|
706 | 704 | }) |
|
707 | 705 | elif content: |
|
708 | 706 | # get content *without* cache |
|
709 | 707 | if _content is None: |
|
710 | 708 | is_binary, md5, size, _content = file_node.metadata_uncached() |
|
711 | 709 | |
|
712 | 710 | over_size_limit = (max_file_bytes is not None and size > max_file_bytes) |
|
713 | 711 | full_content = None |
|
714 | 712 | all_lines = 0 |
|
715 | 713 | if not is_binary and not over_size_limit: |
|
716 | 714 | full_content = safe_str(_content) |
|
717 | 715 | all_lines, empty_lines = file_node.count_lines(full_content) |
|
718 | 716 | |
|
719 | 717 | file_data.update({ |
|
720 | 718 | "content": full_content, |
|
721 | 719 | "lines": all_lines |
|
722 | 720 | }) |
|
723 | 721 | |
|
724 | 722 | except RepositoryError: |
|
725 | 723 | log.exception("Exception in get_node") |
|
726 | 724 | raise |
|
727 | 725 | |
|
728 | 726 | return file_data |
|
729 | 727 | |
|
730 | 728 | def get_fts_data(self, repo_name, commit_id, root_path='/'): |
|
731 | 729 | """ |
|
732 | 730 | Fetch node tree for usage in full text search |
|
733 | 731 | """ |
|
734 | 732 | |
|
735 | 733 | tree_info = list() |
|
736 | 734 | |
|
737 | 735 | try: |
|
738 | 736 | _repo = self._get_repo(repo_name) |
|
739 | 737 | commit = _repo.scm_instance().get_commit(commit_id=commit_id) |
|
740 | 738 | root_path = root_path.lstrip('/') |
|
741 | 739 | top_node = commit.get_node(root_path) |
|
742 | 740 | top_node.default_pre_load = [] |
|
743 | 741 | |
|
744 | 742 | for __, dirs, files in commit.walk(top_node): |
|
745 | 743 | |
|
746 | 744 | for f in files: |
|
747 | 745 | is_binary, md5, size, _content = f.metadata_uncached() |
|
748 | 746 | _data = { |
|
749 | 747 | "name": f.str_path, |
|
750 | 748 | "md5": md5, |
|
751 | 749 | "extension": f.extension, |
|
752 | 750 | "binary": is_binary, |
|
753 | 751 | "size": size |
|
754 | 752 | } |
|
755 | 753 | |
|
756 | 754 | tree_info.append(_data) |
|
757 | 755 | |
|
758 | 756 | except RepositoryError: |
|
759 | 757 | log.exception("Exception in get_nodes") |
|
760 | 758 | raise |
|
761 | 759 | |
|
762 | 760 | return tree_info |
|
763 | 761 | |
|
764 | 762 | def create_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
765 | 763 | author=None, trigger_push_hook=True): |
|
766 | 764 | """ |
|
767 | 765 | Commits the given multiple nodes into the repo |
|
768 | 766 | |
|
769 | 767 | :param user: RhodeCode User object or user_id, the committer |
|
770 | 768 | :param repo: RhodeCode Repository object |
|
771 | 769 | :param message: commit message |
|
772 | 770 | :param nodes: mapping {filename:{'content':content},...} |
|
773 | 771 | :param parent_commit: parent commit, can be empty, in which case it's an |
|
774 | 772 | initial commit |
|
775 | 773 | :param author: author of commit, can be different than the committer, |
|
776 | 774 | only for git |
|
777 | 775 | :param trigger_push_hook: trigger push hooks |
|
778 | 776 | |
|
779 | 777 | :returns: new committed commit |
|
780 | 778 | """ |
|
781 | 779 | user, scm_instance, message, commiter, author, imc = self.initialize_inmemory_vars( |
|
782 | 780 | user, repo, message, author) |
|
783 | 781 | |
|
784 | 782 | parent_commit, parents = self.get_parent_commits(parent_commit, scm_instance) |
|
785 | 783 | |
|
786 | 784 | upload_file_types = (io.BytesIO, io.BufferedRandom) |
|
787 | 785 | processed_nodes = [] |
|
788 | 786 | for filename, content_dict in nodes.items(): |
|
789 | 787 | if not isinstance(filename, bytes): |
|
790 | 788 | raise ValueError('filename key in nodes needs to be bytes') |
|
791 | 789 | content = content_dict['content'] |
|
792 | 790 | if not isinstance(content, upload_file_types + (bytes,)): |
|
793 | 791 | raise ValueError('content key value in nodes needs to be bytes') |
|
794 | 792 | |
|
795 | 793 | for f_path in nodes: |
|
796 | 794 | f_path = self._sanitize_path(f_path) |
|
797 | 795 | content = nodes[f_path]['content'] |
|
798 | 796 | |
|
799 | 797 | # decoding here ensures that we have properly encoded values |
|
800 | 798 | # in any other case this will raise exceptions and deny the commit |
|
801 | 799 | |
|
802 | 800 | if isinstance(content, bytes): |
|
803 | 801 | pass |
|
804 | 802 | elif isinstance(content, upload_file_types): |
|
805 | 803 | content = content.read() |
|
806 | 804 | else: |
|
807 | 805 | raise Exception(f'Content is of unrecognized type {type(content)}, expected {upload_file_types}') |
|
808 | 806 | processed_nodes.append((f_path, content)) |
|
809 | 807 | |
|
810 | 808 | # add multiple nodes |
|
811 | 809 | for path, content in processed_nodes: |
|
812 | 810 | imc.add(FileNode(path, content=content)) |
|
813 | 811 | |
|
814 | 812 | # TODO: handle pre push scenario |
|
815 | 813 | tip = imc.commit(message=message, |
|
816 | 814 | author=author, |
|
817 | 815 | parents=parents, |
|
818 | 816 | branch=parent_commit.branch) |
|
819 | 817 | |
|
820 | 818 | self.mark_for_invalidation(repo.repo_name) |
|
821 | 819 | if trigger_push_hook: |
|
822 | 820 | hooks_utils.trigger_post_push_hook( |
|
823 | 821 | username=user.username, action='push_local', |
|
824 | 822 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
825 | 823 | hook_type='post_push', |
|
826 | 824 | commit_ids=[tip.raw_id]) |
|
827 | 825 | return tip |
|
828 | 826 | |
|
829 | 827 | def update_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
830 | 828 | author=None, trigger_push_hook=True): |
|
831 | 829 | user, scm_instance, message, commiter, author, imc = self.initialize_inmemory_vars( |
|
832 | 830 | user, repo, message, author) |
|
833 | 831 | |
|
834 | 832 | parent_commit, parents = self.get_parent_commits(parent_commit, scm_instance) |
|
835 | 833 | |
|
836 | 834 | # add multiple nodes |
|
837 | 835 | for _filename, data in nodes.items(): |
|
838 | 836 | # new filename, can be renamed from the old one; also sanitize |
|
839 | 837 | # the path against any tricks with relative paths like ../../ etc. |
|
840 | 838 | filename = self._sanitize_path(data['filename']) |
|
841 | 839 | old_filename = self._sanitize_path(_filename) |
|
842 | 840 | content = data['content'] |
|
843 | 841 | file_mode = data.get('mode') |
|
844 | 842 | filenode = FileNode(old_filename, content=content, mode=file_mode) |
|
845 | 843 | op = data['op'] |
|
846 | 844 | if op == 'add': |
|
847 | 845 | imc.add(filenode) |
|
848 | 846 | elif op == 'del': |
|
849 | 847 | imc.remove(filenode) |
|
850 | 848 | elif op == 'mod': |
|
851 | 849 | if filename != old_filename: |
|
852 | 850 | # TODO: handle renames more efficiently, needs vcs lib changes |
|
853 | 851 | imc.remove(filenode) |
|
854 | 852 | imc.add(FileNode(filename, content=content, mode=file_mode)) |
|
855 | 853 | else: |
|
856 | 854 | imc.change(filenode) |
|
857 | 855 | |
|
858 | 856 | try: |
|
859 | 857 | # TODO: handle pre push scenario commit changes |
|
860 | 858 | tip = imc.commit(message=message, |
|
861 | 859 | author=author, |
|
862 | 860 | parents=parents, |
|
863 | 861 | branch=parent_commit.branch) |
|
864 | 862 | except NodeNotChangedError: |
|
865 | 863 | raise |
|
866 | 864 | except Exception as e: |
|
867 | 865 | log.exception("Unexpected exception during call to imc.commit") |
|
868 | 866 | raise IMCCommitError(str(e)) |
|
869 | 867 | finally: |
|
870 | 868 | # always clear caches; if the commit fails we still want a fresh object |
|
871 | 869 | self.mark_for_invalidation(repo.repo_name) |
|
872 | 870 | |
|
873 | 871 | if trigger_push_hook: |
|
874 | 872 | hooks_utils.trigger_post_push_hook( |
|
875 | 873 | username=user.username, action='push_local', hook_type='post_push', |
|
876 | 874 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
877 | 875 | commit_ids=[tip.raw_id]) |
|
878 | 876 | |
|
879 | 877 | return tip |
|
880 | 878 | |
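A sketch of the `nodes` mapping `update_nodes` expects, inferred from the loop above: keys are the old filenames, `'filename'` may differ to express a rename (handled as remove plus add), and `'op'` selects the branch taken. The model class name `ScmModel` and the `db_user`/`db_repo` objects in the commented call are assumptions, not confirmed API.

```python
# Illustrative mapping only; the structure mirrors what update_nodes reads.
nodes = {
    'old/name.txt': {
        'filename': 'new/name.txt',   # differs from the key -> rename
        'content': b'updated content',
        'mode': None,                 # optional file mode, read via data.get('mode')
        'op': 'mod',                  # one of 'add', 'del', 'mod'
    },
}
# tip = ScmModel().update_nodes(user=db_user, repo=db_repo,
#                               message='Rename and update', nodes=nodes)
```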
|
881 | 879 | def update_binary_node(self, user, repo, message, node, parent_commit=None, author=None): |
|
882 | 880 | user, scm_instance, message, committer, author, imc = self.initialize_inmemory_vars( |
|
883 | 881 | user, repo, message, author) |
|
884 | 882 | |
|
885 | 883 | parent_commit, parents = self.get_parent_commits(parent_commit, scm_instance) |
|
886 | 884 | |
|
887 | 885 | file_path = node.get('file_path') |
|
888 | 886 | if isinstance(raw_content := node.get('content'), (io.BytesIO, io.BufferedRandom)): |
|
889 | 887 | content = raw_content.read() |
|
890 | 888 | else: |
|
891 | 889 | raise Exception(f'Wrong content type {type(raw_content)} provided, expected io.BytesIO or io.BufferedRandom') |
|
892 | 890 | file_node = FileNode(file_path, content=content) |
|
893 | 891 | imc.change(file_node) |
|
894 | 892 | |
|
895 | 893 | try: |
|
896 | 894 | tip = imc.commit(message=message, |
|
897 | 895 | author=author, |
|
898 | 896 | parents=parents, |
|
899 | 897 | branch=parent_commit.branch) |
|
900 | 898 | except NodeNotChangedError: |
|
901 | 899 | raise |
|
902 | 900 | except Exception as e: |
|
903 | 901 | log.exception("Unexpected exception during call to imc.commit") |
|
904 | 902 | raise IMCCommitError(str(e)) |
|
905 | 903 | finally: |
|
906 | 904 | self.mark_for_invalidation(repo.repo_name) |
|
907 | 905 | |
|
908 | 906 | hooks_utils.trigger_post_push_hook( |
|
909 | 907 | username=user.username, action='push_local', hook_type='post_push', |
|
910 | 908 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
911 | 909 | commit_ids=[tip.raw_id]) |
|
912 | 910 | return tip |
|
913 | 911 | |
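The walrus check above accepts only `io.BytesIO` or `io.BufferedRandom` content, so callers must wrap raw bytes first. A minimal sketch of a valid `node` argument, with an invented path:

```python
import io

# Wrap raw bytes so the isinstance check in update_binary_node passes;
# plain bytes or str would hit the "Wrong content ..." branch instead.
node = {
    'file_path': 'assets/logo.png',
    'content': io.BytesIO(b'\x89PNG\r\n\x1a\n'),
}
```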
|
914 | 912 | def delete_nodes(self, user, repo, message, nodes, parent_commit=None, |
|
915 | 913 | author=None, trigger_push_hook=True): |
|
916 | 914 | """ |
|
917 | 915 | Deletes the given nodes from `repo` |
|
918 | 916 | |
|
919 | 917 | :param user: RhodeCode User object or user_id, the committer |
|
920 | 918 | :param repo: RhodeCode Repository object |
|
921 | 919 | :param message: commit message |
|
922 | 920 | :param nodes: mapping {filename:{'content':content},...} |
|
923 | 921 | :param parent_commit: parent commit; if empty, this will be the |
|
924 | 922 | initial commit |
|
925 | 923 | :param author: author of the commit; can differ from the committer, |
|
926 | 924 | but only for git |
|
927 | 925 | :param trigger_push_hook: trigger push hooks |
|
928 | 926 | |
|
929 | 927 | :returns: new commit after deletion |
|
930 | 928 | """ |
|
931 | 929 | |
|
932 | 930 | user, scm_instance, message, committer, author, imc = self.initialize_inmemory_vars( |
|
933 | 931 | user, repo, message, author) |
|
934 | 932 | |
|
935 | 933 | processed_nodes = [] |
|
936 | 934 | for f_path in nodes: |
|
937 | 935 | f_path = self._sanitize_path(f_path) |
|
938 | 936 | # content can be empty, but for compatibility the same dict |
|
939 | 937 | # structure as in add_nodes is allowed |
|
940 | 938 | content = nodes[f_path].get('content') |
|
941 | 939 | processed_nodes.append((safe_bytes(f_path), content)) |
|
942 | 940 | |
|
943 | 941 | parent_commit, parents = self.get_parent_commits(parent_commit, scm_instance) |
|
944 | 942 | |
|
945 | 943 | # add multiple nodes |
|
946 | 944 | for path, content in processed_nodes: |
|
947 | 945 | imc.remove(FileNode(path, content=content)) |
|
948 | 946 | |
|
949 | 947 | # TODO: handle pre push scenario |
|
950 | 948 | tip = imc.commit(message=message, |
|
951 | 949 | author=author, |
|
952 | 950 | parents=parents, |
|
953 | 951 | branch=parent_commit.branch) |
|
954 | 952 | |
|
955 | 953 | self.mark_for_invalidation(repo.repo_name) |
|
956 | 954 | if trigger_push_hook: |
|
957 | 955 | hooks_utils.trigger_post_push_hook( |
|
958 | 956 | username=user.username, action='push_local', hook_type='post_push', |
|
959 | 957 | repo_name=repo.repo_name, repo_type=scm_instance.alias, |
|
960 | 958 | commit_ids=[tip.raw_id]) |
|
961 | 959 | return tip |
|
962 | 960 | |
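Since only the paths matter for removal, `'content'` may be empty; the dict-of-dicts shape is kept purely for compatibility with the add flow, as the comment above notes. A sketch of a valid payload, with invented paths:

```python
# Paths to delete; 'content' is optional and read with .get('content').
nodes = {
    'obsolete/a.txt': {},
    'obsolete/b.txt': {'content': None},
}
```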
|
963 | 961 | def strip(self, repo, commit_id, branch): |
|
964 | 962 | scm_instance = repo.scm_instance(cache=False) |
|
965 | 963 | scm_instance.config.clear_section('hooks') |
|
966 | 964 | scm_instance.strip(commit_id, branch) |
|
967 | 965 | self.mark_for_invalidation(repo.repo_name) |
|
968 | 966 | |
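A hedged usage sketch for `strip`: the `'hooks'` config section is cleared first so the history rewrite fires no repository hooks, and caches are invalidated afterwards. `ScmModel` and `db_repo` are illustrative names, not confirmed API, so the call is left commented.

```python
# Sketch only; ScmModel / db_repo are assumed names.
# ScmModel().strip(repo=db_repo,
#                  commit_id='f' * 40,   # full sha of the commit to strip
#                  branch='default')
```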
|
969 | 967 | def get_unread_journal(self): |
|
970 | 968 | return self.sa.query(UserLog).count() |
|
971 | 969 | |
|
972 | 970 | @classmethod |
|
973 | 971 | def backend_landing_ref(cls, repo_type): |
|
974 | 972 | """ |
|
975 | 973 | Return a default landing ref based on a repository type. |
|
976 | 974 | """ |
|
977 | 975 | |
|
978 | 976 | landing_ref = { |
|
979 | 977 | 'hg': ('branch:default', 'default'), |
|
980 | 978 | 'git': ('branch:master', 'master'), |
|
981 | 979 | 'svn': ('rev:tip', 'latest tip'), |
|
982 | 980 | 'default': ('rev:tip', 'latest tip'), |
|
983 | 981 | } |
|
984 | 982 | |
|
985 | 983 | return landing_ref.get(repo_type) or landing_ref['default'] |
|
986 | 984 | |
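Because the lookup ends with `or landing_ref['default']`, `backend_landing_ref` never returns `None`. The same dict-with-fallback idiom, reproduced standalone so the behaviour for an unknown repo type is visible:

```python
# Standalone copy of the table above; unknown types fall back to 'default'.
landing_ref = {
    'hg': ('branch:default', 'default'),
    'git': ('branch:master', 'master'),
    'svn': ('rev:tip', 'latest tip'),
    'default': ('rev:tip', 'latest tip'),
}
assert landing_ref.get('hg') == ('branch:default', 'default')
assert (landing_ref.get('mystery-vcs') or landing_ref['default']) == ('rev:tip', 'latest tip')
```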
|
987 | 985 | def get_repo_landing_revs(self, translator, repo=None): |
|
988 | 986 | """ |
|
989 | 987 | Generates select options with tags, branches and bookmarks |
|
990 | 988 | (bookmarks for hg only), grouped by type |
|
991 | 989 | |
|
992 | 990 | :param repo: repository name or Repository object |
|
993 | 991 | """ |
|
994 | 992 | from rhodecode.lib.vcs.backends.git import GitRepository |
|
995 | 993 | |
|
996 | 994 | _ = translator |
|
997 | 995 | repo = self._get_repo(repo) |
|
998 | 996 | |
|
999 | 997 | if repo: |
|
1000 | 998 | repo_type = repo.repo_type |
|
1001 | 999 | else: |
|
1002 | 1000 | repo_type = 'default' |
|
1003 | 1001 | |
|
1004 | 1002 | default_landing_ref, landing_ref_lbl = self.backend_landing_ref(repo_type) |
|
1005 | 1003 | |
|
1006 | 1004 | default_ref_options = [ |
|
1007 | 1005 | [default_landing_ref, landing_ref_lbl] |
|
1008 | 1006 | ] |
|
1009 | 1007 | default_choices = [ |
|
1010 | 1008 | default_landing_ref |
|
1011 | 1009 | ] |
|
1012 | 1010 | |
|
1013 | 1011 | if not repo: |
|
1014 | 1012 | # presented at NEW repo creation |
|
1015 | 1013 | return default_choices, default_ref_options |
|
1016 | 1014 | |
|
1017 | 1015 | repo = repo.scm_instance() |
|
1018 | 1016 | |
|
1019 | 1017 | ref_options = [(default_landing_ref, landing_ref_lbl)] |
|
1020 | 1018 | choices = [default_landing_ref] |
|
1021 | 1019 | |
|
1022 | 1020 | # branches |
|
1023 | 1021 | branch_group = [(f'branch:{safe_str(b)}', safe_str(b)) for b in repo.branches] |
|
1024 | 1022 | if not branch_group: |
|
1025 | 1023 | # new repo, or possibly one without any branches yet |
|
1026 | 1024 | branch_group = default_ref_options |
|
1027 | 1025 | |
|
1028 | 1026 | branches_group = (branch_group, _("Branches")) |
|
1029 | 1027 | ref_options.append(branches_group) |
|
1030 | 1028 | choices.extend([x[0] for x in branches_group[0]]) |
|
1031 | 1029 | |
|
1032 | 1030 | # bookmarks for HG |
|
1033 | 1031 | if repo.alias == 'hg': |
|
1034 | 1032 | bookmarks_group = ( |
|
1035 | 1033 | [(f'book:{safe_str(b)}', safe_str(b)) |
|
1036 | 1034 | for b in repo.bookmarks], |
|
1037 | 1035 | _("Bookmarks")) |
|
1038 | 1036 | ref_options.append(bookmarks_group) |
|
1039 | 1037 | choices.extend([x[0] for x in bookmarks_group[0]]) |
|
1040 | 1038 | |
|
1041 | 1039 | # tags |
|
1042 | 1040 | tags_group = ( |
|
1043 | 1041 | [(f'tag:{safe_str(t)}', safe_str(t)) |
|
1044 | 1042 | for t in repo.tags], |
|
1045 | 1043 | _("Tags")) |
|
1046 | 1044 | ref_options.append(tags_group) |
|
1047 | 1045 | choices.extend([x[0] for x in tags_group[0]]) |
|
1048 | 1046 | |
|
1049 | 1047 | return choices, ref_options |
|
1050 | 1048 | |
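Based on how `choices` and `ref_options` are assembled above, the return value for a Mercurial repository looks roughly like the sketch below; the concrete ref names are invented, and the group labels come through the translator.

```python
# Hypothetical result for an hg repo with one branch, bookmark and tag.
choices = [
    'branch:default',   # default landing ref, always first
    'branch:default',   # repeated via the Branches group
    'book:wip',         # Bookmarks group (hg only)
    'tag:v1.0',         # Tags group
]
ref_options = [
    ('branch:default', 'default'),                  # default (ref, label) pair
    ([('branch:default', 'default')], 'Branches'),  # (entries, group label)
    ([('book:wip', 'wip')], 'Bookmarks'),
    ([('tag:v1.0', 'v1.0')], 'Tags'),
]
```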
|
1051 | 1049 | def get_server_info(self, environ=None): |
|
1052 | 1050 | server_info = get_system_info(environ) |
|
1053 | 1051 | return server_info |