@@ -1,194 +1,193 b'' | |||
|
1 | 1 | # RhodeCode VCSServer provides access to different vcs backends via network. |
|
2 | 2 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
3 | 3 | # |
|
4 | 4 | # This program is free software; you can redistribute it and/or modify |
|
5 | 5 | # it under the terms of the GNU General Public License as published by |
|
6 | 6 | # the Free Software Foundation; either version 3 of the License, or |
|
7 | 7 | # (at your option) any later version. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU General Public License |
|
15 | 15 | # along with this program; if not, write to the Free Software Foundation, |
|
16 | 16 | # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
17 | 17 | import os |
|
18 | 18 | import sys |
|
19 | 19 | import tempfile |
|
20 | import traceback | |
|
21 | 20 | import logging |
|
22 | 21 | import urllib.parse |
|
23 | 22 | |
|
24 | 23 | from vcsserver.lib.rc_cache.archive_cache import get_archival_cache_store |
|
25 | 24 | |
|
26 | 25 | from vcsserver import exceptions |
|
27 | 26 | from vcsserver.exceptions import NoContentException |
|
28 | 27 | from vcsserver.hgcompat import archival |
|
29 | 28 | from vcsserver.str_utils import safe_bytes |
|
30 | ||
|
29 | from vcsserver.lib.exc_tracking import format_exc | |
|
31 | 30 | log = logging.getLogger(__name__) |
|
32 | 31 | |
|
33 | 32 | |
|
34 | 33 | class RepoFactory(object): |
|
35 | 34 | """ |
|
36 | 35 | Utility to create instances of repository |
|
37 | 36 | |
|
38 | 37 | It provides internal caching of the `repo` object based on |
|
39 | 38 | the :term:`call context`. |
|
40 | 39 | """ |
|
41 | 40 | repo_type = None |
|
42 | 41 | |
|
43 | 42 | def __init__(self): |
|
44 | 43 | pass |
|
45 | 44 | |
|
46 | 45 | def _create_config(self, path, config): |
|
47 | 46 | config = {} |
|
48 | 47 | return config |
|
49 | 48 | |
|
50 | 49 | def _create_repo(self, wire, create): |
|
51 | 50 | raise NotImplementedError() |
|
52 | 51 | |
|
53 | 52 | def repo(self, wire, create=False): |
|
54 | 53 | raise NotImplementedError() |
|
55 | 54 | |
|
56 | 55 | |
|
57 | 56 | def obfuscate_qs(query_string): |
|
58 | 57 | if query_string is None: |
|
59 | 58 | return None |
|
60 | 59 | |
|
61 | 60 | parsed = [] |
|
62 | 61 | for k, v in urllib.parse.parse_qsl(query_string, keep_blank_values=True): |
|
63 | 62 | if k in ['auth_token', 'api_key']: |
|
64 | 63 | v = "*****" |
|
65 | 64 | parsed.append((k, v)) |
|
66 | 65 | |
|
67 | 66 | return '&'.join('{}{}'.format( |
|
68 | 67 | k, f'={v}' if v else '') for k, v in parsed) |
|
69 | 68 | |
|
70 | 69 | |
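A quick usage sketch of `obfuscate_qs` (input values are illustrative): sensitive keys are masked, and blank values keep their bare-key form because of the conditional `=` join above.

```python
# auth_token is masked; the blank 'debug' value loses its '='
qs = 'cmd=batch&auth_token=secret123&debug='
print(obfuscate_qs(qs))
# -> cmd=batch&auth_token=*****&debug
```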
|
71 | 70 | def raise_from_original(new_type, org_exc: Exception): |
|
72 | 71 | """ |
|
73 | 72 | Raise a new exception type with original args and traceback. |
|
74 | 73 | """ |
|
75 | ||
|
76 | exc_type, exc_value, exc_traceback = sys.exc_info() | |
|
74 | exc_info = sys.exc_info() | |
|
75 | exc_type, exc_value, exc_traceback = exc_info | |
|
77 | 76 | new_exc = new_type(*exc_value.args) |
|
78 | 77 | |
|
79 | 78 | # store the original traceback into the new exc |
|
80 | new_exc._org_exc_tb = traceback.format_tb(exc_traceback) | |
|
79 | new_exc._org_exc_tb = format_exc(exc_info) | |
|
81 | 80 | |
|
82 | 81 | try: |
|
83 | 82 | raise new_exc.with_traceback(exc_traceback) |
|
84 | 83 | finally: |
|
85 | 84 | del exc_traceback |
|
86 | 85 | |
|
87 | 86 | |
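A minimal sketch of the new flow, using a hypothetical `WrappedError`: the re-raised exception keeps the original args and traceback, and `_org_exc_tb` now holds the text produced by the shared `format_exc` helper instead of raw `traceback` output.

```python
class WrappedError(Exception):  # illustrative target type
    pass

try:
    try:
        int('not-a-number')
    except ValueError:
        raise_from_original(WrappedError, org_exc=None)
except WrappedError as new_err:
    print(new_err.args)              # original ValueError args are preserved
    print(new_err._org_exc_tb[:14])  # '++ TRACEBACK +' header from format_exc
```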
|
88 | 87 | class ArchiveNode(object): |
|
89 | 88 | def __init__(self, path, mode, is_link, raw_bytes): |
|
90 | 89 | self.path = path |
|
91 | 90 | self.mode = mode |
|
92 | 91 | self.is_link = is_link |
|
93 | 92 | self.raw_bytes = raw_bytes |
|
94 | 93 | |
|
95 | 94 | |
|
96 | 95 | def store_archive_in_cache(node_walker, archive_key, kind, mtime, archive_at_path, archive_dir_name, |
|
97 | 96 | commit_id, write_metadata=True, extra_metadata=None, cache_config=None): |
|
98 | 97 | """ |
|
99 | 98 | Function that would store generate archive and send it to a dedicated backend store |
|
100 | 99 | In here we use diskcache |
|
101 | 100 | |
|
102 | 101 | :param node_walker: a generator returning nodes to add to archive |
|
103 | 102 | :param archive_key: key used to store the path |
|
104 | 103 | :param kind: archive kind |
|
105 | 104 | :param mtime: time of creation |
|
106 | 105 | :param archive_at_path: default '/' the path at archive was started. |
|
107 | 106 | If this is not '/' it means it's a partial archive |
|
108 | 107 | :param archive_dir_name: inside dir name when creating an archive |
|
109 | 108 | :param commit_id: commit sha of revision archive was created at |
|
110 | 109 | :param write_metadata: |
|
111 | 110 | :param extra_metadata: |
|
112 | 111 | :param cache_config: |
|
113 | 112 | |
|
114 | 113 | walker should be a file walker, for example, |
|
115 | 114 | def node_walker(): |
|
116 | 115 | for file_info in files: |
|
117 | 116 | yield ArchiveNode(fn, mode, is_link, ctx[fn].data) |
|
118 | 117 | """ |
|
119 | 118 | extra_metadata = extra_metadata or {} |
|
120 | 119 | |
|
121 | 120 | d_cache = get_archival_cache_store(config=cache_config) |
|
122 | 121 | |
|
123 | 122 | if archive_key in d_cache: |
|
124 | 123 | with d_cache as d_cache_reader: |
|
125 | 124 | reader, tag = d_cache_reader.get(archive_key, read=True, tag=True, retry=True) |
|
126 | 125 | return reader.name |
|
127 | 126 | |
|
128 | 127 | archive_tmp_path = safe_bytes(tempfile.mkstemp()[1]) |
|
129 | 128 | log.debug('Creating new temp archive in %s', archive_tmp_path) |
|
130 | 129 | |
|
131 | 130 | if kind == "tgz": |
|
132 | 131 | archiver = archival.tarit(archive_tmp_path, mtime, b"gz") |
|
133 | 132 | elif kind == "tbz2": |
|
134 | 133 | archiver = archival.tarit(archive_tmp_path, mtime, b"bz2") |
|
135 | 134 | elif kind == 'zip': |
|
136 | 135 | archiver = archival.zipit(archive_tmp_path, mtime) |
|
137 | 136 | else: |
|
138 | 137 | raise exceptions.ArchiveException()( |
|
139 | 138 | f'Remote does not support: "{kind}" archive type.') |
|
140 | 139 | |
|
141 | 140 | for f in node_walker(commit_id, archive_at_path): |
|
142 | 141 | f_path = os.path.join(safe_bytes(archive_dir_name), safe_bytes(f.path).lstrip(b'/')) |
|
143 | 142 | try: |
|
144 | 143 | archiver.addfile(f_path, f.mode, f.is_link, f.raw_bytes()) |
|
145 | 144 | except NoContentException: |
|
146 | 145 | # NOTE(marcink): this is a special case for SVN so we can create "empty" |
|
147 | 146 | # directories which are not supported by archiver |
|
148 | 147 | archiver.addfile(os.path.join(f_path, b'.dir'), f.mode, f.is_link, b'') |
|
149 | 148 | |
|
150 | 149 | if write_metadata: |
|
151 | 150 | metadata = dict([ |
|
152 | 151 | ('commit_id', commit_id), |
|
153 | 152 | ('mtime', mtime), |
|
154 | 153 | ]) |
|
155 | 154 | metadata.update(extra_metadata) |
|
156 | 155 | |
|
157 | 156 | meta = [safe_bytes(f"{f_name}:{value}") for f_name, value in metadata.items()] |
|
158 | 157 | f_path = os.path.join(safe_bytes(archive_dir_name), b'.archival.txt') |
|
159 | 158 | archiver.addfile(f_path, 0o644, False, b'\n'.join(meta)) |
|
160 | 159 | |
|
161 | 160 | archiver.done() |
|
162 | 161 | |
|
163 | 162 | # ensure set & get are atomic |
|
164 | 163 | with d_cache.transact(): |
|
165 | 164 | |
|
166 | 165 | with open(archive_tmp_path, 'rb') as archive_file: |
|
167 | 166 | add_result = d_cache.set(archive_key, archive_file, read=True, tag='db-name', retry=True) |
|
168 | 167 | if not add_result: |
|
169 | 168 | log.error('Failed to store cache for key=%s', archive_key) |
|
170 | 169 | |
|
171 | 170 | os.remove(archive_tmp_path) |
|
172 | 171 | |
|
173 | 172 | reader, tag = d_cache.get(archive_key, read=True, tag=True, retry=True) |
|
174 | 173 | if not reader: |
|
175 | 174 | raise AssertionError(f'empty reader on key={archive_key} added={add_result}') |
|
176 | 175 | |
|
177 | 176 | return reader.name |
|
178 | 177 | |
|
179 | 178 | |
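A hypothetical walker wired into `store_archive_in_cache`; the key names and cache settings below are illustrative, not the actual RhodeCode configuration. Note that `raw_bytes` must be a callable, since the loop above invokes `f.raw_bytes()`.

```python
def node_walker(commit_id, archive_at_path):
    files = {b'README.md': b'# hello\n'}
    for path, content in files.items():
        # raw_bytes is called by the archiver, so wrap the content in a callable
        yield ArchiveNode(path, 0o644, False, lambda data=content: data)

archive_file = store_archive_in_cache(
    node_walker, archive_key='repo1-abc123.tgz', kind='tgz', mtime=0,
    archive_at_path='/', archive_dir_name='repo1', commit_id='abc123',
    cache_config={})  # cache_config normally comes from the app settings
```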
|
180 | 179 | class BinaryEnvelope(object): |
|
181 | 180 | def __init__(self, val): |
|
182 | 181 | self.val = val |
|
183 | 182 | |
|
184 | 183 | |
|
185 | 184 | class BytesEnvelope(bytes): |
|
186 | 185 | def __new__(cls, content): |
|
187 | 186 | if isinstance(content, bytes): |
|
188 | 187 | return super().__new__(cls, content) |
|
189 | 188 | else: |
|
190 | 189 | raise TypeError('BytesEnvelope content= param must be bytes. Use BinaryEnvelope to wrap other types') |
|
191 | 190 | |
|
192 | 191 | |
|
193 | 192 | class BinaryBytesEnvelope(BytesEnvelope): |
|
194 | 193 | pass |
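The envelope types signal the msgpack renderer (see `_msgpack_renderer_factory` in the next file) to switch to the binary content type; a small sketch:

```python
raw = BytesEnvelope(b'\x89PNG\r\n')            # bytes pass through untouched
boxed = BinaryEnvelope({'blob': b'\x00\x01'})  # renderer unwraps .val
# BytesEnvelope('text') would raise TypeError: content must be bytes
```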
@@ -1,777 +1,774 b'' | |||
|
1 | 1 | # RhodeCode VCSServer provides access to different vcs backends via network. |
|
2 | 2 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
3 | 3 | # |
|
4 | 4 | # This program is free software; you can redistribute it and/or modify |
|
5 | 5 | # it under the terms of the GNU General Public License as published by |
|
6 | 6 | # the Free Software Foundation; either version 3 of the License, or |
|
7 | 7 | # (at your option) any later version. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU General Public License |
|
15 | 15 | # along with this program; if not, write to the Free Software Foundation, |
|
16 | 16 | # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
17 | 17 | |
|
18 | 18 | import io |
|
19 | 19 | import os |
|
20 | 20 | import sys |
|
21 | 21 | import locale |
|
22 | 22 | import logging |
|
23 | 23 | import uuid |
|
24 | 24 | import time |
|
25 | 25 | import wsgiref.util |
|
26 | import traceback | |
|
27 | 26 | import tempfile |
|
28 | 27 | import psutil |
|
29 | 28 | |
|
30 | 29 | from itertools import chain |
|
31 | 30 | |
|
32 | 31 | import msgpack |
|
33 | 32 | import configparser |
|
34 | 33 | |
|
35 | 34 | from pyramid.config import Configurator |
|
36 | 35 | from pyramid.wsgi import wsgiapp |
|
37 | 36 | from pyramid.response import Response |
|
38 | 37 | |
|
39 | 38 | from vcsserver.base import BytesEnvelope, BinaryEnvelope |
|
40 | 39 | from vcsserver.lib.rc_json import json |
|
41 | 40 | from vcsserver.config.settings_maker import SettingsMaker |
|
42 | 41 | from vcsserver.str_utils import safe_int |
|
43 | 42 | from vcsserver.lib.statsd_client import StatsdClient |
|
44 | 43 | from vcsserver.tweens.request_wrapper import get_call_context, get_headers_call_context |
|
45 | 44 | |
|
46 | 45 | log = logging.getLogger(__name__) |
|
47 | 46 | |
|
48 | 47 | # due to Mercurial/glibc2.27 problems we need to detect if locale settings are |
|
49 | 48 | # causing problems and "fix" it in case they do and fallback to LC_ALL = C |
|
50 | 49 | |
|
51 | 50 | try: |
|
52 | 51 | locale.setlocale(locale.LC_ALL, '') |
|
53 | 52 | except locale.Error as e: |
|
54 | 53 | log.error( |
|
55 | 54 | 'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e) |
|
56 | 55 | os.environ['LC_ALL'] = 'C' |
|
57 | 56 | |
|
58 | 57 | |
|
59 | 58 | import vcsserver |
|
60 | 59 | from vcsserver import remote_wsgi, scm_app, settings, hgpatches |
|
61 | 60 | from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT |
|
62 | 61 | from vcsserver.echo_stub import remote_wsgi as remote_wsgi_stub |
|
63 | 62 | from vcsserver.echo_stub.echo_app import EchoApp |
|
64 | 63 | from vcsserver.exceptions import HTTPRepoLocked, HTTPRepoBranchProtected |
|
65 | from vcsserver.lib.exc_tracking import store_exception | |
|
64 | from vcsserver.lib.exc_tracking import store_exception, format_exc | |
|
66 | 65 | from vcsserver.server import VcsServer |
|
67 | 66 | |
|
68 | 67 | strict_vcs = True |
|
69 | 68 | |
|
70 | 69 | git_import_err = None |
|
71 | 70 | try: |
|
72 | 71 | from vcsserver.remote.git import GitFactory, GitRemote |
|
73 | 72 | except ImportError as e: |
|
74 | 73 | GitFactory = None |
|
75 | 74 | GitRemote = None |
|
76 | 75 | git_import_err = e |
|
77 | 76 | if strict_vcs: |
|
78 | 77 | raise |
|
79 | 78 | |
|
80 | 79 | |
|
81 | 80 | hg_import_err = None |
|
82 | 81 | try: |
|
83 | 82 | from vcsserver.remote.hg import MercurialFactory, HgRemote |
|
84 | 83 | except ImportError as e: |
|
85 | 84 | MercurialFactory = None |
|
86 | 85 | HgRemote = None |
|
87 | 86 | hg_import_err = e |
|
88 | 87 | if strict_vcs: |
|
89 | 88 | raise |
|
90 | 89 | |
|
91 | 90 | |
|
92 | 91 | svn_import_err = None |
|
93 | 92 | try: |
|
94 | 93 | from vcsserver.remote.svn import SubversionFactory, SvnRemote |
|
95 | 94 | except ImportError as e: |
|
96 | 95 | SubversionFactory = None |
|
97 | 96 | SvnRemote = None |
|
98 | 97 | svn_import_err = e |
|
99 | 98 | if strict_vcs: |
|
100 | 99 | raise |
|
101 | 100 | |
|
102 | 101 | |
|
103 | 102 | def _is_request_chunked(environ): |
|
104 | 103 | stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked' |
|
105 | 104 | return stream |
|
106 | 105 | |
|
107 | 106 | |
|
108 | 107 | def log_max_fd(): |
|
109 | 108 | try: |
|
110 | 109 | maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1] |
|
111 | 110 | log.info('Max file descriptors value: %s', maxfd) |
|
112 | 111 | except Exception: |
|
113 | 112 | pass |
|
114 | 113 | |
|
115 | 114 | |
|
116 | 115 | class VCS(object): |
|
117 | 116 | def __init__(self, locale_conf=None, cache_config=None): |
|
118 | 117 | self.locale = locale_conf |
|
119 | 118 | self.cache_config = cache_config |
|
120 | 119 | self._configure_locale() |
|
121 | 120 | |
|
122 | 121 | log_max_fd() |
|
123 | 122 | |
|
124 | 123 | if GitFactory and GitRemote: |
|
125 | 124 | git_factory = GitFactory() |
|
126 | 125 | self._git_remote = GitRemote(git_factory) |
|
127 | 126 | else: |
|
128 | 127 | log.error("Git client import failed: %s", git_import_err) |
|
129 | 128 | |
|
130 | 129 | if MercurialFactory and HgRemote: |
|
131 | 130 | hg_factory = MercurialFactory() |
|
132 | 131 | self._hg_remote = HgRemote(hg_factory) |
|
133 | 132 | else: |
|
134 | 133 | log.error("Mercurial client import failed: %s", hg_import_err) |
|
135 | 134 | |
|
136 | 135 | if SubversionFactory and SvnRemote: |
|
137 | 136 | svn_factory = SubversionFactory() |
|
138 | 137 | |
|
139 | 138 | # hg factory is used for svn url validation |
|
140 | 139 | hg_factory = MercurialFactory() |
|
141 | 140 | self._svn_remote = SvnRemote(svn_factory, hg_factory=hg_factory) |
|
142 | 141 | else: |
|
143 | 142 | log.error("Subversion client import failed: %s", svn_import_err) |
|
144 | 143 | |
|
145 | 144 | self._vcsserver = VcsServer() |
|
146 | 145 | |
|
147 | 146 | def _configure_locale(self): |
|
148 | 147 | if self.locale: |
|
149 | 148 | log.info('Setting locale: `LC_ALL` to %s', self.locale) |
|
150 | 149 | else: |
|
151 | 150 | log.info('Configuring locale subsystem based on environment variables') |
|
152 | 151 | try: |
|
153 | 152 | # If self.locale is the empty string, then the locale |
|
154 | 153 | # module will use the environment variables. See the |
|
155 | 154 | # documentation of the package `locale`. |
|
156 | 155 | locale.setlocale(locale.LC_ALL, self.locale) |
|
157 | 156 | |
|
158 | 157 | language_code, encoding = locale.getlocale() |
|
159 | 158 | log.info( |
|
160 | 159 | 'Locale set to language code "%s" with encoding "%s".', |
|
161 | 160 | language_code, encoding) |
|
162 | 161 | except locale.Error: |
|
163 | 162 | log.exception('Cannot set locale, not configuring the locale system') |
|
164 | 163 | |
|
165 | 164 | |
|
166 | 165 | class WsgiProxy(object): |
|
167 | 166 | def __init__(self, wsgi): |
|
168 | 167 | self.wsgi = wsgi |
|
169 | 168 | |
|
170 | 169 | def __call__(self, environ, start_response): |
|
171 | 170 | input_data = environ['wsgi.input'].read() |
|
172 | 171 | input_data = msgpack.unpackb(input_data) |
|
173 | 172 | |
|
174 | 173 | error = None |
|
175 | 174 | try: |
|
176 | 175 | data, status, headers = self.wsgi.handle( |
|
177 | 176 | input_data['environment'], input_data['input_data'], |
|
178 | 177 | *input_data['args'], **input_data['kwargs']) |
|
179 | 178 | except Exception as e: |
|
180 | 179 | data, status, headers = [], None, None |
|
181 | 180 | error = { |
|
182 | 181 | 'message': str(e), |
|
183 | 182 | '_vcs_kind': getattr(e, '_vcs_kind', None) |
|
184 | 183 | } |
|
185 | 184 | |
|
186 | 185 | start_response(200, {}) |
|
187 | 186 | return self._iterator(error, status, headers, data) |
|
188 | 187 | |
|
189 | 188 | def _iterator(self, error, status, headers, data): |
|
190 | 189 | initial_data = [ |
|
191 | 190 | error, |
|
192 | 191 | status, |
|
193 | 192 | headers, |
|
194 | 193 | ] |
|
195 | 194 | |
|
196 | 195 | for d in chain(initial_data, data): |
|
197 | 196 | yield msgpack.packb(d) |
|
198 | 197 | |
|
199 | 198 | |
|
200 | 199 | def not_found(request): |
|
201 | 200 | return {'status': '404 NOT FOUND'} |
|
202 | 201 | |
|
203 | 202 | |
|
204 | 203 | class VCSViewPredicate(object): |
|
205 | 204 | def __init__(self, val, config): |
|
206 | 205 | self.remotes = val |
|
207 | 206 | |
|
208 | 207 | def text(self): |
|
209 | 208 | return f'vcs view method = {list(self.remotes.keys())}' |
|
210 | 209 | |
|
211 | 210 | phash = text |
|
212 | 211 | |
|
213 | 212 | def __call__(self, context, request): |
|
214 | 213 | """ |
|
215 | 214 | View predicate that returns true if given backend is supported by |
|
216 | 215 | defined remotes. |
|
217 | 216 | """ |
|
218 | 217 | backend = request.matchdict.get('backend') |
|
219 | 218 | return backend in self.remotes |
|
220 | 219 | |
|
221 | 220 | |
|
222 | 221 | class HTTPApplication(object): |
|
223 | 222 | ALLOWED_EXCEPTIONS = ('KeyError', 'URLError') |
|
224 | 223 | |
|
225 | 224 | remote_wsgi = remote_wsgi |
|
226 | 225 | _use_echo_app = False |
|
227 | 226 | |
|
228 | 227 | def __init__(self, settings=None, global_config=None): |
|
229 | 228 | |
|
230 | 229 | self.config = Configurator(settings=settings) |
|
231 | 230 | # Init our statsd at very start |
|
232 | 231 | self.config.registry.statsd = StatsdClient.statsd |
|
233 | 232 | self.config.registry.vcs_call_context = {} |
|
234 | 233 | |
|
235 | 234 | self.global_config = global_config |
|
236 | 235 | self.config.include('vcsserver.lib.rc_cache') |
|
237 | 236 | self.config.include('vcsserver.lib.rc_cache.archive_cache') |
|
238 | 237 | |
|
239 | 238 | settings_locale = settings.get('locale', '') or 'en_US.UTF-8' |
|
240 | 239 | vcs = VCS(locale_conf=settings_locale, cache_config=settings) |
|
241 | 240 | self._remotes = { |
|
242 | 241 | 'hg': vcs._hg_remote, |
|
243 | 242 | 'git': vcs._git_remote, |
|
244 | 243 | 'svn': vcs._svn_remote, |
|
245 | 244 | 'server': vcs._vcsserver, |
|
246 | 245 | } |
|
247 | 246 | if settings.get('dev.use_echo_app', 'false').lower() == 'true': |
|
248 | 247 | self._use_echo_app = True |
|
249 | 248 | log.warning("Using EchoApp for VCS operations.") |
|
250 | 249 | self.remote_wsgi = remote_wsgi_stub |
|
251 | 250 | |
|
252 | 251 | self._configure_settings(global_config, settings) |
|
253 | 252 | |
|
254 | 253 | self._configure() |
|
255 | 254 | |
|
256 | 255 | def _configure_settings(self, global_config, app_settings): |
|
257 | 256 | """ |
|
258 | 257 | Configure the settings module. |
|
259 | 258 | """ |
|
260 | 259 | settings_merged = global_config.copy() |
|
261 | 260 | settings_merged.update(app_settings) |
|
262 | 261 | |
|
263 | 262 | git_path = app_settings.get('git_path', None) |
|
264 | 263 | if git_path: |
|
265 | 264 | settings.GIT_EXECUTABLE = git_path |
|
266 | 265 | binary_dir = app_settings.get('core.binary_dir', None) |
|
267 | 266 | if binary_dir: |
|
268 | 267 | settings.BINARY_DIR = binary_dir |
|
269 | 268 | |
|
270 | 269 | # Store the settings to make them available to other modules. |
|
271 | 270 | vcsserver.PYRAMID_SETTINGS = settings_merged |
|
272 | 271 | vcsserver.CONFIG = settings_merged |
|
273 | 272 | |
|
274 | 273 | def _configure(self): |
|
275 | 274 | self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory) |
|
276 | 275 | |
|
277 | 276 | self.config.add_route('service', '/_service') |
|
278 | 277 | self.config.add_route('status', '/status') |
|
279 | 278 | self.config.add_route('hg_proxy', '/proxy/hg') |
|
280 | 279 | self.config.add_route('git_proxy', '/proxy/git') |
|
281 | 280 | |
|
282 | 281 | # rpc methods |
|
283 | 282 | self.config.add_route('vcs', '/{backend}') |
|
284 | 283 | |
|
285 | 284 | # streaming rpc remote methods |
|
286 | 285 | self.config.add_route('vcs_stream', '/{backend}/stream') |
|
287 | 286 | |
|
288 | 287 | # vcs operations clone/push as streaming |
|
289 | 288 | self.config.add_route('stream_git', '/stream/git/*repo_name') |
|
290 | 289 | self.config.add_route('stream_hg', '/stream/hg/*repo_name') |
|
291 | 290 | |
|
292 | 291 | self.config.add_view(self.status_view, route_name='status', renderer='json') |
|
293 | 292 | self.config.add_view(self.service_view, route_name='service', renderer='msgpack') |
|
294 | 293 | |
|
295 | 294 | self.config.add_view(self.hg_proxy(), route_name='hg_proxy') |
|
296 | 295 | self.config.add_view(self.git_proxy(), route_name='git_proxy') |
|
297 | 296 | self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack', |
|
298 | 297 | vcs_view=self._remotes) |
|
299 | 298 | self.config.add_view(self.vcs_stream_view, route_name='vcs_stream', |
|
300 | 299 | vcs_view=self._remotes) |
|
301 | 300 | |
|
302 | 301 | self.config.add_view(self.hg_stream(), route_name='stream_hg') |
|
303 | 302 | self.config.add_view(self.git_stream(), route_name='stream_git') |
|
304 | 303 | |
|
305 | 304 | self.config.add_view_predicate('vcs_view', VCSViewPredicate) |
|
306 | 305 | |
|
307 | 306 | self.config.add_notfound_view(not_found, renderer='json') |
|
308 | 307 | |
|
309 | 308 | self.config.add_view(self.handle_vcs_exception, context=Exception) |
|
310 | 309 | |
|
311 | 310 | self.config.add_tween( |
|
312 | 311 | 'vcsserver.tweens.request_wrapper.RequestWrapperTween', |
|
313 | 312 | ) |
|
314 | 313 | self.config.add_request_method( |
|
315 | 314 | 'vcsserver.lib.request_counter.get_request_counter', |
|
316 | 315 | 'request_count') |
|
317 | 316 | |
|
318 | 317 | def wsgi_app(self): |
|
319 | 318 | return self.config.make_wsgi_app() |
|
320 | 319 | |
|
321 | 320 | def _vcs_view_params(self, request): |
|
322 | 321 | remote = self._remotes[request.matchdict['backend']] |
|
323 | 322 | payload = msgpack.unpackb(request.body, use_list=True) |
|
324 | 323 | |
|
325 | 324 | method = payload.get('method') |
|
326 | 325 | params = payload['params'] |
|
327 | 326 | wire = params.get('wire') |
|
328 | 327 | args = params.get('args') |
|
329 | 328 | kwargs = params.get('kwargs') |
|
330 | 329 | context_uid = None |
|
331 | 330 | |
|
332 | 331 | request.registry.vcs_call_context = { |
|
333 | 332 | 'method': method, |
|
334 | 333 | 'repo_name': payload.get('_repo_name'), |
|
335 | 334 | } |
|
336 | 335 | |
|
337 | 336 | if wire: |
|
338 | 337 | try: |
|
339 | 338 | wire['context'] = context_uid = uuid.UUID(wire['context']) |
|
340 | 339 | except KeyError: |
|
341 | 340 | pass |
|
342 | 341 | args.insert(0, wire) |
|
343 | 342 | repo_state_uid = wire.get('repo_state_uid') if wire else None |
|
344 | 343 | |
|
345 | 344 | # NOTE(marcink): trading complexity for slight performance |
|
346 | 345 | if log.isEnabledFor(logging.DEBUG): |
|
347 | 346 | # also we SKIP printing out any of those methods args since they may be excessive |
|
348 | 347 | just_args_methods = { |
|
349 | 348 | 'commitctx': ('content', 'removed', 'updated'), |
|
350 | 349 | 'commit': ('content', 'removed', 'updated') |
|
351 | 350 | } |
|
352 | 351 | if method in just_args_methods: |
|
353 | 352 | skip_args = just_args_methods[method] |
|
354 | 353 | call_args = '' |
|
355 | 354 | call_kwargs = {} |
|
356 | 355 | for k in kwargs: |
|
357 | 356 | if k in skip_args: |
|
358 | 357 | # replace our skip key with dummy |
|
359 | 358 | call_kwargs[k] = f'RemovedParam({k})' |
|
360 | 359 | else: |
|
361 | 360 | call_kwargs[k] = kwargs[k] |
|
362 | 361 | else: |
|
363 | 362 | call_args = args[1:] |
|
364 | 363 | call_kwargs = kwargs |
|
365 | 364 | |
|
366 | 365 | log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s', |
|
367 | 366 | method, call_args, call_kwargs, context_uid, repo_state_uid) |
|
368 | 367 | |
|
369 | 368 | statsd = request.registry.statsd |
|
370 | 369 | if statsd: |
|
371 | 370 | statsd.incr( |
|
372 | 371 | 'vcsserver_method_total', tags=[ |
|
373 | 372 | f"method:{method}", |
|
374 | 373 | ]) |
|
375 | 374 | return payload, remote, method, args, kwargs |
|
376 | 375 | |
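For reference, a request body the `/{backend}` endpoint would accept, matching the fields `_vcs_view_params` reads; the method name and values are illustrative.

```python
import uuid
import msgpack

payload = {
    'id': 'req-1',
    'method': 'some_remote_method',   # hypothetical method name
    '_repo_name': 'my-repo',
    'params': {
        'wire': {'path': '/srv/repos/my-repo',
                 'context': str(uuid.uuid4()),
                 'repo_state_uid': 'state-1'},
        'args': [],        # the server prepends `wire` before dispatch
        'kwargs': {},
    },
}
body = msgpack.packb(payload)
```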
|
377 | 376 | def vcs_view(self, request): |
|
378 | 377 | |
|
379 | 378 | payload, remote, method, args, kwargs = self._vcs_view_params(request) |
|
380 | 379 | payload_id = payload.get('id') |
|
381 | 380 | |
|
382 | 381 | try: |
|
383 | 382 | resp = getattr(remote, method)(*args, **kwargs) |
|
384 | 383 | except Exception as e: |
|
385 | 384 | exc_info = list(sys.exc_info()) |
|
386 | 385 | exc_type, exc_value, exc_traceback = exc_info |
|
387 | 386 | |
|
388 | 387 | org_exc = getattr(e, '_org_exc', None) |
|
389 | 388 | org_exc_name = None |
|
390 | 389 | org_exc_tb = '' |
|
391 | 390 | if org_exc: |
|
392 | 391 | org_exc_name = org_exc.__class__.__name__ |
|
393 | 392 | org_exc_tb = getattr(e, '_org_exc_tb', '') |
|
394 | 393 | # replace our "faked" exception with our org |
|
395 | 394 | exc_info[0] = org_exc.__class__ |
|
396 | 395 | exc_info[1] = org_exc |
|
397 | 396 | |
|
398 | 397 | should_store_exc = True |
|
399 | 398 | if org_exc: |
|
400 | 399 | def get_exc_fqn(_exc_obj): |
|
401 | 400 | module_name = getattr(org_exc.__class__, '__module__', 'UNKNOWN') |
|
402 | 401 | return module_name + '.' + org_exc_name |
|
403 | 402 | |
|
404 | 403 | exc_fqn = get_exc_fqn(org_exc) |
|
405 | 404 | |
|
406 | 405 | if exc_fqn in ['mercurial.error.RepoLookupError', |
|
407 | 406 | 'vcsserver.exceptions.RefNotFoundException']: |
|
408 | 407 | should_store_exc = False |
|
409 | 408 | |
|
410 | 409 | if should_store_exc: |
|
411 | 410 | store_exception(id(exc_info), exc_info, request_path=request.path) |
|
412 | 411 | |
|
413 |
tb_info = |
|
|
414 | traceback.format_exception(exc_type, exc_value, exc_traceback)) | |
|
412 | tb_info = format_exc(exc_info) | |
|
415 | 413 | |
|
416 | 414 | type_ = e.__class__.__name__ |
|
417 | 415 | if type_ not in self.ALLOWED_EXCEPTIONS: |
|
418 | 416 | type_ = None |
|
419 | 417 | |
|
420 | 418 | resp = { |
|
421 | 419 | 'id': payload_id, |
|
422 | 420 | 'error': { |
|
423 | 421 | 'message': str(e), |
|
424 | 422 | 'traceback': tb_info, |
|
425 | 423 | 'org_exc': org_exc_name, |
|
426 | 424 | 'org_exc_tb': org_exc_tb, |
|
427 | 425 | 'type': type_ |
|
428 | 426 | } |
|
429 | 427 | } |
|
430 | 428 | |
|
431 | 429 | try: |
|
432 | 430 | resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None) |
|
433 | 431 | except AttributeError: |
|
434 | 432 | pass |
|
435 | 433 | else: |
|
436 | 434 | resp = { |
|
437 | 435 | 'id': payload_id, |
|
438 | 436 | 'result': resp |
|
439 | 437 | } |
|
440 | 438 | log.debug('Serving data for method %s', method) |
|
441 | 439 | return resp |
|
442 | 440 | |
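The failure branch above always answers with a structured error envelope; roughly the following shape, with illustrative values:

```python
resp = {
    'id': 'req-1',
    'error': {
        'message': 'abort: repository not found',
        'traceback': '++ TRACEBACK ++\n\n...',  # format_exc(exc_info) output
        'org_exc': 'RepoLookupError',           # original backend exception name
        'org_exc_tb': '...',
        'type': None,           # only names in ALLOWED_EXCEPTIONS are exposed
        '_vcs_kind': 'lookup',  # present when the exception defines it
    },
}
```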
|
443 | 441 | def vcs_stream_view(self, request): |
|
444 | 442 | payload, remote, method, args, kwargs = self._vcs_view_params(request) |
|
445 | 443 | # this method has a 'stream:' marker; we remove it here |
|
446 | 444 | method = method.split('stream:')[-1] |
|
447 | 445 | chunk_size = safe_int(payload.get('chunk_size')) or 4096 |
|
448 | 446 | |
|
449 | 447 | try: |
|
450 | 448 | resp = getattr(remote, method)(*args, **kwargs) |
|
451 | 449 | except Exception as e: |
|
452 | 450 | raise |
|
453 | 451 | |
|
454 | 452 | def get_chunked_data(method_resp): |
|
455 | 453 | stream = io.BytesIO(method_resp) |
|
456 | 454 | while 1: |
|
457 | 455 | chunk = stream.read(chunk_size) |
|
458 | 456 | if not chunk: |
|
459 | 457 | break |
|
460 | 458 | yield chunk |
|
461 | 459 | |
|
462 | 460 | response = Response(app_iter=get_chunked_data(resp)) |
|
463 | 461 | response.content_type = 'application/octet-stream' |
|
464 | 462 | |
|
465 | 463 | return response |
|
466 | 464 | |
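A client-side sketch for draining such a streamed response in `chunk_size` pieces, mirroring `get_chunked_data`:

```python
import io

def iter_chunks(fileobj, chunk_size=4096):
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        yield chunk

assert sum(len(c) for c in iter_chunks(io.BytesIO(b'x' * 10000))) == 10000
```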
|
467 | 465 | def status_view(self, request): |
|
468 | 466 | import vcsserver |
|
469 | 467 | return {'status': 'OK', 'vcsserver_version': vcsserver.__version__, |
|
470 | 468 | 'pid': os.getpid()} |
|
471 | 469 | |
|
472 | 470 | def service_view(self, request): |
|
473 | 471 | import vcsserver |
|
474 | 472 | |
|
475 | 473 | payload = msgpack.unpackb(request.body, use_list=True) |
|
476 | 474 | server_config, app_config = {}, {} |
|
477 | 475 | |
|
478 | 476 | try: |
|
479 | 477 | path = self.global_config['__file__'] |
|
480 | 478 | config = configparser.RawConfigParser() |
|
481 | 479 | |
|
482 | 480 | config.read(path) |
|
483 | 481 | |
|
484 | 482 | if config.has_section('server:main'): |
|
485 | 483 | server_config = dict(config.items('server:main')) |
|
486 | 484 | if config.has_section('app:main'): |
|
487 | 485 | app_config = dict(config.items('app:main')) |
|
488 | 486 | |
|
489 | 487 | except Exception: |
|
490 | 488 | log.exception('Failed to read .ini file for display') |
|
491 | 489 | |
|
492 | 490 | environ = list(os.environ.items()) |
|
493 | 491 | |
|
494 | 492 | resp = { |
|
495 | 493 | 'id': payload.get('id'), |
|
496 | 494 | 'result': dict( |
|
497 | 495 | version=vcsserver.__version__, |
|
498 | 496 | config=server_config, |
|
499 | 497 | app_config=app_config, |
|
500 | 498 | environ=environ, |
|
501 | 499 | payload=payload, |
|
502 | 500 | ) |
|
503 | 501 | } |
|
504 | 502 | return resp |
|
505 | 503 | |
|
506 | 504 | def _msgpack_renderer_factory(self, info): |
|
507 | 505 | |
|
508 | 506 | def _render(value, system): |
|
509 | 507 | bin_type = False |
|
510 | 508 | res = value.get('result') |
|
511 | 509 | if isinstance(res, BytesEnvelope): |
|
512 | 510 | log.debug('Result is wrapped in BytesEnvelope type') |
|
513 | 511 | bin_type = True |
|
514 | 512 | elif isinstance(res, BinaryEnvelope): |
|
515 | 513 | log.debug('Result is wrapped in BinaryEnvelope type') |
|
516 | 514 | value['result'] = res.val |
|
517 | 515 | bin_type = True |
|
518 | 516 | |
|
519 | 517 | request = system.get('request') |
|
520 | 518 | if request is not None: |
|
521 | 519 | response = request.response |
|
522 | 520 | ct = response.content_type |
|
523 | 521 | if ct == response.default_content_type: |
|
524 | 522 | response.content_type = 'application/x-msgpack' |
|
525 | 523 | if bin_type: |
|
526 | 524 | response.content_type = 'application/x-msgpack-bin' |
|
527 | 525 | |
|
528 | 526 | return msgpack.packb(value, use_bin_type=bin_type) |
|
529 | 527 | return _render |
|
530 | 528 | |
|
531 | 529 | def set_env_from_config(self, environ, config): |
|
532 | 530 | dict_conf = {} |
|
533 | 531 | try: |
|
534 | 532 | for elem in config: |
|
535 | 533 | if elem[0] == 'rhodecode': |
|
536 | 534 | dict_conf = json.loads(elem[2]) |
|
537 | 535 | break |
|
538 | 536 | except Exception: |
|
539 | 537 | log.exception('Failed to fetch SCM CONFIG') |
|
540 | 538 | return |
|
541 | 539 | |
|
542 | 540 | username = dict_conf.get('username') |
|
543 | 541 | if username: |
|
544 | 542 | environ['REMOTE_USER'] = username |
|
545 | 543 | # mercurial specific, some extension api rely on this |
|
546 | 544 | environ['HGUSER'] = username |
|
547 | 545 | |
|
548 | 546 | ip = dict_conf.get('ip') |
|
549 | 547 | if ip: |
|
550 | 548 | environ['REMOTE_HOST'] = ip |
|
551 | 549 | |
|
552 | 550 | if _is_request_chunked(environ): |
|
553 | 551 | # set the compatibility flag for webob |
|
554 | 552 | environ['wsgi.input_terminated'] = True |
|
555 | 553 | |
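`set_env_from_config` expects `(section, option, value)` triples where the `rhodecode` section carries a JSON blob; an illustrative triple (the option name here is an assumption):

```python
import json

config = [
    ('rhodecode', 'RC_SCM_DATA',   # option name is illustrative
     json.dumps({'username': 'admin', 'ip': '10.0.0.1'})),
]
# set_env_from_config(environ, config) would then set REMOTE_USER, HGUSER
# and REMOTE_HOST from that blob.
```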
|
556 | 554 | def hg_proxy(self): |
|
557 | 555 | @wsgiapp |
|
558 | 556 | def _hg_proxy(environ, start_response): |
|
559 | 557 | app = WsgiProxy(self.remote_wsgi.HgRemoteWsgi()) |
|
560 | 558 | return app(environ, start_response) |
|
561 | 559 | return _hg_proxy |
|
562 | 560 | |
|
563 | 561 | def git_proxy(self): |
|
564 | 562 | @wsgiapp |
|
565 | 563 | def _git_proxy(environ, start_response): |
|
566 | 564 | app = WsgiProxy(self.remote_wsgi.GitRemoteWsgi()) |
|
567 | 565 | return app(environ, start_response) |
|
568 | 566 | return _git_proxy |
|
569 | 567 | |
|
570 | 568 | def hg_stream(self): |
|
571 | 569 | if self._use_echo_app: |
|
572 | 570 | @wsgiapp |
|
573 | 571 | def _hg_stream(environ, start_response): |
|
574 | 572 | app = EchoApp('fake_path', 'fake_name', None) |
|
575 | 573 | return app(environ, start_response) |
|
576 | 574 | return _hg_stream |
|
577 | 575 | else: |
|
578 | 576 | @wsgiapp |
|
579 | 577 | def _hg_stream(environ, start_response): |
|
580 | 578 | log.debug('http-app: handling hg stream') |
|
581 | 579 | call_context = get_headers_call_context(environ) |
|
582 | 580 | |
|
583 | 581 | repo_path = call_context['repo_path'] |
|
584 | 582 | repo_name = call_context['repo_name'] |
|
585 | 583 | config = call_context['repo_config'] |
|
586 | 584 | |
|
587 | 585 | app = scm_app.create_hg_wsgi_app( |
|
588 | 586 | repo_path, repo_name, config) |
|
589 | 587 | |
|
590 | 588 | # Consistent path information for hgweb |
|
591 | 589 | environ['PATH_INFO'] = call_context['path_info'] |
|
592 | 590 | environ['REPO_NAME'] = repo_name |
|
593 | 591 | self.set_env_from_config(environ, config) |
|
594 | 592 | |
|
595 | 593 | log.debug('http-app: starting app handler ' |
|
596 | 594 | 'with %s and process request', app) |
|
597 | 595 | return app(environ, ResponseFilter(start_response)) |
|
598 | 596 | return _hg_stream |
|
599 | 597 | |
|
600 | 598 | def git_stream(self): |
|
601 | 599 | if self._use_echo_app: |
|
602 | 600 | @wsgiapp |
|
603 | 601 | def _git_stream(environ, start_response): |
|
604 | 602 | app = EchoApp('fake_path', 'fake_name', None) |
|
605 | 603 | return app(environ, start_response) |
|
606 | 604 | return _git_stream |
|
607 | 605 | else: |
|
608 | 606 | @wsgiapp |
|
609 | 607 | def _git_stream(environ, start_response): |
|
610 | 608 | log.debug('http-app: handling git stream') |
|
611 | 609 | |
|
612 | 610 | call_context = get_headers_call_context(environ) |
|
613 | 611 | |
|
614 | 612 | repo_path = call_context['repo_path'] |
|
615 | 613 | repo_name = call_context['repo_name'] |
|
616 | 614 | config = call_context['repo_config'] |
|
617 | 615 | |
|
618 | 616 | environ['PATH_INFO'] = call_context['path_info'] |
|
619 | 617 | self.set_env_from_config(environ, config) |
|
620 | 618 | |
|
621 | 619 | content_type = environ.get('CONTENT_TYPE', '') |
|
622 | 620 | |
|
623 | 621 | path = environ['PATH_INFO'] |
|
624 | 622 | is_lfs_request = GIT_LFS_CONTENT_TYPE in content_type |
|
625 | 623 | log.debug( |
|
626 | 624 | 'LFS: Detecting if request `%s` is LFS server path based ' |
|
627 | 625 | 'on content type:`%s`, is_lfs:%s', |
|
628 | 626 | path, content_type, is_lfs_request) |
|
629 | 627 | |
|
630 | 628 | if not is_lfs_request: |
|
631 | 629 | # fallback detection by path |
|
632 | 630 | if GIT_LFS_PROTO_PAT.match(path): |
|
633 | 631 | is_lfs_request = True |
|
634 | 632 | log.debug( |
|
635 | 633 | 'LFS: fallback detection by path of: `%s`, is_lfs:%s', |
|
636 | 634 | path, is_lfs_request) |
|
637 | 635 | |
|
638 | 636 | if is_lfs_request: |
|
639 | 637 | app = scm_app.create_git_lfs_wsgi_app( |
|
640 | 638 | repo_path, repo_name, config) |
|
641 | 639 | else: |
|
642 | 640 | app = scm_app.create_git_wsgi_app( |
|
643 | 641 | repo_path, repo_name, config) |
|
644 | 642 | |
|
645 | 643 | log.debug('http-app: starting app handler ' |
|
646 | 644 | 'with %s and process request', app) |
|
647 | 645 | |
|
648 | 646 | return app(environ, start_response) |
|
649 | 647 | |
|
650 | 648 | return _git_stream |
|
651 | 649 | |
|
652 | 650 | def handle_vcs_exception(self, exception, request): |
|
653 | 651 | _vcs_kind = getattr(exception, '_vcs_kind', '') |
|
654 | 652 | |
|
655 | 653 | if _vcs_kind == 'repo_locked': |
|
656 | 654 | headers_call_context = get_headers_call_context(request.environ) |
|
657 | 655 | status_code = safe_int(headers_call_context['locked_status_code']) |
|
658 | 656 | |
|
659 | 657 | return HTTPRepoLocked( |
|
660 | 658 | title=str(exception), status_code=status_code, headers=[('X-Rc-Locked', '1')]) |
|
661 | 659 | |
|
662 | 660 | elif _vcs_kind == 'repo_branch_protected': |
|
663 | 661 | # Get custom repo-branch-protected status code if present. |
|
664 | 662 | return HTTPRepoBranchProtected( |
|
665 | 663 | title=str(exception), headers=[('X-Rc-Branch-Protection', '1')]) |
|
666 | 664 | |
|
667 | 665 | exc_info = request.exc_info |
|
668 | 666 | store_exception(id(exc_info), exc_info) |
|
669 | 667 | |
|
670 | 668 | traceback_info = 'unavailable' |
|
671 | 669 | if request.exc_info: |
|
672 | exc_type, exc_value, exc_tb = request.exc_info | |
|
673 | traceback_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)) | |
|
670 | traceback_info = format_exc(request.exc_info) | |
|
674 | 671 | |
|
675 | 672 | log.error( |
|
676 | 'error occurred handling this request for path: %s, \n%s', | |
|
673 | 'error occurred handling this request for path: %s, \n%s', | |
|
677 | 674 | request.path, traceback_info) |
|
678 | 675 | |
|
679 | 676 | statsd = request.registry.statsd |
|
680 | 677 | if statsd: |
|
681 | 678 | exc_type = f"{exception.__class__.__module__}.{exception.__class__.__name__}" |
|
682 | 679 | statsd.incr('vcsserver_exception_total', |
|
683 | 680 | tags=[f"type:{exc_type}"]) |
|
684 | 681 | raise exception |
|
685 | 682 | |
|
686 | 683 | |
|
687 | 684 | class ResponseFilter(object): |
|
688 | 685 | |
|
689 | 686 | def __init__(self, start_response): |
|
690 | 687 | self._start_response = start_response |
|
691 | 688 | |
|
692 | 689 | def __call__(self, status, response_headers, exc_info=None): |
|
693 | 690 | headers = tuple( |
|
694 | 691 | (h, v) for h, v in response_headers |
|
695 | 692 | if not wsgiref.util.is_hop_by_hop(h)) |
|
696 | 693 | return self._start_response(status, headers, exc_info) |
|
697 | 694 | |
|
698 | 695 | |
|
699 | 696 | def sanitize_settings_and_apply_defaults(global_config, settings): |
|
700 | 697 | global_settings_maker = SettingsMaker(global_config) |
|
701 | 698 | settings_maker = SettingsMaker(settings) |
|
702 | 699 | |
|
703 | 700 | settings_maker.make_setting('logging.autoconfigure', False, parser='bool') |
|
704 | 701 | |
|
705 | 702 | logging_conf = os.path.join(os.path.dirname(global_config.get('__file__')), 'logging.ini') |
|
706 | 703 | settings_maker.enable_logging(logging_conf) |
|
707 | 704 | |
|
708 | 705 | # Default includes, possible to change as a user |
|
709 | 706 | pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline') |
|
710 | 707 | log.debug("Using the following pyramid.includes: %s", pyramid_includes) |
|
711 | 708 | |
|
712 | 709 | settings_maker.make_setting('__file__', global_config.get('__file__')) |
|
713 | 710 | |
|
714 | 711 | settings_maker.make_setting('pyramid.default_locale_name', 'en') |
|
715 | 712 | settings_maker.make_setting('locale', 'en_US.UTF-8') |
|
716 | 713 | |
|
717 | 714 | settings_maker.make_setting('core.binary_dir', '') |
|
718 | 715 | |
|
719 | 716 | temp_store = tempfile.gettempdir() |
|
720 | 717 | default_cache_dir = os.path.join(temp_store, 'rc_cache') |
|
721 | 718 | # save default, cache dir, and use it for all backends later. |
|
722 | 719 | default_cache_dir = settings_maker.make_setting( |
|
723 | 720 | 'cache_dir', |
|
724 | 721 | default=default_cache_dir, default_when_empty=True, |
|
725 | 722 | parser='dir:ensured') |
|
726 | 723 | |
|
727 | 724 | # exception store cache |
|
728 | 725 | settings_maker.make_setting( |
|
729 | 726 | 'exception_tracker.store_path', |
|
730 | 727 | default=os.path.join(default_cache_dir, 'exc_store'), default_when_empty=True, |
|
731 | 728 | parser='dir:ensured' |
|
732 | 729 | ) |
|
733 | 730 | |
|
734 | 731 | # repo_object cache defaults |
|
735 | 732 | settings_maker.make_setting( |
|
736 | 733 | 'rc_cache.repo_object.backend', |
|
737 | 734 | default='dogpile.cache.rc.file_namespace', |
|
738 | 735 | parser='string') |
|
739 | 736 | settings_maker.make_setting( |
|
740 | 737 | 'rc_cache.repo_object.expiration_time', |
|
741 | 738 | default=30 * 24 * 60 * 60, # 30days |
|
742 | 739 | parser='int') |
|
743 | 740 | settings_maker.make_setting( |
|
744 | 741 | 'rc_cache.repo_object.arguments.filename', |
|
745 | 742 | default=os.path.join(default_cache_dir, 'vcsserver_cache_repo_object.db'), |
|
746 | 743 | parser='string') |
|
747 | 744 | |
|
748 | 745 | # statsd |
|
749 | 746 | settings_maker.make_setting('statsd.enabled', False, parser='bool') |
|
750 | 747 | settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string') |
|
751 | 748 | settings_maker.make_setting('statsd.statsd_port', 9125, parser='int') |
|
752 | 749 | settings_maker.make_setting('statsd.statsd_prefix', '') |
|
753 | 750 | settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool') |
|
754 | 751 | |
|
755 | 752 | settings_maker.env_expand() |
|
756 | 753 | |
|
757 | 754 | |
|
758 | 755 | def main(global_config, **settings): |
|
759 | 756 | start_time = time.time() |
|
760 | 757 | log.info('Pyramid app config starting') |
|
761 | 758 | |
|
762 | 759 | if MercurialFactory: |
|
763 | 760 | hgpatches.patch_largefiles_capabilities() |
|
764 | 761 | hgpatches.patch_subrepo_type_mapping() |
|
765 | 762 | |
|
766 | 763 | # Fill in and sanitize the defaults & do ENV expansion |
|
767 | 764 | sanitize_settings_and_apply_defaults(global_config, settings) |
|
768 | 765 | |
|
769 | 766 | # init and bootstrap StatsdClient |
|
770 | 767 | StatsdClient.setup(settings) |
|
771 | 768 | |
|
772 | 769 | pyramid_app = HTTPApplication(settings=settings, global_config=global_config).wsgi_app() |
|
773 | 770 | total_time = time.time() - start_time |
|
774 | 771 | log.info('Pyramid app created and configured in %.2fs', total_time) |
|
775 | 772 | return pyramid_app |
|
776 | 773 | |
|
777 | 774 |
@@ -1,172 +1,267 b'' | |||
|
1 | 1 | # RhodeCode VCSServer provides access to different vcs backends via network. |
|
2 | 2 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
3 | 3 | # |
|
4 | 4 | # This program is free software; you can redistribute it and/or modify |
|
5 | 5 | # it under the terms of the GNU General Public License as published by |
|
6 | 6 | # the Free Software Foundation; either version 3 of the License, or |
|
7 | 7 | # (at your option) any later version. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU General Public License |
|
15 | 15 | # along with this program; if not, write to the Free Software Foundation, |
|
16 | 16 | # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
17 | 17 | |
|
18 | 18 | |
|
19 | 19 | import os |
|
20 | 20 | import time |
|
21 | import sys | |
|
21 | 22 | import datetime |
|
22 | 23 | import msgpack |
|
23 | 24 | import logging |
|
24 | 25 | import traceback |
|
25 | 26 | import tempfile |
|
27 | import glob | |
|
26 | 28 | |
|
27 | 29 | log = logging.getLogger(__name__) |
|
28 | 30 | |
|
29 | 31 | # NOTE: Any changes should be synced with exc_tracking at rhodecode.lib.exc_tracking |
|
30 | 32 | global_prefix = 'vcsserver' |
|
31 | 33 | exc_store_dir_name = 'rc_exception_store_v1' |
|
32 | 34 | |
|
33 | 35 | |
|
34 | def exc_serialize(exc_id, tb, exc_type): | |
|
36 | def exc_serialize(exc_id, tb, exc_type, extra_data=None): | |
|
35 | 37 | |
|
36 | 38 | data = { |
|
37 | 39 | 'version': 'v1', |
|
38 | 40 | 'exc_id': exc_id, |
|
39 | 41 | 'exc_utc_date': datetime.datetime.utcnow().isoformat(), |
|
40 | 42 | 'exc_timestamp': repr(time.time()), |
|
41 | 43 | 'exc_message': tb, |
|
42 | 44 | 'exc_type': exc_type, |
|
43 | 45 | } |
|
46 | if extra_data: | |
|
47 | data.update(extra_data) | |
|
44 | 48 | return msgpack.packb(data), data |
|
45 | 49 | |
|
46 | 50 | |
|
47 | 51 | def exc_unserialize(tb): |
|
48 | 52 | return msgpack.unpackb(tb) |
|
49 | 53 | |
|
50 | 54 | |
|
55 | _exc_store = None | |
|
56 | ||
|
57 | ||
|
51 | 58 | def get_exc_store(): |
|
52 | 59 | """ |
|
53 | 60 | Get and create exception store if it doesn't exist |
|
54 | 61 | """ |
|
62 | global _exc_store | |
|
63 | ||
|
64 | if _exc_store is not None: | |
|
65 | # quick global cache | |
|
66 | return _exc_store | |
|
67 | ||
|
55 | 68 | import vcsserver as app |
|
56 | 69 | |
|
57 | 70 | exc_store_dir = app.CONFIG.get('exception_tracker.store_path', '') or tempfile.gettempdir() |
|
58 | 71 | _exc_store_path = os.path.join(exc_store_dir, exc_store_dir_name) |
|
59 | 72 | |
|
60 | 73 | _exc_store_path = os.path.abspath(_exc_store_path) |
|
61 | 74 | if not os.path.isdir(_exc_store_path): |
|
62 | 75 | os.makedirs(_exc_store_path) |
|
63 | 76 | log.debug('Initializing exceptions store at %s', _exc_store_path) |
|
77 | _exc_store = _exc_store_path | |
|
78 | ||
|
64 | 79 | return _exc_store_path |
|
65 | 80 | |
|
66 | 81 | |
|
67 | def _store_exception(exc_id, exc_info, prefix, request_path=''): | |
|
68 | exc_type, exc_value, exc_traceback = exc_info | |
|
82 | def get_detailed_tb(exc_info): | |
|
83 | from io import StringIO | |
|
84 | ||
|
85 | try: | |
|
86 | from pip._vendor.rich import traceback as rich_tb, scope as rich_scope, console as rich_console | |
|
87 | except ImportError: | |
|
88 | try: | |
|
89 | from rich import traceback as rich_tb, scope as rich_scope, console as rich_console | |
|
90 | except ImportError: | |
|
91 | return None | |
|
92 | ||
|
93 | console = rich_console.Console( | |
|
94 | width=160, | |
|
95 | file=StringIO() | |
|
96 | ) | |
|
97 | ||
|
98 | exc = rich_tb.Traceback.extract(*exc_info, show_locals=True) | |
|
99 | ||
|
100 | tb_rich = rich_tb.Traceback( | |
|
101 | trace=exc, | |
|
102 | width=160, | |
|
103 | extra_lines=3, | |
|
104 | theme=None, | |
|
105 | word_wrap=False, | |
|
106 | show_locals=False, | |
|
107 | max_frames=100 | |
|
108 | ) | |
|
109 | ||
|
110 | formatted_locals = "" | |
|
69 | 111 | |
|
70 | tb = ''.join(traceback.format_exception( | |
|
71 | exc_type, exc_value, exc_traceback, None)) | |
|
112 | # last_stack = exc.stacks[-1] | |
|
113 | # last_frame = last_stack.frames[-1] | |
|
114 | # if last_frame and last_frame.locals: | |
|
115 | # console.print( | |
|
116 | # rich_scope.render_scope( | |
|
117 | # last_frame.locals, | |
|
118 | # title=f'{last_frame.filename}:{last_frame.lineno}')) | |
|
119 | ||
|
120 | console.print(tb_rich) | |
|
121 | formatted_locals = console.file.getvalue() | |
|
122 | ||
|
123 | return formatted_locals | |
|
124 | ||
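`get_detailed_tb` leans on the optional `rich` package; a standalone sketch of the same rendering (requires `rich` to be installed):

```python
import sys
from io import StringIO
from rich import traceback as rich_tb, console as rich_console

try:
    1 / 0
except ZeroDivisionError:
    trace = rich_tb.Traceback.extract(*sys.exc_info(), show_locals=True)
    console = rich_console.Console(width=160, file=StringIO())
    console.print(rich_tb.Traceback(trace=trace, max_frames=100))
    rendered = console.file.getvalue()  # pretty traceback as plain text
```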
|
72 | 125 | |
|
73 | detailed_tb = getattr(exc_value, '_org_exc_tb', None) | |
|
126 | def get_request_metadata(request=None) -> dict: | |
|
127 | request_metadata = {} | |
|
128 | if not request: | |
|
129 | from pyramid.threadlocal import get_current_request | |
|
130 | request = get_current_request() | |
|
74 | 131 | |
|
132 | # NOTE(marcink): store request information into exc_data | |
|
133 | if request: | |
|
134 | request_metadata['client_address'] = getattr(request, 'client_addr', '') | |
|
135 | request_metadata['user_agent'] = getattr(request, 'user_agent', '') | |
|
136 | request_metadata['method'] = getattr(request, 'method', '') | |
|
137 | request_metadata['url'] = getattr(request, 'url', '') | |
|
138 | return request_metadata | |
|
139 | ||
|
140 | ||
|
141 | def format_exc(exc_info): | |
|
142 | exc_type, exc_value, exc_traceback = exc_info | |
|
143 | tb = "++ TRACEBACK ++\n\n" | |
|
144 | tb += "".join(traceback.format_exception(exc_type, exc_value, exc_traceback, None)) | |
|
145 | ||
|
146 | detailed_tb = getattr(exc_value, "_org_exc_tb", None) | |
|
147 | ||
|
75 | 148 | if detailed_tb: |
|
76 | 149 | remote_tb = detailed_tb |
|
77 | 150 | if isinstance(detailed_tb, str): |
|
78 | 151 | remote_tb = [detailed_tb] |
|
79 | ||
|
152 | ||
|
80 | 153 | tb += ( |
|
81 | '\n+++ BEG SOURCE EXCEPTION +++\n\n' | |
|
82 | '{}\n' | |
|
83 | '+++ END SOURCE EXCEPTION +++\n' | |
|
84 | ''.format('\n'.join(remote_tb)) | |
|
154 | "\n+++ BEG SOURCE EXCEPTION +++\n\n" | |
|
155 | "{}\n" | |
|
156 | "+++ END SOURCE EXCEPTION +++\n" | |
|
157 | "".format("\n".join(remote_tb)) | |
|
85 | 158 | ) |
|
86 | ||
|
159 | ||
|
87 | 160 | # Avoid that remote_tb also appears in the frame |
|
88 | 161 | del remote_tb |
|
162 | ||
|
163 | locals_tb = get_detailed_tb(exc_info) | |
|
164 | if locals_tb: | |
|
165 | tb += f"\n+++ DETAILS +++\n\n{locals_tb}\n" "" | |
|
166 | return tb | |
|
167 | ||
|
168 | ||
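Typical call pattern for the new helper: pass `sys.exc_info()` and log or store the combined block it returns.

```python
import sys

try:
    raise RuntimeError('boom')
except RuntimeError:
    text = format_exc(sys.exc_info())
    # starts with '++ TRACEBACK ++'; appends the SOURCE EXCEPTION block when
    # _org_exc_tb is set, and a rich DETAILS section when rich is available
```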
|
169 | def _store_exception(exc_id, exc_info, prefix, request_path=''): | |
|
170 | """ | |
|
171 | Low level function to store exception in the exception tracker | |
|
172 | """ | |
|
173 | ||
|
174 | extra_data = {} | |
|
175 | extra_data.update(get_request_metadata()) | |
|
176 | ||
|
177 | exc_type, exc_value, exc_traceback = exc_info | |
|
178 | tb = format_exc(exc_info) | |
|
89 | 179 | |
|
90 | 180 | exc_type_name = exc_type.__name__ |
|
181 | exc_data, org_data = exc_serialize(exc_id, tb, exc_type_name, extra_data=extra_data) | |
|
182 | ||
|
183 | exc_pref_id = '{}_{}_{}'.format(exc_id, prefix, org_data['exc_timestamp']) | |
|
91 | 184 | exc_store_path = get_exc_store() |
|
92 | exc_data, org_data = exc_serialize(exc_id, tb, exc_type_name) | |
|
93 | exc_pref_id = '{}_{}_{}'.format(exc_id, prefix, org_data['exc_timestamp']) | |
|
94 | 185 | if not os.path.isdir(exc_store_path): |
|
95 | 186 | os.makedirs(exc_store_path) |
|
96 | 187 | stored_exc_path = os.path.join(exc_store_path, exc_pref_id) |
|
97 | 188 | with open(stored_exc_path, 'wb') as f: |
|
98 | 189 | f.write(exc_data) |
|
99 | 190 | log.debug('Stored generated exception %s as: %s', exc_id, stored_exc_path) |
|
100 | 191 | |
|
101 | 192 | log.error( |
|
102 | 193 | 'error occurred handling this request.\n' |
|
103 | 'Path: `%s`, tb: %s', | |
|
194 | 'Path: `%s`, %s', | |
|
104 | 195 | request_path, tb) |
|
105 | 196 | |
|
106 | 197 | |
|
107 | 198 | def store_exception(exc_id, exc_info, prefix=global_prefix, request_path=''): |
|
108 | 199 | """ |
|
109 | 200 | Example usage:: |
|
110 | 201 | |
|
111 | 202 | exc_info = sys.exc_info() |
|
112 | 203 | store_exception(id(exc_info), exc_info) |
|
113 | 204 | """ |
|
114 | 205 | |
|
115 | 206 | try: |
|
207 | exc_type = exc_info[0] | |
|
208 | exc_type_name = exc_type.__name__ | |
|
209 | ||
|
116 | 210 | _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix, |
|
117 | 211 | request_path=request_path) |
|
212 | return exc_id, exc_type_name | |
|
118 | 213 | except Exception: |
|
119 | 214 | log.exception('Failed to store exception `%s` information', exc_id) |
|
120 | 215 | # there's no way this can fail, it will crash server badly if it does. |
|
121 | 216 | pass |
|
122 | 217 | |
|
123 | 218 | |
|
124 | 219 | def _find_exc_file(exc_id, prefix=global_prefix): |
|
125 | 220 | exc_store_path = get_exc_store() |
|
126 | 221 | if prefix: |
|
127 | 222 | exc_id = f'{exc_id}_{prefix}' |
|
128 | 223 | else: |
|
129 | 224 | # search without a prefix |
|
130 | 225 | exc_id = f'{exc_id}' |
|
131 | 226 | |
|
132 | # we need to search the store for such start pattern as above | |
|
133 | for fname in os.listdir(exc_store_path): | |
|
134 | if fname.startswith(exc_id): | |
|
135 | exc_id = os.path.join(exc_store_path, fname) | |
|
136 | break | |
|
137 | continue | |
|
138 | else: | |
|
139 | exc_id = None | |
|
227 | found_exc_id = None | |
|
228 | matches = glob.glob(os.path.join(exc_store_path, exc_id) + '*') | |
|
229 | if matches: | |
|
230 | found_exc_id = matches[0] | |
|
140 | 231 | |
|
141 | return exc_id | |
|
232 | return found_exc_id | |
|
142 | 233 | |
|
143 | 234 | |
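The glob rewrite above replaces the manual `os.listdir` loop; the lookup in isolation (the store path is illustrative):

```python
import glob
import os

store = '/tmp/rc_exception_store_v1'
matches = glob.glob(os.path.join(store, '12345_vcsserver') + '*')
found = matches[0] if matches else None
```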
|
144 | 235 | def _read_exception(exc_id, prefix): |
|
145 | 236 | exc_id_file_path = _find_exc_file(exc_id=exc_id, prefix=prefix) |
|
146 | 237 | if exc_id_file_path: |
|
147 | 238 | with open(exc_id_file_path, 'rb') as f: |
|
148 | 239 | return exc_unserialize(f.read()) |
|
149 | 240 | else: |
|
150 | 241 | log.debug('Exception File `%s` not found', exc_id_file_path) |
|
151 | 242 | return None |
|
152 | 243 | |
|
153 | 244 | |
|
154 | 245 | def read_exception(exc_id, prefix=global_prefix): |
|
155 | 246 | try: |
|
156 | 247 | return _read_exception(exc_id=exc_id, prefix=prefix) |
|
157 | 248 | except Exception: |
|
158 | 249 | log.exception('Failed to read exception `%s` information', exc_id) |
|
159 | 250 | # there's no way this can fail, it will crash server badly if it does. |
|
160 | 251 | return None |
|
161 | 252 | |
|
162 | 253 | |
|
163 | 254 | def delete_exception(exc_id, prefix=global_prefix): |
|
164 | 255 | try: |
|
165 | 256 | exc_id_file_path = _find_exc_file(exc_id, prefix=prefix) |
|
166 | 257 | if exc_id_file_path: |
|
167 | 258 | os.remove(exc_id_file_path) |
|
168 | 259 | |
|
169 | 260 | except Exception: |
|
170 | 261 | log.exception('Failed to remove exception `%s` information', exc_id) |
|
171 | 262 | # there's no way this can fail, it will crash server badly if it does. |
|
172 | 263 | pass |
|
264 | ||
|
265 | ||
|
266 | def generate_id(): | |
|
267 | return id(object()) |
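`generate_id` relies on `id(object())`: CPython guarantees the integer is unique among objects alive at the same time, which is sufficient for naming a stored exception. Usage mirroring the `store_exception` docstring above:

```python
import sys

try:
    raise ValueError('example')
except ValueError:
    exc_id = generate_id()
    store_exception(exc_id, sys.exc_info())
```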
@@ -1,1164 +1,1199 b'' | |||
|
1 | 1 | # RhodeCode VCSServer provides access to different vcs backends via network. |
|
2 | 2 | # Copyright (C) 2014-2023 RhodeCode GmbH |
|
3 | 3 | # |
|
4 | 4 | # This program is free software; you can redistribute it and/or modify |
|
5 | 5 | # it under the terms of the GNU General Public License as published by |
|
6 | 6 | # the Free Software Foundation; either version 3 of the License, or |
|
7 | 7 | # (at your option) any later version. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU General Public License |
|
15 | 15 | # along with this program; if not, write to the Free Software Foundation, |
|
16 | 16 | # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
17 | 17 | import binascii |
|
18 | 18 | import io |
|
19 | 19 | import logging |
|
20 | 20 | import stat |
|
21 | import sys | |
|
21 | 22 | import urllib.request |
|
22 | 23 | import urllib.parse |
|
23 | import traceback | |
|
24 | 24 | import hashlib |
|
25 | 25 | |
|
26 | 26 | from hgext import largefiles, rebase, purge |
|
27 | 27 | |
|
28 | 28 | from mercurial import commands |
|
29 | 29 | from mercurial import unionrepo |
|
30 | 30 | from mercurial import verify |
|
31 | 31 | from mercurial import repair |
|
32 | 32 | from mercurial.error import AmbiguousPrefixLookupError |
|
33 | 33 | |
|
34 | 34 | import vcsserver |
|
35 | 35 | from vcsserver import exceptions |
|
36 | from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original, store_archive_in_cache, ArchiveNode, BytesEnvelope, \ | |
|
37 | BinaryEnvelope | |
|
36 | from vcsserver.base import ( | |
|
37 | RepoFactory, | |
|
38 | obfuscate_qs, | |
|
39 | raise_from_original, | |
|
40 | store_archive_in_cache, | |
|
41 | ArchiveNode, | |
|
42 | BytesEnvelope, | |
|
43 | BinaryEnvelope, | |
|
44 | ) | |
|
38 | 45 | from vcsserver.hgcompat import ( |
|
39 | archival, bin, clone, config as hgconfig, diffopts, hex, get_ctx, | |
|
40 | hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler, | |
|
41 | makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge, | |
|
42 | patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError, | |
|
43 | RepoLookupError, InterventionRequired, RequirementError, | |
|
44 | alwaysmatcher, patternmatcher, hgutil, hgext_strip) | |
|
46 | archival, | |
|
47 | bin, | |
|
48 | clone, | |
|
49 | config as hgconfig, | |
|
50 | diffopts, | |
|
51 | hex, | |
|
52 | get_ctx, | |
|
53 | hg_url as url_parser, | |
|
54 | httpbasicauthhandler, | |
|
55 | httpdigestauthhandler, | |
|
56 | makepeer, | |
|
57 | instance, | |
|
58 | match, | |
|
59 | memctx, | |
|
60 | exchange, | |
|
61 | memfilectx, | |
|
62 | nullrev, | |
|
63 | hg_merge, | |
|
64 | patch, | |
|
65 | peer, | |
|
66 | revrange, | |
|
67 | ui, | |
|
68 | hg_tag, | |
|
69 | Abort, | |
|
70 | LookupError, | |
|
71 | RepoError, | |
|
72 | RepoLookupError, | |
|
73 | InterventionRequired, | |
|
74 | RequirementError, | |
|
75 | alwaysmatcher, | |
|
76 | patternmatcher, | |
|
77 | hgutil, | |
|
78 | hgext_strip, | |
|
79 | ) | |
|
45 | 80 | from vcsserver.str_utils import ascii_bytes, ascii_str, safe_str, safe_bytes |
|
46 | 81 | from vcsserver.vcs_base import RemoteBase |
|
47 | 82 | from vcsserver.config import hooks as hooks_config |
|
48 | ||
|
83 | from vcsserver.lib.exc_tracking import format_exc | |
|
49 | 84 | |
|
50 | 85 | log = logging.getLogger(__name__) |
|
51 | 86 | |
|
52 | 87 | |
|
53 | 88 | def make_ui_from_config(repo_config): |
|
54 | 89 | |
|
55 | 90 | class LoggingUI(ui.ui): |
|
56 | 91 | |
|
57 | 92 | def status(self, *msg, **opts): |
|
58 | 93 | str_msg = map(safe_str, msg) |
|
59 | 94 | log.info(' '.join(str_msg).rstrip('\n')) |
|
60 | 95 | #super(LoggingUI, self).status(*msg, **opts) |
|
61 | 96 | |
|
62 | 97 | def warn(self, *msg, **opts): |
|
63 | 98 | str_msg = map(safe_str, msg) |
|
64 | 99 | log.warning('ui_logger:'+' '.join(str_msg).rstrip('\n')) |
|
65 | 100 | #super(LoggingUI, self).warn(*msg, **opts) |
|
66 | 101 | |
|
67 | 102 | def error(self, *msg, **opts): |
|
68 | 103 | str_msg = map(safe_str, msg) |
|
69 | 104 | log.error('ui_logger:'+' '.join(str_msg).rstrip('\n')) |
|
70 | 105 | #super(LoggingUI, self).error(*msg, **opts) |
|
71 | 106 | |
|
72 | 107 | def note(self, *msg, **opts): |
|
73 | 108 | str_msg = map(safe_str, msg) |
|
74 | 109 | log.info('ui_logger:'+' '.join(str_msg).rstrip('\n')) |
|
75 | 110 | #super(LoggingUI, self).note(*msg, **opts) |
|
76 | 111 | |
|
77 | 112 | def debug(self, *msg, **opts): |
|
78 | 113 | str_msg = map(safe_str, msg) |
|
79 | 114 | log.debug('ui_logger:'+' '.join(str_msg).rstrip('\n')) |
|
80 | 115 | #super(LoggingUI, self).debug(*msg, **opts) |
|
81 | 116 | |
|
82 | 117 | baseui = LoggingUI() |
|
83 | 118 | |
|
84 | 119 | # clean the baseui object |
|
85 | 120 | baseui._ocfg = hgconfig.config() |
|
86 | 121 | baseui._ucfg = hgconfig.config() |
|
87 | 122 | baseui._tcfg = hgconfig.config() |
|
88 | 123 | |
|
89 | 124 | for section, option, value in repo_config: |
|
90 | 125 | baseui.setconfig(ascii_bytes(section), ascii_bytes(option), ascii_bytes(value)) |
|
91 | 126 | |
|
92 | 127 | # make our hgweb quiet so it doesn't print output |
|
93 | 128 | baseui.setconfig(b'ui', b'quiet', b'true') |
|
94 | 129 | |
|
95 | 130 | baseui.setconfig(b'ui', b'paginate', b'never') |
|
96 | 131 | # for better error reporting from Mercurial |
|
97 | 132 | baseui.setconfig(b'ui', b'message-output', b'stderr') |
|
98 | 133 | |
|
99 | 134 | # force mercurial to only use 1 thread, otherwise it may try to set a |
|
100 | 135 | # signal in a non-main thread, thus generating a ValueError. |
|
101 | 136 | baseui.setconfig(b'worker', b'numcpus', 1) |
|
102 | 137 | |
|
103 | 138 | # If there is no config for the largefiles extension, we explicitly disable |
|
104 | 139 | # it here. This overrides settings from repositories hgrc file. Recent |
|
105 | 140 | # mercurial versions enable largefiles in hgrc on clone from largefile |
|
106 | 141 | # repo. |
|
107 | 142 | if not baseui.hasconfig(b'extensions', b'largefiles'): |
|
108 | 143 | log.debug('Explicitly disable largefiles extension for repo.') |
|
109 | 144 | baseui.setconfig(b'extensions', b'largefiles', b'!') |
|
110 | 145 | |
|
111 | 146 | return baseui |
|
112 | 147 | |
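A minimal usage sketch of make_ui_from_config, assuming repo_config is an iterable of (section, option, value) string triples as the loop above expects (the values here are illustrative only):

    # hypothetical config triples, for illustration
    repo_config = [
        ('ui', 'username', 'vcsserver'),
        ('phases', 'publish', 'true'),
    ]
    baseui = make_ui_from_config(repo_config)
    # every triple is coerced to bytes via ascii_bytes() before setconfig,
    # and largefiles stays disabled unless the config mentions it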
|
113 | 148 | |
|
114 | 149 | def reraise_safe_exceptions(func): |
|
115 | 150 | """Decorator for converting mercurial exceptions to something neutral.""" |
|
116 | 151 | |
|
117 | 152 | def wrapper(*args, **kwargs): |
|
118 | 153 | try: |
|
119 | 154 | return func(*args, **kwargs) |
|
120 | 155 | except (Abort, InterventionRequired) as e: |
|
121 | 156 | raise_from_original(exceptions.AbortException(e), e) |
|
122 | 157 | except RepoLookupError as e: |
|
123 | 158 | raise_from_original(exceptions.LookupException(e), e) |
|
124 | 159 | except RequirementError as e: |
|
125 | 160 | raise_from_original(exceptions.RequirementException(e), e) |
|
126 | 161 | except RepoError as e: |
|
127 | 162 | raise_from_original(exceptions.VcsException(e), e) |
|
128 | 163 | except LookupError as e: |
|
129 | 164 | raise_from_original(exceptions.LookupException(e), e) |
|
130 | 165 | except Exception as e: |
|
131 | 166 | if not hasattr(e, '_vcs_kind'): |
|
132 | 167 | log.exception("Unhandled exception in hg remote call") |
|
133 | 168 | raise_from_original(exceptions.UnhandledException(e), e) |
|
134 | 169 | |
|
135 | 170 | raise |
|
136 | 171 | return wrapper |
|
137 | 172 | |
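A minimal sketch of what the decorator buys callers, assuming vcsserver's exception factories behave as used above (each returns an exception class tagged with a _vcs_kind marker, which is the attribute the wrapper itself checks before re-raising):

    @reraise_safe_exceptions
    def fragile():
        raise RepoLookupError(b'unknown-rev')

    try:
        fragile()
    except Exception as exc:
        # the raw Mercurial error has been translated into a neutral
        # vcsserver exception carrying _vcs_kind
        print(type(exc), getattr(exc, '_vcs_kind', None))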
|
138 | 173 | |
|
139 | 174 | class MercurialFactory(RepoFactory): |
|
140 | 175 | repo_type = 'hg' |
|
141 | 176 | |
|
142 | 177 | def _create_config(self, config, hooks=True): |
|
143 | 178 | if not hooks: |
|
144 | 179 | |
|
145 | 180 | hooks_to_clean = { |
|
146 | 181 | |
|
147 | 182 | hooks_config.HOOK_REPO_SIZE, |
|
148 | 183 | hooks_config.HOOK_PRE_PULL, |
|
149 | 184 | hooks_config.HOOK_PULL, |
|
150 | 185 | |
|
151 | 186 | hooks_config.HOOK_PRE_PUSH, |
|
152 | 187 | # TODO: what about PRETXT, this was disabled in pre 5.0.0 |
|
153 | 188 | hooks_config.HOOK_PRETX_PUSH, |
|
154 | 189 | |
|
155 | 190 | } |
|
156 | 191 | new_config = [] |
|
157 | 192 | for section, option, value in config: |
|
158 | 193 | if section == 'hooks' and option in hooks_to_clean: |
|
159 | 194 | continue |
|
160 | 195 | new_config.append((section, option, value)) |
|
161 | 196 | config = new_config |
|
162 | 197 | |
|
163 | 198 | baseui = make_ui_from_config(config) |
|
164 | 199 | return baseui |
|
165 | 200 | |
|
166 | 201 | def _create_repo(self, wire, create): |
|
167 | 202 | baseui = self._create_config(wire["config"]) |
|
168 | 203 | repo = instance(baseui, safe_bytes(wire["path"]), create) |
|
169 | 204 | log.debug('repository created: got HG object: %s', repo) |
|
170 | 205 | return repo |
|
171 | 206 | |
|
172 | 207 | def repo(self, wire, create=False): |
|
173 | 208 | """ |
|
174 | 209 | Get a repository instance for the given path. |
|
175 | 210 | """ |
|
176 | 211 | return self._create_repo(wire, create) |
|
177 | 212 | |
|
178 | 213 | |
|
179 | 214 | def patch_ui_message_output(baseui): |
|
180 | 215 | baseui.setconfig(b'ui', b'quiet', b'false') |
|
181 | 216 | output = io.BytesIO() |
|
182 | 217 | |
|
183 | 218 | def write(data, **unused_kwargs): |
|
184 | 219 | output.write(data) |
|
185 | 220 | |
|
186 | 221 | baseui.status = write |
|
187 | 222 | baseui.write = write |
|
188 | 223 | baseui.warn = write |
|
189 | 224 | baseui.debug = write |
|
190 | 225 | |
|
191 | 226 | return baseui, output |
|
192 | 227 | |
|
193 | 228 | |
|
194 | 229 | def get_obfuscated_url(url_obj): |
|
195 | 230 | url_obj.passwd = b'*****' if url_obj.passwd else url_obj.passwd |
|
196 | 231 | url_obj.query = obfuscate_qs(url_obj.query) |
|
197 | 232 | obfuscated_uri = str(url_obj) |
|
198 | 233 | return obfuscated_uri |
|
199 | 234 | |
|
200 | 235 | |
|
201 | 236 | def normalize_url_for_hg(url: str): |
|
202 | 237 | _proto = None |
|
203 | 238 | |
|
204 | 239 | if '+' in url[:url.find('://')]: |
|
205 | 240 | _proto = url[0:url.find('+')] |
|
206 | 241 | url = url[url.find('+') + 1:] |
|
207 | 242 | return url, _proto |
|
208 | 243 | |
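A worked example of the protocol split, assuming the function exactly as written above:

    # 'svn+http://...' style URLs carry the real transport after '+'
    url, proto = normalize_url_for_hg('svn+http://example.com/repo')
    assert url == 'http://example.com/repo' and proto == 'svn'

    # plain URLs pass through untouched
    url, proto = normalize_url_for_hg('https://example.com/repo')
    assert url == 'https://example.com/repo' and proto is None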
|
209 | 244 | |
|
210 | 245 | class HgRemote(RemoteBase): |
|
211 | 246 | |
|
212 | 247 | def __init__(self, factory): |
|
213 | 248 | self._factory = factory |
|
214 | 249 | self._bulk_methods = { |
|
215 | 250 | "affected_files": self.ctx_files, |
|
216 | 251 | "author": self.ctx_user, |
|
217 | 252 | "branch": self.ctx_branch, |
|
218 | 253 | "children": self.ctx_children, |
|
219 | 254 | "date": self.ctx_date, |
|
220 | 255 | "message": self.ctx_description, |
|
221 | 256 | "parents": self.ctx_parents, |
|
222 | 257 | "status": self.ctx_status, |
|
223 | 258 | "obsolete": self.ctx_obsolete, |
|
224 | 259 | "phase": self.ctx_phase, |
|
225 | 260 | "hidden": self.ctx_hidden, |
|
226 | 261 | "_file_paths": self.ctx_list, |
|
227 | 262 | } |
|
228 | 263 | self._bulk_file_methods = { |
|
229 | 264 | "size": self.fctx_size, |
|
230 | 265 | "data": self.fctx_node_data, |
|
231 | 266 | "flags": self.fctx_flags, |
|
232 | 267 | "is_binary": self.is_binary, |
|
233 | 268 | "md5": self.md5_hash, |
|
234 | 269 | } |
|
235 | 270 | |
|
236 | 271 | def _get_ctx(self, repo, ref): |
|
237 | 272 | return get_ctx(repo, ref) |
|
238 | 273 | |
|
239 | 274 | @reraise_safe_exceptions |
|
240 | 275 | def discover_hg_version(self): |
|
241 | 276 | from mercurial import util |
|
242 | 277 | return safe_str(util.version()) |
|
243 | 278 | |
|
244 | 279 | @reraise_safe_exceptions |
|
245 | 280 | def is_empty(self, wire): |
|
246 | 281 | repo = self._factory.repo(wire) |
|
247 | 282 | |
|
248 | 283 | try: |
|
249 | 284 | return len(repo) == 0 |
|
250 | 285 | except Exception: |
|
251 | 286 | log.exception("failed to read object_store") |
|
252 | 287 | return False |
|
253 | 288 | |
|
254 | 289 | @reraise_safe_exceptions |
|
255 | 290 | def bookmarks(self, wire): |
|
256 | 291 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
257 | 292 | region = self._region(wire) |
|
258 | 293 | |
|
259 | 294 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
260 | 295 | def _bookmarks(_context_uid, _repo_id): |
|
261 | 296 | repo = self._factory.repo(wire) |
|
262 | 297 | return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo._bookmarks.items()} |
|
263 | 298 | |
|
264 | 299 | return _bookmarks(context_uid, repo_id) |
|
265 | 300 | |
|
266 | 301 | @reraise_safe_exceptions |
|
267 | 302 | def branches(self, wire, normal, closed): |
|
268 | 303 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
269 | 304 | region = self._region(wire) |
|
270 | 305 | |
|
271 | 306 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
272 | 307 | def _branches(_context_uid, _repo_id, _normal, _closed): |
|
273 | 308 | repo = self._factory.repo(wire) |
|
274 | 309 | iter_branches = repo.branchmap().iterbranches() |
|
275 | 310 | bt = {} |
|
276 | 311 | for branch_name, _heads, tip_node, is_closed in iter_branches: |
|
277 | 312 | if normal and not is_closed: |
|
278 | 313 | bt[safe_str(branch_name)] = ascii_str(hex(tip_node)) |
|
279 | 314 | if closed and is_closed: |
|
280 | 315 | bt[safe_str(branch_name)] = ascii_str(hex(tip_node)) |
|
281 | 316 | |
|
282 | 317 | return bt |
|
283 | 318 | |
|
284 | 319 | return _branches(context_uid, repo_id, normal, closed) |
|
285 | 320 | |
|
286 | 321 | @reraise_safe_exceptions |
|
287 | 322 | def bulk_request(self, wire, commit_id, pre_load): |
|
288 | 323 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
289 | 324 | region = self._region(wire) |
|
290 | 325 | |
|
291 | 326 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
292 | 327 | def _bulk_request(_repo_id, _commit_id, _pre_load): |
|
293 | 328 | result = {} |
|
294 | 329 | for attr in pre_load: |
|
295 | 330 | try: |
|
296 | 331 | method = self._bulk_methods[attr] |
|
297 | 332 | wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache |
|
298 | 333 | result[attr] = method(wire, commit_id) |
|
299 | 334 | except KeyError as e: |
|
300 | 335 | raise exceptions.VcsException(e)( |
|
301 | 336 | 'Unknown bulk attribute: "%s"' % attr) |
|
302 | 337 | return result |
|
303 | 338 | |
|
304 | 339 | return _bulk_request(repo_id, commit_id, sorted(pre_load)) |
|
305 | 340 | |
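A usage sketch with a hypothetical wire dict; valid pre_load names are exactly the keys of self._bulk_methods above:

    # illustrative values only; wire identifies the repo server-side
    wire = {'path': '/repos/my-hg-repo', 'config': [], 'context': 'ctx-1'}
    data = hg_remote.bulk_request(wire, commit_id='a' * 40,
                                  pre_load=['author', 'branch', 'parents'])
    # data == {'author': ..., 'branch': ..., 'parents': [...]}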
|
306 | 341 | @reraise_safe_exceptions |
|
307 | 342 | def ctx_branch(self, wire, commit_id): |
|
308 | 343 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
309 | 344 | region = self._region(wire) |
|
310 | 345 | |
|
311 | 346 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
312 | 347 | def _ctx_branch(_repo_id, _commit_id): |
|
313 | 348 | repo = self._factory.repo(wire) |
|
314 | 349 | ctx = self._get_ctx(repo, commit_id) |
|
315 | 350 | return ctx.branch() |
|
316 | 351 | return _ctx_branch(repo_id, commit_id) |
|
317 | 352 | |
|
318 | 353 | @reraise_safe_exceptions |
|
319 | 354 | def ctx_date(self, wire, commit_id): |
|
320 | 355 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
321 | 356 | region = self._region(wire) |
|
322 | 357 | |
|
323 | 358 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
324 | 359 | def _ctx_date(_repo_id, _commit_id): |
|
325 | 360 | repo = self._factory.repo(wire) |
|
326 | 361 | ctx = self._get_ctx(repo, commit_id) |
|
327 | 362 | return ctx.date() |
|
328 | 363 | return _ctx_date(repo_id, commit_id) |
|
329 | 364 | |
|
330 | 365 | @reraise_safe_exceptions |
|
331 | 366 | def ctx_description(self, wire, revision): |
|
332 | 367 | repo = self._factory.repo(wire) |
|
333 | 368 | ctx = self._get_ctx(repo, revision) |
|
334 | 369 | return ctx.description() |
|
335 | 370 | |
|
336 | 371 | @reraise_safe_exceptions |
|
337 | 372 | def ctx_files(self, wire, commit_id): |
|
338 | 373 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
339 | 374 | region = self._region(wire) |
|
340 | 375 | |
|
341 | 376 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
342 | 377 | def _ctx_files(_repo_id, _commit_id): |
|
343 | 378 | repo = self._factory.repo(wire) |
|
344 | 379 | ctx = self._get_ctx(repo, commit_id) |
|
345 | 380 | return ctx.files() |
|
346 | 381 | |
|
347 | 382 | return _ctx_files(repo_id, commit_id) |
|
348 | 383 | |
|
349 | 384 | @reraise_safe_exceptions |
|
350 | 385 | def ctx_list(self, path, revision): |
|
351 | 386 | repo = self._factory.repo(path) |
|
352 | 387 | ctx = self._get_ctx(repo, revision) |
|
353 | 388 | return list(ctx) |
|
354 | 389 | |
|
355 | 390 | @reraise_safe_exceptions |
|
356 | 391 | def ctx_parents(self, wire, commit_id): |
|
357 | 392 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
358 | 393 | region = self._region(wire) |
|
359 | 394 | |
|
360 | 395 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
361 | 396 | def _ctx_parents(_repo_id, _commit_id): |
|
362 | 397 | repo = self._factory.repo(wire) |
|
363 | 398 | ctx = self._get_ctx(repo, commit_id) |
|
364 | 399 | return [parent.hex() for parent in ctx.parents() |
|
365 | 400 | if not (parent.hidden() or parent.obsolete())] |
|
366 | 401 | |
|
367 | 402 | return _ctx_parents(repo_id, commit_id) |
|
368 | 403 | |
|
369 | 404 | @reraise_safe_exceptions |
|
370 | 405 | def ctx_children(self, wire, commit_id): |
|
371 | 406 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
372 | 407 | region = self._region(wire) |
|
373 | 408 | |
|
374 | 409 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
375 | 410 | def _ctx_children(_repo_id, _commit_id): |
|
376 | 411 | repo = self._factory.repo(wire) |
|
377 | 412 | ctx = self._get_ctx(repo, commit_id) |
|
378 | 413 | return [child.hex() for child in ctx.children() |
|
379 | 414 | if not (child.hidden() or child.obsolete())] |
|
380 | 415 | |
|
381 | 416 | return _ctx_children(repo_id, commit_id) |
|
382 | 417 | |
|
383 | 418 | @reraise_safe_exceptions |
|
384 | 419 | def ctx_phase(self, wire, commit_id): |
|
385 | 420 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
386 | 421 | region = self._region(wire) |
|
387 | 422 | |
|
388 | 423 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
389 | 424 | def _ctx_phase(_context_uid, _repo_id, _commit_id): |
|
390 | 425 | repo = self._factory.repo(wire) |
|
391 | 426 | ctx = self._get_ctx(repo, commit_id) |
|
392 | 427 | # public=0, draft=1, secret=2 |
|
393 | 428 | return ctx.phase() |
|
394 | 429 | return _ctx_phase(context_uid, repo_id, commit_id) |
|
395 | 430 | |
|
396 | 431 | @reraise_safe_exceptions |
|
397 | 432 | def ctx_obsolete(self, wire, commit_id): |
|
398 | 433 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
399 | 434 | region = self._region(wire) |
|
400 | 435 | |
|
401 | 436 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
402 | 437 | def _ctx_obsolete(_context_uid, _repo_id, _commit_id): |
|
403 | 438 | repo = self._factory.repo(wire) |
|
404 | 439 | ctx = self._get_ctx(repo, commit_id) |
|
405 | 440 | return ctx.obsolete() |
|
406 | 441 | return _ctx_obsolete(context_uid, repo_id, commit_id) |
|
407 | 442 | |
|
408 | 443 | @reraise_safe_exceptions |
|
409 | 444 | def ctx_hidden(self, wire, commit_id): |
|
410 | 445 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
411 | 446 | region = self._region(wire) |
|
412 | 447 | |
|
413 | 448 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
414 | 449 | def _ctx_hidden(_context_uid, _repo_id, _commit_id): |
|
415 | 450 | repo = self._factory.repo(wire) |
|
416 | 451 | ctx = self._get_ctx(repo, commit_id) |
|
417 | 452 | return ctx.hidden() |
|
418 | 453 | return _ctx_hidden(context_uid, repo_id, commit_id) |
|
419 | 454 | |
|
420 | 455 | @reraise_safe_exceptions |
|
421 | 456 | def ctx_substate(self, wire, revision): |
|
422 | 457 | repo = self._factory.repo(wire) |
|
423 | 458 | ctx = self._get_ctx(repo, revision) |
|
424 | 459 | return ctx.substate |
|
425 | 460 | |
|
426 | 461 | @reraise_safe_exceptions |
|
427 | 462 | def ctx_status(self, wire, revision): |
|
428 | 463 | repo = self._factory.repo(wire) |
|
429 | 464 | ctx = self._get_ctx(repo, revision) |
|
430 | 465 | status = repo[ctx.p1().node()].status(other=ctx.node()) |
|
431 | 466 | # the status object (an odd, custom named tuple in mercurial) is not |

432 | 467 | # correctly serializable; we make it a list, as the underlying |

433 | 468 | # API expects this to be a list |
|
434 | 469 | return list(status) |
|
435 | 470 | |
|
436 | 471 | @reraise_safe_exceptions |
|
437 | 472 | def ctx_user(self, wire, revision): |
|
438 | 473 | repo = self._factory.repo(wire) |
|
439 | 474 | ctx = self._get_ctx(repo, revision) |
|
440 | 475 | return ctx.user() |
|
441 | 476 | |
|
442 | 477 | @reraise_safe_exceptions |
|
443 | 478 | def check_url(self, url, config): |
|
444 | 479 | url, _proto = normalize_url_for_hg(url) |
|
445 | 480 | url_obj = url_parser(safe_bytes(url)) |
|
446 | 481 | |
|
447 | 482 | test_uri = safe_str(url_obj.authinfo()[0]) |
|
448 | 483 | authinfo = url_obj.authinfo()[1] |
|
449 | 484 | obfuscated_uri = get_obfuscated_url(url_obj) |
|
450 | 485 | log.info("Checking URL for remote cloning/import: %s", obfuscated_uri) |
|
451 | 486 | |
|
452 | 487 | handlers = [] |
|
453 | 488 | if authinfo: |
|
454 | 489 | # create a password manager |
|
455 | 490 | passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm() |
|
456 | 491 | passmgr.add_password(*authinfo) |
|
457 | 492 | |
|
458 | 493 | handlers.extend((httpbasicauthhandler(passmgr), |
|
459 | 494 | httpdigestauthhandler(passmgr))) |
|
460 | 495 | |
|
461 | 496 | o = urllib.request.build_opener(*handlers) |
|
462 | 497 | o.addheaders = [('Content-Type', 'application/mercurial-0.1'), |
|
463 | 498 | ('Accept', 'application/mercurial-0.1')] |
|
464 | 499 | |
|
465 | 500 | q = {"cmd": 'between'} |
|
466 | 501 | q.update({'pairs': "{}-{}".format('0' * 40, '0' * 40)}) |
|
467 | 502 | qs = '?%s' % urllib.parse.urlencode(q) |
|
468 | 503 | cu = f"{test_uri}{qs}" |
|
469 | 504 | req = urllib.request.Request(cu, None, {}) |
|
470 | 505 | |
|
471 | 506 | try: |
|
472 | 507 | log.debug("Trying to open URL %s", obfuscated_uri) |
|
473 | 508 | resp = o.open(req) |
|
474 | 509 | if resp.code != 200: |
|
475 | 510 | raise exceptions.URLError()('Return Code is not 200') |
|
476 | 511 | except Exception as e: |
|
477 | 512 | log.warning("URL cannot be opened: %s", obfuscated_uri, exc_info=True) |
|
478 | 513 | # means it cannot be cloned |
|
479 | 514 | raise exceptions.URLError(e)(f"[{obfuscated_uri}] org_exc: {e}") |
|
480 | 515 | |
|
481 | 516 | # now check if it's a proper hg repo, but don't do it for svn |
|
482 | 517 | try: |
|
483 | 518 | if _proto == 'svn': |
|
484 | 519 | pass |
|
485 | 520 | else: |
|
486 | 521 | # check for pure hg repos |
|
487 | 522 | log.debug( |
|
488 | 523 | "Verifying if URL is a Mercurial repository: %s", obfuscated_uri) |
|
489 | 524 | ui = make_ui_from_config(config) |
|
490 | 525 | peer_checker = makepeer(ui, safe_bytes(url)) |
|
491 | 526 | peer_checker.lookup(b'tip') |
|
492 | 527 | except Exception as e: |
|
493 | 528 | log.warning("URL is not a valid Mercurial repository: %s", |
|
494 | 529 | obfuscated_uri) |
|
495 | 530 | raise exceptions.URLError(e)( |
|
496 | 531 | "url [%s] does not look like an hg repo org_exc: %s" |
|
497 | 532 | % (obfuscated_uri, e)) |
|
498 | 533 | |
|
499 | 534 | log.info("URL is a valid Mercurial repository: %s", obfuscated_uri) |
|
500 | 535 | return True |
|
501 | 536 | |
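For reference, the probe sent above is a plain Mercurial 'between' wire command with a null node pair; a sketch of the resulting URL shape (host and repo names are hypothetical):

    q = {'cmd': 'between', 'pairs': f"{'0' * 40}-{'0' * 40}"}
    probe = f"https://host/repo?{urllib.parse.urlencode(q)}"
    # -> https://host/repo?cmd=between&pairs=000...0-000...0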
|
502 | 537 | @reraise_safe_exceptions |
|
503 | 538 | def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_git, opt_ignorews, context): |
|
504 | 539 | repo = self._factory.repo(wire) |
|
505 | 540 | |
|
506 | 541 | if file_filter: |
|
507 | 542 | # unpack the file-filter |
|
508 | 543 | repo_path, node_path = file_filter |
|
509 | 544 | match_filter = match(safe_bytes(repo_path), b'', [safe_bytes(node_path)]) |
|
510 | 545 | else: |
|
511 | 546 | match_filter = file_filter |
|
512 | 547 | opts = diffopts(git=opt_git, ignorews=opt_ignorews, context=context, showfunc=1) |
|
513 | 548 | |
|
514 | 549 | try: |
|
515 | 550 | diff_iter = patch.diff( |
|
516 | 551 | repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts) |
|
517 | 552 | return BytesEnvelope(b"".join(diff_iter)) |
|
518 | 553 | except RepoLookupError as e: |
|
519 | 554 | raise exceptions.LookupException(e)() |
|
520 | 555 | |
|
521 | 556 | @reraise_safe_exceptions |
|
522 | 557 | def node_history(self, wire, revision, path, limit): |
|
523 | 558 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
524 | 559 | region = self._region(wire) |
|
525 | 560 | |
|
526 | 561 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
527 | 562 | def _node_history(_context_uid, _repo_id, _revision, _path, _limit): |
|
528 | 563 | repo = self._factory.repo(wire) |
|
529 | 564 | |
|
530 | 565 | ctx = self._get_ctx(repo, revision) |
|
531 | 566 | fctx = ctx.filectx(safe_bytes(path)) |
|
532 | 567 | |
|
533 | 568 | def history_iter(): |
|
534 | 569 | limit_rev = fctx.rev() |
|
535 | 570 | for obj in reversed(list(fctx.filelog())): |
|
536 | 571 | obj = fctx.filectx(obj) |
|
537 | 572 | ctx = obj.changectx() |
|
538 | 573 | if ctx.hidden() or ctx.obsolete(): |
|
539 | 574 | continue |
|
540 | 575 | |
|
541 | 576 | if limit_rev >= obj.rev(): |
|
542 | 577 | yield obj |
|
543 | 578 | |
|
544 | 579 | history = [] |
|
545 | 580 | for cnt, obj in enumerate(history_iter()): |
|
546 | 581 | if limit and cnt >= limit: |
|
547 | 582 | break |
|
548 | 583 | history.append(hex(obj.node())) |
|
549 | 584 | |
|
550 | 585 | return [x for x in history] |
|
551 | 586 | return _node_history(context_uid, repo_id, revision, path, limit) |
|
552 | 587 | |
|
553 | 588 | @reraise_safe_exceptions |
|
554 | 589 | def node_history_untill(self, wire, revision, path, limit): |
|
555 | 590 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
556 | 591 | region = self._region(wire) |
|
557 | 592 | |
|
558 | 593 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
559 | 594 | def _node_history_until(_context_uid, _repo_id, _revision, _path, _limit): |
|
560 | 595 | repo = self._factory.repo(wire) |
|
561 | 596 | ctx = self._get_ctx(repo, revision) |
|
562 | 597 | fctx = ctx.filectx(safe_bytes(path)) |
|
563 | 598 | |
|
564 | 599 | file_log = list(fctx.filelog()) |
|
565 | 600 | if limit: |
|
566 | 601 | # Limit to the last n items |
|
567 | 602 | file_log = file_log[-limit:] |
|
568 | 603 | |
|
569 | 604 | return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)] |
|
570 | 605 | return _node_history_until(context_uid, repo_id, revision, path, limit) |
|
571 | 606 | |
|
572 | 607 | @reraise_safe_exceptions |
|
573 | 608 | def bulk_file_request(self, wire, commit_id, path, pre_load): |
|
574 | 609 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
575 | 610 | region = self._region(wire) |
|
576 | 611 | |
|
577 | 612 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
578 | 613 | def _bulk_file_request(_repo_id, _commit_id, _path, _pre_load): |
|
579 | 614 | result = {} |
|
580 | 615 | for attr in pre_load: |
|
581 | 616 | try: |
|
582 | 617 | method = self._bulk_file_methods[attr] |
|
583 | 618 | wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache |
|
584 | 619 | result[attr] = method(wire, _commit_id, _path) |
|
585 | 620 | except KeyError as e: |
|
586 | 621 | raise exceptions.VcsException(e)(f'Unknown bulk attribute: "{attr}"') |
|
587 | 622 | return result |
|
588 | 623 | |
|
589 | 624 | return BinaryEnvelope(_bulk_file_request(repo_id, commit_id, path, sorted(pre_load))) |
|
590 | 625 | |
|
591 | 626 | @reraise_safe_exceptions |
|
592 | 627 | def fctx_annotate(self, wire, revision, path): |
|
593 | 628 | repo = self._factory.repo(wire) |
|
594 | 629 | ctx = self._get_ctx(repo, revision) |
|
595 | 630 | fctx = ctx.filectx(safe_bytes(path)) |
|
596 | 631 | |
|
597 | 632 | result = [] |
|
598 | 633 | for i, annotate_obj in enumerate(fctx.annotate(), 1): |
|
599 | 634 | ln_no = i |
|
600 | 635 | sha = hex(annotate_obj.fctx.node()) |
|
601 | 636 | content = annotate_obj.text |
|
602 | 637 | result.append((ln_no, ascii_str(sha), content)) |
|
603 | 638 | return BinaryEnvelope(result) |
|
604 | 639 | |
|
605 | 640 | @reraise_safe_exceptions |
|
606 | 641 | def fctx_node_data(self, wire, revision, path): |
|
607 | 642 | repo = self._factory.repo(wire) |
|
608 | 643 | ctx = self._get_ctx(repo, revision) |
|
609 | 644 | fctx = ctx.filectx(safe_bytes(path)) |
|
610 | 645 | return BytesEnvelope(fctx.data()) |
|
611 | 646 | |
|
612 | 647 | @reraise_safe_exceptions |
|
613 | 648 | def fctx_flags(self, wire, commit_id, path): |
|
614 | 649 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
615 | 650 | region = self._region(wire) |
|
616 | 651 | |
|
617 | 652 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
618 | 653 | def _fctx_flags(_repo_id, _commit_id, _path): |
|
619 | 654 | repo = self._factory.repo(wire) |
|
620 | 655 | ctx = self._get_ctx(repo, commit_id) |
|
621 | 656 | fctx = ctx.filectx(safe_bytes(path)) |
|
622 | 657 | return fctx.flags() |
|
623 | 658 | |
|
624 | 659 | return _fctx_flags(repo_id, commit_id, path) |
|
625 | 660 | |
|
626 | 661 | @reraise_safe_exceptions |
|
627 | 662 | def fctx_size(self, wire, commit_id, path): |
|
628 | 663 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
629 | 664 | region = self._region(wire) |
|
630 | 665 | |
|
631 | 666 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
632 | 667 | def _fctx_size(_repo_id, _revision, _path): |
|
633 | 668 | repo = self._factory.repo(wire) |
|
634 | 669 | ctx = self._get_ctx(repo, commit_id) |
|
635 | 670 | fctx = ctx.filectx(safe_bytes(path)) |
|
636 | 671 | return fctx.size() |
|
637 | 672 | return _fctx_size(repo_id, commit_id, path) |
|
638 | 673 | |
|
639 | 674 | @reraise_safe_exceptions |
|
640 | 675 | def get_all_commit_ids(self, wire, name): |
|
641 | 676 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
642 | 677 | region = self._region(wire) |
|
643 | 678 | |
|
644 | 679 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
645 | 680 | def _get_all_commit_ids(_context_uid, _repo_id, _name): |
|
646 | 681 | repo = self._factory.repo(wire) |
|
647 | 682 | revs = [ascii_str(repo[x].hex()) for x in repo.filtered(b'visible').changelog.revs()] |
|
648 | 683 | return revs |
|
649 | 684 | return _get_all_commit_ids(context_uid, repo_id, name) |
|
650 | 685 | |
|
651 | 686 | @reraise_safe_exceptions |
|
652 | 687 | def get_config_value(self, wire, section, name, untrusted=False): |
|
653 | 688 | repo = self._factory.repo(wire) |
|
654 | 689 | return repo.ui.config(ascii_bytes(section), ascii_bytes(name), untrusted=untrusted) |
|
655 | 690 | |
|
656 | 691 | @reraise_safe_exceptions |
|
657 | 692 | def is_large_file(self, wire, commit_id, path): |
|
658 | 693 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
659 | 694 | region = self._region(wire) |
|
660 | 695 | |
|
661 | 696 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
662 | 697 | def _is_large_file(_context_uid, _repo_id, _commit_id, _path): |
|
663 | 698 | return largefiles.lfutil.isstandin(safe_bytes(path)) |
|
664 | 699 | |
|
665 | 700 | return _is_large_file(context_uid, repo_id, commit_id, path) |
|
666 | 701 | |
|
667 | 702 | @reraise_safe_exceptions |
|
668 | 703 | def is_binary(self, wire, revision, path): |
|
669 | 704 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
670 | 705 | region = self._region(wire) |
|
671 | 706 | |
|
672 | 707 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
673 | 708 | def _is_binary(_repo_id, _sha, _path): |
|
674 | 709 | repo = self._factory.repo(wire) |
|
675 | 710 | ctx = self._get_ctx(repo, revision) |
|
676 | 711 | fctx = ctx.filectx(safe_bytes(path)) |
|
677 | 712 | return fctx.isbinary() |
|
678 | 713 | |
|
679 | 714 | return _is_binary(repo_id, revision, path) |
|
680 | 715 | |
|
681 | 716 | @reraise_safe_exceptions |
|
682 | 717 | def md5_hash(self, wire, revision, path): |
|
683 | 718 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
684 | 719 | region = self._region(wire) |
|
685 | 720 | |
|
686 | 721 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
687 | 722 | def _md5_hash(_repo_id, _sha, _path): |
|
688 | 723 | repo = self._factory.repo(wire) |
|
689 | 724 | ctx = self._get_ctx(repo, revision) |
|
690 | 725 | fctx = ctx.filectx(safe_bytes(path)) |
|
691 | 726 | return hashlib.md5(fctx.data()).hexdigest() |
|
692 | 727 | |
|
693 | 728 | return _md5_hash(repo_id, revision, path) |
|
694 | 729 | |
|
695 | 730 | @reraise_safe_exceptions |
|
696 | 731 | def in_largefiles_store(self, wire, sha): |
|
697 | 732 | repo = self._factory.repo(wire) |
|
698 | 733 | return largefiles.lfutil.instore(repo, sha) |
|
699 | 734 | |
|
700 | 735 | @reraise_safe_exceptions |
|
701 | 736 | def in_user_cache(self, wire, sha): |
|
702 | 737 | repo = self._factory.repo(wire) |
|
703 | 738 | return largefiles.lfutil.inusercache(repo.ui, sha) |
|
704 | 739 | |
|
705 | 740 | @reraise_safe_exceptions |
|
706 | 741 | def store_path(self, wire, sha): |
|
707 | 742 | repo = self._factory.repo(wire) |
|
708 | 743 | return largefiles.lfutil.storepath(repo, sha) |
|
709 | 744 | |
|
710 | 745 | @reraise_safe_exceptions |
|
711 | 746 | def link(self, wire, sha, path): |
|
712 | 747 | repo = self._factory.repo(wire) |
|
713 | 748 | largefiles.lfutil.link( |
|
714 | 749 | largefiles.lfutil.usercachepath(repo.ui, sha), path) |
|
715 | 750 | |
|
716 | 751 | @reraise_safe_exceptions |
|
717 | 752 | def localrepository(self, wire, create=False): |
|
718 | 753 | self._factory.repo(wire, create=create) |
|
719 | 754 | |
|
720 | 755 | @reraise_safe_exceptions |
|
721 | 756 | def lookup(self, wire, revision, both): |
|
722 | 757 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
723 | 758 | region = self._region(wire) |
|
724 | 759 | |
|
725 | 760 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
726 | 761 | def _lookup(_context_uid, _repo_id, _revision, _both): |
|
727 | 762 | repo = self._factory.repo(wire) |
|
728 | 763 | rev = _revision |
|
729 | 764 | if isinstance(rev, int): |
|
730 | 765 | # NOTE(marcink): |
|
731 | 766 | # since Mercurial doesn't support negative indexes properly |
|
732 | 767 | # we need to shift accordingly by one to get proper index, e.g |
|
733 | 768 | # repo[-1] => repo[-2] |
|
734 | 769 | # repo[0] => repo[-1] |
|
735 | 770 | if rev <= 0: |
|
736 | 771 | rev = rev + -1 |
|
737 | 772 | try: |
|
738 | 773 | ctx = self._get_ctx(repo, rev) |
|
739 | except AmbiguousPrefixLookupError: | 

774 | except AmbiguousPrefixLookupError: | 
|
740 | 775 | e = RepoLookupError(rev) |
|
741 | e._org_exc_tb = traceback.format_exc() | 

776 | e._org_exc_tb = format_exc(sys.exc_info()) | 
|
742 | 777 | raise exceptions.LookupException(e)(rev) |
|
743 | 778 | except (TypeError, RepoLookupError, binascii.Error) as e: |
|
744 | e._org_exc_tb = traceback.format_exc() | 

779 | e._org_exc_tb = format_exc(sys.exc_info()) | 
|
745 | 780 | raise exceptions.LookupException(e)(rev) |
|
746 | 781 | except LookupError as e: |
|
747 | e._org_exc_tb = traceback.format_exc() | 

782 | e._org_exc_tb = format_exc(sys.exc_info()) | 
|
748 | 783 | raise exceptions.LookupException(e)(e.name) |
|
749 | 784 | |
|
750 | 785 | if not both: |
|
751 | 786 | return ctx.hex() |
|
752 | 787 | |
|
753 | 788 | ctx = repo[ctx.hex()] |
|
754 | 789 | return ctx.hex(), ctx.rev() |
|
755 | 790 | |
|
756 | 791 | return _lookup(context_uid, repo_id, revision, both) |
|
757 | 792 | |
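The negative-index shift in _lookup can be sanity-checked in isolation; a standalone sketch mirroring just that branch:

    def shift(rev):
        # same rule as _lookup: non-positive integer indexes move down by one
        if isinstance(rev, int) and rev <= 0:
            rev = rev + -1
        return rev

    assert shift(0) == -1    # repo[0]  => repo[-1]
    assert shift(-1) == -2   # repo[-1] => repo[-2]
    assert shift(5) == 5     # positive indexes are passed through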
|
758 | 793 | @reraise_safe_exceptions |
|
759 | 794 | def sync_push(self, wire, url): |
|
760 | 795 | if not self.check_url(url, wire['config']): |
|
761 | 796 | return |
|
762 | 797 | |
|
763 | 798 | repo = self._factory.repo(wire) |
|
764 | 799 | |
|
765 | 800 | # Disable any prompts for this repo |
|
766 | 801 | repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y') |
|
767 | 802 | |
|
768 | 803 | bookmarks = list(dict(repo._bookmarks).keys()) |
|
769 | 804 | remote = peer(repo, {}, safe_bytes(url)) |
|
770 | 805 | # Disable any prompts for this remote |
|
771 | 806 | remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y') |
|
772 | 807 | |
|
773 | 808 | return exchange.push( |
|
774 | 809 | repo, remote, newbranch=True, bookmarks=bookmarks).cgresult |
|
775 | 810 | |
|
776 | 811 | @reraise_safe_exceptions |
|
777 | 812 | def revision(self, wire, rev): |
|
778 | 813 | repo = self._factory.repo(wire) |
|
779 | 814 | ctx = self._get_ctx(repo, rev) |
|
780 | 815 | return ctx.rev() |
|
781 | 816 | |
|
782 | 817 | @reraise_safe_exceptions |
|
783 | 818 | def rev_range(self, wire, commit_filter): |
|
784 | 819 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
785 | 820 | region = self._region(wire) |
|
786 | 821 | |
|
787 | 822 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
788 | 823 | def _rev_range(_context_uid, _repo_id, _filter): |
|
789 | 824 | repo = self._factory.repo(wire) |
|
790 | 825 | revisions = [ |
|
791 | 826 | ascii_str(repo[rev].hex()) |
|
792 | 827 | for rev in revrange(repo, list(map(ascii_bytes, commit_filter))) |
|
793 | 828 | ] |
|
794 | 829 | return revisions |
|
795 | 830 | |
|
796 | 831 | return _rev_range(context_uid, repo_id, sorted(commit_filter)) |
|
797 | 832 | |
|
798 | 833 | @reraise_safe_exceptions |
|
799 | 834 | def rev_range_hash(self, wire, node): |
|
800 | 835 | repo = self._factory.repo(wire) |
|
801 | 836 | |
|
802 | 837 | def get_revs(repo, rev_opt): |
|
803 | 838 | if rev_opt: |
|
804 | 839 | revs = revrange(repo, rev_opt) |
|
805 | 840 | if len(revs) == 0: |
|
806 | 841 | return (nullrev, nullrev) |
|
807 | 842 | return max(revs), min(revs) |
|
808 | 843 | else: |
|
809 | 844 | return len(repo) - 1, 0 |
|
810 | 845 | |
|
811 | 846 | stop, start = get_revs(repo, [node + ':']) |
|
812 | 847 | revs = [ascii_str(repo[r].hex()) for r in range(start, stop + 1)] |
|
813 | 848 | return revs |
|
814 | 849 | |
|
815 | 850 | @reraise_safe_exceptions |
|
816 | 851 | def revs_from_revspec(self, wire, rev_spec, *args, **kwargs): |
|
817 | 852 | org_path = safe_bytes(wire["path"]) |
|
818 | 853 | other_path = safe_bytes(kwargs.pop('other_path', '')) |
|
819 | 854 | |
|
820 | 855 | # case when we want to compare two independent repositories |
|
821 | 856 | if other_path and other_path != wire["path"]: |
|
822 | 857 | baseui = self._factory._create_config(wire["config"]) |
|
823 | 858 | repo = unionrepo.makeunionrepository(baseui, other_path, org_path) |
|
824 | 859 | else: |
|
825 | 860 | repo = self._factory.repo(wire) |
|
826 | 861 | return list(repo.revs(rev_spec, *args)) |
|
827 | 862 | |
|
828 | 863 | @reraise_safe_exceptions |
|
829 | 864 | def verify(self, wire,): |
|
830 | 865 | repo = self._factory.repo(wire) |
|
831 | 866 | baseui = self._factory._create_config(wire['config']) |
|
832 | 867 | |
|
833 | 868 | baseui, output = patch_ui_message_output(baseui) |
|
834 | 869 | |
|
835 | 870 | repo.ui = baseui |
|
836 | 871 | verify.verify(repo) |
|
837 | 872 | return output.getvalue() |
|
838 | 873 | |
|
839 | 874 | @reraise_safe_exceptions |
|
840 | 875 | def hg_update_cache(self, wire,): |
|
841 | 876 | repo = self._factory.repo(wire) |
|
842 | 877 | baseui = self._factory._create_config(wire['config']) |
|
843 | 878 | baseui, output = patch_ui_message_output(baseui) |
|
844 | 879 | |
|
845 | 880 | repo.ui = baseui |
|
846 | 881 | with repo.wlock(), repo.lock(): |
|
847 | 882 | repo.updatecaches(full=True) |
|
848 | 883 | |
|
849 | 884 | return output.getvalue() |
|
850 | 885 | |
|
851 | 886 | @reraise_safe_exceptions |
|
852 | 887 | def hg_rebuild_fn_cache(self, wire,): |
|
853 | 888 | repo = self._factory.repo(wire) |
|
854 | 889 | baseui = self._factory._create_config(wire['config']) |
|
855 | 890 | baseui, output = patch_ui_message_output(baseui) |
|
856 | 891 | |
|
857 | 892 | repo.ui = baseui |
|
858 | 893 | |
|
859 | 894 | repair.rebuildfncache(baseui, repo) |
|
860 | 895 | |
|
861 | 896 | return output.getvalue() |
|
862 | 897 | |
|
863 | 898 | @reraise_safe_exceptions |
|
864 | 899 | def tags(self, wire): |
|
865 | 900 | cache_on, context_uid, repo_id = self._cache_on(wire) |
|
866 | 901 | region = self._region(wire) |
|
867 | 902 | |
|
868 | 903 | @region.conditional_cache_on_arguments(condition=cache_on) |
|
869 | 904 | def _tags(_context_uid, _repo_id): |
|
870 | 905 | repo = self._factory.repo(wire) |
|
871 | 906 | return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo.tags().items()} |
|
872 | 907 | |
|
873 | 908 | return _tags(context_uid, repo_id) |
|
874 | 909 | |
|
875 | 910 | @reraise_safe_exceptions |
|
876 | 911 | def update(self, wire, node='', clean=False): |
|
877 | 912 | repo = self._factory.repo(wire) |
|
878 | 913 | baseui = self._factory._create_config(wire['config']) |
|
879 | 914 | node = safe_bytes(node) |
|
880 | 915 | |
|
881 | 916 | commands.update(baseui, repo, node=node, clean=clean) |
|
882 | 917 | |
|
883 | 918 | @reraise_safe_exceptions |
|
884 | 919 | def identify(self, wire): |
|
885 | 920 | repo = self._factory.repo(wire) |
|
886 | 921 | baseui = self._factory._create_config(wire['config']) |
|
887 | 922 | output = io.BytesIO() |
|
888 | 923 | baseui.write = output.write |
|
889 | 924 | # This is required to get a full node id |
|
890 | 925 | baseui.debugflag = True |
|
891 | 926 | commands.identify(baseui, repo, id=True) |
|
892 | 927 | |
|
893 | 928 | return output.getvalue() |
|
894 | 929 | |
|
895 | 930 | @reraise_safe_exceptions |
|
896 | 931 | def heads(self, wire, branch=None): |
|
897 | 932 | repo = self._factory.repo(wire) |
|
898 | 933 | baseui = self._factory._create_config(wire['config']) |
|
899 | 934 | output = io.BytesIO() |
|
900 | 935 | |
|
901 | 936 | def write(data, **unused_kwargs): |
|
902 | 937 | output.write(data) |
|
903 | 938 | |
|
904 | 939 | baseui.write = write |
|
905 | 940 | if branch: |
|
906 | 941 | args = [safe_bytes(branch)] |
|
907 | 942 | else: |
|
908 | 943 | args = [] |
|
909 | 944 | commands.heads(baseui, repo, template=b'{node} ', *args) |
|
910 | 945 | |
|
911 | 946 | return output.getvalue() |
|
912 | 947 | |
|
913 | 948 | @reraise_safe_exceptions |
|
914 | 949 | def ancestor(self, wire, revision1, revision2): |
|
915 | 950 | repo = self._factory.repo(wire) |
|
916 | 951 | changelog = repo.changelog |
|
917 | 952 | lookup = repo.lookup |
|
918 | 953 | a = changelog.ancestor(lookup(safe_bytes(revision1)), lookup(safe_bytes(revision2))) |
|
919 | 954 | return hex(a) |
|
920 | 955 | |
|
921 | 956 | @reraise_safe_exceptions |
|
922 | 957 | def clone(self, wire, source, dest, update_after_clone=False, hooks=True): |
|
923 | 958 | baseui = self._factory._create_config(wire["config"], hooks=hooks) |
|
924 | 959 | clone(baseui, safe_bytes(source), safe_bytes(dest), noupdate=not update_after_clone) |
|
925 | 960 | |
|
926 | 961 | @reraise_safe_exceptions |
|
927 | 962 | def commitctx(self, wire, message, parents, commit_time, commit_timezone, user, files, extra, removed, updated): |
|
928 | 963 | |
|
929 | 964 | repo = self._factory.repo(wire) |
|
930 | 965 | baseui = self._factory._create_config(wire['config']) |
|
931 | 966 | publishing = baseui.configbool(b'phases', b'publish') |
|
932 | 967 | |
|
933 | 968 | def _filectxfn(_repo, ctx, path: bytes): |
|
934 | 969 | """ |
|
935 | 970 | Marks given path as added/changed/removed in a given _repo. This is |
|
936 | 971 | for internal mercurial commit function. |
|
937 | 972 | """ |
|
938 | 973 | |
|
939 | 974 | # check if this path is removed |
|
940 | 975 | if safe_str(path) in removed: |
|
941 | 976 | # returning None is a way to mark node for removal |
|
942 | 977 | return None |
|
943 | 978 | |
|
944 | 979 | # check if this path was added or changed |
|
945 | 980 | for node in updated: |
|
946 | 981 | if safe_bytes(node['path']) == path: |
|
947 | 982 | return memfilectx( |
|
948 | 983 | _repo, |
|
949 | 984 | changectx=ctx, |
|
950 | 985 | path=safe_bytes(node['path']), |
|
951 | 986 | data=safe_bytes(node['content']), |
|
952 | 987 | islink=False, |
|
953 | 988 | isexec=bool(node['mode'] & stat.S_IXUSR), |
|
954 | 989 | copysource=False) |
|
955 | 990 | abort_exc = exceptions.AbortException() |
|
956 | 991 | raise abort_exc(f"Given path hasn't been marked as added, changed or removed ({path})") |
|
957 | 992 | |
|
958 | 993 | if publishing: |
|
959 | 994 | new_commit_phase = b'public' |
|
960 | 995 | else: |
|
961 | 996 | new_commit_phase = b'draft' |
|
962 | 997 | with repo.ui.configoverride({(b'phases', b'new-commit'): new_commit_phase}): |
|
963 | 998 | kwargs = {safe_bytes(k): safe_bytes(v) for k, v in extra.items()} |
|
964 | 999 | commit_ctx = memctx( |
|
965 | 1000 | repo=repo, |
|
966 | 1001 | parents=parents, |
|
967 | 1002 | text=safe_bytes(message), |
|
968 | 1003 | files=[safe_bytes(x) for x in files], |
|
969 | 1004 | filectxfn=_filectxfn, |
|
970 | 1005 | user=safe_bytes(user), |
|
971 | 1006 | date=(commit_time, commit_timezone), |
|
972 | 1007 | extra=kwargs) |
|
973 | 1008 | |
|
974 | 1009 | n = repo.commitctx(commit_ctx) |
|
975 | 1010 | new_id = hex(n) |
|
976 | 1011 | |
|
977 | 1012 | return new_id |
|
978 | 1013 | |
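The shape of removed/updated that _filectxfn expects, reconstructed from the field accesses above (paths and contents are illustrative):

    removed = ['docs/old.rst']          # _filectxfn returns None for these
    updated = [{                        # added or changed paths
        'path': 'README.rst',
        'content': 'new text\n',
        'mode': 0o100644,               # S_IXUSR bit unset -> isexec=False
    }]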
|
979 | 1014 | @reraise_safe_exceptions |
|
980 | 1015 | def pull(self, wire, url, commit_ids=None): |
|
981 | 1016 | repo = self._factory.repo(wire) |
|
982 | 1017 | # Disable any prompts for this repo |
|
983 | 1018 | repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y') |
|
984 | 1019 | |
|
985 | 1020 | remote = peer(repo, {}, safe_bytes(url)) |
|
986 | 1021 | # Disable any prompts for this remote |
|
987 | 1022 | remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y') |
|
988 | 1023 | |
|
989 | 1024 | if commit_ids: |
|
990 | 1025 | commit_ids = [bin(commit_id) for commit_id in commit_ids] |
|
991 | 1026 | |
|
992 | 1027 | return exchange.pull( |
|
993 | 1028 | repo, remote, heads=commit_ids, force=None).cgresult |
|
994 | 1029 | |
|
995 | 1030 | @reraise_safe_exceptions |
|
996 | 1031 | def pull_cmd(self, wire, source, bookmark='', branch='', revision='', hooks=True): |
|
997 | 1032 | repo = self._factory.repo(wire) |
|
998 | 1033 | baseui = self._factory._create_config(wire['config'], hooks=hooks) |
|
999 | 1034 | |
|
1000 | 1035 | source = safe_bytes(source) |
|
1001 | 1036 | |
|
1002 | 1037 | # Mercurial internally has a lot of logic that checks ONLY if an |

1003 | 1038 | # option is defined; we only pass options that are actually set |
|
1004 | 1039 | opts = {} |
|
1005 | 1040 | |
|
1006 | 1041 | if bookmark: |
|
1007 | 1042 | opts['bookmark'] = [safe_bytes(x) for x in bookmark] \ |
|
1008 | 1043 | if isinstance(bookmark, list) else safe_bytes(bookmark) |
|
1009 | 1044 | |
|
1010 | 1045 | if branch: |
|
1011 | 1046 | opts['branch'] = [safe_bytes(x) for x in branch] \ |
|
1012 | 1047 | if isinstance(branch, list) else safe_bytes(branch) |
|
1013 | 1048 | |
|
1014 | 1049 | if revision: |
|
1015 | 1050 | opts['rev'] = [safe_bytes(x) for x in revision] \ |
|
1016 | 1051 | if isinstance(revision, list) else safe_bytes(revision) |
|
1017 | 1052 | |
|
1018 | 1053 | commands.pull(baseui, repo, source, **opts) |
|
1019 | 1054 | |
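A sketch of the opts dict this builds, assuming the scalar-or-list normalization above:

    # pull_cmd(wire, source, branch=['default', 'stable']) produces:
    opts = {'branch': [b'default', b'stable']}
    # pull_cmd(wire, source, revision='tip') produces:
    opts = {'rev': b'tip'}
    # unset options are omitted entirely, which is the presence check
    # Mercurial's command layer relies on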
|
1020 | 1055 | @reraise_safe_exceptions |
|
1021 | 1056 | def push(self, wire, revisions, dest_path, hooks: bool = True, push_branches: bool = False): |
|
1022 | 1057 | repo = self._factory.repo(wire) |
|
1023 | 1058 | baseui = self._factory._create_config(wire['config'], hooks=hooks) |
|
1024 | 1059 | |
|
1025 | 1060 | revisions = [safe_bytes(x) for x in revisions] \ |
|
1026 | 1061 | if isinstance(revisions, list) else safe_bytes(revisions) |
|
1027 | 1062 | |
|
1028 | 1063 | commands.push(baseui, repo, safe_bytes(dest_path), |
|
1029 | 1064 | rev=revisions, |
|
1030 | 1065 | new_branch=push_branches) |
|
1031 | 1066 | |
|
1032 | 1067 | @reraise_safe_exceptions |
|
1033 | 1068 | def strip(self, wire, revision, update, backup): |
|
1034 | 1069 | repo = self._factory.repo(wire) |
|
1035 | 1070 | ctx = self._get_ctx(repo, revision) |
|
1036 | 1071 | hgext_strip.strip( |
|
1037 | 1072 | repo.baseui, repo, ctx.node(), update=update, backup=backup) |
|
1038 | 1073 | |
|
1039 | 1074 | @reraise_safe_exceptions |
|
1040 | 1075 | def get_unresolved_files(self, wire): |
|
1041 | 1076 | repo = self._factory.repo(wire) |
|
1042 | 1077 | |
|
1043 | 1078 | log.debug('Calculating unresolved files for repo: %s', repo) |
|
1044 | 1079 | output = io.BytesIO() |
|
1045 | 1080 | |
|
1046 | 1081 | def write(data, **unused_kwargs): |
|
1047 | 1082 | output.write(data) |
|
1048 | 1083 | |
|
1049 | 1084 | baseui = self._factory._create_config(wire['config']) |
|
1050 | 1085 | baseui.write = write |
|
1051 | 1086 | |
|
1052 | 1087 | commands.resolve(baseui, repo, list=True) |
|
1053 | 1088 | unresolved = output.getvalue().splitlines(0) |
|
1054 | 1089 | return unresolved |
|
1055 | 1090 | |
|
1056 | 1091 | @reraise_safe_exceptions |
|
1057 | 1092 | def merge(self, wire, revision): |
|
1058 | 1093 | repo = self._factory.repo(wire) |
|
1059 | 1094 | baseui = self._factory._create_config(wire['config']) |
|
1060 | 1095 | repo.ui.setconfig(b'ui', b'merge', b'internal:dump') |
|
1061 | 1096 | |
|
1062 | 1097 | # When sub repositories are used, mercurial prompts the user in |

1063 | 1098 | # case of merge conflicts or different sub repository sources. By |

1064 | 1099 | # setting the interactive flag to `False` mercurial doesn't prompt the |

1065 | 1100 | # user but instead uses a default value. |
|
1066 | 1101 | repo.ui.setconfig(b'ui', b'interactive', False) |
|
1067 | 1102 | commands.merge(baseui, repo, rev=safe_bytes(revision)) |
|
1068 | 1103 | |
|
1069 | 1104 | @reraise_safe_exceptions |
|
1070 | 1105 | def merge_state(self, wire): |
|
1071 | 1106 | repo = self._factory.repo(wire) |
|
1072 | 1107 | repo.ui.setconfig(b'ui', b'merge', b'internal:dump') |
|
1073 | 1108 | |
|
1074 | 1109 | # When sub repositories are used, mercurial prompts the user in |

1075 | 1110 | # case of merge conflicts or different sub repository sources. By |

1076 | 1111 | # setting the interactive flag to `False` mercurial doesn't prompt the |

1077 | 1112 | # user but instead uses a default value. |
|
1078 | 1113 | repo.ui.setconfig(b'ui', b'interactive', False) |
|
1079 | 1114 | ms = hg_merge.mergestate(repo) |
|
1080 | 1115 | return [x for x in ms.unresolved()] |
|
1081 | 1116 | |
|
1082 | 1117 | @reraise_safe_exceptions |
|
1083 | 1118 | def commit(self, wire, message, username, close_branch=False): |
|
1084 | 1119 | repo = self._factory.repo(wire) |
|
1085 | 1120 | baseui = self._factory._create_config(wire['config']) |
|
1086 | 1121 | repo.ui.setconfig(b'ui', b'username', safe_bytes(username)) |
|
1087 | 1122 | commands.commit(baseui, repo, message=safe_bytes(message), close_branch=close_branch) |
|
1088 | 1123 | |
|
1089 | 1124 | @reraise_safe_exceptions |
|
1090 | 1125 | def rebase(self, wire, source='', dest='', abort=False): |
|
1091 | 1126 | repo = self._factory.repo(wire) |
|
1092 | 1127 | baseui = self._factory._create_config(wire['config']) |
|
1093 | 1128 | repo.ui.setconfig(b'ui', b'merge', b'internal:dump') |
|
1094 | 1129 | # When sub repositories are used, mercurial prompts the user in |

1095 | 1130 | # case of merge conflicts or different sub repository sources. By |

1096 | 1131 | # setting the interactive flag to `False` mercurial doesn't prompt the |

1097 | 1132 | # user but instead uses a default value. |
|
1098 | 1133 | repo.ui.setconfig(b'ui', b'interactive', False) |
|
1099 | 1134 | |
|
1100 | 1135 | rebase.rebase(baseui, repo, base=safe_bytes(source or ''), dest=safe_bytes(dest or ''), |
|
1101 | 1136 | abort=abort, keep=not abort) |
|
1102 | 1137 | |
|
1103 | 1138 | @reraise_safe_exceptions |
|
1104 | 1139 | def tag(self, wire, name, revision, message, local, user, tag_time, tag_timezone): |
|
1105 | 1140 | repo = self._factory.repo(wire) |
|
1106 | 1141 | ctx = self._get_ctx(repo, revision) |
|
1107 | 1142 | node = ctx.node() |
|
1108 | 1143 | |
|
1109 | 1144 | date = (tag_time, tag_timezone) |
|
1110 | 1145 | try: |
|
1111 | 1146 | hg_tag.tag(repo, safe_bytes(name), node, safe_bytes(message), local, safe_bytes(user), date) |
|
1112 | 1147 | except Abort as e: |
|
1113 | 1148 | log.exception("Tag operation aborted") |
|
1114 | 1149 | # Exception can contain unicode which we convert |
|
1115 | 1150 | raise exceptions.AbortException(e)(repr(e)) |
|
1116 | 1151 | |
|
1117 | 1152 | @reraise_safe_exceptions |
|
1118 | 1153 | def bookmark(self, wire, bookmark, revision=''): |
|
1119 | 1154 | repo = self._factory.repo(wire) |
|
1120 | 1155 | baseui = self._factory._create_config(wire['config']) |
|
1121 | 1156 | revision = revision or '' |
|
1122 | 1157 | commands.bookmark(baseui, repo, safe_bytes(bookmark), rev=safe_bytes(revision), force=True) |
|
1123 | 1158 | |
|
1124 | 1159 | @reraise_safe_exceptions |
|
1125 | 1160 | def install_hooks(self, wire, force=False): |
|
1126 | 1161 | # we don't need any special hooks for Mercurial |
|
1127 | 1162 | pass |
|
1128 | 1163 | |
|
1129 | 1164 | @reraise_safe_exceptions |
|
1130 | 1165 | def get_hooks_info(self, wire): |
|
1131 | 1166 | return { |
|
1132 | 1167 | 'pre_version': vcsserver.__version__, |
|
1133 | 1168 | 'post_version': vcsserver.__version__, |
|
1134 | 1169 | } |
|
1135 | 1170 | |
|
1136 | 1171 | @reraise_safe_exceptions |
|
1137 | 1172 | def set_head_ref(self, wire, head_name): |
|
1138 | 1173 | pass |
|
1139 | 1174 | |
|
1140 | 1175 | @reraise_safe_exceptions |
|
1141 | 1176 | def archive_repo(self, wire, archive_name_key, kind, mtime, archive_at_path, |
|
1142 | 1177 | archive_dir_name, commit_id, cache_config): |
|
1143 | 1178 | |
|
1144 | 1179 | def file_walker(_commit_id, path): |
|
1145 | 1180 | repo = self._factory.repo(wire) |
|
1146 | 1181 | ctx = repo[_commit_id] |
|
1147 | 1182 | is_root = path in ['', '/'] |
|
1148 | 1183 | if is_root: |
|
1149 | 1184 | matcher = alwaysmatcher(badfn=None) |
|
1150 | 1185 | else: |
|
1151 | 1186 | matcher = patternmatcher('', [(b'glob', safe_bytes(path)+b'/**', b'')], badfn=None) |
|
1152 | 1187 | file_iter = ctx.manifest().walk(matcher) |
|
1153 | 1188 | |
|
1154 | 1189 | for fn in file_iter: |
|
1155 | 1190 | file_path = fn |
|
1156 | 1191 | flags = ctx.flags(fn) |
|
1157 | 1192 | mode = b'x' in flags and 0o755 or 0o644 |
|
1158 | 1193 | is_link = b'l' in flags |
|
1159 | 1194 | |
|
1160 | 1195 | yield ArchiveNode(file_path, mode, is_link, ctx[fn].data) |
|
1161 | 1196 | |
|
1162 | 1197 | return store_archive_in_cache( |
|
1163 | 1198 | file_walker, archive_name_key, kind, mtime, archive_at_path, archive_dir_name, commit_id, cache_config=cache_config) |
|
1164 | 1199 |