merge: with latest default branch
super-admin
r1041:e1e68d19 merge python3
@@ -0,0 +1,16 b''
1 [DEFAULT]
2 done = false
3
4 [task:bump_version]
5 done = true
6
7 [task:fixes_on_stable]
8 done = true
9
10 [task:pip2nix_generated]
11 done = true
12
13 [release]
14 state = prepared
15 version = 4.27.1
16
@@ -0,0 +1,53 b''
1 ; #####################
2 ; LOGGING CONFIGURATION
3 ; #####################
4 ; Logging template, used to configure logging
5 ; some variables here are replaced by RhodeCode with default values
6
7 [loggers]
8 keys = root, vcsserver
9
10 [handlers]
11 keys = console
12
13 [formatters]
14 keys = generic, json
15
16 ; #######
17 ; LOGGERS
18 ; #######
19 [logger_root]
20 level = NOTSET
21 handlers = console
22
23 [logger_vcsserver]
24 level = $RC_LOGGING_LEVEL
25 handlers =
26 qualname = vcsserver
27 propagate = 1
28
29 ; ########
30 ; HANDLERS
31 ; ########
32
33 [handler_console]
34 class = StreamHandler
35 args = (sys.stderr, )
36 level = $RC_LOGGING_LEVEL
37 ; To enable JSON formatted logs replace generic with json
38 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
39 #formatter = json
40 #formatter = generic
41 formatter = $RC_LOGGING_FORMATTER
42
43 ; ##########
44 ; FORMATTERS
45 ; ##########
46
47 [formatter_generic]
48 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
49 datefmt = %Y-%m-%d %H:%M:%S
50
51 [formatter_json]
52 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
53 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
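The $RC_LOGGING_LEVEL and $RC_LOGGING_FORMATTER placeholders above are filled in at startup via string.Template (see SettingsMaker.enable_logging later in this commit). A minimal sketch of that substitution, with illustrative values:

    import string

    # trimmed-down stand-in for the template file above
    template = "level = $RC_LOGGING_LEVEL\nformatter = $RC_LOGGING_FORMATTER\n"
    rendered = string.Template(template).safe_substitute(
        RC_LOGGING_LEVEL='DEBUG',
        RC_LOGGING_FORMATTER='json',
    )
    print(rendered)
    # level = DEBUG
    # formatter = json

safe_substitute leaves any unknown $NAME untouched instead of raising, which is why the template can ship with placeholders that are only partially overridden.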
@@ -0,0 +1,10 b''
1 diff -rup configparser-4.0.2-orig/pyproject.toml configparser-4.0.2/pyproject.toml
2 --- configparser-4.0.2-orig/pyproject.toml 2021-03-22 21:28:11.000000000 +0100
3 +++ configparser-4.0.2/pyproject.toml 2021-03-22 21:28:11.000000000 +0100
4 @@ -1,5 +1,5 @@
5 [build-system]
6 -requires = ["setuptools>=40.7", "wheel", "setuptools_scm>=1.15"]
7 +requires = ["setuptools<=42.0", "wheel", "setuptools_scm<6.0.0"]
8 build-backend = "setuptools.build_meta"
9
10 [tool.black]
@@ -0,0 +1,7 b''
1 diff -rup importlib-metadata-1.6.0-orig/pyproject.toml importlib-metadata-1.6.0/pyproject.toml
2 --- importlib-metadata-1.6.0-orig/pyproject.toml 2021-03-22 22:10:33.000000000 +0100
3 +++ importlib-metadata-1.6.0/pyproject.toml 2021-03-22 22:11:09.000000000 +0100
4 @@ -1,3 +1,3 @@
5 [build-system]
6 -requires = ["setuptools>=30.3", "wheel", "setuptools_scm"]
7 +requires = ["setuptools<42.0", "wheel", "setuptools_scm<6.0.0"]
@@ -0,0 +1,12 b''
1 diff -rup pytest-4.6.9-orig/setup.py pytest-4.6.9/setup.py
2 --- pytest-4.6.9-orig/setup.py 2018-04-10 10:23:04.000000000 +0200
3 +++ pytest-4.6.9/setup.py 2018-04-10 10:23:34.000000000 +0200
4 @@ -24,7 +24,7 @@ def main():
5 def main():
6 setup(
7 use_scm_version={"write_to": "src/_pytest/_version.py"},
8 - setup_requires=["setuptools-scm", "setuptools>=40.0"],
9 + setup_requires=["setuptools-scm<6.0.0", "setuptools<=42.0"],
10 package_dir={"": "src"},
11 # fmt: off
12 extras_require={
\ No newline at end of file
@@ -0,0 +1,10 b''
1 diff -rup zip-1.2.0-orig/pyproject.toml zip-1.2.0/pyproject.toml
2 --- zip-1.2.0-orig/pyproject.toml 2021-03-23 10:55:37.000000000 +0100
3 +++ zip-1.2.0/pyproject.toml 2021-03-23 10:56:05.000000000 +0100
4 @@ -1,5 +1,5 @@
5 [build-system]
6 -requires = ["setuptools>=34.4", "wheel", "setuptools_scm>=1.15"]
7 +requires = ["setuptools<42.0", "wheel", "setuptools_scm<6.0.0"]
8 build-backend = "setuptools.build_meta"
9
10 [tool.black]
1 NO CONTENT: new file 100644
@@ -0,0 +1,207 b''
1 # -*- coding: utf-8 -*-
2
3 # Copyright (C) 2010-2020 RhodeCode GmbH
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
21 import os
22 import textwrap
23 import string
24 import functools
25 import logging
26 import tempfile
27 import logging.config
28 log = logging.getLogger(__name__)
29
30 # skip keys that are set here, so we don't double-process them
31 set_keys = {
32 '__file__': ''
33 }
34
35
36 def str2bool(_str):
37 """
38 Returns a True/False value for the given string, trying to translate the
39 string into a boolean
40
41 :param _str: string value to translate into boolean
42 :rtype: boolean
43 :returns: boolean from given string
44 """
45 if _str is None:
46 return False
47 if _str in (True, False):
48 return _str
49 _str = str(_str).strip().lower()
50 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
51
52
53 def aslist(obj, sep=None, strip=True):
54 """
55 Returns given string separated by sep as list
56
57 :param obj:
58 :param sep:
59 :param strip:
60 """
61 if isinstance(obj, str):  # py3: basestring is gone
62 if obj in ['', ""]:
63 return []
64
65 lst = obj.split(sep)
66 if strip:
67 lst = [v.strip() for v in lst]
68 return lst
69 elif isinstance(obj, (list, tuple)):
70 return obj
71 elif obj is None:
72 return []
73 else:
74 return [obj]
75
76
77 class SettingsMaker(object):
78
79 def __init__(self, app_settings):
80 self.settings = app_settings
81
82 @classmethod
83 def _bool_func(cls, input_val):
84 if isinstance(input_val, bytes):  # py3: decode bytes before parsing
85 input_val = input_val.decode('utf8')
86 return str2bool(input_val)
87
88 @classmethod
89 def _int_func(cls, input_val):
90 return int(input_val)
91
92 @classmethod
93 def _list_func(cls, input_val, sep=','):
94 return aslist(input_val, sep=sep)
95
96 @classmethod
97 def _string_func(cls, input_val, lower=True):
98 if lower:
99 input_val = input_val.lower()
100 return input_val
101
102 @classmethod
103 def _float_func(cls, input_val):
104 return float(input_val)
105
106 @classmethod
107 def _dir_func(cls, input_val, ensure_dir=False, mode=0o755):
108
109 # ensure we have our dir created
110 if not os.path.isdir(input_val) and ensure_dir:
111 os.makedirs(input_val, mode=mode)
112
113 if not os.path.isdir(input_val):
114 raise Exception('Dir at {} does not exist'.format(input_val))
115 return input_val
116
117 @classmethod
118 def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755):
119 dirname = os.path.dirname(input_val)
120 cls._dir_func(dirname, ensure_dir=ensure_dir)
121 return input_val
122
123 @classmethod
124 def _key_transformator(cls, key):
125 return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_'))
126
127 def maybe_env_key(self, key):
128 # this key may also be set in the env; if so, that value takes priority.
129 transformed_key = self._key_transformator(key)
130 envvar_value = os.environ.get(transformed_key)
131 if envvar_value:
132 log.debug('using `%s` key instead of `%s` key for config', transformed_key, key)
133
134 return envvar_value
135
136 def env_expand(self):
137 replaced = {}
138 for k, v in self.settings.items():
139 if k not in set_keys:
140 envvar_value = self.maybe_env_key(k)
141 if envvar_value:
142 replaced[k] = envvar_value
143 set_keys[k] = envvar_value
144
145 # replace ALL keys updated
146 self.settings.update(replaced)
147
148 def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'):
149 """
150 Helper to set up logging on a running instance
151 :return:
152 """
153
154 if not str2bool(self.settings.get('logging.autoconfigure')):
155 log.info('logging configuration based on main .ini file')
156 return
157
158 if logging_conf is None:
159 logging_conf = self.settings.get('logging.logging_conf_file') or ''
160
161 if not os.path.isfile(logging_conf):
162 log.error('Unable to setup logging based on %s, '
163 'file does not exist... specify the path using the logging.logging_conf_file config setting.', logging_conf)
164 return
165
166 with open(logging_conf, 'rt') as f:  # text mode: string.Template needs str, not bytes
167 ini_template = textwrap.dedent(f.read())
168 ini_template = string.Template(ini_template).safe_substitute(
169 RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level,
170 RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter
171 )
172
173 with tempfile.NamedTemporaryFile(mode='w', prefix='rc_logging_', suffix='.ini', delete=False) as f:  # text mode so the str template can be written
174 log.info('Saved Temporary LOGGING config at %s', f.name)
175 f.write(ini_template)
176
177 logging.config.fileConfig(f.name)
178 os.remove(f.name)
179
180 def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None):
181 input_val = self.settings.get(key, default)
182
183 if default_when_empty and not input_val:
184 # use default value when value is set in the config but it is empty
185 input_val = default
186
187 parser_func = {
188 'bool': self._bool_func,
189 'int': self._int_func,
190 'list': self._list_func,
191 'list:newline': functools.partial(self._list_func, sep='\n'),
192 'list:spacesep': functools.partial(self._list_func, sep=' '),
193 'string': functools.partial(self._string_func, lower=lower),
194 'dir': self._dir_func,
195 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True),
196 'file': self._file_path_func,
197 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True),
198 None: lambda i: i
199 }[parser]
200
201 envvar_value = self.maybe_env_key(key)
202 if envvar_value:
203 input_val = envvar_value
204 set_keys[key] = input_val
205
206 self.settings[key] = parser_func(input_val)
207 return self.settings[key]
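A hypothetical usage sketch of the SettingsMaker above, assuming the class is importable; the settings dict and key name are illustrative assumptions, not part of this commit:

    import os

    settings = {'logging.autoconfigure': 'true'}
    maker = SettingsMaker(settings)

    # maybe_env_key() transforms 'logging.autoconfigure' into the env var name
    # RC_LOGGING_AUTOCONFIGURE, which then takes priority over the ini value.
    os.environ['RC_LOGGING_AUTOCONFIGURE'] = 'false'

    value = maker.make_setting('logging.autoconfigure', False, parser='bool')
    print(value)  # False: taken from the environment and parsed by _bool_func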
@@ -0,0 +1,26 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
18 # This package contains non-RhodeCode-licensed packages that are
19 # vendored for various reasons
20
21 import os
22 import sys
23
24 vendor_dir = os.path.abspath(os.path.dirname(__file__))
25
26 sys.path.append(vendor_dir)
@@ -0,0 +1,243 b''
1 '''
2 This library is provided to allow standard python logging
3 to output log data as JSON formatted strings
4 '''
5 import logging
6 import json
7 import re
8 from datetime import date, datetime, time, tzinfo, timedelta
9 import traceback
10 import importlib
11
12 from inspect import istraceback
13
14 from collections import OrderedDict
15
16
17 def _inject_req_id(record, *args, **kwargs):
18 return record
19
20
21 ExceptionAwareFormatter = logging.Formatter
22
23
24 ZERO = timedelta(0)
25 HOUR = timedelta(hours=1)
26
27
28 class UTC(tzinfo):
29 """UTC"""
30
31 def utcoffset(self, dt):
32 return ZERO
33
34 def tzname(self, dt):
35 return "UTC"
36
37 def dst(self, dt):
38 return ZERO
39
40 utc = UTC()
41
42
43 # skip natural LogRecord attributes
44 # http://docs.python.org/library/logging.html#logrecord-attributes
45 RESERVED_ATTRS = (
46 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
47 'funcName', 'levelname', 'levelno', 'lineno', 'module',
48 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
49 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName')
50
51
52 def merge_record_extra(record, target, reserved):
53 """
54 Merges extra attributes from LogRecord object into target dictionary
55
56 :param record: logging.LogRecord
57 :param target: dict to update
58 :param reserved: dict or list with reserved keys to skip
59 """
60 for key, value in record.__dict__.items():
61 # the hasattr check allows non-string keys (e.g. numeric keys)
62 if (key not in reserved
63 and not (hasattr(key, "startswith")
64 and key.startswith('_'))):
65 target[key] = value
66 return target
67
68
69 class JsonEncoder(json.JSONEncoder):
70 """
71 A custom encoder extending the default JSONEncoder
72 """
73
74 def default(self, obj):
75 if isinstance(obj, (date, datetime, time)):
76 return self.format_datetime_obj(obj)
77
78 elif istraceback(obj):
79 return ''.join(traceback.format_tb(obj)).strip()
80
81 elif type(obj) == Exception \
82 or isinstance(obj, Exception) \
83 or type(obj) == type:
84 return str(obj)
85
86 try:
87 return super(JsonEncoder, self).default(obj)
88
89 except TypeError:
90 try:
91 return str(obj)
92
93 except Exception:
94 return None
95
96 def format_datetime_obj(self, obj):
97 return obj.isoformat()
98
99
100 class JsonFormatter(ExceptionAwareFormatter):
101 """
102 A custom formatter to format logging records as json strings.
103 Extra values will be formatted as str() if not supported by
104 json default encoder
105 """
106
107 def __init__(self, *args, **kwargs):
108 """
109 :param json_default: a function for encoding non-standard objects
110 as outlined in http://docs.python.org/2/library/json.html
111 :param json_encoder: optional custom encoder
112 :param json_serializer: a :meth:`json.dumps`-compatible callable
113 that will be used to serialize the log record.
114 :param json_indent: an optional :meth:`json.dumps`-compatible numeric value
115 that will be used to customize the indent of the output json.
116 :param prefix: an optional string prefix added at the beginning of
117 the formatted string
118 :param json_indent: indent parameter for json.dumps
119 :param json_ensure_ascii: ensure_ascii parameter for json.dumps
120 :param reserved_attrs: an optional list of fields that will be skipped when
121 outputting json log record. Defaults to all log record attributes:
122 http://docs.python.org/library/logging.html#logrecord-attributes
123 :param timestamp: an optional string/boolean field to add a timestamp when
124 outputting the json log record. If string is passed, timestamp will be added
125 to log record using string as key. If True boolean is passed, timestamp key
126 will be "timestamp". Defaults to False/off.
127 """
128 self.json_default = self._str_to_fn(kwargs.pop("json_default", None))
129 self.json_encoder = self._str_to_fn(kwargs.pop("json_encoder", None))
130 self.json_serializer = self._str_to_fn(kwargs.pop("json_serializer", json.dumps))
131 self.json_indent = kwargs.pop("json_indent", None)
132 self.json_ensure_ascii = kwargs.pop("json_ensure_ascii", True)
133 self.prefix = kwargs.pop("prefix", "")
134 reserved_attrs = kwargs.pop("reserved_attrs", RESERVED_ATTRS)
135 self.reserved_attrs = dict(zip(reserved_attrs, reserved_attrs))
136 self.timestamp = kwargs.pop("timestamp", True)
137
138 # super(JsonFormatter, self).__init__(*args, **kwargs)
139 logging.Formatter.__init__(self, *args, **kwargs)
140 if not self.json_encoder and not self.json_default:
141 self.json_encoder = JsonEncoder
142
143 self._required_fields = self.parse()
144 self._skip_fields = dict(zip(self._required_fields,
145 self._required_fields))
146 self._skip_fields.update(self.reserved_attrs)
147
148 def _str_to_fn(self, fn_as_str):
149 """
150 If the argument is not a string, return whatever was passed in.
151 Parses a string such as package.module.function, imports the module
152 and returns the function.
153
154 :param fn_as_str: The string to parse. If not a string, return it.
155 """
156 if not isinstance(fn_as_str, str):
157 return fn_as_str
158
159 path, _, function = fn_as_str.rpartition('.')
160 module = importlib.import_module(path)
161 return getattr(module, function)
162
163 def parse(self):
164 """
165 Parses format string looking for substitutions
166
167 This method is responsible for returning a list of fields (as strings)
168 to include in all log messages.
169 """
170 standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE)
171 return standard_formatters.findall(self._fmt)
172
173 def add_fields(self, log_record, record, message_dict):
174 """
175 Override this method to implement custom logic for adding fields.
176 """
177 for field in self._required_fields:
178 log_record[field] = record.__dict__.get(field)
179 log_record.update(message_dict)
180 merge_record_extra(record, log_record, reserved=self._skip_fields)
181
182 if self.timestamp:
183 key = self.timestamp if type(self.timestamp) == str else 'timestamp'
184 log_record[key] = datetime.fromtimestamp(record.created, tz=utc)
185
186 def process_log_record(self, log_record):
187 """
188 Override this method to implement custom logic
189 on the possibly ordered dictionary.
190 """
191 return log_record
192
193 def jsonify_log_record(self, log_record):
194 """Returns a json string of the log record."""
195 return self.json_serializer(log_record,
196 default=self.json_default,
197 cls=self.json_encoder,
198 indent=self.json_indent,
199 ensure_ascii=self.json_ensure_ascii)
200
201 def serialize_log_record(self, log_record):
202 """Returns the final representation of the log record."""
203 return "%s%s" % (self.prefix, self.jsonify_log_record(log_record))
204
205 def format(self, record):
206 """Formats a log record and serializes to json"""
207 message_dict = {}
208 # FIXME: logging.LogRecord.msg and logging.LogRecord.message in typeshed
209 # are always type of str. We shouldn't need to override that.
210 if isinstance(record.msg, dict):
211 message_dict = record.msg
212 record.message = None
213 else:
214 record.message = record.getMessage()
215 # only format time if needed
216 if "asctime" in self._required_fields:
217 record.asctime = self.formatTime(record, self.datefmt)
218
219 # Display formatted exception, but allow overriding it in the
220 # user-supplied dict.
221 if record.exc_info and not message_dict.get('exc_info'):
222 message_dict['exc_info'] = self.formatException(record.exc_info)
223 if not message_dict.get('exc_info') and record.exc_text:
224 message_dict['exc_info'] = record.exc_text
225 # Display formatted record of stack frames
226 # default format is a string returned from :func:`traceback.print_stack`
227 try:
228 if record.stack_info and not message_dict.get('stack_info'):
229 message_dict['stack_info'] = self.formatStack(record.stack_info)
230 except AttributeError:
231 # Python2.7 doesn't have stack_info.
232 pass
233
234 try:
235 log_record = OrderedDict()
236 except NameError:
237 log_record = {}
238
239 _inject_req_id(record, with_prefix=False)
240 self.add_fields(log_record, record, message_dict)
241 log_record = self.process_log_record(log_record)
242
243 return self.serialize_log_record(log_record)
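A minimal sketch wiring the vendored JsonFormatter into stdlib logging; the logger name and extra fields are illustrative:

    import logging
    import sys

    handler = logging.StreamHandler(sys.stderr)
    # The format string only selects which LogRecord attributes become JSON keys.
    handler.setFormatter(JsonFormatter('%(timestamp)s %(levelname)s %(name)s %(message)s'))

    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info('hello', extra={'req_id': 'abc-123'})
    # emits one JSON object per record, e.g.
    # {"timestamp": "...", "levelname": "INFO", "name": "demo", "message": "hello", "req_id": "abc-123"}

Fields passed via extra= survive because merge_record_extra() copies every non-reserved record attribute into the output dict.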
@@ -0,0 +1,390 b''
1 import sys
2 import threading
3 import weakref
4 from base64 import b64encode
5 from logging import getLogger
6 from os import urandom
7
8 from redis import StrictRedis
9
10 __version__ = '3.7.0'
11
12 loggers = {
13 k: getLogger("vcsserver." + ".".join((__name__, k)))
14 for k in [
15 "acquire",
16 "refresh.thread.start",
17 "refresh.thread.stop",
18 "refresh.thread.exit",
19 "refresh.start",
20 "refresh.shutdown",
21 "refresh.exit",
22 "release",
23 ]
24 }
25
26 PY3 = sys.version_info[0] == 3
27
28 if PY3:
29 text_type = str
30 binary_type = bytes
31 else:
32 text_type = unicode # noqa
33 binary_type = str
34
35
36 # Check if the id match. If not, return an error code.
37 UNLOCK_SCRIPT = b"""
38 if redis.call("get", KEYS[1]) ~= ARGV[1] then
39 return 1
40 else
41 redis.call("del", KEYS[2])
42 redis.call("lpush", KEYS[2], 1)
43 redis.call("pexpire", KEYS[2], ARGV[2])
44 redis.call("del", KEYS[1])
45 return 0
46 end
47 """
48
49 # Covers both cases when key doesn't exist and doesn't equal to lock's id
50 EXTEND_SCRIPT = b"""
51 if redis.call("get", KEYS[1]) ~= ARGV[1] then
52 return 1
53 elseif redis.call("ttl", KEYS[1]) < 0 then
54 return 2
55 else
56 redis.call("expire", KEYS[1], ARGV[2])
57 return 0
58 end
59 """
60
61 RESET_SCRIPT = b"""
62 redis.call('del', KEYS[2])
63 redis.call('lpush', KEYS[2], 1)
64 redis.call('pexpire', KEYS[2], ARGV[2])
65 return redis.call('del', KEYS[1])
66 """
67
68 RESET_ALL_SCRIPT = b"""
69 local locks = redis.call('keys', 'lock:*')
70 local signal
71 for _, lock in pairs(locks) do
72 signal = 'lock-signal:' .. string.sub(lock, 6)
73 redis.call('del', signal)
74 redis.call('lpush', signal, 1)
75 redis.call('expire', signal, 1)
76 redis.call('del', lock)
77 end
78 return #locks
79 """
80
81
82 class AlreadyAcquired(RuntimeError):
83 pass
84
85
86 class NotAcquired(RuntimeError):
87 pass
88
89
90 class AlreadyStarted(RuntimeError):
91 pass
92
93
94 class TimeoutNotUsable(RuntimeError):
95 pass
96
97
98 class InvalidTimeout(RuntimeError):
99 pass
100
101
102 class TimeoutTooLarge(RuntimeError):
103 pass
104
105
106 class NotExpirable(RuntimeError):
107 pass
108
109
110 class Lock(object):
111 """
112 A Lock context manager implemented via redis SETNX/BLPOP.
113 """
114 unlock_script = None
115 extend_script = None
116 reset_script = None
117 reset_all_script = None
118
119 def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000):
120 """
121 :param redis_client:
122 An instance of :class:`~StrictRedis`.
123 :param name:
124 The name (redis key) the lock should have.
125 :param expire:
126 The lock expiry time in seconds. If left at the default (None)
127 the lock will not expire.
128 :param id:
129 The ID (redis value) the lock should have. A random value is
130 generated when left at the default.
131
132 Note that if you specify this then the lock is marked as "held". Acquires
133 won't be possible.
134 :param auto_renewal:
135 If set to ``True``, Lock will automatically renew the lock so that it
136 doesn't expire for as long as the lock is held (acquire() called
137 or running in a context manager).
138
139 Implementation note: Renewal will happen using a daemon thread with
140 an interval of ``expire*2/3``. If wishing to use a different renewal
141 time, subclass Lock, call ``super().__init__()`` then set
142 ``self._lock_renewal_interval`` to your desired interval.
143 :param strict:
144 If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``.
145 :param signal_expire:
146 Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``.
147 """
148 if strict and not isinstance(redis_client, StrictRedis):
149 raise ValueError("redis_client must be instance of StrictRedis. "
150 "Use strict=False if you know what you're doing.")
151 if auto_renewal and expire is None:
152 raise ValueError("Expire may not be None when auto_renewal is set")
153
154 self._client = redis_client
155
156 if expire:
157 expire = int(expire)
158 if expire < 0:
159 raise ValueError("A negative expire is not acceptable.")
160 else:
161 expire = None
162 self._expire = expire
163
164 self._signal_expire = signal_expire
165 if id is None:
166 self._id = b64encode(urandom(18)).decode('ascii')
167 elif isinstance(id, binary_type):
168 try:
169 self._id = id.decode('ascii')
170 except UnicodeDecodeError:
171 self._id = b64encode(id).decode('ascii')
172 elif isinstance(id, text_type):
173 self._id = id
174 else:
175 raise TypeError("Incorrect type for `id`. Must be bytes/str not %s." % type(id))
176 self._name = 'lock:' + name
177 self._signal = 'lock-signal:' + name
178 self._lock_renewal_interval = (float(expire) * 2 / 3
179 if auto_renewal
180 else None)
181 self._lock_renewal_thread = None
182
183 self.register_scripts(redis_client)
184
185 @classmethod
186 def register_scripts(cls, redis_client):
187 global reset_all_script
188 if reset_all_script is None:
189 reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
190 cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT)
191 cls.extend_script = redis_client.register_script(EXTEND_SCRIPT)
192 cls.reset_script = redis_client.register_script(RESET_SCRIPT)
193 cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
194
195 @property
196 def _held(self):
197 return self.id == self.get_owner_id()
198
199 def reset(self):
200 """
201 Forcibly deletes the lock. Use this with care.
202 """
203 self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire))
204
205 @property
206 def id(self):
207 return self._id
208
209 def get_owner_id(self):
210 owner_id = self._client.get(self._name)
211 if isinstance(owner_id, binary_type):
212 owner_id = owner_id.decode('ascii', 'replace')
213 return owner_id
214
215 def acquire(self, blocking=True, timeout=None):
216 """
217 :param blocking:
218 Boolean value specifying whether lock should be blocking or not.
219 :param timeout:
220 An integer value specifying the maximum number of seconds to block.
221 """
222 logger = loggers["acquire"]
223
224 logger.debug("Getting blocking: %s acquire on %r ...", blocking, self._name)
225
226 if self._held:
227 owner_id = self.get_owner_id()
228 raise AlreadyAcquired("Already acquired from this Lock instance. Lock id: {}".format(owner_id))
229
230 if not blocking and timeout is not None:
231 raise TimeoutNotUsable("Timeout cannot be used if blocking=False")
232
233 if timeout:
234 timeout = int(timeout)
235 if timeout < 0:
236 raise InvalidTimeout("Timeout (%d) cannot be less than or equal to 0" % timeout)
237
238 if self._expire and not self._lock_renewal_interval and timeout > self._expire:
239 raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire))
240
241 busy = True
242 blpop_timeout = timeout or self._expire or 0
243 timed_out = False
244 while busy:
245 busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
246 if busy:
247 if timed_out:
248 return False
249 elif blocking:
250 timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout
251 else:
252 logger.warning("Failed to get %r.", self._name)
253 return False
254
255 logger.debug("Got lock for %r.", self._name)
256 if self._lock_renewal_interval is not None:
257 self._start_lock_renewer()
258 return True
259
260 def extend(self, expire=None):
261 """Extends expiration time of the lock.
262
263 :param expire:
264 New expiration time. If ``None`` - `expire` provided during
265 lock initialization will be taken.
266 """
267 if expire:
268 expire = int(expire)
269 if expire < 0:
270 raise ValueError("A negative expire is not acceptable.")
271 elif self._expire is not None:
272 expire = self._expire
273 else:
274 raise TypeError(
275 "To extend a lock 'expire' must be provided as an "
276 "argument to extend() method or at initialization time."
277 )
278
279 error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire))
280 if error == 1:
281 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
282 elif error == 2:
283 raise NotExpirable("Lock %s has no assigned expiration time" % self._name)
284 elif error:
285 raise RuntimeError("Unsupported error code %s from EXTEND script" % error)
286
287 @staticmethod
288 def _lock_renewer(lockref, interval, stop):
289 """
290 Renew the lock key in redis every `interval` seconds for as long
291 as `self._lock_renewal_thread.should_exit` is False.
292 """
293 while not stop.wait(timeout=interval):
294 loggers["refresh.thread.start"].debug("Refreshing lock")
295 lock = lockref()
296 if lock is None:
297 loggers["refresh.thread.stop"].debug(
298 "The lock no longer exists, stopping lock refreshing"
299 )
300 break
301 lock.extend(expire=lock._expire)
302 del lock
303 loggers["refresh.thread.exit"].debug("Exit requested, stopping lock refreshing")
304
305 def _start_lock_renewer(self):
306 """
307 Starts the lock refresher thread.
308 """
309 if self._lock_renewal_thread is not None:
310 raise AlreadyStarted("Lock refresh thread already started")
311
312 loggers["refresh.start"].debug(
313 "Starting thread to refresh lock every %s seconds",
314 self._lock_renewal_interval
315 )
316 self._lock_renewal_stop = threading.Event()
317 self._lock_renewal_thread = threading.Thread(
318 group=None,
319 target=self._lock_renewer,
320 kwargs={'lockref': weakref.ref(self),
321 'interval': self._lock_renewal_interval,
322 'stop': self._lock_renewal_stop}
323 )
324 self._lock_renewal_thread.setDaemon(True)
325 self._lock_renewal_thread.start()
326
327 def _stop_lock_renewer(self):
328 """
329 Stop the lock renewer.
330
331 This signals the renewal thread and waits for its exit.
332 """
333 if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
334 return
335 loggers["refresh.shutdown"].debug("Signalling the lock refresher to stop")
336 self._lock_renewal_stop.set()
337 self._lock_renewal_thread.join()
338 self._lock_renewal_thread = None
339 loggers["refresh.exit"].debug("Lock refresher has stopped")
340
341 def __enter__(self):
342 acquired = self.acquire(blocking=True)
343 assert acquired, "Lock wasn't acquired, but blocking=True"
344 return self
345
346 def __exit__(self, exc_type=None, exc_value=None, traceback=None):
347 self.release()
348
349 def release(self):
350 """Releases the lock, that was acquired with the same object.
351
352 .. note::
353
354 If you want to release a lock that you acquired in a different place you have two choices:
355
356 * Use ``Lock("name", id=id_from_other_place).release()``
357 * Use ``Lock("name").reset()``
358 """
359 if self._lock_renewal_thread is not None:
360 self._stop_lock_renewer()
361 loggers["release"].debug("Releasing %r.", self._name)
362 error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire))
363 if error == 1:
364 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
365 elif error:
366 raise RuntimeError("Unsupported error code %s from UNLOCK script." % error)
367
368 def locked(self):
369 """
370 Return true if the lock is acquired.
371
372 Checks whether a lock with the same name already exists. This method returns true even if
373 the lock has a different id.
374 """
375 return self._client.exists(self._name) == 1
376
377
378 reset_all_script = None
379
380
381 def reset_all(redis_client):
382 """
383 Forcibly deletes all locks that remain (e.g. after a crash). Use this with care.
384
385 :param redis_client:
386 An instance of :class:`~StrictRedis`.
387 """
388 Lock.register_scripts(redis_client)
389
390 reset_all_script(client=redis_client) # noqa
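A usage sketch for the vendored Lock above; it assumes a Redis server reachable on localhost:6379 and that the class is importable:

    from redis import StrictRedis

    conn = StrictRedis()

    # auto_renewal starts a daemon thread that extends the key every expire*2/3 s
    with Lock(conn, 'repo-operations', expire=60, auto_renewal=True):
        pass  # critical section; the key 'lock:repo-operations' is held here

    # Non-blocking attempt: returns False immediately when the lock is taken.
    lock = Lock(conn, 'repo-operations', expire=10)
    if lock.acquire(blocking=False):
        try:
            pass  # do work
        finally:
            lock.release()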
@@ -0,0 +1,52 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import logging
4
5 from .stream import TCPStatsClient, UnixSocketStatsClient # noqa
6 from .udp import StatsClient # noqa
7
8 HOST = 'localhost'
9 PORT = 8125
10 IPV6 = False
11 PREFIX = None
12 MAXUDPSIZE = 512
13
14 log = logging.getLogger('rhodecode.statsd')
15
16
17 def statsd_config(config, prefix='statsd.'):
18 _config = {}
19 for key in config.keys():
20 if key.startswith(prefix):
21 _config[key[len(prefix):]] = config[key]
22 return _config
23
24
25 def client_from_config(configuration, prefix='statsd.', **kwargs):
26 from pyramid.settings import asbool
27
28 _config = statsd_config(configuration, prefix)
29 statsd_enabled = asbool(_config.pop('enabled', False))
30 if not statsd_enabled:
31 log.debug('statsd client not enabled by the statsd.enabled flag, skipping...')
32 return
33
34 host = _config.pop('statsd_host', HOST)
35 port = _config.pop('statsd_port', PORT)
36 prefix = _config.pop('statsd_prefix', PREFIX)
37 maxudpsize = _config.pop('statsd_maxudpsize', MAXUDPSIZE)
38 ipv6 = asbool(_config.pop('statsd_ipv6', IPV6))
39 log.debug('configured statsd client %s:%s', host, port)
40
41 try:
42 client = StatsClient(
43 host=host, port=port, prefix=prefix, maxudpsize=maxudpsize, ipv6=ipv6)
44 except Exception:
45 log.exception('StatsD is enabled, but failed to connect to statsd server, fallback: disable statsd')
46 client = None
47
48 return client
49
50
51 def get_statsd_client(request):
52 return client_from_config(request.registry.settings)
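An illustrative settings dict for client_from_config above; the key names follow the statsd. prefix handling of statsd_config(), the values are assumptions:

    settings = {
        'statsd.enabled': 'true',
        'statsd.statsd_host': '127.0.0.1',
        'statsd.statsd_port': 8125,
        'statsd.statsd_prefix': 'vcsserver',
    }

    client = client_from_config(settings)
    if client:  # None when statsd.enabled is false or the client failed to build
        client.incr('app.start')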
@@ -0,0 +1,156 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import re
4 import random
5 from collections import deque
6 from datetime import timedelta
7 from repoze.lru import lru_cache
8
9 from .timer import Timer
10
11 TAG_INVALID_CHARS_RE = re.compile(
12 r"[^\w\d_\-:/\.]",
13 #re.UNICODE
14 )
15 TAG_INVALID_CHARS_SUBS = "_"
16
17 # we save and expose methods called by statsd for discovery
18 buckets_dict = {
19
20 }
21
22
23 @lru_cache(maxsize=500)
24 def _normalize_tags_with_cache(tag_list):
25 return [TAG_INVALID_CHARS_RE.sub(TAG_INVALID_CHARS_SUBS, tag) for tag in tag_list]
26
27
28 def normalize_tags(tag_list):
29 # We have to turn our input tag list into a non-mutable tuple for it to
30 # be hashable (and thus usable) by the @lru_cache decorator.
31 return _normalize_tags_with_cache(tuple(tag_list))
32
33
34 class StatsClientBase(object):
35 """A Base class for various statsd clients."""
36
37 def close(self):
38 """Used to close and clean up any underlying resources."""
39 raise NotImplementedError()
40
41 def _send(self):
42 raise NotImplementedError()
43
44 def pipeline(self):
45 raise NotImplementedError()
46
47 def timer(self, stat, rate=1, tags=None, auto_send=True):
48 """
49 statsd = StatsdClient.statsd
50 with statsd.timer('bucket_name', auto_send=True) as tmr:
51 # This block will be timed.
52 for i in range(0, 100000):
53 i ** 2
54 # you can access time here...
55 elapsed_ms = tmr.ms
56 """
57 return Timer(self, stat, rate, tags, auto_send=auto_send)
58
59 def timing(self, stat, delta, rate=1, tags=None, use_decimals=True):
60 """
61 Send new timing information.
62
63 `delta` can be either a number of milliseconds or a timedelta.
64 """
65 if isinstance(delta, timedelta):
66 # Convert timedelta to number of milliseconds.
67 delta = delta.total_seconds() * 1000.
68 if use_decimals:
69 fmt = '%0.6f|ms'
70 else:
71 fmt = '%s|ms'
72 self._send_stat(stat, fmt % delta, rate, tags)
73
74 def incr(self, stat, count=1, rate=1, tags=None):
75 """Increment a stat by `count`."""
76 self._send_stat(stat, '%s|c' % count, rate, tags)
77
78 def decr(self, stat, count=1, rate=1, tags=None):
79 """Decrement a stat by `count`."""
80 self.incr(stat, -count, rate, tags)
81
82 def gauge(self, stat, value, rate=1, delta=False, tags=None):
83 """Set a gauge value."""
84 if value < 0 and not delta:
85 if rate < 1:
86 if random.random() > rate:
87 return
88 with self.pipeline() as pipe:
89 pipe._send_stat(stat, '0|g', 1)
90 pipe._send_stat(stat, '%s|g' % value, 1)
91 else:
92 prefix = '+' if delta and value >= 0 else ''
93 self._send_stat(stat, '%s%s|g' % (prefix, value), rate, tags)
94
95 def set(self, stat, value, rate=1):
96 """Set a set value."""
97 self._send_stat(stat, '%s|s' % value, rate)
98
99 def histogram(self, stat, value, rate=1, tags=None):
100 """Set a histogram"""
101 self._send_stat(stat, '%s|h' % value, rate, tags)
102
103 def _send_stat(self, stat, value, rate, tags=None):
104 self._after(self._prepare(stat, value, rate, tags))
105
106 def _prepare(self, stat, value, rate, tags=None):
107 global buckets_dict
108 buckets_dict[stat] = 1
109
110 if rate < 1:
111 if random.random() > rate:
112 return
113 value = '%s|@%s' % (value, rate)
114
115 if self._prefix:
116 stat = '%s.%s' % (self._prefix, stat)
117
118 res = '%s:%s%s' % (
119 stat,
120 value,
121 ("|#" + ",".join(normalize_tags(tags))) if tags else "",
122 )
123 return res
124
125 def _after(self, data):
126 if data:
127 self._send(data)
128
129
130 class PipelineBase(StatsClientBase):
131
132 def __init__(self, client):
133 self._client = client
134 self._prefix = client._prefix
135 self._stats = deque()
136
137 def _send(self):
138 raise NotImplementedError()
139
140 def _after(self, data):
141 if data is not None:
142 self._stats.append(data)
143
144 def __enter__(self):
145 return self
146
147 def __exit__(self, typ, value, tb):
148 self.send()
149
150 def send(self):
151 if not self._stats:
152 return
153 self._send()
154
155 def pipeline(self):
156 return self.__class__(self)
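A short sketch of the pipeline pattern defined above; client stands in for any concrete StatsClient instance (the TCP, unix-socket, and UDP variants appear in the following files):

    with client.pipeline() as pipe:
        pipe.incr('requests.total')
        pipe.timing('requests.latency', 12.5)
        pipe.gauge('workers.busy', 3)
    # __exit__ calls send(), which flushes all queued stats in one batch

Batching matters mostly for the UDP client, where the pipeline packs stats into datagrams up to maxudpsize (see udp.py later in this commit).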
@@ -0,0 +1,75 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import socket
4
5 from .base import StatsClientBase, PipelineBase
6
7
8 class StreamPipeline(PipelineBase):
9 def _send(self):
10 self._client._after('\n'.join(self._stats))
11 self._stats.clear()
12
13
14 class StreamClientBase(StatsClientBase):
15 def connect(self):
16 raise NotImplementedError()
17
18 def close(self):
19 if self._sock and hasattr(self._sock, 'close'):
20 self._sock.close()
21 self._sock = None
22
23 def reconnect(self):
24 self.close()
25 self.connect()
26
27 def pipeline(self):
28 return StreamPipeline(self)
29
30 def _send(self, data):
31 """Send data to statsd."""
32 if not self._sock:
33 self.connect()
34 self._do_send(data)
35
36 def _do_send(self, data):
37 self._sock.sendall(data.encode('ascii') + b'\n')
38
39
40 class TCPStatsClient(StreamClientBase):
41 """TCP version of StatsClient."""
42
43 def __init__(self, host='localhost', port=8125, prefix=None,
44 timeout=None, ipv6=False):
45 """Create a new client."""
46 self._host = host
47 self._port = port
48 self._ipv6 = ipv6
49 self._timeout = timeout
50 self._prefix = prefix
51 self._sock = None
52
53 def connect(self):
54 fam = socket.AF_INET6 if self._ipv6 else socket.AF_INET
55 family, _, _, _, addr = socket.getaddrinfo(
56 self._host, self._port, fam, socket.SOCK_STREAM)[0]
57 self._sock = socket.socket(family, socket.SOCK_STREAM)
58 self._sock.settimeout(self._timeout)
59 self._sock.connect(addr)
60
61
62 class UnixSocketStatsClient(StreamClientBase):
63 """Unix domain socket version of StatsClient."""
64
65 def __init__(self, socket_path, prefix=None, timeout=None):
66 """Create a new client."""
67 self._socket_path = socket_path
68 self._timeout = timeout
69 self._prefix = prefix
70 self._sock = None
71
72 def connect(self):
73 self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
74 self._sock.settimeout(self._timeout)
75 self._sock.connect(self._socket_path)
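A sketch of the TCP flavour above; it connects lazily on the first send, and the host/port/prefix values are illustrative:

    tcp_client = TCPStatsClient(host='localhost', port=8125, prefix='vcsserver')
    tcp_client.incr('deploys')  # connects, then sends b'vcsserver.deploys:1|c\n'
    tcp_client.close()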
@@ -0,0 +1,75 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import functools
4
5 # Use timer that's not susceptible to time of day adjustments.
6 try:
7 # perf_counter is only present on Py3.3+
8 from time import perf_counter as time_now
9 except ImportError:
10 # fall back to using time
11 from time import time as time_now
12
13
14 def safe_wraps(wrapper, *args, **kwargs):
15 """Safely wraps partial functions."""
16 while isinstance(wrapper, functools.partial):
17 wrapper = wrapper.func
18 return functools.wraps(wrapper, *args, **kwargs)
19
20
21 class Timer(object):
22 """A context manager/decorator for statsd.timing()."""
23
24 def __init__(self, client, stat, rate=1, tags=None, use_decimals=True, auto_send=True):
25 self.client = client
26 self.stat = stat
27 self.rate = rate
28 self.tags = tags
29 self.ms = None
30 self._sent = False
31 self._start_time = None
32 self.use_decimals = use_decimals
33 self.auto_send = auto_send
34
35 def __call__(self, f):
36 """Thread-safe timing function decorator."""
37 @safe_wraps(f)
38 def _wrapped(*args, **kwargs):
39 start_time = time_now()
40 try:
41 return f(*args, **kwargs)
42 finally:
43 elapsed_time_ms = 1000.0 * (time_now() - start_time)
44 self.client.timing(self.stat, elapsed_time_ms, self.rate, self.tags, self.use_decimals)
45 self._sent = True
46 return _wrapped
47
48 def __enter__(self):
49 return self.start()
50
51 def __exit__(self, typ, value, tb):
52 self.stop(send=self.auto_send)
53
54 def start(self):
55 self.ms = None
56 self._sent = False
57 self._start_time = time_now()
58 return self
59
60 def stop(self, send=True):
61 if self._start_time is None:
62 raise RuntimeError('Timer has not started.')
63 dt = time_now() - self._start_time
64 self.ms = 1000.0 * dt # Convert to milliseconds.
65 if send:
66 self.send()
67 return self
68
69 def send(self):
70 if self.ms is None:
71 raise RuntimeError('No data recorded.')
72 if self._sent:
73 raise RuntimeError('Already sent data.')
74 self._sent = True
75 self.client.timing(self.stat, self.ms, self.rate, self.tags, self.use_decimals)
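A sketch of the Timer above used as a decorator and started explicitly; client stands in for a StatsClient instance and the bucket name is illustrative:

    @client.timer('render')
    def render_page():
        return 'ok'

    render_page()  # reports elapsed milliseconds under the 'render' bucket

    tmr = client.timer('render').start()
    render_page()
    tmr.stop()     # computes tmr.ms and sends the timing
    print(tmr.ms)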
@@ -0,0 +1,55 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import socket
4
5 from .base import StatsClientBase, PipelineBase
6
7
8 class Pipeline(PipelineBase):
9
10 def __init__(self, client):
11 super(Pipeline, self).__init__(client)
12 self._maxudpsize = client._maxudpsize
13
14 def _send(self):
15 data = self._stats.popleft()
16 while self._stats:
17 # Use popleft to preserve the order of the stats.
18 stat = self._stats.popleft()
19 if len(stat) + len(data) + 1 >= self._maxudpsize:
20 self._client._after(data)
21 data = stat
22 else:
23 data += '\n' + stat
24 self._client._after(data)
25
26
27 class StatsClient(StatsClientBase):
28 """A client for statsd."""
29
30 def __init__(self, host='localhost', port=8125, prefix=None,
31 maxudpsize=512, ipv6=False):
32 """Create a new client."""
33 fam = socket.AF_INET6 if ipv6 else socket.AF_INET
34 family, _, _, _, addr = socket.getaddrinfo(
35 host, port, fam, socket.SOCK_DGRAM)[0]
36 self._addr = addr
37 self._sock = socket.socket(family, socket.SOCK_DGRAM)
38 self._prefix = prefix
39 self._maxudpsize = maxudpsize
40
41 def _send(self, data):
42 """Send data to statsd."""
43 try:
44 self._sock.sendto(data.encode('ascii'), self._addr)
45 except (socket.error, RuntimeError):
46 # No time for love, Dr. Jones!
47 pass
48
49 def close(self):
50 if self._sock and hasattr(self._sock, 'close'):
51 self._sock.close()
52 self._sock = None
53
54 def pipeline(self):
55 return Pipeline(self)
@@ -0,0 +1,49 b''
1 from vcsserver.lib._vendor.statsd import client_from_config
2
3
4 class StatsdClientNotInitialised(Exception):
5 pass
6
7
8 class _Singleton(type):
9 """A metaclass that creates a Singleton base class when called."""
10
11 _instances = {}
12
13 def __call__(cls, *args, **kwargs):
14 if cls not in cls._instances:
15 cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
16 return cls._instances[cls]
17
18
19 class Singleton(_Singleton("SingletonMeta", (object,), {})):
20 pass
21
22
23 class StatsdClientClass(Singleton):
24 setup_run = False
25 statsd_client = None
26 statsd = None
27
28 def __getattribute__(self, name):
29
30 if name.startswith("statsd"):
31 if self.setup_run:
32 return super(StatsdClientClass, self).__getattribute__(name)
33 else:
34 return None
35 #raise StatsdClientNotInitialised("requested key was %s" % name)
36
37 return super(StatsdClientClass, self).__getattribute__(name)
38
39 def setup(self, settings):
40 """
41 Initialize the client
42 """
43 statsd = client_from_config(settings)
44 self.statsd = statsd
45 self.statsd_client = statsd
46 self.setup_run = True
47
48
49 StatsdClient = StatsdClientClass()
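A sketch of how the singleton behaves before and after setup(); the settings dict is illustrative:

    print(StatsdClient.statsd)  # None: setup_run is False, so any statsd*
                                # attribute is masked by __getattribute__

    StatsdClient.setup({'statsd.enabled': 'false'})
    print(StatsdClient.statsd)  # still None, but now because statsd was disabled
                                # and client_from_config() returned None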
@@ -1,6 +1,5 b''
1 1 [bumpversion]
2 2 current_version = 5.0.0
3 3 message = release: Bump version {current_version} to {new_version}
4 4
5 5 [bumpversion:file:vcsserver/VERSION]
6
@@ -1,63 +1,82 b''
1 1 c6fad7d1e61f22b1f4a4863eff207a04c27e9462 v4.0.0
2 2 77b6e243b4cc5b702c15abd6d737798edbac60dc v4.0.1
3 3 a359c072337fdd8e1e71df72cc520b8a9b042f80 v4.1.0
4 4 49aa7ed030a36b7ceba149a21e587cb5d20b4946 v4.1.1
5 5 f38ed1e1a31dce3c170b4d31585ba43471cf0705 v4.1.2
6 6 21269ba7bafd8f0c77e79dd86a31eb9bce7643d2 v4.2.0
7 7 b53930c918c25b2c8f69ceddc6641e511be27fd3 v4.2.1
8 8 6627ff4119723d8b2b60918e8b1aa49e9f055aab v4.3.0
9 9 d38f2c2b861dde6c4178923f7cf15ea58b85aa92 v4.3.1
10 10 1232313f9e6adac5ce5399c2a891dc1e72b79022 v4.4.0
11 11 cbb9f1d329ae5768379cdec55a62ebdd546c4e27 v4.4.1
12 12 24ffe44a27fcd1c5b6936144e176b9f6dd2f3a17 v4.4.2
13 13 beaeeaa440cd17471110d4621b8816506c0dff4a v4.5.0
14 14 668e5c656f61dd94595611844e1106d1361aa6a7 v4.5.1
15 15 ae0640240cb7a77e6dc8c77e64dd80d79732cb5b v4.5.2
16 16 7af06899f426813583fe60449d7517cc49c15b28 v4.6.0
17 17 8f7f4299bf341b43f94dadafa1ea73d6cea2c9ba v4.6.1
18 18 de00a831a0709ffaac57f948738ea927b97223a9 v4.7.0
19 19 57f527e0646d731768fb5e0fe742b12a35bdc63b v4.7.1
20 20 f9b09787da9845e4a105f4bffdc252099902cefb v4.7.2
21 21 0b7c790b726f08385e6ebdf4f257c905787b9244 v4.8.0
22 22 f4123e725b74d0e82fe89982ab8791a66062e2b3 v4.9.0
23 23 940bac044a0fe1ec839759df81399b50141be720 v4.9.1
24 24 582d9ebbe46bdddac4b26eacae36ee5ecabca267 v4.10.0
25 25 12fbd08d0ab57acce9c0bdccee75633cfa08d7f4 v4.10.1
26 26 78352f95021a9d128f5803fdbca7036daef5dabe v4.10.2
27 27 a47ccfb020cda78c8680e3844aaf0b82b1390f3b v4.10.3
28 28 347ae9ae544bba8deb417995285287a3b6be1611 v4.10.4
29 29 9b257ac49841f850434be0d518baca0827e6c8cc v4.10.5
30 30 e8bf26eea118694edc4ffe50c6c5aa91022bc434 v4.10.6
31 31 71fa9274ba59fb982104f0b9b3d0d024c78675f7 v4.11.0
32 32 92471577ef25636e5babe8001d47fc8e51521522 v4.11.1
33 33 0277edbcda5a8d075e1e41a95bcee6dcf21f3f77 v4.11.2
34 34 6c5ecbf0778ef870e5b23d9fad5340135b563356 v4.11.3
35 35 be788a89a939ebd63606220064bd624fa9d5c9c9 v4.11.4
36 36 15c90a04098a373ac761fab07695fd80dde3bcdb v4.11.5
37 37 77aff155b3251cc00394a49f5e8f2c99e33149a7 v4.11.6
38 38 c218a1ce5d370c2e671d42a91684b3fc2c91b81d v4.12.0
39 39 80085fb846cc948195a5c76b579ca34cbc49b59b v4.12.1
40 40 346f04fc8a18df3235defbe6e71bd552c0d46481 v4.12.2
41 41 764fdd752322f3e0c13ea00957f2d548bf4363a7 v4.12.3
42 42 b58038974a5cecbb9c100d32ad2e4c68582f1a78 v4.12.4
43 43 e1d42d92a0fec0c80b56c82f37bc7b5472613706 v4.13.0
44 44 c3ded3ff17e9bb2a47002a808984a7a946f58a1c v4.13.1
45 45 7ff81aa47b1b40cdef9dd5bcdd439f59c269db3d v4.13.2
46 46 628a08e6aaeff2c3f9e0e268e854f870e6778e53 v4.13.3
47 47 941d675f10cfa7d774815bfacfb37085751b7a0d v4.14.0
48 48 75e11d32c0be0a457198f07888e7ef650cfa6888 v4.14.1
49 49 6c6f49fda0191c4641dcd43aa0d4376b8b728d40 v4.15.0
50 50 184dea5e01c36e6474c83d3bb34719cdfec22b0d v4.15.1
51 51 a4dc3669345553582296b2ce1485229a6c6f0522 v4.15.2
52 52 d2a4a1a66f204668841da1cdccfa29083e1ef7a3 v4.16.0
53 53 744cf8f2c8f23051978fc293404bf475cc5a31f6 v4.16.1
54 54 e68aff93ce4ad11fea13420e914f7dfb05c39566 v4.16.2
55 55 647aeff9752dc1aa00796fa280d0d2ce2f511bc9 v4.17.0
56 56 5e0c2990e095bba1dc903cf0e6ef6ac035e0ccf9 v4.17.1
57 57 8a824544d95037d76d99b104b5d2363858101d53 v4.17.2
58 58 ccd806a2d9482f61bd7e8956a02a28eb24a1d46a v4.17.3
59 59 e533ca02ccc205189b7bad9f227a312212772022 v4.17.4
60 60 ba6a6dc9ecd7fd8b1dcd6eb0c4ee0210e897c426 v4.18.0
61 61 17bc818b41bcf6883b9ff0da31f01d8c2a5d0781 v4.18.1
62 62 1e9f12aa01f82c335abc9017efe94ce1c30b52ba v4.18.2
63 63 f4cc6b3c5680bdf4541d7d442fbb7086640fb547 v4.18.3
64 5dc0277e4f77bd4cc3042d99625bb5d3ba480c8c v4.19.0
65 3a815eeb1b1efa340dda9b81a8da3cf24a7d605b v4.19.1
66 8841da3680fba841e5a54ebccd8ca56c078f7553 v4.19.2
67 4b0dec7fd80b1ca38e5073e5e562a5a450f73669 v4.19.3
68 1485aa75ffe1b1ec48352dce7b7492d92f85e95f v4.20.0
69 5b740274011766ef2f73803cc196d081e1e7f1d4 v4.20.1
70 5a7835234e2c45e8fb8184c60f548a64b5842af8 v4.21.0
71 26af88343015f8b89d5a66f92bc7547c51fcf0df v4.22.0
72 cf54e5f700fe5dc50af1a1bdf5197c18cf52105f v4.23.0
73 179d989bcfe02c6227f9f6aa9236cbbe1c14c400 v4.23.1
74 383aee8b1652affaa26aefe336a89ee366b2b26d v4.23.2
75 bc1a8141cc51fc23c455ebc50c6609c810b46f8d v4.24.0
76 530a1c03caabc806ea1ef34605f8f67f18c70e55 v4.24.1
77 5908ae65cee1043982e1b26d7b618af5fcfebbb3 v4.25.0
78 cce8bcdf75090d5943a1e9706fe5212d7b5d1fa1 v4.25.1
79 8610c4bf846c63bbc95d3ddfb53fadaaa9c7aa42 v4.25.2
80 d46b7d1be72c76c9f9aaeab6a342951d54459f49 v4.26.0
81 6fba0daab1e20a9e18fb70fa59bd21753e0a5b90 v4.27.0
82 6195da4fc454087173918ae59cae946289458676 v4.27.1
@@ -1,237 +1,275 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 10 port = 9900
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
14 14 ; ##################################################
15 15
16 16 ; use server type
17 17 use = egg:waitress#main
18 18
19 19 ; number of worker threads
20 20 threads = 5
21 21
22 22 ; MAX BODY SIZE 100GB
23 23 max_request_body_size = 107374182400
24 24
25 25 ; Use poll instead of select, fixes file descriptors limits problems.
26 26 ; May not work on old windows systems.
27 27 asyncore_use_poll = true
28 28
29 29
30 30 ; ###########################
31 31 ; GUNICORN APPLICATION SERVER
32 32 ; ###########################
33 33
34 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
34 ; run with gunicorn --paste rhodecode.ini
35 35
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 39 ; Sets the number of process workers. More workers means more concurrent connections
40 40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 41 ; memory usage as each has its own set of caches.
42 42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
43 43 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
44 44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 45 ; when using more than 1 worker.
46 46 #workers = 2
47 47
48 48 ; Gunicorn access log level
49 49 #loglevel = info
50 50
51 51 ; Process name visible in process list
52 52 #proc_name = rhodecode_vcsserver
53 53
54 54 ; Type of worker class, one of `sync`, `gevent`
55 55 ; currently `sync` is the only option allowed.
56 56 #worker_class = sync
57 57
58 58 ; The maximum number of simultaneous clients. Valid only for gevent
59 59 #worker_connections = 10
60 60
61 61 ; Max number of requests that worker will handle before being gracefully restarted.
62 62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
63 63 #max_requests = 1000
64 64 #max_requests_jitter = 30
65 65
66 66 ; Amount of time a worker can spend handling a request before it
67 67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 69 #timeout = 21600
70 70
71 71 ; The maximum size of HTTP request line in bytes.
72 72 ; 0 for unlimited
73 73 #limit_request_line = 0
74 74
75 75 ; Limit the number of HTTP headers fields in a request.
76 76 ; By default this value is 100 and can't be larger than 32768.
77 77 #limit_request_fields = 32768
78 78
79 79 ; Limit the allowed size of an HTTP request header field.
80 80 ; Value is a positive number or 0.
81 81 ; Setting it to 0 will allow unlimited header field sizes.
82 82 #limit_request_field_size = 0
83 83
84 84 ; Timeout for graceful workers restart.
85 85 ; After receiving a restart signal, workers have this much time to finish
86 86 ; serving requests. Workers still alive after the timeout (starting from the
87 87 ; receipt of the restart signal) are force killed.
88 88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 #graceful_timeout = 3600
89 #graceful_timeout = 21600
90 90
91 91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 92 # Generally set in the 1-5 seconds range.
93 93 #keepalive = 2
94 94
95 95 ; Maximum memory usage that each worker can use before it will receive a
96 96 ; graceful restart signal 0 = memory monitoring is disabled
97 97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 99 #memory_max_usage = 0
100 100
101 101 ; How often in seconds to check for memory usage for each gunicorn worker
102 102 #memory_usage_check_interval = 60
103 103
104 104 ; Threshold value for which we don't recycle worker if GarbageCollection
105 105 ; frees up enough resources. Before each restart we try to run GC on worker
106 106 ; in case we get enough free memory after that, restart will not happen.
107 107 #memory_usage_recovery_threshold = 0.8
108 108
109 109
110 110 [app:main]
111 111 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 112 ; of this file
113 ; Each option in the app:main can be overridden by an environment variable
114 ;
115 ; To override an option:
116 ;
117 ; RC_<KeyName>
118 ; Everything should be uppercase, . and - should be replaced by _.
119 ; For example, if you have these configuration settings:
120 ; rc_cache.repo_object.backend = foo
121 ; can be overridden by
122 ; export RC_CACHE_REPO_OBJECT_BACKEND=foo
123
113 124 use = egg:rhodecode-vcsserver
114 125
115 126
116 127 ; #############
117 128 ; DEBUG OPTIONS
118 129 ; #############
119 130
120 131 # During development we want to have the debug toolbar enabled
121 132 pyramid.includes =
122 133 pyramid_debugtoolbar
123 134
124 135 debugtoolbar.hosts = 0.0.0.0/0
125 136 debugtoolbar.exclude_prefixes =
126 137 /css
127 138 /fonts
128 139 /images
129 140 /js
130 141
131 142 ; #################
132 143 ; END DEBUG OPTIONS
133 144 ; #################
134 145
135 146 ; Pyramid default locales, we need this to be set
136 pyramid.default_locale_name = en
147 #pyramid.default_locale_name = en
137 148
138 149 ; default locale used by VCS systems
139 locale = en_US.UTF-8
150 #locale = en_US.UTF-8
140 151
141 152 ; path to binaries for vcsserver, it should be set by the installer
142 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
153 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
143 154 ; it can also be a path to nix-build output in case of development
144 155 core.binary_dir = ""
145 156
146 157 ; Custom exception store path, defaults to TMPDIR
147 158 ; This is used to store exception from RhodeCode in shared directory
148 159 #exception_tracker.store_path =
149 160
150 161 ; #############
151 162 ; DOGPILE CACHE
152 163 ; #############
153 164
154 165 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
155 166 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
156 cache_dir = %(here)s/data
167 #cache_dir = %(here)s/data
157 168
158 169 ; ***************************************
159 170 ; `repo_object` cache, default file based
160 171 ; ***************************************
161 172
162 173 ; `repo_object` cache settings for vcs methods for repositories
163 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
174 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
164 175
165 176 ; cache auto-expires after N seconds
166 177 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
167 rc_cache.repo_object.expiration_time = 2592000
178 #rc_cache.repo_object.expiration_time = 2592000
168 179
169 180 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
170 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
181 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
171 182
172 183 ; ***********************************************************
173 184 ; `repo_object` cache with redis backend
174 185 ; recommended for larger instance, and for better performance
175 186 ; ***********************************************************
176 187
177 188 ; `repo_object` cache settings for vcs methods for repositories
178 189 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
179 190
180 191 ; cache auto-expires after N seconds
181 192 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
182 193 #rc_cache.repo_object.expiration_time = 2592000
183 194
184 195 ; redis_expiration_time needs to be greater then expiration_time
185 196 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
186 197
187 198 #rc_cache.repo_object.arguments.host = localhost
188 199 #rc_cache.repo_object.arguments.port = 6379
189 200 #rc_cache.repo_object.arguments.db = 5
190 201 #rc_cache.repo_object.arguments.socket_timeout = 30
191 202 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
192 203 #rc_cache.repo_object.arguments.distributed_lock = true
193 204
205 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
206 #rc_cache.repo_object.arguments.lock_auto_renewal = true
207
208 ; Statsd client config, this is used to send metrics to statsd
209 ; We recommend setting statsd_exported and scrape them using Promethues
210 #statsd.enabled = false
211 #statsd.statsd_host = 0.0.0.0
212 #statsd.statsd_port = 8125
213 #statsd.statsd_prefix =
214 #statsd.statsd_ipv6 = false
215
216 ; Logging is configured automatically at server startup; set this to false
217 ; to use the custom logging config below instead.
218 ; RC_LOGGING_FORMATTER
219 ; RC_LOGGING_LEVEL
220 ; these env variables control the logging settings when autoconfigure is enabled
221
222 #logging.autoconfigure = true
223
224 ; specify your own custom logging config file to configure logging
225 #logging.logging_conf_file = /path/to/custom_logging.ini
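
; e.g. (illustrative command; the `json` formatter is defined at the bottom of
; this file) run with autoconfigure enabled, INFO level and JSON-formatted logs:
;   RC_LOGGING_LEVEL=INFO RC_LOGGING_FORMATTER=json gunicorn --paste rhodecode.ini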
194 226
195 227 ; #####################
196 228 ; LOGGING CONFIGURATION
197 229 ; #####################
230
198 231 [loggers]
199 232 keys = root, vcsserver
200 233
201 234 [handlers]
202 235 keys = console
203 236
204 237 [formatters]
205 keys = generic
238 keys = generic, json
206 239
207 240 ; #######
208 241 ; LOGGERS
209 242 ; #######
210 243 [logger_root]
211 244 level = NOTSET
212 245 handlers = console
213 246
214 247 [logger_vcsserver]
215 248 level = DEBUG
216 249 handlers =
217 250 qualname = vcsserver
218 251 propagate = 1
219 252
220
221 253 ; ########
222 254 ; HANDLERS
223 255 ; ########
224 256
225 257 [handler_console]
226 258 class = StreamHandler
227 259 args = (sys.stderr, )
228 260 level = DEBUG
261 ; To enable JSON formatted logs replace 'generic' with 'json'
262 ; This allows sending properly formatted logs to grafana loki or elasticsearch
229 263 formatter = generic
230 264
231 265 ; ##########
232 266 ; FORMATTERS
233 267 ; ##########
234 268
235 269 [formatter_generic]
236 270 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
237 271 datefmt = %Y-%m-%d %H:%M:%S
272
273 [formatter_json]
274 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
275 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
@@ -1,262 +1,393 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 import socket
14 15 from gunicorn.glogging import Logger
15 16
16 17
17 18 def get_workers():
18 19 import multiprocessing
19 20 return multiprocessing.cpu_count() * 2 + 1
20 21
21 22 # GLOBAL
22 23 errorlog = '-'
23 24 accesslog = '-'
24 25
25 26
26 27 # SERVER MECHANICS
27 28 # None == system temp dir
28 29 # worker_tmp_dir is recommended to be set to some tmpfs
29 30 worker_tmp_dir = None
30 31 tmp_upload_dir = None
31 32
33 #reuse_port = True
34
32 35 # Custom log format
36 #access_log_format = (
37 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
38
39 # loki format for easier parsing in grafana
33 40 access_log_format = (
34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
41 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
35 42
36 43 # self adjust workers based on CPU count
37 44 # workers = get_workers()
38 45
39 46
40 47 def _get_process_rss(pid=None):
41 48 try:
42 49 import psutil
43 50 if pid:
44 51 proc = psutil.Process(pid)
45 52 else:
46 53 proc = psutil.Process()
47 54 return proc.memory_info().rss
48 55 except Exception:
49 56 return None
50 57
51 58
52 59 def _get_config(ini_path):
53 60 import configparser
54 61
55 62 try:
56 63 config = configparser.RawConfigParser()
57 64 config.read(ini_path)
58 65 return config
59 66 except Exception:
60 67 return None
61 68
62 69
63 70 def _time_with_offset(memory_usage_check_interval):
64 71 return time.time() - random.randint(0, int(memory_usage_check_interval / 2))  # randint needs int bounds on py3
65 72
66 73
67 74 def pre_fork(server, worker):
68 75 pass
69 76
70 77
71 78 def post_fork(server, worker):
72 79
73 80 # memory spec defaults
74 81 _memory_max_usage = 0
75 82 _memory_usage_check_interval = 60
76 83 _memory_usage_recovery_threshold = 0.8
77 84
78 85 ini_path = os.path.abspath(server.cfg.paste)
79 86 conf = _get_config(ini_path)
80 87
81 88 section = 'server:main'
82 89 if conf and conf.has_section(section):
83 90
84 91 if conf.has_option(section, 'memory_max_usage'):
85 92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
86 93
87 94 if conf.has_option(section, 'memory_usage_check_interval'):
88 95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
89 96
90 97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
91 98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
92 99
93 worker._memory_max_usage = _memory_max_usage
94 worker._memory_usage_check_interval = _memory_usage_check_interval
95 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
100 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 or _memory_max_usage)
102 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
103 or _memory_usage_check_interval)
104 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
105 or _memory_usage_recovery_threshold)
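
# A minimal sketch of the fallback pattern above (illustrative, standalone):
# os.environ.get(name, '') yields '' when the variable is unset or empty, and
# '' is falsy, so the .ini value (or the built-in default) wins:
#
#   threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '') or 0.8)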
96 106
97 107 # register memory last check time, with some random offset so we don't recycle all
98 108 # at once
99 109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
100 110
101 111 if _memory_max_usage:
102 112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
103 113 _format_data_size(_memory_max_usage))
104 114 else:
105 115 server.log.info("[%-10s] WORKER spawned", worker.pid)
106 116
107 117
108 118 def pre_exec(server):
109 119 server.log.info("Forked child, re-executing.")
110 120
111 121
112 122 def on_starting(server):
113 123 server_lbl = '{} {}'.format(server.proc_name, server.address)
114 124 server.log.info("Server %s is starting.", server_lbl)
115 125
116 126
117 127 def when_ready(server):
118 128 server.log.info("Server %s is ready. Spawning workers", server)
119 129
120 130
121 131 def on_reload(server):
122 132 pass
123 133
124 134
125 135 def _format_data_size(size, unit="B", precision=1, binary=True):
126 136 """Format a number using SI units (kilo, mega, etc.).
127 137
128 138 ``size``: The number as a float or int.
129 139
130 140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
131 141
132 142 ``precision``: How many digits to the right of the decimal point. Default
133 143 is 1. 0 suppresses the decimal point.
134 144
135 145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
136 146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
137 147
138 148 ``full_name``: If false (default), use the prefix abbreviation ("k" or
139 149 "Ki"). If true, use the full prefix ("kilo" or "kibi").
140 150
141 151
142 152 """
143 153
144 154 if not binary:
145 155 base = 1000
146 156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
147 157 else:
148 158 base = 1024
149 159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
150 160
151 161 sign = ""
152 162 if size > 0:
153 163 m = int(math.log(size, base))
154 164 elif size < 0:
155 165 sign = "-"
156 166 size = -size
157 167 m = int(math.log(size, base))
158 168 else:
159 169 m = 0
160 170 if m > 8:
161 171 m = 8
162 172
163 173 if m == 0:
164 174 precision = '%.0f'
165 175 else:
166 176 precision = '%%.%df' % precision
167 177
168 178 size = precision % (size / math.pow(base, m))
169 179
170 180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
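
# Quick worked values for the helper above (sanity checks, not from the source):
#   _format_data_size(268435456)          -> '256.0 MiB'
#   _format_data_size(1000, binary=False) -> '1.0 kB'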
171 181
172 182
173 183 def _check_memory_usage(worker):
174 184 memory_max_usage = worker._memory_max_usage
175 185 if not memory_max_usage:
176 186 return
177 187
178 188 memory_usage_check_interval = worker._memory_usage_check_interval
179 189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
180 190
181 191 elapsed = time.time() - worker._last_memory_check_time
182 192 if elapsed > memory_usage_check_interval:
183 193 mem_usage = _get_process_rss()
184 194 if mem_usage and mem_usage > memory_max_usage:
185 195 worker.log.info(
186 196 "memory usage %s > %s, forcing gc",
187 197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
188 198 # Try to clean it up by forcing a full collection.
189 199 gc.collect()
190 200 mem_usage = _get_process_rss()
191 201 if mem_usage > memory_usage_recovery_threshold:
192 202 # Didn't clean up enough, we'll have to terminate.
193 203 worker.log.warning(
194 204 "memory usage %s > %s after gc, quitting",
195 205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
196 206 # This will cause worker to auto-restart itself
197 207 worker.alive = False
198 208 worker._last_memory_check_time = time.time()
199 209
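# Worked example with illustrative numbers: memory_max_usage = 1073741824 (1 GiB)
# and memory_usage_recovery_threshold = 0.8 give a recovery limit of
#   int(1073741824 * 0.8) == 858993459 bytes (~819.2 MiB)
# so a worker above 1 GiB RSS triggers gc.collect(); if RSS still exceeds the
# recovery limit afterwards, worker.alive is set False and the worker restarts.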
200 210
201 211 def worker_int(worker):
202 212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
203 213
204 214 # get traceback info, on worker crash
205 215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
206 216 code = []
207 217 for thread_id, stack in sys._current_frames().items():
208 218 code.append(
209 219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
210 220 for fname, lineno, name, line in traceback.extract_stack(stack):
211 221 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
212 222 if line:
213 223 code.append(" %s" % (line.strip()))
214 224 worker.log.debug("\n".join(code))
215 225
216 226
217 227 def worker_abort(worker):
218 228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
219 229
220 230
221 231 def worker_exit(server, worker):
222 232 worker.log.info("[%-10s] worker exit", worker.pid)
223 233
224 234
225 235 def child_exit(server, worker):
226 236 worker.log.info("[%-10s] worker child exit", worker.pid)
227 237
228 238
229 239 def pre_request(worker, req):
230 240 worker.start_time = time.time()
231 241 worker.log.debug(
232 242 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
233 243
234 244
235 245 def post_request(worker, req, environ, resp):
236 246 total_time = time.time() - worker.start_time
237 247 # Gunicorn sometimes has problems with reading the status_code
238 248 status_code = getattr(resp, 'status_code', '')
239 249 worker.log.debug(
240 250 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
241 251 worker.nr, req.method, req.path, status_code, total_time)
242 252 _check_memory_usage(worker)
243 253
244 254
255 def _filter_proxy(ip):
256 """
257 IP addresses passed in via HEADERS can be in a special format of multiple
258 IPs. Such comma-separated IPs are appended by the various proxies in the
259 request-processing chain, the left-most being the original client.
260 We only care about that first IP, which came from the original client.
261
262 :param ip: ip string from headers
263 """
264 if ',' in ip:
265 _ips = ip.split(',')
266 _first_ip = _ips[0].strip()
267 return _first_ip
268 return ip
269
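# e.g. (illustrative addresses):
#   _filter_proxy('203.0.113.7, 10.0.0.1, 10.0.0.2') -> '203.0.113.7'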
270
271 def _filter_port(ip):
272 """
273 Removes the port from an ip; there are 4 main cases to handle:
274 - ipv4 eg. 127.0.0.1
275 - ipv6 eg. ::1
276 - ipv4+port eg. 127.0.0.1:8080
277 - ipv6+port eg. [::1]:8080
278
279 :param ip:
280 """
281 def is_ipv6(ip_addr):
282 if hasattr(socket, 'inet_pton'):
283 try:
284 socket.inet_pton(socket.AF_INET6, ip_addr)
285 except socket.error:
286 return False
287 else:
288 return False
289 return True
290
291 if ':' not in ip: # must be ipv4 pure ip
292 return ip
293
294 if '[' in ip and ']' in ip: # ipv6 with port
295 return ip.split(']')[0][1:].lower()
296
297 # must be ipv6 or ipv4 with port
298 if is_ipv6(ip):
299 return ip
300 else:
301 ip, _port = ip.split(':')[:2] # means ipv4+port
302 return ip
303
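# e.g. the four cases above (illustrative values):
#   _filter_port('127.0.0.1')      -> '127.0.0.1'
#   _filter_port('127.0.0.1:8080') -> '127.0.0.1'
#   _filter_port('::1')            -> '::1'
#   _filter_port('[::1]:8080')     -> '::1'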
304
305 def get_ip_addr(environ):
306 proxy_key = 'HTTP_X_REAL_IP'
307 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
308 def_key = 'REMOTE_ADDR'
309 _filters = lambda x: _filter_port(_filter_proxy(x))
310
311 ip = environ.get(proxy_key)
312 if ip:
313 return _filters(ip)
314
315 ip = environ.get(proxy_key2)
316 if ip:
317 return _filters(ip)
318
319 ip = environ.get(def_key, '0.0.0.0')
320 return _filters(ip)
321
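# e.g. with only the forwarded-for header present (illustrative environ):
#   get_ip_addr({'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}) -> '203.0.113.7'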
322
245 323 class RhodeCodeLogger(Logger):
246 324 """
247 325 Custom Logger that allows some customization that gunicorn doesn't allow
248 326 """
249 327
250 328 datefmt = r"%Y-%m-%d %H:%M:%S"
251 329
252 330 def __init__(self, cfg):
253 331 Logger.__init__(self, cfg)
254 332
255 333 def now(self):
256 334 """ return date in RhodeCode Log format """
257 335 now = time.time()
258 336 msecs = int((now - int(now)) * 1000)  # py3: the py2 builtin long() is gone
259 337 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
260 338
339 def atoms(self, resp, req, environ, request_time):
340 """ Gets atoms for log formatting.
341 """
342 status = resp.status
343 if isinstance(status, str):
344 status = status.split(None, 1)[0]
345 atoms = {
346 'h': get_ip_addr(environ),
347 'l': '-',
348 'u': self._get_user(environ) or '-',
349 't': self.now(),
350 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
351 environ['RAW_URI'],
352 environ["SERVER_PROTOCOL"]),
353 's': status,
354 'm': environ.get('REQUEST_METHOD'),
355 'U': environ.get('PATH_INFO'),
356 'q': environ.get('QUERY_STRING'),
357 'H': environ.get('SERVER_PROTOCOL'),
358 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
359 'B': getattr(resp, 'sent', None),
360 'f': environ.get('HTTP_REFERER', '-'),
361 'a': environ.get('HTTP_USER_AGENT', '-'),
362 'T': request_time.seconds,
363 'D': (request_time.seconds * 1000000) + request_time.microseconds,
364 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
365 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
366 'p': "<%s>" % os.getpid()
367 }
368
369 # add request headers
370 if hasattr(req, 'headers'):
371 req_headers = req.headers
372 else:
373 req_headers = req
374
375 if hasattr(req_headers, "items"):
376 req_headers = req_headers.items()
377
378 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
379
380 resp_headers = resp.headers
381 if hasattr(resp_headers, "items"):
382 resp_headers = resp_headers.items()
383
384 # add response headers
385 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
386
387 # add environ variables
388 environ_variables = environ.items()
389 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
390
391 return atoms
261 392
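# With these atoms, gunicorn-style header placeholders work in
# access_log_format, e.g. (illustrative) request_id="%({x-request-id}i)s"
# resolves to the X-Request-Id request header (keys are lower-cased above).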
262 393 logger_class = RhodeCodeLogger
@@ -1,200 +1,238 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
17 ; run with gunicorn --paste rhodecode.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 26 ; than 8-10 except for really big deployments, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
48 48
49 49 ; Amount of time a worker can spend handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP headers fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 graceful_timeout = 3600
72 graceful_timeout = 21600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold below which we don't recycle a worker if garbage collection
88 88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 89 ; if enough memory is freed after that, the restart will not happen.
90 90 memory_usage_recovery_threshold = 0.8
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of the parent directory
95 95 ; of this file
96 ; Each option in the app:main section can be overridden by an environment variable
97 ;
98 ; To override an option, use the name:
99 ;
100 ; RC_<KeyName>
101 ; Everything should be uppercase; . and - should be replaced by _.
102 ; For example, this configuration setting:
103 ; rc_cache.repo_object.backend = foo
104 ; can be overridden by
105 ; export RC_CACHE_REPO_OBJECT_BACKEND=foo
106
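; A sketch of that mangling as Python (hypothetical helper, not part of
; vcsserver). Judging by the example above, a key that already starts with
; "rc_" does not get a second RC_ prefix:
;
;   def rc_env_var(option_name):
;       return 'RC_' + option_name.upper().replace('.', '_').replace('-', '_')
;
;   rc_env_var('locale')           # -> 'RC_LOCALE'
;   rc_env_var('core.binary_dir')  # -> 'RC_CORE_BINARY_DIR'
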
96 107 use = egg:rhodecode-vcsserver
97 108
98 109 ; Pyramid default locale; this needs to be set
99 pyramid.default_locale_name = en
110 #pyramid.default_locale_name = en
100 111
101 112 ; default locale used by VCS systems
102 locale = en_US.UTF-8
113 #locale = en_US.UTF-8
103 114
104 115 ; path to binaries for vcsserver, it should be set by the installer
105 ; at installation time, e.g. /home/user/vcsserver-1/profile/bin
116 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
106 117 ; it can also be a path to nix-build output in case of development
107 118 core.binary_dir = ""
108 119
109 120 ; Custom exception store path, defaults to TMPDIR
110 121 ; This is used to store exceptions from RhodeCode in a shared directory
111 122 #exception_tracker.store_path =
112 123
113 124 ; #############
114 125 ; DOGPILE CACHE
115 126 ; #############
116 127
117 128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 129 ; e.g. /tmpfs/data_ramdisk; note this directory might require a large amount of space
119 cache_dir = %(here)s/data
130 #cache_dir = %(here)s/data
120 131
121 132 ; ***************************************
122 133 ; `repo_object` cache, default file based
123 134 ; ***************************************
124 135
125 136 ; `repo_object` cache settings for vcs methods for repositories
126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127 138
128 139 ; cache auto-expires after N seconds
129 140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 rc_cache.repo_object.expiration_time = 2592000
141 #rc_cache.repo_object.expiration_time = 2592000
131 142
132 143 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
134 145
135 146 ; ***********************************************************
136 147 ; `repo_object` cache with redis backend
137 148 ; recommended for larger instances and for better performance
138 149 ; ***********************************************************
139 150
140 151 ; `repo_object` cache settings for vcs methods for repositories
141 152 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
142 153
143 154 ; cache auto-expires after N seconds
144 155 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
145 156 #rc_cache.repo_object.expiration_time = 2592000
146 157
147 158 ; redis_expiration_time needs to be greater than expiration_time
148 159 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
149 160
150 161 #rc_cache.repo_object.arguments.host = localhost
151 162 #rc_cache.repo_object.arguments.port = 6379
152 163 #rc_cache.repo_object.arguments.db = 5
153 164 #rc_cache.repo_object.arguments.socket_timeout = 30
154 165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 166 #rc_cache.repo_object.arguments.distributed_lock = true
156 167
168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
170
171 ; Statsd client config, this is used to send metrics to statsd
172 ; We recommend using a statsd exporter and scraping the metrics with Prometheus
173 #statsd.enabled = false
174 #statsd.statsd_host = 0.0.0.0
175 #statsd.statsd_port = 8125
176 #statsd.statsd_prefix =
177 #statsd.statsd_ipv6 = false
178
179 ; Logging is configured automatically at server startup; set this to false
180 ; to use the custom logging config below instead.
181 ; RC_LOGGING_FORMATTER
182 ; RC_LOGGING_LEVEL
183 ; these env variables control the logging settings when autoconfigure is enabled
184
185 #logging.autoconfigure = true
186
187 ; specify your own custom logging config file to configure logging
188 #logging.logging_conf_file = /path/to/custom_logging.ini
157 189
158 190 ; #####################
159 191 ; LOGGING CONFIGURATION
160 192 ; #####################
193
161 194 [loggers]
162 195 keys = root, vcsserver
163 196
164 197 [handlers]
165 198 keys = console
166 199
167 200 [formatters]
168 keys = generic
201 keys = generic, json
169 202
170 203 ; #######
171 204 ; LOGGERS
172 205 ; #######
173 206 [logger_root]
174 207 level = NOTSET
175 208 handlers = console
176 209
177 210 [logger_vcsserver]
178 level = DEBUG
211 level = INFO
179 212 handlers =
180 213 qualname = vcsserver
181 214 propagate = 1
182 215
183
184 216 ; ########
185 217 ; HANDLERS
186 218 ; ########
187 219
188 220 [handler_console]
189 221 class = StreamHandler
190 222 args = (sys.stderr, )
191 223 level = INFO
224 ; To enable JSON formatted logs replace 'generic' with 'json'
225 ; This allows sending properly formatted logs to grafana loki or elasticsearch
192 226 formatter = generic
193 227
194 228 ; ##########
195 229 ; FORMATTERS
196 230 ; ##########
197 231
198 232 [formatter_generic]
199 233 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
200 234 datefmt = %Y-%m-%d %H:%M:%S
235
236 [formatter_json]
237 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
238 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
@@ -1,134 +1,163 b''
1 1 # -*- coding: utf-8 -*-
2
2 3 # RhodeCode VCSServer provides access to different vcs backends via network.
3 4 # Copyright (C) 2014-2019 RodeCode GmbH
4 5 #
5 6 # This program is free software; you can redistribute it and/or modify
6 7 # it under the terms of the GNU General Public License as published by
7 8 # the Free Software Foundation; either version 3 of the License, or
8 9 # (at your option) any later version.
9 10 #
10 11 # This program is distributed in the hope that it will be useful,
11 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 14 # GNU General Public License for more details.
14 15 #
15 16 # You should have received a copy of the GNU General Public License
16 17 # along with this program; if not, write to the Free Software Foundation,
17 18 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 19
19 20 # Import early to make sure things are patched up properly
20 21 from setuptools import setup, find_packages
21 22
22 23 import os
24 import re
23 25 import sys
24 26 import pkgutil
25 27 import platform
26 28 import codecs
27 29
28 try: # for pip >= 10
30 import pip
31
32 pip_major_version = int(pip.__version__.split(".")[0])
33 if pip_major_version >= 20:
29 34 from pip._internal.req import parse_requirements
30 except ImportError: # for pip <= 9.0.3
35 from pip._internal.network.session import PipSession
36 elif pip_major_version >= 10:
37 from pip._internal.req import parse_requirements
38 from pip._internal.download import PipSession
39 else:
31 40 from pip.req import parse_requirements
41 from pip.download import PipSession
42
32 43
33 try: # for pip >= 10
34 from pip._internal.download import PipSession
35 except ImportError: # for pip <= 9.0.3
36 from pip.download import PipSession
44 def get_package_name(req_object):
45 package_name = None
46 try:
47 from pip._internal.req.constructors import install_req_from_parsed_requirement
48 except ImportError:
49 install_req_from_parsed_requirement = None
50
51 # In pip 20.1, the requirements object changed
52 if hasattr(req_object, 'req'):
53 package_name = req_object.req.name
54
55 if package_name is None:
56 if install_req_from_parsed_requirement:
57 package = install_req_from_parsed_requirement(req_object)
58 package_name = package.req.name
59
60 if package_name is None:
61 # fallback for older pip
62 package_name = re.split('===|<=|!=|==|>=|~=|<|>', req_object.requirement)[0]
63
64 return package_name
37 65
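# e.g. the regex fallback above (illustrative requirement string):
#   re.split('===|<=|!=|==|>=|~=|<|>', 'dulwich>=0.20')[0] -> 'dulwich'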
38 66
39 67 if sys.version_info < (2, 7):
40 68 raise Exception('VCSServer requires Python 2.7 or later')
41 69
42 70 here = os.path.abspath(os.path.dirname(__file__))
43 71
44 72 # defines current platform
45 73 __platform__ = platform.system()
46 74 __license__ = 'GPL V3'
47 75 __author__ = 'RhodeCode GmbH'
48 76 __url__ = 'https://code.rhodecode.com'
49 77 is_windows = __platform__ in ('Windows',)
50 78
51 79
52 80 def _get_requirements(req_filename, exclude=None, extras=None):
53 81 extras = extras or []
54 82 exclude = exclude or []
55 83
56 84 try:
57 85 parsed = parse_requirements(
58 86 os.path.join(here, req_filename), session=PipSession())
59 87 except TypeError:
60 88 # try pip < 6.0.0, that doesn't support session
61 89 parsed = parse_requirements(os.path.join(here, req_filename))
62 90
63 91 requirements = []
64 for ir in parsed:
65 if ir.req and ir.name not in exclude:
66 requirements.append(str(ir.req))
92 for int_req in parsed:
93 req_name = get_package_name(int_req)
94 if req_name not in exclude:
95 requirements.append(req_name)
67 96 return requirements + extras
68 97
69 98
70 99 # requirements extract
71 setup_requirements = ['pytest-runner']
100 setup_requirements = []
72 101 install_requirements = _get_requirements(
73 102 'requirements.txt', exclude=['setuptools'])
74 103 test_requirements = _get_requirements(
75 104 'requirements_test.txt', extras=['configobj'])
76 105
77 106
78 107 def get_version():
79 108 version = pkgutil.get_data('vcsserver', 'VERSION')
80 109 return version.decode().strip()
81 110
82 111
83 112 # additional files that goes into package itself
84 113 package_data = {
85 114 '': ['*.txt', '*.rst'],
86 115 'configs': ['*.ini'],
87 116 'vcsserver': ['VERSION'],
88 117 }
89 118
90 119 description = 'Version Control System Server'
91 120 keywords = ' '.join(['Version Control System'])
92 121
93 122 # README/DESCRIPTION generation
94 123 readme_file = 'README.rst'
95 124 changelog_file = 'CHANGES.rst'
96 125 try:
97 126 long_description = codecs.open(readme_file).read() + '\n\n' + \
98 127 codecs.open(changelog_file).read()
99 128 except IOError as err:
100 129 sys.stderr.write(
101 130 "[WARNING] Cannot find file specified as long_description (%s)\n "
102 131 "or changelog (%s) skipping that file" % (readme_file, changelog_file))
103 132 long_description = description
104 133
105 134
106 135 setup(
107 136 name='rhodecode-vcsserver',
108 137 version=get_version(),
109 138 description=description,
110 139 long_description=long_description,
111 140 keywords=keywords,
112 141 license=__license__,
113 142 author=__author__,
114 143 author_email='support@rhodecode.com',
115 144 url=__url__,
116 145 setup_requires=setup_requirements,
117 146 install_requires=install_requirements,
118 147 tests_require=test_requirements,
119 148 zip_safe=False,
120 149 packages=find_packages(exclude=["docs", "tests*"]),
121 150 package_data=package_data,
122 151 include_package_data=True,
123 152 classifiers=[
124 153 'Development Status :: 6 - Mature',
125 154 'Intended Audience :: Developers',
126 155 'Operating System :: OS Independent',
127 156 'Topic :: Software Development :: Version Control',
128 157 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
129 158 'Programming Language :: Python :: 2.7',
130 159 ],
131 160 entry_points={
132 161 'paste.app_factory': ['main=vcsserver.http_main:main']
133 162 },
134 163 )
@@ -1,76 +1,132 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17 import os
18 18 import sys
19 19 import traceback
20 20 import logging
21 21 import urllib.parse
22 22
23 23 from vcsserver.lib.rc_cache import region_meta
24
25 from vcsserver import exceptions
26 from vcsserver.exceptions import NoContentException
27 from vcsserver.hgcompat import (archival)
28
24 29 log = logging.getLogger(__name__)
25 30
26 31
27 32 class RepoFactory(object):
28 33 """
29 34 Utility to create instances of repository
30 35
31 36 It provides internal caching of the `repo` object based on
32 37 the :term:`call context`.
33 38 """
34 39 repo_type = None
35 40
36 41 def __init__(self):
37 42 self._cache_region = region_meta.dogpile_cache_regions['repo_object']
38 43
39 44 def _create_config(self, path, config):
40 45 config = {}
41 46 return config
42 47
43 48 def _create_repo(self, wire, create):
44 49 raise NotImplementedError()
45 50
46 51 def repo(self, wire, create=False):
47 52 raise NotImplementedError()
48 53
49 54
50 55 def obfuscate_qs(query_string):
51 56 if query_string is None:
52 57 return None
53 58
54 59 parsed = []
55 60 for k, v in urllib.parse.parse_qsl(query_string, keep_blank_values=True):
56 61 if k in ['auth_token', 'api_key']:
57 62 v = "*****"
58 63 parsed.append((k, v))
59 64
60 65 return '&'.join('{}{}'.format(
61 66 k, '={}'.format(v) if v else '') for k, v in parsed)
62 67
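# e.g. (illustrative query string):
#   obfuscate_qs('auth_token=secret&page=2') -> 'auth_token=*****&page=2'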
63 68
64 69 def raise_from_original(new_type):
65 70 """
66 71 Raise a new exception type with original args and traceback.
67 72 """
68 73 exc_type, exc_value, exc_traceback = sys.exc_info()
69 74 new_exc = new_type(*exc_value.args)
70 75 # store the original traceback into the new exc
71 76 new_exc._org_exc_tb = traceback.format_exc(exc_traceback)
72 77
73 78 try:
74 79 raise new_exc.with_traceback(exc_traceback)
75 80 finally:
76 81 del exc_traceback
82
83
84 class ArchiveNode(object):
85 def __init__(self, path, mode, is_link, raw_bytes):
86 self.path = path
87 self.mode = mode
88 self.is_link = is_link
89 self.raw_bytes = raw_bytes
90
91
92 def archive_repo(walker, archive_dest_path, kind, mtime, archive_at_path,
93 archive_dir_name, commit_id, write_metadata=True, extra_metadata=None):
94 """
95 walker should be a file walker, for example:
96 def walker():
97 for file_info in files:
98 yield ArchiveNode(fn, mode, is_link, ctx[fn].data)
99 """
100 extra_metadata = extra_metadata or {}
101
102 if kind == "tgz":
103 archiver = archival.tarit(archive_dest_path, mtime, "gz")
104 elif kind == "tbz2":
105 archiver = archival.tarit(archive_dest_path, mtime, "bz2")
106 elif kind == 'zip':
107 archiver = archival.zipit(archive_dest_path, mtime)
108 else:
109 raise exceptions.ArchiveException()(
110 'Remote does not support: "%s" archive type.' % kind)
111
112 for f in walker(commit_id, archive_at_path):
113 f_path = os.path.join(archive_dir_name, f.path.lstrip('/'))
114 try:
115 archiver.addfile(f_path, f.mode, f.is_link, f.raw_bytes())
116 except NoContentException:
117 # NOTE(marcink): this is a special case for SVN so we can create "empty"
118 # directories which aren't supported by the archiver
119 archiver.addfile(os.path.join(f_path, '.dir'), f.mode, f.is_link, '')
120
121 if write_metadata:
122 metadata = dict([
123 ('commit_id', commit_id),
124 ('mtime', mtime),
125 ])
126 metadata.update(extra_metadata)
127
128 meta = ["%s:%s" % (f_name, value) for f_name, value in metadata.items()]
129 f_path = os.path.join(archive_dir_name, '.archival.txt')
130 archiver.addfile(f_path, 0o644, False, '\n'.join(meta))
131
132 return archiver.done()
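
A minimal usage sketch for archive_repo (hypothetical walker; `files_of` is an
assumed backend call). Note the last ArchiveNode argument must be callable,
since archive_repo invokes f.raw_bytes():

    def walker(commit_id, archive_at_path):
        for path, mode, is_link, get_bytes in files_of(commit_id):
            yield ArchiveNode(path, mode, is_link, get_bytes)

    archive_repo(walker, '/tmp/repo.tgz', 'tgz', mtime=1600000000,
                 archive_at_path='/', archive_dir_name='repo',
                 commit_id='deadbeef')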
@@ -1,121 +1,125 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """
19 19 Special exception handling over the wire.
20 20
21 21 Since we cannot assume that our client is able to import our exception classes,
22 22 this module provides a "wrapping" mechanism to raise plain exceptions
23 23 which contain an extra attribute `_vcs_kind` to allow a client to distinguish
24 24 different error conditions.
25 25 """
26 26
27 27 from pyramid.httpexceptions import HTTPLocked, HTTPForbidden
28 28
29 29
30 30 def _make_exception(kind, org_exc, *args):
31 31 """
32 32 Prepares a base `Exception` instance to be sent over the wire.
33 33
34 34 To give our caller a hint what this is about, it will attach an attribute
35 35 `_vcs_kind` to the exception.
36 36 """
37 37 exc = Exception(*args)
38 38 exc._vcs_kind = kind
39 39 exc._org_exc = org_exc
40 40 exc._org_exc_tb = getattr(org_exc, '_org_exc_tb', '')
41 41 return exc
42 42
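# A sketch of the caller side (illustrative): the plain Exception travels over
# the wire and the client inspects only the attached `_vcs_kind` marker:
#
#   try:
#       remote_call()
#   except Exception as e:
#       if getattr(e, '_vcs_kind', None) == 'repo_locked':
#           ...  # e.g. translate into an HTTPRepoLocked response
#       else:
#           raise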
43 43
44 44 def AbortException(org_exc=None):
45 45 def _make_exception_wrapper(*args):
46 46 return _make_exception('abort', org_exc, *args)
47 47 return _make_exception_wrapper
48 48
49 49
50 50 def ArchiveException(org_exc=None):
51 51 def _make_exception_wrapper(*args):
52 52 return _make_exception('archive', org_exc, *args)
53 53 return _make_exception_wrapper
54 54
55 55
56 56 def LookupException(org_exc=None):
57 57 def _make_exception_wrapper(*args):
58 58 return _make_exception('lookup', org_exc, *args)
59 59 return _make_exception_wrapper
60 60
61 61
62 62 def VcsException(org_exc=None):
63 63 def _make_exception_wrapper(*args):
64 64 return _make_exception('error', org_exc, *args)
65 65 return _make_exception_wrapper
66 66
67 67
68 68 def RepositoryLockedException(org_exc=None):
69 69 def _make_exception_wrapper(*args):
70 70 return _make_exception('repo_locked', org_exc, *args)
71 71 return _make_exception_wrapper
72 72
73 73
74 74 def RepositoryBranchProtectedException(org_exc=None):
75 75 def _make_exception_wrapper(*args):
76 76 return _make_exception('repo_branch_protected', org_exc, *args)
77 77 return _make_exception_wrapper
78 78
79 79
80 80 def RequirementException(org_exc=None):
81 81 def _make_exception_wrapper(*args):
82 82 return _make_exception('requirement', org_exc, *args)
83 83 return _make_exception_wrapper
84 84
85 85
86 86 def UnhandledException(org_exc=None):
87 87 def _make_exception_wrapper(*args):
88 88 return _make_exception('unhandled', org_exc, *args)
89 89 return _make_exception_wrapper
90 90
91 91
92 92 def URLError(org_exc=None):
93 93 def _make_exception_wrapper(*args):
94 94 return _make_exception('url_error', org_exc, *args)
95 95 return _make_exception_wrapper
96 96
97 97
98 98 def SubrepoMergeException(org_exc=None):
99 99 def _make_exception_wrapper(*args):
100 100 return _make_exception('subrepo_merge_error', org_exc, *args)
101 101 return _make_exception_wrapper
102 102
103 103
104 104 class HTTPRepoLocked(HTTPLocked):
105 105 """
106 106 Subclass of HTTPLocked response that allows setting the title and status
107 107 code via constructor arguments.
108 108 """
109 109 def __init__(self, title, status_code=None, **kwargs):
110 110 self.code = status_code or HTTPLocked.code
111 111 self.title = title
112 112 super(HTTPRepoLocked, self).__init__(**kwargs)
113 113
114 114
115 115 class HTTPRepoBranchProtected(HTTPForbidden):
116 116 def __init__(self, *args, **kwargs):
117 117 super(HTTPForbidden, self).__init__(*args, **kwargs)
118 118
119 119
120 120 class RefNotFoundException(KeyError):
121 121 pass
122
123
124 class NoContentException(ValueError):
125 pass
@@ -1,1192 +1,1281 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import collections
19 19 import logging
20 20 import os
21 21 import posixpath as vcspath
22 22 import re
23 23 import stat
24 24 import traceback
25 25 import urllib.request, urllib.parse, urllib.error
26 26 import urllib.request, urllib.error, urllib.parse
27 27 from functools import wraps
28 28
29 29 import more_itertools
30 30 import pygit2
31 31 from pygit2 import Repository as LibGit2Repo
32 from pygit2 import index as LibGit2Index
32 33 from dulwich import index, objects
33 34 from dulwich.client import HttpGitClient, LocalGitClient
34 35 from dulwich.errors import (
35 36 NotGitRepository, ChecksumMismatch, WrongObjectException,
36 37 MissingCommitError, ObjectMissing, HangupException,
37 38 UnexpectedCommandError)
38 39 from dulwich.repo import Repo as DulwichRepo
39 40 from dulwich.server import update_server_info
40 41
41 42 from vcsserver import exceptions, settings, subprocessio
42 43 from vcsserver.utils import safe_str, safe_int, safe_unicode
43 from vcsserver.base import RepoFactory, obfuscate_qs
44 from vcsserver.base import RepoFactory, obfuscate_qs, ArchiveNode, archive_repo
44 45 from vcsserver.hgcompat import (
45 46 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler)
46 47 from vcsserver.git_lfs.lib import LFSOidStore
47 48 from vcsserver.vcs_base import RemoteBase
48 49
49 50 DIR_STAT = stat.S_IFDIR
50 51 FILE_MODE = stat.S_IFMT
51 52 GIT_LINK = objects.S_IFGITLINK
52 53 PEELED_REF_MARKER = '^{}'
53 54
54 55
55 56 log = logging.getLogger(__name__)
56 57
57 58
58 59 def str_to_dulwich(value):
59 60 """
60 61 Dulwich 0.10.1a requires `unicode` objects to be passed in.
61 62 """
62 63 return value.decode(settings.WIRE_ENCODING)
63 64
64 65
65 66 def reraise_safe_exceptions(func):
66 67 """Converts Dulwich exceptions to something neutral."""
67 68
68 69 @wraps(func)
69 70 def wrapper(*args, **kwargs):
70 71 try:
71 72 return func(*args, **kwargs)
72 73 except (ChecksumMismatch, WrongObjectException, MissingCommitError, ObjectMissing,) as e:
73 74 exc = exceptions.LookupException(org_exc=e)
74 75 raise exc(safe_str(e))
75 76 except (HangupException, UnexpectedCommandError) as e:
76 77 exc = exceptions.VcsException(org_exc=e)
77 78 raise exc(safe_str(e))
78 79 except Exception as e:
80 81 # NOTE(marcink): because of how dulwich handles some exceptions
81 82 # (KeyError on empty repos), we cannot track this and catch all
82 83 # exceptions; these may be exceptions from other handlers
82 83 #if not hasattr(e, '_vcs_kind'):
83 84 #log.exception("Unhandled exception in git remote call")
84 85 #raise_from_original(exceptions.UnhandledException)
85 86 raise
86 87 return wrapper
87 88
88 89
89 90 class Repo(DulwichRepo):
90 91 """
91 92 A wrapper for dulwich Repo class.
92 93
93 94 Since dulwich is sometimes keeping .idx file descriptors open, it leads to
94 95 "Too many open files" error. We need to close all opened file descriptors
95 96 once the repo object is destroyed.
96 97 """
97 98 def __del__(self):
98 99 if hasattr(self, 'object_store'):
99 100 self.close()
100 101
101 102
102 103 class Repository(LibGit2Repo):
103 104
104 105 def __enter__(self):
105 106 return self
106 107
107 108 def __exit__(self, exc_type, exc_val, exc_tb):
108 109 self.free()
109 110
110 111
111 112 class GitFactory(RepoFactory):
112 113 repo_type = 'git'
113 114
114 115 def _create_repo(self, wire, create, use_libgit2=False):
115 116 if use_libgit2:
116 117 return Repository(wire['path'])
117 118 else:
118 119 repo_path = str_to_dulwich(wire['path'])
119 120 return Repo(repo_path)
120 121
121 122 def repo(self, wire, create=False, use_libgit2=False):
122 123 """
123 124 Get a repository instance for the given path.
124 125 """
125 126 return self._create_repo(wire, create, use_libgit2)
126 127
127 128 def repo_libgit2(self, wire):
128 129 return self.repo(wire, use_libgit2=True)
129 130
130 131
131 132 class GitRemote(RemoteBase):
132 133
133 134 def __init__(self, factory):
134 135 self._factory = factory
135 136 self._bulk_methods = {
136 137 "date": self.date,
137 138 "author": self.author,
138 139 "branch": self.branch,
139 140 "message": self.message,
140 141 "parents": self.parents,
141 142 "_commit": self.revision,
142 143 }
143 144
144 145 def _wire_to_config(self, wire):
145 146 if 'config' in wire:
146 147 return dict([(x[0] + '_' + x[1], x[2]) for x in wire['config']])
147 148 return {}
148 149
149 150 def _remote_conf(self, config):
150 151 params = [
151 152 '-c', 'core.askpass=""',
152 153 ]
153 154 ssl_cert_dir = config.get('vcs_ssl_dir')
154 155 if ssl_cert_dir:
155 156 params.extend(['-c', 'http.sslCAinfo={}'.format(ssl_cert_dir)])
156 157 return params
157 158
158 159 @reraise_safe_exceptions
159 160 def discover_git_version(self):
160 161 stdout, _ = self.run_git_command(
161 162 {}, ['--version'], _bare=True, _safe=True)
162 163 prefix = 'git version'
163 164 if stdout.startswith(prefix):
164 165 stdout = stdout[len(prefix):]
165 166 return stdout.strip()
166 167
167 168 @reraise_safe_exceptions
168 169 def is_empty(self, wire):
169 170 repo_init = self._factory.repo_libgit2(wire)
170 171 with repo_init as repo:
171 172
172 173 try:
173 174 has_head = repo.head.name
174 175 if has_head:
175 176 return False
176 177
177 178 # NOTE(marcink): check again using more expensive method
178 179 return repo.is_empty
179 180 except Exception:
180 181 pass
181 182
182 183 return True
183 184
184 185 @reraise_safe_exceptions
185 186 def assert_correct_path(self, wire):
186 187 cache_on, context_uid, repo_id = self._cache_on(wire)
187 @self.region.conditional_cache_on_arguments(condition=cache_on)
188 region = self._region(wire)
189 @region.conditional_cache_on_arguments(condition=cache_on)
188 190 def _assert_correct_path(_context_uid, _repo_id):
189 191 try:
190 192 repo_init = self._factory.repo_libgit2(wire)
191 193 with repo_init as repo:
192 194 pass
193 195 except pygit2.GitError:
194 196 path = wire.get('path')
195 197 tb = traceback.format_exc()
196 198 log.debug("Invalid Git path `%s`, tb: %s", path, tb)
197 199 return False
198 200
199 201 return True
200 202 return _assert_correct_path(context_uid, repo_id)
201 203
202 204 @reraise_safe_exceptions
203 205 def bare(self, wire):
204 206 repo_init = self._factory.repo_libgit2(wire)
205 207 with repo_init as repo:
206 208 return repo.is_bare
207 209
208 210 @reraise_safe_exceptions
209 211 def blob_as_pretty_string(self, wire, sha):
210 212 repo_init = self._factory.repo_libgit2(wire)
211 213 with repo_init as repo:
212 214 blob_obj = repo[sha]
213 215 blob = blob_obj.data
214 216 return blob
215 217
216 218 @reraise_safe_exceptions
217 219 def blob_raw_length(self, wire, sha):
218 220 cache_on, context_uid, repo_id = self._cache_on(wire)
219 @self.region.conditional_cache_on_arguments(condition=cache_on)
221 region = self._region(wire)
222 @region.conditional_cache_on_arguments(condition=cache_on)
220 223 def _blob_raw_length(_repo_id, _sha):
221 224
222 225 repo_init = self._factory.repo_libgit2(wire)
223 226 with repo_init as repo:
224 227 blob = repo[sha]
225 228 return blob.size
226 229
227 230 return _blob_raw_length(repo_id, sha)
228 231
229 232 def _parse_lfs_pointer(self, raw_content):
230 233
231 234 spec_string = 'version https://git-lfs.github.com/spec'
232 235 if raw_content and raw_content.startswith(spec_string):
233 236 pattern = re.compile(r"""
234 237 (?:\n)?
235 238 ^version[ ]https://git-lfs\.github\.com/spec/(?P<spec_ver>v\d+)\n
236 239 ^oid[ ] sha256:(?P<oid_hash>[0-9a-f]{64})\n
237 240 ^size[ ](?P<oid_size>[0-9]+)\n
238 241 (?:\n)?
239 242 """, re.VERBOSE | re.MULTILINE)
240 243 match = pattern.match(raw_content)
241 244 if match:
242 245 return match.groupdict()
243 246
244 247 return {}
245 248
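# An LFS pointer file handled above looks like (illustrative size, oid elided):
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<64 hex chars>
#   size 12345
# and groupdict() returns {'spec_ver': 'v1', 'oid_hash': '<64 hex chars>', 'oid_size': '12345'}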
246 249 @reraise_safe_exceptions
247 250 def is_large_file(self, wire, commit_id):
248 251 cache_on, context_uid, repo_id = self._cache_on(wire)
249 252
250 @self.region.conditional_cache_on_arguments(condition=cache_on)
253 region = self._region(wire)
254 @region.conditional_cache_on_arguments(condition=cache_on)
251 255 def _is_large_file(_repo_id, _sha):
252 256 repo_init = self._factory.repo_libgit2(wire)
253 257 with repo_init as repo:
254 258 blob = repo[commit_id]
255 259 if blob.is_binary:
256 260 return {}
257 261
258 262 return self._parse_lfs_pointer(blob.data)
259 263
260 264 return _is_large_file(repo_id, commit_id)
261 265
262 266 @reraise_safe_exceptions
263 267 def is_binary(self, wire, tree_id):
264 268 cache_on, context_uid, repo_id = self._cache_on(wire)
265 269
266 @self.region.conditional_cache_on_arguments(condition=cache_on)
270 region = self._region(wire)
271 @region.conditional_cache_on_arguments(condition=cache_on)
267 272 def _is_binary(_repo_id, _tree_id):
268 273 repo_init = self._factory.repo_libgit2(wire)
269 274 with repo_init as repo:
270 275 blob_obj = repo[tree_id]
271 276 return blob_obj.is_binary
272 277
273 278 return _is_binary(repo_id, tree_id)
274 279
275 280 @reraise_safe_exceptions
276 281 def in_largefiles_store(self, wire, oid):
277 282 conf = self._wire_to_config(wire)
278 283 repo_init = self._factory.repo_libgit2(wire)
279 284 with repo_init as repo:
280 285 repo_name = repo.path
281 286
282 287 store_location = conf.get('vcs_git_lfs_store_location')
283 288 if store_location:
284 289
285 290 store = LFSOidStore(
286 291 oid=oid, repo=repo_name, store_location=store_location)
287 292 return store.has_oid()
288 293
289 294 return False
290 295
291 296 @reraise_safe_exceptions
292 297 def store_path(self, wire, oid):
293 298 conf = self._wire_to_config(wire)
294 299 repo_init = self._factory.repo_libgit2(wire)
295 300 with repo_init as repo:
296 301 repo_name = repo.path
297 302
298 303 store_location = conf.get('vcs_git_lfs_store_location')
299 304 if store_location:
300 305 store = LFSOidStore(
301 306 oid=oid, repo=repo_name, store_location=store_location)
302 307 return store.oid_path
303 308 raise ValueError('Unable to fetch oid with path {}'.format(oid))
304 309
305 310 @reraise_safe_exceptions
306 311 def bulk_request(self, wire, rev, pre_load):
307 312 cache_on, context_uid, repo_id = self._cache_on(wire)
308 @self.region.conditional_cache_on_arguments(condition=cache_on)
313 region = self._region(wire)
314 @region.conditional_cache_on_arguments(condition=cache_on)
309 315 def _bulk_request(_repo_id, _rev, _pre_load):
310 316 result = {}
311 317 for attr in pre_load:
312 318 try:
313 319 method = self._bulk_methods[attr]
314 320 args = [wire, rev]
315 321 result[attr] = method(*args)
316 322 except KeyError as e:
317 323 raise exceptions.VcsException(e)(
318 324 "Unknown bulk attribute: %s" % attr)
319 325 return result
320 326
321 327 return _bulk_request(repo_id, rev, sorted(pre_load))
322 328
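# e.g. (illustrative): bulk_request(wire, rev, pre_load=['author', 'date', 'message'])
# resolves each name through self._bulk_methods above and returns a dict like
# {'author': ..., 'date': ..., 'message': ...} from a single cached call.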
323 329 def _build_opener(self, url):
324 330 handlers = []
325 331 url_obj = url_parser(url)
326 332 _, authinfo = url_obj.authinfo()
327 333
328 334 if authinfo:
329 335 # create a password manager
330 336 passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
331 337 passmgr.add_password(*authinfo)
332 338
333 339 handlers.extend((httpbasicauthhandler(passmgr),
334 340 httpdigestauthhandler(passmgr)))
335 341
336 342 return urllib.request.build_opener(*handlers)
337 343
338 344 def _type_id_to_name(self, type_id):
339 345 return {
340 346 1: b'commit',
341 347 2: b'tree',
342 348 3: b'blob',
343 349 4: b'tag'
344 350 }[type_id]
345 351
346 352 @reraise_safe_exceptions
347 353 def check_url(self, url, config):
348 354 url_obj = url_parser(url)
349 355 test_uri, _ = url_obj.authinfo()
350 356 url_obj.passwd = '*****' if url_obj.passwd else url_obj.passwd
351 357 url_obj.query = obfuscate_qs(url_obj.query)
352 358 cleaned_uri = str(url_obj)
353 359 log.info("Checking URL for remote cloning/import: %s", cleaned_uri)
354 360
355 361 if not test_uri.endswith('info/refs'):
356 362 test_uri = test_uri.rstrip('/') + '/info/refs'
357 363
358 364 o = self._build_opener(url)
359 365 o.addheaders = [('User-Agent', 'git/1.7.8.0')] # fake some git
360 366
361 367 q = {"service": 'git-upload-pack'}
362 368 qs = '?%s' % urllib.parse.urlencode(q)
363 369 cu = "%s%s" % (test_uri, qs)
364 370 req = urllib.request.Request(cu, None, {})
365 371
366 372 try:
367 373 log.debug("Trying to open URL %s", cleaned_uri)
368 374 resp = o.open(req)
369 375 if resp.code != 200:
370 376 raise exceptions.URLError()('Return Code is not 200')
371 377 except Exception as e:
372 378 log.warning("URL cannot be opened: %s", cleaned_uri, exc_info=True)
373 379 # means it cannot be cloned
374 380 raise exceptions.URLError(e)("[%s] org_exc: %s" % (cleaned_uri, e))
375 381
376 382 # now detect if it's proper git repo
377 383 gitdata = resp.read()
378 384 if b'service=git-upload-pack' in gitdata:  # gitdata is bytes on py3
379 385 pass
380 386 elif re.findall(br'[0-9a-fA-F]{40}\s+refs', gitdata):
381 387 # old style git can return some other format !
382 388 pass
383 389 else:
384 390 raise exceptions.URLError()(
385 391 "url [%s] does not look like an git" % (cleaned_uri,))
386 392
387 393 return True
388 394
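# The smart-HTTP probe URL built above ends up as (illustrative host/repo):
#   https://example.com/repo/info/refs?service=git-upload-pack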
389 395 @reraise_safe_exceptions
390 396 def clone(self, wire, url, deferred, valid_refs, update_after_clone):
391 397 # TODO(marcink): deprecate this method. Last i checked we don't use it anymore
392 398 remote_refs = self.pull(wire, url, apply_refs=False)
393 399 repo = self._factory.repo(wire)
394 400 if isinstance(valid_refs, list):
395 401 valid_refs = tuple(valid_refs)
396 402
397 403 for k in remote_refs:
398 404 # only parse heads/tags and skip so called deferred tags
399 405 if k.startswith(valid_refs) and not k.endswith(deferred):
400 406 repo[k] = remote_refs[k]
401 407
402 408 if update_after_clone:
403 409 # we want to checkout HEAD
404 410 repo["HEAD"] = remote_refs["HEAD"]
405 411 index.build_index_from_tree(repo.path, repo.index_path(),
406 412 repo.object_store, repo["HEAD"].tree)
407 413
408 414 @reraise_safe_exceptions
409 415 def branch(self, wire, commit_id):
410 416 cache_on, context_uid, repo_id = self._cache_on(wire)
411 @self.region.conditional_cache_on_arguments(condition=cache_on)
417 region = self._region(wire)
418 @region.conditional_cache_on_arguments(condition=cache_on)
412 419 def _branch(_context_uid, _repo_id, _commit_id):
413 420 regex = re.compile('^refs/heads')
414 421
415 422 def filter_with(ref):
416 423 return regex.match(ref[0]) and ref[1] == _commit_id
417 424
418 425 branches = list(filter(filter_with, list(self.get_refs(wire).items())))
419 426 return [x[0].split('refs/heads/')[-1] for x in branches]
420 427
421 428 return _branch(context_uid, repo_id, commit_id)
422 429
423 430 @reraise_safe_exceptions
424 431 def commit_branches(self, wire, commit_id):
425 432 cache_on, context_uid, repo_id = self._cache_on(wire)
426 @self.region.conditional_cache_on_arguments(condition=cache_on)
433 region = self._region(wire)
434 @region.conditional_cache_on_arguments(condition=cache_on)
427 435 def _commit_branches(_context_uid, _repo_id, _commit_id):
428 436 repo_init = self._factory.repo_libgit2(wire)
429 437 with repo_init as repo:
430 438 branches = [x for x in repo.branches.with_commit(_commit_id)]
431 439 return branches
432 440
433 441 return _commit_branches(context_uid, repo_id, commit_id)
434 442
435 443 @reraise_safe_exceptions
436 444 def add_object(self, wire, content):
437 445 repo_init = self._factory.repo_libgit2(wire)
438 446 with repo_init as repo:
439 447 blob = objects.Blob()
440 448 blob.set_raw_string(content)
441 449 repo.object_store.add_object(blob)
442 450 return blob.id
443 451
444 452 # TODO: this is quite complex, check if that can be simplified
445 453 @reraise_safe_exceptions
446 454 def commit(self, wire, commit_data, branch, commit_tree, updated, removed):
455 # Defines the root tree
456 class _Root(object):
457 def __repr__(self):
458 return 'ROOT TREE'
459 ROOT = _Root()
460
447 461 repo = self._factory.repo(wire)
448 462 object_store = repo.object_store
449 463
450 464 # Create tree and populates it with blobs
451 commit_tree = commit_tree and repo[commit_tree] or objects.Tree()
465
466 if commit_tree and repo[commit_tree]:
467 git_commit = repo[commit_data['parents'][0]]
468 commit_tree = repo[git_commit.tree] # root tree
469 else:
470 commit_tree = objects.Tree()
452 471
453 472 for node in updated:
454 473 # Compute subdirs if needed
455 474 dirpath, nodename = vcspath.split(node['path'])
456 475 dirnames = list(map(safe_str, dirpath and dirpath.split('/') or []))
457 476 parent = commit_tree
458 477 ancestors = [('', parent)]
459 478
460 479 # Tries to dig for the deepest existing tree
461 480 while dirnames:
462 481 curdir = dirnames.pop(0)
463 482 try:
464 483 dir_id = parent[curdir][1]
465 484 except KeyError:
466 485 # put curdir back into dirnames and stop
467 486 dirnames.insert(0, curdir)
468 487 break
469 488 else:
470 489 # If found, update parent
471 490 parent = repo[dir_id]
472 491 ancestors.append((curdir, parent))
473 492 # Now parent is the deepest existing tree and we need to create
474 493 # subtrees for dirnames (in reverse order)
475 494 # [this only applies to nodes from added]
476 495 new_trees = []
477 496
478 497 blob = objects.Blob.from_string(node['content'])
479 498
480 499 if dirnames:
481 500 # If there are trees which should be created we need to build
482 501 # them now (in reverse order)
483 502 reversed_dirnames = list(reversed(dirnames))
484 503 curtree = objects.Tree()
485 504 curtree[node['node_path']] = node['mode'], blob.id
486 505 new_trees.append(curtree)
487 506 for dirname in reversed_dirnames[:-1]:
488 507 newtree = objects.Tree()
489 508 newtree[dirname] = (DIR_STAT, curtree.id)
490 509 new_trees.append(newtree)
491 510 curtree = newtree
492 511 parent[reversed_dirnames[-1]] = (DIR_STAT, curtree.id)
493 512 else:
494 513 parent.add(name=node['node_path'], mode=node['mode'], hexsha=blob.id)
495 514
496 515 new_trees.append(parent)
497 516 # Update ancestors
498 517 reversed_ancestors = reversed(
499 518 [(a[1], b[1], b[0]) for a, b in zip(ancestors, ancestors[1:])])
500 519 for parent, tree, path in reversed_ancestors:
501 520 parent[path] = (DIR_STAT, tree.id)
502 521 object_store.add_object(tree)
503 522
504 523 object_store.add_object(blob)
505 524 for tree in new_trees:
506 525 object_store.add_object(tree)
507 526
508 527 for node_path in removed:
509 528 paths = node_path.split('/')
510 tree = commit_tree
511 trees = [tree]
529 tree = commit_tree # start with top-level
530 trees = [{'tree': tree, 'path': ROOT}]
512 531 # Traverse deep into the forest...
532 # resolve the final tree by iterating the path.
533 # e.g. a/b/c.txt will get
534 # - root as tree, then
535 # - 'a' as tree,
536 # - 'b' as tree,
537 # - stop at c as blob.
513 538 for path in paths:
514 539 try:
515 540 obj = repo[tree[path][1]]
516 541 if isinstance(obj, objects.Tree):
517 trees.append(obj)
542 trees.append({'tree': obj, 'path': path})
518 543 tree = obj
519 544 except KeyError:
520 545 break
546 # PROBLEM:
547 """
548 We're not editing the same reference tree object
549 """
521 550 # Cut down the blob and all rotten trees on the way back...
522 for path, tree in reversed(list(zip(paths, trees))):
523 del tree[path]
524 if tree:
551 for path, tree_data in reversed(list(zip(paths, trees))):
552 tree = tree_data['tree']
553 tree.__delitem__(path)
554 # This operation edits the tree in place; the change must be written back into the new commit
555
556 if len(tree) > 0:
525 557 # This tree still has elements - don't remove it or any
526 558 # of its parents
527 559 break
528 560
529 561 object_store.add_object(commit_tree)
530 562
531 563 # Create commit
532 564 commit = objects.Commit()
533 565 commit.tree = commit_tree.id
534 566 for k, v in commit_data.items():
535 567 setattr(commit, k, v)
536 568 object_store.add_object(commit)
537 569
538 570 self.create_branch(wire, branch, commit.id)
539 571
540 572 # dulwich set-ref
541 573 ref = 'refs/heads/%s' % branch
542 574 repo.refs[ref] = commit.id
543 575
544 576 return commit.id
545 577
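The directory handling in commit() above first splits each node path, then walks down from the root to the deepest tree that already exists, and only creates the missing subtrees. A standalone sketch of that walk, using plain nested dicts in place of dulwich Tree objects:

    import posixpath

    def deepest_existing(root, node_path):
        """Walk e.g. 'a/b/c.txt' down from root; return the deepest existing
        subtree plus the directory names that still need to be created."""
        dirpath, _name = posixpath.split(node_path)
        dirnames = dirpath.split('/') if dirpath else []
        parent, ancestors = root, [('', root)]
        while dirnames:
            curdir = dirnames.pop(0)
            child = parent.get(curdir)
            if not isinstance(child, dict):
                dirnames.insert(0, curdir)  # put curdir back and stop
                break
            parent = child
            ancestors.append((curdir, parent))
        return parent, dirnames, ancestors

    # usage: for 'a/b/c.txt' with only 'a' present, 'b' still has to be created
    tree = {'a': {}}
    parent, missing, ancestors = deepest_existing(tree, 'a/b/c.txt')
    assert parent is tree['a'] and missing == ['b']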
546 578 @reraise_safe_exceptions
547 579 def pull(self, wire, url, apply_refs=True, refs=None, update_after=False):
548 580 if url != 'default' and '://' not in url:
549 581 client = LocalGitClient(url)
550 582 else:
551 583 url_obj = url_parser(url)
552 584 o = self._build_opener(url)
553 585 url, _ = url_obj.authinfo()
554 586 client = HttpGitClient(base_url=url, opener=o)
555 587 repo = self._factory.repo(wire)
556 588
557 589 determine_wants = repo.object_store.determine_wants_all
558 590 if refs:
559 591 def determine_wants_requested(references):
560 592 return [references[r] for r in references if r in refs]
561 593 determine_wants = determine_wants_requested
562 594
563 595 try:
564 596 remote_refs = client.fetch(
565 597 path=url, target=repo, determine_wants=determine_wants)
566 598 except NotGitRepository as e:
567 599 log.warning(
568 600 'Trying to fetch from "%s" failed, not a Git repository.', url)
569 601 # Exception can contain unicode, which we convert
570 602 raise exceptions.AbortException(e)(repr(e))
571 603
572 604 # mikhail: client.fetch() returns all the remote refs, but fetches only
573 605 # refs filtered by the `determine_wants` function. We need to filter the
574 606 # result as well
575 607 if refs:
576 608 remote_refs = {k: remote_refs[k] for k in remote_refs if k in refs}
577 609
578 610 if apply_refs:
579 611 # TODO: johbo: Needs proper test coverage with a git repository
580 612 # that contains a tag object, so that we would end up with
581 613 # a peeled ref at this point.
582 614 for k in remote_refs:
583 615 if k.endswith(PEELED_REF_MARKER):
584 616 log.debug("Skipping peeled reference %s", k)
585 617 continue
586 618 repo[k] = remote_refs[k]
587 619
588 620 if refs and not update_after:
589 621 # mikhail: explicitly set the head to the last ref.
590 repo['HEAD'] = remote_refs[refs[-1]]
622 repo["HEAD"] = remote_refs[refs[-1]]
591 623
592 624 if update_after:
593 625 # we want to checkout HEAD
594 626 repo["HEAD"] = remote_refs["HEAD"]
595 627 index.build_index_from_tree(repo.path, repo.index_path(),
596 628 repo.object_store, repo["HEAD"].tree)
597 629 return remote_refs
598 630
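When specific refs are requested, pull() swaps dulwich's determine_wants_all for a filter over the advertised refs. A minimal self-contained version of that filter, assuming a dulwich-style mapping of ref name to sha:

    def make_determine_wants(requested_refs):
        def determine_wants(references):
            # fetch only the shas of the refs the caller asked for
            return [references[r] for r in references if r in requested_refs]
        return determine_wants

    refs = {'refs/heads/main': 'abc123', 'refs/tags/v1': 'def456'}
    wants = make_determine_wants({'refs/heads/main'})
    assert wants(refs) == ['abc123']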
599 631 @reraise_safe_exceptions
600 632 def sync_fetch(self, wire, url, refs=None, all_refs=False):
601 633 repo = self._factory.repo(wire)
602 634 if refs and not isinstance(refs, (list, tuple)):
603 635 refs = [refs]
604 636
605 637 config = self._wire_to_config(wire)
606 638 # get all remote refs we'll use to fetch later
607 639 cmd = ['ls-remote']
608 640 if not all_refs:
609 641 cmd += ['--heads', '--tags']
610 642 cmd += [url]
611 643 output, __ = self.run_git_command(
612 644 wire, cmd, fail_on_stderr=False,
613 645 _copts=self._remote_conf(config),
614 646 extra_env={'GIT_TERMINAL_PROMPT': '0'})
615 647
616 648 remote_refs = collections.OrderedDict()
617 649 fetch_refs = []
618 650
619 651 for ref_line in output.splitlines():
620 652 sha, ref = ref_line.split('\t')
621 653 sha = sha.strip()
622 654 if ref in remote_refs:
623 655 # duplicate, skip
624 656 continue
625 657 if ref.endswith(PEELED_REF_MARKER):
626 658 log.debug("Skipping peeled reference %s", ref)
627 659 continue
628 660 # don't sync HEAD
629 661 if ref in ['HEAD']:
630 662 continue
631 663
632 664 remote_refs[ref] = sha
633 665
634 666 if refs and sha in refs:
635 667 # we filter fetch using our specified refs
636 668 fetch_refs.append('{}:{}'.format(ref, ref))
637 669 elif not refs:
638 670 fetch_refs.append('{}:{}'.format(ref, ref))
639 671 log.debug('Finished obtaining fetch refs, total: %s', len(fetch_refs))
640 672
641 673 if fetch_refs:
642 674 for chunk in more_itertools.chunked(fetch_refs, 1024 * 4):
643 675 fetch_refs_chunks = list(chunk)
644 676 log.debug('Fetching %s refs from import url', len(fetch_refs_chunks))
645 677 _out, _err = self.run_git_command(
646 678 wire, ['fetch', url, '--force', '--prune', '--'] + fetch_refs_chunks,
647 679 fail_on_stderr=False,
648 680 _copts=self._remote_conf(config),
649 681 extra_env={'GIT_TERMINAL_PROMPT': '0'})
650 682
651 683 return remote_refs
652 684
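sync_fetch() builds its fetch list from `git ls-remote` output, skipping duplicates, peeled tag refs, and HEAD, then fetches in chunks so the command line stays bounded. A rough sketch, with a hard-coded sample standing in for the real ls-remote call:

    import collections
    import more_itertools

    def parse_ls_remote(ls_remote_output, peeled_marker='^{}'):
        remote_refs = collections.OrderedDict()
        for line in ls_remote_output.splitlines():
            sha, ref = line.split('\t')
            if ref in remote_refs or ref.endswith(peeled_marker) or ref == 'HEAD':
                continue
            remote_refs[ref] = sha.strip()
        return remote_refs

    sample = 'aaa\trefs/heads/main\nbbb\trefs/tags/v1^{}\nccc\tHEAD\n'
    refs = parse_ls_remote(sample)
    assert list(refs) == ['refs/heads/main']

    fetch_refs = ['{}:{}'.format(r, r) for r in refs]
    for chunk in more_itertools.chunked(fetch_refs, 4096):
        pass  # each chunk would go to `git fetch <url> --force --prune --`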
653 685 @reraise_safe_exceptions
654 686 def sync_push(self, wire, url, refs=None):
655 687 if not self.check_url(url, wire):
656 688 return
657 689 config = self._wire_to_config(wire)
658 690 self._factory.repo(wire)
659 691 self.run_git_command(
660 692 wire, ['push', url, '--mirror'], fail_on_stderr=False,
661 693 _copts=self._remote_conf(config),
662 694 extra_env={'GIT_TERMINAL_PROMPT': '0'})
663 695
664 696 @reraise_safe_exceptions
665 697 def get_remote_refs(self, wire, url):
666 698 repo = Repo(url)
667 699 return repo.get_refs()
668 700
669 701 @reraise_safe_exceptions
670 702 def get_description(self, wire):
671 703 repo = self._factory.repo(wire)
672 704 return repo.get_description()
673 705
674 706 @reraise_safe_exceptions
675 707 def get_missing_revs(self, wire, rev1, rev2, path2):
676 708 repo = self._factory.repo(wire)
677 709 LocalGitClient(thin_packs=False).fetch(path2, repo)
678 710
679 711 wire_remote = wire.copy()
680 712 wire_remote['path'] = path2
681 713 repo_remote = self._factory.repo(wire_remote)
682 714 LocalGitClient(thin_packs=False).fetch(wire["path"], repo_remote)
683 715
684 716 revs = [
685 717 x.commit.id
686 718 for x in repo_remote.get_walker(include=[rev2], exclude=[rev1])]
687 719 return revs
688 720
689 721 @reraise_safe_exceptions
690 722 def get_object(self, wire, sha, maybe_unreachable=False):
691 723 cache_on, context_uid, repo_id = self._cache_on(wire)
692 @self.region.conditional_cache_on_arguments(condition=cache_on)
724 region = self._region(wire)
725 @region.conditional_cache_on_arguments(condition=cache_on)
693 726 def _get_object(_context_uid, _repo_id, _sha):
694 727 repo_init = self._factory.repo_libgit2(wire)
695 728 with repo_init as repo:
696 729
697 730 missing_commit_err = 'Commit {} does not exist for `{}`'.format(sha, wire['path'])
698 731 try:
699 732 commit = repo.revparse_single(sha)
700 733 except KeyError:
701 734 # NOTE(marcink): KeyError doesn't give us any meaningful information
702 735 # here, we instead give something more explicit
703 736 e = exceptions.RefNotFoundException('SHA: %s not found', sha)
704 737 raise exceptions.LookupException(e)(missing_commit_err)
705 738 except ValueError as e:
706 739 raise exceptions.LookupException(e)(missing_commit_err)
707 740
708 741 is_tag = False
709 742 if isinstance(commit, pygit2.Tag):
710 743 commit = repo.get(commit.target)
711 744 is_tag = True
712 745
713 746 check_dangling = True
714 747 if is_tag:
715 748 check_dangling = False
716 749
717 750 if check_dangling and maybe_unreachable:
718 751 check_dangling = False
719 752
720 753 # we used a reference and it parsed, which means we don't have a dangling commit
721 754 if sha != commit.hex:
722 755 check_dangling = False
723 756
724 757 if check_dangling:
725 758 # check for dangling commit
726 759 for branch in repo.branches.with_commit(commit.hex):
727 760 if branch:
728 761 break
729 762 else:
730 763 # NOTE(marcink): Empty error doesn't give us any meaningful information
731 764 # here, we instead give something more explicit
732 765 e = exceptions.RefNotFoundException('SHA: %s not found in branches', sha)
733 766 raise exceptions.LookupException(e)(missing_commit_err)
734 767
735 768 commit_id = commit.hex
736 769 type_id = commit.type_str
737 770
738 771 return {
739 772 'id': commit_id,
740 773 'type': self._type_id_to_name(type_id),
741 774 'commit_id': commit_id,
742 775 'idx': 0
743 776 }
744 777
745 778 return _get_object(context_uid, repo_id, sha)
746 779
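The dangling-commit check in _get_object() relies on Python's for/else: the else branch only runs when the loop finishes without finding a branch that contains the sha. In isolation, assuming an iterable of branch names:

    def is_dangling(branches_with_commit):
        # mirrors the for/else above: else fires when the loop completes
        # without locating any branch that contains the commit
        for branch in branches_with_commit:
            if branch:
                return False
        else:
            return True

    assert is_dangling([]) is True
    assert is_dangling(['master']) is False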
747 780 @reraise_safe_exceptions
748 781 def get_refs(self, wire):
749 782 cache_on, context_uid, repo_id = self._cache_on(wire)
750 @self.region.conditional_cache_on_arguments(condition=cache_on)
783 region = self._region(wire)
784 @region.conditional_cache_on_arguments(condition=cache_on)
751 785 def _get_refs(_context_uid, _repo_id):
752 786
753 787 repo_init = self._factory.repo_libgit2(wire)
754 788 with repo_init as repo:
755 789 regex = re.compile('^refs/(heads|tags)/')
756 790 return {x.name: x.target.hex for x in
757 791 [ref for ref in repo.listall_reference_objects() if regex.match(ref.name)]}
758 792
759 793 return _get_refs(context_uid, repo_id)
760 794
761 795 @reraise_safe_exceptions
762 796 def get_branch_pointers(self, wire):
763 797 cache_on, context_uid, repo_id = self._cache_on(wire)
764 @self.region.conditional_cache_on_arguments(condition=cache_on)
798 region = self._region(wire)
799 @region.conditional_cache_on_arguments(condition=cache_on)
765 800 def _get_branch_pointers(_context_uid, _repo_id):
766 801
767 802 repo_init = self._factory.repo_libgit2(wire)
768 803 regex = re.compile('^refs/heads')
769 804 with repo_init as repo:
770 805 branches = [ref for ref in repo.listall_reference_objects() if regex.match(ref.name)]
771 806 return {x.target.hex: x.shorthand for x in branches}
772 807
773 808 return _get_branch_pointers(context_uid, repo_id)
774 809
775 810 @reraise_safe_exceptions
776 811 def head(self, wire, show_exc=True):
777 812 cache_on, context_uid, repo_id = self._cache_on(wire)
778 @self.region.conditional_cache_on_arguments(condition=cache_on)
813 region = self._region(wire)
814 @region.conditional_cache_on_arguments(condition=cache_on)
779 815 def _head(_context_uid, _repo_id, _show_exc):
780 816 repo_init = self._factory.repo_libgit2(wire)
781 817 with repo_init as repo:
782 818 try:
783 819 return repo.head.peel().hex
784 820 except Exception:
785 821 if show_exc:
786 822 raise
787 823 return _head(context_uid, repo_id, show_exc)
788 824
789 825 @reraise_safe_exceptions
790 826 def init(self, wire):
791 827 repo_path = str_to_dulwich(wire['path'])
792 828 self.repo = Repo.init(repo_path)
793 829
794 830 @reraise_safe_exceptions
795 831 def init_bare(self, wire):
796 832 repo_path = str_to_dulwich(wire['path'])
797 833 self.repo = Repo.init_bare(repo_path)
798 834
799 835 @reraise_safe_exceptions
800 836 def revision(self, wire, rev):
801 837
802 838 cache_on, context_uid, repo_id = self._cache_on(wire)
803 @self.region.conditional_cache_on_arguments(condition=cache_on)
839 region = self._region(wire)
840 @region.conditional_cache_on_arguments(condition=cache_on)
804 841 def _revision(_context_uid, _repo_id, _rev):
805 842 repo_init = self._factory.repo_libgit2(wire)
806 843 with repo_init as repo:
807 844 commit = repo[rev]
808 845 obj_data = {
809 846 'id': commit.id.hex,
810 847 }
811 848 # tree objects themselves don't have a tree_id attribute
812 849 if hasattr(commit, 'tree_id'):
813 850 obj_data['tree'] = commit.tree_id.hex
814 851
815 852 return obj_data
816 853 return _revision(context_uid, repo_id, rev)
817 854
818 855 @reraise_safe_exceptions
819 856 def date(self, wire, commit_id):
820 857 cache_on, context_uid, repo_id = self._cache_on(wire)
821 @self.region.conditional_cache_on_arguments(condition=cache_on)
858 region = self._region(wire)
859 @region.conditional_cache_on_arguments(condition=cache_on)
822 860 def _date(_repo_id, _commit_id):
823 861 repo_init = self._factory.repo_libgit2(wire)
824 862 with repo_init as repo:
825 863 commit = repo[commit_id]
826 864
827 865 if hasattr(commit, 'commit_time'):
828 866 commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset
829 867 else:
830 868 commit = commit.get_object()
831 869 commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset
832 870
833 871 # TODO(marcink): check dulwich difference of offset vs timezone
834 872 return [commit_time, commit_time_offset]
835 873 return _date(repo_id, commit_id)
836 874
837 875 @reraise_safe_exceptions
838 876 def author(self, wire, commit_id):
839 877 cache_on, context_uid, repo_id = self._cache_on(wire)
840 @self.region.conditional_cache_on_arguments(condition=cache_on)
878 region = self._region(wire)
879 @region.conditional_cache_on_arguments(condition=cache_on)
841 880 def _author(_repo_id, _commit_id):
842 881 repo_init = self._factory.repo_libgit2(wire)
843 882 with repo_init as repo:
844 883 commit = repo[commit_id]
845 884
846 885 if hasattr(commit, 'author'):
847 886 author = commit.author
848 887 else:
849 888 author = commit.get_object().author
850 889
851 890 if author.email:
852 891 return "{} <{}>".format(author.name, author.email)
853 892
854 893 try:
855 894 return "{}".format(author.name)
856 895 except Exception:
857 896 return "{}".format(safe_unicode(author.raw_name))
858 897
859 898 return _author(repo_id, commit_id)
860 899
861 900 @reraise_safe_exceptions
862 901 def message(self, wire, commit_id):
863 902 cache_on, context_uid, repo_id = self._cache_on(wire)
864 @self.region.conditional_cache_on_arguments(condition=cache_on)
903 region = self._region(wire)
904 @region.conditional_cache_on_arguments(condition=cache_on)
865 905 def _message(_repo_id, _commit_id):
866 906 repo_init = self._factory.repo_libgit2(wire)
867 907 with repo_init as repo:
868 908 commit = repo[commit_id]
869 909 return commit.message
870 910 return _message(repo_id, commit_id)
871 911
872 912 @reraise_safe_exceptions
873 913 def parents(self, wire, commit_id):
874 914 cache_on, context_uid, repo_id = self._cache_on(wire)
875 @self.region.conditional_cache_on_arguments(condition=cache_on)
915 region = self._region(wire)
916 @region.conditional_cache_on_arguments(condition=cache_on)
876 917 def _parents(_repo_id, _commit_id):
877 918 repo_init = self._factory.repo_libgit2(wire)
878 919 with repo_init as repo:
879 920 commit = repo[commit_id]
880 921 if hasattr(commit, 'parent_ids'):
881 922 parent_ids = commit.parent_ids
882 923 else:
883 924 parent_ids = commit.get_object().parent_ids
884 925
885 926 return [x.hex for x in parent_ids]
886 927 return _parents(repo_id, commit_id)
887 928
888 929 @reraise_safe_exceptions
889 930 def children(self, wire, commit_id):
890 931 cache_on, context_uid, repo_id = self._cache_on(wire)
891 @self.region.conditional_cache_on_arguments(condition=cache_on)
932 region = self._region(wire)
933 @region.conditional_cache_on_arguments(condition=cache_on)
892 934 def _children(_repo_id, _commit_id):
893 935 output, __ = self.run_git_command(
894 936 wire, ['rev-list', '--all', '--children'])
895 937
896 938 child_ids = []
897 939 pat = re.compile(r'^%s' % commit_id)
898 940 for l in output.splitlines():
899 941 if pat.match(l):
900 942 found_ids = l.split(' ')[1:]
901 943 child_ids.extend(found_ids)
902 944
903 945 return child_ids
904 946 return _children(repo_id, commit_id)
905 947
906 948 @reraise_safe_exceptions
907 949 def set_refs(self, wire, key, value):
908 950 repo_init = self._factory.repo_libgit2(wire)
909 951 with repo_init as repo:
910 952 repo.references.create(key, value, force=True)
911 953
912 954 @reraise_safe_exceptions
913 955 def create_branch(self, wire, branch_name, commit_id, force=False):
914 956 repo_init = self._factory.repo_libgit2(wire)
915 957 with repo_init as repo:
916 958 commit = repo[commit_id]
917 959
918 960 if force:
919 961 repo.branches.local.create(branch_name, commit, force=force)
920 962 elif not repo.branches.get(branch_name):
921 963 # create only if that branch doesn't already exist
922 964 repo.branches.local.create(branch_name, commit, force=force)
923 965
924 966 @reraise_safe_exceptions
925 967 def remove_ref(self, wire, key):
926 968 repo_init = self._factory.repo_libgit2(wire)
927 969 with repo_init as repo:
928 970 repo.references.delete(key)
929 971
930 972 @reraise_safe_exceptions
931 973 def tag_remove(self, wire, tag_name):
932 974 repo_init = self._factory.repo_libgit2(wire)
933 975 with repo_init as repo:
934 976 key = 'refs/tags/{}'.format(tag_name)
935 977 repo.references.delete(key)
936 978
937 979 @reraise_safe_exceptions
938 980 def tree_changes(self, wire, source_id, target_id):
939 981 # TODO(marcink): remove this seems it's only used by tests
940 982 repo = self._factory.repo(wire)
941 983 source = repo[source_id].tree if source_id else None
942 984 target = repo[target_id].tree
943 985 result = repo.object_store.tree_changes(source, target)
944 986 return list(result)
945 987
946 988 @reraise_safe_exceptions
947 989 def tree_and_type_for_path(self, wire, commit_id, path):
948 990
949 991 cache_on, context_uid, repo_id = self._cache_on(wire)
950 @self.region.conditional_cache_on_arguments(condition=cache_on)
992 region = self._region(wire)
993 @region.conditional_cache_on_arguments(condition=cache_on)
951 994 def _tree_and_type_for_path(_context_uid, _repo_id, _commit_id, _path):
952 995 repo_init = self._factory.repo_libgit2(wire)
953 996
954 997 with repo_init as repo:
955 998 commit = repo[commit_id]
956 999 try:
957 1000 tree = commit.tree[path]
958 1001 except KeyError:
959 1002 return None, None, None
960 1003
961 1004 return tree.id.hex, tree.type_str, tree.filemode
962 1005 return _tree_and_type_for_path(context_uid, repo_id, commit_id, path)
963 1006
964 1007 @reraise_safe_exceptions
965 1008 def tree_items(self, wire, tree_id):
966 1009 cache_on, context_uid, repo_id = self._cache_on(wire)
967 @self.region.conditional_cache_on_arguments(condition=cache_on)
1010 region = self._region(wire)
1011 @region.conditional_cache_on_arguments(condition=cache_on)
968 1012 def _tree_items(_repo_id, _tree_id):
969 1013
970 1014 repo_init = self._factory.repo_libgit2(wire)
971 1015 with repo_init as repo:
972 1016 try:
973 1017 tree = repo[tree_id]
974 1018 except KeyError:
975 1019 raise ObjectMissing('No tree with id: {}'.format(tree_id))
976 1020
977 1021 result = []
978 1022 for item in tree:
979 1023 item_sha = item.hex
980 1024 item_mode = item.filemode
981 1025 item_type = item.type_str
982 1026
983 1027 if item_type == 'commit':
984 1028 # NOTE(marcink): we translate submodules to 'link' for backward compat
985 1029 item_type = 'link'
986 1030
987 1031 result.append((item.name, item_mode, item_sha, item_type))
988 1032 return result
989 1033 return _tree_items(repo_id, tree_id)
990 1034
991 1035 @reraise_safe_exceptions
992 1036 def diff_2(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context):
993 1037 """
994 1038 Old version that uses subprocess to call diff
995 1039 """
996 1040
997 1041 flags = [
998 1042 '-U%s' % context, '--patch',
999 1043 '--binary',
1000 1044 '--find-renames',
1001 1045 '--no-indent-heuristic',
1002 1046 # '--indent-heuristic',
1003 1047 #'--full-index',
1004 1048 #'--abbrev=40'
1005 1049 ]
1006 1050
1007 1051 if opt_ignorews:
1008 1052 flags.append('--ignore-all-space')
1009 1053
1010 1054 if commit_id_1 == self.EMPTY_COMMIT:
1011 1055 cmd = ['show'] + flags + [commit_id_2]
1012 1056 else:
1013 1057 cmd = ['diff'] + flags + [commit_id_1, commit_id_2]
1014 1058
1015 1059 if file_filter:
1016 1060 cmd.extend(['--', file_filter])
1017 1061
1018 1062 diff, __ = self.run_git_command(wire, cmd)
1019 1063 # If we used 'show' command, strip first few lines (until actual diff
1020 1064 # starts)
1021 1065 if commit_id_1 == self.EMPTY_COMMIT:
1022 1066 lines = diff.splitlines()
1023 1067 x = 0
1024 1068 for line in lines:
1025 1069 if line.startswith('diff'):
1026 1070 break
1027 1071 x += 1
1028 1072 # Append a newline just like the 'diff' command does
1029 1073 diff = '\n'.join(lines[x:]) + '\n'
1030 1074 return diff
1031 1075
1032 1076 @reraise_safe_exceptions
1033 1077 def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context):
1034 1078 repo_init = self._factory.repo_libgit2(wire)
1035 1079 with repo_init as repo:
1036 1080 swap = True
1037 1081 flags = 0
1038 1082 flags |= pygit2.GIT_DIFF_SHOW_BINARY
1039 1083
1040 1084 if opt_ignorews:
1041 1085 flags |= pygit2.GIT_DIFF_IGNORE_WHITESPACE
1042 1086
1043 1087 if commit_id_1 == self.EMPTY_COMMIT:
1044 1088 comm1 = repo[commit_id_2]
1045 1089 diff_obj = comm1.tree.diff_to_tree(
1046 1090 flags=flags, context_lines=context, swap=swap)
1047 1091
1048 1092 else:
1049 1093 comm1 = repo[commit_id_2]
1050 1094 comm2 = repo[commit_id_1]
1051 1095 diff_obj = comm1.tree.diff_to_tree(
1052 1096 comm2.tree, flags=flags, context_lines=context, swap=swap)
1053 1097 similar_flags = 0
1054 1098 similar_flags |= pygit2.GIT_DIFF_FIND_RENAMES
1055 1099 diff_obj.find_similar(flags=similar_flags)
1056 1100
1057 1101 if file_filter:
1058 1102 for p in diff_obj:
1059 1103 if p.delta.old_file.path == file_filter:
1060 1104 return p.patch or ''
1061 1105 # no matching path == no diff
1062 1106 return ''
1063 1107 return diff_obj.patch or ''
1064 1108
1065 1109 @reraise_safe_exceptions
1066 1110 def node_history(self, wire, commit_id, path, limit):
1067 1111 cache_on, context_uid, repo_id = self._cache_on(wire)
1068 @self.region.conditional_cache_on_arguments(condition=cache_on)
1112 region = self._region(wire)
1113 @region.conditional_cache_on_arguments(condition=cache_on)
1069 1114 def _node_history(_context_uid, _repo_id, _commit_id, _path, _limit):
1070 1115 # optimize for n==1, rev-list is much faster for that use-case
1071 1116 if limit == 1:
1072 1117 cmd = ['rev-list', '-1', commit_id, '--', path]
1073 1118 else:
1074 1119 cmd = ['log']
1075 1120 if limit:
1076 1121 cmd.extend(['-n', str(safe_int(limit, 0))])
1077 1122 cmd.extend(['--pretty=format: %H', '-s', commit_id, '--', path])
1078 1123
1079 1124 output, __ = self.run_git_command(wire, cmd)
1080 1125 commit_ids = re.findall(r'[0-9a-fA-F]{40}', output)
1081 1126
1082 1127 return [x for x in commit_ids]
1083 1128 return _node_history(context_uid, repo_id, commit_id, path, limit)
1084 1129
1085 1130 @reraise_safe_exceptions
1086 1131 def node_annotate(self, wire, commit_id, path):
1087 1132
1088 1133 cmd = ['blame', '-l', '--root', '-r', commit_id, '--', path]
1089 1134 # -l ==> outputs long shas (and we need all 40 characters)
1090 1135 # --root ==> doesn't put '^' character for boundaries
1091 1136 # -r commit_id ==> blames for the given commit
1092 1137 output, __ = self.run_git_command(wire, cmd)
1093 1138
1094 1139 result = []
1095 1140 for i, blame_line in enumerate(output.split('\n')[:-1]):
1096 1141 line_no = i + 1
1097 1142 commit_id, line = re.split(r' ', blame_line, 1)
1098 1143 result.append((line_no, commit_id, line))
1099 1144 return result
1100 1145
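node_annotate() depends on `git blame -l` printing one output line per source line, with the full 40-character sha as the first token. A tiny sketch of that split, using a fabricated blame line:

    import re

    sample_output = ('a' * 40) + ' (Author 2020-01-01 1) first line of the file'
    result = []
    for i, blame_line in enumerate(sample_output.split('\n')):
        # first token is the 40-char sha, the rest is the annotated line
        commit_id, line = re.split(r' ', blame_line, maxsplit=1)
        result.append((i + 1, commit_id, line))

    assert result == [(1, 'a' * 40, '(Author 2020-01-01 1) first line of the file')]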
1101 1146 @reraise_safe_exceptions
1102 1147 def update_server_info(self, wire):
1103 1148 repo = self._factory.repo(wire)
1104 1149 update_server_info(repo)
1105 1150
1106 1151 @reraise_safe_exceptions
1107 1152 def get_all_commit_ids(self, wire):
1108 1153
1109 1154 cache_on, context_uid, repo_id = self._cache_on(wire)
1110 @self.region.conditional_cache_on_arguments(condition=cache_on)
1155 region = self._region(wire)
1156 @region.conditional_cache_on_arguments(condition=cache_on)
1111 1157 def _get_all_commit_ids(_context_uid, _repo_id):
1112 1158
1113 1159 cmd = ['rev-list', '--reverse', '--date-order', '--branches', '--tags']
1114 1160 try:
1115 1161 output, __ = self.run_git_command(wire, cmd)
1116 1162 return output.splitlines()
1117 1163 except Exception:
1118 1164 # Can be raised for empty repositories
1119 1165 return []
1120 1166 return _get_all_commit_ids(context_uid, repo_id)
1121 1167
1122 1168 @reraise_safe_exceptions
1123 1169 def run_git_command(self, wire, cmd, **opts):
1124 1170 path = wire.get('path', None)
1125 1171
1126 1172 if path and os.path.isdir(path):
1127 1173 opts['cwd'] = path
1128 1174
1129 1175 if '_bare' in opts:
1130 1176 _copts = []
1131 1177 del opts['_bare']
1132 1178 else:
1133 1179 _copts = ['-c', 'core.quotepath=false', ]
1134 1180 safe_call = False
1135 1181 if '_safe' in opts:
1136 1182 # no exc on failure
1137 1183 del opts['_safe']
1138 1184 safe_call = True
1139 1185
1140 1186 if '_copts' in opts:
1141 1187 _copts.extend(opts['_copts'] or [])
1142 1188 del opts['_copts']
1143 1189
1144 1190 gitenv = os.environ.copy()
1145 1191 gitenv.update(opts.pop('extra_env', {}))
1146 1192 # need to remove GIT_DIR from the environment
1147 1193 if 'GIT_DIR' in gitenv:
1148 1194 del gitenv['GIT_DIR']
1149 1195 gitenv['GIT_CONFIG_NOGLOBAL'] = '1'
1150 1196 gitenv['GIT_DISCOVERY_ACROSS_FILESYSTEM'] = '1'
1151 1197
1152 1198 cmd = [settings.GIT_EXECUTABLE] + _copts + cmd
1153 1199 _opts = {'env': gitenv, 'shell': False}
1154 1200
1155 1201 proc = None
1156 1202 try:
1157 1203 _opts.update(opts)
1158 1204 proc = subprocessio.SubprocessIOChunker(cmd, **_opts)
1159 1205
1160 1206 return ''.join(proc), ''.join(proc.error)
1161 1207 except (EnvironmentError, OSError) as err:
1162 1208 cmd = ' '.join(cmd) # human-friendly CMD
1163 1209 tb_err = ("Couldn't run git command (%s).\n"
1164 1210 "Original error was:%s\n"
1165 1211 "Call options:%s\n"
1166 1212 % (cmd, err, _opts))
1167 1213 log.exception(tb_err)
1168 1214 if safe_call:
1169 1215 return '', err
1170 1216 else:
1171 1217 raise exceptions.VcsException()(tb_err)
1172 1218 finally:
1173 1219 if proc:
1174 1220 proc.close()
1175 1221
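The underscore-prefixed keys in run_git_command() are consumed before the remaining opts reach the subprocess wrapper. A hypothetical call showing what each one does (`remote` and `wire` are assumed to be a GitRemote instance and its wire dict, not names from this diff):

    output, err = remote.run_git_command(
        wire, ['rev-parse', 'HEAD'],
        _safe=True,                                  # return ('', err) instead of raising
        _copts=['-c', 'advice.detachedHead=false'],  # extra `git -c` config options
        extra_env={'GIT_TERMINAL_PROMPT': '0'},      # never prompt for credentials
    )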
1176 1222 @reraise_safe_exceptions
1177 1223 def install_hooks(self, wire, force=False):
1178 1224 from vcsserver.hook_utils import install_git_hooks
1179 1225 bare = self.bare(wire)
1180 1226 path = wire['path']
1181 1227 return install_git_hooks(path, bare, force_create=force)
1182 1228
1183 1229 @reraise_safe_exceptions
1184 1230 def get_hooks_info(self, wire):
1185 1231 from vcsserver.hook_utils import (
1186 1232 get_git_pre_hook_version, get_git_post_hook_version)
1187 1233 bare = self.bare(wire)
1188 1234 path = wire['path']
1189 1235 return {
1190 1236 'pre_version': get_git_pre_hook_version(path, bare),
1191 1237 'post_version': get_git_post_hook_version(path, bare),
1192 1238 }
1239
1240 @reraise_safe_exceptions
1241 def set_head_ref(self, wire, head_name):
1242 log.debug('Setting refs/heads to `%s`', head_name)
1243 cmd = ['symbolic-ref', '"HEAD"', '"refs/heads/%s"' % head_name]
1244 output, __ = self.run_git_command(wire, cmd)
1245 return [head_name] + output.splitlines()
1246
1247 @reraise_safe_exceptions
1248 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1249 archive_dir_name, commit_id):
1250
1251 def file_walker(_commit_id, path):
1252 repo_init = self._factory.repo_libgit2(wire)
1253
1254 with repo_init as repo:
1255 commit = repo[commit_id]
1256
1257 if path in ['', '/']:
1258 tree = commit.tree
1259 else:
1260 tree = commit.tree[path.rstrip('/')]
1261 tree_id = tree.id.hex
1262 try:
1263 tree = repo[tree_id]
1264 except KeyError:
1265 raise ObjectMissing('No tree with id: {}'.format(tree_id))
1266
1267 index = LibGit2Index.Index()
1268 index.read_tree(tree)
1269 file_iter = index
1270
1271 for fn in file_iter:
1272 file_path = fn.path
1273 mode = fn.mode
1274 is_link = stat.S_ISLNK(mode)
1275 if mode == pygit2.GIT_FILEMODE_COMMIT:
1276 log.debug('Skipping path %s as a commit node', file_path)
1277 continue
1278 yield ArchiveNode(file_path, mode, is_link, repo[fn.hex].read_raw)
1279
1280 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1281 archive_dir_name, commit_id)
@@ -1,1009 +1,1047 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17 import functools
18 18 import io
19 19 import logging
20 import os
20 21 import stat
21 22 import urllib.request, urllib.parse, urllib.error
22 23 import urllib.request, urllib.error, urllib.parse
23 24 import traceback
24 25
25 26 from hgext import largefiles, rebase, purge
26 27 from hgext.strip import strip as hgext_strip
27 28 from mercurial import commands
28 29 from mercurial import unionrepo
29 30 from mercurial import verify
30 31 from mercurial import repair
31 32
32 33 import vcsserver
33 34 from vcsserver import exceptions
34 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original
35 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original, archive_repo, ArchiveNode
35 36 from vcsserver.hgcompat import (
36 37 archival, bin, clone, config as hgconfig, diffopts, hex, get_ctx,
37 38 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler,
38 39 makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge,
39 40 patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError,
40 RepoLookupError, InterventionRequired, RequirementError)
41 RepoLookupError, InterventionRequired, RequirementError,
42 alwaysmatcher, patternmatcher, hgutil)
41 43 from vcsserver.vcs_base import RemoteBase
42 44
43 45 log = logging.getLogger(__name__)
44 46
45 47
46 48 def make_ui_from_config(repo_config):
47 49
48 50 class LoggingUI(ui.ui):
49 51 def status(self, *msg, **opts):
50 52 log.info(' '.join(msg).rstrip('\n'))
51 53 super(LoggingUI, self).status(*msg, **opts)
52 54
53 55 def warn(self, *msg, **opts):
54 56 log.warn(' '.join(msg).rstrip('\n'))
55 57 super(LoggingUI, self).warn(*msg, **opts)
56 58
57 59 def error(self, *msg, **opts):
58 60 log.error(' '.join(msg).rstrip('\n'))
59 61 super(LoggingUI, self).error(*msg, **opts)
60 62
61 63 def note(self, *msg, **opts):
62 64 log.info(' '.join(msg).rstrip('\n'))
63 65 super(LoggingUI, self).note(*msg, **opts)
64 66
65 67 def debug(self, *msg, **opts):
66 68 log.debug(' '.join(msg).rstrip('\n'))
67 69 super(LoggingUI, self).debug(*msg, **opts)
68 70
69 71 baseui = LoggingUI()
70 72
71 73 # clean the baseui object
72 74 baseui._ocfg = hgconfig.config()
73 75 baseui._ucfg = hgconfig.config()
74 76 baseui._tcfg = hgconfig.config()
75 77
76 78 for section, option, value in repo_config:
77 79 baseui.setconfig(section, option, value)
78 80
79 81 # make our hgweb quiet so it doesn't print output
80 82 baseui.setconfig('ui', 'quiet', 'true')
81 83
82 84 baseui.setconfig('ui', 'paginate', 'never')
84 86 # for better error reporting from Mercurial
84 86 baseui.setconfig('ui', 'message-output', 'stderr')
85 87
86 88 # force mercurial to only use 1 thread, otherwise it may try to set a
87 89 # signal in a non-main thread, thus generating a ValueError.
88 90 baseui.setconfig('worker', 'numcpus', 1)
89 91
90 92 # If there is no config for the largefiles extension, we explicitly disable
91 93 # it here. This overrides settings from repositories hgrc file. Recent
92 94 # mercurial versions enable largefiles in hgrc on clone from largefile
93 95 # repo.
94 96 if not baseui.hasconfig('extensions', 'largefiles'):
95 97 log.debug('Explicitly disable largefiles extension for repo.')
96 98 baseui.setconfig('extensions', 'largefiles', '!')
97 99
98 100 return baseui
99 101
100 102
101 103 def reraise_safe_exceptions(func):
102 104 """Decorator for converting mercurial exceptions to something neutral."""
103 105
104 106 def wrapper(*args, **kwargs):
105 107 try:
106 108 return func(*args, **kwargs)
107 109 except (Abort, InterventionRequired) as e:
108 110 raise_from_original(exceptions.AbortException(e))
109 111 except RepoLookupError as e:
110 112 raise_from_original(exceptions.LookupException(e))
111 113 except RequirementError as e:
112 114 raise_from_original(exceptions.RequirementException(e))
113 115 except RepoError as e:
114 116 raise_from_original(exceptions.VcsException(e))
115 117 except LookupError as e:
116 118 raise_from_original(exceptions.LookupException(e))
117 119 except Exception as e:
118 120 if not hasattr(e, '_vcs_kind'):
119 121 log.exception("Unhandled exception in hg remote call")
120 122 raise_from_original(exceptions.UnhandledException(e))
121 123
122 124 raise
123 125 return wrapper
124 126
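The wrapper above maps each Mercurial exception onto a neutral vcsserver exception via raise_from_original. A stripped-down sketch of the same shape, with generic exception types standing in for Mercurial's:

    import functools

    class NeutralLookupError(Exception):
        """stand-in for exceptions.LookupException"""

    def reraise_as_neutral(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except KeyError as e:
                # keep the original as the cause, like raise_from_original
                raise NeutralLookupError(repr(e)) from e
        return wrapper

    @reraise_as_neutral
    def find(mapping, key):
        return mapping[key]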
125 127
126 128 class MercurialFactory(RepoFactory):
127 129 repo_type = 'hg'
128 130
129 131 def _create_config(self, config, hooks=True):
130 132 if not hooks:
131 133 hooks_to_clean = frozenset((
132 134 'changegroup.repo_size', 'preoutgoing.pre_pull',
133 135 'outgoing.pull_logger', 'prechangegroup.pre_push'))
134 136 new_config = []
135 137 for section, option, value in config:
136 138 if section == 'hooks' and option in hooks_to_clean:
137 139 continue
138 140 new_config.append((section, option, value))
139 141 config = new_config
140 142
141 143 baseui = make_ui_from_config(config)
142 144 return baseui
143 145
144 146 def _create_repo(self, wire, create):
145 147 baseui = self._create_config(wire["config"])
146 148 return instance(baseui, wire["path"], create)
147 149
148 150 def repo(self, wire, create=False):
149 151 """
150 152 Get a repository instance for the given path.
151 153 """
152 154 return self._create_repo(wire, create)
153 155
154 156
155 157 def patch_ui_message_output(baseui):
156 158 baseui.setconfig('ui', 'quiet', 'false')
157 159 output = io.BytesIO()
158 160
159 161 def write(data, **unused_kwargs):
160 162 output.write(data)
161 163
162 164 baseui.status = write
163 165 baseui.write = write
164 166 baseui.warn = write
165 167 baseui.debug = write
166 168
167 169 return baseui, output
168 170
169 171
170 172 class HgRemote(RemoteBase):
171 173
172 174 def __init__(self, factory):
173 175 self._factory = factory
174 176 self._bulk_methods = {
175 177 "affected_files": self.ctx_files,
176 178 "author": self.ctx_user,
177 179 "branch": self.ctx_branch,
178 180 "children": self.ctx_children,
179 181 "date": self.ctx_date,
180 182 "message": self.ctx_description,
181 183 "parents": self.ctx_parents,
182 184 "status": self.ctx_status,
183 185 "obsolete": self.ctx_obsolete,
184 186 "phase": self.ctx_phase,
185 187 "hidden": self.ctx_hidden,
186 188 "_file_paths": self.ctx_list,
187 189 }
188 190
189 191 def _get_ctx(self, repo, ref):
190 192 return get_ctx(repo, ref)
191 193
192 194 @reraise_safe_exceptions
193 195 def discover_hg_version(self):
194 196 from mercurial import util
195 197 return util.version()
196 198
197 199 @reraise_safe_exceptions
198 200 def is_empty(self, wire):
199 201 repo = self._factory.repo(wire)
200 202
201 203 try:
202 204 return len(repo) == 0
203 205 except Exception:
204 206 log.exception("failed to read object_store")
205 207 return False
206 208
207 209 @reraise_safe_exceptions
208 def archive_repo(self, archive_path, mtime, file_info, kind):
209 if kind == "tgz":
210 archiver = archival.tarit(archive_path, mtime, "gz")
211 elif kind == "tbz2":
212 archiver = archival.tarit(archive_path, mtime, "bz2")
213 elif kind == 'zip':
214 archiver = archival.zipit(archive_path, mtime)
215 else:
216 raise exceptions.ArchiveException()(
217 'Remote does not support: "%s".' % kind)
218
219 for f_path, f_mode, f_is_link, f_content in file_info:
220 archiver.addfile(f_path, f_mode, f_is_link, f_content)
221 archiver.done()
222
223 @reraise_safe_exceptions
224 210 def bookmarks(self, wire):
225 211 cache_on, context_uid, repo_id = self._cache_on(wire)
226 @self.region.conditional_cache_on_arguments(condition=cache_on)
212 region = self._region(wire)
213 @region.conditional_cache_on_arguments(condition=cache_on)
227 214 def _bookmarks(_context_uid, _repo_id):
228 215 repo = self._factory.repo(wire)
229 216 return dict(repo._bookmarks)
230 217
231 218 return _bookmarks(context_uid, repo_id)
232 219
233 220 @reraise_safe_exceptions
234 221 def branches(self, wire, normal, closed):
235 222 cache_on, context_uid, repo_id = self._cache_on(wire)
236 @self.region.conditional_cache_on_arguments(condition=cache_on)
223 region = self._region(wire)
224 @region.conditional_cache_on_arguments(condition=cache_on)
237 225 def _branches(_context_uid, _repo_id, _normal, _closed):
238 226 repo = self._factory.repo(wire)
239 227 iter_branches = repo.branchmap().iterbranches()
240 228 bt = {}
241 229 for branch_name, _heads, tip, is_closed in iter_branches:
242 230 if normal and not is_closed:
243 231 bt[branch_name] = tip
244 232 if closed and is_closed:
245 233 bt[branch_name] = tip
246 234
247 235 return bt
248 236
249 237 return _branches(context_uid, repo_id, normal, closed)
250 238
251 239 @reraise_safe_exceptions
252 240 def bulk_request(self, wire, commit_id, pre_load):
253 241 cache_on, context_uid, repo_id = self._cache_on(wire)
254 @self.region.conditional_cache_on_arguments(condition=cache_on)
242 region = self._region(wire)
243 @region.conditional_cache_on_arguments(condition=cache_on)
255 244 def _bulk_request(_repo_id, _commit_id, _pre_load):
256 245 result = {}
257 246 for attr in pre_load:
258 247 try:
259 248 method = self._bulk_methods[attr]
260 249 result[attr] = method(wire, commit_id)
261 250 except KeyError as e:
262 251 raise exceptions.VcsException(e)(
263 252 'Unknown bulk attribute: "%s"' % attr)
264 253 return result
265 254
266 255 return _bulk_request(repo_id, commit_id, sorted(pre_load))
267 256
268 257 @reraise_safe_exceptions
269 258 def ctx_branch(self, wire, commit_id):
270 259 cache_on, context_uid, repo_id = self._cache_on(wire)
271 @self.region.conditional_cache_on_arguments(condition=cache_on)
260 region = self._region(wire)
261 @region.conditional_cache_on_arguments(condition=cache_on)
272 262 def _ctx_branch(_repo_id, _commit_id):
273 263 repo = self._factory.repo(wire)
274 264 ctx = self._get_ctx(repo, commit_id)
275 265 return ctx.branch()
276 266 return _ctx_branch(repo_id, commit_id)
277 267
278 268 @reraise_safe_exceptions
279 269 def ctx_date(self, wire, commit_id):
280 270 cache_on, context_uid, repo_id = self._cache_on(wire)
281 @self.region.conditional_cache_on_arguments(condition=cache_on)
271 region = self._region(wire)
272 @region.conditional_cache_on_arguments(condition=cache_on)
282 273 def _ctx_date(_repo_id, _commit_id):
283 274 repo = self._factory.repo(wire)
284 275 ctx = self._get_ctx(repo, commit_id)
285 276 return ctx.date()
286 277 return _ctx_date(repo_id, commit_id)
287 278
288 279 @reraise_safe_exceptions
289 280 def ctx_description(self, wire, revision):
290 281 repo = self._factory.repo(wire)
291 282 ctx = self._get_ctx(repo, revision)
292 283 return ctx.description()
293 284
294 285 @reraise_safe_exceptions
295 286 def ctx_files(self, wire, commit_id):
296 287 cache_on, context_uid, repo_id = self._cache_on(wire)
297 @self.region.conditional_cache_on_arguments(condition=cache_on)
288 region = self._region(wire)
289 @region.conditional_cache_on_arguments(condition=cache_on)
298 290 def _ctx_files(_repo_id, _commit_id):
299 291 repo = self._factory.repo(wire)
300 292 ctx = self._get_ctx(repo, commit_id)
301 293 return ctx.files()
302 294
303 295 return _ctx_files(repo_id, commit_id)
304 296
305 297 @reraise_safe_exceptions
306 298 def ctx_list(self, path, revision):
307 299 repo = self._factory.repo(path)
308 300 ctx = self._get_ctx(repo, revision)
309 301 return list(ctx)
310 302
311 303 @reraise_safe_exceptions
312 304 def ctx_parents(self, wire, commit_id):
313 305 cache_on, context_uid, repo_id = self._cache_on(wire)
314 @self.region.conditional_cache_on_arguments(condition=cache_on)
306 region = self._region(wire)
307 @region.conditional_cache_on_arguments(condition=cache_on)
315 308 def _ctx_parents(_repo_id, _commit_id):
316 309 repo = self._factory.repo(wire)
317 310 ctx = self._get_ctx(repo, commit_id)
318 311 return [parent.hex() for parent in ctx.parents()
319 312 if not (parent.hidden() or parent.obsolete())]
320 313
321 314 return _ctx_parents(repo_id, commit_id)
322 315
323 316 @reraise_safe_exceptions
324 317 def ctx_children(self, wire, commit_id):
325 318 cache_on, context_uid, repo_id = self._cache_on(wire)
326 @self.region.conditional_cache_on_arguments(condition=cache_on)
319 region = self._region(wire)
320 @region.conditional_cache_on_arguments(condition=cache_on)
327 321 def _ctx_children(_repo_id, _commit_id):
328 322 repo = self._factory.repo(wire)
329 323 ctx = self._get_ctx(repo, commit_id)
330 324 return [child.hex() for child in ctx.children()
331 325 if not (child.hidden() or child.obsolete())]
332 326
333 327 return _ctx_children(repo_id, commit_id)
334 328
335 329 @reraise_safe_exceptions
336 330 def ctx_phase(self, wire, commit_id):
337 331 cache_on, context_uid, repo_id = self._cache_on(wire)
338 @self.region.conditional_cache_on_arguments(condition=cache_on)
332 region = self._region(wire)
333 @region.conditional_cache_on_arguments(condition=cache_on)
339 334 def _ctx_phase(_context_uid, _repo_id, _commit_id):
340 335 repo = self._factory.repo(wire)
341 336 ctx = self._get_ctx(repo, commit_id)
342 337 # public=0, draft=1, secret=3
343 338 return ctx.phase()
344 339 return _ctx_phase(context_uid, repo_id, commit_id)
345 340
346 341 @reraise_safe_exceptions
347 342 def ctx_obsolete(self, wire, commit_id):
348 343 cache_on, context_uid, repo_id = self._cache_on(wire)
349 @self.region.conditional_cache_on_arguments(condition=cache_on)
344 region = self._region(wire)
345 @region.conditional_cache_on_arguments(condition=cache_on)
350 346 def _ctx_obsolete(_context_uid, _repo_id, _commit_id):
351 347 repo = self._factory.repo(wire)
352 348 ctx = self._get_ctx(repo, commit_id)
353 349 return ctx.obsolete()
354 350 return _ctx_obsolete(context_uid, repo_id, commit_id)
355 351
356 352 @reraise_safe_exceptions
357 353 def ctx_hidden(self, wire, commit_id):
358 354 cache_on, context_uid, repo_id = self._cache_on(wire)
359 @self.region.conditional_cache_on_arguments(condition=cache_on)
355 region = self._region(wire)
356 @region.conditional_cache_on_arguments(condition=cache_on)
360 357 def _ctx_hidden(_context_uid, _repo_id, _commit_id):
361 358 repo = self._factory.repo(wire)
362 359 ctx = self._get_ctx(repo, commit_id)
363 360 return ctx.hidden()
364 361 return _ctx_hidden(context_uid, repo_id, commit_id)
365 362
366 363 @reraise_safe_exceptions
367 364 def ctx_substate(self, wire, revision):
368 365 repo = self._factory.repo(wire)
369 366 ctx = self._get_ctx(repo, revision)
370 367 return ctx.substate
371 368
372 369 @reraise_safe_exceptions
373 370 def ctx_status(self, wire, revision):
374 371 repo = self._factory.repo(wire)
375 372 ctx = self._get_ctx(repo, revision)
376 373 status = repo[ctx.p1().node()].status(other=ctx.node())
377 374 # the status object (an odd, custom named tuple in mercurial) is not
378 375 # correctly serializable; we make it a list, as the underlying
379 376 # API expects this to be a list
380 377 return list(status)
381 378
382 379 @reraise_safe_exceptions
383 380 def ctx_user(self, wire, revision):
384 381 repo = self._factory.repo(wire)
385 382 ctx = self._get_ctx(repo, revision)
386 383 return ctx.user()
387 384
388 385 @reraise_safe_exceptions
389 386 def check_url(self, url, config):
390 387 _proto = None
391 388 if '+' in url[:url.find('://')]:
392 389 _proto = url[0:url.find('+')]
393 390 url = url[url.find('+') + 1:]
394 391 handlers = []
395 392 url_obj = url_parser(url)
396 393 test_uri, authinfo = url_obj.authinfo()
397 394 url_obj.passwd = '*****' if url_obj.passwd else url_obj.passwd
398 395 url_obj.query = obfuscate_qs(url_obj.query)
399 396
400 397 cleaned_uri = str(url_obj)
401 398 log.info("Checking URL for remote cloning/import: %s", cleaned_uri)
402 399
403 400 if authinfo:
404 401 # create a password manager
405 402 passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
406 403 passmgr.add_password(*authinfo)
407 404
408 405 handlers.extend((httpbasicauthhandler(passmgr),
409 406 httpdigestauthhandler(passmgr)))
410 407
411 408 o = urllib.request.build_opener(*handlers)
412 409 o.addheaders = [('Content-Type', 'application/mercurial-0.1'),
413 410 ('Accept', 'application/mercurial-0.1')]
414 411
415 412 q = {"cmd": 'between'}
416 413 q.update({'pairs': "%s-%s" % ('0' * 40, '0' * 40)})
417 414 qs = '?%s' % urllib.parse.urlencode(q)
418 415 cu = "%s%s" % (test_uri, qs)
419 416 req = urllib.request.Request(cu, None, {})
420 417
421 418 try:
422 419 log.debug("Trying to open URL %s", cleaned_uri)
423 420 resp = o.open(req)
424 421 if resp.code != 200:
425 422 raise exceptions.URLError()('Return Code is not 200')
426 423 except Exception as e:
427 424 log.warning("URL cannot be opened: %s", cleaned_uri, exc_info=True)
428 425 # means it cannot be cloned
429 426 raise exceptions.URLError(e)("[%s] org_exc: %s" % (cleaned_uri, e))
430 427
431 428 # now check if it's a proper hg repo, but don't do it for svn
432 429 try:
433 430 if _proto == 'svn':
434 431 pass
435 432 else:
436 433 # check for pure hg repos
437 434 log.debug(
438 435 "Verifying if URL is a Mercurial repository: %s",
439 436 cleaned_uri)
440 437 ui = make_ui_from_config(config)
441 438 peer_checker = makepeer(ui, url)
442 439 peer_checker.lookup('tip')
443 440 except Exception as e:
444 441 log.warning("URL is not a valid Mercurial repository: %s",
445 442 cleaned_uri)
446 443 raise exceptions.URLError(e)(
447 444 "url [%s] does not look like an hg repo org_exc: %s"
448 445 % (cleaned_uri, e))
449 446
450 447 log.info("URL is a valid Mercurial repository: %s", cleaned_uri)
451 448 return True
452 449
453 450 @reraise_safe_exceptions
454 451 def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_git, opt_ignorews, context):
455 452 repo = self._factory.repo(wire)
456 453
457 454 if file_filter:
458 455 match_filter = match(file_filter[0], '', [file_filter[1]])
459 456 else:
460 457 match_filter = file_filter
461 458 opts = diffopts(git=opt_git, ignorews=opt_ignorews, context=context, showfunc=1)
462 459
463 460 try:
464 461 return "".join(patch.diff(
465 462 repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts))
466 463 except RepoLookupError as e:
467 464 raise exceptions.LookupException(e)()
468 465
469 466 @reraise_safe_exceptions
470 467 def node_history(self, wire, revision, path, limit):
471 468 cache_on, context_uid, repo_id = self._cache_on(wire)
472 @self.region.conditional_cache_on_arguments(condition=cache_on)
469 region = self._region(wire)
470 @region.conditional_cache_on_arguments(condition=cache_on)
473 471 def _node_history(_context_uid, _repo_id, _revision, _path, _limit):
474 472 repo = self._factory.repo(wire)
475 473
476 474 ctx = self._get_ctx(repo, revision)
477 475 fctx = ctx.filectx(path)
478 476
479 477 def history_iter():
480 478 limit_rev = fctx.rev()
481 479 for obj in reversed(list(fctx.filelog())):
482 480 obj = fctx.filectx(obj)
483 481 ctx = obj.changectx()
484 482 if ctx.hidden() or ctx.obsolete():
485 483 continue
486 484
487 485 if limit_rev >= obj.rev():
488 486 yield obj
489 487
490 488 history = []
491 489 for cnt, obj in enumerate(history_iter()):
492 490 if limit and cnt >= limit:
493 491 break
494 492 history.append(hex(obj.node()))
495 493
496 494 return [x for x in history]
497 495 return _node_history(context_uid, repo_id, revision, path, limit)
498 496
499 497 @reraise_safe_exceptions
500 498 def node_history_untill(self, wire, revision, path, limit):
501 499 cache_on, context_uid, repo_id = self._cache_on(wire)
502 @self.region.conditional_cache_on_arguments(condition=cache_on)
500 region = self._region(wire)
501 @region.conditional_cache_on_arguments(condition=cache_on)
503 502 def _node_history_until(_context_uid, _repo_id, _revision, _path, _limit):
504 503 repo = self._factory.repo(wire)
505 504 ctx = self._get_ctx(repo, revision)
506 505 fctx = ctx.filectx(path)
507 506
508 507 file_log = list(fctx.filelog())
509 508 if limit:
510 509 # Limit to the last n items
511 510 file_log = file_log[-limit:]
512 511
513 512 return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)]
514 513 return _node_history_until(context_uid, repo_id, revision, path, limit)
515 514
516 515 @reraise_safe_exceptions
517 516 def fctx_annotate(self, wire, revision, path):
518 517 repo = self._factory.repo(wire)
519 518 ctx = self._get_ctx(repo, revision)
520 519 fctx = ctx.filectx(path)
521 520
522 521 result = []
523 522 for i, annotate_obj in enumerate(fctx.annotate(), 1):
524 523 ln_no = i
525 524 sha = hex(annotate_obj.fctx.node())
526 525 content = annotate_obj.text
527 526 result.append((ln_no, sha, content))
528 527 return result
529 528
530 529 @reraise_safe_exceptions
531 530 def fctx_node_data(self, wire, revision, path):
532 531 repo = self._factory.repo(wire)
533 532 ctx = self._get_ctx(repo, revision)
534 533 fctx = ctx.filectx(path)
535 534 return fctx.data()
536 535
537 536 @reraise_safe_exceptions
538 537 def fctx_flags(self, wire, commit_id, path):
539 538 cache_on, context_uid, repo_id = self._cache_on(wire)
540 @self.region.conditional_cache_on_arguments(condition=cache_on)
539 region = self._region(wire)
540 @region.conditional_cache_on_arguments(condition=cache_on)
541 541 def _fctx_flags(_repo_id, _commit_id, _path):
542 542 repo = self._factory.repo(wire)
543 543 ctx = self._get_ctx(repo, commit_id)
544 544 fctx = ctx.filectx(path)
545 545 return fctx.flags()
546 546
547 547 return _fctx_flags(repo_id, commit_id, path)
548 548
549 549 @reraise_safe_exceptions
550 550 def fctx_size(self, wire, commit_id, path):
551 551 cache_on, context_uid, repo_id = self._cache_on(wire)
552 @self.region.conditional_cache_on_arguments(condition=cache_on)
552 region = self._region(wire)
553 @region.conditional_cache_on_arguments(condition=cache_on)
553 554 def _fctx_size(_repo_id, _revision, _path):
554 555 repo = self._factory.repo(wire)
555 556 ctx = self._get_ctx(repo, commit_id)
556 557 fctx = ctx.filectx(path)
557 558 return fctx.size()
558 559 return _fctx_size(repo_id, commit_id, path)
559 560
560 561 @reraise_safe_exceptions
561 562 def get_all_commit_ids(self, wire, name):
562 563 cache_on, context_uid, repo_id = self._cache_on(wire)
563 @self.region.conditional_cache_on_arguments(condition=cache_on)
564 region = self._region(wire)
565 @region.conditional_cache_on_arguments(condition=cache_on)
564 566 def _get_all_commit_ids(_context_uid, _repo_id, _name):
565 567 repo = self._factory.repo(wire)
566 568 repo = repo.filtered(name)
567 569 revs = [hex(x[7]) for x in repo.changelog.index]
568 570 return revs
569 571 return _get_all_commit_ids(context_uid, repo_id, name)
570 572
571 573 @reraise_safe_exceptions
572 574 def get_config_value(self, wire, section, name, untrusted=False):
573 575 repo = self._factory.repo(wire)
574 576 return repo.ui.config(section, name, untrusted=untrusted)
575 577
576 578 @reraise_safe_exceptions
577 579 def is_large_file(self, wire, commit_id, path):
578 580 cache_on, context_uid, repo_id = self._cache_on(wire)
579 @self.region.conditional_cache_on_arguments(condition=cache_on)
581 region = self._region(wire)
582 @region.conditional_cache_on_arguments(condition=cache_on)
580 583 def _is_large_file(_context_uid, _repo_id, _commit_id, _path):
581 584 return largefiles.lfutil.isstandin(path)
582 585
583 586 return _is_large_file(context_uid, repo_id, commit_id, path)
584 587
585 588 @reraise_safe_exceptions
586 589 def is_binary(self, wire, revision, path):
587 590 cache_on, context_uid, repo_id = self._cache_on(wire)
588 591
589 @self.region.conditional_cache_on_arguments(condition=cache_on)
592 region = self._region(wire)
593 @region.conditional_cache_on_arguments(condition=cache_on)
590 594 def _is_binary(_repo_id, _sha, _path):
591 595 repo = self._factory.repo(wire)
592 596 ctx = self._get_ctx(repo, revision)
593 597 fctx = ctx.filectx(path)
594 598 return fctx.isbinary()
595 599
596 600 return _is_binary(repo_id, revision, path)
597 601
598 602 @reraise_safe_exceptions
599 603 def in_largefiles_store(self, wire, sha):
600 604 repo = self._factory.repo(wire)
601 605 return largefiles.lfutil.instore(repo, sha)
602 606
603 607 @reraise_safe_exceptions
604 608 def in_user_cache(self, wire, sha):
605 609 repo = self._factory.repo(wire)
606 610 return largefiles.lfutil.inusercache(repo.ui, sha)
607 611
608 612 @reraise_safe_exceptions
609 613 def store_path(self, wire, sha):
610 614 repo = self._factory.repo(wire)
611 615 return largefiles.lfutil.storepath(repo, sha)
612 616
613 617 @reraise_safe_exceptions
614 618 def link(self, wire, sha, path):
615 619 repo = self._factory.repo(wire)
616 620 largefiles.lfutil.link(
617 621 largefiles.lfutil.usercachepath(repo.ui, sha), path)
618 622
619 623 @reraise_safe_exceptions
620 624 def localrepository(self, wire, create=False):
621 625 self._factory.repo(wire, create=create)
622 626
623 627 @reraise_safe_exceptions
624 628 def lookup(self, wire, revision, both):
625 629 cache_on, context_uid, repo_id = self._cache_on(wire)
626 @self.region.conditional_cache_on_arguments(condition=cache_on)
630
631 region = self._region(wire)
632 @region.conditional_cache_on_arguments(condition=cache_on)
627 633 def _lookup(_context_uid, _repo_id, _revision, _both):
628 634
629 635 repo = self._factory.repo(wire)
630 636 rev = _revision
631 637 if isinstance(rev, int):
632 638 # NOTE(marcink):
633 639 # since Mercurial doesn't support negative indexes properly
634 640 # we need to shift accordingly by one to get the proper index, e.g.
635 641 # repo[-1] => repo[-2]
636 642 # repo[0] => repo[-1]
637 643 if rev <= 0:
638 644 rev = rev + -1
639 645 try:
640 646 ctx = self._get_ctx(repo, rev)
641 647 except (TypeError, RepoLookupError) as e:
642 648 e._org_exc_tb = traceback.format_exc()
643 649 raise exceptions.LookupException(e)(rev)
644 650 except LookupError as e:
645 651 e._org_exc_tb = traceback.format_exc()
646 652 raise exceptions.LookupException(e)(e.name)
647 653
648 654 if not both:
649 655 return ctx.hex()
650 656
651 657 ctx = repo[ctx.hex()]
652 658 return ctx.hex(), ctx.rev()
653 659
654 660 return _lookup(context_uid, repo_id, revision, both)
655 661
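The shift above is easy to misread, so here is the same mapping as a standalone sketch (a hypothetical helper that simply mirrors the two lines in _lookup):

    def shift_negative_rev(rev):
        # mirrors _lookup above: non-positive integer revs are shifted
        # down by one, so 0 becomes -1 and -1 becomes -2
        if isinstance(rev, int) and rev <= 0:
            return rev - 1
        return rev

    assert shift_negative_rev(0) == -1
    assert shift_negative_rev(-1) == -2
    assert shift_negative_rev(5) == 5  # positive revs pass through unchanged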
656 662 @reraise_safe_exceptions
657 663 def sync_push(self, wire, url):
658 664 if not self.check_url(url, wire['config']):
659 665 return
660 666
661 667 repo = self._factory.repo(wire)
662 668
663 669 # Disable any prompts for this repo
664 670 repo.ui.setconfig('ui', 'interactive', 'off', '-y')
665 671
666 672 bookmarks = list(dict(repo._bookmarks).keys())
667 673 remote = peer(repo, {}, url)
668 674 # Disable any prompts for this remote
669 675 remote.ui.setconfig('ui', 'interactive', 'off', '-y')
670 676
671 677 return exchange.push(
672 678 repo, remote, newbranch=True, bookmarks=bookmarks).cgresult
673 679
674 680 @reraise_safe_exceptions
675 681 def revision(self, wire, rev):
676 682 repo = self._factory.repo(wire)
677 683 ctx = self._get_ctx(repo, rev)
678 684 return ctx.rev()
679 685
680 686 @reraise_safe_exceptions
681 687 def rev_range(self, wire, commit_filter):
682 688 cache_on, context_uid, repo_id = self._cache_on(wire)
683 689
684 @self.region.conditional_cache_on_arguments(condition=cache_on)
690 region = self._region(wire)
691 @region.conditional_cache_on_arguments(condition=cache_on)
685 692 def _rev_range(_context_uid, _repo_id, _filter):
686 693 repo = self._factory.repo(wire)
687 694 revisions = [rev for rev in revrange(repo, commit_filter)]
688 695 return revisions
689 696
690 697 return _rev_range(context_uid, repo_id, sorted(commit_filter))
691 698
692 699 @reraise_safe_exceptions
693 700 def rev_range_hash(self, wire, node):
694 701 repo = self._factory.repo(wire)
695 702
696 703 def get_revs(repo, rev_opt):
697 704 if rev_opt:
698 705 revs = revrange(repo, rev_opt)
699 706 if len(revs) == 0:
700 707 return (nullrev, nullrev)
701 708 return max(revs), min(revs)
702 709 else:
703 710 return len(repo) - 1, 0
704 711
705 712 stop, start = get_revs(repo, [node + ':'])
706 713 revs = [hex(repo[r].node()) for r in range(start, stop + 1)]
707 714 return revs
708 715
709 716 @reraise_safe_exceptions
710 717 def revs_from_revspec(self, wire, rev_spec, *args, **kwargs):
711 718 other_path = kwargs.pop('other_path', None)
712 719
713 720 # case when we want to compare two independent repositories
714 721 if other_path and other_path != wire["path"]:
715 722 baseui = self._factory._create_config(wire["config"])
716 723 repo = unionrepo.makeunionrepository(baseui, other_path, wire["path"])
717 724 else:
718 725 repo = self._factory.repo(wire)
719 726 return list(repo.revs(rev_spec, *args))
720 727
721 728 @reraise_safe_exceptions
722 729 def verify(self, wire,):
723 730 repo = self._factory.repo(wire)
724 731 baseui = self._factory._create_config(wire['config'])
725 732
726 733 baseui, output = patch_ui_message_output(baseui)
727 734
728 735 repo.ui = baseui
729 736 verify.verify(repo)
730 737 return output.getvalue()
731 738
732 739 @reraise_safe_exceptions
733 740 def hg_update_cache(self, wire,):
734 741 repo = self._factory.repo(wire)
735 742 baseui = self._factory._create_config(wire['config'])
736 743 baseui, output = patch_ui_message_output(baseui)
737 744
738 745 repo.ui = baseui
739 746 with repo.wlock(), repo.lock():
740 747 repo.updatecaches(full=True)
741 748
742 749 return output.getvalue()
743 750
744 751 @reraise_safe_exceptions
745 752 def hg_rebuild_fn_cache(self, wire,):
746 753 repo = self._factory.repo(wire)
747 754 baseui = self._factory._create_config(wire['config'])
748 755 baseui, output = patch_ui_message_output(baseui)
749 756
750 757 repo.ui = baseui
751 758
752 759 repair.rebuildfncache(baseui, repo)
753 760
754 761 return output.getvalue()
755 762
756 763 @reraise_safe_exceptions
757 764 def tags(self, wire):
758 765 cache_on, context_uid, repo_id = self._cache_on(wire)
759 @self.region.conditional_cache_on_arguments(condition=cache_on)
766 region = self._region(wire)
767 @region.conditional_cache_on_arguments(condition=cache_on)
760 768 def _tags(_context_uid, _repo_id):
761 769 repo = self._factory.repo(wire)
762 770 return repo.tags()
763 771
764 772 return _tags(context_uid, repo_id)
765 773
766 774 @reraise_safe_exceptions
767 775 def update(self, wire, node=None, clean=False):
768 776 repo = self._factory.repo(wire)
769 777 baseui = self._factory._create_config(wire['config'])
770 778 commands.update(baseui, repo, node=node, clean=clean)
771 779
772 780 @reraise_safe_exceptions
773 781 def identify(self, wire):
774 782 repo = self._factory.repo(wire)
775 783 baseui = self._factory._create_config(wire['config'])
776 784 output = io.BytesIO()
777 785 baseui.write = output.write
778 786 # This is required to get a full node id
779 787 baseui.debugflag = True
780 788 commands.identify(baseui, repo, id=True)
781 789
782 790 return output.getvalue()
783 791
784 792 @reraise_safe_exceptions
785 793 def heads(self, wire, branch=None):
786 794 repo = self._factory.repo(wire)
787 795 baseui = self._factory._create_config(wire['config'])
788 796 output = io.BytesIO()
789 797
790 798 def write(data, **unused_kwargs):
791 799 output.write(data)
792 800
793 801 baseui.write = write
794 802 if branch:
795 803 args = [branch]
796 804 else:
797 805 args = []
798 806 commands.heads(baseui, repo, template='{node} ', *args)
799 807
800 808 return output.getvalue()
801 809
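identify, heads and (further down) get_unresolved_files all capture Mercurial command output the same way: point the ui's write at an in-memory buffer, run the command, then read the buffer back. A minimal sketch of that pattern, using a stand-in writer instead of a real baseui:

    import io

    output = io.BytesIO()

    def write(data, **unused_kwargs):
        # stand-in for baseui.write; Mercurial passes bytes here
        output.write(data)

    # in the real methods: baseui.write = write, then e.g.
    # commands.heads(baseui, repo, template='{node} ')
    write(b'deadbeef ')
    assert output.getvalue() == b'deadbeef '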
802 810 @reraise_safe_exceptions
803 811 def ancestor(self, wire, revision1, revision2):
804 812 repo = self._factory.repo(wire)
805 813 changelog = repo.changelog
806 814 lookup = repo.lookup
807 815 a = changelog.ancestor(lookup(revision1), lookup(revision2))
808 816 return hex(a)
809 817
810 818 @reraise_safe_exceptions
811 819 def clone(self, wire, source, dest, update_after_clone=False, hooks=True):
812 820 baseui = self._factory._create_config(wire["config"], hooks=hooks)
813 821 clone(baseui, source, dest, noupdate=not update_after_clone)
814 822
815 823 @reraise_safe_exceptions
816 824 def commitctx(self, wire, message, parents, commit_time, commit_timezone, user, files, extra, removed, updated):
817 825
818 826 repo = self._factory.repo(wire)
819 827 baseui = self._factory._create_config(wire['config'])
820 828 publishing = baseui.configbool('phases', 'publish')
821 829 if publishing:
822 830 new_commit = 'public'
823 831 else:
824 832 new_commit = 'draft'
825 833
826 834 def _filectxfn(_repo, ctx, path):
827 835 """
828 836 Marks the given path as added/changed/removed in the given _repo. This
829 837 is for the internal mercurial commit function.
830 838 """
831 839
832 840 # check if this path is removed
833 841 if path in removed:
834 842 # returning None is a way to mark node for removal
835 843 return None
836 844
837 845 # check if this path is added
838 846 for node in updated:
839 847 if node['path'] == path:
840 848 return memfilectx(
841 849 _repo,
842 850 changectx=ctx,
843 851 path=node['path'],
844 852 data=node['content'],
845 853 islink=False,
846 854 isexec=bool(node['mode'] & stat.S_IXUSR),
847 855 copysource=False)
848 856
849 857 raise exceptions.AbortException()(
850 858 "Given path hasn't been marked as added, "
851 859 "changed or removed (%s)" % path)
852 860
853 861 with repo.ui.configoverride({('phases', 'new-commit'): new_commit}):
854 862
855 863 commit_ctx = memctx(
856 864 repo=repo,
857 865 parents=parents,
858 866 text=message,
859 867 files=files,
860 868 filectxfn=_filectxfn,
861 869 user=user,
862 870 date=(commit_time, commit_timezone),
863 871 extra=extra)
864 872
865 873 n = repo.commitctx(commit_ctx)
866 874 new_id = hex(n)
867 875
868 876 return new_id
869 877
870 878 @reraise_safe_exceptions
871 879 def pull(self, wire, url, commit_ids=None):
872 880 repo = self._factory.repo(wire)
873 881 # Disable any prompts for this repo
874 882 repo.ui.setconfig('ui', 'interactive', 'off', '-y')
875 883
876 884 remote = peer(repo, {}, url)
877 885 # Disable any prompts for this remote
878 886 remote.ui.setconfig('ui', 'interactive', 'off', '-y')
879 887
880 888 if commit_ids:
881 889 commit_ids = [bin(commit_id) for commit_id in commit_ids]
882 890
883 891 return exchange.pull(
884 892 repo, remote, heads=commit_ids, force=None).cgresult
885 893
886 894 @reraise_safe_exceptions
887 895 def pull_cmd(self, wire, source, bookmark=None, branch=None, revision=None, hooks=True):
888 896 repo = self._factory.repo(wire)
889 897 baseui = self._factory._create_config(wire['config'], hooks=hooks)
890 898
891 899 # Mercurial internally has a lot of logic that checks ONLY whether
892 900 # an option is defined, so we only pass options that are set
893 901 opts = {}
894 902 if bookmark:
895 903 opts['bookmark'] = bookmark
896 904 if branch:
897 905 opts['branch'] = branch
898 906 if revision:
899 907 opts['rev'] = revision
900 908
901 909 commands.pull(baseui, repo, source, **opts)
902 910
903 911 @reraise_safe_exceptions
904 912 def push(self, wire, revisions, dest_path, hooks=True, push_branches=False):
905 913 repo = self._factory.repo(wire)
906 914 baseui = self._factory._create_config(wire['config'], hooks=hooks)
907 915 commands.push(baseui, repo, dest=dest_path, rev=revisions,
908 916 new_branch=push_branches)
909 917
910 918 @reraise_safe_exceptions
911 919 def strip(self, wire, revision, update, backup):
912 920 repo = self._factory.repo(wire)
913 921 ctx = self._get_ctx(repo, revision)
914 922 hgext_strip(
915 923 repo.baseui, repo, ctx.node(), update=update, backup=backup)
916 924
917 925 @reraise_safe_exceptions
918 926 def get_unresolved_files(self, wire):
919 927 repo = self._factory.repo(wire)
920 928
921 929 log.debug('Calculating unresolved files for repo: %s', repo)
922 930 output = io.BytesIO()
923 931
924 932 def write(data, **unused_kwargs):
925 933 output.write(data)
926 934
927 935 baseui = self._factory._create_config(wire['config'])
928 936 baseui.write = write
929 937
930 938 commands.resolve(baseui, repo, list=True)
931 939 unresolved = output.getvalue().splitlines(0)
932 940 return unresolved
933 941
934 942 @reraise_safe_exceptions
935 943 def merge(self, wire, revision):
936 944 repo = self._factory.repo(wire)
937 945 baseui = self._factory._create_config(wire['config'])
938 946 repo.ui.setconfig('ui', 'merge', 'internal:dump')
939 947
940 948 # If subrepositories are used, mercurial prompts the user in
941 949 # case of merge conflicts or differing subrepository sources. By
942 950 # setting the interactive flag to `False` mercurial doesn't prompt the
943 951 # user but instead uses a default value.
944 952 repo.ui.setconfig('ui', 'interactive', False)
945 953 commands.merge(baseui, repo, rev=revision)
946 954
947 955 @reraise_safe_exceptions
948 956 def merge_state(self, wire):
949 957 repo = self._factory.repo(wire)
950 958 repo.ui.setconfig('ui', 'merge', 'internal:dump')
951 959
952 960 # If subrepositories are used, mercurial prompts the user in
953 961 # case of merge conflicts or differing subrepository sources. By
954 962 # setting the interactive flag to `False` mercurial doesn't prompt the
955 963 # user but instead uses a default value.
956 964 repo.ui.setconfig('ui', 'interactive', False)
957 965 ms = hg_merge.mergestate(repo)
958 966 return [x for x in ms.unresolved()]
959 967
960 968 @reraise_safe_exceptions
961 969 def commit(self, wire, message, username, close_branch=False):
962 970 repo = self._factory.repo(wire)
963 971 baseui = self._factory._create_config(wire['config'])
964 972 repo.ui.setconfig('ui', 'username', username)
965 973 commands.commit(baseui, repo, message=message, close_branch=close_branch)
966 974
967 975 @reraise_safe_exceptions
968 976 def rebase(self, wire, source=None, dest=None, abort=False):
969 977 repo = self._factory.repo(wire)
970 978 baseui = self._factory._create_config(wire['config'])
971 979 repo.ui.setconfig('ui', 'merge', 'internal:dump')
972 980 # If subrepositories are used, mercurial prompts the user in
973 981 # case of merge conflicts or differing subrepository sources. By
974 982 # setting the interactive flag to `False` mercurial doesn't prompt the
975 983 # user but instead uses a default value.
976 984 repo.ui.setconfig('ui', 'interactive', False)
977 985 rebase.rebase(baseui, repo, base=source, dest=dest, abort=abort, keep=not abort)
978 986
979 987 @reraise_safe_exceptions
980 988 def tag(self, wire, name, revision, message, local, user, tag_time, tag_timezone):
981 989 repo = self._factory.repo(wire)
982 990 ctx = self._get_ctx(repo, revision)
983 991 node = ctx.node()
984 992
985 993 date = (tag_time, tag_timezone)
986 994 try:
987 995 hg_tag.tag(repo, name, node, message, local, user, date)
988 996 except Abort as e:
989 997 log.exception("Tag operation aborted")
990 998 # Exception can contain unicode which we convert
991 999 raise exceptions.AbortException(e)(repr(e))
992 1000
993 1001 @reraise_safe_exceptions
994 1002 def bookmark(self, wire, bookmark, revision=None):
995 1003 repo = self._factory.repo(wire)
996 1004 baseui = self._factory._create_config(wire['config'])
997 1005 commands.bookmark(baseui, repo, bookmark, rev=revision, force=True)
998 1006
999 1007 @reraise_safe_exceptions
1000 1008 def install_hooks(self, wire, force=False):
1001 1009 # we don't need any special hooks for Mercurial
1002 1010 pass
1003 1011
1004 1012 @reraise_safe_exceptions
1005 1013 def get_hooks_info(self, wire):
1006 1014 return {
1007 1015 'pre_version': vcsserver.__version__,
1008 1016 'post_version': vcsserver.__version__,
1009 1017 }
1018
1019 @reraise_safe_exceptions
1020 def set_head_ref(self, wire, head_name):
1021 pass
1022
1023 @reraise_safe_exceptions
1024 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1025 archive_dir_name, commit_id):
1026
1027 def file_walker(_commit_id, path):
1028 repo = self._factory.repo(wire)
1029 ctx = repo[_commit_id]
1030 is_root = path in ['', '/']
1031 if is_root:
1032 matcher = alwaysmatcher(badfn=None)
1033 else:
1034 matcher = patternmatcher('', [(b'glob', path+'/**', b'')], badfn=None)
1035 file_iter = ctx.manifest().walk(matcher)
1036
1037 for fn in file_iter:
1038 file_path = fn
1039 flags = ctx.flags(fn)
1040 mode = b'x' in flags and 0o755 or 0o644
1041 is_link = b'l' in flags
1042
1043 yield ArchiveNode(file_path, mode, is_link, ctx[fn].data)
1044
1045 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1046 archive_dir_name, commit_id)
1047
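file_walker above selects manifest entries with a matcher: alwaysmatcher for the repository root, or a patternmatcher glob (`<path>/**`) for a subtree. A rough pure-Python illustration of the same selection, assuming the manifest is just a list of path strings:

    def walk_paths(manifest_paths, at_path):
        # root means "take everything"; otherwise keep paths under at_path,
        # approximating the `<at_path>/**` glob used above
        if at_path in ('', '/'):
            return list(manifest_paths)
        return [p for p in manifest_paths if p.startswith(at_path + '/')]

    files = ['README', 'docs/index.rst', 'docs/api/hg.rst']
    assert walk_paths(files, '') == files
    assert walk_paths(files, 'docs') == ['docs/index.rst', 'docs/api/hg.rst']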
@@ -1,79 +1,79 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """
19 19 Mercurial libs compatibility
20 20 """
21 21
22 22 import mercurial
23 23 from mercurial import demandimport
24 24 # patch demandimport, due to a bug in mercurial where it always triggers
25 25 # demandimport.enable()
26 26 demandimport.enable = lambda *args, **kwargs: 1
27 27
28 28 from mercurial import ui
29 29 from mercurial import patch
30 30 from mercurial import config
31 31 from mercurial import extensions
32 32 from mercurial import scmutil
33 33 from mercurial import archival
34 34 from mercurial import discovery
35 35 from mercurial import unionrepo
36 36 from mercurial import localrepo
37 37 from mercurial import merge as hg_merge
38 38 from mercurial import subrepo
39 39 from mercurial import subrepoutil
40 40 from mercurial import tags as hg_tag
41
41 from mercurial import util as hgutil
42 42 from mercurial.commands import clone, nullid, pull
43 43 from mercurial.context import memctx, memfilectx
44 44 from mercurial.error import (
45 45 LookupError, RepoError, RepoLookupError, Abort, InterventionRequired,
46 46 RequirementError, ProgrammingError)
47 47 from mercurial.hgweb import hgweb_mod
48 48 from mercurial.localrepo import instance
49 from mercurial.match import match
49 from mercurial.match import match, alwaysmatcher, patternmatcher
50 50 from mercurial.mdiff import diffopts
51 51 from mercurial.node import bin, hex
52 52 from mercurial.encoding import tolocal
53 53 from mercurial.discovery import findcommonoutgoing
54 54 from mercurial.hg import peer
55 55 from mercurial.httppeer import makepeer
56 56 from mercurial.util import url as hg_url
57 57 from mercurial.scmutil import revrange, revsymbol
58 58 from mercurial.node import nullrev
59 59 from mercurial import exchange
60 60 from hgext import largefiles
61 61
62 62 # those auth handlers are patched for a python 2.6.5 bug causing an
63 63 # infinite loop when given invalid resources
64 64 from mercurial.url import httpbasicauthhandler, httpdigestauthhandler
65 65
66 66
67 67 def get_ctx(repo, ref):
68 68 try:
69 69 ctx = repo[ref]
70 except ProgrammingError:
70 except (ProgrammingError, TypeError):
71 71 # we're unable to find the rev using a regular lookup, so we fall back
72 72 # to the slower, but backward compatible, revsymbol usage
73 73 ctx = revsymbol(repo, ref)
74 74 except (LookupError, RepoLookupError):
75 75 # Similar case as above but only for refs that are not numeric
76 76 if isinstance(ref, (int, long)):
77 77 raise
78 78 ctx = revsymbol(repo, ref)
79 79 return ctx
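get_ctx therefore accepts integers, hex node strings and symbolic names, trying the fast repo[ref] lookup first and only paying for revsymbol when that fails. A hedged usage sketch, assuming a local repository at a hypothetical path and Mercurial's bytes-based Python 3 API:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/tmp/example-repo')  # hypothetical path
    ctx = get_ctx(repo, b'tip')  # symbolic name, resolved via the fallback if needed
    print(ctx.hex(), ctx.rev())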
@@ -1,729 +1,729 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # RhodeCode VCSServer provides access to different vcs backends via network.
4 4 # Copyright (C) 2014-2020 RhodeCode GmbH
5 5 #
6 6 # This program is free software; you can redistribute it and/or modify
7 7 # it under the terms of the GNU General Public License as published by
8 8 # the Free Software Foundation; either version 3 of the License, or
9 9 # (at your option) any later version.
10 10 #
11 11 # This program is distributed in the hope that it will be useful,
12 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 14 # GNU General Public License for more details.
15 15 #
16 16 # You should have received a copy of the GNU General Public License
17 17 # along with this program; if not, write to the Free Software Foundation,
18 18 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 19
20 20 import io
21 21 import os
22 22 import sys
23 23 import logging
24 24 import collections
25 25 import importlib
26 26 import base64
27 27
28 28 from http.client import HTTPConnection
29 29
30 30
31 31 import mercurial.scmutil
32 32 import mercurial.node
33 33 import simplejson as json
34 34
35 35 from vcsserver import exceptions, subprocessio, settings
36 36
37 37 log = logging.getLogger(__name__)
38 38
39 39
40 40 class HooksHttpClient(object):
41 41 connection = None
42 42
43 43 def __init__(self, hooks_uri):
44 44 self.hooks_uri = hooks_uri
45 45
46 46 def __call__(self, method, extras):
47 47 connection = HTTPConnection(self.hooks_uri)
48 48 body = self._serialize(method, extras)
49 49 try:
50 50 connection.request('POST', '/', body)
51 51 except Exception:
52 log.error('Connection failed on %s', connection)
52 log.error('Hooks call failed, connection error on %s', connection.__dict__)
53 53 raise
54 54 response = connection.getresponse()
55 55
56 56 response_data = response.read()
57 57
58 58 try:
59 59 return json.loads(response_data)
60 60 except Exception:
61 61 log.exception('Failed to decode hook response json data. '
62 62 'response_code:%s, raw_data:%s',
63 63 response.status, response_data)
64 64 raise
65 65
66 66 def _serialize(self, hook_name, extras):
67 67 data = {
68 68 'method': hook_name,
69 69 'extras': extras
70 70 }
71 71 return json.dumps(data)
72 72
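The protocol between this client and the hooks daemon is deliberately small: a JSON body with `method` and `extras` POSTed to `/`, answered with a JSON document. A sketch of the round trip for a hypothetical post_push call:

    import json

    # what _serialize produces for the request body
    body = json.dumps({
        'method': 'post_push',
        'extras': {'repository': 'repo1', 'username': 'admin'},  # illustrative
    })

    # the shape of a reply that _call_hook/_handle_exception below can consume
    response = json.loads('{"status": 0, "output": "", "exception": null}')
    assert response['status'] == 0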
73 73
74 74 class HooksDummyClient(object):
75 75 def __init__(self, hooks_module):
76 76 self._hooks_module = importlib.import_module(hooks_module)
77 77
78 78 def __call__(self, hook_name, extras):
79 79 with self._hooks_module.Hooks() as hooks:
80 80 return getattr(hooks, hook_name)(extras)
81 81
82 82
83 83 class HooksShadowRepoClient(object):
84 84
85 85 def __call__(self, hook_name, extras):
86 86 return {'output': '', 'status': 0}
87 87
88 88
89 89 class RemoteMessageWriter(object):
90 90 """Writer base class."""
91 91 def write(self, message):
92 92 raise NotImplementedError()
93 93
94 94
95 95 class HgMessageWriter(RemoteMessageWriter):
96 96 """Writer that knows how to send messages to mercurial clients."""
97 97
98 98 def __init__(self, ui):
99 99 self.ui = ui
100 100
101 101 def write(self, message):
102 102 # TODO: Check why the quiet flag is set by default.
103 103 old = self.ui.quiet
104 104 self.ui.quiet = False
105 105 self.ui.status(message.encode('utf-8'))
106 106 self.ui.quiet = old
107 107
108 108
109 109 class GitMessageWriter(RemoteMessageWriter):
110 110 """Writer that knows how to send messages to git clients."""
111 111
112 112 def __init__(self, stdout=None):
113 113 self.stdout = stdout or sys.stdout
114 114
115 115 def write(self, message):
116 116 self.stdout.write(message.encode('utf-8'))
117 117
118 118
119 119 class SvnMessageWriter(RemoteMessageWriter):
120 120 """Writer that knows how to send messages to svn clients."""
121 121
122 122 def __init__(self, stderr=None):
123 123 # SVN needs data sent to stderr for back-to-client messaging
124 124 self.stderr = stderr or sys.stderr
125 125
126 126 def write(self, message):
127 127 self.stderr.write(message.encode('utf-8'))
128 128
129 129
130 130 def _handle_exception(result):
131 131 exception_class = result.get('exception')
132 132 exception_traceback = result.get('exception_traceback')
133 133
134 134 if exception_traceback:
135 135 log.error('Got traceback from remote call:%s', exception_traceback)
136 136
137 137 if exception_class == 'HTTPLockedRC':
138 138 raise exceptions.RepositoryLockedException()(*result['exception_args'])
139 139 elif exception_class == 'HTTPBranchProtected':
140 140 raise exceptions.RepositoryBranchProtectedException()(*result['exception_args'])
141 141 elif exception_class == 'RepositoryError':
142 142 raise exceptions.VcsException()(*result['exception_args'])
143 143 elif exception_class:
144 144 raise Exception('Got remote exception "%s" with args "%s"' %
145 145 (exception_class, result['exception_args']))
146 146
147 147
148 148 def _get_hooks_client(extras):
149 149 hooks_uri = extras.get('hooks_uri')
150 150 is_shadow_repo = extras.get('is_shadow_repo')
151 151 if hooks_uri:
152 152 return HooksHttpClient(extras['hooks_uri'])
153 153 elif is_shadow_repo:
154 154 return HooksShadowRepoClient()
155 155 else:
156 156 return HooksDummyClient(extras['hooks_module'])
157 157
158 158
159 159 def _call_hook(hook_name, extras, writer):
160 160 hooks_client = _get_hooks_client(extras)
161 161 log.debug('Hooks, using client:%s', hooks_client)
162 162 result = hooks_client(hook_name, extras)
163 163 log.debug('Hooks got result: %s', result)
164 164
165 165 _handle_exception(result)
166 166 writer.write(result['output'])
167 167
168 168 return result['status']
169 169
170 170
171 171 def _extras_from_ui(ui):
172 172 hook_data = ui.config('rhodecode', 'RC_SCM_DATA')
173 173 if not hook_data:
174 174 # maybe it's inside environ ?
175 175 env_hook_data = os.environ.get('RC_SCM_DATA')
176 176 if env_hook_data:
177 177 hook_data = env_hook_data
178 178
179 179 extras = {}
180 180 if hook_data:
181 181 extras = json.loads(hook_data)
182 182 return extras
183 183
184 184
185 185 def _rev_range_hash(repo, node, check_heads=False):
186 186 from vcsserver.hgcompat import get_ctx
187 187
188 188 commits = []
189 189 revs = []
190 190 start = get_ctx(repo, node).rev()
191 191 end = len(repo)
192 192 for rev in range(start, end):
193 193 revs.append(rev)
194 194 ctx = get_ctx(repo, rev)
195 195 commit_id = mercurial.node.hex(ctx.node())
196 196 branch = ctx.branch()
197 197 commits.append((commit_id, branch))
198 198
199 199 parent_heads = []
200 200 if check_heads:
201 201 parent_heads = _check_heads(repo, start, end, revs)
202 202 return commits, parent_heads
203 203
204 204
205 205 def _check_heads(repo, start, end, commits):
206 206 from vcsserver.hgcompat import get_ctx
207 207 changelog = repo.changelog
208 208 parents = set()
209 209
210 210 for new_rev in commits:
211 211 for p in changelog.parentrevs(new_rev):
212 212 if p == mercurial.node.nullrev:
213 213 continue
214 214 if p < start:
215 215 parents.add(p)
216 216
217 217 for p in parents:
218 218 branch = get_ctx(repo, p).branch()
219 219 # The heads descending from that parent, on the same branch
220 220 parent_heads = set([p])
221 221 reachable = set([p])
222 222 for x in range(p + 1, end):
223 223 if get_ctx(repo, x).branch() != branch:
224 224 continue
225 225 for pp in changelog.parentrevs(x):
226 226 if pp in reachable:
227 227 reachable.add(x)
228 228 parent_heads.discard(pp)
229 229 parent_heads.add(x)
230 230 # More than one head? Suggest merging
231 231 if len(parent_heads) > 1:
232 232 return list(parent_heads)
233 233
234 234 return []
235 235
236 236
237 237 def _get_git_env():
238 238 env = {}
239 239 for k, v in os.environ.items():
240 240 if k.startswith('GIT'):
241 241 env[k] = v
242 242
243 243 # serialized version
244 244 return [(k, v) for k, v in env.items()]
245 245
246 246
247 247 def _get_hg_env(old_rev, new_rev, txnid, repo_path):
248 248 env = {}
249 249 for k, v in os.environ.items():
250 250 if k.startswith('HG'):
251 251 env[k] = v
252 252
253 253 env['HG_NODE'] = old_rev
254 254 env['HG_NODE_LAST'] = new_rev
255 255 env['HG_TXNID'] = txnid
256 256 env['HG_PENDING'] = repo_path
257 257
258 258 return [(k, v) for k, v in env.items()]
259 259
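Both _get_git_env and _get_hg_env return plain lists of (key, value) pairs rather than a dict-like os.environ, so the data survives JSON serialization on its way to the hooks daemon. For illustration:

    import os

    os.environ['HG_EXAMPLE'] = '1'  # hypothetical variable for illustration
    env = [(k, v) for k, v in os.environ.items() if k.startswith('HG')]
    assert ('HG_EXAMPLE', '1') in env  # JSON-friendly, unlike os.environ itself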
260 260
261 261 def repo_size(ui, repo, **kwargs):
262 262 extras = _extras_from_ui(ui)
263 263 return _call_hook('repo_size', extras, HgMessageWriter(ui))
264 264
265 265
266 266 def pre_pull(ui, repo, **kwargs):
267 267 extras = _extras_from_ui(ui)
268 268 return _call_hook('pre_pull', extras, HgMessageWriter(ui))
269 269
270 270
271 271 def pre_pull_ssh(ui, repo, **kwargs):
272 272 extras = _extras_from_ui(ui)
273 273 if extras and extras.get('SSH'):
274 274 return pre_pull(ui, repo, **kwargs)
275 275 return 0
276 276
277 277
278 278 def post_pull(ui, repo, **kwargs):
279 279 extras = _extras_from_ui(ui)
280 280 return _call_hook('post_pull', extras, HgMessageWriter(ui))
281 281
282 282
283 283 def post_pull_ssh(ui, repo, **kwargs):
284 284 extras = _extras_from_ui(ui)
285 285 if extras and extras.get('SSH'):
286 286 return post_pull(ui, repo, **kwargs)
287 287 return 0
288 288
289 289
290 290 def pre_push(ui, repo, node=None, **kwargs):
291 291 """
292 292 Mercurial pre_push hook
293 293 """
294 294 extras = _extras_from_ui(ui)
295 295 detect_force_push = extras.get('detect_force_push')
296 296
297 297 rev_data = []
298 298 if node and kwargs.get('hooktype') == 'pretxnchangegroup':
299 299 branches = collections.defaultdict(list)
300 300 commits, _heads = _rev_range_hash(repo, node, check_heads=detect_force_push)
301 301 for commit_id, branch in commits:
302 302 branches[branch].append(commit_id)
303 303
304 304 for branch, commits in branches.items():
305 305 old_rev = kwargs.get('node_last') or commits[0]
306 306 rev_data.append({
307 307 'total_commits': len(commits),
308 308 'old_rev': old_rev,
309 309 'new_rev': commits[-1],
310 310 'ref': '',
311 311 'type': 'branch',
312 312 'name': branch,
313 313 })
314 314
315 315 for push_ref in rev_data:
316 316 push_ref['multiple_heads'] = _heads
317 317
318 318 repo_path = os.path.join(
319 319 extras.get('repo_store', ''), extras.get('repository', ''))
320 320 push_ref['hg_env'] = _get_hg_env(
321 321 old_rev=push_ref['old_rev'],
322 322 new_rev=push_ref['new_rev'], txnid=kwargs.get('txnid'),
323 323 repo_path=repo_path)
324 324
325 325 extras['hook_type'] = kwargs.get('hooktype', 'pre_push')
326 326 extras['commit_ids'] = rev_data
327 327
328 328 return _call_hook('pre_push', extras, HgMessageWriter(ui))
329 329
330 330
331 331 def pre_push_ssh(ui, repo, node=None, **kwargs):
332 332 extras = _extras_from_ui(ui)
333 333 if extras.get('SSH'):
334 334 return pre_push(ui, repo, node, **kwargs)
335 335
336 336 return 0
337 337
338 338
339 339 def pre_push_ssh_auth(ui, repo, node=None, **kwargs):
340 340 """
341 341 Mercurial pre_push hook for SSH
342 342 """
343 343 extras = _extras_from_ui(ui)
344 344 if extras.get('SSH'):
345 345 permission = extras['SSH_PERMISSIONS']
346 346
347 347 if 'repository.write' == permission or 'repository.admin' == permission:
348 348 return 0
349 349
350 350 # non-zero ret code
351 351 return 1
352 352
353 353 return 0
354 354
355 355
356 356 def post_push(ui, repo, node, **kwargs):
357 357 """
358 358 Mercurial post_push hook
359 359 """
360 360 extras = _extras_from_ui(ui)
361 361
362 362 commit_ids = []
363 363 branches = []
364 364 bookmarks = []
365 365 tags = []
366 366
367 367 commits, _heads = _rev_range_hash(repo, node)
368 368 for commit_id, branch in commits:
369 369 commit_ids.append(commit_id)
370 370 if branch not in branches:
371 371 branches.append(branch)
372 372
373 373 if hasattr(ui, '_rc_pushkey_branches'):
374 374 bookmarks = ui._rc_pushkey_branches
375 375
376 376 extras['hook_type'] = kwargs.get('hooktype', 'post_push')
377 377 extras['commit_ids'] = commit_ids
378 378 extras['new_refs'] = {
379 379 'branches': branches,
380 380 'bookmarks': bookmarks,
381 381 'tags': tags
382 382 }
383 383
384 384 return _call_hook('post_push', extras, HgMessageWriter(ui))
385 385
386 386
387 387 def post_push_ssh(ui, repo, node, **kwargs):
388 388 """
389 389 Mercurial post_push hook for SSH
390 390 """
391 391 if _extras_from_ui(ui).get('SSH'):
392 392 return post_push(ui, repo, node, **kwargs)
393 393 return 0
394 394
395 395
396 396 def key_push(ui, repo, **kwargs):
397 397 from vcsserver.hgcompat import get_ctx
398 398 if kwargs['new'] != '0' and kwargs['namespace'] == 'bookmarks':
399 399 # store new bookmarks in our UI object, propagated later to post_push
400 400 ui._rc_pushkey_branches = get_ctx(repo, kwargs['key']).bookmarks()
401 401 return
402 402
403 403
404 404 # backward compat
405 405 log_pull_action = post_pull
406 406
407 407 # backward compat
408 408 log_push_action = post_push
409 409
410 410
411 411 def handle_git_pre_receive(unused_repo_path, unused_revs, unused_env):
412 412 """
413 413 Old hook name: keep here for backward compatibility.
414 414
415 415 This is only required when the installed git hooks are not upgraded.
416 416 """
417 417 pass
418 418
419 419
420 420 def handle_git_post_receive(unused_repo_path, unused_revs, unused_env):
421 421 """
422 422 Old hook name: keep here for backward compatibility.
423 423
424 424 This is only required when the installed git hooks are not upgraded.
425 425 """
426 426 pass
427 427
428 428
429 429 HookResponse = collections.namedtuple('HookResponse', ('status', 'output'))
430 430
431 431
432 432 def git_pre_pull(extras):
433 433 """
434 434 Pre pull hook.
435 435
436 436 :param extras: dictionary containing the keys defined in simplevcs
437 437 :type extras: dict
438 438
439 439 :return: status code of the hook. 0 for success.
440 440 :rtype: int
441 441 """
442 442 if 'pull' not in extras['hooks']:
443 443 return HookResponse(0, '')
444 444
445 445 stdout = io.BytesIO()
446 446 try:
447 447 status = _call_hook('pre_pull', extras, GitMessageWriter(stdout))
448 448 except Exception as error:
449 449 status = 128
450 450 stdout.write('ERROR: %s\n' % str(error))
451 451
452 452 return HookResponse(status, stdout.getvalue())
453 453
454 454
455 455 def git_post_pull(extras):
456 456 """
457 457 Post pull hook.
458 458
459 459 :param extras: dictionary containing the keys defined in simplevcs
460 460 :type extras: dict
461 461
462 462 :return: status code of the hook. 0 for success.
463 463 :rtype: int
464 464 """
465 465 if 'pull' not in extras['hooks']:
466 466 return HookResponse(0, '')
467 467
468 468 stdout = io.BytesIO()
469 469 try:
470 470 status = _call_hook('post_pull', extras, GitMessageWriter(stdout))
471 471 except Exception as error:
472 472 status = 128
473 473 stdout.write('ERROR: %s\n' % error)
474 474
475 475 return HookResponse(status, stdout.getvalue())
476 476
477 477
478 478 def _parse_git_ref_lines(revision_lines):
479 479 rev_data = []
480 480 for revision_line in revision_lines or []:
481 481 old_rev, new_rev, ref = revision_line.strip().split(' ')
482 482 ref_data = ref.split('/', 2)
483 483 if ref_data[1] in ('tags', 'heads'):
484 484 rev_data.append({
485 485 # NOTE(marcink):
486 486 # we're unable to tell total_commits for git at this point
487 487 # but we set the variable for consistency with the other backends
488 488 'total_commits': -1,
489 489 'old_rev': old_rev,
490 490 'new_rev': new_rev,
491 491 'ref': ref,
492 492 'type': ref_data[1],
493 493 'name': ref_data[2],
494 494 })
495 495 return rev_data
496 496
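Each line git feeds to the pre-/post-receive hooks is `<old-sha> <new-sha> <ref>`; _parse_git_ref_lines keeps only heads and tags. A worked example with a hypothetical branch creation:

    line = ('0000000000000000000000000000000000000000 '
            'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 refs/heads/main')
    old_rev, new_rev, ref = line.strip().split(' ')
    ref_type, ref_name = ref.split('/', 2)[1:]
    assert (ref_type, ref_name) == ('heads', 'main')
    # forty zeros as old_rev marks a newly created ref, which is exactly
    # the empty_commit_id test used by the receive hooks below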
497 497
498 498 def git_pre_receive(unused_repo_path, revision_lines, env):
499 499 """
500 500 Pre push hook.
501 501
502 502 :param extras: dictionary containing the keys defined in simplevcs
503 503 :type extras: dict
504 504
505 505 :return: status code of the hook. 0 for success.
506 506 :rtype: int
507 507 """
508 508 extras = json.loads(env['RC_SCM_DATA'])
509 509 rev_data = _parse_git_ref_lines(revision_lines)
510 510 if 'push' not in extras['hooks']:
511 511 return 0
512 512 empty_commit_id = '0' * 40
513 513
514 514 detect_force_push = extras.get('detect_force_push')
515 515
516 516 for push_ref in rev_data:
517 517 # store our git-env which holds the temp store
518 518 push_ref['git_env'] = _get_git_env()
519 519 push_ref['pruned_sha'] = ''
520 520 if not detect_force_push:
521 521 # don't check for forced-push when we don't need to
522 522 continue
523 523
524 524 type_ = push_ref['type']
525 525 new_branch = push_ref['old_rev'] == empty_commit_id
526 526 delete_branch = push_ref['new_rev'] == empty_commit_id
527 527 if type_ == 'heads' and not (new_branch or delete_branch):
528 528 old_rev = push_ref['old_rev']
529 529 new_rev = push_ref['new_rev']
530 530 cmd = [settings.GIT_EXECUTABLE, 'rev-list', old_rev, '^{}'.format(new_rev)]
531 531 stdout, stderr = subprocessio.run_command(
532 532 cmd, env=os.environ.copy())
533 533 # non-empty output means there are unreachable objects: a forced push was used
534 534 if stdout:
535 535 push_ref['pruned_sha'] = stdout.splitlines()
536 536
537 537 extras['hook_type'] = 'pre_receive'
538 538 extras['commit_ids'] = rev_data
539 539 return _call_hook('pre_push', extras, GitMessageWriter())
540 540
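The force-push detection above leans on `git rev-list <old> ^<new>`: commits reachable from the old tip but not from the new one can only exist if history was rewritten. A standalone sketch of the same test with plain subprocess, assuming git is on PATH and both revisions exist in the repository:

    import subprocess

    def was_force_push(repo_path, old_rev, new_rev):
        # non-empty output lists the commits this push pruned
        out = subprocess.check_output(
            ['git', 'rev-list', old_rev, '^%s' % new_rev], cwd=repo_path)
        return bool(out.strip())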
541 541
542 542 def git_post_receive(unused_repo_path, revision_lines, env):
543 543 """
544 544 Post push hook.
545 545
546 546 :param extras: dictionary containing the keys defined in simplevcs
547 547 :type extras: dict
548 548
549 549 :return: status code of the hook. 0 for success.
550 550 :rtype: int
551 551 """
552 552 extras = json.loads(env['RC_SCM_DATA'])
553 553 if 'push' not in extras['hooks']:
554 554 return 0
555 555
556 556 rev_data = _parse_git_ref_lines(revision_lines)
557 557
558 558 git_revs = []
559 559
560 560 # N.B.(skreft): it is ok to just call git, as git before calling a
561 561 # subcommand sets the PATH environment variable so that it points to the
562 562 # correct version of the git executable.
563 563 empty_commit_id = '0' * 40
564 564 branches = []
565 565 tags = []
566 566 for push_ref in rev_data:
567 567 type_ = push_ref['type']
568 568
569 569 if type_ == 'heads':
570 570 if push_ref['old_rev'] == empty_commit_id:
571 571 # starting new branch case
572 572 if push_ref['name'] not in branches:
573 573 branches.append(push_ref['name'])
574 574
575 575 # Fix up head revision if needed
576 576 cmd = [settings.GIT_EXECUTABLE, 'show', 'HEAD']
577 577 try:
578 578 subprocessio.run_command(cmd, env=os.environ.copy())
579 579 except Exception:
580 cmd = [settings.GIT_EXECUTABLE, 'symbolic-ref', 'HEAD',
581 'refs/heads/%s' % push_ref['name']]
580 cmd = [settings.GIT_EXECUTABLE, 'symbolic-ref', '"HEAD"',
581 '"refs/heads/%s"' % push_ref['name']]
582 582 print("Setting default branch to %s" % push_ref['name'])
583 583 subprocessio.run_command(cmd, env=os.environ.copy())
584 584
585 585 cmd = [settings.GIT_EXECUTABLE, 'for-each-ref',
586 586 '--format=%(refname)', 'refs/heads/*']
587 587 stdout, stderr = subprocessio.run_command(
588 588 cmd, env=os.environ.copy())
589 589 heads = stdout
590 590 heads = heads.replace(push_ref['ref'], '')
591 591 heads = ' '.join(head for head
592 592 in heads.splitlines() if head) or '.'
593 593 cmd = [settings.GIT_EXECUTABLE, 'log', '--reverse',
594 594 '--pretty=format:%H', '--', push_ref['new_rev'],
595 595 '--not', heads]
596 596 stdout, stderr = subprocessio.run_command(
597 597 cmd, env=os.environ.copy())
598 598 git_revs.extend(stdout.splitlines())
599 599 elif push_ref['new_rev'] == empty_commit_id:
600 600 # delete branch case
601 601 git_revs.append('delete_branch=>%s' % push_ref['name'])
602 602 else:
603 603 if push_ref['name'] not in branches:
604 604 branches.append(push_ref['name'])
605 605
606 606 cmd = [settings.GIT_EXECUTABLE, 'log',
607 607 '{old_rev}..{new_rev}'.format(**push_ref),
608 608 '--reverse', '--pretty=format:%H']
609 609 stdout, stderr = subprocessio.run_command(
610 610 cmd, env=os.environ.copy())
611 611 git_revs.extend(stdout.splitlines())
612 612 elif type_ == 'tags':
613 613 if push_ref['name'] not in tags:
614 614 tags.append(push_ref['name'])
615 615 git_revs.append('tag=>%s' % push_ref['name'])
616 616
617 617 extras['hook_type'] = 'post_receive'
618 618 extras['commit_ids'] = git_revs
619 619 extras['new_refs'] = {
620 620 'branches': branches,
621 621 'bookmarks': [],
622 622 'tags': tags,
623 623 }
624 624
625 625 if 'repo_size' in extras['hooks']:
626 626 try:
627 627 _call_hook('repo_size', extras, GitMessageWriter())
628 628 except:
629 629 pass
630 630
631 631 return _call_hook('post_push', extras, GitMessageWriter())
632 632
633 633
634 634 def _get_extras_from_txn_id(path, txn_id):
635 635 extras = {}
636 636 try:
637 637 cmd = [settings.SVNLOOK_EXECUTABLE, 'pget',
638 638 '-t', txn_id,
639 639 '--revprop', path, 'rc-scm-extras']
640 640 stdout, stderr = subprocessio.run_command(
641 641 cmd, env=os.environ.copy())
642 642 extras = json.loads(base64.urlsafe_b64decode(stdout))
643 643 except Exception:
644 644 log.exception('Failed to extract extras info from txn_id')
645 645
646 646 return extras
647 647
648 648
649 649 def _get_extras_from_commit_id(commit_id, path):
650 650 extras = {}
651 651 try:
652 652 cmd = [settings.SVNLOOK_EXECUTABLE, 'pget',
653 653 '-r', commit_id,
654 654 '--revprop', path, 'rc-scm-extras']
655 655 stdout, stderr = subprocessio.run_command(
656 656 cmd, env=os.environ.copy())
657 657 extras = json.loads(base64.urlsafe_b64decode(stdout))
658 658 except Exception:
659 659 log.exception('Failed to extract extras info from commit_id')
660 660
661 661 return extras
662 662
663 663
664 664 def svn_pre_commit(repo_path, commit_data, env):
665 665 path, txn_id = commit_data
666 666 branches = []
667 667 tags = []
668 668
669 669 if env.get('RC_SCM_DATA'):
670 670 extras = json.loads(env['RC_SCM_DATA'])
671 671 else:
672 672 # fallback method to read from TXN-ID stored data
673 673 extras = _get_extras_from_txn_id(path, txn_id)
674 674 if not extras:
675 675 return 0
676 676
677 677 extras['hook_type'] = 'pre_commit'
678 678 extras['commit_ids'] = [txn_id]
679 679 extras['txn_id'] = txn_id
680 680 extras['new_refs'] = {
681 681 'total_commits': 1,
682 682 'branches': branches,
683 683 'bookmarks': [],
684 684 'tags': tags,
685 685 }
686 686
687 687 return _call_hook('pre_push', extras, SvnMessageWriter())
688 688
689 689
690 690 def svn_post_commit(repo_path, commit_data, env):
691 691 """
692 692 commit_data is path, rev, txn_id
693 693 """
694 694 if len(commit_data) == 3:
695 695 path, commit_id, txn_id = commit_data
696 696 elif len(commit_data) == 2:
697 697 log.error('Failed to extract txn_id from commit_data using legacy method. '
698 698 'Some functionality might be limited')
699 699 path, commit_id = commit_data
700 700 txn_id = None
701 701
702 702 branches = []
703 703 tags = []
704 704
705 705 if env.get('RC_SCM_DATA'):
706 706 extras = json.loads(env['RC_SCM_DATA'])
707 707 else:
708 708 # fallback method to read from TXN-ID stored data
709 709 extras = _get_extras_from_commit_id(commit_id, path)
710 710 if not extras:
711 711 return 0
712 712
713 713 extras['hook_type'] = 'post_commit'
714 714 extras['commit_ids'] = [commit_id]
715 715 extras['txn_id'] = txn_id
716 716 extras['new_refs'] = {
717 717 'branches': branches,
718 718 'bookmarks': [],
719 719 'tags': tags,
720 720 'total_commits': 1,
721 721 }
722 722
723 723 if 'repo_size' in extras['hooks']:
724 724 try:
725 725 _call_hook('repo_size', extras, SvnMessageWriter())
726 726 except Exception:
727 727 pass
728 728
729 729 return _call_hook('post_push', extras, SvnMessageWriter())
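Both SVN fallback helpers above decode the `rc-scm-extras` revision property, which holds the hook extras as urlsafe-base64-encoded JSON. A minimal round-trip sketch of that encoding:

    import base64
    import json

    extras = {'repository': 'repo1', 'username': 'admin'}  # illustrative payload
    encoded = base64.urlsafe_b64encode(json.dumps(extras).encode('utf-8'))
    assert json.loads(base64.urlsafe_b64decode(encoded)) == extras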
@@ -1,702 +1,740 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import sys
20 20 import base64
21 21 import locale
22 22 import logging
23 23 import uuid
24 import time
24 25 import wsgiref.util
25 26 import traceback
26 27 import tempfile
28 import psutil
29
27 30 from itertools import chain
28 31 from io import StringIO
29 32
30 33 import simplejson as json
31 34 import msgpack
32 35 import configparser
36
33 37 from pyramid.config import Configurator
34 38 from pyramid.settings import asbool, aslist
35 39 from pyramid.wsgi import wsgiapp
36 40 from pyramid.response import Response
37 41
42 from vcsserver.config.settings_maker import SettingsMaker
38 43 from vcsserver.utils import safe_int
44 from vcsserver.lib.statsd_client import StatsdClient
39 45
40 46 log = logging.getLogger(__name__)
41 47
42 48 # due to Mercurial/glibc2.27 problems we need to detect if locale settings are
43 49 # causing problems; if they are, we "fix" them by falling back to LC_ALL=C
44 50
45 51 try:
46 52 locale.setlocale(locale.LC_ALL, '')
47 53 except locale.Error as e:
48 54 log.error(
49 55 'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e)
50 56 os.environ['LC_ALL'] = 'C'
51 57
58
52 59 import vcsserver
53 60 from vcsserver import remote_wsgi, scm_app, settings, hgpatches
54 61 from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT
55 62 from vcsserver.echo_stub import remote_wsgi as remote_wsgi_stub
56 63 from vcsserver.echo_stub.echo_app import EchoApp
57 64 from vcsserver.exceptions import HTTPRepoLocked, HTTPRepoBranchProtected
58 65 from vcsserver.lib.exc_tracking import store_exception
59 66 from vcsserver.server import VcsServer
60 67
61 68 strict_vcs = True
62 69
63 70 git_import_err = None
64 71 try:
65 72 from vcsserver.git import GitFactory, GitRemote
66 73 except ImportError as e:
67 74 GitFactory = None
68 75 GitRemote = None
69 76 git_import_err = e
70 77 if strict_vcs:
71 78 raise
72 79
73 80
74 81 hg_import_err = None
75 82 try:
76 83 from vcsserver.hg import MercurialFactory, HgRemote
77 84 except ImportError as e:
78 85 MercurialFactory = None
79 86 HgRemote = None
80 87 hg_import_err = e
81 88 if strict_vcs:
82 89 raise
83 90
84 91
85 92 svn_import_err = None
86 93 try:
87 94 from vcsserver.svn import SubversionFactory, SvnRemote
88 95 except ImportError as e:
89 96 SubversionFactory = None
90 97 SvnRemote = None
91 98 svn_import_err = e
92 99 if strict_vcs:
93 100 raise
94 101
95 102
96 103 def _is_request_chunked(environ):
97 104 stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked'
98 105 return stream
99 106
100 107
101 def _int_setting(settings, name, default):
102 settings[name] = int(settings.get(name, default))
103 return settings[name]
104
105
106 def _bool_setting(settings, name, default):
107 input_val = settings.get(name, default)
108 if isinstance(input_val, str):
109 input_val = input_val.encode('utf8')
110 settings[name] = asbool(input_val)
111 return settings[name]
112
113
114 def _list_setting(settings, name, default):
115 raw_value = settings.get(name, default)
116
117 # Otherwise we assume it uses pyramids space/newline separation.
118 settings[name] = aslist(raw_value)
119 return settings[name]
120
121
122 def _string_setting(settings, name, default, lower=True, default_when_empty=False):
123 value = settings.get(name, default)
124
125 if default_when_empty and not value:
126 # use default value when value is empty
127 value = default
128
129 if lower:
130 value = value.lower()
131 settings[name] = value
132 return settings[name]
108 def log_max_fd():
109 try:
110 maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1]
111 log.info('Max file descriptors value: %s', maxfd)
112 except Exception:
113 pass
133 114
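psutil's rlimit() returns a (soft, hard) pair, and log_max_fd logs the hard RLIMIT_NOFILE value while deliberately swallowing errors on platforms where the call is unavailable. Roughly:

    import psutil  # same dependency as imported above

    try:
        soft, hard = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)
        print('fd limits: soft=%s hard=%s' % (soft, hard))
    except Exception:
        pass  # rlimit() is Linux-only; fail quietly elsewhere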
134 115
135 116 class VCS(object):
136 117 def __init__(self, locale_conf=None, cache_config=None):
137 118 self.locale = locale_conf
138 119 self.cache_config = cache_config
139 120 self._configure_locale()
140 121
122 log_max_fd()
123
141 124 if GitFactory and GitRemote:
142 125 git_factory = GitFactory()
143 126 self._git_remote = GitRemote(git_factory)
144 127 else:
145 128 log.error("Git client import failed: %s", git_import_err)
146 129
147 130 if MercurialFactory and HgRemote:
148 131 hg_factory = MercurialFactory()
149 132 self._hg_remote = HgRemote(hg_factory)
150 133 else:
151 134 log.error("Mercurial client import failed: %s", hg_import_err)
152 135
153 136 if SubversionFactory and SvnRemote:
154 137 svn_factory = SubversionFactory()
155 138
156 139 # hg factory is used for svn url validation
157 140 hg_factory = MercurialFactory()
158 141 self._svn_remote = SvnRemote(svn_factory, hg_factory=hg_factory)
159 142 else:
160 143 log.error("Subversion client import failed: %s", svn_import_err)
161 144
162 145 self._vcsserver = VcsServer()
163 146
164 147 def _configure_locale(self):
165 148 if self.locale:
166 149 log.info('Setting locale: `LC_ALL` to %s', self.locale)
167 150 else:
168 151 log.info('Configuring locale subsystem based on environment variables')
169 152 try:
170 153 # If self.locale is the empty string, then the locale
171 154 # module will use the environment variables. See the
172 155 # documentation of the package `locale`.
173 156 locale.setlocale(locale.LC_ALL, self.locale)
174 157
175 158 language_code, encoding = locale.getlocale()
176 159 log.info(
177 160 'Locale set to language code "%s" with encoding "%s".',
178 161 language_code, encoding)
179 162 except locale.Error:
180 163 log.exception('Cannot set locale, not configuring the locale system')
181 164
182 165
183 166 class WsgiProxy(object):
184 167 def __init__(self, wsgi):
185 168 self.wsgi = wsgi
186 169
187 170 def __call__(self, environ, start_response):
188 171 input_data = environ['wsgi.input'].read()
189 172 input_data = msgpack.unpackb(input_data)
190 173
191 174 error = None
192 175 try:
193 176 data, status, headers = self.wsgi.handle(
194 177 input_data['environment'], input_data['input_data'],
195 178 *input_data['args'], **input_data['kwargs'])
196 179 except Exception as e:
197 180 data, status, headers = [], None, None
198 181 error = {
199 182 'message': str(e),
200 183 '_vcs_kind': getattr(e, '_vcs_kind', None)
201 184 }
202 185
203 186 start_response(200, {})
204 187 return self._iterator(error, status, headers, data)
205 188
206 189 def _iterator(self, error, status, headers, data):
207 190 initial_data = [
208 191 error,
209 192 status,
210 193 headers,
211 194 ]
212 195
213 196 for d in chain(initial_data, data):
214 197 yield msgpack.packb(d)
215 198
216 199
217 200 def not_found(request):
218 201 return {'status': '404 NOT FOUND'}
219 202
220 203
221 204 class VCSViewPredicate(object):
222 205 def __init__(self, val, config):
223 206 self.remotes = val
224 207
225 208 def text(self):
226 209 return 'vcs view method = %s' % (list(self.remotes.keys()),)
227 210
228 211 phash = text
229 212
230 213 def __call__(self, context, request):
231 214 """
232 215 View predicate that returns true if the given backend is supported
233 216 by the defined remotes.
234 217 """
235 218 backend = request.matchdict.get('backend')
236 219 return backend in self.remotes
237 220
238 221
239 222 class HTTPApplication(object):
240 223 ALLOWED_EXCEPTIONS = ('KeyError', 'URLError')
241 224
242 225 remote_wsgi = remote_wsgi
243 226 _use_echo_app = False
244 227
245 228 def __init__(self, settings=None, global_config=None):
246 self._sanitize_settings_and_apply_defaults(settings)
247 229
248 230 self.config = Configurator(settings=settings)
231 # Init our statsd at very start
232 self.config.registry.statsd = StatsdClient.statsd
233
249 234 self.global_config = global_config
250 235 self.config.include('vcsserver.lib.rc_cache')
251 236
252 237 settings_locale = settings.get('locale', '') or 'en_US.UTF-8'
253 238 vcs = VCS(locale_conf=settings_locale, cache_config=settings)
254 239 self._remotes = {
255 240 'hg': vcs._hg_remote,
256 241 'git': vcs._git_remote,
257 242 'svn': vcs._svn_remote,
258 243 'server': vcs._vcsserver,
259 244 }
260 245 if settings.get('dev.use_echo_app', 'false').lower() == 'true':
261 246 self._use_echo_app = True
262 247 log.warning("Using EchoApp for VCS operations.")
263 248 self.remote_wsgi = remote_wsgi_stub
264 249
265 250 self._configure_settings(global_config, settings)
251
266 252 self._configure()
267 253
268 254 def _configure_settings(self, global_config, app_settings):
269 255 """
270 256 Configure the settings module.
271 257 """
272 258 settings_merged = global_config.copy()
273 259 settings_merged.update(app_settings)
274 260
275 261 git_path = app_settings.get('git_path', None)
276 262 if git_path:
277 263 settings.GIT_EXECUTABLE = git_path
278 264 binary_dir = app_settings.get('core.binary_dir', None)
279 265 if binary_dir:
280 266 settings.BINARY_DIR = binary_dir
281 267
282 268 # Store the settings to make them available to other modules.
283 269 vcsserver.PYRAMID_SETTINGS = settings_merged
284 270 vcsserver.CONFIG = settings_merged
285 271
286 def _sanitize_settings_and_apply_defaults(self, settings):
287 temp_store = tempfile.gettempdir()
288 default_cache_dir = os.path.join(temp_store, 'rc_cache')
289
290 # save default, cache dir, and use it for all backends later.
291 default_cache_dir = _string_setting(
292 settings,
293 'cache_dir',
294 default_cache_dir, lower=False, default_when_empty=True)
295
296 # ensure we have our dir created
297 if not os.path.isdir(default_cache_dir):
298 os.makedirs(default_cache_dir, mode=0o755)
299
300 # exception store cache
301 _string_setting(
302 settings,
303 'exception_tracker.store_path',
304 temp_store, lower=False, default_when_empty=True)
305
306 # repo_object cache
307 _string_setting(
308 settings,
309 'rc_cache.repo_object.backend',
310 'dogpile.cache.rc.file_namespace', lower=False)
311 _int_setting(
312 settings,
313 'rc_cache.repo_object.expiration_time',
314 30 * 24 * 60 * 60)
315 _string_setting(
316 settings,
317 'rc_cache.repo_object.arguments.filename',
318 os.path.join(default_cache_dir, 'vcsserver_cache_1'), lower=False)
319
320 272 def _configure(self):
321 273 self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)
322 274
323 275 self.config.add_route('service', '/_service')
324 276 self.config.add_route('status', '/status')
325 277 self.config.add_route('hg_proxy', '/proxy/hg')
326 278 self.config.add_route('git_proxy', '/proxy/git')
327 279
328 280 # rpc methods
329 281 self.config.add_route('vcs', '/{backend}')
330 282
331 283 # streaming rpc remote methods
332 284 self.config.add_route('vcs_stream', '/{backend}/stream')
333 285
334 286 # vcs operations clone/push as streaming
335 287 self.config.add_route('stream_git', '/stream/git/*repo_name')
336 288 self.config.add_route('stream_hg', '/stream/hg/*repo_name')
337 289
338 290 self.config.add_view(self.status_view, route_name='status', renderer='json')
339 291 self.config.add_view(self.service_view, route_name='service', renderer='msgpack')
340 292
341 293 self.config.add_view(self.hg_proxy(), route_name='hg_proxy')
342 294 self.config.add_view(self.git_proxy(), route_name='git_proxy')
343 295 self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack',
344 296 vcs_view=self._remotes)
345 297 self.config.add_view(self.vcs_stream_view, route_name='vcs_stream',
346 298 vcs_view=self._remotes)
347 299
348 300 self.config.add_view(self.hg_stream(), route_name='stream_hg')
349 301 self.config.add_view(self.git_stream(), route_name='stream_git')
350 302
351 303 self.config.add_view_predicate('vcs_view', VCSViewPredicate)
352 304
353 305 self.config.add_notfound_view(not_found, renderer='json')
354 306
355 307 self.config.add_view(self.handle_vcs_exception, context=Exception)
356 308
357 309 self.config.add_tween(
358 310 'vcsserver.tweens.request_wrapper.RequestWrapperTween',
359 311 )
360 312 self.config.add_request_method(
361 313 'vcsserver.lib.request_counter.get_request_counter',
362 314 'request_count')
363 315
364 316 def wsgi_app(self):
365 317 return self.config.make_wsgi_app()
366 318
367 319 def _vcs_view_params(self, request):
368 320 remote = self._remotes[request.matchdict['backend']]
369 321 payload = msgpack.unpackb(request.body, use_list=True)
370 322 method = payload.get('method')
371 323 params = payload['params']
372 324 wire = params.get('wire')
373 325 args = params.get('args')
374 326 kwargs = params.get('kwargs')
375 327 context_uid = None
376 328
377 329 if wire:
378 330 try:
379 331 wire['context'] = context_uid = uuid.UUID(wire['context'])
380 332 except KeyError:
381 333 pass
382 334 args.insert(0, wire)
383 335 repo_state_uid = wire.get('repo_state_uid') if wire else None
384 336
385 337 # NOTE(marcink): trading complexity for a slight performance gain
386 338 if log.isEnabledFor(logging.DEBUG):
387 339 no_args_methods = [
388 'archive_repo'
340
389 341 ]
390 342 if method in no_args_methods:
391 343 call_args = ''
392 344 else:
393 345 call_args = args[1:]
394 346
395 log.debug('method requested:%s with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
347 log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
396 348 method, call_args, kwargs, context_uid, repo_state_uid)
397 349
350 statsd = request.registry.statsd
351 if statsd:
352 statsd.incr(
353 'vcsserver_method_total', tags=[
354 "method:{}".format(method),
355 ])
398 356 return payload, remote, method, args, kwargs
399 357
400 358 def vcs_view(self, request):
401 359
402 360 payload, remote, method, args, kwargs = self._vcs_view_params(request)
403 361 payload_id = payload.get('id')
404 362
405 363 try:
406 364 resp = getattr(remote, method)(*args, **kwargs)
407 365 except Exception as e:
408 366 exc_info = list(sys.exc_info())
409 367 exc_type, exc_value, exc_traceback = exc_info
410 368
411 369 org_exc = getattr(e, '_org_exc', None)
412 370 org_exc_name = None
413 371 org_exc_tb = ''
414 372 if org_exc:
415 373 org_exc_name = org_exc.__class__.__name__
416 374 org_exc_tb = getattr(e, '_org_exc_tb', '')
417 375 # replace our "faked" exception with our org
418 376 exc_info[0] = org_exc.__class__
419 377 exc_info[1] = org_exc
420 378
421 379 should_store_exc = True
422 380 if org_exc:
423 381 def get_exc_fqn(_exc_obj):
424 382 module_name = getattr(org_exc.__class__, '__module__', 'UNKNOWN')
425 383 return module_name + '.' + org_exc_name
426 384
427 385 exc_fqn = get_exc_fqn(org_exc)
428 386
429 387 if exc_fqn in ['mercurial.error.RepoLookupError',
430 388 'vcsserver.exceptions.RefNotFoundException']:
431 389 should_store_exc = False
432 390
433 391 if should_store_exc:
434 store_exception(id(exc_info), exc_info)
392 store_exception(id(exc_info), exc_info, request_path=request.path)
435 393
436 394 tb_info = ''.join(
437 395 traceback.format_exception(exc_type, exc_value, exc_traceback))
438 396
439 397 type_ = e.__class__.__name__
440 398 if type_ not in self.ALLOWED_EXCEPTIONS:
441 399 type_ = None
442 400
443 401 resp = {
444 402 'id': payload_id,
445 403 'error': {
446 404 'message': e.message,
447 405 'traceback': tb_info,
448 406 'org_exc': org_exc_name,
449 407 'org_exc_tb': org_exc_tb,
450 408 'type': type_
451 409 }
452 410 }
411
453 412 try:
454 413 resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
455 414 except AttributeError:
456 415 pass
457 416 else:
458 417 resp = {
459 418 'id': payload_id,
460 419 'result': resp
461 420 }
462 421
463 422 return resp
464 423
465 424 def vcs_stream_view(self, request):
466 425 payload, remote, method, args, kwargs = self._vcs_view_params(request)
467 426 # this method has a `stream:` marker; we remove it here
468 427 method = method.split('stream:')[-1]
469 428 chunk_size = safe_int(payload.get('chunk_size')) or 4096
470 429
471 430 try:
472 431 resp = getattr(remote, method)(*args, **kwargs)
473 432 except Exception as e:
474 433 raise
475 434
476 435 def get_chunked_data(method_resp):
477 436 stream = StringIO(method_resp)
478 437 while 1:
479 438 chunk = stream.read(chunk_size)
480 439 if not chunk:
481 440 break
482 441 yield chunk
483 442
484 443 response = Response(app_iter=get_chunked_data(resp))
485 444 response.content_type = 'application/octet-stream'
486 445
487 446 return response
488 447
489 448 def status_view(self, request):
490 449 import vcsserver
491 450 return {'status': 'OK', 'vcsserver_version': vcsserver.__version__,
492 451 'pid': os.getpid()}
493 452
494 453 def service_view(self, request):
495 454 import vcsserver
496 455
497 456 payload = msgpack.unpackb(request.body, use_list=True)
498 457 server_config, app_config = {}, {}
499 458
500 459 try:
501 460 path = self.global_config['__file__']
502 461 config = configparser.RawConfigParser()
503 462
504 463 config.read(path)
505 464
506 465 if config.has_section('server:main'):
507 466 server_config = dict(config.items('server:main'))
508 467 if config.has_section('app:main'):
509 468 app_config = dict(config.items('app:main'))
510 469
511 470 except Exception:
512 471 log.exception('Failed to read .ini file for display')
513 472
514 473 environ = list(os.environ.items())
515 474
516 475 resp = {
517 476 'id': payload.get('id'),
518 477 'result': dict(
519 478 version=vcsserver.__version__,
520 479 config=server_config,
521 480 app_config=app_config,
522 481 environ=environ,
523 482 payload=payload,
524 483 )
525 484 }
526 485 return resp
527 486
528 487 def _msgpack_renderer_factory(self, info):
529 488 def _render(value, system):
530 489 request = system.get('request')
531 490 if request is not None:
532 491 response = request.response
533 492 ct = response.content_type
534 493 if ct == response.default_content_type:
535 494 response.content_type = 'application/x-msgpack'
536 495 return msgpack.packb(value)
537 496 return _render
538 497
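(A hedged client-side counterpart of this renderer; `raw=False` assumes a reasonably recent msgpack-python.)

    import msgpack

    def decode_vcs_response(body):
        # body is the application/x-msgpack payload produced by _render
        resp = msgpack.unpackb(body, raw=False)
        if resp.get('error'):
            raise RuntimeError(resp['error']['message'])
        return resp['result']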
539 498 def set_env_from_config(self, environ, config):
540 499 dict_conf = {}
541 500 try:
542 501 for elem in config:
543 502 if elem[0] == 'rhodecode':
544 503 dict_conf = json.loads(elem[2])
545 504 break
546 505 except Exception:
547 506 log.exception('Failed to fetch SCM CONFIG')
548 507 return
549 508
550 509 username = dict_conf.get('username')
551 510 if username:
552 511 environ['REMOTE_USER'] = username
553 512 # Mercurial-specific: some extension APIs rely on this
554 513 environ['HGUSER'] = username
555 514
556 515 ip = dict_conf.get('ip')
557 516 if ip:
558 517 environ['REMOTE_HOST'] = ip
559 518
560 519 if _is_request_chunked(environ):
561 520 # set the compatibility flag for webob
562 521 environ['wsgi.input_terminated'] = True
563 522
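(To illustrate, a sketch of the config sequence this method consumes: 3-item entries whose 'rhodecode' entry carries a JSON blob; the middle element is unused here and its value below is a placeholder.)

    import json

    config = [
        ('rhodecode', '', json.dumps({'username': 'admin',
                                      'ip': '10.0.0.1'})),
    ]
    environ = {}
    # after set_env_from_config(environ, config) one would expect:
    #   environ['REMOTE_USER'] == 'admin'
    #   environ['HGUSER']      == 'admin'
    #   environ['REMOTE_HOST'] == '10.0.0.1'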
564 523 def hg_proxy(self):
565 524 @wsgiapp
566 525 def _hg_proxy(environ, start_response):
567 526 app = WsgiProxy(self.remote_wsgi.HgRemoteWsgi())
568 527 return app(environ, start_response)
569 528 return _hg_proxy
570 529
571 530 def git_proxy(self):
572 531 @wsgiapp
573 532 def _git_proxy(environ, start_response):
574 533 app = WsgiProxy(self.remote_wsgi.GitRemoteWsgi())
575 534 return app(environ, start_response)
576 535 return _git_proxy
577 536
578 537 def hg_stream(self):
579 538 if self._use_echo_app:
580 539 @wsgiapp
581 540 def _hg_stream(environ, start_response):
582 541 app = EchoApp('fake_path', 'fake_name', None)
583 542 return app(environ, start_response)
584 543 return _hg_stream
585 544 else:
586 545 @wsgiapp
587 546 def _hg_stream(environ, start_response):
588 547 log.debug('http-app: handling hg stream')
589 548 repo_path = environ['HTTP_X_RC_REPO_PATH']
590 549 repo_name = environ['HTTP_X_RC_REPO_NAME']
591 550 packed_config = base64.b64decode(
592 551 environ['HTTP_X_RC_REPO_CONFIG'])
593 552 config = msgpack.unpackb(packed_config)
594 553 app = scm_app.create_hg_wsgi_app(
595 554 repo_path, repo_name, config)
596 555
597 556 # Consistent path information for hgweb
598 557 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
599 558 environ['REPO_NAME'] = repo_name
600 559 self.set_env_from_config(environ, config)
601 560
602 561 log.debug('http-app: starting app handler '
603 562 'with %s and processing the request', app)
604 563 return app(environ, ResponseFilter(start_response))
605 564 return _hg_stream
606 565
607 566 def git_stream(self):
608 567 if self._use_echo_app:
609 568 @wsgiapp
610 569 def _git_stream(environ, start_response):
611 570 app = EchoApp('fake_path', 'fake_name', None)
612 571 return app(environ, start_response)
613 572 return _git_stream
614 573 else:
615 574 @wsgiapp
616 575 def _git_stream(environ, start_response):
617 576 log.debug('http-app: handling git stream')
618 577 repo_path = environ['HTTP_X_RC_REPO_PATH']
619 578 repo_name = environ['HTTP_X_RC_REPO_NAME']
620 579 packed_config = base64.b64decode(
621 580 environ['HTTP_X_RC_REPO_CONFIG'])
622 581 config = msgpack.unpackb(packed_config)
623 582
624 583 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
625 584 self.set_env_from_config(environ, config)
626 585
627 586 content_type = environ.get('CONTENT_TYPE', '')
628 587
629 588 path = environ['PATH_INFO']
630 589 is_lfs_request = GIT_LFS_CONTENT_TYPE in content_type
631 590 log.debug(
632 591 'LFS: Detecting if request `%s` is LFS server path based '
633 592 'on content type:`%s`, is_lfs:%s',
634 593 path, content_type, is_lfs_request)
635 594
636 595 if not is_lfs_request:
637 596 # fallback detection by path
638 597 if GIT_LFS_PROTO_PAT.match(path):
639 598 is_lfs_request = True
640 599 log.debug(
641 600 'LFS: fallback detection by path of: `%s`, is_lfs:%s',
642 601 path, is_lfs_request)
643 602
644 603 if is_lfs_request:
645 604 app = scm_app.create_git_lfs_wsgi_app(
646 605 repo_path, repo_name, config)
647 606 else:
648 607 app = scm_app.create_git_wsgi_app(
649 608 repo_path, repo_name, config)
650 609
651 610 log.debug('http-app: starting app handler '
652 611 'with %s and processing the request', app)
653 612
654 613 return app(environ, start_response)
655 614
656 615 return _git_stream
657 616
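(Both stream handlers read the same X-RC-* headers from the WSGI environ; a hedged sketch of building such a request - the repo values and the config payload are made up.)

    import base64
    import msgpack

    repo_config = [('rhodecode', '', '{"username": "admin"}')]
    headers = {
        'X-RC-Repo-Path': '/srv/repos/demo',
        'X-RC-Repo-Name': 'demo',
        'X-RC-Path-Info': '/demo/info/refs',
        'X-RC-Repo-Config': base64.b64encode(
            msgpack.packb(repo_config)).decode('ascii'),
    }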
658 617 def handle_vcs_exception(self, exception, request):
659 618 _vcs_kind = getattr(exception, '_vcs_kind', '')
660 619 if _vcs_kind == 'repo_locked':
661 620 # Get custom repo-locked status code if present.
662 621 status_code = request.headers.get('X-RC-Locked-Status-Code')
663 622 return HTTPRepoLocked(
664 623 title=exception.message, status_code=status_code)
665 624
666 625 elif _vcs_kind == 'repo_branch_protected':
667 626 # Get custom repo-branch-protected status code if present.
668 627 return HTTPRepoBranchProtected(title=exception.message)
669 628
670 629 exc_info = request.exc_info
671 630 store_exception(id(exc_info), exc_info)
672 631
673 632 traceback_info = 'unavailable'
674 633 if request.exc_info:
675 634 exc_type, exc_value, exc_tb = request.exc_info
676 635 traceback_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
677 636
678 637 log.error(
679 638 'error occurred handling this request for path: %s, \n tb: %s',
680 639 request.path, traceback_info)
640
641 statsd = request.registry.statsd
642 if statsd:
643 exc_type = "{}.{}".format(exception.__class__.__module__, exception.__class__.__name__)
644 statsd.incr('vcsserver_exception_total',
645 tags=["type:{}".format(exc_type)])
681 646 raise exception
682 647
683 648
684 649 class ResponseFilter(object):
685 650
686 651 def __init__(self, start_response):
687 652 self._start_response = start_response
688 653
689 654 def __call__(self, status, response_headers, exc_info=None):
690 655 headers = tuple(
691 656 (h, v) for h, v in response_headers
692 657 if not wsgiref.util.is_hop_by_hop(h))
693 658 return self._start_response(status, headers, exc_info)
694 659
695 660
661 def sanitize_settings_and_apply_defaults(global_config, settings):
662 global_settings_maker = SettingsMaker(global_config)
663 settings_maker = SettingsMaker(settings)
664
665 settings_maker.make_setting('logging.autoconfigure', False, parser='bool')
666
667 logging_conf = os.path.join(os.path.dirname(global_config.get('__file__')), 'logging.ini')
668 settings_maker.enable_logging(logging_conf)
669
670 # Default includes; users may change these
671 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
672 log.debug("Using the following pyramid.includes: %s", pyramid_includes)
673
674 settings_maker.make_setting('__file__', global_config.get('__file__'))
675
676 settings_maker.make_setting('pyramid.default_locale_name', 'en')
677 settings_maker.make_setting('locale', 'en_US.UTF-8')
678
679 settings_maker.make_setting('core.binary_dir', '')
680
681 temp_store = tempfile.gettempdir()
682 default_cache_dir = os.path.join(temp_store, 'rc_cache')
683 # save the default cache dir and use it for all backends later.
684 default_cache_dir = settings_maker.make_setting(
685 'cache_dir',
686 default=default_cache_dir, default_when_empty=True,
687 parser='dir:ensured')
688
689 # exception store cache
690 settings_maker.make_setting(
691 'exception_tracker.store_path',
692 default=os.path.join(default_cache_dir, 'exc_store'), default_when_empty=True,
693 parser='dir:ensured'
694 )
695
696 # repo_object cache defaults
697 settings_maker.make_setting(
698 'rc_cache.repo_object.backend',
699 default='dogpile.cache.rc.file_namespace',
700 parser='string')
701 settings_maker.make_setting(
702 'rc_cache.repo_object.expiration_time',
703 default=30 * 24 * 60 * 60, # 30days
704 parser='int')
705 settings_maker.make_setting(
706 'rc_cache.repo_object.arguments.filename',
707 default=os.path.join(default_cache_dir, 'vcsserver_cache_repo_object.db'),
708 parser='string')
709
710 # statsd
711 settings_maker.make_setting('statsd.enabled', False, parser='bool')
712 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
713 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
714 settings_maker.make_setting('statsd.statsd_prefix', '')
715 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
716
717 settings_maker.env_expand()
718
719
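(A hedged sketch of settings these defaults govern, as they might look before sanitization; the keys follow the make_setting calls above, the values are illustrative.)

    settings = {
        'cache_dir': '/var/tmp/rc_cache',        # parsed as dir:ensured
        'exception_tracker.store_path': '',      # empty -> <cache_dir>/exc_store
        'statsd.enabled': 'true',                # parsed as bool
        'statsd.statsd_host': 'statsd-exporter',
        'statsd.statsd_port': '9125',            # parsed as int
    }
    # sanitize_settings_and_apply_defaults({'__file__': 'vcsserver.ini'}, settings)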
696 720 def main(global_config, **settings):
721 start_time = time.time()
722 log.info('Pyramid app config starting')
723
697 724 if MercurialFactory:
698 725 hgpatches.patch_largefiles_capabilities()
699 726 hgpatches.patch_subrepo_type_mapping()
700 727
701 app = HTTPApplication(settings=settings, global_config=global_config)
702 return app.wsgi_app()
728 # Fill in and sanitize the defaults & do ENV expansion
729 sanitize_settings_and_apply_defaults(global_config, settings)
730
731 # init and bootstrap StatsdClient
732 StatsdClient.setup(settings)
733
734 pyramid_app = HTTPApplication(settings=settings, global_config=global_config).wsgi_app()
735 total_time = time.time() - start_time
736 log.info('Pyramid app `%s` created and configured in %.2fs',
737 getattr(pyramid_app, 'func_name', 'pyramid_app'), total_time)
738 return pyramid_app
739
740
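(For context, a sketch of driving this factory directly; the module path vcsserver.http_main and the port are assumptions.)

    from wsgiref.simple_server import make_server
    from vcsserver.http_main import main  # assumed module path

    app = main({'__file__': '/etc/rhodecode/vcsserver.ini'},
               **{'statsd.enabled': 'false'})
    make_server('127.0.0.1', 9900, app).serve_forever()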
@@ -1,167 +1,173 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # RhodeCode VCSServer provides access to different vcs backends via network.
4 4 # Copyright (C) 2014-2020 RhodeCode GmbH
5 5 #
6 6 # This program is free software; you can redistribute it and/or modify
7 7 # it under the terms of the GNU General Public License as published by
8 8 # the Free Software Foundation; either version 3 of the License, or
9 9 # (at your option) any later version.
10 10 #
11 11 # This program is distributed in the hope that it will be useful,
12 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 14 # GNU General Public License for more details.
15 15 #
16 16 # You should have received a copy of the GNU General Public License
17 17 # along with this program; if not, write to the Free Software Foundation,
18 18 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 19
20 20
21 21 import os
22 22 import time
23 23 import datetime
24 24 import msgpack
25 25 import logging
26 26 import traceback
27 27 import tempfile
28 28
29 29 log = logging.getLogger(__name__)
30 30
31 31 # NOTE: Any changes should be synced with exc_tracking at rhodecode.lib.exc_tracking
32 32 global_prefix = 'vcsserver'
33 33 exc_store_dir_name = 'rc_exception_store_v1'
34 34
35 35
36 36 def exc_serialize(exc_id, tb, exc_type):
37 37
38 38 data = {
39 39 'version': 'v1',
40 40 'exc_id': exc_id,
41 41 'exc_utc_date': datetime.datetime.utcnow().isoformat(),
42 42 'exc_timestamp': repr(time.time()),
43 43 'exc_message': tb,
44 44 'exc_type': exc_type,
45 45 }
46 46 return msgpack.packb(data), data
47 47
48 48
49 49 def exc_unserialize(tb):
50 50 return msgpack.unpackb(tb)
51 51
52 52
53 53 def get_exc_store():
54 54 """
55 55 Get the exception store, creating it if it does not exist
56 56 """
57 57 import vcsserver as app
58 58
59 59 exc_store_dir = app.CONFIG.get('exception_tracker.store_path', '') or tempfile.gettempdir()
60 60 _exc_store_path = os.path.join(exc_store_dir, exc_store_dir_name)
61 61
62 62 _exc_store_path = os.path.abspath(_exc_store_path)
63 63 if not os.path.isdir(_exc_store_path):
64 64 os.makedirs(_exc_store_path)
65 65 log.debug('Initializing exceptions store at %s', _exc_store_path)
66 66 return _exc_store_path
67 67
68 68
69 def _store_exception(exc_id, exc_info, prefix):
69 def _store_exception(exc_id, exc_info, prefix, request_path=''):
70 70 exc_type, exc_value, exc_traceback = exc_info
71 71
72 72 tb = ''.join(traceback.format_exception(
73 73 exc_type, exc_value, exc_traceback, None))
74 74
75 75 detailed_tb = getattr(exc_value, '_org_exc_tb', None)
76 76
77 77 if detailed_tb:
78 78 if isinstance(detailed_tb, str):
79 79 remote_tb = [detailed_tb]
80 80
81 81 tb += (
82 82 '\n+++ BEG SOURCE EXCEPTION +++\n\n'
83 83 '{}\n'
84 84 '+++ END SOURCE EXCEPTION +++\n'
85 85 ''.format('\n'.join(remote_tb))
86 86 )
87 87
88 88 # Make sure remote_tb does not also appear in the frame
89 89 del remote_tb
90 90
91 91 exc_type_name = exc_type.__name__
92 92 exc_store_path = get_exc_store()
93 93 exc_data, org_data = exc_serialize(exc_id, tb, exc_type_name)
94 94 exc_pref_id = '{}_{}_{}'.format(exc_id, prefix, org_data['exc_timestamp'])
95 95 if not os.path.isdir(exc_store_path):
96 96 os.makedirs(exc_store_path)
97 97 stored_exc_path = os.path.join(exc_store_path, exc_pref_id)
98 98 with open(stored_exc_path, 'wb') as f:
99 99 f.write(exc_data)
100 100 log.debug('Stored generated exception %s as: %s', exc_id, stored_exc_path)
101 101
102 log.error(
103 'error occurred handling this request.\n'
104 'Path: `%s`, tb: %s',
105 request_path, tb)
102 106
103 def store_exception(exc_id, exc_info, prefix=global_prefix):
107
108 def store_exception(exc_id, exc_info, prefix=global_prefix, request_path=''):
104 109 """
105 110 Example usage::
106 111
107 112 exc_info = sys.exc_info()
108 113 store_exception(id(exc_info), exc_info)
109 114 """
110 115
111 116 try:
112 _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix)
117 _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix,
118 request_path=request_path)
113 119 except Exception:
114 120 log.exception('Failed to store exception `%s` information', exc_id)
115 121 # there's no way this can fail, it will crash server badly if it does.
116 122 pass
117 123
118 124
119 125 def _find_exc_file(exc_id, prefix=global_prefix):
120 126 exc_store_path = get_exc_store()
121 127 if prefix:
122 128 exc_id = '{}_{}'.format(exc_id, prefix)
123 129 else:
124 130 # search without a prefix
125 131 exc_id = '{}'.format(exc_id)
126 132
127 133 # we need to search the store for a filename starting with the pattern above
128 134 for fname in os.listdir(exc_store_path):
129 135 if fname.startswith(exc_id):
130 136 exc_id = os.path.join(exc_store_path, fname)
131 137 break
132 138 continue
133 139 else:
134 140 exc_id = None
135 141
136 142 return exc_id
137 143
138 144
139 145 def _read_exception(exc_id, prefix):
140 146 exc_id_file_path = _find_exc_file(exc_id=exc_id, prefix=prefix)
141 147 if exc_id_file_path:
142 148 with open(exc_id_file_path, 'rb') as f:
143 149 return exc_unserialize(f.read())
144 150 else:
145 151 log.debug('Exception File `%s` not found', exc_id_file_path)
146 152 return None
147 153
148 154
149 155 def read_exception(exc_id, prefix=global_prefix):
150 156 try:
151 157 return _read_exception(exc_id=exc_id, prefix=prefix)
152 158 except Exception:
153 159 log.exception('Failed to read exception `%s` information', exc_id)
154 160 # this must never raise; letting it propagate would crash the server badly.
155 161 return None
156 162
157 163
158 164 def delete_exception(exc_id, prefix=global_prefix):
159 165 try:
160 166 exc_id_file_path = _find_exc_file(exc_id, prefix=prefix)
161 167 if exc_id_file_path:
162 168 os.remove(exc_id_file_path)
163 169
164 170 except Exception:
165 171 log.exception('Failed to remove exception `%s` information', exc_id)
166 172 # this must never raise; letting it propagate would crash the server badly.
167 173 pass
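(A short usage sketch of the store/read/delete round-trip defined above, assuming this module's functions are in scope.)

    import sys

    try:
        1 / 0
    except Exception:
        exc_info = sys.exc_info()
        exc_id = id(exc_info)
        store_exception(exc_id, exc_info, request_path='/demo')
        data = read_exception(exc_id)  # dict with 'exc_message', 'exc_type', ...
        delete_exception(exc_id)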
@@ -1,72 +1,79 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import logging
19 19 from dogpile.cache import register_backend
20 20
21 21 register_backend(
22 22 "dogpile.cache.rc.memory_lru", "vcsserver.lib.rc_cache.backends",
23 23 "LRUMemoryBackend")
24 24
25 25 register_backend(
26 26 "dogpile.cache.rc.file_namespace", "vcsserver.lib.rc_cache.backends",
27 27 "FileNamespaceBackend")
28 28
29 29 register_backend(
30 30 "dogpile.cache.rc.redis", "vcsserver.lib.rc_cache.backends",
31 31 "RedisPickleBackend")
32 32
33 33 register_backend(
34 34 "dogpile.cache.rc.redis_msgpack", "vcsserver.lib.rc_cache.backends",
35 35 "RedisMsgPackBackend")
36 36
37 37
38 38 log = logging.getLogger(__name__)
39 39
40 40 from . import region_meta
41 from .utils import (get_default_cache_settings, backend_key_generator, make_region)
41 from .utils import (
42 get_default_cache_settings, backend_key_generator, get_or_create_region,
43 clear_cache_namespace, make_region)
42 44
43 45
44 46 def configure_dogpile_cache(settings):
45 47 cache_dir = settings.get('cache_dir')
46 48 if cache_dir:
47 49 region_meta.dogpile_config_defaults['cache_dir'] = cache_dir
48 50
49 51 rc_cache_data = get_default_cache_settings(settings, prefixes=['rc_cache.'])
50 52
51 53 # inspect available namespaces
52 54 avail_regions = set()
53 55 for key in rc_cache_data.keys():
54 56 namespace_name = key.split('.', 1)[0]
55 avail_regions.add(namespace_name)
56 log.debug('dogpile: found following cache regions: %s', avail_regions)
57 if namespace_name in avail_regions:
58 continue
57 59
58 # register them into namespace
59 for region_name in avail_regions:
60 avail_regions.add(namespace_name)
61 log.debug('dogpile: found following cache regions: %s', namespace_name)
62
60 63 new_region = make_region(
61 name=region_name,
64 name=namespace_name,
62 65 function_key_generator=None
63 66 )
64 67
65 new_region.configure_from_config(settings, 'rc_cache.{}.'.format(region_name))
68 new_region.configure_from_config(settings, 'rc_cache.{}.'.format(namespace_name))
66 69 new_region.function_key_generator = backend_key_generator(new_region.actual_backend)
67 log.debug('dogpile: registering a new region %s[%s]', region_name, new_region.__dict__)
68 region_meta.dogpile_cache_regions[region_name] = new_region
70 if log.isEnabledFor(logging.DEBUG):
71 region_args = dict(backend=new_region.actual_backend.__class__,
72 region_invalidator=new_region.region_invalidator.__class__)
73 log.debug('dogpile: registering a new region `%s` %s', namespace_name, region_args)
74
75 region_meta.dogpile_cache_regions[namespace_name] = new_region
69 76
70 77
71 78 def includeme(config):
72 79 configure_dogpile_cache(config.registry.settings)
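(To make the region grouping concrete, a hedged sketch of settings that configure_dogpile_cache would turn into a single 'repo_object' region; the values are illustrative.)

    settings = {
        'cache_dir': '/var/tmp/rc_cache',
        'rc_cache.repo_object.backend': 'dogpile.cache.rc.file_namespace',
        'rc_cache.repo_object.expiration_time': 2592000,  # 30 days
        'rc_cache.repo_object.arguments.filename':
            '/var/tmp/rc_cache/vcsserver_cache_repo_object.db',
    }
    configure_dogpile_cache(settings)  # registers the 'repo_object' region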
@@ -1,253 +1,329 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import time
19 19 import errno
20 20 import logging
21 21
22 22 import msgpack
23 23 import redis
24 24
25 25 from dogpile.cache.api import CachedValue
26 26 from dogpile.cache.backends import memory as memory_backend
27 27 from dogpile.cache.backends import file as file_backend
28 28 from dogpile.cache.backends import redis as redis_backend
29 29 from dogpile.cache.backends.file import NO_VALUE, FileLock
30 30 from dogpile.cache.util import memoized_property
31 31
32 from pyramid.settings import asbool
33
32 34 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
35 from vcsserver.utils import safe_str, safe_unicode
33 36
34 37
35 38 _default_max_size = 1024
36 39
37 40 log = logging.getLogger(__name__)
38 41
39 42
40 43 class LRUMemoryBackend(memory_backend.MemoryBackend):
41 44 key_prefix = 'lru_mem_backend'
42 45 pickle_values = False
43 46
44 47 def __init__(self, arguments):
45 48 max_size = arguments.pop('max_size', _default_max_size)
46 49
47 50 LRUDictClass = LRUDict
48 51 if arguments.pop('log_key_count', None):
49 52 LRUDictClass = LRUDictDebug
50 53
51 54 arguments['cache_dict'] = LRUDictClass(max_size)
52 55 super(LRUMemoryBackend, self).__init__(arguments)
53 56
54 57 def delete(self, key):
55 58 try:
56 59 del self._cache[key]
57 60 except KeyError:
58 61 # we don't care if key isn't there at deletion
59 62 pass
60 63
61 64 def delete_multi(self, keys):
62 65 for key in keys:
63 66 self.delete(key)
64 67
65 68
66 69 class PickleSerializer(object):
67 70
68 71 def _dumps(self, value, safe=False):
69 72 try:
70 73 return pickle.dumps(value)
71 74 except Exception:
72 75 if safe:
73 76 return NO_VALUE
74 77 else:
75 78 raise
76 79
77 80 def _loads(self, value, safe=True):
78 81 try:
79 82 return pickle.loads(value)
80 83 except Exception:
81 84 if safe:
82 85 return NO_VALUE
83 86 else:
84 87 raise
85 88
86 89
87 90 class MsgPackSerializer(object):
88 91
89 92 def _dumps(self, value, safe=False):
90 93 try:
91 94 return msgpack.packb(value)
92 95 except Exception:
93 96 if safe:
94 97 return NO_VALUE
95 98 else:
96 99 raise
97 100
98 101 def _loads(self, value, safe=True):
99 102 """
100 103 pickle maintains the `CachedValue` wrapper around the tuple;
101 104 msgpack does not, so it must be added back in.
102 105 """
103 106 try:
104 107 value = msgpack.unpackb(value, use_list=False)
105 108 return CachedValue(*value)
106 109 except Exception:
107 110 if safe:
108 111 return NO_VALUE
109 112 else:
110 113 raise
111 114
112 115
113 116 import fcntl
114 117 flock_org = fcntl.flock
115 118
116 119
117 120 class CustomLockFactory(FileLock):
118 121
119 122 pass
120 123
121 124
122 125 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
123 126 key_prefix = 'file_backend'
124 127
125 128 def __init__(self, arguments):
126 129 arguments['lock_factory'] = CustomLockFactory
130 db_file = arguments.get('filename')
131
132 log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
133 try:
127 134 super(FileNamespaceBackend, self).__init__(arguments)
135 except Exception:
136 log.exception('Failed to initialize db at: %s', db_file)
137 raise
128 138
129 139 def __repr__(self):
130 140 return '{} `{}`'.format(self.__class__, self.filename)
131 141
132 142 def list_keys(self, prefix=''):
133 143 prefix = '{}:{}'.format(self.key_prefix, prefix)
134 144
135 145 def cond(v):
136 146 if not prefix:
137 147 return True
138 148
139 149 if v.startswith(prefix):
140 150 return True
141 151 return False
142 152
143 153 with self._dbm_file(True) as dbm:
144
154 try:
145 155 return filter(cond, dbm.keys())
156 except Exception:
157 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
158 raise
146 159
147 160 def get_store(self):
148 161 return self.filename
149 162
150 def get(self, key):
163 def _dbm_get(self, key):
151 164 with self._dbm_file(False) as dbm:
152 165 if hasattr(dbm, 'get'):
153 166 value = dbm.get(key, NO_VALUE)
154 167 else:
155 168 # gdbm objects lack a .get method
156 169 try:
157 170 value = dbm[key]
158 171 except KeyError:
159 172 value = NO_VALUE
160 173 if value is not NO_VALUE:
161 174 value = self._loads(value)
162 175 return value
163 176
177 def get(self, key):
178 try:
179 return self._dbm_get(key)
180 except Exception:
181 log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
182 raise
183
164 184 def set(self, key, value):
165 185 with self._dbm_file(True) as dbm:
166 186 dbm[key] = self._dumps(value)
167 187
168 188 def set_multi(self, mapping):
169 189 with self._dbm_file(True) as dbm:
170 190 for key, value in mapping.items():
171 191 dbm[key] = self._dumps(value)
172 192
173 193
174 194 class BaseRedisBackend(redis_backend.RedisBackend):
195 key_prefix = ''
196
197 def __init__(self, arguments):
198 super(BaseRedisBackend, self).__init__(arguments)
199 self._lock_timeout = self.lock_timeout
200 self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))
201
202 if self._lock_auto_renewal and not self._lock_timeout:
203 # set default timeout for auto_renewal
204 self._lock_timeout = 30
175 205
176 206 def _create_client(self):
177 207 args = {}
178 208
179 209 if self.url is not None:
180 210 args.update(url=self.url)
181 211
182 212 else:
183 213 args.update(
184 214 host=self.host, password=self.password,
185 215 port=self.port, db=self.db
186 216 )
187 217
188 218 connection_pool = redis.ConnectionPool(**args)
189 219
190 220 return redis.StrictRedis(connection_pool=connection_pool)
191 221
192 222 def list_keys(self, prefix=''):
193 223 prefix = '{}:{}*'.format(self.key_prefix, prefix)
194 224 return self.client.keys(prefix)
195 225
196 226 def get_store(self):
197 227 return self.client.connection_pool
198 228
199 229 def get(self, key):
200 230 value = self.client.get(key)
201 231 if value is None:
202 232 return NO_VALUE
203 233 return self._loads(value)
204 234
205 235 def get_multi(self, keys):
206 236 if not keys:
207 237 return []
208 238 values = self.client.mget(keys)
209 239 loads = self._loads
210 240 return [
211 241 loads(v) if v is not None else NO_VALUE
212 242 for v in values]
213 243
214 244 def set(self, key, value):
215 245 if self.redis_expiration_time:
216 246 self.client.setex(key, self.redis_expiration_time,
217 247 self._dumps(value))
218 248 else:
219 249 self.client.set(key, self._dumps(value))
220 250
221 251 def set_multi(self, mapping):
222 252 dumps = self._dumps
223 253 mapping = dict(
224 254 (k, dumps(v))
225 255 for k, v in mapping.items()
226 256 )
227 257
228 258 if not self.redis_expiration_time:
229 259 self.client.mset(mapping)
230 260 else:
231 261 pipe = self.client.pipeline()
232 262 for key, value in mapping.items():
233 263 pipe.setex(key, self.redis_expiration_time, value)
234 264 pipe.execute()
235 265
236 266 def get_mutex(self, key):
237 u = redis_backend.u
238 267 if self.distributed_lock:
239 lock_key = u('_lock_{0}').format(key)
240 log.debug('Trying to acquire Redis lock for key %s', lock_key)
241 return self.client.lock(lock_key, self.lock_timeout, self.lock_sleep)
268 lock_key = u'_lock_{0}'.format(safe_unicode(key))
269 return get_mutex_lock(self.client, lock_key, self._lock_timeout,
270 auto_renewal=self._lock_auto_renewal)
242 271 else:
243 272 return None
244 273
245 274
246 275 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
247 276 key_prefix = 'redis_pickle_backend'
248 277 pass
249 278
250 279
251 280 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
252 281 key_prefix = 'redis_msgpack_backend'
253 282 pass
283
284
285 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
286 import redis_lock
287
288 class _RedisLockWrapper(object):
289 """LockWrapper for redis_lock"""
290
291 @classmethod
292 def get_lock(cls):
293 return redis_lock.Lock(
294 redis_client=client,
295 name=lock_key,
296 expire=lock_timeout,
297 auto_renewal=auto_renewal,
298 strict=True,
299 )
300
301 def __repr__(self):
302 return "{}:{}".format(self.__class__.__name__, lock_key)
303
304 def __str__(self):
305 return "{}:{}".format(self.__class__.__name__, lock_key)
306
307 def __init__(self):
308 self.lock = self.get_lock()
309 self.lock_key = lock_key
310
311 def acquire(self, wait=True):
312 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
313 try:
314 acquired = self.lock.acquire(wait)
315 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
316 return acquired
317 except redis_lock.AlreadyAcquired:
318 return False
319 except redis_lock.AlreadyStarted:
320 # a refresh thread exists, which also means we acquired the lock
321 return True
322
323 def release(self):
324 try:
325 self.lock.release()
326 except redis_lock.NotAcquired:
327 pass
328
329 return _RedisLockWrapper()
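(A usage sketch for the wrapper above; it needs the python-redis-lock package imported inside get_mutex_lock and a reachable Redis instance, both assumed here.)

    import redis

    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    mutex = get_mutex_lock(client, u'_lock_repo_object', lock_timeout=30,
                           auto_renewal=True)
    if mutex.acquire(wait=True):
        try:
            pass  # critical section guarded by the distributed lock
        finally:
            mutex.release()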
@@ -1,153 +1,263 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 import time
19 20 import logging
20 21 import functools
21 from decorator import decorate
22 22
23 23 from dogpile.cache import CacheRegion
24 24 from dogpile.cache.util import compat
25 25
26 26 from vcsserver.utils import safe_str, sha1
27 27
28 from vcsserver.lib.rc_cache import region_meta
28 29
29 30 log = logging.getLogger(__name__)
30 31
31 32
32 33 class RhodeCodeCacheRegion(CacheRegion):
33 34
34 35 def conditional_cache_on_arguments(
35 36 self, namespace=None,
36 37 expiration_time=None,
37 38 should_cache_fn=None,
38 39 to_str=compat.string_type,
39 40 function_key_generator=None,
40 41 condition=True):
41 42 """
42 43 Custom conditional decorator that will not touch any dogpile internals if
43 44 the condition isn't met. This works a bit differently than should_cache_fn,
44 45 and it's faster in cases where we never want to compute cached values.
45 46 """
46 47 expiration_time_is_callable = compat.callable(expiration_time)
47 48
48 49 if function_key_generator is None:
49 50 function_key_generator = self.function_key_generator
50 51
52 # workaround for py2 and cython problems, this block should be removed
53 # once we've migrated to py3
54 if 'cython' == 'cython':
55 def decorator(fn):
56 if to_str is compat.string_type:
57 # backwards compatible
58 key_generator = function_key_generator(namespace, fn)
59 else:
60 key_generator = function_key_generator(namespace, fn, to_str=to_str)
61
62 @functools.wraps(fn)
63 def decorate(*arg, **kw):
64 key = key_generator(*arg, **kw)
65
66 @functools.wraps(fn)
67 def creator():
68 return fn(*arg, **kw)
69
70 if not condition:
71 return creator()
72
73 timeout = expiration_time() if expiration_time_is_callable \
74 else expiration_time
75
76 return self.get_or_create(key, creator, timeout, should_cache_fn)
77
78 def invalidate(*arg, **kw):
79 key = key_generator(*arg, **kw)
80 self.delete(key)
81
82 def set_(value, *arg, **kw):
83 key = key_generator(*arg, **kw)
84 self.set(key, value)
85
86 def get(*arg, **kw):
87 key = key_generator(*arg, **kw)
88 return self.get(key)
89
90 def refresh(*arg, **kw):
91 key = key_generator(*arg, **kw)
92 value = fn(*arg, **kw)
93 self.set(key, value)
94 return value
95
96 decorate.set = set_
97 decorate.invalidate = invalidate
98 decorate.refresh = refresh
99 decorate.get = get
100 decorate.original = fn
101 decorate.key_generator = key_generator
102 decorate.__wrapped__ = fn
103
104 return decorate
105 return decorator
106
51 107 def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
52 108
53 109 if not condition:
54 log.debug('Calling un-cached func:%s', user_func.func_name)
55 return user_func(*arg, **kw)
110 log.debug('Calling un-cached method:%s', user_func.func_name)
111 start = time.time()
112 result = user_func(*arg, **kw)
113 total = time.time() - start
114 log.debug('un-cached method:%s took %.4fs', user_func.func_name, total)
115 return result
56 116
57 117 key = key_generator(*arg, **kw)
58 118
59 119 timeout = expiration_time() if expiration_time_is_callable \
60 120 else expiration_time
61 121
62 log.debug('Calling cached fn:%s', user_func.func_name)
122 log.debug('Calling cached method:`%s`', user_func.func_name)
63 123 return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))
64 124
65 125 def cache_decorator(user_func):
66 126 if to_str is compat.string_type:
67 127 # backwards compatible
68 128 key_generator = function_key_generator(namespace, user_func)
69 129 else:
70 130 key_generator = function_key_generator(namespace, user_func, to_str=to_str)
71 131
72 132 def refresh(*arg, **kw):
73 133 """
74 134 Like invalidate, but regenerates the value instead
75 135 """
76 136 key = key_generator(*arg, **kw)
77 137 value = user_func(*arg, **kw)
78 138 self.set(key, value)
79 139 return value
80 140
81 141 def invalidate(*arg, **kw):
82 142 key = key_generator(*arg, **kw)
83 143 self.delete(key)
84 144
85 145 def set_(value, *arg, **kw):
86 146 key = key_generator(*arg, **kw)
87 147 self.set(key, value)
88 148
89 149 def get(*arg, **kw):
90 150 key = key_generator(*arg, **kw)
91 151 return self.get(key)
92 152
93 153 user_func.set = set_
94 154 user_func.invalidate = invalidate
95 155 user_func.get = get
96 156 user_func.refresh = refresh
97 157 user_func.key_generator = key_generator
98 158 user_func.original = user_func
99 159
100 160 # Use `decorate` to preserve the signature of :param:`user_func`.
101
102 return decorate(user_func, functools.partial(
161 return decorator.decorate(user_func, functools.partial(
103 162 get_or_create_for_user_func, key_generator))
104 163
105 164 return cache_decorator
106 165
107 166
108 167 def make_region(*arg, **kw):
109 168 return RhodeCodeCacheRegion(*arg, **kw)
110 169
111 170
112 171 def get_default_cache_settings(settings, prefixes=None):
113 172 prefixes = prefixes or []
114 173 cache_settings = {}
115 174 for key in settings.keys():
116 175 for prefix in prefixes:
117 176 if key.startswith(prefix):
118 177 name = key.split(prefix)[1].strip()
119 178 val = settings[key]
120 179 if isinstance(val, compat.string_types):
121 180 val = val.strip()
122 181 cache_settings[name] = val
123 182 return cache_settings
124 183
125 184
126 185 def compute_key_from_params(*args):
127 186 """
128 187 Helper to compute a cache key from the given params, used by the cache manager
129 188 """
130 189 return sha1("_".join(map(safe_str, args)))
131 190
132 191
133 192 def backend_key_generator(backend):
134 193 """
135 194 Special wrapper that also sends over the backend to the key generator
136 195 """
137 196 def wrapper(namespace, fn):
138 197 return key_generator(backend, namespace, fn)
139 198 return wrapper
140 199
141 200
142 201 def key_generator(backend, namespace, fn):
143 202 fname = fn.__name__
144 203
145 204 def generate_key(*args):
146 205 backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
147 206 namespace_pref = namespace or 'default_namespace'
148 207 arg_key = compute_key_from_params(*args)
149 208 final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)
150 209
151 210 return final_key
152 211
153 212 return generate_key
213
214
215 def get_or_create_region(region_name, region_namespace=None):
216 from vcsserver.lib.rc_cache.backends import FileNamespaceBackend
217 region_obj = region_meta.dogpile_cache_regions.get(region_name)
218 if not region_obj:
219 raise EnvironmentError(
220 'Region `{}` not configured: {}.'.format(
221 region_name, region_meta.dogpile_cache_regions.keys()))
222
223 region_uid_name = '{}:{}'.format(region_name, region_namespace)
224 if isinstance(region_obj.actual_backend, FileNamespaceBackend):
225 region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
226 if region_exist:
227 log.debug('Using already configured region: %s', region_namespace)
228 return region_exist
229 cache_dir = region_meta.dogpile_config_defaults['cache_dir']
230 expiration_time = region_obj.expiration_time
231
232 if not os.path.isdir(cache_dir):
233 os.makedirs(cache_dir)
234 new_region = make_region(
235 name=region_uid_name,
236 function_key_generator=backend_key_generator(region_obj.actual_backend)
237 )
238 namespace_filename = os.path.join(
239 cache_dir, "{}.cache.dbm".format(region_namespace))
240 # special backend type that allows one DB file per namespace
241 new_region.configure(
242 backend='dogpile.cache.rc.file_namespace',
243 expiration_time=expiration_time,
244 arguments={"filename": namespace_filename}
245 )
246
247 # create and save in region caches
248 log.debug('configuring new region: %s', region_uid_name)
249 region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region
250
251 return region_obj
252
253
254 def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
255 region = get_or_create_region(cache_region, cache_namespace_uid)
256 cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
257 num_delete_keys = len(cache_keys)
258 if invalidate:
259 region.invalidate(hard=False)
260 else:
261 if num_delete_keys:
262 region.delete_multi(cache_keys)
263 return num_delete_keys
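(Tying the helpers together, an illustrative sketch in the style of the remote objects below; it assumes a 'repo_object' region was configured earlier, and the function body and namespace are made up.)

    region = get_or_create_region('repo_object', 'repo_object:demo')

    @region.conditional_cache_on_arguments(condition=True)
    def _node_type(_repo_id, _path):
        return 'dir'  # stand-in for an expensive backend lookup

    _node_type('demo', '/trunk')  # computed once, then served from cache
    clear_cache_namespace('repo_object', 'repo_object:demo')  # drop cached keys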
@@ -1,791 +1,867 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18
19 19
20 20 import os
21 21 import subprocess
22 import time
22 23 from urllib.error import URLError
23 24 import urllib.parse
24 25 import logging
25 26 import posixpath as vcspath
26 27 import io
27 28 import urllib.request, urllib.parse, urllib.error
28 29 import traceback
29 30
30 31 import svn.client
31 32 import svn.core
32 33 import svn.delta
33 34 import svn.diff
34 35 import svn.fs
35 36 import svn.repos
36 37
37 38 from vcsserver import svn_diff, exceptions, subprocessio, settings
38 from vcsserver.base import RepoFactory, raise_from_original
39 from vcsserver.base import RepoFactory, raise_from_original, ArchiveNode, archive_repo
40 from vcsserver.exceptions import NoContentException
41 from vcsserver.utils import safe_str
39 42 from vcsserver.vcs_base import RemoteBase
40 43
41 44 log = logging.getLogger(__name__)
42 45
43 46
44 47 svn_compatible_versions_map = {
45 48 'pre-1.4-compatible': '1.3',
46 49 'pre-1.5-compatible': '1.4',
47 50 'pre-1.6-compatible': '1.5',
48 51 'pre-1.8-compatible': '1.7',
49 52 'pre-1.9-compatible': '1.8',
50 53 }
51 54
52 55 current_compatible_version = '1.12'
53 56
54 57
55 58 def reraise_safe_exceptions(func):
56 59 """Decorator for converting svn exceptions to something neutral."""
57 60 def wrapper(*args, **kwargs):
58 61 try:
59 62 return func(*args, **kwargs)
60 63 except Exception as e:
61 64 if not hasattr(e, '_vcs_kind'):
62 65 log.exception("Unhandled exception in svn remote call")
63 66 raise_from_original(exceptions.UnhandledException(e))
64 67 raise
65 68 return wrapper
66 69
67 70
68 71 class SubversionFactory(RepoFactory):
69 72 repo_type = 'svn'
70 73
71 74 def _create_repo(self, wire, create, compatible_version):
72 75 path = svn.core.svn_path_canonicalize(wire['path'])
73 76 if create:
74 77 fs_config = {'compatible-version': current_compatible_version}
75 78 if compatible_version:
76 79
77 80 compatible_version_string = \
78 81 svn_compatible_versions_map.get(compatible_version) \
79 82 or compatible_version
80 83 fs_config['compatible-version'] = compatible_version_string
81 84
82 85 log.debug('Create SVN repo with config "%s"', fs_config)
83 86 repo = svn.repos.create(path, "", "", None, fs_config)
84 87 else:
85 88 repo = svn.repos.open(path)
86 89
87 90 log.debug('Got SVN object: %s', repo)
88 91 return repo
89 92
90 93 def repo(self, wire, create=False, compatible_version=None):
91 94 """
92 95 Get a repository instance for the given path.
93 96 """
94 97 return self._create_repo(wire, create, compatible_version)
95 98
96 99
97 100 NODE_TYPE_MAPPING = {
98 101 svn.core.svn_node_file: 'file',
99 102 svn.core.svn_node_dir: 'dir',
100 103 }
101 104
102 105
103 106 class SvnRemote(RemoteBase):
104 107
105 108 def __init__(self, factory, hg_factory=None):
106 109 self._factory = factory
107 110 # TODO: Remove once we do not use internal Mercurial objects anymore
108 111 # for subversion
109 112 self._hg_factory = hg_factory
110 113
111 114 @reraise_safe_exceptions
112 115 def discover_svn_version(self):
113 116 try:
114 117 import svn.core
115 118 svn_ver = svn.core.SVN_VERSION
116 119 except ImportError:
117 120 svn_ver = None
118 121 return svn_ver
119 122
120 123 @reraise_safe_exceptions
121 124 def is_empty(self, wire):
122 125
123 126 try:
124 127 return self.lookup(wire, -1) == 0
125 128 except Exception:
126 129 log.exception("failed to read object_store")
127 130 return False
128 131
129 132 def check_url(self, url, config_items):
130 133 # this can throw exception if not installed, but we detect this
131 134 from hgsubversion import svnrepo
132 135
133 136 baseui = self._hg_factory._create_config(config_items)
134 137 # the uuid function gets a valid UUID only from a proper repo,
135 138 # otherwise it throws an exception
136 139 try:
137 140 svnrepo.svnremoterepo(baseui, url).svn.uuid
138 141 except Exception:
139 142 tb = traceback.format_exc()
140 143 log.debug("Invalid Subversion url: `%s`, tb: %s", url, tb)
141 144 raise URLError(
142 145 '"%s" is not a valid Subversion source url.' % (url, ))
143 146 return True
144 147
145 148 def is_path_valid_repository(self, wire, path):
146 149
147 150 # NOTE(marcink): short-circuit the check for an SVN repo:
148 151 # repos.open might be expensive, but we have one cheap
149 152 # precondition we can use first - check for the 'format' file
150 153
151 154 if not os.path.isfile(os.path.join(path, 'format')):
152 155 return False
153 156
154 157 try:
155 158 svn.repos.open(path)
156 159 except svn.core.SubversionException:
157 160 tb = traceback.format_exc()
158 161 log.debug("Invalid Subversion path `%s`, tb: %s", path, tb)
159 162 return False
160 163 return True
161 164
162 165 @reraise_safe_exceptions
163 166 def verify(self, wire,):
164 167 repo_path = wire['path']
165 168 if not self.is_path_valid_repository(wire, repo_path):
166 169 raise Exception(
167 170 "Path %s is not a valid Subversion repository." % repo_path)
168 171
169 172 cmd = ['svnadmin', 'info', repo_path]
170 173 stdout, stderr = subprocessio.run_command(cmd)
171 174 return stdout
172 175
173 176 def lookup(self, wire, revision):
174 177 if revision not in [-1, None, 'HEAD']:
175 178 raise NotImplementedError
176 179 repo = self._factory.repo(wire)
177 180 fs_ptr = svn.repos.fs(repo)
178 181 head = svn.fs.youngest_rev(fs_ptr)
179 182 return head
180 183
181 184 def lookup_interval(self, wire, start_ts, end_ts):
182 185 repo = self._factory.repo(wire)
183 186 fsobj = svn.repos.fs(repo)
184 187 start_rev = None
185 188 end_rev = None
186 189 if start_ts:
187 190 start_ts_svn = apr_time_t(start_ts)
188 191 start_rev = svn.repos.dated_revision(repo, start_ts_svn) + 1
189 192 else:
190 193 start_rev = 1
191 194 if end_ts:
192 195 end_ts_svn = apr_time_t(end_ts)
193 196 end_rev = svn.repos.dated_revision(repo, end_ts_svn)
194 197 else:
195 198 end_rev = svn.fs.youngest_rev(fsobj)
196 199 return start_rev, end_rev
197 200
198 201 def revision_properties(self, wire, revision):
199 202
200 203 cache_on, context_uid, repo_id = self._cache_on(wire)
201 @self.region.conditional_cache_on_arguments(condition=cache_on)
204 region = self._region(wire)
205 @region.conditional_cache_on_arguments(condition=cache_on)
202 206 def _revision_properties(_repo_id, _revision):
203 207 repo = self._factory.repo(wire)
204 208 fs_ptr = svn.repos.fs(repo)
205 209 return svn.fs.revision_proplist(fs_ptr, revision)
206 210 return _revision_properties(repo_id, revision)
207 211
208 212 def revision_changes(self, wire, revision):
209 213
210 214 repo = self._factory.repo(wire)
211 215 fsobj = svn.repos.fs(repo)
212 216 rev_root = svn.fs.revision_root(fsobj, revision)
213 217
214 218 editor = svn.repos.ChangeCollector(fsobj, rev_root)
215 219 editor_ptr, editor_baton = svn.delta.make_editor(editor)
216 220 base_dir = ""
217 221 send_deltas = False
218 222 svn.repos.replay2(
219 223 rev_root, base_dir, svn.core.SVN_INVALID_REVNUM, send_deltas,
220 224 editor_ptr, editor_baton, None)
221 225
222 226 added = []
223 227 changed = []
224 228 removed = []
225 229
226 230 # TODO: CHANGE_ACTION_REPLACE: Figure out where it belongs
227 231 for path, change in editor.changes.items():
228 232 # TODO: Decide what to do with directory nodes. Subversion can add
229 233 # empty directories.
230 234
231 235 if change.item_kind == svn.core.svn_node_dir:
232 236 continue
233 237 if change.action in [svn.repos.CHANGE_ACTION_ADD]:
234 238 added.append(path)
235 239 elif change.action in [svn.repos.CHANGE_ACTION_MODIFY,
236 240 svn.repos.CHANGE_ACTION_REPLACE]:
237 241 changed.append(path)
238 242 elif change.action in [svn.repos.CHANGE_ACTION_DELETE]:
239 243 removed.append(path)
240 244 else:
241 245 raise NotImplementedError(
242 246 "Action %s not supported on path %s" % (
243 247 change.action, path))
244 248
245 249 changes = {
246 250 'added': added,
247 251 'changed': changed,
248 252 'removed': removed,
249 253 }
250 254 return changes
251 255
252 256 @reraise_safe_exceptions
253 257 def node_history(self, wire, path, revision, limit):
254 258 cache_on, context_uid, repo_id = self._cache_on(wire)
255 @self.region.conditional_cache_on_arguments(condition=cache_on)
259 region = self._region(wire)
260 @region.conditional_cache_on_arguments(condition=cache_on)
256 261 def _assert_correct_path(_context_uid, _repo_id, _path, _revision, _limit):
257 262 cross_copies = False
258 263 repo = self._factory.repo(wire)
259 264 fsobj = svn.repos.fs(repo)
260 265 rev_root = svn.fs.revision_root(fsobj, revision)
261 266
262 267 history_revisions = []
263 268 history = svn.fs.node_history(rev_root, path)
264 269 history = svn.fs.history_prev(history, cross_copies)
265 270 while history:
266 271 __, node_revision = svn.fs.history_location(history)
267 272 history_revisions.append(node_revision)
268 273 if limit and len(history_revisions) >= limit:
269 274 break
270 275 history = svn.fs.history_prev(history, cross_copies)
271 276 return history_revisions
272 277 return _assert_correct_path(context_uid, repo_id, path, revision, limit)
273 278
274 279 def node_properties(self, wire, path, revision):
275 280 cache_on, context_uid, repo_id = self._cache_on(wire)
276 @self.region.conditional_cache_on_arguments(condition=cache_on)
281 region = self._region(wire)
282 @region.conditional_cache_on_arguments(condition=cache_on)
277 283 def _node_properties(_repo_id, _path, _revision):
278 284 repo = self._factory.repo(wire)
279 285 fsobj = svn.repos.fs(repo)
280 286 rev_root = svn.fs.revision_root(fsobj, revision)
281 287 return svn.fs.node_proplist(rev_root, path)
282 288 return _node_properties(repo_id, path, revision)
283 289
284 290 def file_annotate(self, wire, path, revision):
285 abs_path = 'file://' + urllib.request.pathname2url(
291 abs_path = 'file://' + urllib.pathname2url(
286 292 vcspath.join(wire['path'], path))
287 293 file_uri = svn.core.svn_path_canonicalize(abs_path)
288 294
289 295 start_rev = svn_opt_revision_value_t(0)
290 296 peg_rev = svn_opt_revision_value_t(revision)
291 297 end_rev = peg_rev
292 298
293 299 annotations = []
294 300
295 301 def receiver(line_no, revision, author, date, line, pool):
296 302 annotations.append((line_no, revision, line))
297 303
298 304 # TODO: Cannot use blame5, missing typemap function in the swig code
299 305 try:
300 306 svn.client.blame2(
301 307 file_uri, peg_rev, start_rev, end_rev,
302 308 receiver, svn.client.create_context())
303 309 except svn.core.SubversionException as exc:
304 310 log.exception("Error during blame operation.")
305 311 raise Exception(
306 312 "Blame not supported or file does not exist at path %s. "
307 313 "Error %s." % (path, exc))
308 314
309 315 return annotations
310 316
311 317 def get_node_type(self, wire, path, revision=None):
312 318
313 319 cache_on, context_uid, repo_id = self._cache_on(wire)
314 @self.region.conditional_cache_on_arguments(condition=cache_on)
320 region = self._region(wire)
321 @region.conditional_cache_on_arguments(condition=cache_on)
315 322 def _get_node_type(_repo_id, _path, _revision):
316 323 repo = self._factory.repo(wire)
317 324 fs_ptr = svn.repos.fs(repo)
318 325 if _revision is None:
319 326 _revision = svn.fs.youngest_rev(fs_ptr)
320 327 root = svn.fs.revision_root(fs_ptr, _revision)
321 328 node = svn.fs.check_path(root, path)
322 329 return NODE_TYPE_MAPPING.get(node, None)
323 330 return _get_node_type(repo_id, path, revision)
324 331
325 332 def get_nodes(self, wire, path, revision=None):
326 333
327 334 cache_on, context_uid, repo_id = self._cache_on(wire)
328 @self.region.conditional_cache_on_arguments(condition=cache_on)
335 region = self._region(wire)
336 @region.conditional_cache_on_arguments(condition=cache_on)
329 337 def _get_nodes(_repo_id, _path, _revision):
330 338 repo = self._factory.repo(wire)
331 339 fsobj = svn.repos.fs(repo)
332 340 if _revision is None:
333 341 _revision = svn.fs.youngest_rev(fsobj)
334 342 root = svn.fs.revision_root(fsobj, _revision)
335 343 entries = svn.fs.dir_entries(root, path)
336 344 result = []
337 345 for entry_path, entry_info in entries.items():
338 346 result.append(
339 347 (entry_path, NODE_TYPE_MAPPING.get(entry_info.kind, None)))
340 348 return result
341 349 return _get_nodes(repo_id, path, revision)
342 350
343 351 def get_file_content(self, wire, path, rev=None):
344 352 repo = self._factory.repo(wire)
345 353 fsobj = svn.repos.fs(repo)
346 354 if rev is None:
347 355 rev = svn.fs.youngest_revision(fsobj)
348 356 root = svn.fs.revision_root(fsobj, rev)
349 357 content = svn.core.Stream(svn.fs.file_contents(root, path))
350 358 return content.read()
351 359
352 360 def get_file_size(self, wire, path, revision=None):
353 361
354 362 cache_on, context_uid, repo_id = self._cache_on(wire)
355 @self.region.conditional_cache_on_arguments(condition=cache_on)
363 region = self._region(wire)
364 @region.conditional_cache_on_arguments(condition=cache_on)
356 365 def _get_file_size(_repo_id, _path, _revision):
357 366 repo = self._factory.repo(wire)
358 367 fsobj = svn.repos.fs(repo)
359 368 if _revision is None:
360 369 _revision = svn.fs.youngest_revision(fsobj)
361 370 root = svn.fs.revision_root(fsobj, _revision)
362 371 size = svn.fs.file_length(root, path)
363 372 return size
364 373 return _get_file_size(repo_id, path, revision)
365 374
366 375 def create_repository(self, wire, compatible_version=None):
367 376 log.info('Creating Subversion repository in path "%s"', wire['path'])
368 377 self._factory.repo(wire, create=True,
369 378 compatible_version=compatible_version)
370 379
371 380 def get_url_and_credentials(self, src_url):
372 381 obj = urllib.parse.urlparse(src_url)
373 382 username = obj.username or None
374 383 password = obj.password or None
375 384 return username, password, src_url
376 385
377 386 def import_remote_repository(self, wire, src_url):
378 387 repo_path = wire['path']
379 388 if not self.is_path_valid_repository(wire, repo_path):
380 389 raise Exception(
381 390 "Path %s is not a valid Subversion repository." % repo_path)
382 391
383 392 username, password, src_url = self.get_url_and_credentials(src_url)
384 393 rdump_cmd = ['svnrdump', 'dump', '--non-interactive',
385 394 '--trust-server-cert-failures=unknown-ca']
386 395 if username and password:
387 396 rdump_cmd += ['--username', username, '--password', password]
388 397 rdump_cmd += [src_url]
389 398
390 399 rdump = subprocess.Popen(
391 400 rdump_cmd,
392 401 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
393 402 load = subprocess.Popen(
394 403 ['svnadmin', 'load', repo_path], stdin=rdump.stdout)
395 404
396 405 # TODO: johbo: This can be a very long operation, might be better
397 406 # to track some kind of status and provide an api to check if the
398 407 # import is done.
399 408 rdump.wait()
400 409 load.wait()
401 410
402 411 log.debug('Return process ended with code: %s', rdump.returncode)
403 412 if rdump.returncode != 0:
404 413 errors = rdump.stderr.read()
405 414 log.error('svnrdump dump failed: statuscode %s: message: %s',
406 415 rdump.returncode, errors)
407 416 reason = 'UNKNOWN'
408 417 if 'svnrdump: E230001:' in errors:
409 418 reason = 'INVALID_CERTIFICATE'
410 419
411 420 if reason == 'UNKNOWN':
412 421 reason = 'UNKNOWN:{}'.format(errors)
413 422 raise Exception(
414 423 'Failed to dump the remote repository from %s. Reason:%s' % (
415 424 src_url, reason))
416 425 if load.returncode != 0:
417 426 raise Exception(
418 427 'Failed to load the dump of remote repository from %s.' %
419 428 (src_url, ))
420 429
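For readers unfamiliar with the pipe above: svnrdump streams the remote history to stdout and svnadmin load consumes it directly, so no dump file ever touches disk. A minimal standalone sketch of the same pattern, assuming both binaries are on PATH (the function name is illustrative):

    import subprocess

    def mirror_svn_repo(src_url, repo_path):
        """Stream a remote dump straight into 'svnadmin load', no temp file."""
        rdump = subprocess.Popen(
            ['svnrdump', 'dump', '--non-interactive', src_url],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        load = subprocess.Popen(
            ['svnadmin', 'load', repo_path], stdin=rdump.stdout)
        rdump.stdout.close()  # let svnrdump get SIGPIPE if the loader exits early
        load.wait()
        rdump.wait()
        return rdump.returncode == 0 and load.returncode == 0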
421 430 def commit(self, wire, message, author, timestamp, updated, removed):
422 431 assert isinstance(message, str)
423 432 assert isinstance(author, str)
424 433
425 434 repo = self._factory.repo(wire)
426 435 fsobj = svn.repos.fs(repo)
427 436
428 437 rev = svn.fs.youngest_rev(fsobj)
429 438 txn = svn.repos.fs_begin_txn_for_commit(repo, rev, author, message)
430 439 txn_root = svn.fs.txn_root(txn)
431 440
432 441 for node in updated:
433 442 TxnNodeProcessor(node, txn_root).update()
434 443 for node in removed:
435 444 TxnNodeProcessor(node, txn_root).remove()
436 445
437 446 commit_id = svn.repos.fs_commit_txn(repo, txn)
438 447
439 448 if timestamp:
440 449 apr_time = apr_time_t(timestamp)
441 450 ts_formatted = svn.core.svn_time_to_cstring(apr_time)
442 451 svn.fs.change_rev_prop(fsobj, commit_id, 'svn:date', ts_formatted)
443 452
444 453 log.debug('Committed revision "%s" to "%s".', commit_id, wire['path'])
445 454 return commit_id
446 455
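As a usage sketch, the node dicts that `commit` consumes can be inferred from `TxnNodeProcessor` further below: each updated node carries a `path`, a string `content`, and optional `properties`. `remote`, `wire`, and the concrete values here are illustrative only:

    updated_nodes = [
        {
            'path': 'docs/readme.txt',                   # created if missing
            'content': 'hello world\n',                  # full new content
            'properties': {'svn:eol-style': 'native'},   # optional node props
        },
    ]
    removed_nodes = [{'path': 'old/obsolete.txt'}]

    # remote is an SvnRemote instance, wire identifies the repository (assumption)
    commit_id = remote.commit(
        wire, message='update readme', author='editor',
        timestamp=None, updated=updated_nodes, removed=removed_nodes)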
447 456 def diff(self, wire, rev1, rev2, path1=None, path2=None,
448 457 ignore_whitespace=False, context=3):
449 458
450 459 wire.update(cache=False)
451 460 repo = self._factory.repo(wire)
452 461 diff_creator = SvnDiffer(
453 462 repo, rev1, path1, rev2, path2, ignore_whitespace, context)
454 463 try:
455 464 return diff_creator.generate_diff()
456 465 except svn.core.SubversionException as e:
457 466 log.exception(
458 467 "Error during diff operation operation. "
459 468 "Path might not exist %s, %s" % (path1, path2))
460 469 return ""
461 470
462 471 @reraise_safe_exceptions
463 472 def is_large_file(self, wire, path):
464 473 return False
465 474
466 475 @reraise_safe_exceptions
467 476 def is_binary(self, wire, rev, path):
468 477 cache_on, context_uid, repo_id = self._cache_on(wire)
469 478
470 @self.region.conditional_cache_on_arguments(condition=cache_on)
479 region = self._region(wire)
480 @region.conditional_cache_on_arguments(condition=cache_on)
471 481 def _is_binary(_repo_id, _rev, _path):
472 482 raw_bytes = self.get_file_content(wire, path, rev)
473 483 return raw_bytes and b'\0' in raw_bytes
474 484
475 485 return _is_binary(repo_id, rev, path)
476 486
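`conditional_cache_on_arguments` appears to be rc_cache's extension of dogpile.cache's `cache_on_arguments` that skips memoisation when `condition` is falsy; note how the cached inner function receives only cache-key arguments while `wire` stays in the closure. A rough stock-dogpile equivalent, gating manually (`expensive_lookup` and the region setup are illustrative):

    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory')

    def expensive_lookup(repo_id, path, revision):
        return len('{}:{}:{}'.format(repo_id, path, revision))  # stand-in work

    def get_file_size(repo_id, path, revision, cache=True):
        @region.cache_on_arguments()
        def _cached(_repo_id, _path, _revision):
            return expensive_lookup(_repo_id, _path, _revision)

        if cache:
            return _cached(repo_id, path, revision)
        return expensive_lookup(repo_id, path, revision)  # bypass when disabled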
477 487 @reraise_safe_exceptions
478 488 def run_svn_command(self, wire, cmd, **opts):
479 489 path = wire.get('path', None)
480 490
481 491 if path and os.path.isdir(path):
482 492 opts['cwd'] = path
483 493
484 safe_call = False
485 if '_safe' in opts:
486 safe_call = True
494 safe_call = opts.pop('_safe', False)
487 495
488 496 svnenv = os.environ.copy()
489 497 svnenv.update(opts.pop('extra_env', {}))
490 498
491 499 _opts = {'env': svnenv, 'shell': False}
492 500
493 501 try:
494 502 _opts.update(opts)
495 503 p = subprocessio.SubprocessIOChunker(cmd, **_opts)
496 504
497 505 return ''.join(p), ''.join(p.error)
498 506 except (EnvironmentError, OSError) as err:
507 if safe_call:
508 return '', safe_str(err).strip()
509 else:
499 510 cmd = ' '.join(cmd) # human friendly CMD
500 511 tb_err = ("Couldn't run svn command (%s).\n"
501 512 "Original error was:%s\n"
502 513 "Call options:%s\n"
503 514 % (cmd, err, _opts))
504 515 log.exception(tb_err)
505 if safe_call:
506 return '', err
507 else:
508 516 raise exceptions.VcsException()(tb_err)
509 517
510 518 @reraise_safe_exceptions
511 519 def install_hooks(self, wire, force=False):
512 520 from vcsserver.hook_utils import install_svn_hooks
513 521 repo_path = wire['path']
514 522 binary_dir = settings.BINARY_DIR
515 523 executable = None
516 524 if binary_dir:
517 525 executable = os.path.join(binary_dir, 'python')
518 526 return install_svn_hooks(
519 527 repo_path, executable=executable, force_create=force)
520 528
521 529 @reraise_safe_exceptions
522 530 def get_hooks_info(self, wire):
523 531 from vcsserver.hook_utils import (
524 532 get_svn_pre_hook_version, get_svn_post_hook_version)
525 533 repo_path = wire['path']
526 534 return {
527 535 'pre_version': get_svn_pre_hook_version(repo_path),
528 536 'post_version': get_svn_post_hook_version(repo_path),
529 537 }
530 538
539 @reraise_safe_exceptions
540 def set_head_ref(self, wire, head_name):
541 pass
542
543 @reraise_safe_exceptions
544 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
545 archive_dir_name, commit_id):
546
547 def walk_tree(root, root_dir, _commit_id):
548 """
549 Recursive generator walking an svn tree, yielding (path, data, node_type) tuples
550 """
551
552 filemode_default = 0o100644
553 filemode_executable = 0o100755
554
555 file_iter = svn.fs.dir_entries(root, root_dir)
556 for f_name in file_iter:
557 f_type = NODE_TYPE_MAPPING.get(file_iter[f_name].kind, None)
558
559 if f_type == 'dir':
560 # yield the directory entry itself, then recurse into its contents
561 yield os.path.join(root_dir, f_name), {'mode': filemode_default}, f_type
562 new_root = os.path.join(root_dir, f_name)
563 for _f_name, _f_data, _f_type in walk_tree(root, new_root, _commit_id):
564 yield _f_name, _f_data, _f_type
565 else:
566 f_path = os.path.join(root_dir, f_name).rstrip('/')
567 prop_list = svn.fs.node_proplist(root, f_path)
568
569 f_mode = filemode_default
570 if prop_list.get('svn:executable'):
571 f_mode = filemode_executable
572
573 f_is_link = False
574 if prop_list.get('svn:special'):
575 f_is_link = True
576
577 data = {
578 'is_link': f_is_link,
579 'mode': f_mode,
580 'content_stream': svn.core.Stream(svn.fs.file_contents(root, f_path)).read
581 }
582
583 yield f_path, data, f_type
584
585 def file_walker(_commit_id, path):
586 repo = self._factory.repo(wire)
587 root = svn.fs.revision_root(svn.repos.fs(repo), int(commit_id))
588
589 def no_content():
590 raise NoContentException()
591
592 for f_name, f_data, f_type in walk_tree(root, path, _commit_id):
593 file_path = f_name
594
595 if f_type == 'dir':
596 mode = f_data['mode']
597 yield ArchiveNode(file_path, mode, False, no_content)
598 else:
599 mode = f_data['mode']
600 is_link = f_data['is_link']
601 data_stream = f_data['content_stream']
602 yield ArchiveNode(file_path, mode, is_link, data_stream)
603
604 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
605 archive_dir_name, commit_id)
606
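Note that `content_stream` above stores the stream's bound `.read` method without calling it, so file content is pulled only when the archiver invokes the callable. A hedged sketch of a consumer of `file_walker`, with stand-in definitions for `ArchiveNode` and `NoContentException` (the real field names may differ):

    import collections

    # stand-in for vcsserver's ArchiveNode (field names assumed)
    ArchiveNode = collections.namedtuple(
        'ArchiveNode', ['path', 'mode', 'is_link', 'content'])

    class NoContentException(Exception):
        """Raised by directory nodes, which carry no file content."""

    def list_archive(walker, commit_id, at_path='/'):
        # Drain the generator; file content is read only when called.
        for node in walker(commit_id, at_path):
            try:
                data = node.content()  # invokes the stored .read
                print('%o %s (%d bytes)' % (node.mode, node.path, len(data)))
            except NoContentException:
                print('%o %s/' % (node.mode, node.path))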
531 607
532 608 class SvnDiffer(object):
533 609 """
534 610 Utility to create diffs based on difflib and the Subversion API
535 611 """
536 612
537 613 binary_content = False
538 614
539 615 def __init__(
540 616 self, repo, src_rev, src_path, tgt_rev, tgt_path,
541 617 ignore_whitespace, context):
542 618 self.repo = repo
543 619 self.ignore_whitespace = ignore_whitespace
544 620 self.context = context
545 621
546 622 fsobj = svn.repos.fs(repo)
547 623
548 624 self.tgt_rev = tgt_rev
549 625 self.tgt_path = tgt_path or ''
550 626 self.tgt_root = svn.fs.revision_root(fsobj, tgt_rev)
551 627 self.tgt_kind = svn.fs.check_path(self.tgt_root, self.tgt_path)
552 628
553 629 self.src_rev = src_rev
554 630 self.src_path = src_path or self.tgt_path
555 631 self.src_root = svn.fs.revision_root(fsobj, src_rev)
556 632 self.src_kind = svn.fs.check_path(self.src_root, self.src_path)
557 633
558 634 self._validate()
559 635
560 636 def _validate(self):
561 637 if (self.tgt_kind != svn.core.svn_node_none and
562 638 self.src_kind != svn.core.svn_node_none and
563 639 self.src_kind != self.tgt_kind):
564 640 # TODO: johbo: proper error handling
565 641 raise Exception(
566 642 "Source and target are not compatible for diff generation. "
567 643 "Source type: %s, target type: %s" %
568 644 (self.src_kind, self.tgt_kind))
569 645
570 646 def generate_diff(self):
571 647 buf = io.StringIO()
572 648 if self.tgt_kind == svn.core.svn_node_dir:
573 649 self._generate_dir_diff(buf)
574 650 else:
575 651 self._generate_file_diff(buf)
576 652 return buf.getvalue()
577 653
578 654 def _generate_dir_diff(self, buf):
579 655 editor = DiffChangeEditor()
580 656 editor_ptr, editor_baton = svn.delta.make_editor(editor)
581 657 svn.repos.dir_delta2(
582 658 self.src_root,
583 659 self.src_path,
584 660 '', # src_entry
585 661 self.tgt_root,
586 662 self.tgt_path,
587 663 editor_ptr, editor_baton,
588 664 authorization_callback_allow_all,
589 665 False, # text_deltas
590 666 svn.core.svn_depth_infinity, # depth
591 667 False, # entry_props
592 668 False, # ignore_ancestry
593 669 )
594 670
595 671 for path, __, change in sorted(editor.changes):
596 672 self._generate_node_diff(
597 673 buf, change, path, self.tgt_path, path, self.src_path)
598 674
599 675 def _generate_file_diff(self, buf):
600 676 change = None
601 677 if self.src_kind == svn.core.svn_node_none:
602 678 change = "add"
603 679 elif self.tgt_kind == svn.core.svn_node_none:
604 680 change = "delete"
605 681 tgt_base, tgt_path = vcspath.split(self.tgt_path)
606 682 src_base, src_path = vcspath.split(self.src_path)
607 683 self._generate_node_diff(
608 684 buf, change, tgt_path, tgt_base, src_path, src_base)
609 685
610 686 def _generate_node_diff(
611 687 self, buf, change, tgt_path, tgt_base, src_path, src_base):
612 688
613 689 if self.src_rev == self.tgt_rev and tgt_base == src_base:
614 690 # keep behaviour consistent with git/hg: return an empty diff
615 691 # when comparing the same revision
616 692 return
617 693
618 694 tgt_full_path = vcspath.join(tgt_base, tgt_path)
619 695 src_full_path = vcspath.join(src_base, src_path)
620 696
621 697 self.binary_content = False
622 698 mime_type = self._get_mime_type(tgt_full_path)
623 699
624 700 if mime_type and not mime_type.startswith('text'):
625 701 self.binary_content = True
626 702 buf.write("=" * 67 + '\n')
627 703 buf.write("Cannot display: file marked as a binary type.\n")
628 704 buf.write("svn:mime-type = %s\n" % mime_type)
629 705 buf.write("Index: %s\n" % (tgt_path, ))
630 706 buf.write("=" * 67 + '\n')
631 707 buf.write("diff --git a/%(tgt_path)s b/%(tgt_path)s\n" % {
632 708 'tgt_path': tgt_path})
633 709
634 710 if change == 'add':
635 711 # TODO: johbo: SVN is missing a zero here compared to git
636 712 buf.write("new file mode 10644\n")
637 713
638 714 # TODO(marcink): introduce binary detection for svn patches
639 715 # if self.binary_content:
640 716 # buf.write('GIT binary patch\n')
641 717
642 718 buf.write("--- /dev/null\t(revision 0)\n")
643 719 src_lines = []
644 720 else:
645 721 if change == 'delete':
646 722 buf.write("deleted file mode 10644\n")
647 723
648 724 # TODO(marcink): introduce binary detection for svn patches
649 725 # if self.binary_content:
650 726 # buf.write('GIT binary patch\n')
651 727
652 728 buf.write("--- a/%s\t(revision %s)\n" % (
653 729 src_path, self.src_rev))
654 730 src_lines = self._svn_readlines(self.src_root, src_full_path)
655 731
656 732 if change == 'delete':
657 733 buf.write("+++ /dev/null\t(revision %s)\n" % (self.tgt_rev, ))
658 734 tgt_lines = []
659 735 else:
660 736 buf.write("+++ b/%s\t(revision %s)\n" % (
661 737 tgt_path, self.tgt_rev))
662 738 tgt_lines = self._svn_readlines(self.tgt_root, tgt_full_path)
663 739
664 740 if not self.binary_content:
665 741 udiff = svn_diff.unified_diff(
666 742 src_lines, tgt_lines, context=self.context,
667 743 ignore_blank_lines=self.ignore_whitespace,
668 744 ignore_case=False,
669 745 ignore_space_changes=self.ignore_whitespace)
670 746 buf.writelines(udiff)
671 747
672 748 def _get_mime_type(self, path):
673 749 try:
674 750 mime_type = svn.fs.node_prop(
675 751 self.tgt_root, path, svn.core.SVN_PROP_MIME_TYPE)
676 752 except svn.core.SubversionException:
677 753 mime_type = svn.fs.node_prop(
678 754 self.src_root, path, svn.core.SVN_PROP_MIME_TYPE)
679 755 return mime_type
680 756
681 757 def _svn_readlines(self, fs_root, node_path):
682 758 if self.binary_content:
683 759 return []
684 760 node_kind = svn.fs.check_path(fs_root, node_path)
685 761 if node_kind not in (
686 762 svn.core.svn_node_file, svn.core.svn_node_symlink):
687 763 return []
688 764 content = svn.core.Stream(
689 765 svn.fs.file_contents(fs_root, node_path)).read()
690 766 return content.splitlines(True)
691 767
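A hedged usage sketch of `SvnDiffer`, mirroring how `SvnRemote.diff` drives it above; `repo` would come from the repository factory, and both revisions must exist in the repository:

    # repo obtained elsewhere, e.g. via the SubversionFactory (assumption)
    differ = SvnDiffer(
        repo, src_rev=9, src_path='trunk/file.txt',
        tgt_rev=10, tgt_path='trunk/file.txt',
        ignore_whitespace=False, context=3)
    print(differ.generate_diff())  # unified, git-style diff text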
692 768
693 769 class DiffChangeEditor(svn.delta.Editor):
694 770 """
695 771 Records changes between two given revisions
696 772 """
697 773
698 774 def __init__(self):
699 775 self.changes = []
700 776
701 777 def delete_entry(self, path, revision, parent_baton, pool=None):
702 778 self.changes.append((path, None, 'delete'))
703 779
704 780 def add_file(
705 781 self, path, parent_baton, copyfrom_path, copyfrom_revision,
706 782 file_pool=None):
707 783 self.changes.append((path, 'file', 'add'))
708 784
709 785 def open_file(self, path, parent_baton, base_revision, file_pool=None):
710 786 self.changes.append((path, 'file', 'change'))
711 787
712 788
713 789 def authorization_callback_allow_all(root, path, pool):
714 790 return True
715 791
716 792
717 793 class TxnNodeProcessor(object):
718 794 """
719 795 Utility to process the change of one node within a transaction root.
720 796
721 797 It encapsulates the knowledge of how to add, update or remove
722 798 a node for a given transaction root. The purpose is to support the method
723 799 `SvnRemote.commit`.
724 800 """
725 801
726 802 def __init__(self, node, txn_root):
727 803 assert isinstance(node['path'], str)
728 804
729 805 self.node = node
730 806 self.txn_root = txn_root
731 807
732 808 def update(self):
733 809 self._ensure_parent_dirs()
734 810 self._add_file_if_node_does_not_exist()
735 811 self._update_file_content()
736 812 self._update_file_properties()
737 813
738 814 def remove(self):
739 815 svn.fs.delete(self.txn_root, self.node['path'])
740 816 # TODO: Clean up directory if empty
741 817
742 818 def _ensure_parent_dirs(self):
743 819 curdir = vcspath.dirname(self.node['path'])
744 820 dirs_to_create = []
745 821 while not self._svn_path_exists(curdir):
746 822 dirs_to_create.append(curdir)
747 823 curdir = vcspath.dirname(curdir)
748 824
749 825 for curdir in reversed(dirs_to_create):
750 826 log.debug('Creating missing directory "%s"', curdir)
751 827 svn.fs.make_dir(self.txn_root, curdir)
752 828
753 829 def _svn_path_exists(self, path):
754 830 path_status = svn.fs.check_path(self.txn_root, path)
755 831 return path_status != svn.core.svn_node_none
756 832
757 833 def _add_file_if_node_does_not_exist(self):
758 834 kind = svn.fs.check_path(self.txn_root, self.node['path'])
759 835 if kind == svn.core.svn_node_none:
760 836 svn.fs.make_file(self.txn_root, self.node['path'])
761 837
762 838 def _update_file_content(self):
763 839 assert isinstance(self.node['content'], str)
764 840 handler, baton = svn.fs.apply_textdelta(
765 841 self.txn_root, self.node['path'], None, None)
766 842 svn.delta.svn_txdelta_send_string(self.node['content'], handler, baton)
767 843
768 844 def _update_file_properties(self):
769 845 properties = self.node.get('properties', {})
770 846 for key, value in properties.items():
771 847 svn.fs.change_node_prop(
772 848 self.txn_root, self.node['path'], key, value)
773 849
774 850
775 851 def apr_time_t(timestamp):
776 852 """
777 853 Convert a Python timestamp into APR timestamp type apr_time_t
778 854 """
779 855 return int(timestamp * 1E6)
780 856
781 857
782 858 def svn_opt_revision_value_t(num):
783 859 """
784 860 Wrap `num` in an `svn_opt_revision_t` whose `svn_opt_revision_value_t` holds the revision number.
785 861 """
786 862 value = svn.core.svn_opt_revision_value_t()
787 863 value.number = num
788 864 revision = svn.core.svn_opt_revision_t()
789 865 revision.kind = svn.core.svn_opt_revision_number
790 866 revision.value = value
791 867 return revision
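A worked example for the two helpers above, assuming this module's `svn` bindings are imported: APR counts integer microseconds since the epoch, so 1,600,000,000 s becomes 1,600,000,000,000,000 µs, and the revision helper merely boxes a number for APIs expecting an `svn_opt_revision_t`:

    ts = 1600000000                              # seconds since the epoch
    assert apr_time_t(ts) == 1600000000000000    # integer microseconds

    rev = svn_opt_revision_value_t(42)
    assert rev.kind == svn.core.svn_opt_revision_number
    assert rev.value.number == 42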
@@ -1,57 +1,56 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import socket
19
20 19 import pytest
21 20
22 21
23 22 def pytest_addoption(parser):
24 23 parser.addoption(
25 '--repeat', type=int, default=100,
24 '--perf-repeat-vcs', type=int, default=100,
26 25 help="Number of repetitions in performance tests.")
27 26
28 27
29 28 @pytest.fixture(scope='session')
30 29 def repeat(request):
31 30 """
32 31 This fixture controls the number of repetitions used by performance tests.
33 32
34 33 Slower calls may divide it by 10 or 100. It is chosen so that the
35 34 tests are not too slow in our default test suite.
36 35 """
37 return request.config.getoption('--repeat')
36 return request.config.getoption('--perf-repeat-vcs')
38 37
39 38
40 39 @pytest.fixture(scope='session')
41 40 def vcsserver_port(request):
42 41 port = get_available_port()
43 42 print('Using vcsserver port %s' % (port, ))
44 43 return port
45 44
46 45
47 46 def get_available_port():
48 47 family = socket.AF_INET
49 48 socktype = socket.SOCK_STREAM
50 49 host = '127.0.0.1'
51 50
52 51 mysocket = socket.socket(family, socktype)
53 52 mysocket.bind((host, 0))
54 53 port = mysocket.getsockname()[1]
55 54 mysocket.close()
56 55 del mysocket
57 56 return port
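A hedged sketch of a test consuming the `repeat` fixture above; the function under test is a stand-in:

    def some_cheap_vcs_call():
        return 'ok'  # stand-in for the real call under test

    def test_vcs_call_perf(repeat):
        # repeat defaults to 100; tune it with --perf-repeat-vcs
        for _ in range(repeat):
            assert some_cheap_vcs_call() is not None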
@@ -1,160 +1,160 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import inspect
19 19
20 20 import pytest
21 21 import dulwich.errors
22 22 from mock import Mock, patch
23 23
24 24 from vcsserver import git
25 25
26 26
27 27 SAMPLE_REFS = {
28 28 'HEAD': 'fd627b9e0dd80b47be81af07c4a98518244ed2f7',
29 29 'refs/tags/v0.1.9': '341d28f0eec5ddf0b6b77871e13c2bbd6bec685c',
30 30 'refs/tags/v0.1.8': '74ebce002c088b8a5ecf40073db09375515ecd68',
31 31 'refs/tags/v0.1.1': 'e6ea6d16e2f26250124a1f4b4fe37a912f9d86a0',
32 32 'refs/tags/v0.1.3': '5a3a8fb005554692b16e21dee62bf02667d8dc3e',
33 33 }
34 34
35 35
36 36 @pytest.fixture
37 37 def git_remote():
38 38 """
39 39 A GitRemote instance with a mock factory.
40 40 """
41 41 factory = Mock()
42 42 remote = git.GitRemote(factory)
43 43 return remote
44 44
45 45
46 46 def test_discover_git_version(git_remote):
47 47 version = git_remote.discover_git_version()
48 48 assert version
49 49
50 50
51 51 class TestGitFetch(object):
52 52 def setup(self):
53 53 self.mock_repo = Mock()
54 54 factory = Mock()
55 55 factory.repo = Mock(return_value=self.mock_repo)
56 56 self.remote_git = git.GitRemote(factory)
57 57
58 58 def test_fetches_all_when_no_commit_ids_specified(self):
59 59 def side_effect(determine_wants, *args, **kwargs):
60 60 determine_wants(SAMPLE_REFS)
61 61
62 62 with patch('dulwich.client.LocalGitClient.fetch') as mock_fetch:
63 63 mock_fetch.side_effect = side_effect
64 64 self.remote_git.pull(wire={}, url='/tmp/', apply_refs=False)
65 65 determine_wants = self.mock_repo.object_store.determine_wants_all
66 66 determine_wants.assert_called_once_with(SAMPLE_REFS)
67 67
68 68 def test_fetches_specified_commits(self):
69 69 selected_refs = {
70 70 'refs/tags/v0.1.8': '74ebce002c088b8a5ecf40073db09375515ecd68',
71 71 'refs/tags/v0.1.3': '5a3a8fb005554692b16e21dee62bf02667d8dc3e',
72 72 }
73 73
74 74 def side_effect(determine_wants, *args, **kwargs):
75 75 result = determine_wants(SAMPLE_REFS)
76 76 assert sorted(result) == sorted(selected_refs.values())
77 77 return result
78 78
79 79 with patch('dulwich.client.LocalGitClient.fetch') as mock_fetch:
80 80 mock_fetch.side_effect = side_effect
81 81 self.remote_git.pull(
82 82 wire={}, url='/tmp/', apply_refs=False,
83 83 refs=selected_refs.keys())
84 84 determine_wants = self.mock_repo.object_store.determine_wants_all
85 85 assert determine_wants.call_count == 0
86 86
87 87 def test_get_remote_refs(self):
88 88 factory = Mock()
89 89 remote_git = git.GitRemote(factory)
90 90 url = 'http://example.com/test/test.git'
91 91 sample_refs = {
92 92 'refs/tags/v0.1.8': '74ebce002c088b8a5ecf40073db09375515ecd68',
93 93 'refs/tags/v0.1.3': '5a3a8fb005554692b16e21dee62bf02667d8dc3e',
94 94 }
95 95
96 96 with patch('vcsserver.git.Repo', create=False) as mock_repo:
97 97 mock_repo().get_refs.return_value = sample_refs
98 98 remote_refs = remote_git.get_remote_refs(wire={}, url=url)
99 99 mock_repo().get_refs.assert_called_once_with()
100 100 assert remote_refs == sample_refs
101 101
102 102
103 103 class TestReraiseSafeExceptions(object):
104 104
105 105 def test_method_decorated_with_reraise_safe_exceptions(self):
106 106 factory = Mock()
107 107 git_remote = git.GitRemote(factory)
108 108
109 109 def fake_function():
110 110 return None
111 111
112 112 decorator = git.reraise_safe_exceptions(fake_function)
113 113
114 114 methods = inspect.getmembers(git_remote, predicate=inspect.ismethod)
115 115 for method_name, method in methods:
116 if not method_name.startswith('_'):
116 if not method_name.startswith('_') and method_name not in ['vcsserver_invalidate_cache']:
117 117 assert method.__func__.__code__ == decorator.__code__
118 118
119 119 @pytest.mark.parametrize('side_effect, expected_type', [
120 120 (dulwich.errors.ChecksumMismatch('0000000', 'deadbeef'), 'lookup'),
121 121 (dulwich.errors.NotCommitError('deadbeef'), 'lookup'),
122 122 (dulwich.errors.MissingCommitError('deadbeef'), 'lookup'),
123 123 (dulwich.errors.ObjectMissing('deadbeef'), 'lookup'),
124 124 (dulwich.errors.HangupException(), 'error'),
125 125 (dulwich.errors.UnexpectedCommandError('test-cmd'), 'error'),
126 126 ])
127 127 def test_safe_exceptions_reraised(self, side_effect, expected_type):
128 128 @git.reraise_safe_exceptions
129 129 def fake_method():
130 130 raise side_effect
131 131
132 132 with pytest.raises(Exception) as exc_info:
133 133 fake_method()
134 134 assert type(exc_info.value) == Exception
135 135 assert exc_info.value._vcs_kind == expected_type
136 136
137 137
138 138 class TestDulwichRepoWrapper(object):
139 139 def test_calls_close_on_delete(self):
140 140 isdir_patcher = patch('dulwich.repo.os.path.isdir', return_value=True)
141 141 with isdir_patcher:
142 142 repo = git.Repo('/tmp/abcde')
143 143 with patch.object(git.DulwichRepo, 'close') as close_mock:
144 144 del repo
145 145 close_mock.assert_called_once_with()
146 146
147 147
148 148 class TestGitFactory(object):
149 149 def test_create_repo_returns_dulwich_wrapper(self):
150 150
151 151 with patch('vcsserver.lib.rc_cache.region_meta.dogpile_cache_regions') as mock:
152 152 mock.side_effect = {'repo_objects': ''}
153 153 factory = git.GitFactory()
154 154 wire = {
155 155 'path': '/tmp/abcde'
156 156 }
157 157 isdir_patcher = patch('dulwich.repo.os.path.isdir', return_value=True)
158 158 with isdir_patcher:
159 159 result = factory._create_repo(wire, True)
160 160 assert isinstance(result, git.Repo)
@@ -1,108 +1,108 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import inspect
19 19 import sys
20 20 import traceback
21 21
22 22 import pytest
23 23 from mercurial.error import LookupError
24 24 from mock import Mock, MagicMock, patch
25 25
26 26 from vcsserver import exceptions, hg, hgcompat
27 27
28 28
29 29 class TestDiff(object):
30 30 def test_raising_safe_exception_when_lookup_failed(self):
31 31
32 32 factory = Mock()
33 33 hg_remote = hg.HgRemote(factory)
34 34 with patch('mercurial.patch.diff') as diff_mock:
35 35 diff_mock.side_effect = LookupError(
36 36 'deadbeef', 'index', 'message')
37 37 with pytest.raises(Exception) as exc_info:
38 38 hg_remote.diff(
39 39 wire={}, commit_id_1='deadbeef', commit_id_2='deadbee1',
40 40 file_filter=None, opt_git=True, opt_ignorews=True,
41 41 context=3)
42 42 assert type(exc_info.value) == Exception
43 43 assert exc_info.value._vcs_kind == 'lookup'
44 44
45 45
46 46 class TestReraiseSafeExceptions(object):
47 47 def test_method_decorated_with_reraise_safe_exceptions(self):
48 48 factory = Mock()
49 49 hg_remote = hg.HgRemote(factory)
50 50 methods = inspect.getmembers(hg_remote, predicate=inspect.ismethod)
51 51 decorator = hg.reraise_safe_exceptions(None)
52 52 for method_name, method in methods:
53 if not method_name.startswith('_'):
53 if not method_name.startswith('_') and method_name not in ['vcsserver_invalidate_cache']:
54 54 assert method.__func__.__code__ == decorator.__code__
55 55
56 56 @pytest.mark.parametrize('side_effect, expected_type', [
57 57 (hgcompat.Abort(), 'abort'),
58 58 (hgcompat.InterventionRequired(), 'abort'),
59 59 (hgcompat.RepoLookupError(), 'lookup'),
60 60 (hgcompat.LookupError('deadbeef', 'index', 'message'), 'lookup'),
61 61 (hgcompat.RepoError(), 'error'),
62 62 (hgcompat.RequirementError(), 'requirement'),
63 63 ])
64 64 def test_safe_exceptions_reraised(self, side_effect, expected_type):
65 65 @hg.reraise_safe_exceptions
66 66 def fake_method():
67 67 raise side_effect
68 68
69 69 with pytest.raises(Exception) as exc_info:
70 70 fake_method()
71 71 assert type(exc_info.value) == Exception
72 72 assert exc_info.value._vcs_kind == expected_type
73 73
74 74 def test_keeps_original_traceback(self):
75 75 @hg.reraise_safe_exceptions
76 76 def fake_method():
77 77 try:
78 78 raise hgcompat.Abort()
79 79 except:
80 80 self.original_traceback = traceback.format_tb(
81 81 sys.exc_info()[2])
82 82 raise
83 83
84 84 try:
85 85 fake_method()
86 86 except Exception:
87 87 new_traceback = traceback.format_tb(sys.exc_info()[2])
88 88
89 89 new_traceback_tail = new_traceback[-len(self.original_traceback):]
90 90 assert new_traceback_tail == self.original_traceback
91 91
92 92 def test_maps_unknown_exceptions_to_unhandled(self):
93 93 @hg.reraise_safe_exceptions
94 94 def stub_method():
95 95 raise ValueError('stub')
96 96
97 97 with pytest.raises(Exception) as exc_info:
98 98 stub_method()
99 99 assert exc_info.value._vcs_kind == 'unhandled'
100 100
101 101 def test_does_not_map_known_exceptions(self):
102 102 @hg.reraise_safe_exceptions
103 103 def stub_method():
104 104 raise exceptions.LookupException()('stub')
105 105
106 106 with pytest.raises(Exception) as exc_info:
107 107 stub_method()
108 108 assert exc_info.value._vcs_kind == 'lookup'
@@ -1,39 +1,42 b''
1 1 """
2 2 Tests used to profile the HTTP based implementation.
3 3 """
4 4
5 5 import pytest
6 6 import webtest
7 7
8 8 from vcsserver.http_main import main
9 9
10 10
11 11 @pytest.fixture
12 12 def vcs_app():
13 13 stub_settings = {
14 14 'dev.use_echo_app': 'true',
15 15 'locale': 'en_US.UTF-8',
16 16 }
17 vcs_app = main({}, **stub_settings)
17 stub_global_conf = {
18 '__file__': ''
19 }
20 vcs_app = main(stub_global_conf, **stub_settings)
18 21 app = webtest.TestApp(vcs_app)
19 22 return app
20 23
21 24
22 25 @pytest.fixture(scope='module')
23 26 def data():
24 27 one_kb = 'x' * 1024
25 28 return one_kb * 1024 * 10
26 29
27 30
28 31 def test_http_app_streaming_with_data(data, repeat, vcs_app):
29 32 app = vcs_app
30 33 for x in range(repeat // 10):
31 34 response = app.post('/stream/git/', params=data)
32 35 assert response.status_code == 200
33 36
34 37
35 38 def test_http_app_streaming_no_data(repeat, vcs_app):
36 39 app = vcs_app
37 40 for x in range(repeat // 10):
38 41 response = app.post('/stream/git/')
39 42 assert response.status_code == 200
@@ -1,57 +1,57 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import mock
19 19 import pytest
20 20
21 21 from vcsserver import http_main
22 22 from vcsserver.base import obfuscate_qs
23 23
24 24
25 25 @mock.patch('vcsserver.http_main.VCS', mock.Mock())
26 26 @mock.patch('vcsserver.hgpatches.patch_largefiles_capabilities')
27 27 def test_applies_largefiles_patch(patch_largefiles_capabilities):
28 http_main.main({})
28 http_main.main({'__file__': ''})
29 29 patch_largefiles_capabilities.assert_called_once_with()
30 30
31 31
32 32 @mock.patch('vcsserver.http_main.VCS', mock.Mock())
33 33 @mock.patch('vcsserver.http_main.MercurialFactory', None)
34 34 @mock.patch(
35 35 'vcsserver.hgpatches.patch_largefiles_capabilities',
36 36 mock.Mock(side_effect=Exception("Must not be called")))
37 37 def test_applies_largefiles_patch_only_if_mercurial_is_available():
38 http_main.main({})
38 http_main.main({'__file__': ''})
39 39
40 40
41 41 @pytest.mark.parametrize('given, expected', [
42 42 ('bad', 'bad'),
43 43 ('query&foo=bar', 'query&foo=bar'),
44 44 ('equery&auth_token=bar', 'equery&auth_token=*****'),
45 45 ('a;b;c;query&foo=bar&auth_token=secret',
46 46 'a&b&c&query&foo=bar&auth_token=*****'),
47 47 ('', ''),
48 48 (None, None),
49 49 ('foo=bar', 'foo=bar'),
50 50 ('auth_token=secret', 'auth_token=*****'),
51 51 ('auth_token=secret&api_key=secret2',
52 52 'auth_token=*****&api_key=*****'),
53 53 ('auth_token=secret&api_key=secret2&param=value',
54 54 'auth_token=*****&api_key=*****&param=value'),
55 55 ])
56 56 def test_obfuscate_qs(given, expected):
57 57 assert expected == obfuscate_qs(given)
@@ -1,64 +1,107 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import time
19 19 import logging
20 20
21 21 import vcsserver
22 22 from vcsserver.utils import safe_str
23 23
24 24
25 25 log = logging.getLogger(__name__)
26 26
27 27
28 def get_access_path(request):
29 environ = request.environ
30 return environ.get('PATH_INFO')
28 def get_access_path(environ):
29 path = environ.get('PATH_INFO')
30 return path
31 31
32 32
33 33 def get_user_agent(environ):
34 34 return environ.get('HTTP_USER_AGENT')
35 35
36 36
37 def get_vcs_method(environ):
38 return environ.get('HTTP_X_RC_METHOD')
39
40
41 def get_vcs_repo(environ):
42 return environ.get('HTTP_X_RC_REPO_NAME')
43
44
37 45 class RequestWrapperTween(object):
38 46 def __init__(self, handler, registry):
39 47 self.handler = handler
40 48 self.registry = registry
41 49
42 50 # one-time configuration code goes here
43 51
44 52 def __call__(self, request):
45 53 start = time.time()
54 log.debug('Starting request time measurement')
55 response = None
56
57 ua = get_user_agent(request.environ)
58 vcs_method = get_vcs_method(request.environ)
59 repo_name = get_vcs_repo(request.environ)
60
46 61 try:
47 62 response = self.handler(request)
48 63 finally:
49 end = time.time()
50 total = end - start
51 64 count = request.request_count()
52 65 _ver_ = vcsserver.__version__
66 _path = safe_str(get_access_path(request.environ))
67 ip = '127.0.0.1'
68 match_route = request.matched_route.name if request.matched_route else "NOT_FOUND"
69 resp_code = getattr(response, 'status_code', 'UNDEFINED')
70
71 total = time.time() - start
72
73 _view_path = "{}/{}@{}".format(_path, vcs_method, repo_name)
53 74 log.info(
54 75 'Req[%4s] IP: %s %s Request to %s time: %.4fs [%s], VCSServer %s',
55 count, '127.0.0.1', request.environ.get('REQUEST_METHOD'),
56 safe_str(get_access_path(request)), total, get_user_agent(request.environ), _ver_)
76 count, ip, request.environ.get('REQUEST_METHOD'),
77 _view_path, total, ua, _ver_,
78 extra={"time": total, "ver": _ver_, "code": resp_code,
79 "path": _path, "view_name": match_route, "user_agent": ua,
80 "vcs_method": vcs_method, "repo_name": repo_name}
81 )
82
83 statsd = request.registry.statsd
84 if statsd:
85 match_route = request.matched_route.name if request.matched_route else _path
86 elapsed_time_ms = round(1000.0 * total) # use ms only
87 statsd.timing(
88 "vcsserver_req_timing.histogram", elapsed_time_ms,
89 tags=[
90 "view_name:{}".format(match_route),
91 "code:{}".format(resp_code)
92 ],
93 use_decimals=False
94 )
95 statsd.incr(
96 "vcsserver_req_total", tags=[
97 "view_name:{}".format(match_route),
98 "code:{}".format(resp_code)
99 ])
57 100
58 101 return response
59 102
60 103
61 104 def includeme(config):
62 105 config.add_tween(
63 106 'vcsserver.tweens.request_wrapper.RequestWrapperTween',
64 107 )
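Since `includeme` follows Pyramid's include protocol, wiring the tween into an application is a one-liner; a minimal sketch with an empty settings dict:

    from pyramid.config import Configurator

    with Configurator(settings={}) as config:
        # resolves and calls vcsserver.tweens.request_wrapper.includeme(config)
        config.include('vcsserver.tweens.request_wrapper')
        app = config.make_wsgi_app()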
@@ -1,32 +1,46 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 from vcsserver.lib import rc_cache
18 19
19 20 class RemoteBase(object):
20 21 EMPTY_COMMIT = '0' * 40
21 22
22 @property
23 def region(self):
24 return self._factory._cache_region
23 def _region(self, wire):
24 cache_repo_id = wire.get('cache_repo_id', '')
25 cache_namespace_uid = 'cache_repo.{}'.format(cache_repo_id)
26 return rc_cache.get_or_create_region('repo_object', cache_namespace_uid)
25 27
26 28 def _cache_on(self, wire):
27 29 context = wire.get('context', '')
28 30 context_uid = '{}'.format(context)
29 31 repo_id = wire.get('repo_id', '')
30 32 cache = wire.get('cache', True)
31 33 cache_on = context and cache
32 34 return cache_on, context_uid, repo_id
35
36 def vcsserver_invalidate_cache(self, wire, delete):
37 from vcsserver.lib import rc_cache
38 repo_id = wire.get('repo_id', '')
39 cache_repo_id = wire.get('cache_repo_id', '')
40 cache_namespace_uid = 'cache_repo.{}'.format(cache_repo_id)
41
42 if delete:
43 rc_cache.clear_cache_namespace(
44 'repo_object', cache_namespace_uid, invalidate=True)
45
46 return {'invalidated': {'repo_id': repo_id, 'delete': delete}}
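A hedged sketch of the invalidation round trip: the caller passes the same `wire` it uses for cached calls, and a truthy `delete` drops the whole per-repository cache namespace (the wire values are illustrative):

    wire = {
        'path': '/repos/example',
        'repo_id': 'example',
        'cache_repo_id': 'example',
        'context': 'ctx-1',
    }
    # remote is any RemoteBase subclass instance, e.g. SvnRemote (assumption)
    result = remote.vcsserver_invalidate_cache(wire, delete=True)
    assert result == {'invalidated': {'repo_id': 'example', 'delete': True}}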