merge: with latest default branch
super-admin
r1041:e1e68d19 merge python3
@@ -0,0 +1,16 b''
1 [DEFAULT]
2 done = false
3
4 [task:bump_version]
5 done = true
6
7 [task:fixes_on_stable]
8 done = true
9
10 [task:pip2nix_generated]
11 done = true
12
13 [release]
14 state = prepared
15 version = 4.27.1
16
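
The file above is the release-state ini recorded for the 4.27.1 release. As a minimal sketch (the file name `release.ini` and the semantics of the `done` flags are assumptions, not confirmed by this commit), such a file can be checked with the standard library:

```python
# Hypothetical check of the release-state file above; the path and the
# meaning of the `done` flags are assumed, not confirmed by this commit.
import configparser

cp = configparser.ConfigParser()
cp.read('release.ini')

pending = [s for s in cp.sections()
           if s.startswith('task:') and not cp.getboolean(s, 'done')]
print('release', cp.get('release', 'version'),
      'state:', cp.get('release', 'state'),
      'pending tasks:', pending or 'none')
```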
@@ -0,0 +1,53 b''
1 ; #####################
2 ; LOGGING CONFIGURATION
3 ; #####################
4 ; Logging template, used to configure logging
5 ; some variables here are replaced by RhodeCode with default values
6
7 [loggers]
8 keys = root, vcsserver
9
10 [handlers]
11 keys = console
12
13 [formatters]
14 keys = generic, json
15
16 ; #######
17 ; LOGGERS
18 ; #######
19 [logger_root]
20 level = NOTSET
21 handlers = console
22
23 [logger_vcsserver]
24 level = $RC_LOGGING_LEVEL
25 handlers =
26 qualname = vcsserver
27 propagate = 1
28
29 ; ########
30 ; HANDLERS
31 ; ########
32
33 [handler_console]
34 class = StreamHandler
35 args = (sys.stderr, )
36 level = $RC_LOGGING_LEVEL
37 ; To enable JSON formatted logs replace generic with json
38 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
39 #formatter = json
40 #formatter = generic
41 formatter = $RC_LOGGING_FORMATTER
42
43 ; ##########
44 ; FORMATTERS
45 ; ##########
46
47 [formatter_generic]
48 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
49 datefmt = %Y-%m-%d %H:%M:%S
50
51 [formatter_json]
52 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
53 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
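
This template intentionally leaves `$RC_LOGGING_LEVEL` and `$RC_LOGGING_FORMATTER` unresolved; `SettingsMaker.enable_logging` (added later in this commit) fills them in via `string.Template.safe_substitute`. A minimal sketch of that substitution step, with illustrative default values:

```python
# Sketch of how the $RC_* placeholders above get replaced; mirrors the
# safe_substitute() call in SettingsMaker.enable_logging in this commit.
import os
import string

template = 'level = $RC_LOGGING_LEVEL\nformatter = $RC_LOGGING_FORMATTER\n'
rendered = string.Template(template).safe_substitute(
    RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or 'INFO',
    RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or 'generic',
)
# safe_substitute leaves any unresolved $vars intact instead of raising
print(rendered)
```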
@@ -0,0 +1,10 b''
1 diff -rup configparser-4.0.2-orig/pyproject.toml configparser-4.0.2/pyproject.toml
2 --- configparser-4.0.2-orig/pyproject.toml 2021-03-22 21:28:11.000000000 +0100
3 +++ configparser-4.0.2/pyproject.toml 2021-03-22 21:28:11.000000000 +0100
4 @@ -1,5 +1,5 @@
5 [build-system]
6 -requires = ["setuptools>=40.7", "wheel", "setuptools_scm>=1.15"]
7 +requires = ["setuptools<=42.0", "wheel", "setuptools_scm<6.0.0"]
8 build-backend = "setuptools.build_meta"
9
10 [tool.black]
@@ -0,0 +1,7 b''
1 diff -rup importlib-metadata-1.6.0-orig/pyproject.toml importlib-metadata-1.6.0/pyproject.toml
2 --- importlib-metadata-1.6.0-orig/pyproject.toml 2021-03-22 22:10:33.000000000 +0100
3 +++ importlib-metadata-1.6.0/pyproject.toml 2021-03-22 22:11:09.000000000 +0100
4 @@ -1,3 +1,3 @@
5 [build-system]
6 -requires = ["setuptools>=30.3", "wheel", "setuptools_scm"]
7 +requires = ["setuptools<42.0", "wheel", "setuptools_scm<6.0.0"]
@@ -0,0 +1,12 b''
1 diff -rup pytest-4.6.9-orig/setup.py pytest-4.6.9/setup.py
2 --- pytest-4.6.9-orig/setup.py 2018-04-10 10:23:04.000000000 +0200
3 +++ pytest-4.6.9/setup.py 2018-04-10 10:23:34.000000000 +0200
4 @@ -24,7 +24,7 @@ def main():
5 def main():
6 setup(
7 use_scm_version={"write_to": "src/_pytest/_version.py"},
8 - setup_requires=["setuptools-scm", "setuptools>=40.0"],
9 + setup_requires=["setuptools-scm<6.0.0", "setuptools<=42.0"],
10 package_dir={"": "src"},
11 # fmt: off
12 extras_require={
\ No newline at end of file
@@ -0,0 +1,10 b''
1 diff -rup zip-1.2.0-orig/pyproject.toml zip-1.2.0/pyproject.toml
2 --- zip-1.2.0-orig/pyproject.toml 2021-03-23 10:55:37.000000000 +0100
3 +++ zip-1.2.0/pyproject.toml 2021-03-23 10:56:05.000000000 +0100
4 @@ -1,5 +1,5 @@
5 [build-system]
6 -requires = ["setuptools>=34.4", "wheel", "setuptools_scm>=1.15"]
7 +requires = ["setuptools<42.0", "wheel", "setuptools_scm<6.0.0"]
8 build-backend = "setuptools.build_meta"
9
10 [tool.black]
NO CONTENT: new file 100644
NO CONTENT: new file 100644
@@ -0,0 +1,207 b''
1 # -*- coding: utf-8 -*-
2
3 # Copyright (C) 2010-2020 RhodeCode GmbH
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
21 import os
22 import textwrap
23 import string
24 import functools
25 import logging
26 import tempfile
27 import logging.config
28 log = logging.getLogger(__name__)
29
30 # skip keys that are set here, so we don't double-process them
31 set_keys = {
32 '__file__': ''
33 }
34
35
36 def str2bool(_str):
37 """
38 returns a True/False value from the given string; it tries to translate the
39 string into a boolean
40
41 :param _str: string value to translate into boolean
42 :rtype: boolean
43 :returns: boolean from given string
44 """
45 if _str is None:
46 return False
47 if _str in (True, False):
48 return _str
49 _str = str(_str).strip().lower()
50 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
51
52
53 def aslist(obj, sep=None, strip=True):
54 """
55 Returns given string separated by sep as list
56
57 :param obj:
58 :param sep:
59 :param strip:
60 """
61 if isinstance(obj, str):
62 if obj in ['', ""]:
63 return []
64
65 lst = obj.split(sep)
66 if strip:
67 lst = [v.strip() for v in lst]
68 return lst
69 elif isinstance(obj, (list, tuple)):
70 return obj
71 elif obj is None:
72 return []
73 else:
74 return [obj]
75
76
77 class SettingsMaker(object):
78
79 def __init__(self, app_settings):
80 self.settings = app_settings
81
82 @classmethod
83 def _bool_func(cls, input_val):
84 if isinstance(input_val, bytes):
85 input_val = input_val.decode('utf8')
86 return str2bool(input_val)
87
88 @classmethod
89 def _int_func(cls, input_val):
90 return int(input_val)
91
92 @classmethod
93 def _list_func(cls, input_val, sep=','):
94 return aslist(input_val, sep=sep)
95
96 @classmethod
97 def _string_func(cls, input_val, lower=True):
98 if lower:
99 input_val = input_val.lower()
100 return input_val
101
102 @classmethod
103 def _float_func(cls, input_val):
104 return float(input_val)
105
106 @classmethod
107 def _dir_func(cls, input_val, ensure_dir=False, mode=0o755):
108
109 # ensure we have our dir created
110 if not os.path.isdir(input_val) and ensure_dir:
111 os.makedirs(input_val, mode=mode)
112
113 if not os.path.isdir(input_val):
114 raise Exception('Dir at {} does not exist'.format(input_val))
115 return input_val
116
117 @classmethod
118 def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755):
119 dirname = os.path.dirname(input_val)
120 cls._dir_func(dirname, ensure_dir=ensure_dir)
121 return input_val
122
123 @classmethod
124 def _key_transformator(cls, key):
125 return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_'))
126
127 def maybe_env_key(self, key):
128 # now maybe we have this KEY in env, search and use the value with higher priority.
129 transformed_key = self._key_transformator(key)
130 envvar_value = os.environ.get(transformed_key)
131 if envvar_value:
132 log.debug('using `%s` key instead of `%s` key for config', transformed_key, key)
133
134 return envvar_value
135
136 def env_expand(self):
137 replaced = {}
138 for k, v in self.settings.items():
139 if k not in set_keys:
140 envvar_value = self.maybe_env_key(k)
141 if envvar_value:
142 replaced[k] = envvar_value
143 set_keys[k] = envvar_value
144
145 # replace ALL keys updated
146 self.settings.update(replaced)
147
148 def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'):
149 """
150 Helper to set up logging on a running instance
151 :return:
152 """
153
154 if not str2bool(self.settings.get('logging.autoconfigure')):
155 log.info('logging configuration based on main .ini file')
156 return
157
158 if logging_conf is None:
159 logging_conf = self.settings.get('logging.logging_conf_file') or ''
160
161 if not os.path.isfile(logging_conf):
162 log.error('Unable to set up logging based on %s: '
163 'file does not exist. Specify a path using the logging.logging_conf_file config setting.', logging_conf)
164 return
165
166 with open(logging_conf, 'rt') as f:
167 ini_template = textwrap.dedent(f.read())
168 ini_template = string.Template(ini_template).safe_substitute(
169 RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level,
170 RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter
171 )
172
173 with tempfile.NamedTemporaryFile(mode='w', prefix='rc_logging_', suffix='.ini', delete=False) as f:
174 log.info('Saved Temporary LOGGING config at %s', f.name)
175 f.write(ini_template)
176
177 logging.config.fileConfig(f.name)
178 os.remove(f.name)
179
180 def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None):
181 input_val = self.settings.get(key, default)
182
183 if default_when_empty and not input_val:
184 # use default value when value is set in the config but it is empty
185 input_val = default
186
187 parser_func = {
188 'bool': self._bool_func,
189 'int': self._int_func,
190 'list': self._list_func,
191 'list:newline': functools.partial(self._list_func, sep='\n'),
192 'list:spacesep': functools.partial(self._list_func, sep=' '),
193 'string': functools.partial(self._string_func, lower=lower),
194 'dir': self._dir_func,
195 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True),
196 'file': self._file_path_func,
197 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True),
198 None: lambda i: i
199 }[parser]
200
201 envvar_value = self.maybe_env_key(key)
202 if envvar_value:
203 input_val = envvar_value
204 set_keys[key] = input_val
205
206 self.settings[key] = parser_func(input_val)
207 return self.settings[key]
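
For orientation, a hypothetical usage sketch of `SettingsMaker`; the keys mirror the ini template in this commit, the values and the env var are invented:

```python
# Assumes the SettingsMaker module above is importable; values are
# illustrative only.
settings = {
    'logging.autoconfigure': 'true',
    'rc_cache.repo_object.expiration_time': '2592000',
}
maker = SettingsMaker(settings)
maker.make_setting('logging.autoconfigure', default=False, parser='bool')
maker.make_setting('rc_cache.repo_object.expiration_time', default=2592000, parser='int')
# an env var such as RC_LOGGING_AUTOCONFIGURE=false wins over the ini value
maker.env_expand()
```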
@@ -0,0 +1,26 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
18 # This package contains non rhodecode licensed packages that are
19 # vendored for various reasons
20
21 import os
22 import sys
23
24 vendor_dir = os.path.abspath(os.path.dirname(__file__))
25
26 sys.path.append(vendor_dir)
@@ -0,0 +1,243 b''
1 '''
2 This library is provided to allow standard python logging
3 to output log data as JSON formatted strings
4 '''
5 import logging
6 import json
7 import re
8 from datetime import date, datetime, time, tzinfo, timedelta
9 import traceback
10 import importlib
11
12 from inspect import istraceback
13
14 from collections import OrderedDict
15
16
17 def _inject_req_id(record, *args, **kwargs):
18 return record
19
20
21 ExceptionAwareFormatter = logging.Formatter
22
23
24 ZERO = timedelta(0)
25 HOUR = timedelta(hours=1)
26
27
28 class UTC(tzinfo):
29 """UTC"""
30
31 def utcoffset(self, dt):
32 return ZERO
33
34 def tzname(self, dt):
35 return "UTC"
36
37 def dst(self, dt):
38 return ZERO
39
40 utc = UTC()
41
42
43 # skip natural LogRecord attributes
44 # http://docs.python.org/library/logging.html#logrecord-attributes
45 RESERVED_ATTRS = (
46 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
47 'funcName', 'levelname', 'levelno', 'lineno', 'module',
48 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
49 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName')
50
51
52 def merge_record_extra(record, target, reserved):
53 """
54 Merges extra attributes from LogRecord object into target dictionary
55
56 :param record: logging.LogRecord
57 :param target: dict to update
58 :param reserved: dict or list with reserved keys to skip
59 """
60 for key, value in record.__dict__.items():
61 # the hasattr check allows non-string (e.g. numeric) keys
62 if (key not in reserved
63 and not (hasattr(key, "startswith")
64 and key.startswith('_'))):
65 target[key] = value
66 return target
67
68
69 class JsonEncoder(json.JSONEncoder):
70 """
71 A custom encoder extending the default JSONEncoder
72 """
73
74 def default(self, obj):
75 if isinstance(obj, (date, datetime, time)):
76 return self.format_datetime_obj(obj)
77
78 elif istraceback(obj):
79 return ''.join(traceback.format_tb(obj)).strip()
80
81 elif type(obj) == Exception \
82 or isinstance(obj, Exception) \
83 or type(obj) == type:
84 return str(obj)
85
86 try:
87 return super(JsonEncoder, self).default(obj)
88
89 except TypeError:
90 try:
91 return str(obj)
92
93 except Exception:
94 return None
95
96 def format_datetime_obj(self, obj):
97 return obj.isoformat()
98
99
100 class JsonFormatter(ExceptionAwareFormatter):
101 """
102 A custom formatter to format logging records as json strings.
103 Extra values will be formatted with str() if not supported by
104 the default json encoder
105 """
106
107 def __init__(self, *args, **kwargs):
108 """
109 :param json_default: a function for encoding non-standard objects
110 as outlined in http://docs.python.org/2/library/json.html
111 :param json_encoder: optional custom encoder
112 :param json_serializer: a :meth:`json.dumps`-compatible callable
113 that will be used to serialize the log record.
114 :param json_indent: an optional :meth:`json.dumps`-compatible numeric value
115 that will be used to customize the indent of the output json.
116 :param prefix: an optional string prefix added at the beginning of
117 the formatted string
119 :param json_ensure_ascii: ensure_ascii parameter for json.dumps
120 :param reserved_attrs: an optional list of fields that will be skipped when
121 outputting json log record. Defaults to all log record attributes:
122 http://docs.python.org/library/logging.html#logrecord-attributes
123 :param timestamp: an optional string/boolean field to add a timestamp when
124 outputting the json log record. If string is passed, timestamp will be added
125 to log record using string as key. If True boolean is passed, timestamp key
126 will be "timestamp". Note that this vendored copy defaults it to True.
127 """
128 self.json_default = self._str_to_fn(kwargs.pop("json_default", None))
129 self.json_encoder = self._str_to_fn(kwargs.pop("json_encoder", None))
130 self.json_serializer = self._str_to_fn(kwargs.pop("json_serializer", json.dumps))
131 self.json_indent = kwargs.pop("json_indent", None)
132 self.json_ensure_ascii = kwargs.pop("json_ensure_ascii", True)
133 self.prefix = kwargs.pop("prefix", "")
134 reserved_attrs = kwargs.pop("reserved_attrs", RESERVED_ATTRS)
135 self.reserved_attrs = dict(zip(reserved_attrs, reserved_attrs))
136 self.timestamp = kwargs.pop("timestamp", True)
137
138 # super(JsonFormatter, self).__init__(*args, **kwargs)
139 logging.Formatter.__init__(self, *args, **kwargs)
140 if not self.json_encoder and not self.json_default:
141 self.json_encoder = JsonEncoder
142
143 self._required_fields = self.parse()
144 self._skip_fields = dict(zip(self._required_fields,
145 self._required_fields))
146 self._skip_fields.update(self.reserved_attrs)
147
148 def _str_to_fn(self, fn_as_str):
149 """
150 If the argument is not a string, return whatever was passed in.
151 Parses a string such as package.module.function, imports the module
152 and returns the function.
153
154 :param fn_as_str: The string to parse. If not a string, return it.
155 """
156 if not isinstance(fn_as_str, str):
157 return fn_as_str
158
159 path, _, function = fn_as_str.rpartition('.')
160 module = importlib.import_module(path)
161 return getattr(module, function)
162
163 def parse(self):
164 """
165 Parses format string looking for substitutions
166
167 This method is responsible for returning a list of fields (as strings)
168 to include in all log messages.
169 """
170 standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE)
171 return standard_formatters.findall(self._fmt)
172
173 def add_fields(self, log_record, record, message_dict):
174 """
175 Override this method to implement custom logic for adding fields.
176 """
177 for field in self._required_fields:
178 log_record[field] = record.__dict__.get(field)
179 log_record.update(message_dict)
180 merge_record_extra(record, log_record, reserved=self._skip_fields)
181
182 if self.timestamp:
183 key = self.timestamp if type(self.timestamp) == str else 'timestamp'
184 log_record[key] = datetime.fromtimestamp(record.created, tz=utc)
185
186 def process_log_record(self, log_record):
187 """
188 Override this method to implement custom logic
189 on the possibly ordered dictionary.
190 """
191 return log_record
192
193 def jsonify_log_record(self, log_record):
194 """Returns a json string of the log record."""
195 return self.json_serializer(log_record,
196 default=self.json_default,
197 cls=self.json_encoder,
198 indent=self.json_indent,
199 ensure_ascii=self.json_ensure_ascii)
200
201 def serialize_log_record(self, log_record):
202 """Returns the final representation of the log record."""
203 return "%s%s" % (self.prefix, self.jsonify_log_record(log_record))
204
205 def format(self, record):
206 """Formats a log record and serializes to json"""
207 message_dict = {}
208 # FIXME: logging.LogRecord.msg and logging.LogRecord.message in typeshed
209 # are always type of str. We shouldn't need to override that.
210 if isinstance(record.msg, dict):
211 message_dict = record.msg
212 record.message = None
213 else:
214 record.message = record.getMessage()
215 # only format time if needed
216 if "asctime" in self._required_fields:
217 record.asctime = self.formatTime(record, self.datefmt)
218
219 # Display formatted exception, but allow overriding it in the
220 # user-supplied dict.
221 if record.exc_info and not message_dict.get('exc_info'):
222 message_dict['exc_info'] = self.formatException(record.exc_info)
223 if not message_dict.get('exc_info') and record.exc_text:
224 message_dict['exc_info'] = record.exc_text
225 # Display formatted record of stack frames
226 # default format is a string returned from :func:`traceback.print_stack`
227 try:
228 if record.stack_info and not message_dict.get('stack_info'):
229 message_dict['stack_info'] = self.formatStack(record.stack_info)
230 except AttributeError:
231 # Python2.7 doesn't have stack_info.
232 pass
233
234 try:
235 log_record = OrderedDict()
236 except NameError:
237 log_record = {}
238
239 _inject_req_id(record, with_prefix=False)
240 self.add_fields(log_record, record, message_dict)
241 log_record = self.process_log_record(log_record)
242
243 return self.serialize_log_record(log_record)
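
A minimal sketch of wiring this `JsonFormatter` up programmatically, equivalent in spirit to the `[formatter_json]` ini section added elsewhere in this commit (the handler choice and logger name are illustrative):

```python
# Assumes JsonFormatter from the vendored module above is in scope.
import logging
import sys

handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(JsonFormatter(
    '%(timestamp)s %(levelname)s %(name)s %(message)s'))
log = logging.getLogger('vcsserver')
log.addHandler(handler)
log.warning('cache miss', extra={'repo': 'foo'})
# emits one JSON object per record, e.g. (timestamp shortened):
# {"timestamp": "2021-...+00:00", "levelname": "WARNING",
#  "name": "vcsserver", "message": "cache miss", "repo": "foo"}
```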
@@ -0,0 +1,390 b''
1 import sys
2 import threading
3 import weakref
4 from base64 import b64encode
5 from logging import getLogger
6 from os import urandom
7
8 from redis import StrictRedis
9
10 __version__ = '3.7.0'
11
12 loggers = {
13 k: getLogger("vcsserver." + ".".join((__name__, k)))
14 for k in [
15 "acquire",
16 "refresh.thread.start",
17 "refresh.thread.stop",
18 "refresh.thread.exit",
19 "refresh.start",
20 "refresh.shutdown",
21 "refresh.exit",
22 "release",
23 ]
24 }
25
26 PY3 = sys.version_info[0] == 3
27
28 if PY3:
29 text_type = str
30 binary_type = bytes
31 else:
32 text_type = unicode # noqa
33 binary_type = str
34
35
36 # Check if the id matches. If not, return an error code.
37 UNLOCK_SCRIPT = b"""
38 if redis.call("get", KEYS[1]) ~= ARGV[1] then
39 return 1
40 else
41 redis.call("del", KEYS[2])
42 redis.call("lpush", KEYS[2], 1)
43 redis.call("pexpire", KEYS[2], ARGV[2])
44 redis.call("del", KEYS[1])
45 return 0
46 end
47 """
48
49 # Covers both cases: when the key doesn't exist and when it doesn't equal the lock's id
50 EXTEND_SCRIPT = b"""
51 if redis.call("get", KEYS[1]) ~= ARGV[1] then
52 return 1
53 elseif redis.call("ttl", KEYS[1]) < 0 then
54 return 2
55 else
56 redis.call("expire", KEYS[1], ARGV[2])
57 return 0
58 end
59 """
60
61 RESET_SCRIPT = b"""
62 redis.call('del', KEYS[2])
63 redis.call('lpush', KEYS[2], 1)
64 redis.call('pexpire', KEYS[2], ARGV[2])
65 return redis.call('del', KEYS[1])
66 """
67
68 RESET_ALL_SCRIPT = b"""
69 local locks = redis.call('keys', 'lock:*')
70 local signal
71 for _, lock in pairs(locks) do
72 signal = 'lock-signal:' .. string.sub(lock, 6)
73 redis.call('del', signal)
74 redis.call('lpush', signal, 1)
75 redis.call('expire', signal, 1)
76 redis.call('del', lock)
77 end
78 return #locks
79 """
80
81
82 class AlreadyAcquired(RuntimeError):
83 pass
84
85
86 class NotAcquired(RuntimeError):
87 pass
88
89
90 class AlreadyStarted(RuntimeError):
91 pass
92
93
94 class TimeoutNotUsable(RuntimeError):
95 pass
96
97
98 class InvalidTimeout(RuntimeError):
99 pass
100
101
102 class TimeoutTooLarge(RuntimeError):
103 pass
104
105
106 class NotExpirable(RuntimeError):
107 pass
108
109
110 class Lock(object):
111 """
112 A Lock context manager implemented via redis SETNX/BLPOP.
113 """
114 unlock_script = None
115 extend_script = None
116 reset_script = None
117 reset_all_script = None
118
119 def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000):
120 """
121 :param redis_client:
122 An instance of :class:`~StrictRedis`.
123 :param name:
124 The name (redis key) the lock should have.
125 :param expire:
126 The lock expiry time in seconds. If left at the default (None)
127 the lock will not expire.
128 :param id:
129 The ID (redis value) the lock should have. A random value is
130 generated when left at the default.
131
132 Note that if you specify this then the lock is marked as "held". Acquires
133 won't be possible.
134 :param auto_renewal:
135 If set to ``True``, Lock will automatically renew the lock so that it
136 doesn't expire for as long as the lock is held (acquire() called
137 or running in a context manager).
138
139 Implementation note: Renewal will happen using a daemon thread with
140 an interval of ``expire*2/3``. If wishing to use a different renewal
141 time, subclass Lock, call ``super().__init__()`` then set
142 ``self._lock_renewal_interval`` to your desired interval.
143 :param strict:
144 If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``.
145 :param signal_expire:
146 Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``.
147 """
148 if strict and not isinstance(redis_client, StrictRedis):
149 raise ValueError("redis_client must be instance of StrictRedis. "
150 "Use strict=False if you know what you're doing.")
151 if auto_renewal and expire is None:
152 raise ValueError("Expire may not be None when auto_renewal is set")
153
154 self._client = redis_client
155
156 if expire:
157 expire = int(expire)
158 if expire < 0:
159 raise ValueError("A negative expire is not acceptable.")
160 else:
161 expire = None
162 self._expire = expire
163
164 self._signal_expire = signal_expire
165 if id is None:
166 self._id = b64encode(urandom(18)).decode('ascii')
167 elif isinstance(id, binary_type):
168 try:
169 self._id = id.decode('ascii')
170 except UnicodeDecodeError:
171 self._id = b64encode(id).decode('ascii')
172 elif isinstance(id, text_type):
173 self._id = id
174 else:
175 raise TypeError("Incorrect type for `id`. Must be bytes/str not %s." % type(id))
176 self._name = 'lock:' + name
177 self._signal = 'lock-signal:' + name
178 self._lock_renewal_interval = (float(expire) * 2 / 3
179 if auto_renewal
180 else None)
181 self._lock_renewal_thread = None
182
183 self.register_scripts(redis_client)
184
185 @classmethod
186 def register_scripts(cls, redis_client):
187 global reset_all_script
188 if reset_all_script is None:
189 reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
190 cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT)
191 cls.extend_script = redis_client.register_script(EXTEND_SCRIPT)
192 cls.reset_script = redis_client.register_script(RESET_SCRIPT)
193 cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
194
195 @property
196 def _held(self):
197 return self.id == self.get_owner_id()
198
199 def reset(self):
200 """
201 Forcibly deletes the lock. Use this with care.
202 """
203 self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire))
204
205 @property
206 def id(self):
207 return self._id
208
209 def get_owner_id(self):
210 owner_id = self._client.get(self._name)
211 if isinstance(owner_id, binary_type):
212 owner_id = owner_id.decode('ascii', 'replace')
213 return owner_id
214
215 def acquire(self, blocking=True, timeout=None):
216 """
217 :param blocking:
218 Boolean value specifying whether lock should be blocking or not.
219 :param timeout:
220 An integer value specifying the maximum number of seconds to block.
221 """
222 logger = loggers["acquire"]
223
224 logger.debug("Getting blocking: %s acquire on %r ...", blocking, self._name)
225
226 if self._held:
227 owner_id = self.get_owner_id()
228 raise AlreadyAcquired("Already acquired from this Lock instance. Lock id: {}".format(owner_id))
229
230 if not blocking and timeout is not None:
231 raise TimeoutNotUsable("Timeout cannot be used if blocking=False")
232
233 if timeout:
234 timeout = int(timeout)
235 if timeout < 0:
236 raise InvalidTimeout("Timeout (%d) cannot be negative" % timeout)
237
238 if self._expire and not self._lock_renewal_interval and timeout > self._expire:
239 raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire))
240
241 busy = True
242 blpop_timeout = timeout or self._expire or 0
243 timed_out = False
244 while busy:
245 busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
246 if busy:
247 if timed_out:
248 return False
249 elif blocking:
250 timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout
251 else:
252 logger.warning("Failed to get %r.", self._name)
253 return False
254
255 logger.debug("Got lock for %r.", self._name)
256 if self._lock_renewal_interval is not None:
257 self._start_lock_renewer()
258 return True
259
260 def extend(self, expire=None):
261 """Extends expiration time of the lock.
262
263 :param expire:
264 New expiration time. If ``None`` - `expire` provided during
265 lock initialization will be taken.
266 """
267 if expire:
268 expire = int(expire)
269 if expire < 0:
270 raise ValueError("A negative expire is not acceptable.")
271 elif self._expire is not None:
272 expire = self._expire
273 else:
274 raise TypeError(
275 "To extend a lock 'expire' must be provided as an "
276 "argument to extend() method or at initialization time."
277 )
278
279 error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire))
280 if error == 1:
281 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
282 elif error == 2:
283 raise NotExpirable("Lock %s has no assigned expiration time" % self._name)
284 elif error:
285 raise RuntimeError("Unsupported error code %s from EXTEND script" % error)
286
287 @staticmethod
288 def _lock_renewer(lockref, interval, stop):
289 """
290 Renew the lock key in redis every `interval` seconds for as long
291 as `self._lock_renewal_thread.should_exit` is False.
292 """
293 while not stop.wait(timeout=interval):
294 loggers["refresh.thread.start"].debug("Refreshing lock")
295 lock = lockref()
296 if lock is None:
297 loggers["refresh.thread.stop"].debug(
298 "The lock no longer exists, stopping lock refreshing"
299 )
300 break
301 lock.extend(expire=lock._expire)
302 del lock
303 loggers["refresh.thread.exit"].debug("Exit requested, stopping lock refreshing")
304
305 def _start_lock_renewer(self):
306 """
307 Starts the lock refresher thread.
308 """
309 if self._lock_renewal_thread is not None:
310 raise AlreadyStarted("Lock refresh thread already started")
311
312 loggers["refresh.start"].debug(
313 "Starting thread to refresh lock every %s seconds",
314 self._lock_renewal_interval
315 )
316 self._lock_renewal_stop = threading.Event()
317 self._lock_renewal_thread = threading.Thread(
318 group=None,
319 target=self._lock_renewer,
320 kwargs={'lockref': weakref.ref(self),
321 'interval': self._lock_renewal_interval,
322 'stop': self._lock_renewal_stop}
323 )
324 self._lock_renewal_thread.setDaemon(True)
325 self._lock_renewal_thread.start()
326
327 def _stop_lock_renewer(self):
328 """
329 Stop the lock renewer.
330
331 This signals the renewal thread and waits for its exit.
332 """
333 if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
334 return
335 loggers["refresh.shutdown"].debug("Signalling the lock refresher to stop")
336 self._lock_renewal_stop.set()
337 self._lock_renewal_thread.join()
338 self._lock_renewal_thread = None
339 loggers["refresh.exit"].debug("Lock refresher has stopped")
340
341 def __enter__(self):
342 acquired = self.acquire(blocking=True)
343 assert acquired, "Lock wasn't acquired, but blocking=True"
344 return self
345
346 def __exit__(self, exc_type=None, exc_value=None, traceback=None):
347 self.release()
348
349 def release(self):
350 """Releases the lock, that was acquired with the same object.
351
352 .. note::
353
354 If you want to release a lock that you acquired in a different place you have two choices:
355
356 * Use ``Lock("name", id=id_from_other_place).release()``
357 * Use ``Lock("name").reset()``
358 """
359 if self._lock_renewal_thread is not None:
360 self._stop_lock_renewer()
361 loggers["release"].debug("Releasing %r.", self._name)
362 error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire))
363 if error == 1:
364 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
365 elif error:
366 raise RuntimeError("Unsupported error code %s from UNLOCK script." % error)
367
368 def locked(self):
369 """
370 Return True if the lock is acquired.
371
372 Checks whether a lock with the same name exists. This method returns True
373 even if the lock has a different id.
374 """
375 return self._client.exists(self._name) == 1
376
377
378 reset_all_script = None
379
380
381 def reset_all(redis_client):
382 """
383 Forcibly deletes all locks that remain (e.g. after a crash). Use this with care.
384
385 :param redis_client:
386 An instance of :class:`~StrictRedis`.
387 """
388 Lock.register_scripts(redis_client)
389
390 reset_all_script(client=redis_client) # noqa
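
A hypothetical usage sketch of the vendored lock (the Redis address, key name, and workload are invented):

```python
# Requires a reachable Redis and the `redis` package; Lock is the class above.
from redis import StrictRedis

client = StrictRedis(host='localhost', port=6379)

with Lock(client, 'repo-import', expire=60, auto_renewal=True):
    # critical section: a daemon thread refreshes the key every
    # expire * 2/3 seconds until the block exits and release() runs
    do_exclusive_work()  # hypothetical workload
```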
@@ -0,0 +1,52 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import logging
4
5 from .stream import TCPStatsClient, UnixSocketStatsClient # noqa
6 from .udp import StatsClient # noqa
7
8 HOST = 'localhost'
9 PORT = 8125
10 IPV6 = False
11 PREFIX = None
12 MAXUDPSIZE = 512
13
14 log = logging.getLogger('rhodecode.statsd')
15
16
17 def statsd_config(config, prefix='statsd.'):
18 _config = {}
19 for key in config.keys():
20 if key.startswith(prefix):
21 _config[key[len(prefix):]] = config[key]
22 return _config
23
24
25 def client_from_config(configuration, prefix='statsd.', **kwargs):
26 from pyramid.settings import asbool
27
28 _config = statsd_config(configuration, prefix)
29 statsd_enabled = asbool(_config.pop('enabled', False))
30 if not statsd_enabled:
31 log.debug('statsd client not enabled by the statsd.enabled flag, skipping...')
32 return
33
34 host = _config.pop('statsd_host', HOST)
35 port = _config.pop('statsd_port', PORT)
36 prefix = _config.pop('statsd_prefix', PREFIX)
37 maxudpsize = _config.pop('statsd_maxudpsize', MAXUDPSIZE)
38 ipv6 = asbool(_config.pop('statsd_ipv6', IPV6))
39 log.debug('configured statsd client %s:%s', host, port)
40
41 try:
42 client = StatsClient(
43 host=host, port=port, prefix=prefix, maxudpsize=maxudpsize, ipv6=ipv6)
44 except Exception:
45 log.exception('StatsD is enabled, but failed to connect to statsd server, fallback: disable statsd')
46 client = None
47
48 return client
49
50
51 def get_statsd_client(request):
52 return client_from_config(request.registry.settings)
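
A sketch of building a client from flat ini-style settings; the keys mirror the `statsd.*` options added to the ini template in this commit, the values are invented:

```python
# Assumes client_from_config from the module above is in scope.
config = {
    'statsd.enabled': 'true',
    'statsd.statsd_host': '127.0.0.1',
    'statsd.statsd_port': 8125,
    'statsd.statsd_prefix': 'vcsserver',
}
client = client_from_config(config)
if client:  # None when statsd.enabled is false or the client failed to build
    client.incr('repo_object.cache_miss')
```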
@@ -0,0 +1,156 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import re
4 import random
5 from collections import deque
6 from datetime import timedelta
7 from repoze.lru import lru_cache
8
9 from .timer import Timer
10
11 TAG_INVALID_CHARS_RE = re.compile(
12 r"[^\w\d_\-:/\.]",
13 #re.UNICODE
14 )
15 TAG_INVALID_CHARS_SUBS = "_"
16
17 # we save and expose the bucket names used by statsd for discovery
18 buckets_dict = {
19
20 }
21
22
23 @lru_cache(maxsize=500)
24 def _normalize_tags_with_cache(tag_list):
25 return [TAG_INVALID_CHARS_RE.sub(TAG_INVALID_CHARS_SUBS, tag) for tag in tag_list]
26
27
28 def normalize_tags(tag_list):
29 # We have to turn our input tag list into a non-mutable tuple for it to
30 # be hashable (and thus usable) by the @lru_cache decorator.
31 return _normalize_tags_with_cache(tuple(tag_list))
32
33
34 class StatsClientBase(object):
35 """A Base class for various statsd clients."""
36
37 def close(self):
38 """Used to close and clean up any underlying resources."""
39 raise NotImplementedError()
40
41 def _send(self):
42 raise NotImplementedError()
43
44 def pipeline(self):
45 raise NotImplementedError()
46
47 def timer(self, stat, rate=1, tags=None, auto_send=True):
48 """
49 statsd = StatsdClient.statsd
50 with statsd.timer('bucket_name', auto_send=True) as tmr:
51 # This block will be timed.
52 for i in range(0, 100000):
53 i ** 2
54 # you can access time here...
55 elapsed_ms = tmr.ms
56 """
57 return Timer(self, stat, rate, tags, auto_send=auto_send)
58
59 def timing(self, stat, delta, rate=1, tags=None, use_decimals=True):
60 """
61 Send new timing information.
62
63 `delta` can be either a number of milliseconds or a timedelta.
64 """
65 if isinstance(delta, timedelta):
66 # Convert timedelta to number of milliseconds.
67 delta = delta.total_seconds() * 1000.
68 if use_decimals:
69 fmt = '%0.6f|ms'
70 else:
71 fmt = '%s|ms'
72 self._send_stat(stat, fmt % delta, rate, tags)
73
74 def incr(self, stat, count=1, rate=1, tags=None):
75 """Increment a stat by `count`."""
76 self._send_stat(stat, '%s|c' % count, rate, tags)
77
78 def decr(self, stat, count=1, rate=1, tags=None):
79 """Decrement a stat by `count`."""
80 self.incr(stat, -count, rate, tags)
81
82 def gauge(self, stat, value, rate=1, delta=False, tags=None):
83 """Set a gauge value."""
84 if value < 0 and not delta:
85 if rate < 1:
86 if random.random() > rate:
87 return
88 with self.pipeline() as pipe:
89 pipe._send_stat(stat, '0|g', 1)
90 pipe._send_stat(stat, '%s|g' % value, 1)
91 else:
92 prefix = '+' if delta and value >= 0 else ''
93 self._send_stat(stat, '%s%s|g' % (prefix, value), rate, tags)
94
95 def set(self, stat, value, rate=1):
96 """Set a set value."""
97 self._send_stat(stat, '%s|s' % value, rate)
98
99 def histogram(self, stat, value, rate=1, tags=None):
100 """Set a histogram"""
101 self._send_stat(stat, '%s|h' % value, rate, tags)
102
103 def _send_stat(self, stat, value, rate, tags=None):
104 self._after(self._prepare(stat, value, rate, tags))
105
106 def _prepare(self, stat, value, rate, tags=None):
107 global buckets_dict
108 buckets_dict[stat] = 1
109
110 if rate < 1:
111 if random.random() > rate:
112 return
113 value = '%s|@%s' % (value, rate)
114
115 if self._prefix:
116 stat = '%s.%s' % (self._prefix, stat)
117
118 res = '%s:%s%s' % (
119 stat,
120 value,
121 ("|#" + ",".join(normalize_tags(tags))) if tags else "",
122 )
123 return res
124
125 def _after(self, data):
126 if data:
127 self._send(data)
128
129
130 class PipelineBase(StatsClientBase):
131
132 def __init__(self, client):
133 self._client = client
134 self._prefix = client._prefix
135 self._stats = deque()
136
137 def _send(self):
138 raise NotImplementedError()
139
140 def _after(self, data):
141 if data is not None:
142 self._stats.append(data)
143
144 def __enter__(self):
145 return self
146
147 def __exit__(self, typ, value, tb):
148 self.send()
149
150 def send(self):
151 if not self._stats:
152 return
153 self._send()
154
155 def pipeline(self):
156 return self.__class__(self)
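
For reference, a sketch of the statsd line protocol these methods emit; `client` is assumed to be an already configured `StatsClient` with no prefix:

```python
client.incr('requests_total', tags=['method:GET'])
#   -> requests_total:1|c|#method:GET
client.timing('req_time', 12.5)
#   -> req_time:12.500000|ms
client.gauge('workers', 4)
#   -> workers:4|g
with client.timer('render_time') as tmr:
    pass  # timed block; sends render_time:<elapsed>|ms on exit
print(tmr.ms)  # elapsed milliseconds are also kept on the Timer
```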
@@ -0,0 +1,75 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import socket
4
5 from .base import StatsClientBase, PipelineBase
6
7
8 class StreamPipeline(PipelineBase):
9 def _send(self):
10 self._client._after('\n'.join(self._stats))
11 self._stats.clear()
12
13
14 class StreamClientBase(StatsClientBase):
15 def connect(self):
16 raise NotImplementedError()
17
18 def close(self):
19 if self._sock and hasattr(self._sock, 'close'):
20 self._sock.close()
21 self._sock = None
22
23 def reconnect(self):
24 self.close()
25 self.connect()
26
27 def pipeline(self):
28 return StreamPipeline(self)
29
30 def _send(self, data):
31 """Send data to statsd."""
32 if not self._sock:
33 self.connect()
34 self._do_send(data)
35
36 def _do_send(self, data):
37 self._sock.sendall(data.encode('ascii') + b'\n')
38
39
40 class TCPStatsClient(StreamClientBase):
41 """TCP version of StatsClient."""
42
43 def __init__(self, host='localhost', port=8125, prefix=None,
44 timeout=None, ipv6=False):
45 """Create a new client."""
46 self._host = host
47 self._port = port
48 self._ipv6 = ipv6
49 self._timeout = timeout
50 self._prefix = prefix
51 self._sock = None
52
53 def connect(self):
54 fam = socket.AF_INET6 if self._ipv6 else socket.AF_INET
55 family, _, _, _, addr = socket.getaddrinfo(
56 self._host, self._port, fam, socket.SOCK_STREAM)[0]
57 self._sock = socket.socket(family, socket.SOCK_STREAM)
58 self._sock.settimeout(self._timeout)
59 self._sock.connect(addr)
60
61
62 class UnixSocketStatsClient(StreamClientBase):
63 """Unix domain socket version of StatsClient."""
64
65 def __init__(self, socket_path, prefix=None, timeout=None):
66 """Create a new client."""
67 self._socket_path = socket_path
68 self._timeout = timeout
69 self._prefix = prefix
70 self._sock = None
71
72 def connect(self):
73 self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
74 self._sock.settimeout(self._timeout)
75 self._sock.connect(self._socket_path)
@@ -0,0 +1,75 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import functools
4
5 # Use timer that's not susceptible to time of day adjustments.
6 try:
7 # perf_counter is only present on Py3.3+
8 from time import perf_counter as time_now
9 except ImportError:
10 # fall back to using time
11 from time import time as time_now
12
13
14 def safe_wraps(wrapper, *args, **kwargs):
15 """Safely wraps partial functions."""
16 while isinstance(wrapper, functools.partial):
17 wrapper = wrapper.func
18 return functools.wraps(wrapper, *args, **kwargs)
19
20
21 class Timer(object):
22 """A context manager/decorator for statsd.timing()."""
23
24 def __init__(self, client, stat, rate=1, tags=None, use_decimals=True, auto_send=True):
25 self.client = client
26 self.stat = stat
27 self.rate = rate
28 self.tags = tags
29 self.ms = None
30 self._sent = False
31 self._start_time = None
32 self.use_decimals = use_decimals
33 self.auto_send = auto_send
34
35 def __call__(self, f):
36 """Thread-safe timing function decorator."""
37 @safe_wraps(f)
38 def _wrapped(*args, **kwargs):
39 start_time = time_now()
40 try:
41 return f(*args, **kwargs)
42 finally:
43 elapsed_time_ms = 1000.0 * (time_now() - start_time)
44 self.client.timing(self.stat, elapsed_time_ms, self.rate, self.tags, self.use_decimals)
45 self._sent = True
46 return _wrapped
47
48 def __enter__(self):
49 return self.start()
50
51 def __exit__(self, typ, value, tb):
52 self.stop(send=self.auto_send)
53
54 def start(self):
55 self.ms = None
56 self._sent = False
57 self._start_time = time_now()
58 return self
59
60 def stop(self, send=True):
61 if self._start_time is None:
62 raise RuntimeError('Timer has not started.')
63 dt = time_now() - self._start_time
64 self.ms = 1000.0 * dt # Convert to milliseconds.
65 if send:
66 self.send()
67 return self
68
69 def send(self):
70 if self.ms is None:
71 raise RuntimeError('No data recorded.')
72 if self._sent:
73 raise RuntimeError('Already sent data.')
74 self._sent = True
75 self.client.timing(self.stat, self.ms, self.rate, self.tags, self.use_decimals)
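
`Timer` also works as a decorator via `__call__`; a small sketch (`client` is again an assumed, configured `StatsClient`):

```python
@client.timer('vcs.checkout_time')
def checkout(repo):
    # every call is timed and sent as vcs.checkout_time in milliseconds
    ...
```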
@@ -0,0 +1,55 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 import socket
4
5 from .base import StatsClientBase, PipelineBase
6
7
8 class Pipeline(PipelineBase):
9
10 def __init__(self, client):
11 super(Pipeline, self).__init__(client)
12 self._maxudpsize = client._maxudpsize
13
14 def _send(self):
15 data = self._stats.popleft()
16 while self._stats:
17 # Use popleft to preserve the order of the stats.
18 stat = self._stats.popleft()
19 if len(stat) + len(data) + 1 >= self._maxudpsize:
20 self._client._after(data)
21 data = stat
22 else:
23 data += '\n' + stat
24 self._client._after(data)
25
26
27 class StatsClient(StatsClientBase):
28 """A client for statsd."""
29
30 def __init__(self, host='localhost', port=8125, prefix=None,
31 maxudpsize=512, ipv6=False):
32 """Create a new client."""
33 fam = socket.AF_INET6 if ipv6 else socket.AF_INET
34 family, _, _, _, addr = socket.getaddrinfo(
35 host, port, fam, socket.SOCK_DGRAM)[0]
36 self._addr = addr
37 self._sock = socket.socket(family, socket.SOCK_DGRAM)
38 self._prefix = prefix
39 self._maxudpsize = maxudpsize
40
41 def _send(self, data):
42 """Send data to statsd."""
43 try:
44 self._sock.sendto(data.encode('ascii'), self._addr)
45 except (socket.error, RuntimeError):
46 # No time for love, Dr. Jones!
47 pass
48
49 def close(self):
50 if self._sock and hasattr(self._sock, 'close'):
51 self._sock.close()
52 self._sock = None
53
54 def pipeline(self):
55 return Pipeline(self)
@@ -0,0 +1,49 b''
1 from vcsserver.lib._vendor.statsd import client_from_config
2
3
4 class StatsdClientNotInitialised(Exception):
5 pass
6
7
8 class _Singleton(type):
9 """A metaclass that creates a Singleton base class when called."""
10
11 _instances = {}
12
13 def __call__(cls, *args, **kwargs):
14 if cls not in cls._instances:
15 cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
16 return cls._instances[cls]
17
18
19 class Singleton(_Singleton("SingletonMeta", (object,), {})):
20 pass
21
22
23 class StatsdClientClass(Singleton):
24 setup_run = False
25 statsd_client = None
26 statsd = None
27
28 def __getattribute__(self, name):
29
30 if name.startswith("statsd"):
31 if self.setup_run:
32 return super(StatsdClientClass, self).__getattribute__(name)
33 else:
34 return None
35 #raise StatsdClientNotInitialised("requested key was %s" % name)
36
37 return super(StatsdClientClass, self).__getattribute__(name)
38
39 def setup(self, settings):
40 """
41 Initialize the client
42 """
43 statsd = client_from_config(settings)
44 self.statsd = statsd
45 self.statsd_client = statsd
46 self.setup_run = True
47
48
49 StatsdClient = StatsdClientClass()
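
A sketch of the intended lifecycle of this singleton (the settings dict is invented):

```python
# Before setup() runs, every `statsd*` attribute reads as None because
# __getattribute__ short-circuits while setup_run is False.
assert StatsdClient.statsd is None
StatsdClient.setup({'statsd.enabled': 'false'})
if StatsdClient.statsd:  # still None here, since statsd is disabled
    StatsdClient.statsd.incr('startup')
```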
@@ -3,4 +3,3 b' current_version = 5.0.0'
 message = release: Bump version {current_version} to {new_version}
 
 [bumpversion:file:vcsserver/VERSION]
-
@@ -61,3 +61,22 b' ba6a6dc9ecd7fd8b1dcd6eb0c4ee0210e897c426'
 17bc818b41bcf6883b9ff0da31f01d8c2a5d0781 v4.18.1
 1e9f12aa01f82c335abc9017efe94ce1c30b52ba v4.18.2
 f4cc6b3c5680bdf4541d7d442fbb7086640fb547 v4.18.3
+5dc0277e4f77bd4cc3042d99625bb5d3ba480c8c v4.19.0
+3a815eeb1b1efa340dda9b81a8da3cf24a7d605b v4.19.1
+8841da3680fba841e5a54ebccd8ca56c078f7553 v4.19.2
+4b0dec7fd80b1ca38e5073e5e562a5a450f73669 v4.19.3
+1485aa75ffe1b1ec48352dce7b7492d92f85e95f v4.20.0
+5b740274011766ef2f73803cc196d081e1e7f1d4 v4.20.1
+5a7835234e2c45e8fb8184c60f548a64b5842af8 v4.21.0
+26af88343015f8b89d5a66f92bc7547c51fcf0df v4.22.0
+cf54e5f700fe5dc50af1a1bdf5197c18cf52105f v4.23.0
+179d989bcfe02c6227f9f6aa9236cbbe1c14c400 v4.23.1
+383aee8b1652affaa26aefe336a89ee366b2b26d v4.23.2
+bc1a8141cc51fc23c455ebc50c6609c810b46f8d v4.24.0
+530a1c03caabc806ea1ef34605f8f67f18c70e55 v4.24.1
+5908ae65cee1043982e1b26d7b618af5fcfebbb3 v4.25.0
+cce8bcdf75090d5943a1e9706fe5212d7b5d1fa1 v4.25.1
+8610c4bf846c63bbc95d3ddfb53fadaaa9c7aa42 v4.25.2
+d46b7d1be72c76c9f9aaeab6a342951d54459f49 v4.26.0
+6fba0daab1e20a9e18fb70fa59bd21753e0a5b90 v4.27.0
+6195da4fc454087173918ae59cae946289458676 v4.27.1
@@ -31,7 +31,7 b' asyncore_use_poll = true'
 ; GUNICORN APPLICATION SERVER
 ; ###########################
 
-; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
+; run with gunicorn --paste rhodecode.ini
 
 ; Module to use, this setting shouldn't be changed
 #use = egg:gunicorn#main
@@ -86,7 +86,7 b' asyncore_use_poll = true'
 ; serving requests. Workers still alive after the timeout (starting from the
 ; receipt of the restart signal) are force killed.
 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
-#graceful_timeout = 3600
+#graceful_timeout = 21600
 
 # The number of seconds to wait for requests on a Keep-Alive connection.
 # Generally set in the 1-5 seconds range.
@@ -110,6 +110,17 b' asyncore_use_poll = true'
 [app:main]
 ; The %(here)s variable will be replaced with the absolute path of parent directory
 ; of this file
+; Each option in app:main can be overridden by an environment variable
+;
+; To override an option:
+;
+; RC_<KeyName>
+; Everything should be uppercase, . and - should be replaced by _.
+; For example, if you have this configuration setting:
+; rc_cache.repo_object.backend = foo
+; it can be overridden by
+; export RC_CACHE_REPO_OBJECT_BACKEND=foo
+
 use = egg:rhodecode-vcsserver
 
 
@@ -133,13 +144,13 b' debugtoolbar.exclude_prefixes ='
 ; #################
 
 ; Pyramid default locales, we need this to be set
-pyramid.default_locale_name = en
+#pyramid.default_locale_name = en
 
 ; default locale used by VCS systems
-locale = en_US.UTF-8
+#locale = en_US.UTF-8
 
 ; path to binaries for vcsserver, it should be set by the installer
-; at installation time, e.g /home/user/vcsserver-1/profile/bin
+; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
 ; it can also be a path to nix-build output in case of development
 core.binary_dir = ""
 
@@ -153,21 +164,21 b' core.binary_dir = ""'
 
 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
-cache_dir = %(here)s/data
+#cache_dir = %(here)s/data
 
 ; ***************************************
 ; `repo_object` cache, default file based
 ; ***************************************
 
 ; `repo_object` cache settings for vcs methods for repositories
-rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
+#rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
 
 ; cache auto-expires after N seconds
 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
-rc_cache.repo_object.expiration_time = 2592000
+#rc_cache.repo_object.expiration_time = 2592000
 
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
+#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
 
 ; ***********************************************************
 ; `repo_object` cache with redis backend
@@ -191,10 +202,32 b' rc_cache.repo_object.expiration_time = 2'
 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
 #rc_cache.repo_object.arguments.distributed_lock = true
 
+; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
+#rc_cache.repo_object.arguments.lock_auto_renewal = true
+
+; Statsd client config, this is used to send metrics to statsd
+; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
+#statsd.enabled = false
+#statsd.statsd_host = 0.0.0.0
+#statsd.statsd_port = 8125
+#statsd.statsd_prefix =
+#statsd.statsd_ipv6 = false
+
+; Configure logging automatically at server startup. Set to false
+; to use the custom logging config below.
+; RC_LOGGING_FORMATTER
+; RC_LOGGING_LEVEL
+; env variables can control the logging settings when autoconfigure is enabled
+
+#logging.autoconfigure = true
+
+; specify your own custom logging config file to configure logging
+#logging.logging_conf_file = /path/to/custom_logging.ini
 
 ; #####################
 ; LOGGING CONFIGURATION
 ; #####################
+
 [loggers]
 keys = root, vcsserver
 
@@ -202,7 +235,7 b' keys = root, vcsserver'
 keys = console
 
 [formatters]
-keys = generic
+keys = generic, json
 
 ; #######
 ; LOGGERS
@@ -217,7 +250,6 b' handlers ='
 qualname = vcsserver
 propagate = 1
 
-
 ; ########
 ; HANDLERS
 ; ########
@@ -226,6 +258,8 b' propagate = 1'
 class = StreamHandler
 args = (sys.stderr, )
 level = DEBUG
+; To enable JSON formatted logs replace 'generic' with 'json'
+; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
 formatter = generic
 
 ; ##########
@@ -235,3 +269,7 b' formatter = generic'
 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
 datefmt = %Y-%m-%d %H:%M:%S
+
+[formatter_json]
+format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
+class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
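
The `RC_<KeyName>` override documented in the new `[app:main]` comments uses the same transformation as `SettingsMaker._key_transformator` from this commit; a minimal sketch (the example key is illustrative):

```python
def env_key(key):
    # mirrors SettingsMaker._key_transformator earlier in this commit
    return 'RC_' + key.upper().replace('.', '_').replace('-', '_')

assert env_key('core.binary_dir') == 'RC_CORE_BINARY_DIR'
# export RC_CORE_BINARY_DIR=/usr/local/bin   -> overrides core.binary_dir
```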
@@ -11,6 +11,7 b' import time'
 import threading
 import traceback
 import random
+import socket
 from gunicorn.glogging import Logger
 
 
@@ -29,9 +30,15 b" accesslog = '-'"
 worker_tmp_dir = None
 tmp_upload_dir = None
 
+#reuse_port = True
+
 # Custom log format
+#access_log_format = (
+#    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
+
+# loki format for easier parsing in grafana
 access_log_format = (
-    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
+    'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
 
 # self adjust workers based on CPU count
 # workers = get_workers()
@@ -90,9 +97,12 b' def post_fork(server, worker):'
     if conf.has_option(section, 'memory_usage_recovery_threshold'):
         _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
 
-    worker._memory_max_usage = _memory_max_usage
-    worker._memory_usage_check_interval = _memory_usage_check_interval
-    worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
+    worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
+                                   or _memory_max_usage)
+    worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
+                                              or _memory_usage_check_interval)
+    worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
+                                                    or _memory_usage_recovery_threshold)
 
     # register memory last check time, with some random offset so we don't recycle all
     # at once
@@ -242,6 +252,74 b' def post_request(worker, req, environ, r'
242 _check_memory_usage(worker)
252 _check_memory_usage(worker)
243
253
244
254
255 def _filter_proxy(ip):
256 """
257 Passed in IP addresses in HEADERS can be in a special format of multiple
258 ips. Those comma separated IPs are passed from various proxies in the
259 chain of request processing. The left-most being the original client.
260 We only care about the first IP which came from the org. client.
261
262 :param ip: ip string from headers
263 """
264 if ',' in ip:
265 _ips = ip.split(',')
266 _first_ip = _ips[0].strip()
267 return _first_ip
268 return ip
269
270
271 def _filter_port(ip):
272 """
273 Removes a port from ip, there are 4 main cases to handle here.
274 - ipv4 eg. 127.0.0.1
275 - ipv6 eg. ::1
276 - ipv4+port eg. 127.0.0.1:8080
277 - ipv6+port eg. [::1]:8080
278
279 :param ip:
280 """
281 def is_ipv6(ip_addr):
282 if hasattr(socket, 'inet_pton'):
283 try:
284 socket.inet_pton(socket.AF_INET6, ip_addr)
285 except socket.error:
286 return False
287 else:
288 return False
289 return True
290
291 if ':' not in ip: # must be ipv4 pure ip
292 return ip
293
294 if '[' in ip and ']' in ip: # ipv6 with port
295 return ip.split(']')[0][1:].lower()
296
297 # must be ipv6 or ipv4 with port
298 if is_ipv6(ip):
299 return ip
300 else:
301 ip, _port = ip.split(':')[:2] # means ipv4+port
302 return ip
303
304
305 def get_ip_addr(environ):
306 proxy_key = 'HTTP_X_REAL_IP'
307 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
308 def_key = 'REMOTE_ADDR'
309 _filters = lambda x: _filter_port(_filter_proxy(x))
310
311 ip = environ.get(proxy_key)
312 if ip:
313 return _filters(ip)
314
315 ip = environ.get(proxy_key2)
316 if ip:
317 return _filters(ip)
318
319 ip = environ.get(def_key, '0.0.0.0')
320 return _filters(ip)
321
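Putting the helpers together on a hypothetical WSGI environ: the proxy headers win over REMOTE_ADDR, and ports and proxy chains are stripped:

    environ = {
        'HTTP_X_REAL_IP': '203.0.113.7:443',
        'HTTP_X_FORWARDED_FOR': '198.51.100.1, 10.0.0.1',
        'REMOTE_ADDR': '10.0.0.2',
    }
    assert get_ip_addr(environ) == '203.0.113.7'  # X-Real-IP is checked first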
322
245 class RhodeCodeLogger(Logger):
323 class RhodeCodeLogger(Logger):
246 """
324 """
247 Custom Logger that allows some customization that gunicorn doesn't allow
325 Custom Logger that allows some customization that gunicorn doesn't allow
@@ -258,5 +336,58 b' class RhodeCodeLogger(Logger):'
258 msecs = int((now - long(now)) * 1000)
336 msecs = int((now - long(now)) * 1000)
259 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
337 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
260
338
339 def atoms(self, resp, req, environ, request_time):
340 """ Gets atoms for log formatting.
341 """
342 status = resp.status
343 if isinstance(status, str):
344 status = status.split(None, 1)[0]
345 atoms = {
346 'h': get_ip_addr(environ),
347 'l': '-',
348 'u': self._get_user(environ) or '-',
349 't': self.now(),
350 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
351 environ['RAW_URI'],
352 environ["SERVER_PROTOCOL"]),
353 's': status,
354 'm': environ.get('REQUEST_METHOD'),
355 'U': environ.get('PATH_INFO'),
356 'q': environ.get('QUERY_STRING'),
357 'H': environ.get('SERVER_PROTOCOL'),
358 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
359 'B': getattr(resp, 'sent', None),
360 'f': environ.get('HTTP_REFERER', '-'),
361 'a': environ.get('HTTP_USER_AGENT', '-'),
362 'T': request_time.seconds,
363 'D': (request_time.seconds * 1000000) + request_time.microseconds,
364 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
365 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
366 'p': "<%s>" % os.getpid()
367 }
368
369 # add request headers
370 if hasattr(req, 'headers'):
371 req_headers = req.headers
372 else:
373 req_headers = req
374
375 if hasattr(req_headers, "items"):
376 req_headers = req_headers.items()
377
378 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
379
380 resp_headers = resp.headers
381 if hasattr(resp_headers, "items"):
382 resp_headers = resp_headers.items()
383
384 # add response headers
385 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
386
387 # add environ variables
388 environ_variables = environ.items()
389 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
390
391 return atoms
261
392
262 logger_class = RhodeCodeLogger
393 logger_class = RhodeCodeLogger
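Since the new `atoms()` also exposes request/response headers (`{name}i` / `{name}o`) and WSGI environ keys (`{name}e`), a custom access_log_format can reference them. A hypothetical example (the header name is an assumption, not from the diff):

    # access_log_format = (
    #     'time="%(t)s" ip="%(h)s" req_id="%({x-request-id}i)s" '
    #     'scheme="%({wsgi.url_scheme}e)s" status=%(s)s')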
@@ -14,7 +14,7 b' port = 9900'
14 ; GUNICORN APPLICATION SERVER
14 ; GUNICORN APPLICATION SERVER
15 ; ###########################
15 ; ###########################
16
16
17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
17 ; run with gunicorn --paste rhodecode.ini
18
18
19 ; Module to use, this setting shouldn't be changed
19 ; Module to use, this setting shouldn't be changed
20 use = egg:gunicorn#main
20 use = egg:gunicorn#main
@@ -69,7 +69,7 b' limit_request_field_size = 0'
69 ; serving requests. Workers still alive after the timeout (starting from the
69 ; serving requests. Workers still alive after the timeout (starting from the
70 ; receipt of the restart signal) are force killed.
70 ; receipt of the restart signal) are force killed.
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 graceful_timeout = 3600
72 graceful_timeout = 21600
73
73
74 # The number of seconds to wait for requests on a Keep-Alive connection.
74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 # Generally set in the 1-5 seconds range.
75 # Generally set in the 1-5 seconds range.
@@ -93,16 +93,27 b' memory_usage_recovery_threshold = 0.8'
93 [app:main]
93 [app:main]
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 ; of this file
95 ; of this file
96 ; Each option in the app:main section can be overridden by an environment variable
97 ;
98 ; To override an option, use:
99 ;
100 ; RC_<KeyName>
101 ; Everything should be uppercase; . and - should be replaced by _.
102 ; For example, if you have this configuration setting:
103 ; rc_cache.repo_object.backend = foo
104 ; it can be overridden by
105 ; export RC_CACHE_REPO_OBJECT_BACKEND=foo
106
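A sketch of the documented key-to-variable mapping (the helper is illustrative; the prefixing rule beyond the example above is an assumption):

    def to_env_name(ini_key):
        # 'rc_cache.repo_object.backend' -> 'RC_CACHE_REPO_OBJECT_BACKEND'
        name = ini_key.upper().replace('.', '_').replace('-', '_')
        return name if name.startswith('RC_') else 'RC_' + name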
96 use = egg:rhodecode-vcsserver
107 use = egg:rhodecode-vcsserver
97
108
98 ; Pyramid default locales, we need this to be set
109 ; Pyramid default locales, we need this to be set
99 pyramid.default_locale_name = en
110 #pyramid.default_locale_name = en
100
111
101 ; default locale used by VCS systems
112 ; default locale used by VCS systems
102 locale = en_US.UTF-8
113 #locale = en_US.UTF-8
103
114
104 ; path to binaries for vcsserver, it should be set by the installer
115 ; path to binaries for vcsserver, it should be set by the installer
105 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
116 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
106 ; it can also be a path to nix-build output in case of development
117 ; it can also be a path to nix-build output in case of development
107 core.binary_dir = ""
118 core.binary_dir = ""
108
119
@@ -116,21 +127,21 b' core.binary_dir = ""'
116
127
117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
129 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
119 cache_dir = %(here)s/data
130 #cache_dir = %(here)s/data
120
131
121 ; ***************************************
132 ; ***************************************
122 ; `repo_object` cache, default file based
133 ; `repo_object` cache, default file based
123 ; ***************************************
134 ; ***************************************
124
135
125 ; `repo_object` cache settings for vcs methods for repositories
136 ; `repo_object` cache settings for vcs methods for repositories
126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127
138
128 ; cache auto-expires after N seconds
139 ; cache auto-expires after N seconds
129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 rc_cache.repo_object.expiration_time = 2592000
141 #rc_cache.repo_object.expiration_time = 2592000
131
142
132 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
143 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
134
145
135 ; ***********************************************************
146 ; ***********************************************************
136 ; `repo_object` cache with redis backend
147 ; `repo_object` cache with redis backend
@@ -154,10 +165,32 b' rc_cache.repo_object.expiration_time = 2'
154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 #rc_cache.repo_object.arguments.distributed_lock = true
166 #rc_cache.repo_object.arguments.distributed_lock = true
156
167
168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
170
171 ; Statsd client config; this is used to send metrics to statsd
172 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
173 #statsd.enabled = false
174 #statsd.statsd_host = 0.0.0.0
175 #statsd.statsd_port = 8125
176 #statsd.statsd_prefix =
177 #statsd.statsd_ipv6 = false
178
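For reference, a minimal emit with the `statsd` PyPI client (a sketch only; vcsserver wires its own client from the options above, and the host/port here are hypothetical):

    import statsd

    client = statsd.StatsClient('127.0.0.1', 8125, prefix='vcsserver')
    client.incr('requests_total')          # counter
    client.timing('request_time_ms', 120)  # timer, in milliseconds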
179 ; Logging is configured automatically at server startup. Set to false
180 ; to use the custom logging config below instead.
181 ; RC_LOGGING_FORMATTER
182 ; RC_LOGGING_LEVEL
183 ; env variables control the logging settings in autoconfigure mode.
184
185 #logging.autoconfigure = true
186
187 ; specify your own custom logging config file to configure logging
188 #logging.logging_conf_file = /path/to/custom_logging.ini
157
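In autoconfigure mode those two env variables are all that is needed; for instance, set before startup (illustrative values):

    import os
    os.environ['RC_LOGGING_LEVEL'] = 'INFO'
    os.environ['RC_LOGGING_FORMATTER'] = 'json'  # or 'generic'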
189
158 ; #####################
190 ; #####################
159 ; LOGGING CONFIGURATION
191 ; LOGGING CONFIGURATION
160 ; #####################
192 ; #####################
193
161 [loggers]
194 [loggers]
162 keys = root, vcsserver
195 keys = root, vcsserver
163
196
@@ -165,7 +198,7 b' keys = root, vcsserver'
165 keys = console
198 keys = console
166
199
167 [formatters]
200 [formatters]
168 keys = generic
201 keys = generic, json
169
202
170 ; #######
203 ; #######
171 ; LOGGERS
204 ; LOGGERS
@@ -175,12 +208,11 b' level = NOTSET'
175 handlers = console
208 handlers = console
176
209
177 [logger_vcsserver]
210 [logger_vcsserver]
178 level = DEBUG
211 level = INFO
179 handlers =
212 handlers =
180 qualname = vcsserver
213 qualname = vcsserver
181 propagate = 1
214 propagate = 1
182
215
183
184 ; ########
216 ; ########
185 ; HANDLERS
217 ; HANDLERS
186 ; ########
218 ; ########
@@ -189,6 +221,8 b' propagate = 1'
189 class = StreamHandler
221 class = StreamHandler
190 args = (sys.stderr, )
222 args = (sys.stderr, )
191 level = INFO
223 level = INFO
224 ; To enable JSON formatted logs replace 'generic' with 'json'
225 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
192 formatter = generic
226 formatter = generic
193
227
194 ; ##########
228 ; ##########
@@ -198,3 +232,7 b' formatter = generic'
198 [formatter_generic]
232 [formatter_generic]
199 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
233 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
200 datefmt = %Y-%m-%d %H:%M:%S
234 datefmt = %Y-%m-%d %H:%M:%S
235
236 [formatter_json]
237 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
238 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
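The vendored JsonFormatter appears to follow the python-json-logger API; a rough stand-alone equivalent (a sketch, using the PyPI package as a stand-in for the vendored module):

    import logging
    from pythonjsonlogger import jsonlogger  # stand-in for vcsserver.lib._vendor.jsonlogger

    handler = logging.StreamHandler()
    handler.setFormatter(jsonlogger.JsonFormatter('%(levelname)s %(name)s %(message)s'))
    log = logging.getLogger('vcsserver')
    log.addHandler(handler)
    log.warning('demo')  # -> {"levelname": "WARNING", "name": "vcsserver", "message": "demo"}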
@@ -1,4 +1,5 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2 # RhodeCode VCSServer provides access to different vcs backends via network.
3 # RhodeCode VCSServer provides access to different vcs backends via network.
3 # Copyright (C) 2014-2019 RhodeCode GmbH
4 # Copyright (C) 2014-2019 RhodeCode GmbH
4 #
5 #
@@ -20,20 +21,47 b''
20 from setuptools import setup, find_packages
21 from setuptools import setup, find_packages
21
22
22 import os
23 import os
24 import re
23 import sys
25 import sys
24 import pkgutil
26 import pkgutil
25 import platform
27 import platform
26 import codecs
28 import codecs
27
29
28 try: # for pip >= 10
29 from pip._internal.req import parse_requirements
30 except ImportError: # for pip <= 9.0.3
31 from pip.req import parse_requirements
32
33 try: # for pip >= 10
34 from pip._internal.download import PipSession
35 except ImportError: # for pip <= 9.0.3
36 from pip.download import PipSession
30 import pip
31
32 pip_major_version = int(pip.__version__.split(".")[0])
33 if pip_major_version >= 20:
34 from pip._internal.req import parse_requirements
35 from pip._internal.network.session import PipSession
36 elif pip_major_version >= 10:
37 from pip._internal.req import parse_requirements
38 from pip._internal.download import PipSession
39 else:
40 from pip.req import parse_requirements
41 from pip.download import PipSession
42
43
44 def get_package_name(req_object):
45 package_name = None
46 try:
47 from pip._internal.req.constructors import install_req_from_parsed_requirement
48 except ImportError:
49 install_req_from_parsed_requirement = None
50
51 # In 20.1 of pip, the requirements object changed
52 if hasattr(req_object, 'req'):
53 package_name = req_object.req.name
54
55 if package_name is None:
56 if install_req_from_parsed_requirement:
57 package = install_req_from_parsed_requirement(req_object)
58 package_name = package.req.name
59
60 if package_name is None:
61 # fallback for older pip
62 package_name = re.split('===|<=|!=|==|>=|~=|<|>', req_object.requirement)[0]
63
64 return package_name
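The regex fallback simply cuts the name off at the first version specifier; for example:

    import re

    SPEC_SPLIT = '===|<=|!=|==|>=|~=|<|>'
    assert re.split(SPEC_SPLIT, 'requests>=2.25.1')[0] == 'requests'
    assert re.split(SPEC_SPLIT, 'pytest==4.6.9')[0] == 'pytest'
    assert re.split(SPEC_SPLIT, 'zope.interface~=5.0')[0] == 'zope.interface'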
37
65
38
66
39 if sys.version_info < (2, 7):
67 if sys.version_info < (2, 7):
@@ -61,14 +89,15 b' def _get_requirements(req_filename, excl'
61 parsed = parse_requirements(os.path.join(here, req_filename))
89 parsed = parse_requirements(os.path.join(here, req_filename))
62
90
63 requirements = []
91 requirements = []
64 for ir in parsed:
92 for int_req in parsed:
65 if ir.req and ir.name not in exclude:
93 req_name = get_package_name(int_req)
66 requirements.append(str(ir.req))
94 if req_name not in exclude:
95 requirements.append(req_name)
67 return requirements + extras
96 return requirements + extras
68
97
69
98
70 # requirements extract
99 # requirements extract
71 setup_requirements = ['pytest-runner']
100 setup_requirements = []
72 install_requirements = _get_requirements(
101 install_requirements = _get_requirements(
73 'requirements.txt', exclude=['setuptools'])
102 'requirements.txt', exclude=['setuptools'])
74 test_requirements = _get_requirements(
103 test_requirements = _get_requirements(
@@ -14,13 +14,18 b''
14 # You should have received a copy of the GNU General Public License
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17 import os
18 import sys
18 import sys
19 import traceback
19 import traceback
20 import logging
20 import logging
21 import urllib.parse
21 import urllib.parse
22
22
23 from vcsserver.lib.rc_cache import region_meta
23 from vcsserver.lib.rc_cache import region_meta
24
25 from vcsserver import exceptions
26 from vcsserver.exceptions import NoContentException
27 from vcsserver.hgcompat import (archival)
28
24 log = logging.getLogger(__name__)
29 log = logging.getLogger(__name__)
25
30
26
31
@@ -74,3 +79,54 b' def raise_from_original(new_type):'
74 raise new_exc.with_traceback(exc_traceback)
79 raise new_exc.with_traceback(exc_traceback)
75 finally:
80 finally:
76 del exc_traceback
81 del exc_traceback
82
83
84 class ArchiveNode(object):
85 def __init__(self, path, mode, is_link, raw_bytes):
86 self.path = path
87 self.mode = mode
88 self.is_link = is_link
89 self.raw_bytes = raw_bytes
90
91
92 def archive_repo(walker, archive_dest_path, kind, mtime, archive_at_path,
93 archive_dir_name, commit_id, write_metadata=True, extra_metadata=None):
94 """
95 walker should be a file walker, for example:
96 def walker():
97 for file_info in files:
98 yield ArchiveNode(fn, mode, is_link, ctx[fn].data)
99 """
100 extra_metadata = extra_metadata or {}
101
102 if kind == "tgz":
103 archiver = archival.tarit(archive_dest_path, mtime, "gz")
104 elif kind == "tbz2":
105 archiver = archival.tarit(archive_dest_path, mtime, "bz2")
106 elif kind == 'zip':
107 archiver = archival.zipit(archive_dest_path, mtime)
108 else:
109 raise exceptions.ArchiveException()(
110 'Remote does not support: "%s" archive type.' % kind)
111
112 for f in walker(commit_id, archive_at_path):
113 f_path = os.path.join(archive_dir_name, f.path.lstrip('/'))
114 try:
115 archiver.addfile(f_path, f.mode, f.is_link, f.raw_bytes())
116 except NoContentException:
117 # NOTE(marcink): this is a special case for SVN so we can create "empty"
118 # directories which aren't supported by archiver
119 archiver.addfile(os.path.join(f_path, '.dir'), f.mode, f.is_link, '')
120
121 if write_metadata:
122 metadata = dict([
123 ('commit_id', commit_id),
124 ('mtime', mtime),
125 ])
126 metadata.update(extra_metadata)
127
128 meta = ["%s:%s" % (f_name, value) for f_name, value in metadata.items()]
129 f_path = os.path.join(archive_dir_name, '.archival.txt')
130 archiver.addfile(f_path, 0o644, False, '\n'.join(meta))
131
132 return archiver.done()
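A hypothetical call of the new helper (a sketch; paths and commit id are invented). The walker yields ArchiveNode objects whose raw_bytes is a callable, as the docstring's `ctx[fn].data` hints:

    def walker(commit_id, path):
        yield ArchiveNode('README.md', 0o644, False, lambda: b'# hello\n')
        yield ArchiveNode('bin/run', 0o755, False, lambda: b'#!/bin/sh\n')

    archive_repo(walker, '/tmp/repo.zip', 'zip', mtime=0,
                 archive_at_path='/', archive_dir_name='repo-abc123',
                 commit_id='abc123')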
@@ -119,3 +119,7 b' class HTTPRepoBranchProtected(HTTPForbid'
119
119
120 class RefNotFoundException(KeyError):
120 class RefNotFoundException(KeyError):
121 pass
121 pass
122
123
124 class NoContentException(ValueError):
125 pass
@@ -1,4 +1,4 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
1 RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 #
3 #
4 # This program is free software; you can redistribute it and/or modify
4 # This program is free software; you can redistribute it and/or modify
@@ -29,6 +29,7 b' from functools import wraps'
29 import more_itertools
29 import more_itertools
30 import pygit2
30 import pygit2
31 from pygit2 import Repository as LibGit2Repo
31 from pygit2 import Repository as LibGit2Repo
32 from pygit2 import index as LibGit2Index
32 from dulwich import index, objects
33 from dulwich import index, objects
33 from dulwich.client import HttpGitClient, LocalGitClient
34 from dulwich.client import HttpGitClient, LocalGitClient
34 from dulwich.errors import (
35 from dulwich.errors import (
@@ -40,7 +41,7 b' from dulwich.server import update_server'
40
41
41 from vcsserver import exceptions, settings, subprocessio
42 from vcsserver import exceptions, settings, subprocessio
42 from vcsserver.utils import safe_str, safe_int, safe_unicode
43 from vcsserver.utils import safe_str, safe_int, safe_unicode
43 from vcsserver.base import RepoFactory, obfuscate_qs
44 from vcsserver.base import RepoFactory, obfuscate_qs, ArchiveNode, archive_repo
44 from vcsserver.hgcompat import (
45 from vcsserver.hgcompat import (
45 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler)
46 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler)
46 from vcsserver.git_lfs.lib import LFSOidStore
47 from vcsserver.git_lfs.lib import LFSOidStore
@@ -184,7 +185,8 b' class GitRemote(RemoteBase):'
184 @reraise_safe_exceptions
185 @reraise_safe_exceptions
185 def assert_correct_path(self, wire):
186 def assert_correct_path(self, wire):
186 cache_on, context_uid, repo_id = self._cache_on(wire)
187 cache_on, context_uid, repo_id = self._cache_on(wire)
187 @self.region.conditional_cache_on_arguments(condition=cache_on)
188 region = self._region(wire)
189 @region.conditional_cache_on_arguments(condition=cache_on)
188 def _assert_correct_path(_context_uid, _repo_id):
190 def _assert_correct_path(_context_uid, _repo_id):
189 try:
191 try:
190 repo_init = self._factory.repo_libgit2(wire)
192 repo_init = self._factory.repo_libgit2(wire)
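The recurring change in this file swaps the shared `self.region` for a per-wire region resolved at call time. `conditional_cache_on_arguments` is RhodeCode's condition-aware variant of dogpile.cache's stock decorator; the underlying pattern, as a sketch with plain dogpile.cache:

    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory')

    @region.cache_on_arguments()  # stock decorator; the diff uses a condition= variant
    def expensive_lookup(repo_id):
        return {'repo_id': repo_id}  # computed once per repo_id, then served from cache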
@@ -216,7 +218,8 b' class GitRemote(RemoteBase):'
216 @reraise_safe_exceptions
218 @reraise_safe_exceptions
217 def blob_raw_length(self, wire, sha):
219 def blob_raw_length(self, wire, sha):
218 cache_on, context_uid, repo_id = self._cache_on(wire)
220 cache_on, context_uid, repo_id = self._cache_on(wire)
219 @self.region.conditional_cache_on_arguments(condition=cache_on)
221 region = self._region(wire)
222 @region.conditional_cache_on_arguments(condition=cache_on)
220 def _blob_raw_length(_repo_id, _sha):
223 def _blob_raw_length(_repo_id, _sha):
221
224
222 repo_init = self._factory.repo_libgit2(wire)
225 repo_init = self._factory.repo_libgit2(wire)
@@ -247,7 +250,8 b' class GitRemote(RemoteBase):'
247 def is_large_file(self, wire, commit_id):
250 def is_large_file(self, wire, commit_id):
248 cache_on, context_uid, repo_id = self._cache_on(wire)
251 cache_on, context_uid, repo_id = self._cache_on(wire)
249
252
250 @self.region.conditional_cache_on_arguments(condition=cache_on)
253 region = self._region(wire)
254 @region.conditional_cache_on_arguments(condition=cache_on)
251 def _is_large_file(_repo_id, _sha):
255 def _is_large_file(_repo_id, _sha):
252 repo_init = self._factory.repo_libgit2(wire)
256 repo_init = self._factory.repo_libgit2(wire)
253 with repo_init as repo:
257 with repo_init as repo:
@@ -263,7 +267,8 b' class GitRemote(RemoteBase):'
263 def is_binary(self, wire, tree_id):
267 def is_binary(self, wire, tree_id):
264 cache_on, context_uid, repo_id = self._cache_on(wire)
268 cache_on, context_uid, repo_id = self._cache_on(wire)
265
269
266 @self.region.conditional_cache_on_arguments(condition=cache_on)
270 region = self._region(wire)
271 @region.conditional_cache_on_arguments(condition=cache_on)
267 def _is_binary(_repo_id, _tree_id):
272 def _is_binary(_repo_id, _tree_id):
268 repo_init = self._factory.repo_libgit2(wire)
273 repo_init = self._factory.repo_libgit2(wire)
269 with repo_init as repo:
274 with repo_init as repo:
@@ -305,7 +310,8 b' class GitRemote(RemoteBase):'
305 @reraise_safe_exceptions
310 @reraise_safe_exceptions
306 def bulk_request(self, wire, rev, pre_load):
311 def bulk_request(self, wire, rev, pre_load):
307 cache_on, context_uid, repo_id = self._cache_on(wire)
312 cache_on, context_uid, repo_id = self._cache_on(wire)
308 @self.region.conditional_cache_on_arguments(condition=cache_on)
313 region = self._region(wire)
314 @region.conditional_cache_on_arguments(condition=cache_on)
309 def _bulk_request(_repo_id, _rev, _pre_load):
315 def _bulk_request(_repo_id, _rev, _pre_load):
310 result = {}
316 result = {}
311 for attr in pre_load:
317 for attr in pre_load:
@@ -408,7 +414,8 b' class GitRemote(RemoteBase):'
408 @reraise_safe_exceptions
414 @reraise_safe_exceptions
409 def branch(self, wire, commit_id):
415 def branch(self, wire, commit_id):
410 cache_on, context_uid, repo_id = self._cache_on(wire)
416 cache_on, context_uid, repo_id = self._cache_on(wire)
411 @self.region.conditional_cache_on_arguments(condition=cache_on)
417 region = self._region(wire)
418 @region.conditional_cache_on_arguments(condition=cache_on)
412 def _branch(_context_uid, _repo_id, _commit_id):
419 def _branch(_context_uid, _repo_id, _commit_id):
413 regex = re.compile('^refs/heads')
420 regex = re.compile('^refs/heads')
414
421
@@ -423,7 +430,8 b' class GitRemote(RemoteBase):'
423 @reraise_safe_exceptions
430 @reraise_safe_exceptions
424 def commit_branches(self, wire, commit_id):
431 def commit_branches(self, wire, commit_id):
425 cache_on, context_uid, repo_id = self._cache_on(wire)
432 cache_on, context_uid, repo_id = self._cache_on(wire)
426 @self.region.conditional_cache_on_arguments(condition=cache_on)
433 region = self._region(wire)
434 @region.conditional_cache_on_arguments(condition=cache_on)
427 def _commit_branches(_context_uid, _repo_id, _commit_id):
435 def _commit_branches(_context_uid, _repo_id, _commit_id):
428 repo_init = self._factory.repo_libgit2(wire)
436 repo_init = self._factory.repo_libgit2(wire)
429 with repo_init as repo:
437 with repo_init as repo:
@@ -444,11 +452,22 b' class GitRemote(RemoteBase):'
444 # TODO: this is quite complex, check if that can be simplified
452 # TODO: this is quite complex, check if that can be simplified
445 @reraise_safe_exceptions
453 @reraise_safe_exceptions
446 def commit(self, wire, commit_data, branch, commit_tree, updated, removed):
454 def commit(self, wire, commit_data, branch, commit_tree, updated, removed):
455 # Defines the root tree
456 class _Root(object):
457 def __repr__(self):
458 return 'ROOT TREE'
459 ROOT = _Root()
460
447 repo = self._factory.repo(wire)
461 repo = self._factory.repo(wire)
448 object_store = repo.object_store
462 object_store = repo.object_store
449
463
450 # Create tree and populate it with blobs
464 # Create tree and populate it with blobs
451 commit_tree = commit_tree and repo[commit_tree] or objects.Tree()
465
466 if commit_tree and repo[commit_tree]:
467 git_commit = repo[commit_data['parents'][0]]
468 commit_tree = repo[git_commit.tree] # root tree
469 else:
470 commit_tree = objects.Tree()
452
471
453 for node in updated:
472 for node in updated:
454 # Compute subdirs if needed
473 # Compute subdirs if needed
@@ -507,21 +526,34 b' class GitRemote(RemoteBase):'
507
526
508 for node_path in removed:
527 for node_path in removed:
509 paths = node_path.split('/')
528 paths = node_path.split('/')
510 tree = commit_tree
529 tree = commit_tree # start with top-level
511 trees = [tree]
530 trees = [{'tree': tree, 'path': ROOT}]
512 # Traverse deep into the forest...
531 # Traverse deep into the forest...
532 # resolve final tree by iterating the path.
533 # e.g a/b/c.txt will get
534 # - root as tree then
535 # - 'a' as tree,
536 # - 'b' as tree,
537 # - stop at c as blob.
513 for path in paths:
538 for path in paths:
514 try:
539 try:
515 obj = repo[tree[path][1]]
540 obj = repo[tree[path][1]]
516 if isinstance(obj, objects.Tree):
541 if isinstance(obj, objects.Tree):
517 trees.append(obj)
542 trees.append({'tree': obj, 'path': path})
518 tree = obj
543 tree = obj
519 except KeyError:
544 except KeyError:
520 break
545 break
546 # PROBLEM:
547 """
548 We're not editing the same reference tree object
549 """
521 # Cut down the blob and all rotten trees on the way back...
550 # Cut down the blob and all rotten trees on the way back...
522 for path, tree in reversed(list(zip(paths, trees))):
551 for path, tree_data in reversed(list(zip(paths, trees))):
523 del tree[path]
552 tree = tree_data['tree']
524 if tree:
553 tree.__delitem__(path)
554 # This operation edits the tree; we need to link the new commit back to it
555
556 if len(tree) > 0:
525 # This tree still has elements - don't remove it or any
557 # This tree still has elements - don't remove it or any
526 # of it's parents
558 # of it's parents
527 break
559 break
@@ -587,7 +619,7 b' class GitRemote(RemoteBase):'
587
619
588 if refs and not update_after:
620 if refs and not update_after:
589 # mikhail: explicitly set the head to the last ref.
621 # mikhail: explicitly set the head to the last ref.
590 repo['HEAD'] = remote_refs[refs[-1]]
622 repo["HEAD"] = remote_refs[refs[-1]]
591
623
592 if update_after:
624 if update_after:
593 # we want to checkout HEAD
625 # we want to checkout HEAD
@@ -689,7 +721,8 b' class GitRemote(RemoteBase):'
689 @reraise_safe_exceptions
721 @reraise_safe_exceptions
690 def get_object(self, wire, sha, maybe_unreachable=False):
722 def get_object(self, wire, sha, maybe_unreachable=False):
691 cache_on, context_uid, repo_id = self._cache_on(wire)
723 cache_on, context_uid, repo_id = self._cache_on(wire)
692 @self.region.conditional_cache_on_arguments(condition=cache_on)
724 region = self._region(wire)
725 @region.conditional_cache_on_arguments(condition=cache_on)
693 def _get_object(_context_uid, _repo_id, _sha):
726 def _get_object(_context_uid, _repo_id, _sha):
694 repo_init = self._factory.repo_libgit2(wire)
727 repo_init = self._factory.repo_libgit2(wire)
695 with repo_init as repo:
728 with repo_init as repo:
@@ -747,7 +780,8 b' class GitRemote(RemoteBase):'
747 @reraise_safe_exceptions
780 @reraise_safe_exceptions
748 def get_refs(self, wire):
781 def get_refs(self, wire):
749 cache_on, context_uid, repo_id = self._cache_on(wire)
782 cache_on, context_uid, repo_id = self._cache_on(wire)
750 @self.region.conditional_cache_on_arguments(condition=cache_on)
783 region = self._region(wire)
784 @region.conditional_cache_on_arguments(condition=cache_on)
751 def _get_refs(_context_uid, _repo_id):
785 def _get_refs(_context_uid, _repo_id):
752
786
753 repo_init = self._factory.repo_libgit2(wire)
787 repo_init = self._factory.repo_libgit2(wire)
@@ -761,7 +795,8 b' class GitRemote(RemoteBase):'
761 @reraise_safe_exceptions
795 @reraise_safe_exceptions
762 def get_branch_pointers(self, wire):
796 def get_branch_pointers(self, wire):
763 cache_on, context_uid, repo_id = self._cache_on(wire)
797 cache_on, context_uid, repo_id = self._cache_on(wire)
764 @self.region.conditional_cache_on_arguments(condition=cache_on)
798 region = self._region(wire)
799 @region.conditional_cache_on_arguments(condition=cache_on)
765 def _get_branch_pointers(_context_uid, _repo_id):
800 def _get_branch_pointers(_context_uid, _repo_id):
766
801
767 repo_init = self._factory.repo_libgit2(wire)
802 repo_init = self._factory.repo_libgit2(wire)
@@ -775,7 +810,8 b' class GitRemote(RemoteBase):'
775 @reraise_safe_exceptions
810 @reraise_safe_exceptions
776 def head(self, wire, show_exc=True):
811 def head(self, wire, show_exc=True):
777 cache_on, context_uid, repo_id = self._cache_on(wire)
812 cache_on, context_uid, repo_id = self._cache_on(wire)
778 @self.region.conditional_cache_on_arguments(condition=cache_on)
813 region = self._region(wire)
814 @region.conditional_cache_on_arguments(condition=cache_on)
779 def _head(_context_uid, _repo_id, _show_exc):
815 def _head(_context_uid, _repo_id, _show_exc):
780 repo_init = self._factory.repo_libgit2(wire)
816 repo_init = self._factory.repo_libgit2(wire)
781 with repo_init as repo:
817 with repo_init as repo:
@@ -800,7 +836,8 b' class GitRemote(RemoteBase):'
800 def revision(self, wire, rev):
836 def revision(self, wire, rev):
801
837
802 cache_on, context_uid, repo_id = self._cache_on(wire)
838 cache_on, context_uid, repo_id = self._cache_on(wire)
803 @self.region.conditional_cache_on_arguments(condition=cache_on)
839 region = self._region(wire)
840 @region.conditional_cache_on_arguments(condition=cache_on)
804 def _revision(_context_uid, _repo_id, _rev):
841 def _revision(_context_uid, _repo_id, _rev):
805 repo_init = self._factory.repo_libgit2(wire)
842 repo_init = self._factory.repo_libgit2(wire)
806 with repo_init as repo:
843 with repo_init as repo:
@@ -818,7 +855,8 b' class GitRemote(RemoteBase):'
818 @reraise_safe_exceptions
855 @reraise_safe_exceptions
819 def date(self, wire, commit_id):
856 def date(self, wire, commit_id):
820 cache_on, context_uid, repo_id = self._cache_on(wire)
857 cache_on, context_uid, repo_id = self._cache_on(wire)
821 @self.region.conditional_cache_on_arguments(condition=cache_on)
858 region = self._region(wire)
859 @region.conditional_cache_on_arguments(condition=cache_on)
822 def _date(_repo_id, _commit_id):
860 def _date(_repo_id, _commit_id):
823 repo_init = self._factory.repo_libgit2(wire)
861 repo_init = self._factory.repo_libgit2(wire)
824 with repo_init as repo:
862 with repo_init as repo:
@@ -837,7 +875,8 b' class GitRemote(RemoteBase):'
837 @reraise_safe_exceptions
875 @reraise_safe_exceptions
838 def author(self, wire, commit_id):
876 def author(self, wire, commit_id):
839 cache_on, context_uid, repo_id = self._cache_on(wire)
877 cache_on, context_uid, repo_id = self._cache_on(wire)
840 @self.region.conditional_cache_on_arguments(condition=cache_on)
878 region = self._region(wire)
879 @region.conditional_cache_on_arguments(condition=cache_on)
841 def _author(_repo_id, _commit_id):
880 def _author(_repo_id, _commit_id):
842 repo_init = self._factory.repo_libgit2(wire)
881 repo_init = self._factory.repo_libgit2(wire)
843 with repo_init as repo:
882 with repo_init as repo:
@@ -861,7 +900,8 b' class GitRemote(RemoteBase):'
861 @reraise_safe_exceptions
900 @reraise_safe_exceptions
862 def message(self, wire, commit_id):
901 def message(self, wire, commit_id):
863 cache_on, context_uid, repo_id = self._cache_on(wire)
902 cache_on, context_uid, repo_id = self._cache_on(wire)
864 @self.region.conditional_cache_on_arguments(condition=cache_on)
903 region = self._region(wire)
904 @region.conditional_cache_on_arguments(condition=cache_on)
865 def _message(_repo_id, _commit_id):
905 def _message(_repo_id, _commit_id):
866 repo_init = self._factory.repo_libgit2(wire)
906 repo_init = self._factory.repo_libgit2(wire)
867 with repo_init as repo:
907 with repo_init as repo:
@@ -872,7 +912,8 b' class GitRemote(RemoteBase):'
872 @reraise_safe_exceptions
912 @reraise_safe_exceptions
873 def parents(self, wire, commit_id):
913 def parents(self, wire, commit_id):
874 cache_on, context_uid, repo_id = self._cache_on(wire)
914 cache_on, context_uid, repo_id = self._cache_on(wire)
875 @self.region.conditional_cache_on_arguments(condition=cache_on)
915 region = self._region(wire)
916 @region.conditional_cache_on_arguments(condition=cache_on)
876 def _parents(_repo_id, _commit_id):
917 def _parents(_repo_id, _commit_id):
877 repo_init = self._factory.repo_libgit2(wire)
918 repo_init = self._factory.repo_libgit2(wire)
878 with repo_init as repo:
919 with repo_init as repo:
@@ -888,7 +929,8 b' class GitRemote(RemoteBase):'
888 @reraise_safe_exceptions
929 @reraise_safe_exceptions
889 def children(self, wire, commit_id):
930 def children(self, wire, commit_id):
890 cache_on, context_uid, repo_id = self._cache_on(wire)
931 cache_on, context_uid, repo_id = self._cache_on(wire)
891 @self.region.conditional_cache_on_arguments(condition=cache_on)
932 region = self._region(wire)
933 @region.conditional_cache_on_arguments(condition=cache_on)
892 def _children(_repo_id, _commit_id):
934 def _children(_repo_id, _commit_id):
893 output, __ = self.run_git_command(
935 output, __ = self.run_git_command(
894 wire, ['rev-list', '--all', '--children'])
936 wire, ['rev-list', '--all', '--children'])
@@ -947,7 +989,8 b' class GitRemote(RemoteBase):'
947 def tree_and_type_for_path(self, wire, commit_id, path):
989 def tree_and_type_for_path(self, wire, commit_id, path):
948
990
949 cache_on, context_uid, repo_id = self._cache_on(wire)
991 cache_on, context_uid, repo_id = self._cache_on(wire)
950 @self.region.conditional_cache_on_arguments(condition=cache_on)
992 region = self._region(wire)
993 @region.conditional_cache_on_arguments(condition=cache_on)
951 def _tree_and_type_for_path(_context_uid, _repo_id, _commit_id, _path):
994 def _tree_and_type_for_path(_context_uid, _repo_id, _commit_id, _path):
952 repo_init = self._factory.repo_libgit2(wire)
995 repo_init = self._factory.repo_libgit2(wire)
953
996
@@ -964,7 +1007,8 b' class GitRemote(RemoteBase):'
964 @reraise_safe_exceptions
1007 @reraise_safe_exceptions
965 def tree_items(self, wire, tree_id):
1008 def tree_items(self, wire, tree_id):
966 cache_on, context_uid, repo_id = self._cache_on(wire)
1009 cache_on, context_uid, repo_id = self._cache_on(wire)
967 @self.region.conditional_cache_on_arguments(condition=cache_on)
1010 region = self._region(wire)
1011 @region.conditional_cache_on_arguments(condition=cache_on)
968 def _tree_items(_repo_id, _tree_id):
1012 def _tree_items(_repo_id, _tree_id):
969
1013
970 repo_init = self._factory.repo_libgit2(wire)
1014 repo_init = self._factory.repo_libgit2(wire)
@@ -1065,7 +1109,8 b' class GitRemote(RemoteBase):'
1065 @reraise_safe_exceptions
1109 @reraise_safe_exceptions
1066 def node_history(self, wire, commit_id, path, limit):
1110 def node_history(self, wire, commit_id, path, limit):
1067 cache_on, context_uid, repo_id = self._cache_on(wire)
1111 cache_on, context_uid, repo_id = self._cache_on(wire)
1068 @self.region.conditional_cache_on_arguments(condition=cache_on)
1112 region = self._region(wire)
1113 @region.conditional_cache_on_arguments(condition=cache_on)
1069 def _node_history(_context_uid, _repo_id, _commit_id, _path, _limit):
1114 def _node_history(_context_uid, _repo_id, _commit_id, _path, _limit):
1070 # optimize for n==1, rev-list is much faster for that use-case
1115 # optimize for n==1, rev-list is much faster for that use-case
1071 if limit == 1:
1116 if limit == 1:
@@ -1107,7 +1152,8 b' class GitRemote(RemoteBase):'
1107 def get_all_commit_ids(self, wire):
1152 def get_all_commit_ids(self, wire):
1108
1153
1109 cache_on, context_uid, repo_id = self._cache_on(wire)
1154 cache_on, context_uid, repo_id = self._cache_on(wire)
1110 @self.region.conditional_cache_on_arguments(condition=cache_on)
1155 region = self._region(wire)
1156 @region.conditional_cache_on_arguments(condition=cache_on)
1111 def _get_all_commit_ids(_context_uid, _repo_id):
1157 def _get_all_commit_ids(_context_uid, _repo_id):
1112
1158
1113 cmd = ['rev-list', '--reverse', '--date-order', '--branches', '--tags']
1159 cmd = ['rev-list', '--reverse', '--date-order', '--branches', '--tags']
@@ -1190,3 +1236,46 b' class GitRemote(RemoteBase):'
1190 'pre_version': get_git_pre_hook_version(path, bare),
1236 'pre_version': get_git_pre_hook_version(path, bare),
1191 'post_version': get_git_post_hook_version(path, bare),
1237 'post_version': get_git_post_hook_version(path, bare),
1192 }
1238 }
1239
1240 @reraise_safe_exceptions
1241 def set_head_ref(self, wire, head_name):
1242 log.debug('Setting refs/head to `%s`', head_name)
1243 cmd = ['symbolic-ref', '"HEAD"', '"refs/heads/%s"' % head_name]
1244 output, __ = self.run_git_command(wire, cmd)
1245 return [head_name] + output.splitlines()
1246
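The plain-git equivalent of `set_head_ref` (a sketch; repo path and branch name are hypothetical):

    import subprocess

    subprocess.run(
        ['git', 'symbolic-ref', 'HEAD', 'refs/heads/main'],
        cwd='/path/to/repo.git', check=True)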
1247 @reraise_safe_exceptions
1248 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1249 archive_dir_name, commit_id):
1250
1251 def file_walker(_commit_id, path):
1252 repo_init = self._factory.repo_libgit2(wire)
1253
1254 with repo_init as repo:
1255 commit = repo[commit_id]
1256
1257 if path in ['', '/']:
1258 tree = commit.tree
1259 else:
1260 tree = commit.tree[path.rstrip('/')]
1261 tree_id = tree.id.hex
1262 try:
1263 tree = repo[tree_id]
1264 except KeyError:
1265 raise ObjectMissing('No tree with id: {}'.format(tree_id))
1266
1267 index = LibGit2Index.Index()
1268 index.read_tree(tree)
1269 file_iter = index
1270
1271 for fn in file_iter:
1272 file_path = fn.path
1273 mode = fn.mode
1274 is_link = stat.S_ISLNK(mode)
1275 if mode == pygit2.GIT_FILEMODE_COMMIT:
1276 log.debug('Skipping path %s as a commit node', file_path)
1277 continue
1278 yield ArchiveNode(file_path, mode, is_link, repo[fn.hex].read_raw)
1279
1280 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1281 archive_dir_name, commit_id)
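The walker above flattens a commit tree into a file list via an in-memory index; the same trick with plain pygit2 (a sketch, paths are hypothetical):

    import pygit2

    repo = pygit2.Repository('/path/to/repo.git')
    commit = repo.revparse_single('HEAD')
    index = pygit2.Index()        # in-memory index, not the repo's
    index.read_tree(commit.tree)  # recursively flattens the tree
    for entry in index:
        print(entry.path, oct(entry.mode))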
@@ -14,9 +14,10 b''
14 # You should have received a copy of the GNU General Public License
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17 import functools
18 import io
18 import io
19 import logging
19 import logging
20 import os
20 import stat
21 import stat
21 import urllib.request, urllib.parse, urllib.error
22 import urllib.request, urllib.parse, urllib.error
22 import urllib.request, urllib.error, urllib.parse
23 import urllib.request, urllib.error, urllib.parse
@@ -31,13 +32,14 b' from mercurial import repair'
31
32
32 import vcsserver
33 import vcsserver
33 from vcsserver import exceptions
34 from vcsserver import exceptions
34 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original
35 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original, archive_repo, ArchiveNode
35 from vcsserver.hgcompat import (
36 from vcsserver.hgcompat import (
36 archival, bin, clone, config as hgconfig, diffopts, hex, get_ctx,
37 archival, bin, clone, config as hgconfig, diffopts, hex, get_ctx,
37 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler,
38 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler,
38 makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge,
39 makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge,
39 patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError,
40 patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError,
40 RepoLookupError, InterventionRequired, RequirementError)
41 RepoLookupError, InterventionRequired, RequirementError,
42 alwaysmatcher, patternmatcher, hgutil)
41 from vcsserver.vcs_base import RemoteBase
43 from vcsserver.vcs_base import RemoteBase
42
44
43 log = logging.getLogger(__name__)
45 log = logging.getLogger(__name__)
@@ -205,25 +207,10 b' class HgRemote(RemoteBase):'
205 return False
207 return False
206
208
207 @reraise_safe_exceptions
209 @reraise_safe_exceptions
208 def archive_repo(self, archive_path, mtime, file_info, kind):
209 if kind == "tgz":
210 archiver = archival.tarit(archive_path, mtime, "gz")
211 elif kind == "tbz2":
212 archiver = archival.tarit(archive_path, mtime, "bz2")
213 elif kind == 'zip':
214 archiver = archival.zipit(archive_path, mtime)
215 else:
216 raise exceptions.ArchiveException()(
217 'Remote does not support: "%s".' % kind)
218
219 for f_path, f_mode, f_is_link, f_content in file_info:
220 archiver.addfile(f_path, f_mode, f_is_link, f_content)
221 archiver.done()
222
223 @reraise_safe_exceptions
224 def bookmarks(self, wire):
210 def bookmarks(self, wire):
225 cache_on, context_uid, repo_id = self._cache_on(wire)
211 cache_on, context_uid, repo_id = self._cache_on(wire)
226 @self.region.conditional_cache_on_arguments(condition=cache_on)
212 region = self._region(wire)
213 @region.conditional_cache_on_arguments(condition=cache_on)
227 def _bookmarks(_context_uid, _repo_id):
214 def _bookmarks(_context_uid, _repo_id):
228 repo = self._factory.repo(wire)
215 repo = self._factory.repo(wire)
229 return dict(repo._bookmarks)
216 return dict(repo._bookmarks)
@@ -233,7 +220,8 b' class HgRemote(RemoteBase):'
233 @reraise_safe_exceptions
220 @reraise_safe_exceptions
234 def branches(self, wire, normal, closed):
221 def branches(self, wire, normal, closed):
235 cache_on, context_uid, repo_id = self._cache_on(wire)
222 cache_on, context_uid, repo_id = self._cache_on(wire)
236 @self.region.conditional_cache_on_arguments(condition=cache_on)
223 region = self._region(wire)
224 @region.conditional_cache_on_arguments(condition=cache_on)
237 def _branches(_context_uid, _repo_id, _normal, _closed):
225 def _branches(_context_uid, _repo_id, _normal, _closed):
238 repo = self._factory.repo(wire)
226 repo = self._factory.repo(wire)
239 iter_branches = repo.branchmap().iterbranches()
227 iter_branches = repo.branchmap().iterbranches()
@@ -251,7 +239,8 b' class HgRemote(RemoteBase):'
251 @reraise_safe_exceptions
239 @reraise_safe_exceptions
252 def bulk_request(self, wire, commit_id, pre_load):
240 def bulk_request(self, wire, commit_id, pre_load):
253 cache_on, context_uid, repo_id = self._cache_on(wire)
241 cache_on, context_uid, repo_id = self._cache_on(wire)
254 @self.region.conditional_cache_on_arguments(condition=cache_on)
242 region = self._region(wire)
243 @region.conditional_cache_on_arguments(condition=cache_on)
255 def _bulk_request(_repo_id, _commit_id, _pre_load):
244 def _bulk_request(_repo_id, _commit_id, _pre_load):
256 result = {}
245 result = {}
257 for attr in pre_load:
246 for attr in pre_load:
@@ -268,7 +257,8 b' class HgRemote(RemoteBase):'
268 @reraise_safe_exceptions
257 @reraise_safe_exceptions
269 def ctx_branch(self, wire, commit_id):
258 def ctx_branch(self, wire, commit_id):
270 cache_on, context_uid, repo_id = self._cache_on(wire)
259 cache_on, context_uid, repo_id = self._cache_on(wire)
271 @self.region.conditional_cache_on_arguments(condition=cache_on)
260 region = self._region(wire)
261 @region.conditional_cache_on_arguments(condition=cache_on)
272 def _ctx_branch(_repo_id, _commit_id):
262 def _ctx_branch(_repo_id, _commit_id):
273 repo = self._factory.repo(wire)
263 repo = self._factory.repo(wire)
274 ctx = self._get_ctx(repo, commit_id)
264 ctx = self._get_ctx(repo, commit_id)
@@ -278,7 +268,8 b' class HgRemote(RemoteBase):'
278 @reraise_safe_exceptions
268 @reraise_safe_exceptions
279 def ctx_date(self, wire, commit_id):
269 def ctx_date(self, wire, commit_id):
280 cache_on, context_uid, repo_id = self._cache_on(wire)
270 cache_on, context_uid, repo_id = self._cache_on(wire)
281 @self.region.conditional_cache_on_arguments(condition=cache_on)
271 region = self._region(wire)
272 @region.conditional_cache_on_arguments(condition=cache_on)
282 def _ctx_date(_repo_id, _commit_id):
273 def _ctx_date(_repo_id, _commit_id):
283 repo = self._factory.repo(wire)
274 repo = self._factory.repo(wire)
284 ctx = self._get_ctx(repo, commit_id)
275 ctx = self._get_ctx(repo, commit_id)
@@ -294,7 +285,8 b' class HgRemote(RemoteBase):'
294 @reraise_safe_exceptions
285 @reraise_safe_exceptions
295 def ctx_files(self, wire, commit_id):
286 def ctx_files(self, wire, commit_id):
296 cache_on, context_uid, repo_id = self._cache_on(wire)
287 cache_on, context_uid, repo_id = self._cache_on(wire)
297 @self.region.conditional_cache_on_arguments(condition=cache_on)
288 region = self._region(wire)
289 @region.conditional_cache_on_arguments(condition=cache_on)
298 def _ctx_files(_repo_id, _commit_id):
290 def _ctx_files(_repo_id, _commit_id):
299 repo = self._factory.repo(wire)
291 repo = self._factory.repo(wire)
300 ctx = self._get_ctx(repo, commit_id)
292 ctx = self._get_ctx(repo, commit_id)
@@ -311,7 +303,8 b' class HgRemote(RemoteBase):'
311 @reraise_safe_exceptions
303 @reraise_safe_exceptions
312 def ctx_parents(self, wire, commit_id):
304 def ctx_parents(self, wire, commit_id):
313 cache_on, context_uid, repo_id = self._cache_on(wire)
305 cache_on, context_uid, repo_id = self._cache_on(wire)
314 @self.region.conditional_cache_on_arguments(condition=cache_on)
306 region = self._region(wire)
307 @region.conditional_cache_on_arguments(condition=cache_on)
315 def _ctx_parents(_repo_id, _commit_id):
308 def _ctx_parents(_repo_id, _commit_id):
316 repo = self._factory.repo(wire)
309 repo = self._factory.repo(wire)
317 ctx = self._get_ctx(repo, commit_id)
310 ctx = self._get_ctx(repo, commit_id)
@@ -323,7 +316,8 b' class HgRemote(RemoteBase):'
323 @reraise_safe_exceptions
316 @reraise_safe_exceptions
324 def ctx_children(self, wire, commit_id):
317 def ctx_children(self, wire, commit_id):
325 cache_on, context_uid, repo_id = self._cache_on(wire)
318 cache_on, context_uid, repo_id = self._cache_on(wire)
326 @self.region.conditional_cache_on_arguments(condition=cache_on)
319 region = self._region(wire)
320 @region.conditional_cache_on_arguments(condition=cache_on)
327 def _ctx_children(_repo_id, _commit_id):
321 def _ctx_children(_repo_id, _commit_id):
328 repo = self._factory.repo(wire)
322 repo = self._factory.repo(wire)
329 ctx = self._get_ctx(repo, commit_id)
323 ctx = self._get_ctx(repo, commit_id)
@@ -335,7 +329,8 b' class HgRemote(RemoteBase):'
335 @reraise_safe_exceptions
329 @reraise_safe_exceptions
336 def ctx_phase(self, wire, commit_id):
330 def ctx_phase(self, wire, commit_id):
337 cache_on, context_uid, repo_id = self._cache_on(wire)
331 cache_on, context_uid, repo_id = self._cache_on(wire)
338 @self.region.conditional_cache_on_arguments(condition=cache_on)
332 region = self._region(wire)
333 @region.conditional_cache_on_arguments(condition=cache_on)
339 def _ctx_phase(_context_uid, _repo_id, _commit_id):
334 def _ctx_phase(_context_uid, _repo_id, _commit_id):
340 repo = self._factory.repo(wire)
335 repo = self._factory.repo(wire)
341 ctx = self._get_ctx(repo, commit_id)
336 ctx = self._get_ctx(repo, commit_id)
@@ -346,7 +341,8 b' class HgRemote(RemoteBase):'
346 @reraise_safe_exceptions
341 @reraise_safe_exceptions
347 def ctx_obsolete(self, wire, commit_id):
342 def ctx_obsolete(self, wire, commit_id):
348 cache_on, context_uid, repo_id = self._cache_on(wire)
343 cache_on, context_uid, repo_id = self._cache_on(wire)
349 @self.region.conditional_cache_on_arguments(condition=cache_on)
344 region = self._region(wire)
345 @region.conditional_cache_on_arguments(condition=cache_on)
350 def _ctx_obsolete(_context_uid, _repo_id, _commit_id):
346 def _ctx_obsolete(_context_uid, _repo_id, _commit_id):
351 repo = self._factory.repo(wire)
347 repo = self._factory.repo(wire)
352 ctx = self._get_ctx(repo, commit_id)
348 ctx = self._get_ctx(repo, commit_id)
@@ -356,7 +352,8 b' class HgRemote(RemoteBase):'
356 @reraise_safe_exceptions
352 @reraise_safe_exceptions
357 def ctx_hidden(self, wire, commit_id):
353 def ctx_hidden(self, wire, commit_id):
358 cache_on, context_uid, repo_id = self._cache_on(wire)
354 cache_on, context_uid, repo_id = self._cache_on(wire)
359 @self.region.conditional_cache_on_arguments(condition=cache_on)
355 region = self._region(wire)
356 @region.conditional_cache_on_arguments(condition=cache_on)
360 def _ctx_hidden(_context_uid, _repo_id, _commit_id):
357 def _ctx_hidden(_context_uid, _repo_id, _commit_id):
361 repo = self._factory.repo(wire)
358 repo = self._factory.repo(wire)
362 ctx = self._get_ctx(repo, commit_id)
359 ctx = self._get_ctx(repo, commit_id)
@@ -469,7 +466,8 b' class HgRemote(RemoteBase):'
469 @reraise_safe_exceptions
466 @reraise_safe_exceptions
470 def node_history(self, wire, revision, path, limit):
467 def node_history(self, wire, revision, path, limit):
471 cache_on, context_uid, repo_id = self._cache_on(wire)
468 cache_on, context_uid, repo_id = self._cache_on(wire)
472 @self.region.conditional_cache_on_arguments(condition=cache_on)
469 region = self._region(wire)
470 @region.conditional_cache_on_arguments(condition=cache_on)
473 def _node_history(_context_uid, _repo_id, _revision, _path, _limit):
471 def _node_history(_context_uid, _repo_id, _revision, _path, _limit):
474 repo = self._factory.repo(wire)
472 repo = self._factory.repo(wire)
475
473
@@ -499,7 +497,8 b' class HgRemote(RemoteBase):'
499 @reraise_safe_exceptions
497 @reraise_safe_exceptions
500 def node_history_untill(self, wire, revision, path, limit):
498 def node_history_untill(self, wire, revision, path, limit):
501 cache_on, context_uid, repo_id = self._cache_on(wire)
499 cache_on, context_uid, repo_id = self._cache_on(wire)
502 @self.region.conditional_cache_on_arguments(condition=cache_on)
500 region = self._region(wire)
501 @region.conditional_cache_on_arguments(condition=cache_on)
503 def _node_history_until(_context_uid, _repo_id):
502 def _node_history_until(_context_uid, _repo_id):
504 repo = self._factory.repo(wire)
503 repo = self._factory.repo(wire)
505 ctx = self._get_ctx(repo, revision)
504 ctx = self._get_ctx(repo, revision)
@@ -537,7 +536,8 b' class HgRemote(RemoteBase):'
537 @reraise_safe_exceptions
536 @reraise_safe_exceptions
538 def fctx_flags(self, wire, commit_id, path):
537 def fctx_flags(self, wire, commit_id, path):
539 cache_on, context_uid, repo_id = self._cache_on(wire)
538 cache_on, context_uid, repo_id = self._cache_on(wire)
540 @self.region.conditional_cache_on_arguments(condition=cache_on)
539 region = self._region(wire)
540 @region.conditional_cache_on_arguments(condition=cache_on)
541 def _fctx_flags(_repo_id, _commit_id, _path):
541 def _fctx_flags(_repo_id, _commit_id, _path):
542 repo = self._factory.repo(wire)
542 repo = self._factory.repo(wire)
543 ctx = self._get_ctx(repo, commit_id)
543 ctx = self._get_ctx(repo, commit_id)
@@ -549,7 +549,8 b' class HgRemote(RemoteBase):'
549 @reraise_safe_exceptions
549 @reraise_safe_exceptions
550 def fctx_size(self, wire, commit_id, path):
550 def fctx_size(self, wire, commit_id, path):
551 cache_on, context_uid, repo_id = self._cache_on(wire)
551 cache_on, context_uid, repo_id = self._cache_on(wire)
552 @self.region.conditional_cache_on_arguments(condition=cache_on)
552 region = self._region(wire)
553 @region.conditional_cache_on_arguments(condition=cache_on)
553 def _fctx_size(_repo_id, _revision, _path):
554 def _fctx_size(_repo_id, _revision, _path):
554 repo = self._factory.repo(wire)
555 repo = self._factory.repo(wire)
555 ctx = self._get_ctx(repo, commit_id)
556 ctx = self._get_ctx(repo, commit_id)
@@ -560,7 +561,8 b' class HgRemote(RemoteBase):'
560 @reraise_safe_exceptions
561 @reraise_safe_exceptions
561 def get_all_commit_ids(self, wire, name):
562 def get_all_commit_ids(self, wire, name):
562 cache_on, context_uid, repo_id = self._cache_on(wire)
563 cache_on, context_uid, repo_id = self._cache_on(wire)
563 @self.region.conditional_cache_on_arguments(condition=cache_on)
564 region = self._region(wire)
565 @region.conditional_cache_on_arguments(condition=cache_on)
564 def _get_all_commit_ids(_context_uid, _repo_id, _name):
566 def _get_all_commit_ids(_context_uid, _repo_id, _name):
565 repo = self._factory.repo(wire)
567 repo = self._factory.repo(wire)
566 repo = repo.filtered(name)
568 repo = repo.filtered(name)
@@ -576,7 +578,8 b' class HgRemote(RemoteBase):'
576 @reraise_safe_exceptions
578 @reraise_safe_exceptions
577 def is_large_file(self, wire, commit_id, path):
579 def is_large_file(self, wire, commit_id, path):
578 cache_on, context_uid, repo_id = self._cache_on(wire)
580 cache_on, context_uid, repo_id = self._cache_on(wire)
579 @self.region.conditional_cache_on_arguments(condition=cache_on)
581 region = self._region(wire)
582 @region.conditional_cache_on_arguments(condition=cache_on)
580 def _is_large_file(_context_uid, _repo_id, _commit_id, _path):
583 def _is_large_file(_context_uid, _repo_id, _commit_id, _path):
581 return largefiles.lfutil.isstandin(path)
584 return largefiles.lfutil.isstandin(path)
582
585
@@ -586,7 +589,8 b' class HgRemote(RemoteBase):'
586 def is_binary(self, wire, revision, path):
589 def is_binary(self, wire, revision, path):
587 cache_on, context_uid, repo_id = self._cache_on(wire)
590 cache_on, context_uid, repo_id = self._cache_on(wire)
588
591
589 @self.region.conditional_cache_on_arguments(condition=cache_on)
592 region = self._region(wire)
593 @region.conditional_cache_on_arguments(condition=cache_on)
590 def _is_binary(_repo_id, _sha, _path):
594 def _is_binary(_repo_id, _sha, _path):
591 repo = self._factory.repo(wire)
595 repo = self._factory.repo(wire)
592 ctx = self._get_ctx(repo, revision)
596 ctx = self._get_ctx(repo, revision)
@@ -623,7 +627,9 b' class HgRemote(RemoteBase):'
623 @reraise_safe_exceptions
627 @reraise_safe_exceptions
624 def lookup(self, wire, revision, both):
628 def lookup(self, wire, revision, both):
625 cache_on, context_uid, repo_id = self._cache_on(wire)
629 cache_on, context_uid, repo_id = self._cache_on(wire)
626 @self.region.conditional_cache_on_arguments(condition=cache_on)
630
631 region = self._region(wire)
632 @region.conditional_cache_on_arguments(condition=cache_on)
627 def _lookup(_context_uid, _repo_id, _revision, _both):
633 def _lookup(_context_uid, _repo_id, _revision, _both):
628
634
629 repo = self._factory.repo(wire)
635 repo = self._factory.repo(wire)
@@ -681,7 +687,8 b' class HgRemote(RemoteBase):'
681 def rev_range(self, wire, commit_filter):
687 def rev_range(self, wire, commit_filter):
682 cache_on, context_uid, repo_id = self._cache_on(wire)
688 cache_on, context_uid, repo_id = self._cache_on(wire)
683
689
684 @self.region.conditional_cache_on_arguments(condition=cache_on)
690 region = self._region(wire)
691 @region.conditional_cache_on_arguments(condition=cache_on)
685 def _rev_range(_context_uid, _repo_id, _filter):
692 def _rev_range(_context_uid, _repo_id, _filter):
686 repo = self._factory.repo(wire)
693 repo = self._factory.repo(wire)
687 revisions = [rev for rev in revrange(repo, commit_filter)]
694 revisions = [rev for rev in revrange(repo, commit_filter)]
@@ -756,7 +763,8 b' class HgRemote(RemoteBase):'
756 @reraise_safe_exceptions
763 @reraise_safe_exceptions
757 def tags(self, wire):
764 def tags(self, wire):
758 cache_on, context_uid, repo_id = self._cache_on(wire)
765 cache_on, context_uid, repo_id = self._cache_on(wire)
759 @self.region.conditional_cache_on_arguments(condition=cache_on)
766 region = self._region(wire)
767 @region.conditional_cache_on_arguments(condition=cache_on)
760 def _tags(_context_uid, _repo_id):
768 def _tags(_context_uid, _repo_id):
761 repo = self._factory.repo(wire)
769 repo = self._factory.repo(wire)
762 return repo.tags()
770 return repo.tags()
@@ -1007,3 +1015,33 b' class HgRemote(RemoteBase):'
1007 'pre_version': vcsserver.__version__,
1015 'pre_version': vcsserver.__version__,
1008 'post_version': vcsserver.__version__,
1016 'post_version': vcsserver.__version__,
1009 }
1017 }
1018
1019 @reraise_safe_exceptions
1020 def set_head_ref(self, wire, head_name):
1021 pass
1022
1023 @reraise_safe_exceptions
1024 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1025 archive_dir_name, commit_id):
1026
1027 def file_walker(_commit_id, path):
1028 repo = self._factory.repo(wire)
1029 ctx = repo[_commit_id]
1030 is_root = path in ['', '/']
1031 if is_root:
1032 matcher = alwaysmatcher(badfn=None)
1033 else:
1034 matcher = patternmatcher('', [(b'glob', path+'/**', b'')], badfn=None)
1035 file_iter = ctx.manifest().walk(matcher)
1036
1037 for fn in file_iter:
1038 file_path = fn
1039 flags = ctx.flags(fn)
1040 mode = b'x' in flags and 0o755 or 0o644
1041 is_link = b'l' in flags
1042
1043 yield ArchiveNode(file_path, mode, is_link, ctx[fn].data)
1044
1045 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1046 archive_dir_name, commit_id)
1047
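The Mercurial counterpart walks the manifest with a matcher; roughly (a sketch to run against a local repo; matcher signatures vary across hg versions):

    from mercurial import hg, match as matchmod, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    ctx = repo[b'tip']
    for fn in ctx.manifest().walk(matchmod.alwaysmatcher(badfn=None)):
        print(fn, ctx.flags(fn))  # b'x' -> executable, b'l' -> symlink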
@@ -38,7 +38,7 b' from mercurial import merge as hg_merge'
 from mercurial import subrepo
 from mercurial import subrepoutil
 from mercurial import tags as hg_tag
-
+from mercurial import util as hgutil
 from mercurial.commands import clone, nullid, pull
 from mercurial.context import memctx, memfilectx
 from mercurial.error import (
@@ -46,7 +46,7 b' from mercurial.error import ('
     RequirementError, ProgrammingError)
 from mercurial.hgweb import hgweb_mod
 from mercurial.localrepo import instance
-from mercurial.match import match
+from mercurial.match import match, alwaysmatcher, patternmatcher
 from mercurial.mdiff import diffopts
 from mercurial.node import bin, hex
 from mercurial.encoding import tolocal
@@ -67,7 +67,7 b' from mercurial.url import httpbasicauthh'
 def get_ctx(repo, ref):
     try:
         ctx = repo[ref]
-    except ProgrammingError:
+    except (ProgrammingError, TypeError):
         # we're unable to find the rev using a regular lookup, we fallback
         # to slower, but backward compat revsymbol usage
         ctx = revsymbol(repo, ref)
@@ -49,7 +49,7 b' class HooksHttpClient(object):'
         try:
             connection.request('POST', '/', body)
         except Exception:
-            log.error('Connection failed on %s', connection)
+            log.error('Hooks calling Connection failed on %s', connection.__dict__)
             raise
         response = connection.getresponse()

@@ -577,8 +577,8 b' def git_post_receive(unused_repo_path, r'
     try:
         subprocessio.run_command(cmd, env=os.environ.copy())
     except Exception:
-        cmd = [settings.GIT_EXECUTABLE, 'symbolic-ref', 'HEAD',
-               'refs/heads/%s' % push_ref['name']]
+        cmd = [settings.GIT_EXECUTABLE, 'symbolic-ref', '"HEAD"',
+               '"refs/heads/%s"' % push_ref['name']]
         print("Setting default branch to %s" % push_ref['name'])
         subprocessio.run_command(cmd, env=os.environ.copy())

@@ -21,21 +21,27 b' import base64'
 import locale
 import logging
 import uuid
+import time
 import wsgiref.util
 import traceback
 import tempfile
+import psutil
+
 from itertools import chain
 from io import StringIO

 import simplejson as json
 import msgpack
 import configparser
+
 from pyramid.config import Configurator
 from pyramid.settings import asbool, aslist
 from pyramid.wsgi import wsgiapp
 from pyramid.response import Response

+from vcsserver.config.settings_maker import SettingsMaker
 from vcsserver.utils import safe_int
+from vcsserver.lib.statsd_client import StatsdClient

 log = logging.getLogger(__name__)

@@ -49,6 +55,7 b' except locale.Error as e:'
         'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e)
     os.environ['LC_ALL'] = 'C'

+
 import vcsserver
 from vcsserver import remote_wsgi, scm_app, settings, hgpatches
 from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT
@@ -98,38 +105,12 b' def _is_request_chunked(environ):'
     return stream


-def _int_setting(settings, name, default):
-    settings[name] = int(settings.get(name, default))
-    return settings[name]
-
-
-def _bool_setting(settings, name, default):
-    input_val = settings.get(name, default)
-    if isinstance(input_val, str):
-        input_val = input_val.encode('utf8')
-    settings[name] = asbool(input_val)
-    return settings[name]
-
-
-def _list_setting(settings, name, default):
-    raw_value = settings.get(name, default)
-
-    # Otherwise we assume it uses pyramids space/newline separation.
-    settings[name] = aslist(raw_value)
-    return settings[name]
-
-
-def _string_setting(settings, name, default, lower=True, default_when_empty=False):
-    value = settings.get(name, default)
-
-    if default_when_empty and not value:
-        # use default value when value is empty
-        value = default
-
-    if lower:
-        value = value.lower()
-    settings[name] = value
-    return settings[name]
+def log_max_fd():
+    try:
+        maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1]
+        log.info('Max file descriptors value: %s', maxfd)
+    except Exception:
+        pass


 class VCS(object):
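
`log_max_fd` reads the file-descriptor limit through psutil; `Process.rlimit` is Linux-only, hence the blanket `except`. A standalone equivalent:

    import psutil

    # rlimit() returns a (soft, hard) pair; index 1 is the hard ceiling
    # that log_max_fd() reports.
    soft, hard = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)
    print('Max file descriptors value:', hard)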
@@ -138,6 +119,8 b' class VCS(object):'
         self.cache_config = cache_config
         self._configure_locale()

+        log_max_fd()
+
         if GitFactory and GitRemote:
             git_factory = GitFactory()
             self._git_remote = GitRemote(git_factory)
@@ -243,9 +226,11 b' class HTTPApplication(object):'
     _use_echo_app = False

     def __init__(self, settings=None, global_config=None):
-        self._sanitize_settings_and_apply_defaults(settings)

         self.config = Configurator(settings=settings)
+        # Init our statsd at very start
+        self.config.registry.statsd = StatsdClient.statsd
+
         self.global_config = global_config
         self.config.include('vcsserver.lib.rc_cache')

@@ -263,6 +248,7 b' class HTTPApplication(object):'
         self.remote_wsgi = remote_wsgi_stub

         self._configure_settings(global_config, settings)
+
         self._configure()

     def _configure_settings(self, global_config, app_settings):
@@ -283,40 +269,6 b' class HTTPApplication(object):'
         vcsserver.PYRAMID_SETTINGS = settings_merged
         vcsserver.CONFIG = settings_merged

-    def _sanitize_settings_and_apply_defaults(self, settings):
-        temp_store = tempfile.gettempdir()
-        default_cache_dir = os.path.join(temp_store, 'rc_cache')
-
-        # save default, cache dir, and use it for all backends later.
-        default_cache_dir = _string_setting(
-            settings,
-            'cache_dir',
-            default_cache_dir, lower=False, default_when_empty=True)
-
-        # ensure we have our dir created
-        if not os.path.isdir(default_cache_dir):
-            os.makedirs(default_cache_dir, mode=0o755)
-
-        # exception store cache
-        _string_setting(
-            settings,
-            'exception_tracker.store_path',
-            temp_store, lower=False, default_when_empty=True)
-
-        # repo_object cache
-        _string_setting(
-            settings,
-            'rc_cache.repo_object.backend',
-            'dogpile.cache.rc.file_namespace', lower=False)
-        _int_setting(
-            settings,
-            'rc_cache.repo_object.expiration_time',
-            30 * 24 * 60 * 60)
-        _string_setting(
-            settings,
-            'rc_cache.repo_object.arguments.filename',
-            os.path.join(default_cache_dir, 'vcsserver_cache_1'), lower=False)
-
     def _configure(self):
         self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)

@@ -385,16 +337,22 b' class HTTPApplication(object):'
         # NOTE(marcink): trading complexity for slight performance
         if log.isEnabledFor(logging.DEBUG):
             no_args_methods = [
-                'archive_repo'
+
             ]
             if method in no_args_methods:
                 call_args = ''
             else:
                 call_args = args[1:]

-            log.debug('method requested:%s with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
+            log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
                       method, call_args, kwargs, context_uid, repo_state_uid)

+        statsd = request.registry.statsd
+        if statsd:
+            statsd.incr(
+                'vcsserver_method_total', tags=[
+                    "method:{}".format(method),
+                ])
         return payload, remote, method, args, kwargs

     def vcs_view(self, request):
@@ -431,7 +389,7 b' class HTTPApplication(object):'
             should_store_exc = False

         if should_store_exc:
-            store_exception(id(exc_info), exc_info)
+            store_exception(id(exc_info), exc_info, request_path=request.path)

         tb_info = ''.join(
             traceback.format_exception(exc_type, exc_value, exc_traceback))
@@ -450,6 +408,7 b' class HTTPApplication(object):'
                 'type': type_
             }
         }
+
         try:
             resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
         except AttributeError:
@@ -678,6 +637,12 b' class HTTPApplication(object):'
         log.error(
             'error occurred handling this request for path: %s, \n tb: %s',
             request.path, traceback_info)
+
+        statsd = request.registry.statsd
+        if statsd:
+            exc_type = "{}.{}".format(exception.__class__.__module__, exception.__class__.__name__)
+            statsd.incr('vcsserver_exception_total',
+                        tags=["type:{}".format(exc_type)])
         raise exception


@@ -693,10 +658,83 b' class ResponseFilter(object):'
         return self._start_response(status, headers, exc_info)


+def sanitize_settings_and_apply_defaults(global_config, settings):
+    global_settings_maker = SettingsMaker(global_config)
+    settings_maker = SettingsMaker(settings)
+
+    settings_maker.make_setting('logging.autoconfigure', False, parser='bool')
+
+    logging_conf = os.path.join(os.path.dirname(global_config.get('__file__')), 'logging.ini')
+    settings_maker.enable_logging(logging_conf)
+
+    # Default includes, possible to change as a user
+    pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
+    log.debug("Using the following pyramid.includes: %s", pyramid_includes)
+
+    settings_maker.make_setting('__file__', global_config.get('__file__'))
+
+    settings_maker.make_setting('pyramid.default_locale_name', 'en')
+    settings_maker.make_setting('locale', 'en_US.UTF-8')
+
+    settings_maker.make_setting('core.binary_dir', '')
+
+    temp_store = tempfile.gettempdir()
+    default_cache_dir = os.path.join(temp_store, 'rc_cache')
+    # save default, cache dir, and use it for all backends later.
+    default_cache_dir = settings_maker.make_setting(
+        'cache_dir',
+        default=default_cache_dir, default_when_empty=True,
+        parser='dir:ensured')
+
+    # exception store cache
+    settings_maker.make_setting(
+        'exception_tracker.store_path',
+        default=os.path.join(default_cache_dir, 'exc_store'), default_when_empty=True,
+        parser='dir:ensured'
+    )
+
+    # repo_object cache defaults
+    settings_maker.make_setting(
+        'rc_cache.repo_object.backend',
+        default='dogpile.cache.rc.file_namespace',
+        parser='string')
+    settings_maker.make_setting(
+        'rc_cache.repo_object.expiration_time',
+        default=30 * 24 * 60 * 60,  # 30days
+        parser='int')
+    settings_maker.make_setting(
+        'rc_cache.repo_object.arguments.filename',
+        default=os.path.join(default_cache_dir, 'vcsserver_cache_repo_object.db'),
+        parser='string')
+
+    # statsd
+    settings_maker.make_setting('statsd.enabled', False, parser='bool')
+    settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
+    settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
+    settings_maker.make_setting('statsd.statsd_prefix', '')
+    settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
+
+    settings_maker.env_expand()
+
+
 def main(global_config, **settings):
+    start_time = time.time()
+    log.info('Pyramid app config starting')
+
     if MercurialFactory:
         hgpatches.patch_largefiles_capabilities()
         hgpatches.patch_subrepo_type_mapping()

-    app = HTTPApplication(settings=settings, global_config=global_config)
-    return app.wsgi_app()
+    # Fill in and sanitize the defaults & do ENV expansion
+    sanitize_settings_and_apply_defaults(global_config, settings)
+
+    # init and bootstrap StatsdClient
+    StatsdClient.setup(settings)
+
+    pyramid_app = HTTPApplication(settings=settings, global_config=global_config).wsgi_app()
+    total_time = time.time() - start_time
+    log.info('Pyramid app `%s` created and configured in %.2fs',
+             getattr(pyramid_app, 'func_name', 'pyramid_app'), total_time)
+    return pyramid_app
+
+
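
A hedged sketch of what the new bootstrap produces for a near-empty config, assuming the SettingsMaker parsers behave as their names suggest ('bool' coerces strings, 'dir:ensured' creates the directory); the ini path is hypothetical:

    global_config = {'__file__': '/etc/rhodecode/vcsserver.ini'}
    settings = {'statsd.enabled': 'true'}  # ini values arrive as strings

    sanitize_settings_and_apply_defaults(global_config, settings)

    settings['statsd.enabled']                # True, parsed by the 'bool' parser
    settings['rc_cache.repo_object.backend']  # 'dogpile.cache.rc.file_namespace'
    settings['cache_dir']                     # e.g. '/tmp/rc_cache', dir created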
@@ -66,7 +66,7 b' def get_exc_store():'
     return _exc_store_path


-def _store_exception(exc_id, exc_info, prefix):
+def _store_exception(exc_id, exc_info, prefix, request_path=''):
     exc_type, exc_value, exc_traceback = exc_info

     tb = ''.join(traceback.format_exception(
@@ -99,8 +99,13 b' def _store_exception(exc_id, exc_info, p'
         f.write(exc_data)
     log.debug('Stored generated exception %s as: %s', exc_id, stored_exc_path)

+    log.error(
+        'error occurred handling this request.\n'
+        'Path: `%s`, tb: %s',
+        request_path, tb)

-def store_exception(exc_id, exc_info, prefix=global_prefix):
+
+def store_exception(exc_id, exc_info, prefix=global_prefix, request_path=''):
     """
     Example usage::

@@ -109,7 +114,8 b' def store_exception(exc_id, exc_info, pr'
     """

     try:
-        _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix)
+        _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix,
+                         request_path=request_path)
     except Exception:
         log.exception('Failed to store exception `%s` information', exc_id)
         # there's no way this can fail, it will crash server badly if it does.
@@ -38,7 +38,9 b' register_backend('
 log = logging.getLogger(__name__)

 from . import region_meta
-from .utils import (get_default_cache_settings, backend_key_generator, make_region)
+from .utils import (
+    get_default_cache_settings, backend_key_generator, get_or_create_region,
+    clear_cache_namespace, make_region)


 def configure_dogpile_cache(settings):
@@ -52,20 +54,25 b' def configure_dogpile_cache(settings):'
     avail_regions = set()
     for key in rc_cache_data.keys():
         namespace_name = key.split('.', 1)[0]
-        avail_regions.add(namespace_name)
-    log.debug('dogpile: found following cache regions: %s', avail_regions)
-
-    # register them into namespace
-    for region_name in avail_regions:
+        if namespace_name in avail_regions:
+            continue
+
+        avail_regions.add(namespace_name)
+        log.debug('dogpile: found following cache regions: %s', namespace_name)
+
         new_region = make_region(
-            name=region_name,
+            name=namespace_name,
             function_key_generator=None
         )

-        new_region.configure_from_config(settings, 'rc_cache.{}.'.format(region_name))
+        new_region.configure_from_config(settings, 'rc_cache.{}.'.format(namespace_name))
         new_region.function_key_generator = backend_key_generator(new_region.actual_backend)
-        log.debug('dogpile: registering a new region %s[%s]', region_name, new_region.__dict__)
-        region_meta.dogpile_cache_regions[region_name] = new_region
+        if log.isEnabledFor(logging.DEBUG):
+            region_args = dict(backend=new_region.actual_backend.__class__,
+                               region_invalidator=new_region.region_invalidator.__class__)
+            log.debug('dogpile: registering a new region `%s` %s', namespace_name, region_args)
+
+        region_meta.dogpile_cache_regions[namespace_name] = new_region


 def includeme(config):
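
The settings shape this function consumes: every `rc_cache.<region>.` key prefix defines one dogpile region, and the new guard registers each region only once. An illustrative sketch:

    # Hypothetical settings; keys follow the rc_cache.<region>.<option> scheme.
    settings = {
        'rc_cache.repo_object.backend': 'dogpile.cache.rc.file_namespace',
        'rc_cache.repo_object.expiration_time': 2592000,
        'rc_cache.repo_object.arguments.filename': '/tmp/rc_cache/repo_object.db',
    }
    configure_dogpile_cache(settings)
    # -> one region named 'repo_object' in region_meta.dogpile_cache_regions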
@@ -29,7 +29,10 b' from dogpile.cache.backends import redis'
 from dogpile.cache.backends.file import NO_VALUE, FileLock
 from dogpile.cache.util import memoized_property

+from pyramid.settings import asbool
+
 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
+from vcsserver.utils import safe_str, safe_unicode


 _default_max_size = 1024
@@ -124,7 +127,14 b' class FileNamespaceBackend(PickleSeriali'

     def __init__(self, arguments):
         arguments['lock_factory'] = CustomLockFactory
-        super(FileNamespaceBackend, self).__init__(arguments)
+        db_file = arguments.get('filename')
+
+        log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
+        try:
+            super(FileNamespaceBackend, self).__init__(arguments)
+        except Exception:
+            log.exception('Failed to initialize db at: %s', db_file)
+            raise

     def __repr__(self):
         return '{} `{}`'.format(self.__class__, self.filename)
@@ -141,13 +151,16 b' class FileNamespaceBackend(PickleSeriali'
             return False

         with self._dbm_file(True) as dbm:
-
-            return filter(cond, dbm.keys())
+            try:
+                return filter(cond, dbm.keys())
+            except Exception:
+                log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
+                raise

     def get_store(self):
         return self.filename

-    def get(self, key):
+    def _dbm_get(self, key):
         with self._dbm_file(False) as dbm:
             if hasattr(dbm, 'get'):
                 value = dbm.get(key, NO_VALUE)
@@ -161,6 +174,13 b' class FileNamespaceBackend(PickleSeriali'
             value = self._loads(value)
         return value

+    def get(self, key):
+        try:
+            return self._dbm_get(key)
+        except Exception:
+            log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
+            raise
+
     def set(self, key, value):
         with self._dbm_file(True) as dbm:
             dbm[key] = self._dumps(value)
@@ -172,6 +192,16 b' class FileNamespaceBackend(PickleSeriali'


 class BaseRedisBackend(redis_backend.RedisBackend):
+    key_prefix = ''
+
+    def __init__(self, arguments):
+        super(BaseRedisBackend, self).__init__(arguments)
+        self._lock_timeout = self.lock_timeout
+        self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))
+
+        if self._lock_auto_renewal and not self._lock_timeout:
+            # set default timeout for auto_renewal
+            self._lock_timeout = 30

     def _create_client(self):
         args = {}
@@ -234,11 +264,10 b' class BaseRedisBackend(redis_backend.Red'
             pipe.execute()

     def get_mutex(self, key):
-        u = redis_backend.u
         if self.distributed_lock:
-            lock_key = u('_lock_{0}').format(key)
-            log.debug('Trying to acquire Redis lock for key %s', lock_key)
-            return self.client.lock(lock_key, self.lock_timeout, self.lock_sleep)
+            lock_key = u'_lock_{0}'.format(safe_unicode(key))
+            return get_mutex_lock(self.client, lock_key, self._lock_timeout,
+                                  auto_renewal=self._lock_auto_renewal)
         else:
             return None

@@ -251,3 +280,50 b' class RedisPickleBackend(PickleSerialize'
 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
     key_prefix = 'redis_msgpack_backend'
     pass
+
+
+def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
+    import redis_lock
+
+    class _RedisLockWrapper(object):
+        """LockWrapper for redis_lock"""
+
+        @classmethod
+        def get_lock(cls):
+            return redis_lock.Lock(
+                redis_client=client,
+                name=lock_key,
+                expire=lock_timeout,
+                auto_renewal=auto_renewal,
+                strict=True,
+            )
+
+        def __repr__(self):
+            return "{}:{}".format(self.__class__.__name__, lock_key)
+
+        def __str__(self):
+            return "{}:{}".format(self.__class__.__name__, lock_key)
+
+        def __init__(self):
+            self.lock = self.get_lock()
+            self.lock_key = lock_key
+
+        def acquire(self, wait=True):
+            log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
+            try:
+                acquired = self.lock.acquire(wait)
+                log.debug('Got lock for key %s, %s', self.lock_key, acquired)
+                return acquired
+            except redis_lock.AlreadyAcquired:
+                return False
+            except redis_lock.AlreadyStarted:
+                # refresh thread exists, but it also means we acquired the lock
+                return True
+
+        def release(self):
+            try:
+                self.lock.release()
+            except redis_lock.NotAcquired:
+                pass
+
+    return _RedisLockWrapper()
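
A hedged usage sketch for the wrapper above; `client` is any redis-py client and the key name is illustrative:

    import redis

    client = redis.StrictRedis()
    mutex = get_mutex_lock(client, '_lock_repo_object:example',
                           lock_timeout=30, auto_renewal=True)
    if mutex.acquire(wait=True):
        try:
            pass  # critical section: one process computes the cached value
        finally:
            mutex.release()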
@@ -16,15 +16,16 b''
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 import os
+import time
 import logging
 import functools
-from decorator import decorate

 from dogpile.cache import CacheRegion
 from dogpile.cache.util import compat

 from vcsserver.utils import safe_str, sha1
+from vcsserver.lib.rc_cache import region_meta

 log = logging.getLogger(__name__)

@@ -48,18 +49,77 b' class RhodeCodeCacheRegion(CacheRegion):'
         if function_key_generator is None:
             function_key_generator = self.function_key_generator

+        # workaround for py2 and cython problems, this block should be removed
+        # once we've migrated to py3
+        if 'cython' == 'cython':
+            def decorator(fn):
+                if to_str is compat.string_type:
+                    # backwards compatible
+                    key_generator = function_key_generator(namespace, fn)
+                else:
+                    key_generator = function_key_generator(namespace, fn, to_str=to_str)
+
+                @functools.wraps(fn)
+                def decorate(*arg, **kw):
+                    key = key_generator(*arg, **kw)
+
+                    @functools.wraps(fn)
+                    def creator():
+                        return fn(*arg, **kw)
+
+                    if not condition:
+                        return creator()
+
+                    timeout = expiration_time() if expiration_time_is_callable \
+                        else expiration_time
+
+                    return self.get_or_create(key, creator, timeout, should_cache_fn)
+
+                def invalidate(*arg, **kw):
+                    key = key_generator(*arg, **kw)
+                    self.delete(key)
+
+                def set_(value, *arg, **kw):
+                    key = key_generator(*arg, **kw)
+                    self.set(key, value)
+
+                def get(*arg, **kw):
+                    key = key_generator(*arg, **kw)
+                    return self.get(key)
+
+                def refresh(*arg, **kw):
+                    key = key_generator(*arg, **kw)
+                    value = fn(*arg, **kw)
+                    self.set(key, value)
+                    return value
+
+                decorate.set = set_
+                decorate.invalidate = invalidate
+                decorate.refresh = refresh
+                decorate.get = get
+                decorate.original = fn
+                decorate.key_generator = key_generator
+                decorate.__wrapped__ = fn
+
+                return decorate
+            return decorator
+
         def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):

             if not condition:
-                log.debug('Calling un-cached func:%s', user_func.func_name)
-                return user_func(*arg, **kw)
+                log.debug('Calling un-cached method:%s', user_func.func_name)
+                start = time.time()
+                result = user_func(*arg, **kw)
+                total = time.time() - start
+                log.debug('un-cached method:%s took %.4fs', user_func.func_name, total)
+                return result

             key = key_generator(*arg, **kw)

             timeout = expiration_time() if expiration_time_is_callable \
                 else expiration_time

-            log.debug('Calling cached fn:%s', user_func.func_name)
+            log.debug('Calling cached method:`%s`', user_func.func_name)
             return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))

         def cache_decorator(user_func):
@@ -98,8 +158,7 b' class RhodeCodeCacheRegion(CacheRegion):'
             user_func.original = user_func

             # Use `decorate` to preserve the signature of :param:`user_func`.
-
-            return decorate(user_func, functools.partial(
+            return decorator.decorate(user_func, functools.partial(
                 get_or_create_for_user_func, key_generator))

         return cache_decorator
@@ -151,3 +210,54 b' def key_generator(backend, namespace, fn'
         return final_key

     return generate_key
+
+
+def get_or_create_region(region_name, region_namespace=None):
+    from vcsserver.lib.rc_cache.backends import FileNamespaceBackend
+    region_obj = region_meta.dogpile_cache_regions.get(region_name)
+    if not region_obj:
+        raise EnvironmentError(
+            'Region `{}` not found in configured regions: {}.'.format(
+                region_name, region_meta.dogpile_cache_regions.keys()))
+
+    region_uid_name = '{}:{}'.format(region_name, region_namespace)
+    if isinstance(region_obj.actual_backend, FileNamespaceBackend):
+        region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
+        if region_exist:
+            log.debug('Using already configured region: %s', region_namespace)
+            return region_exist
+        cache_dir = region_meta.dogpile_config_defaults['cache_dir']
+        expiration_time = region_obj.expiration_time
+
+        if not os.path.isdir(cache_dir):
+            os.makedirs(cache_dir)
+        new_region = make_region(
+            name=region_uid_name,
+            function_key_generator=backend_key_generator(region_obj.actual_backend)
+        )
+        namespace_filename = os.path.join(
+            cache_dir, "{}.cache.dbm".format(region_namespace))
+        # special type that allows 1db per namespace
+        new_region.configure(
+            backend='dogpile.cache.rc.file_namespace',
+            expiration_time=expiration_time,
+            arguments={"filename": namespace_filename}
+        )
+
+        # create and save in region caches
+        log.debug('configuring new region: %s', region_uid_name)
+        region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region
+
+    return region_obj
+
+
+def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
+    region = get_or_create_region(cache_region, cache_namespace_uid)
+    cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
+    num_delete_keys = len(cache_keys)
+    if invalidate:
+        region.invalidate(hard=False)
+    else:
+        if num_delete_keys:
+            region.delete_multi(cache_keys)
+    return num_delete_keys
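
Usage sketch for the new per-namespace API; the `repo_object` region and `cache_repo.<id>` namespace names follow the convention used elsewhere in this change, and `compute()` is a hypothetical expensive call:

    region = get_or_create_region('repo_object', 'cache_repo.42')

    @region.conditional_cache_on_arguments(condition=True)
    def _commit_count(_repo_id):
        return compute()  # hypothetical; cached in the namespace's own DB file

    _commit_count('42')                                     # computed once, then cached
    clear_cache_namespace('repo_object', 'cache_repo.42')   # drop or invalidate keys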
@@ -19,6 +19,7 b''

 import os
 import subprocess
+import time
 from urllib.error import URLError
 import urllib.parse
 import logging
@@ -35,7 +36,9 b' import svn.fs'
 import svn.repos

 from vcsserver import svn_diff, exceptions, subprocessio, settings
-from vcsserver.base import RepoFactory, raise_from_original
+from vcsserver.base import RepoFactory, raise_from_original, ArchiveNode, archive_repo
+from vcsserver.exceptions import NoContentException
+from vcsserver.utils import safe_str
 from vcsserver.vcs_base import RemoteBase

 log = logging.getLogger(__name__)
@@ -198,7 +201,8 b' class SvnRemote(RemoteBase):'
     def revision_properties(self, wire, revision):

         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _revision_properties(_repo_id, _revision):
             repo = self._factory.repo(wire)
             fs_ptr = svn.repos.fs(repo)
@@ -252,7 +256,8 b' class SvnRemote(RemoteBase):'
     @reraise_safe_exceptions
     def node_history(self, wire, path, revision, limit):
         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _assert_correct_path(_context_uid, _repo_id, _path, _revision, _limit):
             cross_copies = False
             repo = self._factory.repo(wire)
@@ -273,7 +278,8 b' class SvnRemote(RemoteBase):'

     def node_properties(self, wire, path, revision):
         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _node_properties(_repo_id, _path, _revision):
             repo = self._factory.repo(wire)
             fsobj = svn.repos.fs(repo)
@@ -282,7 +288,7 b' class SvnRemote(RemoteBase):'
         return _node_properties(repo_id, path, revision)

     def file_annotate(self, wire, path, revision):
-        abs_path = 'file://' + urllib.request.pathname2url(
+        abs_path = 'file://' + urllib.pathname2url(
             vcspath.join(wire['path'], path))
         file_uri = svn.core.svn_path_canonicalize(abs_path)

@@ -311,7 +317,8 b' class SvnRemote(RemoteBase):'
     def get_node_type(self, wire, path, revision=None):

         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _get_node_type(_repo_id, _path, _revision):
             repo = self._factory.repo(wire)
             fs_ptr = svn.repos.fs(repo)
@@ -325,7 +332,8 b' class SvnRemote(RemoteBase):'
     def get_nodes(self, wire, path, revision=None):

         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _get_nodes(_repo_id, _path, _revision):
             repo = self._factory.repo(wire)
             fsobj = svn.repos.fs(repo)
@@ -352,7 +360,8 b' class SvnRemote(RemoteBase):'
     def get_file_size(self, wire, path, revision=None):

         cache_on, context_uid, repo_id = self._cache_on(wire)
-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _get_file_size(_repo_id, _path, _revision):
             repo = self._factory.repo(wire)
             fsobj = svn.repos.fs(repo)
@@ -467,7 +476,8 b' class SvnRemote(RemoteBase):'
     def is_binary(self, wire, rev, path):
         cache_on, context_uid, repo_id = self._cache_on(wire)

-        @self.region.conditional_cache_on_arguments(condition=cache_on)
+        region = self._region(wire)
+        @region.conditional_cache_on_arguments(condition=cache_on)
         def _is_binary(_repo_id, _rev, _path):
             raw_bytes = self.get_file_content(wire, path, rev)
             return raw_bytes and '\0' in raw_bytes
@@ -481,9 +491,7 b' class SvnRemote(RemoteBase):'
         if path and os.path.isdir(path):
             opts['cwd'] = path

-        safe_call = False
-        if '_safe' in opts:
-            safe_call = True
+        safe_call = opts.pop('_safe', False)

         svnenv = os.environ.copy()
         svnenv.update(opts.pop('extra_env', {}))
@@ -496,15 +504,15 b' class SvnRemote(RemoteBase):'

             return ''.join(p), ''.join(p.error)
         except (EnvironmentError, OSError) as err:
-            cmd = ' '.join(cmd)  # human friendly CMD
-            tb_err = ("Couldn't run svn command (%s).\n"
-                      "Original error was:%s\n"
-                      "Call options:%s\n"
-                      % (cmd, err, _opts))
-            log.exception(tb_err)
-            if safe_call:
-                return '', err
-            else:
+            if safe_call:
+                return '', safe_str(err).strip()
+            else:
+                cmd = ' '.join(cmd)  # human friendly CMD
+                tb_err = ("Couldn't run svn command (%s).\n"
+                          "Original error was:%s\n"
+                          "Call options:%s\n"
+                          % (cmd, err, _opts))
+                log.exception(tb_err)
                 raise exceptions.VcsException()(tb_err)

     @reraise_safe_exceptions
@@ -528,6 +536,74 b' class SvnRemote(RemoteBase):'
             'post_version': get_svn_post_hook_version(repo_path),
         }

+    @reraise_safe_exceptions
+    def set_head_ref(self, wire, head_name):
+        pass
+
+    @reraise_safe_exceptions
+    def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
+                     archive_dir_name, commit_id):
+
+        def walk_tree(root, root_dir, _commit_id):
+            """
+            Special recursive svn repo walker
+            """
+
+            filemode_default = 0o100644
+            filemode_executable = 0o100755
+
+            file_iter = svn.fs.dir_entries(root, root_dir)
+            for f_name in file_iter:
+                f_type = NODE_TYPE_MAPPING.get(file_iter[f_name].kind, None)
+
+                if f_type == 'dir':
+                    # return only DIR, and then all entries in that dir
+                    yield os.path.join(root_dir, f_name), {'mode': filemode_default}, f_type
+                    new_root = os.path.join(root_dir, f_name)
+                    for _f_name, _f_data, _f_type in walk_tree(root, new_root, _commit_id):
+                        yield _f_name, _f_data, _f_type
+                else:
+                    f_path = os.path.join(root_dir, f_name).rstrip('/')
+                    prop_list = svn.fs.node_proplist(root, f_path)
+
+                    f_mode = filemode_default
+                    if prop_list.get('svn:executable'):
+                        f_mode = filemode_executable
+
+                    f_is_link = False
+                    if prop_list.get('svn:special'):
+                        f_is_link = True
+
+                    data = {
+                        'is_link': f_is_link,
+                        'mode': f_mode,
+                        'content_stream': svn.core.Stream(svn.fs.file_contents(root, f_path)).read
+                    }
+
+                    yield f_path, data, f_type
+
+        def file_walker(_commit_id, path):
+            repo = self._factory.repo(wire)
+            root = svn.fs.revision_root(svn.repos.fs(repo), int(commit_id))
+
+            def no_content():
+                raise NoContentException()
+
+            for f_name, f_data, f_type in walk_tree(root, path, _commit_id):
+                file_path = f_name
+
+                if f_type == 'dir':
+                    mode = f_data['mode']
+                    yield ArchiveNode(file_path, mode, False, no_content)
+                else:
+                    mode = f_data['mode']
+                    is_link = f_data['is_link']
+                    data_stream = f_data['content_stream']
+                    yield ArchiveNode(file_path, mode, is_link, data_stream)
+
+        return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
+                            archive_dir_name, commit_id)
+

 class SvnDiffer(object):
     """
@@ -16,13 +16,12 b''
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 import socket
-
 import pytest


 def pytest_addoption(parser):
     parser.addoption(
-        '--repeat', type=int, default=100,
+        '--perf-repeat-vcs', type=int, default=100,
         help="Number of repetitions in performance tests.")


@@ -34,7 +33,7 b' def repeat(request):'
     Slower calls may divide it by 10 or 100. It is chosen in a way so that the
     tests are not too slow in our default test suite.
     """
-    return request.config.getoption('--repeat')
+    return request.config.getoption('--perf-repeat-vcs')


 @pytest.fixture(scope='session')
@@ -113,7 +113,7 b' class TestReraiseSafeExceptions(object):'

         methods = inspect.getmembers(git_remote, predicate=inspect.ismethod)
         for method_name, method in methods:
-            if not method_name.startswith('_'):
+            if not method_name.startswith('_') and method_name not in ['vcsserver_invalidate_cache']:
                 assert method.im_func.__code__ == decorator.__code__

     @pytest.mark.parametrize('side_effect, expected_type', [
@@ -50,7 +50,7 b' class TestReraiseSafeExceptions(object):'
         methods = inspect.getmembers(hg_remote, predicate=inspect.ismethod)
         decorator = hg.reraise_safe_exceptions(None)
         for method_name, method in methods:
-            if not method_name.startswith('_'):
+            if not method_name.startswith('_') and method_name not in ['vcsserver_invalidate_cache']:
                 assert method.im_func.__code__ == decorator.__code__

     @pytest.mark.parametrize('side_effect, expected_type', [
@@ -14,7 +14,10 b' def vcs_app():'
         'dev.use_echo_app': 'true',
         'locale': 'en_US.UTF-8',
     }
-    vcs_app = main({}, **stub_settings)
+    stub_global_conf = {
+        '__file__': ''
+    }
+    vcs_app = main(stub_global_conf, **stub_settings)
     app = webtest.TestApp(vcs_app)
     return app

@@ -25,7 +25,7 b' from vcsserver.base import obfuscate_qs'
 @mock.patch('vcsserver.http_main.VCS', mock.Mock())
 @mock.patch('vcsserver.hgpatches.patch_largefiles_capabilities')
 def test_applies_largefiles_patch(patch_largefiles_capabilities):
-    http_main.main({})
+    http_main.main({'__file__': ''})
     patch_largefiles_capabilities.assert_called_once_with()


@@ -35,7 +35,7 b' def test_applies_largefiles_patch(patch_'
     'vcsserver.hgpatches.patch_largefiles_capabilities',
     mock.Mock(side_effect=Exception("Must not be called")))
 def test_applies_largefiles_patch_only_if_mercurial_is_available():
-    http_main.main({})
+    http_main.main({'__file__': ''})


 @pytest.mark.parametrize('given, expected', [
@@ -25,15 +25,23 b' from vcsserver.utils import safe_str'
 log = logging.getLogger(__name__)


-def get_access_path(request):
-    environ = request.environ
-    return environ.get('PATH_INFO')
+def get_access_path(environ):
+    path = environ.get('PATH_INFO')
+    return path


 def get_user_agent(environ):
     return environ.get('HTTP_USER_AGENT')


+def get_vcs_method(environ):
+    return environ.get('HTTP_X_RC_METHOD')
+
+
+def get_vcs_repo(environ):
+    return environ.get('HTTP_X_RC_REPO_NAME')
+
+
 class RequestWrapperTween(object):
     def __init__(self, handler, registry):
         self.handler = handler
@@ -43,17 +51,52 b' class RequestWrapperTween(object):'

     def __call__(self, request):
         start = time.time()
+        log.debug('Starting request time measurement')
+        response = None
+
+        ua = get_user_agent(request.environ)
+        vcs_method = get_vcs_method(request.environ)
+        repo_name = get_vcs_repo(request.environ)
+
         try:
             response = self.handler(request)
         finally:
-            end = time.time()
-            total = end - start
             count = request.request_count()
             _ver_ = vcsserver.__version__
+            _path = safe_str(get_access_path(request.environ))
+            ip = '127.0.0.1'
+            match_route = request.matched_route.name if request.matched_route else "NOT_FOUND"
+            resp_code = getattr(response, 'status_code', 'UNDEFINED')
+
+            total = time.time() - start
+
+            _view_path = "{}/{}@{}".format(_path, vcs_method, repo_name)
             log.info(
                 'Req[%4s] IP: %s %s Request to %s time: %.4fs [%s], VCSServer %s',
-                count, '127.0.0.1', request.environ.get('REQUEST_METHOD'),
-                safe_str(get_access_path(request)), total, get_user_agent(request.environ), _ver_)
+                count, ip, request.environ.get('REQUEST_METHOD'),
+                _view_path, total, ua, _ver_,
+                extra={"time": total, "ver": _ver_, "code": resp_code,
+                       "path": _path, "view_name": match_route, "user_agent": ua,
+                       "vcs_method": vcs_method, "repo_name": repo_name}
+            )
+
+            statsd = request.registry.statsd
+            if statsd:
+                match_route = request.matched_route.name if request.matched_route else _path
+                elapsed_time_ms = round(1000.0 * total)  # use ms only
+                statsd.timing(
+                    "vcsserver_req_timing.histogram", elapsed_time_ms,
+                    tags=[
+                        "view_name:{}".format(match_route),
+                        "code:{}".format(resp_code)
+                    ],
+                    use_decimals=False
+                )
+                statsd.incr(
+                    "vcsserver_req_total", tags=[
+                        "view_name:{}".format(match_route),
+                        "code:{}".format(resp_code)
+                    ])

         return response

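
The tween now emits one timer and one counter per request. Assuming a dogstatsd-style tag syntax (the exact wire format depends on the statsd flavour in use), the emitted packets would look roughly like:

    # vcsserver_req_timing.histogram:532|ms|#view_name:vcs,code:200
    # vcsserver_req_total:1|c|#view_name:vcs,code:200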
@@ -15,13 +15,15 b''
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

+from vcsserver.lib import rc_cache

 class RemoteBase(object):
     EMPTY_COMMIT = '0' * 40

-    @property
-    def region(self):
-        return self._factory._cache_region
+    def _region(self, wire):
+        cache_repo_id = wire.get('cache_repo_id', '')
+        cache_namespace_uid = 'cache_repo.{}'.format(cache_repo_id)
+        return rc_cache.get_or_create_region('repo_object', cache_namespace_uid)

     def _cache_on(self, wire):
         context = wire.get('context', '')
@@ -30,3 +32,15 b' class RemoteBase(object):'
         cache = wire.get('cache', True)
         cache_on = context and cache
         return cache_on, context_uid, repo_id
+
+    def vcsserver_invalidate_cache(self, wire, delete):
+        from vcsserver.lib import rc_cache
+        repo_id = wire.get('repo_id', '')
+        cache_repo_id = wire.get('cache_repo_id', '')
+        cache_namespace_uid = 'cache_repo.{}'.format(cache_repo_id)
+
+        if delete:
+            rc_cache.clear_cache_namespace(
+                'repo_object', cache_namespace_uid, invalidate=True)
+
+        return {'invalidated': {'repo_id': repo_id, 'delete': delete}}
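
A hedged sketch of calling the new invalidation entry point; `remote` stands for any of the remote objects inheriting RemoteBase, and the wire values are illustrative:

    wire = {'repo_id': 'example', 'cache_repo_id': 'example'}
    remote.vcsserver_invalidate_cache(wire, delete=True)
    # -> {'invalidated': {'repo_id': 'example', 'delete': True}}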