core: re-implemented the way configuration can be made...
super-admin -
r1021:a797b226 default
@@ -0,0 +1,53 b''
1 ; #####################
2 ; LOGGING CONFIGURATION
3 ; #####################
4 ; Logging template, used to configure logging
5 ; some variables here are replaced by RhodeCode with default values
6
7 [loggers]
8 keys = root, vcsserver
9
10 [handlers]
11 keys = console
12
13 [formatters]
14 keys = generic, json
15
16 ; #######
17 ; LOGGERS
18 ; #######
19 [logger_root]
20 level = NOTSET
21 handlers = console
22
23 [logger_vcsserver]
24 level = $RC_LOGGING_LEVEL
25 handlers =
26 qualname = vcsserver
27 propagate = 1
28
29 ; ########
30 ; HANDLERS
31 ; ########
32
33 [handler_console]
34 class = StreamHandler
35 args = (sys.stderr, )
36 level = $RC_LOGGING_LEVEL
37 ; To enable JSON formatted logs replace generic with json
38 ; This allows sending properly formatted logs to grafana loki or elasticsearch
39 #formatter = json
40 #formatter = generic
41 formatter = $RC_LOGGING_FORMATTER
42
43 ; ##########
44 ; FORMATTERS
45 ; ##########
46
47 [formatter_generic]
48 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
49 datefmt = %Y-%m-%d %H:%M:%S
50
51 [formatter_json]
52 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
53 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
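The $RC_LOGGING_LEVEL and $RC_LOGGING_FORMATTER placeholders in this template are filled in at startup by the new enable_logging() helper (shown further down in settings_maker.py). A minimal sketch of that substitution, assuming the template above has been saved as logging.ini:

import os
import string

# read the logging template shown above (the path is only for this example)
with open('logging.ini') as f:
    ini_template = f.read()

# unset env vars fall back to INFO / generic, mirroring the safe_substitute()
# call in SettingsMaker.enable_logging()
rendered = string.Template(ini_template).safe_substitute(
    RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or 'INFO',
    RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or 'generic',
)
print(rendered)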
1 NO CONTENT: new file 100644
NO CONTENT: new file 100644
@@ -0,0 +1,177 b''
1 # -*- coding: utf-8 -*-
2
3 # Copyright (C) 2010-2020 RhodeCode GmbH
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
21 import os
22 import textwrap
23 import string
24 import functools
25 import logging
26 import tempfile
27 import logging.config
28 log = logging.getLogger(__name__)
29
30
31 def str2bool(_str):
32 """
33 returns True/False value from given string, it tries to translate the
34 string into boolean
35
36 :param _str: string value to translate into boolean
37 :rtype: boolean
38 :returns: boolean from given string
39 """
40 if _str is None:
41 return False
42 if _str in (True, False):
43 return _str
44 _str = str(_str).strip().lower()
45 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
46
47
48 def aslist(obj, sep=None, strip=True):
49 """
50 Returns given string separated by sep as list
51
52 :param obj:
53 :param sep:
54 :param strip:
55 """
56 if isinstance(obj, (basestring,)):
57 lst = obj.split(sep)
58 if strip:
59 lst = [v.strip() for v in lst]
60 return lst
61 elif isinstance(obj, (list, tuple)):
62 return obj
63 elif obj is None:
64 return []
65 else:
66 return [obj]
67
68
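A few sanity checks of what these two helpers return, based on the implementations above:

# str2bool / aslist behaviour
assert str2bool('true') is True
assert str2bool(None) is False
assert aslist('a, b, c', sep=',') == ['a', 'b', 'c']
assert aslist(None) == []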
69 class SettingsMaker(object):
70
71 def __init__(self, app_settings):
72 self.settings = app_settings
73
74 @classmethod
75 def _bool_func(cls, input_val):
76 if isinstance(input_val, unicode):
77 input_val = input_val.encode('utf8')
78 return str2bool(input_val)
79
80 @classmethod
81 def _int_func(cls, input_val):
82 return int(input_val)
83
84 @classmethod
85 def _list_func(cls, input_val, sep=','):
86 return aslist(input_val, sep=sep)
87
88 @classmethod
89 def _string_func(cls, input_val, lower=True):
90 if lower:
91 input_val = input_val.lower()
92 return input_val
93
94 @classmethod
95 def _float_func(cls, input_val):
96 return float(input_val)
97
98 @classmethod
99 def _dir_func(cls, input_val, ensure_dir=False, mode=0o755):
100
101 # ensure we have our dir created
102 if not os.path.isdir(input_val) and ensure_dir:
103 os.makedirs(input_val, mode=mode)
104
105 if not os.path.isdir(input_val):
106 raise Exception('Dir at {} does not exist'.format(input_val))
107 return input_val
108
109 @classmethod
110 def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755):
111 dirname = os.path.dirname(input_val)
112 cls._dir_func(dirname, ensure_dir=ensure_dir)
113 return input_val
114
115 @classmethod
116 def _key_transformator(cls, key):
117 return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_'))
118
119 def enable_logging(self, logging_conf=None):
120 """
121 Helper to enable and configure logging on a running instance
122 :return:
123 """
124 if not str2bool(self.settings.get('logging.autoconfigure')):
125 log.info('logging configuration based on main .ini file')
126 return
127
128 if logging_conf is None:
129 logging_conf = self.settings.get('logging.logging_conf_file') or ''
130
131 if not os.path.isfile(logging_conf):
132 log.error('Unable to setup logging based on %s, file does not exist...', logging_conf)
133 return
134
135 with open(logging_conf, 'rb') as f:
136 ini_template = textwrap.dedent(f.read())
137 ini_template = string.Template(ini_template).safe_substitute(
138 RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or 'INFO',
139 RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or 'generic'
140 )
141
142 with tempfile.NamedTemporaryFile(prefix='rc_logging_', suffix='.ini', delete=False) as f:
143 log.info('Saved Temporary LOGGING config at %s', f.name)
144 f.write(ini_template)
145
146 logging.config.fileConfig(f.name)
147 os.remove(f.name)
148
149 def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None):
150
151 input_val = self.settings.get(key, default)
152
153 if default_when_empty and not input_val:
154 # use default value when value is set in the config but it is empty
155 input_val = default
156
157 parser_func = {
158 'bool': self._bool_func,
159 'int': self._int_func,
160 'list': self._list_func,
161 'list:newline': functools.partial(self._list_func, sep='\n'),
162 'string': functools.partial(self._string_func, lower=lower),
163 'dir': self._dir_func,
164 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True),
165 'file': self._file_path_func,
166 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True),
167 None: lambda i: i
168 }[parser]
169
170 # the key may also be set in the environment; if so, that value takes higher priority
171 transformed_key = self._key_transformator(key)
172 envvar_value = os.environ.get(transformed_key)
173 if envvar_value:
174 log.debug('using `%s` key instead of `%s` key for config', transformed_key, key)
175 input_val = envvar_value
176 self.settings[key] = parser_func(input_val)
177 return self.settings[key]
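The call sites are not part of this diff, so the following is only a sketch of how an application factory might drive SettingsMaker; the keys and default values are illustrative:

from vcsserver.config.settings_maker import SettingsMaker

# raw values as parsed from the .ini file (illustrative)
settings = {
    'logging.autoconfigure': 'true',
    'core.binary_dir': '/usr/local/bin',
}
settings_maker = SettingsMaker(settings)

# each call coerces the raw ini string, or an RC_* environment override,
# into a typed value and stores it back into the settings dict
settings_maker.make_setting('logging.autoconfigure', default=False, parser='bool')
settings_maker.make_setting('core.binary_dir', default='', parser='string')
settings_maker.make_setting('cache_dir', default='/tmp/rc_cache', parser='dir:ensured')

# renders and applies the logging template when logging.autoconfigure is on
settings_maker.enable_logging()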
@@ -1,256 +1,271 b''
1 ## -*- coding: utf-8 -*-
1 ## -*- coding: utf-8 -*-
2
2
3 ; #################################
3 ; #################################
4 ; RHODECODE VCSSERVER CONFIGURATION
4 ; RHODECODE VCSSERVER CONFIGURATION
5 ; #################################
5 ; #################################
6
6
7 [server:main]
7 [server:main]
8 ; COMMON HOST/IP CONFIG
8 ; COMMON HOST/IP CONFIG
9 host = 0.0.0.0
9 host = 0.0.0.0
10 port = 9900
10 port = 9900
11
11
12 ; ##################################################
12 ; ##################################################
13 ; WAITRESS WSGI SERVER - Recommended for Development
13 ; WAITRESS WSGI SERVER - Recommended for Development
14 ; ##################################################
14 ; ##################################################
15
15
16 ; use server type
16 ; use server type
17 use = egg:waitress#main
17 use = egg:waitress#main
18
18
19 ; number of worker threads
19 ; number of worker threads
20 threads = 5
20 threads = 5
21
21
22 ; MAX BODY SIZE 100GB
22 ; MAX BODY SIZE 100GB
23 max_request_body_size = 107374182400
23 max_request_body_size = 107374182400
24
24
25 ; Use poll instead of select; fixes file descriptor limit problems.
25 ; Use poll instead of select; fixes file descriptor limit problems.
26 ; May not work on old Windows systems.
26 ; May not work on old Windows systems.
27 asyncore_use_poll = true
27 asyncore_use_poll = true
28
28
29
29
30 ; ###########################
30 ; ###########################
31 ; GUNICORN APPLICATION SERVER
31 ; GUNICORN APPLICATION SERVER
32 ; ###########################
32 ; ###########################
33
33
34 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
34 ; run with gunicorn --paste rhodecode.ini
35
35
36 ; Module to use, this setting shouldn't be changed
36 ; Module to use, this setting shouldn't be changed
37 #use = egg:gunicorn#main
37 #use = egg:gunicorn#main
38
38
39 ; Sets the number of process workers. More workers means more concurrent connections
39 ; Sets the number of process workers. More workers means more concurrent connections
40 ; RhodeCode can handle at the same time. Each additional worker also increases
40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 ; memory usage as each has its own set of caches.
41 ; memory usage as each has its own set of caches.
42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
43 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 ; when using more than 1 worker.
45 ; when using more than 1 worker.
46 #workers = 2
46 #workers = 2
47
47
48 ; Gunicorn access log level
48 ; Gunicorn access log level
49 #loglevel = info
49 #loglevel = info
50
50
51 ; Process name visible in process list
51 ; Process name visible in process list
52 #proc_name = rhodecode_vcsserver
52 #proc_name = rhodecode_vcsserver
53
53
54 ; Type of worker class, one of `sync`, `gevent`
54 ; Type of worker class, one of `sync`, `gevent`
55 ; currently `sync` is the only option allowed.
55 ; currently `sync` is the only option allowed.
56 #worker_class = sync
56 #worker_class = sync
57
57
58 ; The maximum number of simultaneous clients. Valid only for gevent
58 ; The maximum number of simultaneous clients. Valid only for gevent
59 #worker_connections = 10
59 #worker_connections = 10
60
60
61 ; Max number of requests that worker will handle before being gracefully restarted.
61 ; Max number of requests that worker will handle before being gracefully restarted.
62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
63 #max_requests = 1000
63 #max_requests = 1000
64 #max_requests_jitter = 30
64 #max_requests_jitter = 30
65
65
66 ; Amount of time a worker can spend handling a request before it
66 ; Amount of time a worker can spend handling a request before it
67 ; gets killed and restarted. By default set to 21600 (6hrs)
67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 #timeout = 21600
69 #timeout = 21600
70
70
71 ; The maximum size of HTTP request line in bytes.
71 ; The maximum size of HTTP request line in bytes.
72 ; 0 for unlimited
72 ; 0 for unlimited
73 #limit_request_line = 0
73 #limit_request_line = 0
74
74
75 ; Limit the number of HTTP headers fields in a request.
75 ; Limit the number of HTTP headers fields in a request.
76 ; By default this value is 100 and can't be larger than 32768.
76 ; By default this value is 100 and can't be larger than 32768.
77 #limit_request_fields = 32768
77 #limit_request_fields = 32768
78
78
79 ; Limit the allowed size of an HTTP request header field.
79 ; Limit the allowed size of an HTTP request header field.
80 ; Value is a positive number or 0.
80 ; Value is a positive number or 0.
81 ; Setting it to 0 will allow unlimited header field sizes.
81 ; Setting it to 0 will allow unlimited header field sizes.
82 #limit_request_field_size = 0
82 #limit_request_field_size = 0
83
83
84 ; Timeout for graceful workers restart.
84 ; Timeout for graceful workers restart.
85 ; After receiving a restart signal, workers have this much time to finish
85 ; After receiving a restart signal, workers have this much time to finish
86 ; serving requests. Workers still alive after the timeout (starting from the
86 ; serving requests. Workers still alive after the timeout (starting from the
87 ; receipt of the restart signal) are force killed.
87 ; receipt of the restart signal) are force killed.
88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 #graceful_timeout = 3600
89 #graceful_timeout = 21600
90
90
91 # The number of seconds to wait for requests on a Keep-Alive connection.
91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 # Generally set in the 1-5 seconds range.
92 # Generally set in the 1-5 seconds range.
93 #keepalive = 2
93 #keepalive = 2
94
94
95 ; Maximum memory usage that each worker can use before it will receive a
95 ; Maximum memory usage that each worker can use before it will receive a
96 ; graceful restart signal. 0 = memory monitoring is disabled
96 ; graceful restart signal. 0 = memory monitoring is disabled
97 ; Examples: 268435456 (256MB), 536870912 (512MB)
97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 #memory_max_usage = 0
99 #memory_max_usage = 0
100
100
101 ; How often in seconds to check for memory usage for each gunicorn worker
101 ; How often in seconds to check for memory usage for each gunicorn worker
102 #memory_usage_check_interval = 60
102 #memory_usage_check_interval = 60
103
103
104 ; Threshold value below which we don't recycle a worker if GarbageCollection
104 ; Threshold value below which we don't recycle a worker if GarbageCollection
105 ; frees up enough resources. Before each restart we try to run GC on the worker;
105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 ; if we get enough free memory after that, the restart will not happen.
106 ; if we get enough free memory after that, the restart will not happen.
107 #memory_usage_recovery_threshold = 0.8
107 #memory_usage_recovery_threshold = 0.8
108
108
109
109
110 [app:main]
110 [app:main]
111 ; The %(here)s variable will be replaced with the absolute path of parent directory
111 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 ; of this file
112 ; of this file
113 ; Each option in the app:main section can be overridden by an environment variable
114 ;
115 ;To override an option:
116 ;
117 ;RC_<KeyName>
118 ;Everything should be uppercase, . and - should be replaced by _.
119 ;For example, if you have these configuration settings:
120 ;rc_cache.repo_object.backend = foo
121 ;can be overridden by
122 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
123
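As a sketch of the same mapping from the Python side (using the _key_transformator helper from the new settings_maker.py): the option name is upper-cased, prefixed with RC_, and has dots and dashes replaced by underscores. The option used below is only an example:

import os
from vcsserver.config.settings_maker import SettingsMaker

# 'core.binary_dir' maps to the environment key 'RC_CORE_BINARY_DIR'
env_key = SettingsMaker._key_transformator('core.binary_dir')
os.environ[env_key] = '/usr/local/bin'
# make_setting('core.binary_dir', ...) now prefers this value over the ini one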
113 use = egg:rhodecode-vcsserver
124 use = egg:rhodecode-vcsserver
114
125
115
126
116 ; #############
127 ; #############
117 ; DEBUG OPTIONS
128 ; DEBUG OPTIONS
118 ; #############
129 ; #############
119
130
120 # During development we want to have the debug toolbar enabled
131 # During development we want to have the debug toolbar enabled
121 pyramid.includes =
132 pyramid.includes =
122 pyramid_debugtoolbar
133 pyramid_debugtoolbar
123
134
124 debugtoolbar.hosts = 0.0.0.0/0
135 debugtoolbar.hosts = 0.0.0.0/0
125 debugtoolbar.exclude_prefixes =
136 debugtoolbar.exclude_prefixes =
126 /css
137 /css
127 /fonts
138 /fonts
128 /images
139 /images
129 /js
140 /js
130
141
131 ; #################
142 ; #################
132 ; END DEBUG OPTIONS
143 ; END DEBUG OPTIONS
133 ; #################
144 ; #################
134
145
135 ; Pyramid default locales, we need this to be set
146 ; Pyramid default locales, we need this to be set
136 pyramid.default_locale_name = en
147 #pyramid.default_locale_name = en
137
148
138 ; default locale used by VCS systems
149 ; default locale used by VCS systems
139 locale = en_US.UTF-8
150 #locale = en_US.UTF-8
140
151
141 ; path to binaries for vcsserver, it should be set by the installer
152 ; path to binaries for vcsserver, it should be set by the installer
142 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
153 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
143 ; it can also be a path to nix-build output in case of development
154 ; it can also be a path to nix-build output in case of development
144 core.binary_dir = ""
155 core.binary_dir = ""
145
156
146 ; Custom exception store path, defaults to TMPDIR
157 ; Custom exception store path, defaults to TMPDIR
147 ; This is used to store exceptions from RhodeCode in a shared directory
158 ; This is used to store exceptions from RhodeCode in a shared directory
148 #exception_tracker.store_path =
159 #exception_tracker.store_path =
149
160
150 ; #############
161 ; #############
151 ; DOGPILE CACHE
162 ; DOGPILE CACHE
152 ; #############
163 ; #############
153
164
154 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
165 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
155 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
166 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
156 cache_dir = %(here)s/data
167 #cache_dir = %(here)s/data
157
168
158 ; ***************************************
169 ; ***************************************
159 ; `repo_object` cache, default file based
170 ; `repo_object` cache, default file based
160 ; ***************************************
171 ; ***************************************
161
172
162 ; `repo_object` cache settings for vcs methods for repositories
173 ; `repo_object` cache settings for vcs methods for repositories
163 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
174 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
164
175
165 ; cache auto-expires after N seconds
176 ; cache auto-expires after N seconds
166 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
177 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
167 rc_cache.repo_object.expiration_time = 2592000
178 #rc_cache.repo_object.expiration_time = 2592000
168
179
169 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
180 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
170 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
181 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
171
182
172 ; ***********************************************************
183 ; ***********************************************************
173 ; `repo_object` cache with redis backend
184 ; `repo_object` cache with redis backend
174 ; recommended for larger instances, and for better performance
185 ; recommended for larger instances, and for better performance
175 ; ***********************************************************
186 ; ***********************************************************
176
187
177 ; `repo_object` cache settings for vcs methods for repositories
188 ; `repo_object` cache settings for vcs methods for repositories
178 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
189 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
179
190
180 ; cache auto-expires after N seconds
191 ; cache auto-expires after N seconds
181 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
192 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
182 #rc_cache.repo_object.expiration_time = 2592000
193 #rc_cache.repo_object.expiration_time = 2592000
183
194
184 ; redis_expiration_time needs to be greater than expiration_time
195 ; redis_expiration_time needs to be greater than expiration_time
185 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
196 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
186
197
187 #rc_cache.repo_object.arguments.host = localhost
198 #rc_cache.repo_object.arguments.host = localhost
188 #rc_cache.repo_object.arguments.port = 6379
199 #rc_cache.repo_object.arguments.port = 6379
189 #rc_cache.repo_object.arguments.db = 5
200 #rc_cache.repo_object.arguments.db = 5
190 #rc_cache.repo_object.arguments.socket_timeout = 30
201 #rc_cache.repo_object.arguments.socket_timeout = 30
191 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
202 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
192 #rc_cache.repo_object.arguments.distributed_lock = true
203 #rc_cache.repo_object.arguments.distributed_lock = true
193
204
194 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
205 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
195 #rc_cache.repo_object.arguments.lock_auto_renewal = true
206 #rc_cache.repo_object.arguments.lock_auto_renewal = true
196
207
197 ; Statsd client config; this is used to send metrics to statsd
208 ; Statsd client config; this is used to send metrics to statsd
198 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
209 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
199 #statsd.enabled = false
210 #statsd.enabled = false
200 #statsd.statsd_host = 0.0.0.0
211 #statsd.statsd_host = 0.0.0.0
201 #statsd.statsd_port = 8125
212 #statsd.statsd_port = 8125
202 #statsd.statsd_prefix =
213 #statsd.statsd_prefix =
203 #statsd.statsd_ipv6 = false
214 #statsd.statsd_ipv6 = false
204
215
216 ; configure logging automatically at server startup; set to false
217 ; to use the custom logging config below.
218 #logging.autoconfigure = true
219
220 ; specify your own custom logging config file to configure logging
221 #logging.logging_conf_file = /path/to/custom_logging.ini
222
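Logging behaviour can therefore be tuned without editing this file. A rough sketch following the enable_logging() logic from settings_maker.py; the function name and the rendered template path are illustrative, since enable_logging() writes its own tempfile:

import os
import logging.config
from vcsserver.config.settings_maker import str2bool

# equivalent of `export RC_LOGGING_LEVEL=DEBUG RC_LOGGING_FORMATTER=json`
os.environ['RC_LOGGING_LEVEL'] = 'DEBUG'
os.environ['RC_LOGGING_FORMATTER'] = 'json'

def maybe_configure_logging(settings, rendered_logging_ini):
    if not str2bool(settings.get('logging.autoconfigure')):
        return False  # keep the logging sections defined in this .ini instead
    logging.config.fileConfig(rendered_logging_ini)
    return True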
205 ; #####################
223 ; #####################
206 ; LOGGING CONFIGURATION
224 ; LOGGING CONFIGURATION
207 ; #####################
225 ; #####################
208 [loggers]
226 #[loggers]
209 keys = root, vcsserver
227 #keys = root, vcsserver
210
228
211 [handlers]
229 #[handlers]
212 keys = console
230 #keys = console
213
231
214 [formatters]
232 #[formatters]
215 keys = generic
233 #keys = generic
216
234
217 ; #######
235 ; #######
218 ; LOGGERS
236 ; LOGGERS
219 ; #######
237 ; #######
220 [logger_root]
238 #[logger_root]
221 level = NOTSET
239 #level = NOTSET
222 handlers = console
240 #handlers = console
223
241
224 [logger_vcsserver]
242 #[logger_vcsserver]
225 level = DEBUG
243 #level = INFO
226 handlers =
244 #handlers =
227 qualname = vcsserver
245 #qualname = vcsserver
228 propagate = 1
246 #propagate = 1
229
230
247
231 ; ########
248 ; ########
232 ; HANDLERS
249 ; HANDLERS
233 ; ########
250 ; ########
234
251
235 [handler_console]
252 #[handler_console]
236 class = StreamHandler
253 #class = StreamHandler
237 args = (sys.stderr, )
254 #args = (sys.stderr, )
238 level = DEBUG
255 #level = INFO
239 formatter = generic
240 ; To enable JSON formatted logs replace generic with json
256 ; To enable JSON formatted logs replace generic with json
241 ; This allows sending properly formatted logs to grafana loki or elasticsearch
257 ; This allows sending properly formatted logs to grafana loki or elasticsearch
242 #formatter = json
258 #formatter = json
243
259 #formatter = generic
244
260
245 ; ##########
261 ; ##########
246 ; FORMATTERS
262 ; FORMATTERS
247 ; ##########
263 ; ##########
248
264
249 [formatter_generic]
265 #[formatter_generic]
250 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
266 #format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
251 datefmt = %Y-%m-%d %H:%M:%S
267 #datefmt = %Y-%m-%d %H:%M:%S
252
268
253 [formatter_json]
269 #[formatter_json]
254 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
270 #format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
255 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
271 #class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
256
@@ -1,269 +1,272 b''
1 """
1 """
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 """
4 """
5
5
6 import gc
6 import gc
7 import os
7 import os
8 import sys
8 import sys
9 import math
9 import math
10 import time
10 import time
11 import threading
11 import threading
12 import traceback
12 import traceback
13 import random
13 import random
14 from gunicorn.glogging import Logger
14 from gunicorn.glogging import Logger
15
15
16
16
17 def get_workers():
17 def get_workers():
18 import multiprocessing
18 import multiprocessing
19 return multiprocessing.cpu_count() * 2 + 1
19 return multiprocessing.cpu_count() * 2 + 1
20
20
21 # GLOBAL
21 # GLOBAL
22 errorlog = '-'
22 errorlog = '-'
23 accesslog = '-'
23 accesslog = '-'
24
24
25
25
26 # SERVER MECHANICS
26 # SERVER MECHANICS
27 # None == system temp dir
27 # None == system temp dir
28 # worker_tmp_dir is recommended to be set to some tmpfs
28 # worker_tmp_dir is recommended to be set to some tmpfs
29 worker_tmp_dir = None
29 worker_tmp_dir = None
30 tmp_upload_dir = None
30 tmp_upload_dir = None
31
31
32 # Custom log format
32 # Custom log format
33 access_log_format = (
33 #access_log_format = (
34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
34 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
35
35
36 # loki format for easier parsing in grafana
36 # loki format for easier parsing in grafana
37 #access_log_format = (
37 access_log_format = (
38 # 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
38 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
39
39
40 # self adjust workers based on CPU count
40 # self adjust workers based on CPU count
41 # workers = get_workers()
41 # workers = get_workers()
42
42
43
43
44 def _get_process_rss(pid=None):
44 def _get_process_rss(pid=None):
45 try:
45 try:
46 import psutil
46 import psutil
47 if pid:
47 if pid:
48 proc = psutil.Process(pid)
48 proc = psutil.Process(pid)
49 else:
49 else:
50 proc = psutil.Process()
50 proc = psutil.Process()
51 return proc.memory_info().rss
51 return proc.memory_info().rss
52 except Exception:
52 except Exception:
53 return None
53 return None
54
54
55
55
56 def _get_config(ini_path):
56 def _get_config(ini_path):
57
57
58 try:
58 try:
59 import configparser
59 import configparser
60 except ImportError:
60 except ImportError:
61 import ConfigParser as configparser
61 import ConfigParser as configparser
62 try:
62 try:
63 config = configparser.RawConfigParser()
63 config = configparser.RawConfigParser()
64 config.read(ini_path)
64 config.read(ini_path)
65 return config
65 return config
66 except Exception:
66 except Exception:
67 return None
67 return None
68
68
69
69
70 def _time_with_offset(memory_usage_check_interval):
70 def _time_with_offset(memory_usage_check_interval):
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
72
72
73
73
74 def pre_fork(server, worker):
74 def pre_fork(server, worker):
75 pass
75 pass
76
76
77
77
78 def post_fork(server, worker):
78 def post_fork(server, worker):
79
79
80 # memory spec defaults
80 # memory spec defaults
81 _memory_max_usage = 0
81 _memory_max_usage = 0
82 _memory_usage_check_interval = 60
82 _memory_usage_check_interval = 60
83 _memory_usage_recovery_threshold = 0.8
83 _memory_usage_recovery_threshold = 0.8
84
84
85 ini_path = os.path.abspath(server.cfg.paste)
85 ini_path = os.path.abspath(server.cfg.paste)
86 conf = _get_config(ini_path)
86 conf = _get_config(ini_path)
87
87
88 section = 'server:main'
88 section = 'server:main'
89 if conf and conf.has_section(section):
89 if conf and conf.has_section(section):
90
90
91 if conf.has_option(section, 'memory_max_usage'):
91 if conf.has_option(section, 'memory_max_usage'):
92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
93
93
94 if conf.has_option(section, 'memory_usage_check_interval'):
94 if conf.has_option(section, 'memory_usage_check_interval'):
95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
96
96
97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
99
99
100 worker._memory_max_usage = _memory_max_usage
100 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 worker._memory_usage_check_interval = _memory_usage_check_interval
101 or _memory_max_usage)
102 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
102 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
103 or _memory_usage_check_interval)
104 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
105 or _memory_usage_recovery_threshold)
103
106
104 # register memory last check time, with some random offset so we don't recycle all
107 # register memory last check time, with some random offset so we don't recycle all
105 # at once
108 # at once
106 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
107
110
108 if _memory_max_usage:
111 if _memory_max_usage:
109 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
110 _format_data_size(_memory_max_usage))
113 _format_data_size(_memory_max_usage))
111 else:
114 else:
112 server.log.info("[%-10s] WORKER spawned", worker.pid)
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
113
116
114
117
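The three worker limits set above can now also come from the environment. A small sketch of the precedence rule used in post_fork(): environment variable over the [server:main] value over the hard-coded default:

import os

_memory_max_usage = 0  # hard-coded default, as in post_fork() above

# an unset or empty env var falls through to the ini/default value via `or`
memory_max_usage = int(
    os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '') or _memory_max_usage)

# e.g. RC_GUNICORN_MEMORY_MAX_USAGE=1073741824 caps each worker at 1 GiB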
115 def pre_exec(server):
118 def pre_exec(server):
116 server.log.info("Forked child, re-executing.")
119 server.log.info("Forked child, re-executing.")
117
120
118
121
119 def on_starting(server):
122 def on_starting(server):
120 server_lbl = '{} {}'.format(server.proc_name, server.address)
123 server_lbl = '{} {}'.format(server.proc_name, server.address)
121 server.log.info("Server %s is starting.", server_lbl)
124 server.log.info("Server %s is starting.", server_lbl)
122
125
123
126
124 def when_ready(server):
127 def when_ready(server):
125 server.log.info("Server %s is ready. Spawning workers", server)
128 server.log.info("Server %s is ready. Spawning workers", server)
126
129
127
130
128 def on_reload(server):
131 def on_reload(server):
129 pass
132 pass
130
133
131
134
132 def _format_data_size(size, unit="B", precision=1, binary=True):
135 def _format_data_size(size, unit="B", precision=1, binary=True):
133 """Format a number using SI units (kilo, mega, etc.).
136 """Format a number using SI units (kilo, mega, etc.).
134
137
135 ``size``: The number as a float or int.
138 ``size``: The number as a float or int.
136
139
137 ``unit``: The unit name in plural form. Examples: "bytes", "B".
140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
138
141
139 ``precision``: How many digits to the right of the decimal point. Default
142 ``precision``: How many digits to the right of the decimal point. Default
140 is 1. 0 suppresses the decimal point.
143 is 1. 0 suppresses the decimal point.
141
144
142 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
143 If true, use base-2 binary prefixes (kibi = Ki = 1024).
146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
144
147
145 ``full_name``: If false (default), use the prefix abbreviation ("k" or
148 ``full_name``: If false (default), use the prefix abbreviation ("k" or
146 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
149 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
147 use abbreviation ("k" or "Ki").
150 use abbreviation ("k" or "Ki").
148
151
149 """
152 """
150
153
151 if not binary:
154 if not binary:
152 base = 1000
155 base = 1000
153 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
154 else:
157 else:
155 base = 1024
158 base = 1024
156 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
157
160
158 sign = ""
161 sign = ""
159 if size > 0:
162 if size > 0:
160 m = int(math.log(size, base))
163 m = int(math.log(size, base))
161 elif size < 0:
164 elif size < 0:
162 sign = "-"
165 sign = "-"
163 size = -size
166 size = -size
164 m = int(math.log(size, base))
167 m = int(math.log(size, base))
165 else:
168 else:
166 m = 0
169 m = 0
167 if m > 8:
170 if m > 8:
168 m = 8
171 m = 8
169
172
170 if m == 0:
173 if m == 0:
171 precision = '%.0f'
174 precision = '%.0f'
172 else:
175 else:
173 precision = '%%.%df' % precision
176 precision = '%%.%df' % precision
174
177
175 size = precision % (size / math.pow(base, m))
178 size = precision % (size / math.pow(base, m))
176
179
177 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
178
181
179
182
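A few hand-checked values for the helper above, under its default binary (base-1024) prefixes and with binary=False:

# quick sanity check of _format_data_size()
print(_format_data_size(536870912))                # '512.0 MiB' (base-1024 default)
print(_format_data_size(536870912, binary=False))  # '536.9 MB'  (base-1000 prefixes)
print(_format_data_size(0))                        # '0 B'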
180 def _check_memory_usage(worker):
183 def _check_memory_usage(worker):
181 memory_max_usage = worker._memory_max_usage
184 memory_max_usage = worker._memory_max_usage
182 if not memory_max_usage:
185 if not memory_max_usage:
183 return
186 return
184
187
185 memory_usage_check_interval = worker._memory_usage_check_interval
188 memory_usage_check_interval = worker._memory_usage_check_interval
186 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
187
190
188 elapsed = time.time() - worker._last_memory_check_time
191 elapsed = time.time() - worker._last_memory_check_time
189 if elapsed > memory_usage_check_interval:
192 if elapsed > memory_usage_check_interval:
190 mem_usage = _get_process_rss()
193 mem_usage = _get_process_rss()
191 if mem_usage and mem_usage > memory_max_usage:
194 if mem_usage and mem_usage > memory_max_usage:
192 worker.log.info(
195 worker.log.info(
193 "memory usage %s > %s, forcing gc",
196 "memory usage %s > %s, forcing gc",
194 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
195 # Try to clean it up by forcing a full collection.
198 # Try to clean it up by forcing a full collection.
196 gc.collect()
199 gc.collect()
197 mem_usage = _get_process_rss()
200 mem_usage = _get_process_rss()
198 if mem_usage > memory_usage_recovery_threshold:
201 if mem_usage > memory_usage_recovery_threshold:
199 # Didn't clean up enough, we'll have to terminate.
202 # Didn't clean up enough, we'll have to terminate.
200 worker.log.warning(
203 worker.log.warning(
201 "memory usage %s > %s after gc, quitting",
204 "memory usage %s > %s after gc, quitting",
202 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
203 # This will cause worker to auto-restart itself
206 # This will cause worker to auto-restart itself
204 worker.alive = False
207 worker.alive = False
205 worker._last_memory_check_time = time.time()
208 worker._last_memory_check_time = time.time()
206
209
207
210
208 def worker_int(worker):
211 def worker_int(worker):
209 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
210
213
211 # get traceback info, on worker crash
214 # get traceback info, on worker crash
212 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
213 code = []
216 code = []
214 for thread_id, stack in sys._current_frames().items():
217 for thread_id, stack in sys._current_frames().items():
215 code.append(
218 code.append(
216 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
217 for fname, lineno, name, line in traceback.extract_stack(stack):
220 for fname, lineno, name, line in traceback.extract_stack(stack):
218 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
221 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
219 if line:
222 if line:
220 code.append(" %s" % (line.strip()))
223 code.append(" %s" % (line.strip()))
221 worker.log.debug("\n".join(code))
224 worker.log.debug("\n".join(code))
222
225
223
226
224 def worker_abort(worker):
227 def worker_abort(worker):
225 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
226
229
227
230
228 def worker_exit(server, worker):
231 def worker_exit(server, worker):
229 worker.log.info("[%-10s] worker exit", worker.pid)
232 worker.log.info("[%-10s] worker exit", worker.pid)
230
233
231
234
232 def child_exit(server, worker):
235 def child_exit(server, worker):
233 worker.log.info("[%-10s] worker child exit", worker.pid)
236 worker.log.info("[%-10s] worker child exit", worker.pid)
234
237
235
238
236 def pre_request(worker, req):
239 def pre_request(worker, req):
237 worker.start_time = time.time()
240 worker.start_time = time.time()
238 worker.log.debug(
241 worker.log.debug(
239 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
242 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
240
243
241
244
242 def post_request(worker, req, environ, resp):
245 def post_request(worker, req, environ, resp):
243 total_time = time.time() - worker.start_time
246 total_time = time.time() - worker.start_time
244 # Gunicorn sometimes has problems with reading the status_code
247 # Gunicorn sometimes has problems with reading the status_code
245 status_code = getattr(resp, 'status_code', '')
248 status_code = getattr(resp, 'status_code', '')
246 worker.log.debug(
249 worker.log.debug(
247 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
250 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
248 worker.nr, req.method, req.path, status_code, total_time)
251 worker.nr, req.method, req.path, status_code, total_time)
249 _check_memory_usage(worker)
252 _check_memory_usage(worker)
250
253
251
254
252 class RhodeCodeLogger(Logger):
255 class RhodeCodeLogger(Logger):
253 """
256 """
254 Custom Logger that allows some customization that gunicorn doesn't allow
257 Custom Logger that allows some customization that gunicorn doesn't allow
255 """
258 """
256
259
257 datefmt = r"%Y-%m-%d %H:%M:%S"
260 datefmt = r"%Y-%m-%d %H:%M:%S"
258
261
259 def __init__(self, cfg):
262 def __init__(self, cfg):
260 Logger.__init__(self, cfg)
263 Logger.__init__(self, cfg)
261
264
262 def now(self):
265 def now(self):
263 """ return date in RhodeCode Log format """
266 """ return date in RhodeCode Log format """
264 now = time.time()
267 now = time.time()
265 msecs = int((now - long(now)) * 1000)
268 msecs = int((now - long(now)) * 1000)
266 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
269 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
267
270
268
271
269 logger_class = RhodeCodeLogger
272 logger_class = RhodeCodeLogger
@@ -1,219 +1,234 b''
1 ## -*- coding: utf-8 -*-
1 ## -*- coding: utf-8 -*-
2
2
3 ; #################################
3 ; #################################
4 ; RHODECODE VCSSERVER CONFIGURATION
4 ; RHODECODE VCSSERVER CONFIGURATION
5 ; #################################
5 ; #################################
6
6
7 [server:main]
7 [server:main]
8 ; COMMON HOST/IP CONFIG
8 ; COMMON HOST/IP CONFIG
9 host = 127.0.0.1
9 host = 127.0.0.1
10 port = 9900
10 port = 9900
11
11
12
12
13 ; ###########################
13 ; ###########################
14 ; GUNICORN APPLICATION SERVER
14 ; GUNICORN APPLICATION SERVER
15 ; ###########################
15 ; ###########################
16
16
17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
17 ; run with gunicorn --paste rhodecode.ini
18
18
19 ; Module to use, this setting shouldn't be changed
19 ; Module to use, this setting shouldn't be changed
20 use = egg:gunicorn#main
20 use = egg:gunicorn#main
21
21
22 ; Sets the number of process workers. More workers means more concurrent connections
22 ; Sets the number of process workers. More workers means more concurrent connections
23 ; RhodeCode can handle at the same time. Each additional worker also increases
23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 ; memory usage as each has its own set of caches.
24 ; memory usage as each has its own set of caches.
25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
26 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 ; when using more than 1 worker.
28 ; when using more than 1 worker.
29 workers = 2
29 workers = 2
30
30
31 ; Gunicorn access log level
31 ; Gunicorn access log level
32 loglevel = info
32 loglevel = info
33
33
34 ; Process name visible in process list
34 ; Process name visible in process list
35 proc_name = rhodecode_vcsserver
35 proc_name = rhodecode_vcsserver
36
36
37 ; Type of worker class, one of `sync`, `gevent`
37 ; Type of worker class, one of `sync`, `gevent`
38 ; currently `sync` is the only option allowed.
38 ; currently `sync` is the only option allowed.
39 worker_class = sync
39 worker_class = sync
40
40
41 ; The maximum number of simultaneous clients. Valid only for gevent
41 ; The maximum number of simultaneous clients. Valid only for gevent
42 worker_connections = 10
42 worker_connections = 10
43
43
44 ; Max number of requests that worker will handle before being gracefully restarted.
44 ; Max number of requests that worker will handle before being gracefully restarted.
45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
46 max_requests = 1000
46 max_requests = 1000
47 max_requests_jitter = 30
47 max_requests_jitter = 30
48
48
49 ; Amount of time a worker can spend handling a request before it
49 ; Amount of time a worker can spend handling a request before it
50 ; gets killed and restarted. By default set to 21600 (6hrs)
50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 timeout = 21600
52 timeout = 21600
53
53
54 ; The maximum size of HTTP request line in bytes.
54 ; The maximum size of HTTP request line in bytes.
55 ; 0 for unlimited
55 ; 0 for unlimited
56 limit_request_line = 0
56 limit_request_line = 0
57
57
58 ; Limit the number of HTTP headers fields in a request.
58 ; Limit the number of HTTP headers fields in a request.
59 ; By default this value is 100 and can't be larger than 32768.
59 ; By default this value is 100 and can't be larger than 32768.
60 limit_request_fields = 32768
60 limit_request_fields = 32768
61
61
62 ; Limit the allowed size of an HTTP request header field.
62 ; Limit the allowed size of an HTTP request header field.
63 ; Value is a positive number or 0.
63 ; Value is a positive number or 0.
64 ; Setting it to 0 will allow unlimited header field sizes.
64 ; Setting it to 0 will allow unlimited header field sizes.
65 limit_request_field_size = 0
65 limit_request_field_size = 0
66
66
67 ; Timeout for graceful workers restart.
67 ; Timeout for graceful workers restart.
68 ; After receiving a restart signal, workers have this much time to finish
68 ; After receiving a restart signal, workers have this much time to finish
69 ; serving requests. Workers still alive after the timeout (starting from the
69 ; serving requests. Workers still alive after the timeout (starting from the
70 ; receipt of the restart signal) are force killed.
70 ; receipt of the restart signal) are force killed.
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 graceful_timeout = 3600
72 graceful_timeout = 21600
73
73
74 # The number of seconds to wait for requests on a Keep-Alive connection.
74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 # Generally set in the 1-5 seconds range.
75 # Generally set in the 1-5 seconds range.
76 keepalive = 2
76 keepalive = 2
77
77
78 ; Maximum memory usage that each worker can use before it will receive a
78 ; Maximum memory usage that each worker can use before it will receive a
79 ; graceful restart signal. 0 = memory monitoring is disabled
79 ; graceful restart signal. 0 = memory monitoring is disabled
80 ; Examples: 268435456 (256MB), 536870912 (512MB)
80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 memory_max_usage = 0
82 memory_max_usage = 0
83
83
84 ; How often in seconds to check for memory usage for each gunicorn worker
84 ; How often in seconds to check for memory usage for each gunicorn worker
85 memory_usage_check_interval = 60
85 memory_usage_check_interval = 60
86
86
87 ; Threshold value below which we don't recycle a worker if GarbageCollection
87 ; Threshold value below which we don't recycle a worker if GarbageCollection
88 ; frees up enough resources. Before each restart we try to run GC on the worker;
88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 ; if we get enough free memory after that, the restart will not happen.
89 ; if we get enough free memory after that, the restart will not happen.
90 memory_usage_recovery_threshold = 0.8
90 memory_usage_recovery_threshold = 0.8
91
91
92
92
93 [app:main]
93 [app:main]
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 ; of this file
95 ; of this file
96 ; Each option in the app:main section can be overridden by an environment variable
97 ;
98 ;To override an option:
99 ;
100 ;RC_<KeyName>
101 ;Everything should be uppercase, . and - should be replaced by _.
102 ;For example, if you have these configuration settings:
103 ;rc_cache.repo_object.backend = foo
104 ;can be overridden by
105 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
106
96 use = egg:rhodecode-vcsserver
107 use = egg:rhodecode-vcsserver
97
108
98 ; Pyramid default locales, we need this to be set
109 ; Pyramid default locales, we need this to be set
99 pyramid.default_locale_name = en
110 #pyramid.default_locale_name = en
100
111
101 ; default locale used by VCS systems
112 ; default locale used by VCS systems
102 locale = en_US.UTF-8
113 #locale = en_US.UTF-8
103
114
104 ; path to binaries for vcsserver, it should be set by the installer
115 ; path to binaries for vcsserver, it should be set by the installer
105 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
116 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
106 ; it can also be a path to nix-build output in case of development
117 ; it can also be a path to nix-build output in case of development
107 core.binary_dir = ""
118 core.binary_dir = ""
108
119
109 ; Custom exception store path, defaults to TMPDIR
120 ; Custom exception store path, defaults to TMPDIR
110 ; This is used to store exceptions from RhodeCode in a shared directory
121 ; This is used to store exceptions from RhodeCode in a shared directory
111 #exception_tracker.store_path =
122 #exception_tracker.store_path =
112
123
113 ; #############
124 ; #############
114 ; DOGPILE CACHE
125 ; DOGPILE CACHE
115 ; #############
126 ; #############
116
127
117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
129 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
119 cache_dir = %(here)s/data
130 #cache_dir = %(here)s/data
120
131
121 ; ***************************************
132 ; ***************************************
122 ; `repo_object` cache, default file based
133 ; `repo_object` cache, default file based
123 ; ***************************************
134 ; ***************************************
124
135
125 ; `repo_object` cache settings for vcs methods for repositories
136 ; `repo_object` cache settings for vcs methods for repositories
126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127
138
128 ; cache auto-expires after N seconds
139 ; cache auto-expires after N seconds
129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 rc_cache.repo_object.expiration_time = 2592000
141 #rc_cache.repo_object.expiration_time = 2592000
131
142
132 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
143 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
134
145
135 ; ***********************************************************
146 ; ***********************************************************
136 ; `repo_object` cache with redis backend
147 ; `repo_object` cache with redis backend
137 ; recommended for larger instances, and for better performance
148 ; recommended for larger instances, and for better performance
138 ; ***********************************************************
149 ; ***********************************************************
139
150
140 ; `repo_object` cache settings for vcs methods for repositories
151 ; `repo_object` cache settings for vcs methods for repositories
141 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
152 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
142
153
143 ; cache auto-expires after N seconds
154 ; cache auto-expires after N seconds
144 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
155 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
145 #rc_cache.repo_object.expiration_time = 2592000
156 #rc_cache.repo_object.expiration_time = 2592000
146
157
147 ; redis_expiration_time needs to be greater than expiration_time
158 ; redis_expiration_time needs to be greater than expiration_time
148 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
159 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
149
160
150 #rc_cache.repo_object.arguments.host = localhost
161 #rc_cache.repo_object.arguments.host = localhost
151 #rc_cache.repo_object.arguments.port = 6379
162 #rc_cache.repo_object.arguments.port = 6379
152 #rc_cache.repo_object.arguments.db = 5
163 #rc_cache.repo_object.arguments.db = 5
153 #rc_cache.repo_object.arguments.socket_timeout = 30
164 #rc_cache.repo_object.arguments.socket_timeout = 30
154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 #rc_cache.repo_object.arguments.distributed_lock = true
166 #rc_cache.repo_object.arguments.distributed_lock = true
156
167
157 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
158 #rc_cache.repo_object.arguments.lock_auto_renewal = true
169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
159
170
160 ; Statsd client config; this is used to send metrics to statsd
171 ; Statsd client config; this is used to send metrics to statsd
161 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
172 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
162 #statsd.enabled = false
173 #statsd.enabled = false
163 #statsd.statsd_host = 0.0.0.0
174 #statsd.statsd_host = 0.0.0.0
164 #statsd.statsd_port = 8125
175 #statsd.statsd_port = 8125
165 #statsd.statsd_prefix =
176 #statsd.statsd_prefix =
166 #statsd.statsd_ipv6 = false
177 #statsd.statsd_ipv6 = false
167
178
179 ; configure logging automatically at server startup; set to false
180 ; to use the custom logging config below.
181 #logging.autoconfigure = true
182
183 ; specify your own custom logging config file to configure logging
184 #logging.logging_conf_file = /path/to/custom_logging.ini
185
168 ; #####################
186 ; #####################
169 ; LOGGING CONFIGURATION
187 ; LOGGING CONFIGURATION
170 ; #####################
188 ; #####################
171 [loggers]
189 #[loggers]
172 keys = root, vcsserver
190 #keys = root, vcsserver
173
191
174 [handlers]
192 #[handlers]
175 keys = console
193 #keys = console
176
194
177 [formatters]
195 #[formatters]
178 keys = generic
196 #keys = generic
179
197
180 ; #######
198 ; #######
181 ; LOGGERS
199 ; LOGGERS
182 ; #######
200 ; #######
183 [logger_root]
201 #[logger_root]
184 level = NOTSET
202 #level = NOTSET
185 handlers = console
203 #handlers = console
186
204
187 [logger_vcsserver]
205 #[logger_vcsserver]
188 level = DEBUG
206 #level = INFO
189 handlers =
207 #handlers =
190 qualname = vcsserver
208 #qualname = vcsserver
191 propagate = 1
209 #propagate = 1
192
193
210
194 ; ########
211 ; ########
195 ; HANDLERS
212 ; HANDLERS
196 ; ########
213 ; ########
197
214
198 [handler_console]
215 #[handler_console]
199 class = StreamHandler
216 #class = StreamHandler
200 args = (sys.stderr, )
217 #args = (sys.stderr, )
201 level = INFO
218 #level = INFO
202 formatter = generic
203 ; To enable JSON formatted logs replace generic with json
219 ; To enable JSON formatted logs replace generic with json
204 ; This allows sending properly formatted logs to grafana loki or elasticsearch
220 ; This allows sending properly formatted logs to grafana loki or elasticsearch
205 #formatter = json
221 #formatter = json
206
222 #formatter = generic
207
223
208 ; ##########
224 ; ##########
209 ; FORMATTERS
225 ; FORMATTERS
210 ; ##########
226 ; ##########
211
227
212 [formatter_generic]
228 #[formatter_generic]
213 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
229 #format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
214 datefmt = %Y-%m-%d %H:%M:%S
230 #datefmt = %Y-%m-%d %H:%M:%S
215
231
216 [formatter_json]
232 #[formatter_json]
217 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
233 #format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
218 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
234 #class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
219
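
The two logging.* options introduced above externalize the logging setup: with logging.autoconfigure enabled the server wires up logging from a bundled template at startup, while logging.logging_conf_file lets you point at your own ini-style config instead. Below is only a minimal sketch of what loading such an external file looks like with the standard library; load_custom_logging and its settings argument are hypothetical names for illustration, not the changeset's actual implementation (that wiring goes through settings_maker.enable_logging, visible further down in this diff).

import logging.config

def load_custom_logging(settings):
    # Hypothetical helper: if automatic configuration is turned off and a
    # custom file is named, hand it to the stdlib ini-style config loader.
    if str(settings.get('logging.autoconfigure', 'true')).lower() == 'false':
        conf_file = settings.get('logging.logging_conf_file')
        if conf_file:
            logging.config.fileConfig(conf_file, disable_existing_loggers=False)
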
@@ -1,720 +1,750 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 #
3 #
4 # This program is free software; you can redistribute it and/or modify
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
7 # (at your option) any later version.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU General Public License
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17
18 import os
18 import os
19 import sys
19 import sys
20 import base64
20 import base64
21 import locale
21 import locale
22 import logging
22 import logging
23 import uuid
23 import uuid
24 import time
24 import wsgiref.util
25 import wsgiref.util
25 import traceback
26 import traceback
26 import tempfile
27 import tempfile
27 import psutil
28 import psutil
29
28 from itertools import chain
30 from itertools import chain
29 from cStringIO import StringIO
31 from cStringIO import StringIO
30
32
31 import simplejson as json
33 import simplejson as json
32 import msgpack
34 import msgpack
33 from pyramid.config import Configurator
35 from pyramid.config import Configurator
34 from pyramid.settings import asbool, aslist
35 from pyramid.wsgi import wsgiapp
36 from pyramid.wsgi import wsgiapp
36 from pyramid.compat import configparser
37 from pyramid.compat import configparser
37 from pyramid.response import Response
38 from pyramid.response import Response
38
39 from vcsserver.config.settings_maker import SettingsMaker
39 from vcsserver.utils import safe_int
40 from vcsserver.utils import safe_int
40 from vcsserver.lib.statsd_client import StatsdClient
41 from vcsserver.lib.statsd_client import StatsdClient
41
42
42 log = logging.getLogger(__name__)
43 log = logging.getLogger(__name__)
43
44
44 # due to Mercurial/glibc2.27 problems we need to detect if locale settings are
45 # due to Mercurial/glibc2.27 problems we need to detect if locale settings are
45 # causing problems and "fix" it in case they do and fallback to LC_ALL = C
46 # causing problems and "fix" it in case they do and fallback to LC_ALL = C
46
47
47 try:
48 try:
48 locale.setlocale(locale.LC_ALL, '')
49 locale.setlocale(locale.LC_ALL, '')
49 except locale.Error as e:
50 except locale.Error as e:
50 log.error(
51 log.error(
51 'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e)
52 'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e)
52 os.environ['LC_ALL'] = 'C'
53 os.environ['LC_ALL'] = 'C'
53
54
55
54 import vcsserver
56 import vcsserver
55 from vcsserver import remote_wsgi, scm_app, settings, hgpatches
57 from vcsserver import remote_wsgi, scm_app, settings, hgpatches
56 from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT
58 from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT
57 from vcsserver.echo_stub import remote_wsgi as remote_wsgi_stub
59 from vcsserver.echo_stub import remote_wsgi as remote_wsgi_stub
58 from vcsserver.echo_stub.echo_app import EchoApp
60 from vcsserver.echo_stub.echo_app import EchoApp
59 from vcsserver.exceptions import HTTPRepoLocked, HTTPRepoBranchProtected
61 from vcsserver.exceptions import HTTPRepoLocked, HTTPRepoBranchProtected
60 from vcsserver.lib.exc_tracking import store_exception
62 from vcsserver.lib.exc_tracking import store_exception
61 from vcsserver.server import VcsServer
63 from vcsserver.server import VcsServer
62
64
63 try:
65 try:
64 from vcsserver.git import GitFactory, GitRemote
66 from vcsserver.git import GitFactory, GitRemote
65 except ImportError:
67 except ImportError:
66 GitFactory = None
68 GitFactory = None
67 GitRemote = None
69 GitRemote = None
68
70
69 try:
71 try:
70 from vcsserver.hg import MercurialFactory, HgRemote
72 from vcsserver.hg import MercurialFactory, HgRemote
71 except ImportError:
73 except ImportError:
72 MercurialFactory = None
74 MercurialFactory = None
73 HgRemote = None
75 HgRemote = None
74
76
75 try:
77 try:
76 from vcsserver.svn import SubversionFactory, SvnRemote
78 from vcsserver.svn import SubversionFactory, SvnRemote
77 except ImportError:
79 except ImportError:
78 SubversionFactory = None
80 SubversionFactory = None
79 SvnRemote = None
81 SvnRemote = None
80
82
81
83
82 def _is_request_chunked(environ):
84 def _is_request_chunked(environ):
83 stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked'
85 stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked'
84 return stream
86 return stream
85
87
86
88
87 def _int_setting(settings, name, default):
88 settings[name] = int(settings.get(name, default))
89 return settings[name]
90
91
92 def _bool_setting(settings, name, default):
93 input_val = settings.get(name, default)
94 if isinstance(input_val, unicode):
95 input_val = input_val.encode('utf8')
96 settings[name] = asbool(input_val)
97 return settings[name]
98
99
100 def _list_setting(settings, name, default):
101 raw_value = settings.get(name, default)
102
103 # Otherwise we assume it uses pyramid's space/newline separation.
104 settings[name] = aslist(raw_value)
105 return settings[name]
106
107
108 def _string_setting(settings, name, default, lower=True, default_when_empty=False):
109 value = settings.get(name, default)
110
111 if default_when_empty and not value:
112 # use default value when value is empty
113 value = default
114
115 if lower:
116 value = value.lower()
117 settings[name] = value
118 return settings[name]
119
120
121 def log_max_fd():
89 def log_max_fd():
122 try:
90 try:
123 maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1]
91 maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1]
124 log.info('Max file descriptors value: %s', maxfd)
92 log.info('Max file descriptors value: %s', maxfd)
125 except Exception:
93 except Exception:
126 pass
94 pass
127
95
128
96
129 class VCS(object):
97 class VCS(object):
130 def __init__(self, locale_conf=None, cache_config=None):
98 def __init__(self, locale_conf=None, cache_config=None):
131 self.locale = locale_conf
99 self.locale = locale_conf
132 self.cache_config = cache_config
100 self.cache_config = cache_config
133 self._configure_locale()
101 self._configure_locale()
134
102
135 log_max_fd()
103 log_max_fd()
136
104
137 if GitFactory and GitRemote:
105 if GitFactory and GitRemote:
138 git_factory = GitFactory()
106 git_factory = GitFactory()
139 self._git_remote = GitRemote(git_factory)
107 self._git_remote = GitRemote(git_factory)
140 else:
108 else:
141 log.info("Git client import failed")
109 log.info("Git client import failed")
142
110
143 if MercurialFactory and HgRemote:
111 if MercurialFactory and HgRemote:
144 hg_factory = MercurialFactory()
112 hg_factory = MercurialFactory()
145 self._hg_remote = HgRemote(hg_factory)
113 self._hg_remote = HgRemote(hg_factory)
146 else:
114 else:
147 log.info("Mercurial client import failed")
115 log.info("Mercurial client import failed")
148
116
149 if SubversionFactory and SvnRemote:
117 if SubversionFactory and SvnRemote:
150 svn_factory = SubversionFactory()
118 svn_factory = SubversionFactory()
151
119
152 # hg factory is used for svn url validation
120 # hg factory is used for svn url validation
153 hg_factory = MercurialFactory()
121 hg_factory = MercurialFactory()
154 self._svn_remote = SvnRemote(svn_factory, hg_factory=hg_factory)
122 self._svn_remote = SvnRemote(svn_factory, hg_factory=hg_factory)
155 else:
123 else:
156 log.warning("Subversion client import failed")
124 log.warning("Subversion client import failed")
157
125
158 self._vcsserver = VcsServer()
126 self._vcsserver = VcsServer()
159
127
160 def _configure_locale(self):
128 def _configure_locale(self):
161 if self.locale:
129 if self.locale:
162 log.info('Setting locale: `LC_ALL` to %s', self.locale)
130 log.info('Setting locale: `LC_ALL` to %s', self.locale)
163 else:
131 else:
164 log.info(
132 log.info(
165 'Configuring locale subsystem based on environment variables')
133 'Configuring locale subsystem based on environment variables')
166 try:
134 try:
167 # If self.locale is the empty string, then the locale
135 # If self.locale is the empty string, then the locale
168 # module will use the environment variables. See the
136 # module will use the environment variables. See the
169 # documentation of the package `locale`.
137 # documentation of the package `locale`.
170 locale.setlocale(locale.LC_ALL, self.locale)
138 locale.setlocale(locale.LC_ALL, self.locale)
171
139
172 language_code, encoding = locale.getlocale()
140 language_code, encoding = locale.getlocale()
173 log.info(
141 log.info(
174 'Locale set to language code "%s" with encoding "%s".',
142 'Locale set to language code "%s" with encoding "%s".',
175 language_code, encoding)
143 language_code, encoding)
176 except locale.Error:
144 except locale.Error:
177 log.exception(
145 log.exception(
178 'Cannot set locale, not configuring the locale system')
146 'Cannot set locale, not configuring the locale system')
179
147
180
148
181 class WsgiProxy(object):
149 class WsgiProxy(object):
182 def __init__(self, wsgi):
150 def __init__(self, wsgi):
183 self.wsgi = wsgi
151 self.wsgi = wsgi
184
152
185 def __call__(self, environ, start_response):
153 def __call__(self, environ, start_response):
186 input_data = environ['wsgi.input'].read()
154 input_data = environ['wsgi.input'].read()
187 input_data = msgpack.unpackb(input_data)
155 input_data = msgpack.unpackb(input_data)
188
156
189 error = None
157 error = None
190 try:
158 try:
191 data, status, headers = self.wsgi.handle(
159 data, status, headers = self.wsgi.handle(
192 input_data['environment'], input_data['input_data'],
160 input_data['environment'], input_data['input_data'],
193 *input_data['args'], **input_data['kwargs'])
161 *input_data['args'], **input_data['kwargs'])
194 except Exception as e:
162 except Exception as e:
195 data, status, headers = [], None, None
163 data, status, headers = [], None, None
196 error = {
164 error = {
197 'message': str(e),
165 'message': str(e),
198 '_vcs_kind': getattr(e, '_vcs_kind', None)
166 '_vcs_kind': getattr(e, '_vcs_kind', None)
199 }
167 }
200
168
201 start_response(200, {})
169 start_response(200, {})
202 return self._iterator(error, status, headers, data)
170 return self._iterator(error, status, headers, data)
203
171
204 def _iterator(self, error, status, headers, data):
172 def _iterator(self, error, status, headers, data):
205 initial_data = [
173 initial_data = [
206 error,
174 error,
207 status,
175 status,
208 headers,
176 headers,
209 ]
177 ]
210
178
211 for d in chain(initial_data, data):
179 for d in chain(initial_data, data):
212 yield msgpack.packb(d)
180 yield msgpack.packb(d)
213
181
214
182
215 def not_found(request):
183 def not_found(request):
216 return {'status': '404 NOT FOUND'}
184 return {'status': '404 NOT FOUND'}
217
185
218
186
219 class VCSViewPredicate(object):
187 class VCSViewPredicate(object):
220 def __init__(self, val, config):
188 def __init__(self, val, config):
221 self.remotes = val
189 self.remotes = val
222
190
223 def text(self):
191 def text(self):
224 return 'vcs view method = %s' % (self.remotes.keys(),)
192 return 'vcs view method = %s' % (self.remotes.keys(),)
225
193
226 phash = text
194 phash = text
227
195
228 def __call__(self, context, request):
196 def __call__(self, context, request):
229 """
197 """
230 View predicate that returns true if given backend is supported by
198 View predicate that returns true if given backend is supported by
231 defined remotes.
199 defined remotes.
232 """
200 """
233 backend = request.matchdict.get('backend')
201 backend = request.matchdict.get('backend')
234 return backend in self.remotes
202 return backend in self.remotes
235
203
236
204
237 class HTTPApplication(object):
205 class HTTPApplication(object):
238 ALLOWED_EXCEPTIONS = ('KeyError', 'URLError')
206 ALLOWED_EXCEPTIONS = ('KeyError', 'URLError')
239
207
240 remote_wsgi = remote_wsgi
208 remote_wsgi = remote_wsgi
241 _use_echo_app = False
209 _use_echo_app = False
242
210
243 def __init__(self, settings=None, global_config=None):
211 def __init__(self, settings=None, global_config=None):
244 self._sanitize_settings_and_apply_defaults(settings)
245
212
246 self.config = Configurator(settings=settings)
213 self.config = Configurator(settings=settings)
247 # Init our statsd at very start
214 # Init our statsd at very start
248 self.config.registry.statsd = StatsdClient.statsd
215 self.config.registry.statsd = StatsdClient.statsd
249
216
250 self.global_config = global_config
217 self.global_config = global_config
251 self.config.include('vcsserver.lib.rc_cache')
218 self.config.include('vcsserver.lib.rc_cache')
252
219
253 settings_locale = settings.get('locale', '') or 'en_US.UTF-8'
220 settings_locale = settings.get('locale', '') or 'en_US.UTF-8'
254 vcs = VCS(locale_conf=settings_locale, cache_config=settings)
221 vcs = VCS(locale_conf=settings_locale, cache_config=settings)
255 self._remotes = {
222 self._remotes = {
256 'hg': vcs._hg_remote,
223 'hg': vcs._hg_remote,
257 'git': vcs._git_remote,
224 'git': vcs._git_remote,
258 'svn': vcs._svn_remote,
225 'svn': vcs._svn_remote,
259 'server': vcs._vcsserver,
226 'server': vcs._vcsserver,
260 }
227 }
261 if settings.get('dev.use_echo_app', 'false').lower() == 'true':
228 if settings.get('dev.use_echo_app', 'false').lower() == 'true':
262 self._use_echo_app = True
229 self._use_echo_app = True
263 log.warning("Using EchoApp for VCS operations.")
230 log.warning("Using EchoApp for VCS operations.")
264 self.remote_wsgi = remote_wsgi_stub
231 self.remote_wsgi = remote_wsgi_stub
265
232
266 self._configure_settings(global_config, settings)
233 self._configure_settings(global_config, settings)
267
234
268 self._configure()
235 self._configure()
269
236
270 def _configure_settings(self, global_config, app_settings):
237 def _configure_settings(self, global_config, app_settings):
271 """
238 """
272 Configure the settings module.
239 Configure the settings module.
273 """
240 """
274 settings_merged = global_config.copy()
241 settings_merged = global_config.copy()
275 settings_merged.update(app_settings)
242 settings_merged.update(app_settings)
276
243
277 git_path = app_settings.get('git_path', None)
244 git_path = app_settings.get('git_path', None)
278 if git_path:
245 if git_path:
279 settings.GIT_EXECUTABLE = git_path
246 settings.GIT_EXECUTABLE = git_path
280 binary_dir = app_settings.get('core.binary_dir', None)
247 binary_dir = app_settings.get('core.binary_dir', None)
281 if binary_dir:
248 if binary_dir:
282 settings.BINARY_DIR = binary_dir
249 settings.BINARY_DIR = binary_dir
283
250
284 # Store the settings to make them available to other modules.
251 # Store the settings to make them available to other modules.
285 vcsserver.PYRAMID_SETTINGS = settings_merged
252 vcsserver.PYRAMID_SETTINGS = settings_merged
286 vcsserver.CONFIG = settings_merged
253 vcsserver.CONFIG = settings_merged
287
254
288 def _sanitize_settings_and_apply_defaults(self, settings):
289 temp_store = tempfile.gettempdir()
290 default_cache_dir = os.path.join(temp_store, 'rc_cache')
291
292 # save default, cache dir, and use it for all backends later.
293 default_cache_dir = _string_setting(
294 settings,
295 'cache_dir',
296 default_cache_dir, lower=False, default_when_empty=True)
297
298 # ensure we have our dir created
299 if not os.path.isdir(default_cache_dir):
300 os.makedirs(default_cache_dir, mode=0o755)
301
302 # exception store cache
303 _string_setting(
304 settings,
305 'exception_tracker.store_path',
306 temp_store, lower=False, default_when_empty=True)
307
308 # repo_object cache
309 _string_setting(
310 settings,
311 'rc_cache.repo_object.backend',
312 'dogpile.cache.rc.file_namespace', lower=False)
313 _int_setting(
314 settings,
315 'rc_cache.repo_object.expiration_time',
316 30 * 24 * 60 * 60)
317 _string_setting(
318 settings,
319 'rc_cache.repo_object.arguments.filename',
320 os.path.join(default_cache_dir, 'vcsserver_cache_1'), lower=False)
321
322 def _configure(self):
255 def _configure(self):
323 self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)
256 self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)
324
257
325 self.config.add_route('service', '/_service')
258 self.config.add_route('service', '/_service')
326 self.config.add_route('status', '/status')
259 self.config.add_route('status', '/status')
327 self.config.add_route('hg_proxy', '/proxy/hg')
260 self.config.add_route('hg_proxy', '/proxy/hg')
328 self.config.add_route('git_proxy', '/proxy/git')
261 self.config.add_route('git_proxy', '/proxy/git')
329
262
330 # rpc methods
263 # rpc methods
331 self.config.add_route('vcs', '/{backend}')
264 self.config.add_route('vcs', '/{backend}')
332
265
333 # streaming rpc remote methods
266 # streaming rpc remote methods
334 self.config.add_route('vcs_stream', '/{backend}/stream')
267 self.config.add_route('vcs_stream', '/{backend}/stream')
335
268
336 # vcs operations clone/push as streaming
269 # vcs operations clone/push as streaming
337 self.config.add_route('stream_git', '/stream/git/*repo_name')
270 self.config.add_route('stream_git', '/stream/git/*repo_name')
338 self.config.add_route('stream_hg', '/stream/hg/*repo_name')
271 self.config.add_route('stream_hg', '/stream/hg/*repo_name')
339
272
340 self.config.add_view(self.status_view, route_name='status', renderer='json')
273 self.config.add_view(self.status_view, route_name='status', renderer='json')
341 self.config.add_view(self.service_view, route_name='service', renderer='msgpack')
274 self.config.add_view(self.service_view, route_name='service', renderer='msgpack')
342
275
343 self.config.add_view(self.hg_proxy(), route_name='hg_proxy')
276 self.config.add_view(self.hg_proxy(), route_name='hg_proxy')
344 self.config.add_view(self.git_proxy(), route_name='git_proxy')
277 self.config.add_view(self.git_proxy(), route_name='git_proxy')
345 self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack',
278 self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack',
346 vcs_view=self._remotes)
279 vcs_view=self._remotes)
347 self.config.add_view(self.vcs_stream_view, route_name='vcs_stream',
280 self.config.add_view(self.vcs_stream_view, route_name='vcs_stream',
348 vcs_view=self._remotes)
281 vcs_view=self._remotes)
349
282
350 self.config.add_view(self.hg_stream(), route_name='stream_hg')
283 self.config.add_view(self.hg_stream(), route_name='stream_hg')
351 self.config.add_view(self.git_stream(), route_name='stream_git')
284 self.config.add_view(self.git_stream(), route_name='stream_git')
352
285
353 self.config.add_view_predicate('vcs_view', VCSViewPredicate)
286 self.config.add_view_predicate('vcs_view', VCSViewPredicate)
354
287
355 self.config.add_notfound_view(not_found, renderer='json')
288 self.config.add_notfound_view(not_found, renderer='json')
356
289
357 self.config.add_view(self.handle_vcs_exception, context=Exception)
290 self.config.add_view(self.handle_vcs_exception, context=Exception)
358
291
359 self.config.add_tween(
292 self.config.add_tween(
360 'vcsserver.tweens.request_wrapper.RequestWrapperTween',
293 'vcsserver.tweens.request_wrapper.RequestWrapperTween',
361 )
294 )
362 self.config.add_request_method(
295 self.config.add_request_method(
363 'vcsserver.lib.request_counter.get_request_counter',
296 'vcsserver.lib.request_counter.get_request_counter',
364 'request_count')
297 'request_count')
365
298
366 def wsgi_app(self):
299 def wsgi_app(self):
367 return self.config.make_wsgi_app()
300 return self.config.make_wsgi_app()
368
301
369 def _vcs_view_params(self, request):
302 def _vcs_view_params(self, request):
370 remote = self._remotes[request.matchdict['backend']]
303 remote = self._remotes[request.matchdict['backend']]
371 payload = msgpack.unpackb(request.body, use_list=True)
304 payload = msgpack.unpackb(request.body, use_list=True)
372 method = payload.get('method')
305 method = payload.get('method')
373 params = payload['params']
306 params = payload['params']
374 wire = params.get('wire')
307 wire = params.get('wire')
375 args = params.get('args')
308 args = params.get('args')
376 kwargs = params.get('kwargs')
309 kwargs = params.get('kwargs')
377 context_uid = None
310 context_uid = None
378
311
379 if wire:
312 if wire:
380 try:
313 try:
381 wire['context'] = context_uid = uuid.UUID(wire['context'])
314 wire['context'] = context_uid = uuid.UUID(wire['context'])
382 except KeyError:
315 except KeyError:
383 pass
316 pass
384 args.insert(0, wire)
317 args.insert(0, wire)
385 repo_state_uid = wire.get('repo_state_uid') if wire else None
318 repo_state_uid = wire.get('repo_state_uid') if wire else None
386
319
387 # NOTE(marcink): trading complexity for slight performance
320 # NOTE(marcink): trading complexity for slight performance
388 if log.isEnabledFor(logging.DEBUG):
321 if log.isEnabledFor(logging.DEBUG):
389 no_args_methods = [
322 no_args_methods = [
390
323
391 ]
324 ]
392 if method in no_args_methods:
325 if method in no_args_methods:
393 call_args = ''
326 call_args = ''
394 else:
327 else:
395 call_args = args[1:]
328 call_args = args[1:]
396
329
397 log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
330 log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
398 method, call_args, kwargs, context_uid, repo_state_uid)
331 method, call_args, kwargs, context_uid, repo_state_uid)
399
332
400 statsd = request.registry.statsd
333 statsd = request.registry.statsd
401 if statsd:
334 if statsd:
402 statsd.incr(
335 statsd.incr(
403 'vcsserver_method_total', tags=[
336 'vcsserver_method_total', tags=[
404 "method:{}".format(method),
337 "method:{}".format(method),
405 ])
338 ])
406 return payload, remote, method, args, kwargs
339 return payload, remote, method, args, kwargs
407
340
408 def vcs_view(self, request):
341 def vcs_view(self, request):
409
342
410 payload, remote, method, args, kwargs = self._vcs_view_params(request)
343 payload, remote, method, args, kwargs = self._vcs_view_params(request)
411 payload_id = payload.get('id')
344 payload_id = payload.get('id')
412
345
413 try:
346 try:
414 resp = getattr(remote, method)(*args, **kwargs)
347 resp = getattr(remote, method)(*args, **kwargs)
415 except Exception as e:
348 except Exception as e:
416 exc_info = list(sys.exc_info())
349 exc_info = list(sys.exc_info())
417 exc_type, exc_value, exc_traceback = exc_info
350 exc_type, exc_value, exc_traceback = exc_info
418
351
419 org_exc = getattr(e, '_org_exc', None)
352 org_exc = getattr(e, '_org_exc', None)
420 org_exc_name = None
353 org_exc_name = None
421 org_exc_tb = ''
354 org_exc_tb = ''
422 if org_exc:
355 if org_exc:
423 org_exc_name = org_exc.__class__.__name__
356 org_exc_name = org_exc.__class__.__name__
424 org_exc_tb = getattr(e, '_org_exc_tb', '')
357 org_exc_tb = getattr(e, '_org_exc_tb', '')
425 # replace our "faked" exception with our org
358 # replace our "faked" exception with our org
426 exc_info[0] = org_exc.__class__
359 exc_info[0] = org_exc.__class__
427 exc_info[1] = org_exc
360 exc_info[1] = org_exc
428
361
429 should_store_exc = True
362 should_store_exc = True
430 if org_exc:
363 if org_exc:
431 def get_exc_fqn(_exc_obj):
364 def get_exc_fqn(_exc_obj):
432 module_name = getattr(org_exc.__class__, '__module__', 'UNKNOWN')
365 module_name = getattr(org_exc.__class__, '__module__', 'UNKNOWN')
433 return module_name + '.' + org_exc_name
366 return module_name + '.' + org_exc_name
434
367
435 exc_fqn = get_exc_fqn(org_exc)
368 exc_fqn = get_exc_fqn(org_exc)
436
369
437 if exc_fqn in ['mercurial.error.RepoLookupError',
370 if exc_fqn in ['mercurial.error.RepoLookupError',
438 'vcsserver.exceptions.RefNotFoundException']:
371 'vcsserver.exceptions.RefNotFoundException']:
439 should_store_exc = False
372 should_store_exc = False
440
373
441 if should_store_exc:
374 if should_store_exc:
442 store_exception(id(exc_info), exc_info, request_path=request.path)
375 store_exception(id(exc_info), exc_info, request_path=request.path)
443
376
444 tb_info = ''.join(
377 tb_info = ''.join(
445 traceback.format_exception(exc_type, exc_value, exc_traceback))
378 traceback.format_exception(exc_type, exc_value, exc_traceback))
446
379
447 type_ = e.__class__.__name__
380 type_ = e.__class__.__name__
448 if type_ not in self.ALLOWED_EXCEPTIONS:
381 if type_ not in self.ALLOWED_EXCEPTIONS:
449 type_ = None
382 type_ = None
450
383
451 resp = {
384 resp = {
452 'id': payload_id,
385 'id': payload_id,
453 'error': {
386 'error': {
454 'message': e.message,
387 'message': e.message,
455 'traceback': tb_info,
388 'traceback': tb_info,
456 'org_exc': org_exc_name,
389 'org_exc': org_exc_name,
457 'org_exc_tb': org_exc_tb,
390 'org_exc_tb': org_exc_tb,
458 'type': type_
391 'type': type_
459 }
392 }
460 }
393 }
461
394
462 try:
395 try:
463 resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
396 resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
464 except AttributeError:
397 except AttributeError:
465 pass
398 pass
466 else:
399 else:
467 resp = {
400 resp = {
468 'id': payload_id,
401 'id': payload_id,
469 'result': resp
402 'result': resp
470 }
403 }
471
404
472 return resp
405 return resp
473
406
474 def vcs_stream_view(self, request):
407 def vcs_stream_view(self, request):
475 payload, remote, method, args, kwargs = self._vcs_view_params(request)
408 payload, remote, method, args, kwargs = self._vcs_view_params(request)
476 # this method has a stream: marker; we remove it here
409 # this method has a stream: marker; we remove it here
477 method = method.split('stream:')[-1]
410 method = method.split('stream:')[-1]
478 chunk_size = safe_int(payload.get('chunk_size')) or 4096
411 chunk_size = safe_int(payload.get('chunk_size')) or 4096
479
412
480 try:
413 try:
481 resp = getattr(remote, method)(*args, **kwargs)
414 resp = getattr(remote, method)(*args, **kwargs)
482 except Exception as e:
415 except Exception as e:
483 raise
416 raise
484
417
485 def get_chunked_data(method_resp):
418 def get_chunked_data(method_resp):
486 stream = StringIO(method_resp)
419 stream = StringIO(method_resp)
487 while 1:
420 while 1:
488 chunk = stream.read(chunk_size)
421 chunk = stream.read(chunk_size)
489 if not chunk:
422 if not chunk:
490 break
423 break
491 yield chunk
424 yield chunk
492
425
493 response = Response(app_iter=get_chunked_data(resp))
426 response = Response(app_iter=get_chunked_data(resp))
494 response.content_type = 'application/octet-stream'
427 response.content_type = 'application/octet-stream'
495
428
496 return response
429 return response
497
430
498 def status_view(self, request):
431 def status_view(self, request):
499 import vcsserver
432 import vcsserver
500 return {'status': 'OK', 'vcsserver_version': vcsserver.__version__,
433 return {'status': 'OK', 'vcsserver_version': vcsserver.__version__,
501 'pid': os.getpid()}
434 'pid': os.getpid()}
502
435
503 def service_view(self, request):
436 def service_view(self, request):
504 import vcsserver
437 import vcsserver
505
438
506 payload = msgpack.unpackb(request.body, use_list=True)
439 payload = msgpack.unpackb(request.body, use_list=True)
507 server_config, app_config = {}, {}
440 server_config, app_config = {}, {}
508
441
509 try:
442 try:
510 path = self.global_config['__file__']
443 path = self.global_config['__file__']
511 config = configparser.RawConfigParser()
444 config = configparser.RawConfigParser()
512
445
513 config.read(path)
446 config.read(path)
514
447
515 if config.has_section('server:main'):
448 if config.has_section('server:main'):
516 server_config = dict(config.items('server:main'))
449 server_config = dict(config.items('server:main'))
517 if config.has_section('app:main'):
450 if config.has_section('app:main'):
518 app_config = dict(config.items('app:main'))
451 app_config = dict(config.items('app:main'))
519
452
520 except Exception:
453 except Exception:
521 log.exception('Failed to read .ini file for display')
454 log.exception('Failed to read .ini file for display')
522
455
523 environ = os.environ.items()
456 environ = os.environ.items()
524
457
525 resp = {
458 resp = {
526 'id': payload.get('id'),
459 'id': payload.get('id'),
527 'result': dict(
460 'result': dict(
528 version=vcsserver.__version__,
461 version=vcsserver.__version__,
529 config=server_config,
462 config=server_config,
530 app_config=app_config,
463 app_config=app_config,
531 environ=environ,
464 environ=environ,
532 payload=payload,
465 payload=payload,
533 )
466 )
534 }
467 }
535 return resp
468 return resp
536
469
537 def _msgpack_renderer_factory(self, info):
470 def _msgpack_renderer_factory(self, info):
538 def _render(value, system):
471 def _render(value, system):
539 request = system.get('request')
472 request = system.get('request')
540 if request is not None:
473 if request is not None:
541 response = request.response
474 response = request.response
542 ct = response.content_type
475 ct = response.content_type
543 if ct == response.default_content_type:
476 if ct == response.default_content_type:
544 response.content_type = 'application/x-msgpack'
477 response.content_type = 'application/x-msgpack'
545 return msgpack.packb(value)
478 return msgpack.packb(value)
546 return _render
479 return _render
547
480
548 def set_env_from_config(self, environ, config):
481 def set_env_from_config(self, environ, config):
549 dict_conf = {}
482 dict_conf = {}
550 try:
483 try:
551 for elem in config:
484 for elem in config:
552 if elem[0] == 'rhodecode':
485 if elem[0] == 'rhodecode':
553 dict_conf = json.loads(elem[2])
486 dict_conf = json.loads(elem[2])
554 break
487 break
555 except Exception:
488 except Exception:
556 log.exception('Failed to fetch SCM CONFIG')
489 log.exception('Failed to fetch SCM CONFIG')
557 return
490 return
558
491
559 username = dict_conf.get('username')
492 username = dict_conf.get('username')
560 if username:
493 if username:
561 environ['REMOTE_USER'] = username
494 environ['REMOTE_USER'] = username
562 # mercurial specific, some extension api rely on this
495 # mercurial specific, some extension api rely on this
563 environ['HGUSER'] = username
496 environ['HGUSER'] = username
564
497
565 ip = dict_conf.get('ip')
498 ip = dict_conf.get('ip')
566 if ip:
499 if ip:
567 environ['REMOTE_HOST'] = ip
500 environ['REMOTE_HOST'] = ip
568
501
569 if _is_request_chunked(environ):
502 if _is_request_chunked(environ):
570 # set the compatibility flag for webob
503 # set the compatibility flag for webob
571 environ['wsgi.input_terminated'] = True
504 environ['wsgi.input_terminated'] = True
572
505
573 def hg_proxy(self):
506 def hg_proxy(self):
574 @wsgiapp
507 @wsgiapp
575 def _hg_proxy(environ, start_response):
508 def _hg_proxy(environ, start_response):
576 app = WsgiProxy(self.remote_wsgi.HgRemoteWsgi())
509 app = WsgiProxy(self.remote_wsgi.HgRemoteWsgi())
577 return app(environ, start_response)
510 return app(environ, start_response)
578 return _hg_proxy
511 return _hg_proxy
579
512
580 def git_proxy(self):
513 def git_proxy(self):
581 @wsgiapp
514 @wsgiapp
582 def _git_proxy(environ, start_response):
515 def _git_proxy(environ, start_response):
583 app = WsgiProxy(self.remote_wsgi.GitRemoteWsgi())
516 app = WsgiProxy(self.remote_wsgi.GitRemoteWsgi())
584 return app(environ, start_response)
517 return app(environ, start_response)
585 return _git_proxy
518 return _git_proxy
586
519
587 def hg_stream(self):
520 def hg_stream(self):
588 if self._use_echo_app:
521 if self._use_echo_app:
589 @wsgiapp
522 @wsgiapp
590 def _hg_stream(environ, start_response):
523 def _hg_stream(environ, start_response):
591 app = EchoApp('fake_path', 'fake_name', None)
524 app = EchoApp('fake_path', 'fake_name', None)
592 return app(environ, start_response)
525 return app(environ, start_response)
593 return _hg_stream
526 return _hg_stream
594 else:
527 else:
595 @wsgiapp
528 @wsgiapp
596 def _hg_stream(environ, start_response):
529 def _hg_stream(environ, start_response):
597 log.debug('http-app: handling hg stream')
530 log.debug('http-app: handling hg stream')
598 repo_path = environ['HTTP_X_RC_REPO_PATH']
531 repo_path = environ['HTTP_X_RC_REPO_PATH']
599 repo_name = environ['HTTP_X_RC_REPO_NAME']
532 repo_name = environ['HTTP_X_RC_REPO_NAME']
600 packed_config = base64.b64decode(
533 packed_config = base64.b64decode(
601 environ['HTTP_X_RC_REPO_CONFIG'])
534 environ['HTTP_X_RC_REPO_CONFIG'])
602 config = msgpack.unpackb(packed_config)
535 config = msgpack.unpackb(packed_config)
603 app = scm_app.create_hg_wsgi_app(
536 app = scm_app.create_hg_wsgi_app(
604 repo_path, repo_name, config)
537 repo_path, repo_name, config)
605
538
606 # Consistent path information for hgweb
539 # Consistent path information for hgweb
607 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
540 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
608 environ['REPO_NAME'] = repo_name
541 environ['REPO_NAME'] = repo_name
609 self.set_env_from_config(environ, config)
542 self.set_env_from_config(environ, config)
610
543
611 log.debug('http-app: starting app handler '
544 log.debug('http-app: starting app handler '
612 'with %s and process request', app)
545 'with %s and process request', app)
613 return app(environ, ResponseFilter(start_response))
546 return app(environ, ResponseFilter(start_response))
614 return _hg_stream
547 return _hg_stream
615
548
616 def git_stream(self):
549 def git_stream(self):
617 if self._use_echo_app:
550 if self._use_echo_app:
618 @wsgiapp
551 @wsgiapp
619 def _git_stream(environ, start_response):
552 def _git_stream(environ, start_response):
620 app = EchoApp('fake_path', 'fake_name', None)
553 app = EchoApp('fake_path', 'fake_name', None)
621 return app(environ, start_response)
554 return app(environ, start_response)
622 return _git_stream
555 return _git_stream
623 else:
556 else:
624 @wsgiapp
557 @wsgiapp
625 def _git_stream(environ, start_response):
558 def _git_stream(environ, start_response):
626 log.debug('http-app: handling git stream')
559 log.debug('http-app: handling git stream')
627 repo_path = environ['HTTP_X_RC_REPO_PATH']
560 repo_path = environ['HTTP_X_RC_REPO_PATH']
628 repo_name = environ['HTTP_X_RC_REPO_NAME']
561 repo_name = environ['HTTP_X_RC_REPO_NAME']
629 packed_config = base64.b64decode(
562 packed_config = base64.b64decode(
630 environ['HTTP_X_RC_REPO_CONFIG'])
563 environ['HTTP_X_RC_REPO_CONFIG'])
631 config = msgpack.unpackb(packed_config)
564 config = msgpack.unpackb(packed_config)
632
565
633 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
566 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
634 self.set_env_from_config(environ, config)
567 self.set_env_from_config(environ, config)
635
568
636 content_type = environ.get('CONTENT_TYPE', '')
569 content_type = environ.get('CONTENT_TYPE', '')
637
570
638 path = environ['PATH_INFO']
571 path = environ['PATH_INFO']
639 is_lfs_request = GIT_LFS_CONTENT_TYPE in content_type
572 is_lfs_request = GIT_LFS_CONTENT_TYPE in content_type
640 log.debug(
573 log.debug(
641 'LFS: Detecting if request `%s` is LFS server path based '
574 'LFS: Detecting if request `%s` is LFS server path based '
642 'on content type:`%s`, is_lfs:%s',
575 'on content type:`%s`, is_lfs:%s',
643 path, content_type, is_lfs_request)
576 path, content_type, is_lfs_request)
644
577
645 if not is_lfs_request:
578 if not is_lfs_request:
646 # fallback detection by path
579 # fallback detection by path
647 if GIT_LFS_PROTO_PAT.match(path):
580 if GIT_LFS_PROTO_PAT.match(path):
648 is_lfs_request = True
581 is_lfs_request = True
649 log.debug(
582 log.debug(
650 'LFS: fallback detection by path of: `%s`, is_lfs:%s',
583 'LFS: fallback detection by path of: `%s`, is_lfs:%s',
651 path, is_lfs_request)
584 path, is_lfs_request)
652
585
653 if is_lfs_request:
586 if is_lfs_request:
654 app = scm_app.create_git_lfs_wsgi_app(
587 app = scm_app.create_git_lfs_wsgi_app(
655 repo_path, repo_name, config)
588 repo_path, repo_name, config)
656 else:
589 else:
657 app = scm_app.create_git_wsgi_app(
590 app = scm_app.create_git_wsgi_app(
658 repo_path, repo_name, config)
591 repo_path, repo_name, config)
659
592
660 log.debug('http-app: starting app handler '
593 log.debug('http-app: starting app handler '
661 'with %s and process request', app)
594 'with %s and process request', app)
662
595
663 return app(environ, start_response)
596 return app(environ, start_response)
664
597
665 return _git_stream
598 return _git_stream
666
599
667 def handle_vcs_exception(self, exception, request):
600 def handle_vcs_exception(self, exception, request):
668 _vcs_kind = getattr(exception, '_vcs_kind', '')
601 _vcs_kind = getattr(exception, '_vcs_kind', '')
669 if _vcs_kind == 'repo_locked':
602 if _vcs_kind == 'repo_locked':
670 # Get custom repo-locked status code if present.
603 # Get custom repo-locked status code if present.
671 status_code = request.headers.get('X-RC-Locked-Status-Code')
604 status_code = request.headers.get('X-RC-Locked-Status-Code')
672 return HTTPRepoLocked(
605 return HTTPRepoLocked(
673 title=exception.message, status_code=status_code)
606 title=exception.message, status_code=status_code)
674
607
675 elif _vcs_kind == 'repo_branch_protected':
608 elif _vcs_kind == 'repo_branch_protected':
676 # Get custom repo-branch-protected status code if present.
609 # Get custom repo-branch-protected status code if present.
677 return HTTPRepoBranchProtected(title=exception.message)
610 return HTTPRepoBranchProtected(title=exception.message)
678
611
679 exc_info = request.exc_info
612 exc_info = request.exc_info
680 store_exception(id(exc_info), exc_info)
613 store_exception(id(exc_info), exc_info)
681
614
682 traceback_info = 'unavailable'
615 traceback_info = 'unavailable'
683 if request.exc_info:
616 if request.exc_info:
684 exc_type, exc_value, exc_tb = request.exc_info
617 exc_type, exc_value, exc_tb = request.exc_info
685 traceback_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
618 traceback_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
686
619
687 log.error(
620 log.error(
688 'error occurred handling this request for path: %s, \n tb: %s',
621 'error occurred handling this request for path: %s, \n tb: %s',
689 request.path, traceback_info)
622 request.path, traceback_info)
690
623
691 statsd = request.registry.statsd
624 statsd = request.registry.statsd
692 if statsd:
625 if statsd:
693 exc_type = "{}.{}".format(exception.__class__.__module__, exception.__class__.__name__)
626 exc_type = "{}.{}".format(exception.__class__.__module__, exception.__class__.__name__)
694 statsd.incr('vcsserver_exception_total',
627 statsd.incr('vcsserver_exception_total',
695 tags=["type:{}".format(exc_type)])
628 tags=["type:{}".format(exc_type)])
696 raise exception
629 raise exception
697
630
698
631
699 class ResponseFilter(object):
632 class ResponseFilter(object):
700
633
701 def __init__(self, start_response):
634 def __init__(self, start_response):
702 self._start_response = start_response
635 self._start_response = start_response
703
636
704 def __call__(self, status, response_headers, exc_info=None):
637 def __call__(self, status, response_headers, exc_info=None):
705 headers = tuple(
638 headers = tuple(
706 (h, v) for h, v in response_headers
639 (h, v) for h, v in response_headers
707 if not wsgiref.util.is_hop_by_hop(h))
640 if not wsgiref.util.is_hop_by_hop(h))
708 return self._start_response(status, headers, exc_info)
641 return self._start_response(status, headers, exc_info)
709
642
710
643
644 def sanitize_settings_and_apply_defaults(global_config, settings):
645 global_settings_maker = SettingsMaker(global_config)
646 settings_maker = SettingsMaker(settings)
647
648 settings_maker.make_setting(
649 'logging.autoconfigure',
650 default=True,
651 parser='bool')
652
653 logging_conf = os.path.join(os.path.dirname(global_config.get('__file__')), 'logging.ini')
654 settings_maker.enable_logging(logging_conf)
655
656 # Default includes, possible to change as a user
657 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
658 log.debug(
659 "Using the following pyramid.includes: %s",
660 pyramid_includes)
661
662 settings_maker.make_setting('__file__', global_config.get('__file__'))
663
664 settings_maker.make_setting(
665 'pyramid.default_locale_name',
666 default='en',
667 parser='string')
668 settings_maker.make_setting(
669 'locale',
670 default='en_US.UTF-8',
671 parser='string')
672
673 settings_maker.make_setting(
674 'core.binary_dir',
675 default='',
676 parser='string')
677
678 temp_store = tempfile.gettempdir()
679 default_cache_dir = os.path.join(temp_store, 'rc_cache')
680 # save default, cache dir, and use it for all backends later.
681 default_cache_dir = settings_maker.make_setting(
682 'cache_dir',
683 default=default_cache_dir, default_when_empty=True,
684 parser='dir:ensured')
685
686 # exception store cache
687 settings_maker.make_setting(
688 'exception_tracker.store_path',
689 default=os.path.join(default_cache_dir, 'exc_store'), default_when_empty=True,
690 parser='dir:ensured'
691 )
692
693 # repo_object cache defaults
694 settings_maker.make_setting(
695 'rc_cache.repo_object.backend',
696 default='dogpile.cache.rc.file_namespace',
697 parser='string')
698 settings_maker.make_setting(
699 'rc_cache.repo_object.expiration_time',
700 default=30 * 24 * 60 * 60, # 30days
701 parser='int')
702 settings_maker.make_setting(
703 'rc_cache.repo_object.arguments.filename',
704 default=os.path.join(default_cache_dir, 'vcsserver_cache_repo_object.db'),
705 parser='string')
706
707 # statsd
708 settings_maker.make_setting(
709 'statsd.enabled',
710 default=False,
711 parser='bool')
712 settings_maker.make_setting(
713 'statsd.statsd_host',
714 default='statsd-exporter',
715 parser='string')
716 settings_maker.make_setting(
717 'statsd.statsd_port',
718 default=9125,
719 parser='int')
720 settings_maker.make_setting(
721 'statsd.statsd_prefix',
722 default='',
723 parser='string')
724 settings_maker.make_setting(
725 'statsd.statsd_ipv6',
726 default=False,
727 parser='bool')
728
729
711 def main(global_config, **settings):
730 def main(global_config, **settings):
731 start_time = time.time()
732 log.info('Pyramid app config starting')
733
712 if MercurialFactory:
734 if MercurialFactory:
713 hgpatches.patch_largefiles_capabilities()
735 hgpatches.patch_largefiles_capabilities()
714 hgpatches.patch_subrepo_type_mapping()
736 hgpatches.patch_subrepo_type_mapping()
715
737
738 # Fill in and sanitize the defaults & do ENV expansion
739 sanitize_settings_and_apply_defaults(global_config, settings)
740
716 # init and bootstrap StatsdClient
741 # init and bootstrap StatsdClient
717 StatsdClient.setup(settings)
742 StatsdClient.setup(settings)
718
743
719 app = HTTPApplication(settings=settings, global_config=global_config)
744 pyramid_app = HTTPApplication(settings=settings, global_config=global_config).wsgi_app()
720 return app.wsgi_app()
745 total_time = time.time() - start_time
746 log.info('Pyramid app `%s` created and configured in %.2fs',
747 getattr(pyramid_app, 'func_name', 'pyramid_app'), total_time)
748 return pyramid_app
749
750
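
Taken together, the second half of this diff replaces the module-level _int_setting/_bool_setting/_list_setting/_string_setting helpers with a single declarative pass in sanitize_settings_and_apply_defaults(), where each key names its parser ('bool', 'int', 'string', 'list:newline', 'dir:ensured') once. The snippet below is only a rough, hypothetical sketch of the coercion those parser names imply; the real logic sits behind SettingsMaker.make_setting, and coerce_setting is an invented name for illustration.

from pyramid.settings import asbool, aslist

def coerce_setting(settings, name, default, parser='string'):
    # Hypothetical stand-in for the per-type coercion the removed helpers did,
    # and which make_setting's parser argument now selects by name.
    raw = settings.get(name, default)
    if parser == 'bool':
        value = asbool(raw)
    elif parser == 'int':
        value = int(raw)
    elif parser == 'list:newline':
        value = aslist(raw)
    else:
        value = raw
    settings[name] = value
    return value

# e.g. coerce_setting({'statsd.enabled': 'false'}, 'statsd.enabled', False, parser='bool') returns False
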