##// END OF EJS Templates
subprocess: Change all imports from `subprocess` -> `subprocess32`
Martin Bornhold -
r1007:e9c22488 default
parent child Browse files
Show More
@@ -1,300 +1,300 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2014-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Various version Control System version lib (vcs) management abstraction layer
23 23 for Python. Build with server client architecture.
24 24 """
25 25
26 26
27 27 VERSION = (0, 5, 0, 'dev')
28 28
29 29 __version__ = '.'.join((str(each) for each in VERSION[:4]))
30 30
31 31 __all__ = [
32 32 'get_version', 'get_vcs_instance', 'get_backend',
33 33 'VCSError', 'RepositoryError', 'CommitError'
34 34 ]
35 35
36 36 import atexit
37 37 import logging
38 import subprocess
38 import subprocess32
39 39 import time
40 40 import urlparse
41 41 from cStringIO import StringIO
42 42
43 43 import Pyro4
44 44 from Pyro4.errors import CommunicationError
45 45
46 46 from rhodecode.lib.vcs.conf import settings
47 47 from rhodecode.lib.vcs.backends import get_vcs_instance, get_backend
48 48 from rhodecode.lib.vcs.exceptions import (
49 49 VCSError, RepositoryError, CommitError, VCSCommunicationError)
50 50
51 51 log = logging.getLogger(__name__)
52 52
53 53 # The pycurl library directly accesses C API functions and is not patched by
54 54 # gevent. This will potentially lead to deadlocks due to incompatibility to
55 55 # gevent. Therefore we check if gevent is active and import a gevent compatible
56 56 # wrapper in that case.
57 57 try:
58 58 from gevent import monkey
59 59 if monkey.is_module_patched('__builtin__'):
60 60 import geventcurl as pycurl
61 61 log.debug('Using gevent comapatible pycurl: %s', pycurl)
62 62 else:
63 63 import pycurl
64 64 except ImportError:
65 65 import pycurl
66 66
67 67
68 68 def get_version():
69 69 """
70 70 Returns shorter version (digit parts only) as string.
71 71 """
72 72 return '.'.join((str(each) for each in VERSION[:3]))
73 73
74 74
75 75 def connect_pyro4(server_and_port):
76 76 from rhodecode.lib.vcs import connection, client
77 77 from rhodecode.lib.middleware.utils import scm_app
78 78
79 79 git_remote = client.RequestScopeProxyFactory(
80 80 settings.pyro_remote(settings.PYRO_GIT, server_and_port))
81 81 hg_remote = client.RequestScopeProxyFactory(
82 82 settings.pyro_remote(settings.PYRO_HG, server_and_port))
83 83 svn_remote = client.RequestScopeProxyFactory(
84 84 settings.pyro_remote(settings.PYRO_SVN, server_and_port))
85 85
86 86 connection.Git = client.RepoMaker(proxy_factory=git_remote)
87 87 connection.Hg = client.RepoMaker(proxy_factory=hg_remote)
88 88 connection.Svn = client.RepoMaker(proxy_factory=svn_remote)
89 89
90 90 scm_app.GIT_REMOTE_WSGI = Pyro4.Proxy(
91 91 settings.pyro_remote(
92 92 settings.PYRO_GIT_REMOTE_WSGI, server_and_port))
93 93 scm_app.HG_REMOTE_WSGI = Pyro4.Proxy(
94 94 settings.pyro_remote(
95 95 settings.PYRO_HG_REMOTE_WSGI, server_and_port))
96 96
97 97 @atexit.register
98 98 def free_connection_resources():
99 99 connection.Git = None
100 100 connection.Hg = None
101 101 connection.Svn = None
102 102
103 103
104 104 def connect_http(server_and_port):
105 105 from rhodecode.lib.vcs import connection, client_http
106 106 from rhodecode.lib.middleware.utils import scm_app
107 107
108 108 session_factory = client_http.ThreadlocalSessionFactory()
109 109
110 110 connection.Git = client_http.RepoMaker(
111 111 server_and_port, '/git', session_factory)
112 112 connection.Hg = client_http.RepoMaker(
113 113 server_and_port, '/hg', session_factory)
114 114 connection.Svn = client_http.RepoMaker(
115 115 server_and_port, '/svn', session_factory)
116 116
117 117 scm_app.HG_REMOTE_WSGI = client_http.VcsHttpProxy(
118 118 server_and_port, '/proxy/hg')
119 119 scm_app.GIT_REMOTE_WSGI = client_http.VcsHttpProxy(
120 120 server_and_port, '/proxy/git')
121 121
122 122 @atexit.register
123 123 def free_connection_resources():
124 124 connection.Git = None
125 125 connection.Hg = None
126 126 connection.Svn = None
127 127
128 128
129 129 def connect_vcs(server_and_port, protocol):
130 130 """
131 131 Initializes the connection to the vcs server.
132 132
133 133 :param server_and_port: str, e.g. "localhost:9900"
134 134 :param protocol: str, "pyro4" or "http"
135 135 """
136 136 if protocol == 'pyro4':
137 137 connect_pyro4(server_and_port)
138 138 elif protocol == 'http':
139 139 connect_http(server_and_port)
140 140 else:
141 141 raise Exception('Invalid vcs server protocol "{}"'.format(protocol))
142 142
143 143
144 144 # TODO: johbo: This function should be moved into our test suite, there is
145 145 # no reason to support starting the vcsserver in Enterprise itself.
146 146 def start_vcs_server(server_and_port, protocol, log_level=None):
147 147 """
148 148 Starts the vcs server in a subprocess.
149 149 """
150 150 log.info('Starting VCSServer as a sub process with %s protocol', protocol)
151 151 if protocol == 'http':
152 152 return _start_http_vcs_server(server_and_port, log_level)
153 153 elif protocol == 'pyro4':
154 154 return _start_pyro4_vcs_server(server_and_port, log_level)
155 155 else:
156 156 raise Exception('Invalid vcs server protocol "{}"'.format(protocol))
157 157
158 158
159 159 def _start_pyro4_vcs_server(server_and_port, log_level=None):
160 160 _try_to_shutdown_running_server(server_and_port, protocol='pyro4')
161 161 host, port = server_and_port.rsplit(":", 1)
162 162 host = host.strip('[]')
163 163 args = [
164 164 'vcsserver', '--port', port, '--host', host, '--locale', 'en_US.UTF-8',
165 165 '--threadpool', '32']
166 166 if log_level:
167 167 args += ['--log-level', log_level]
168 proc = subprocess.Popen(args)
168 proc = subprocess32.Popen(args)
169 169
170 170 def cleanup_server_process():
171 171 proc.kill()
172 172 atexit.register(cleanup_server_process)
173 173
174 174 server = create_vcsserver_proxy(server_and_port, protocol='pyro4')
175 175 _wait_until_vcs_server_is_reachable(server)
176 176
177 177
178 178 def _start_http_vcs_server(server_and_port, log_level=None):
179 179 # TODO: mikhail: shutdown if an http server already runs
180 180
181 181 host, port = server_and_port.rsplit(":", 1)
182 182 args = [
183 183 'pserve', 'rhodecode/tests/vcsserver_http.ini',
184 184 'http_port=%s' % (port, ), 'http_host=%s' % (host, )]
185 proc = subprocess.Popen(args)
185 proc = subprocess32.Popen(args)
186 186
187 187 def cleanup_server_process():
188 188 proc.kill()
189 189 atexit.register(cleanup_server_process)
190 190
191 191 server = create_vcsserver_proxy(server_and_port, protocol='http')
192 192 _wait_until_vcs_server_is_reachable(server)
193 193
194 194
195 195 def _wait_until_vcs_server_is_reachable(server, timeout=40):
196 196 begin = time.time()
197 197 while (time.time() - begin) < timeout:
198 198 try:
199 199 server.ping()
200 200 return
201 201 except (VCSCommunicationError, CommunicationError, pycurl.error):
202 202 log.debug('VCSServer not started yet, retry to connect.')
203 203 time.sleep(0.5)
204 204 raise Exception(
205 205 'Starting the VCSServer failed or took more than {} '
206 206 'seconds.'.format(timeout))
207 207
208 208
209 209 def _try_to_shutdown_running_server(server_and_port, protocol):
210 210 server = create_vcsserver_proxy(server_and_port, protocol)
211 211 try:
212 212 server.shutdown()
213 213 except (CommunicationError, pycurl.error):
214 214 return
215 215
216 216 # TODO: Not sure why this is important, but without it the following start
217 217 # of the server fails.
218 218 server = create_vcsserver_proxy(server_and_port, protocol)
219 219 server.ping()
220 220
221 221
222 222 def create_vcsserver_proxy(server_and_port, protocol):
223 223 if protocol == 'pyro4':
224 224 return _create_vcsserver_proxy_pyro4(server_and_port)
225 225 elif protocol == 'http':
226 226 return _create_vcsserver_proxy_http(server_and_port)
227 227 else:
228 228 raise Exception('Invalid vcs server protocol "{}"'.format(protocol))
229 229
230 230
231 231 def _create_vcsserver_proxy_pyro4(server_and_port):
232 232 server = Pyro4.Proxy(
233 233 settings.pyro_remote(settings.PYRO_VCSSERVER, server_and_port))
234 234 return server
235 235
236 236
237 237 def _create_vcsserver_proxy_http(server_and_port):
238 238 from rhodecode.lib.vcs import client_http
239 239
240 240 session = _create_http_rpc_session()
241 241 url = urlparse.urljoin('http://%s' % server_and_port, '/server')
242 242 return client_http.RemoteObject(url, session)
243 243
244 244
245 245 class CurlSession(object):
246 246 """
247 247 Modeled so that it provides a subset of the requests interface.
248 248
249 249 This has been created so that it does only provide a minimal API for our
250 250 needs. The parts which it provides are based on the API of the library
251 251 `requests` which allows us to easily benchmark against it.
252 252
253 253 Please have a look at the class :class:`requests.Session` when you extend
254 254 it.
255 255 """
256 256
257 257 def __init__(self):
258 258 curl = pycurl.Curl()
259 259 # TODO: johbo: I did test with 7.19 of libcurl. This version has
260 260 # trouble with 100 - continue being set in the expect header. This
261 261 # can lead to massive performance drops, switching it off here.
262 262 curl.setopt(curl.HTTPHEADER, ["Expect:"])
263 263 curl.setopt(curl.TCP_NODELAY, True)
264 264 curl.setopt(curl.PROTOCOLS, curl.PROTO_HTTP)
265 265 self._curl = curl
266 266
267 267 def post(self, url, data, allow_redirects=False):
268 268 response_buffer = StringIO()
269 269
270 270 curl = self._curl
271 271 curl.setopt(curl.URL, url)
272 272 curl.setopt(curl.POST, True)
273 273 curl.setopt(curl.POSTFIELDS, data)
274 274 curl.setopt(curl.FOLLOWLOCATION, allow_redirects)
275 275 curl.setopt(curl.WRITEDATA, response_buffer)
276 276 curl.perform()
277 277
278 278 return CurlResponse(response_buffer)
279 279
280 280
281 281 class CurlResponse(object):
282 282 """
283 283 The response of a request, modeled after the requests API.
284 284
285 285 This class provides a subset of the response interface known from the
286 286 library `requests`. It is intentionally kept similar, so that we can use
287 287 `requests` as a drop in replacement for benchmarking purposes.
288 288 """
289 289
290 290 def __init__(self, response_buffer):
291 291 self._response_buffer = response_buffer
292 292
293 293 @property
294 294 def content(self):
295 295 return self._response_buffer.getvalue()
296 296
297 297
298 298 def _create_http_rpc_session():
299 299 session = CurlSession()
300 300 return session
@@ -1,1025 +1,1025 b''
1 1 # (c) 2005 Ian Bicking and contributors; written for Paste
2 2 # (http://pythonpaste.org) Licensed under the MIT license:
3 3 # http://www.opensource.org/licenses/mit-license.php
4 4 #
5 5 # For discussion of daemonizing:
6 6 # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
7 7 #
8 8 # Code taken also from QP: http://www.mems-exchange.org/software/qp/ From
9 9 # lib/site.py
10 10
11 11 import atexit
12 12 import errno
13 13 import fnmatch
14 14 import logging
15 15 import optparse
16 16 import os
17 17 import re
18 import subprocess
18 import subprocess32
19 19 import sys
20 20 import textwrap
21 21 import threading
22 22 import time
23 23 import traceback
24 24
25 25 from logging.config import fileConfig
26 26 import ConfigParser as configparser
27 27 from paste.deploy import loadserver
28 28 from paste.deploy import loadapp
29 29
30 30 import rhodecode
31 31 from rhodecode.lib.compat import kill
32 32
33 33
34 34 def make_web_build_callback(filename):
35 p = subprocess.Popen('make web-build', shell=True,
36 stdout=subprocess.PIPE,
37 stderr=subprocess.PIPE,
35 p = subprocess32.Popen('make web-build', shell=True,
36 stdout=subprocess32.PIPE,
37 stderr=subprocess32.PIPE,
38 38 cwd=os.path.dirname(os.path.dirname(__file__)))
39 39 stdout, stderr = p.communicate()
40 40 stdout = ''.join(stdout)
41 41 stderr = ''.join(stderr)
42 42 if stdout:
43 43 print stdout
44 44 if stderr:
45 45 print ('%s %s %s' % ('-' * 20, 'ERRORS', '-' * 20))
46 46 print stderr
47 47
48 48
49 49 MAXFD = 1024
50 50 HERE = os.path.dirname(os.path.abspath(__file__))
51 51 SERVER_RUNNING_FILE = None
52 52
53 53
54 54 # watch those extra files for changes, server gets restarted if file changes
55 55 GLOBAL_EXTRA_FILES = {
56 56 'rhodecode/public/css/*.less': make_web_build_callback,
57 57 'rhodecode/public/js/src/**/*.js': make_web_build_callback,
58 58 }
59 59
60 60
61 61
62 62 ## HOOKS - inspired by gunicorn #
63 63
64 64 def when_ready(server):
65 65 """
66 66 Called just after the server is started.
67 67 """
68 68
69 69 def _remove_server_running_file():
70 70 if os.path.isfile(SERVER_RUNNING_FILE):
71 71 os.remove(SERVER_RUNNING_FILE)
72 72
73 73 if SERVER_RUNNING_FILE:
74 74 with open(SERVER_RUNNING_FILE, 'wb') as f:
75 75 f.write(str(os.getpid()))
76 76 # register cleanup of that file when server exits
77 77 atexit.register(_remove_server_running_file)
78 78
79 79
80 80 def setup_logging(config_uri, fileConfig=fileConfig,
81 81 configparser=configparser):
82 82 """
83 83 Set up logging via the logging module's fileConfig function with the
84 84 filename specified via ``config_uri`` (a string in the form
85 85 ``filename#sectionname``).
86 86
87 87 ConfigParser defaults are specified for the special ``__file__``
88 88 and ``here`` variables, similar to PasteDeploy config loading.
89 89 """
90 90 path, _ = _getpathsec(config_uri, None)
91 91 parser = configparser.ConfigParser()
92 92 parser.read([path])
93 93 if parser.has_section('loggers'):
94 94 config_file = os.path.abspath(path)
95 95 return fileConfig(
96 96 config_file,
97 97 {'__file__': config_file, 'here': os.path.dirname(config_file)}
98 98 )
99 99
100 100
101 101 def set_rhodecode_is_test(config_uri):
102 102 """If is_test is defined in the config file sets rhodecode.is_test."""
103 103 path, _ = _getpathsec(config_uri, None)
104 104 parser = configparser.ConfigParser()
105 105 parser.read(path)
106 106 rhodecode.is_test = (
107 107 parser.has_option('app:main', 'is_test') and
108 108 parser.getboolean('app:main', 'is_test'))
109 109
110 110
111 111 def _getpathsec(config_uri, name):
112 112 if '#' in config_uri:
113 113 path, section = config_uri.split('#', 1)
114 114 else:
115 115 path, section = config_uri, 'main'
116 116 if name:
117 117 section = name
118 118 return path, section
119 119
120 120
121 121 def parse_vars(args):
122 122 """
123 123 Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
124 124 'b', 'c': 'd'}``
125 125 """
126 126 result = {}
127 127 for arg in args:
128 128 if '=' not in arg:
129 129 raise ValueError(
130 130 'Variable assignment %r invalid (no "=")'
131 131 % arg)
132 132 name, value = arg.split('=', 1)
133 133 result[name] = value
134 134 return result
135 135
136 136
137 137 def _match_pattern(filename):
138 138 for pattern in GLOBAL_EXTRA_FILES:
139 139 if fnmatch.fnmatch(filename, pattern):
140 140 return pattern
141 141 return False
142 142
143 143
144 144 def generate_extra_file_list():
145 145
146 146 extra_list = []
147 147 for root, dirs, files in os.walk(HERE, topdown=True):
148 148 for fname in files:
149 149 stripped_src = os.path.join(
150 150 'rhodecode', os.path.relpath(os.path.join(root, fname), HERE))
151 151
152 152 if _match_pattern(stripped_src):
153 153 extra_list.append(stripped_src)
154 154
155 155 return extra_list
156 156
157 157
158 158 def run_callback_for_pattern(filename):
159 159 pattern = _match_pattern(filename)
160 160 if pattern:
161 161 _file_callback = GLOBAL_EXTRA_FILES.get(pattern)
162 162 if callable(_file_callback):
163 163 _file_callback(filename)
164 164
165 165
166 166 class DaemonizeException(Exception):
167 167 pass
168 168
169 169
170 170 class RcServerCommand(object):
171 171
172 172 usage = '%prog config_uri [start|stop|restart|status] [var=value]'
173 173 description = """\
174 174 This command serves a web application that uses a PasteDeploy
175 175 configuration file for the server and application.
176 176
177 177 If start/stop/restart is given, then --daemon is implied, and it will
178 178 start (normal operation), stop (--stop-daemon), or do both.
179 179
180 180 You can also include variable assignments like 'http_port=8080'
181 181 and then use %(http_port)s in your config files.
182 182 """
183 183 default_verbosity = 1
184 184
185 185 parser = optparse.OptionParser(
186 186 usage,
187 187 description=textwrap.dedent(description)
188 188 )
189 189 parser.add_option(
190 190 '-n', '--app-name',
191 191 dest='app_name',
192 192 metavar='NAME',
193 193 help="Load the named application (default main)")
194 194 parser.add_option(
195 195 '-s', '--server',
196 196 dest='server',
197 197 metavar='SERVER_TYPE',
198 198 help="Use the named server.")
199 199 parser.add_option(
200 200 '--server-name',
201 201 dest='server_name',
202 202 metavar='SECTION_NAME',
203 203 help=("Use the named server as defined in the configuration file "
204 204 "(default: main)"))
205 205 parser.add_option(
206 206 '--with-vcsserver',
207 207 dest='vcs_server',
208 208 action='store_true',
209 209 help=("Start the vcsserver instance together with the RhodeCode server"))
210 210 if hasattr(os, 'fork'):
211 211 parser.add_option(
212 212 '--daemon',
213 213 dest="daemon",
214 214 action="store_true",
215 215 help="Run in daemon (background) mode")
216 216 parser.add_option(
217 217 '--pid-file',
218 218 dest='pid_file',
219 219 metavar='FILENAME',
220 220 help=("Save PID to file (default to pyramid.pid if running in "
221 221 "daemon mode)"))
222 222 parser.add_option(
223 223 '--running-file',
224 224 dest='running_file',
225 225 metavar='RUNNING_FILE',
226 226 help="Create a running file after the server is initalized with "
227 227 "stored PID of process")
228 228 parser.add_option(
229 229 '--log-file',
230 230 dest='log_file',
231 231 metavar='LOG_FILE',
232 232 help="Save output to the given log file (redirects stdout)")
233 233 parser.add_option(
234 234 '--reload',
235 235 dest='reload',
236 236 action='store_true',
237 237 help="Use auto-restart file monitor")
238 238 parser.add_option(
239 239 '--reload-interval',
240 240 dest='reload_interval',
241 241 default=1,
242 242 help=("Seconds between checking files (low number can cause "
243 243 "significant CPU usage)"))
244 244 parser.add_option(
245 245 '--monitor-restart',
246 246 dest='monitor_restart',
247 247 action='store_true',
248 248 help="Auto-restart server if it dies")
249 249 parser.add_option(
250 250 '--status',
251 251 action='store_true',
252 252 dest='show_status',
253 253 help="Show the status of the (presumably daemonized) server")
254 254 parser.add_option(
255 255 '-v', '--verbose',
256 256 default=default_verbosity,
257 257 dest='verbose',
258 258 action='count',
259 259 help="Set verbose level (default "+str(default_verbosity)+")")
260 260 parser.add_option(
261 261 '-q', '--quiet',
262 262 action='store_const',
263 263 const=0,
264 264 dest='verbose',
265 265 help="Suppress verbose output")
266 266
267 267 if hasattr(os, 'setuid'):
268 268 # I don't think these are available on Windows
269 269 parser.add_option(
270 270 '--user',
271 271 dest='set_user',
272 272 metavar="USERNAME",
273 273 help="Set the user (usually only possible when run as root)")
274 274 parser.add_option(
275 275 '--group',
276 276 dest='set_group',
277 277 metavar="GROUP",
278 278 help="Set the group (usually only possible when run as root)")
279 279
280 280 parser.add_option(
281 281 '--stop-daemon',
282 282 dest='stop_daemon',
283 283 action='store_true',
284 284 help=('Stop a daemonized server (given a PID file, or default '
285 285 'pyramid.pid file)'))
286 286
287 287 _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
288 288
289 289 _reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
290 290 _monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
291 291
292 292 possible_subcommands = ('start', 'stop', 'restart', 'status')
293 293
294 294 def __init__(self, argv, quiet=False):
295 295 self.options, self.args = self.parser.parse_args(argv[1:])
296 296 if quiet:
297 297 self.options.verbose = 0
298 298
299 299 def out(self, msg): # pragma: no cover
300 300 if self.options.verbose > 0:
301 301 print(msg)
302 302
303 303 def get_options(self):
304 304 if (len(self.args) > 1
305 305 and self.args[1] in self.possible_subcommands):
306 306 restvars = self.args[2:]
307 307 else:
308 308 restvars = self.args[1:]
309 309
310 310 return parse_vars(restvars)
311 311
312 312 def run(self): # pragma: no cover
313 313 if self.options.stop_daemon:
314 314 return self.stop_daemon()
315 315
316 316 if not hasattr(self.options, 'set_user'):
317 317 # Windows case:
318 318 self.options.set_user = self.options.set_group = None
319 319
320 320 # @@: Is this the right stage to set the user at?
321 321 self.change_user_group(
322 322 self.options.set_user, self.options.set_group)
323 323
324 324 if not self.args:
325 325 self.out('Please provide configuration file as first argument, '
326 326 'most likely it should be production.ini')
327 327 return 2
328 328 app_spec = self.args[0]
329 329
330 330 if (len(self.args) > 1
331 331 and self.args[1] in self.possible_subcommands):
332 332 cmd = self.args[1]
333 333 else:
334 334 cmd = None
335 335
336 336 if self.options.reload:
337 337 if os.environ.get(self._reloader_environ_key):
338 338 if self.options.verbose > 1:
339 339 self.out('Running reloading file monitor')
340 340
341 341 install_reloader(int(self.options.reload_interval),
342 342 [app_spec] + generate_extra_file_list())
343 343 # if self.requires_config_file:
344 344 # watch_file(self.args[0])
345 345 else:
346 346 return self.restart_with_reloader()
347 347
348 348 if cmd not in (None, 'start', 'stop', 'restart', 'status'):
349 349 self.out(
350 350 'Error: must give start|stop|restart (not %s)' % cmd)
351 351 return 2
352 352
353 353 if cmd == 'status' or self.options.show_status:
354 354 return self.show_status()
355 355
356 356 if cmd == 'restart' or cmd == 'stop':
357 357 result = self.stop_daemon()
358 358 if result:
359 359 if cmd == 'restart':
360 360 self.out("Could not stop daemon; aborting")
361 361 else:
362 362 self.out("Could not stop daemon")
363 363 return result
364 364 if cmd == 'stop':
365 365 return result
366 366 self.options.daemon = True
367 367
368 368 if cmd == 'start':
369 369 self.options.daemon = True
370 370
371 371 app_name = self.options.app_name
372 372
373 373 vars = self.get_options()
374 374
375 375 if self.options.vcs_server:
376 376 vars['vcs.start_server'] = 'true'
377 377
378 378 if self.options.running_file:
379 379 global SERVER_RUNNING_FILE
380 380 SERVER_RUNNING_FILE = self.options.running_file
381 381
382 382 if not self._scheme_re.search(app_spec):
383 383 app_spec = 'config:' + app_spec
384 384 server_name = self.options.server_name
385 385 if self.options.server:
386 386 server_spec = 'egg:pyramid'
387 387 assert server_name is None
388 388 server_name = self.options.server
389 389 else:
390 390 server_spec = app_spec
391 391 base = os.getcwd()
392 392
393 393 if getattr(self.options, 'daemon', False):
394 394 if not self.options.pid_file:
395 395 self.options.pid_file = 'pyramid.pid'
396 396 if not self.options.log_file:
397 397 self.options.log_file = 'pyramid.log'
398 398
399 399 # Ensure the log file is writeable
400 400 if self.options.log_file:
401 401 try:
402 402 writeable_log_file = open(self.options.log_file, 'a')
403 403 except IOError as ioe:
404 404 msg = 'Error: Unable to write to log file: %s' % ioe
405 405 raise ValueError(msg)
406 406 writeable_log_file.close()
407 407
408 408 # Ensure the pid file is writeable
409 409 if self.options.pid_file:
410 410 try:
411 411 writeable_pid_file = open(self.options.pid_file, 'a')
412 412 except IOError as ioe:
413 413 msg = 'Error: Unable to write to pid file: %s' % ioe
414 414 raise ValueError(msg)
415 415 writeable_pid_file.close()
416 416
417 417
418 418 if getattr(self.options, 'daemon', False):
419 419 try:
420 420 self.daemonize()
421 421 except DaemonizeException as ex:
422 422 if self.options.verbose > 0:
423 423 self.out(str(ex))
424 424 return 2
425 425
426 426 if (self.options.monitor_restart
427 427 and not os.environ.get(self._monitor_environ_key)):
428 428 return self.restart_with_monitor()
429 429
430 430 if self.options.pid_file:
431 431 self.record_pid(self.options.pid_file)
432 432
433 433 if self.options.log_file:
434 434 stdout_log = LazyWriter(self.options.log_file, 'a')
435 435 sys.stdout = stdout_log
436 436 sys.stderr = stdout_log
437 437 logging.basicConfig(stream=stdout_log)
438 438
439 439 log_fn = app_spec
440 440 if log_fn.startswith('config:'):
441 441 log_fn = app_spec[len('config:'):]
442 442 elif log_fn.startswith('egg:'):
443 443 log_fn = None
444 444 if log_fn:
445 445 log_fn = os.path.join(base, log_fn)
446 446 setup_logging(log_fn)
447 447 set_rhodecode_is_test(log_fn)
448 448
449 449 server = self.loadserver(server_spec, name=server_name,
450 450 relative_to=base, global_conf=vars)
451 451 # starting hooks
452 452 app = self.loadapp(app_spec, name=app_name, relative_to=base,
453 453 global_conf=vars)
454 454
455 455 if self.options.verbose > 0:
456 456 if hasattr(os, 'getpid'):
457 457 msg = 'Starting %s in PID %i.' % (__name__, os.getpid())
458 458 else:
459 459 msg = 'Starting %s.' % (__name__,)
460 460 self.out(msg)
461 461 if SERVER_RUNNING_FILE:
462 462 self.out('PID file written as %s' % (SERVER_RUNNING_FILE, ))
463 463 elif not self.options.pid_file:
464 464 self.out('No PID file written by default.')
465 465
466 466 try:
467 467 when_ready(server)
468 468 server(app)
469 469 except (SystemExit, KeyboardInterrupt) as e:
470 470 if self.options.verbose > 1:
471 471 raise
472 472 if str(e):
473 473 msg = ' ' + str(e)
474 474 else:
475 475 msg = ''
476 476 self.out('Exiting%s (-v to see traceback)' % msg)
477 477
478 478
479 479 def loadapp(self, app_spec, name, relative_to, **kw): # pragma: no cover
480 480 return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
481 481
482 482 def loadserver(self, server_spec, name, relative_to, **kw): # pragma:no cover
483 483 return loadserver(
484 484 server_spec, name=name, relative_to=relative_to, **kw)
485 485
486 486 def quote_first_command_arg(self, arg): # pragma: no cover
487 487 """
488 488 There's a bug in Windows when running an executable that's
489 489 located inside a path with a space in it. This method handles
490 490 that case, or on non-Windows systems or an executable with no
491 491 spaces, it just leaves well enough alone.
492 492 """
493 493 if sys.platform != 'win32' or ' ' not in arg:
494 494 # Problem does not apply:
495 495 return arg
496 496 try:
497 497 import win32api
498 498 except ImportError:
499 499 raise ValueError(
500 500 "The executable %r contains a space, and in order to "
501 501 "handle this issue you must have the win32api module "
502 502 "installed" % arg)
503 503 arg = win32api.GetShortPathName(arg)
504 504 return arg
505 505
506 506 def daemonize(self): # pragma: no cover
507 507 pid = live_pidfile(self.options.pid_file)
508 508 if pid:
509 509 raise DaemonizeException(
510 510 "Daemon is already running (PID: %s from PID file %s)"
511 511 % (pid, self.options.pid_file))
512 512
513 513 if self.options.verbose > 0:
514 514 self.out('Entering daemon mode')
515 515 pid = os.fork()
516 516 if pid:
517 517 # The forked process also has a handle on resources, so we
518 518 # *don't* want proper termination of the process, we just
519 519 # want to exit quick (which os._exit() does)
520 520 os._exit(0)
521 521 # Make this the session leader
522 522 os.setsid()
523 523 # Fork again for good measure!
524 524 pid = os.fork()
525 525 if pid:
526 526 os._exit(0)
527 527
528 528 # @@: Should we set the umask and cwd now?
529 529
530 530 import resource # Resource usage information.
531 531 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
532 532 if maxfd == resource.RLIM_INFINITY:
533 533 maxfd = MAXFD
534 534 # Iterate through and close all file descriptors.
535 535 for fd in range(0, maxfd):
536 536 try:
537 537 os.close(fd)
538 538 except OSError: # ERROR, fd wasn't open to begin with (ignored)
539 539 pass
540 540
541 541 if hasattr(os, "devnull"):
542 542 REDIRECT_TO = os.devnull
543 543 else:
544 544 REDIRECT_TO = "/dev/null"
545 545 os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
546 546 # Duplicate standard input to standard output and standard error.
547 547 os.dup2(0, 1) # standard output (1)
548 548 os.dup2(0, 2) # standard error (2)
549 549
550 550 def _remove_pid_file(self, written_pid, filename, verbosity):
551 551 current_pid = os.getpid()
552 552 if written_pid != current_pid:
553 553 # A forked process must be exiting, not the process that
554 554 # wrote the PID file
555 555 return
556 556 if not os.path.exists(filename):
557 557 return
558 558 with open(filename) as f:
559 559 content = f.read().strip()
560 560 try:
561 561 pid_in_file = int(content)
562 562 except ValueError:
563 563 pass
564 564 else:
565 565 if pid_in_file != current_pid:
566 566 msg = "PID file %s contains %s, not expected PID %s"
567 567 self.out(msg % (filename, pid_in_file, current_pid))
568 568 return
569 569 if verbosity > 0:
570 570 self.out("Removing PID file %s" % filename)
571 571 try:
572 572 os.unlink(filename)
573 573 return
574 574 except OSError as e:
575 575 # Record, but don't give traceback
576 576 self.out("Cannot remove PID file: (%s)" % e)
577 577 # well, at least lets not leave the invalid PID around...
578 578 try:
579 579 with open(filename, 'w') as f:
580 580 f.write('')
581 581 except OSError as e:
582 582 self.out('Stale PID left in file: %s (%s)' % (filename, e))
583 583 else:
584 584 self.out('Stale PID removed')
585 585
586 586 def record_pid(self, pid_file):
587 587 pid = os.getpid()
588 588 if self.options.verbose > 1:
589 589 self.out('Writing PID %s to %s' % (pid, pid_file))
590 590 with open(pid_file, 'w') as f:
591 591 f.write(str(pid))
592 592 atexit.register(self._remove_pid_file, pid, pid_file, self.options.verbose)
593 593
594 594 def stop_daemon(self): # pragma: no cover
595 595 pid_file = self.options.pid_file or 'pyramid.pid'
596 596 if not os.path.exists(pid_file):
597 597 self.out('No PID file exists in %s' % pid_file)
598 598 return 1
599 599 pid = read_pidfile(pid_file)
600 600 if not pid:
601 601 self.out("Not a valid PID file in %s" % pid_file)
602 602 return 1
603 603 pid = live_pidfile(pid_file)
604 604 if not pid:
605 605 self.out("PID in %s is not valid (deleting)" % pid_file)
606 606 try:
607 607 os.unlink(pid_file)
608 608 except (OSError, IOError) as e:
609 609 self.out("Could not delete: %s" % e)
610 610 return 2
611 611 return 1
612 612 for j in range(10):
613 613 if not live_pidfile(pid_file):
614 614 break
615 615 import signal
616 616 kill(pid, signal.SIGTERM)
617 617 time.sleep(1)
618 618 else:
619 619 self.out("failed to kill web process %s" % pid)
620 620 return 3
621 621 if os.path.exists(pid_file):
622 622 os.unlink(pid_file)
623 623 return 0
624 624
625 625 def show_status(self): # pragma: no cover
626 626 pid_file = self.options.pid_file or 'pyramid.pid'
627 627 if not os.path.exists(pid_file):
628 628 self.out('No PID file %s' % pid_file)
629 629 return 1
630 630 pid = read_pidfile(pid_file)
631 631 if not pid:
632 632 self.out('No PID in file %s' % pid_file)
633 633 return 1
634 634 pid = live_pidfile(pid_file)
635 635 if not pid:
636 636 self.out('PID %s in %s is not running' % (pid, pid_file))
637 637 return 1
638 638 self.out('Server running in PID %s' % pid)
639 639 return 0
640 640
641 641 def restart_with_reloader(self): # pragma: no cover
642 642 self.restart_with_monitor(reloader=True)
643 643
644 644 def restart_with_monitor(self, reloader=False): # pragma: no cover
645 645 if self.options.verbose > 0:
646 646 if reloader:
647 647 self.out('Starting subprocess with file monitor')
648 648 else:
649 649 self.out('Starting subprocess with monitor parent')
650 650 while 1:
651 651 args = [self.quote_first_command_arg(sys.executable)] + sys.argv
652 652 new_environ = os.environ.copy()
653 653 if reloader:
654 654 new_environ[self._reloader_environ_key] = 'true'
655 655 else:
656 656 new_environ[self._monitor_environ_key] = 'true'
657 657 proc = None
658 658 try:
659 659 try:
660 660 _turn_sigterm_into_systemexit()
661 proc = subprocess.Popen(args, env=new_environ)
661 proc = subprocess32.Popen(args, env=new_environ)
662 662 exit_code = proc.wait()
663 663 proc = None
664 664 except KeyboardInterrupt:
665 665 self.out('^C caught in monitor process')
666 666 if self.options.verbose > 1:
667 667 raise
668 668 return 1
669 669 finally:
670 670 if proc is not None:
671 671 import signal
672 672 try:
673 673 kill(proc.pid, signal.SIGTERM)
674 674 except (OSError, IOError):
675 675 pass
676 676
677 677 if reloader:
678 678 # Reloader always exits with code 3; but if we are
679 679 # a monitor, any exit code will restart
680 680 if exit_code != 3:
681 681 return exit_code
682 682 if self.options.verbose > 0:
683 683 self.out('%s %s %s' % ('-' * 20, 'Restarting', '-' * 20))
684 684
685 685 def change_user_group(self, user, group): # pragma: no cover
686 686 if not user and not group:
687 687 return
688 688 import pwd
689 689 import grp
690 690 uid = gid = None
691 691 if group:
692 692 try:
693 693 gid = int(group)
694 694 group = grp.getgrgid(gid).gr_name
695 695 except ValueError:
696 696 try:
697 697 entry = grp.getgrnam(group)
698 698 except KeyError:
699 699 raise ValueError(
700 700 "Bad group: %r; no such group exists" % group)
701 701 gid = entry.gr_gid
702 702 try:
703 703 uid = int(user)
704 704 user = pwd.getpwuid(uid).pw_name
705 705 except ValueError:
706 706 try:
707 707 entry = pwd.getpwnam(user)
708 708 except KeyError:
709 709 raise ValueError(
710 710 "Bad username: %r; no such user exists" % user)
711 711 if not gid:
712 712 gid = entry.pw_gid
713 713 uid = entry.pw_uid
714 714 if self.options.verbose > 0:
715 715 self.out('Changing user to %s:%s (%s:%s)' % (
716 716 user, group or '(unknown)', uid, gid))
717 717 if gid:
718 718 os.setgid(gid)
719 719 if uid:
720 720 os.setuid(uid)
721 721
722 722
723 723 class LazyWriter(object):
724 724
725 725 """
726 726 File-like object that opens a file lazily when it is first written
727 727 to.
728 728 """
729 729
730 730 def __init__(self, filename, mode='w'):
731 731 self.filename = filename
732 732 self.fileobj = None
733 733 self.lock = threading.Lock()
734 734 self.mode = mode
735 735
736 736 def open(self):
737 737 if self.fileobj is None:
738 738 with self.lock:
739 739 self.fileobj = open(self.filename, self.mode)
740 740 return self.fileobj
741 741
742 742 def close(self):
743 743 fileobj = self.fileobj
744 744 if fileobj is not None:
745 745 fileobj.close()
746 746
747 747 def __del__(self):
748 748 self.close()
749 749
750 750 def write(self, text):
751 751 fileobj = self.open()
752 752 fileobj.write(text)
753 753 fileobj.flush()
754 754
755 755 def writelines(self, text):
756 756 fileobj = self.open()
757 757 fileobj.writelines(text)
758 758 fileobj.flush()
759 759
760 760 def flush(self):
761 761 self.open().flush()
762 762
763 763
764 764 def live_pidfile(pidfile): # pragma: no cover
765 765 """
766 766 (pidfile:str) -> int | None
767 767 Returns an int found in the named file, if there is one,
768 768 and if there is a running process with that process id.
769 769 Return None if no such process exists.
770 770 """
771 771 pid = read_pidfile(pidfile)
772 772 if pid:
773 773 try:
774 774 kill(int(pid), 0)
775 775 return pid
776 776 except OSError as e:
777 777 if e.errno == errno.EPERM:
778 778 return pid
779 779 return None
780 780
781 781
782 782 def read_pidfile(filename):
783 783 if os.path.exists(filename):
784 784 try:
785 785 with open(filename) as f:
786 786 content = f.read()
787 787 return int(content.strip())
788 788 except (ValueError, IOError):
789 789 return None
790 790 else:
791 791 return None
792 792
793 793
794 794 def ensure_port_cleanup(
795 795 bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover
796 796 """
797 797 This makes sure any open ports are closed.
798 798
799 799 Does this by connecting to them until they give connection
800 800 refused. Servers should call like::
801 801
802 802 ensure_port_cleanup([80, 443])
803 803 """
804 804 atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
805 805 sleeptime=sleeptime)
806 806
807 807
808 808 def _cleanup_ports(
809 809 bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover
810 810 # Wait for the server to bind to the port.
811 811 import socket
812 812 import errno
813 813 for bound_address in bound_addresses:
814 814 for attempt in range(maxtries):
815 815 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
816 816 try:
817 817 sock.connect(bound_address)
818 818 except socket.error as e:
819 819 if e.args[0] != errno.ECONNREFUSED:
820 820 raise
821 821 break
822 822 else:
823 823 time.sleep(sleeptime)
824 824 else:
825 825 raise SystemExit('Timeout waiting for port.')
826 826 sock.close()
827 827
828 828
829 829 def _turn_sigterm_into_systemexit(): # pragma: no cover
830 830 """
831 831 Attempts to turn a SIGTERM exception into a SystemExit exception.
832 832 """
833 833 try:
834 834 import signal
835 835 except ImportError:
836 836 return
837 837 def handle_term(signo, frame):
838 838 raise SystemExit
839 839 signal.signal(signal.SIGTERM, handle_term)
840 840
841 841
842 842 def install_reloader(poll_interval=1, extra_files=None): # pragma: no cover
843 843 """
844 844 Install the reloading monitor.
845 845
846 846 On some platforms server threads may not terminate when the main
847 847 thread does, causing ports to remain open/locked. The
848 848 ``raise_keyboard_interrupt`` option creates a unignorable signal
849 849 which causes the whole application to shut-down (rudely).
850 850 """
851 851 mon = Monitor(poll_interval=poll_interval)
852 852 if extra_files is None:
853 853 extra_files = []
854 854 mon.extra_files.extend(extra_files)
855 855 t = threading.Thread(target=mon.periodic_reload)
856 856 t.setDaemon(True)
857 857 t.start()
858 858
859 859
860 860 class classinstancemethod(object):
861 861 """
862 862 Acts like a class method when called from a class, like an
863 863 instance method when called by an instance. The method should
864 864 take two arguments, 'self' and 'cls'; one of these will be None
865 865 depending on how the method was called.
866 866 """
867 867
868 868 def __init__(self, func):
869 869 self.func = func
870 870 self.__doc__ = func.__doc__
871 871
872 872 def __get__(self, obj, type=None):
873 873 return _methodwrapper(self.func, obj=obj, type=type)
874 874
875 875
876 876 class _methodwrapper(object):
877 877
878 878 def __init__(self, func, obj, type):
879 879 self.func = func
880 880 self.obj = obj
881 881 self.type = type
882 882
883 883 def __call__(self, *args, **kw):
884 884 assert not 'self' in kw and not 'cls' in kw, (
885 885 "You cannot use 'self' or 'cls' arguments to a "
886 886 "classinstancemethod")
887 887 return self.func(*((self.obj, self.type) + args), **kw)
888 888
889 889
890 890 class Monitor(object): # pragma: no cover
891 891 """
892 892 A file monitor and server restarter.
893 893
894 894 Use this like:
895 895
896 896 ..code-block:: Python
897 897
898 898 install_reloader()
899 899
900 900 Then make sure your server is installed with a shell script like::
901 901
902 902 err=3
903 903 while test "$err" -eq 3 ; do
904 904 python server.py
905 905 err="$?"
906 906 done
907 907
908 908 or is run from this .bat file (if you use Windows)::
909 909
910 910 @echo off
911 911 :repeat
912 912 python server.py
913 913 if %errorlevel% == 3 goto repeat
914 914
915 915 or run a monitoring process in Python (``pserve --reload`` does
916 916 this).
917 917
918 918 Use the ``watch_file(filename)`` function to cause a reload/restart for
919 919 other non-Python files (e.g., configuration files). If you have
920 920 a dynamic set of files that grows over time you can use something like::
921 921
922 922 def watch_config_files():
923 923 return CONFIG_FILE_CACHE.keys()
924 924 add_file_callback(watch_config_files)
925 925
926 926 Then every time the reloader polls files it will call
927 927 ``watch_config_files`` and check all the filenames it returns.
928 928 """
929 929 instances = []
930 930 global_extra_files = []
931 931 global_file_callbacks = []
932 932
933 933 def __init__(self, poll_interval):
934 934 self.module_mtimes = {}
935 935 self.keep_running = True
936 936 self.poll_interval = poll_interval
937 937 self.extra_files = list(self.global_extra_files)
938 938 self.instances.append(self)
939 939 self.file_callbacks = list(self.global_file_callbacks)
940 940
941 941 def _exit(self):
942 942 # use os._exit() here and not sys.exit() since within a
943 943 # thread sys.exit() just closes the given thread and
944 944 # won't kill the process; note os._exit does not call
945 945 # any atexit callbacks, nor does it do finally blocks,
946 946 # flush open files, etc. In otherwords, it is rude.
947 947 os._exit(3)
948 948
949 949 def periodic_reload(self):
950 950 while True:
951 951 if not self.check_reload():
952 952 self._exit()
953 953 break
954 954 time.sleep(self.poll_interval)
955 955
956 956 def check_reload(self):
957 957 filenames = list(self.extra_files)
958 958 for file_callback in self.file_callbacks:
959 959 try:
960 960 filenames.extend(file_callback())
961 961 except:
962 962 print(
963 963 "Error calling reloader callback %r:" % file_callback)
964 964 traceback.print_exc()
965 965 for module in list(sys.modules.values()):
966 966 try:
967 967 filename = module.__file__
968 968 except (AttributeError, ImportError):
969 969 continue
970 970 if filename is not None:
971 971 filenames.append(filename)
972 972
973 973 for filename in filenames:
974 974 try:
975 975 stat = os.stat(filename)
976 976 if stat:
977 977 mtime = stat.st_mtime
978 978 else:
979 979 mtime = 0
980 980 except (OSError, IOError):
981 981 continue
982 982 if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
983 983 mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
984 984 if not filename in self.module_mtimes:
985 985 self.module_mtimes[filename] = mtime
986 986 elif self.module_mtimes[filename] < mtime:
987 987 print("%s changed; reloading..." % filename)
988 988 run_callback_for_pattern(filename)
989 989 return False
990 990 return True
991 991
992 992 def watch_file(self, cls, filename):
993 993 """Watch the named file for changes"""
994 994 filename = os.path.abspath(filename)
995 995 if self is None:
996 996 for instance in cls.instances:
997 997 instance.watch_file(filename)
998 998 cls.global_extra_files.append(filename)
999 999 else:
1000 1000 self.extra_files.append(filename)
1001 1001
1002 1002 watch_file = classinstancemethod(watch_file)
1003 1003
1004 1004 def add_file_callback(self, cls, callback):
1005 1005 """Add a callback -- a function that takes no parameters -- that will
1006 1006 return a list of filenames to watch for changes."""
1007 1007 if self is None:
1008 1008 for instance in cls.instances:
1009 1009 instance.add_file_callback(callback)
1010 1010 cls.global_file_callbacks.append(callback)
1011 1011 else:
1012 1012 self.file_callbacks.append(callback)
1013 1013
1014 1014 add_file_callback = classinstancemethod(add_file_callback)
1015 1015
1016 1016 watch_file = Monitor.watch_file
1017 1017 add_file_callback = Monitor.add_file_callback
1018 1018
1019 1019
1020 1020 def main(argv=sys.argv, quiet=False):
1021 1021 command = RcServerCommand(argv, quiet=quiet)
1022 1022 return command.run()
1023 1023
1024 1024 if __name__ == '__main__': # pragma: no cover
1025 1025 sys.exit(main() or 0)
@@ -1,273 +1,273 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 from subprocess import Popen, PIPE
21 from subprocess32 import Popen, PIPE
22 22 import os
23 23 import shutil
24 24 import sys
25 25 import tempfile
26 26
27 27 import pytest
28 28 from sqlalchemy.engine import url
29 29
30 30 from rhodecode.tests.fixture import TestINI
31 31
32 32
33 33 def _get_dbs_from_metafunc(metafunc):
34 34 if hasattr(metafunc.function, 'dbs'):
35 35 # Supported backends by this test function, created from
36 36 # pytest.mark.dbs
37 37 backends = metafunc.function.dbs.args
38 38 else:
39 39 backends = metafunc.config.getoption('--dbs')
40 40 return backends
41 41
42 42
43 43 def pytest_generate_tests(metafunc):
44 44 # Support test generation based on --dbs parameter
45 45 if 'db_backend' in metafunc.fixturenames:
46 46 requested_backends = set(metafunc.config.getoption('--dbs'))
47 47 backends = _get_dbs_from_metafunc(metafunc)
48 48 backends = requested_backends.intersection(backends)
49 49 # TODO: johbo: Disabling a backend did not work out with
50 50 # parametrization, find better way to achieve this.
51 51 if not backends:
52 52 metafunc.function._skip = True
53 53 metafunc.parametrize('db_backend_name', backends)
54 54
55 55
56 56 def pytest_collection_modifyitems(session, config, items):
57 57 remaining = [
58 58 i for i in items if not getattr(i.obj, '_skip', False)]
59 59 items[:] = remaining
60 60
61 61
62 62 @pytest.fixture
63 63 def db_backend(
64 64 request, db_backend_name, pylons_config, tmpdir_factory):
65 65 basetemp = tmpdir_factory.getbasetemp().strpath
66 66 klass = _get_backend(db_backend_name)
67 67
68 68 option_name = '--{}-connection-string'.format(db_backend_name)
69 69 connection_string = request.config.getoption(option_name) or None
70 70
71 71 return klass(
72 72 config_file=pylons_config, basetemp=basetemp,
73 73 connection_string=connection_string)
74 74
75 75
76 76 def _get_backend(backend_type):
77 77 return {
78 78 'sqlite': SQLiteDBBackend,
79 79 'postgres': PostgresDBBackend,
80 80 'mysql': MySQLDBBackend,
81 81 '': EmptyDBBackend
82 82 }[backend_type]
83 83
84 84
85 85 class DBBackend(object):
86 86 _store = os.path.dirname(os.path.abspath(__file__))
87 87 _type = None
88 88 _base_ini_config = [{'app:main': {'vcs.start_server': 'false'}}]
89 89 _db_url = [{'app:main': {'sqlalchemy.db1.url': ''}}]
90 90 _base_db_name = 'rhodecode_test_db_backend'
91 91
92 92 def __init__(
93 93 self, config_file, db_name=None, basetemp=None,
94 94 connection_string=None):
95 95 self.fixture_store = os.path.join(self._store, self._type)
96 96 self.db_name = db_name or self._base_db_name
97 97 self._base_ini_file = config_file
98 98 self.stderr = ''
99 99 self.stdout = ''
100 100 self._basetemp = basetemp or tempfile.gettempdir()
101 101 self._repos_location = os.path.join(self._basetemp, 'rc_test_repos')
102 102 self.connection_string = connection_string
103 103
104 104 @property
105 105 def connection_string(self):
106 106 return self._connection_string
107 107
108 108 @connection_string.setter
109 109 def connection_string(self, new_connection_string):
110 110 if not new_connection_string:
111 111 new_connection_string = self.get_default_connection_string()
112 112 else:
113 113 new_connection_string = new_connection_string.format(
114 114 db_name=self.db_name)
115 115 url_parts = url.make_url(new_connection_string)
116 116 self._connection_string = new_connection_string
117 117 self.user = url_parts.username
118 118 self.password = url_parts.password
119 119 self.host = url_parts.host
120 120
121 121 def get_default_connection_string(self):
122 122 raise NotImplementedError('default connection_string is required.')
123 123
124 124 def execute(self, cmd, env=None, *args):
125 125 """
126 126 Runs command on the system with given ``args``.
127 127 """
128 128
129 129 command = cmd + ' ' + ' '.join(args)
130 130 sys.stdout.write(command)
131 131
132 132 # Tell Python to use UTF-8 encoding out stdout
133 133 _env = os.environ.copy()
134 134 _env['PYTHONIOENCODING'] = 'UTF-8'
135 135 if env:
136 136 _env.update(env)
137 137 self.p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, env=_env)
138 138 self.stdout, self.stderr = self.p.communicate()
139 139 sys.stdout.write('COMMAND:'+command+'\n')
140 140 sys.stdout.write(self.stdout)
141 141 return self.stdout, self.stderr
142 142
143 143 def assert_returncode_success(self):
144 144 assert self.p.returncode == 0, self.stderr
145 145
146 146 def setup_rhodecode_db(self, ini_params=None, env=None):
147 147 if not ini_params:
148 148 ini_params = self._base_ini_config
149 149
150 150 ini_params.extend(self._db_url)
151 151 with TestINI(self._base_ini_file, ini_params,
152 152 self._type, destroy=True) as _ini_file:
153 153 if not os.path.isdir(self._repos_location):
154 154 os.makedirs(self._repos_location)
155 155 self.execute(
156 156 "paster setup-rhodecode {0} --user=marcink "
157 157 "--email=marcin@rhodeocode.com --password={1} "
158 158 "--repos={2} --force-yes".format(
159 159 _ini_file, 'qweqwe', self._repos_location), env=env)
160 160
161 161 def upgrade_database(self, ini_params=None):
162 162 if not ini_params:
163 163 ini_params = self._base_ini_config
164 164 ini_params.extend(self._db_url)
165 165
166 166 test_ini = TestINI(
167 167 self._base_ini_file, ini_params, self._type, destroy=True)
168 168 with test_ini as ini_file:
169 169 if not os.path.isdir(self._repos_location):
170 170 os.makedirs(self._repos_location)
171 171 self.execute(
172 172 "paster upgrade-db {} --force-yes".format(ini_file))
173 173
174 174 def setup_db(self):
175 175 raise NotImplementedError
176 176
177 177 def teardown_db(self):
178 178 raise NotImplementedError
179 179
180 180 def import_dump(self, dumpname):
181 181 raise NotImplementedError
182 182
183 183
184 184 class EmptyDBBackend(DBBackend):
185 185 _type = ''
186 186
187 187 def setup_db(self):
188 188 pass
189 189
190 190 def teardown_db(self):
191 191 pass
192 192
193 193 def import_dump(self, dumpname):
194 194 pass
195 195
196 196 def assert_returncode_success(self):
197 197 assert True
198 198
199 199
200 200 class SQLiteDBBackend(DBBackend):
201 201 _type = 'sqlite'
202 202
203 203 def get_default_connection_string(self):
204 204 return 'sqlite:///{}/{}.sqlite'.format(self._basetemp, self.db_name)
205 205
206 206 def setup_db(self):
207 207 # dump schema for tests
208 208 # cp -v $TEST_DB_NAME
209 209 self._db_url = [{'app:main': {
210 210 'sqlalchemy.db1.url': self.connection_string}}]
211 211
212 212 def import_dump(self, dumpname):
213 213 dump = os.path.join(self.fixture_store, dumpname)
214 214 shutil.copy(
215 215 dump,
216 216 os.path.join(self._basetemp, '{0.db_name}.sqlite'.format(self)))
217 217
218 218 def teardown_db(self):
219 219 self.execute("rm -rf {}.sqlite".format(
220 220 os.path.join(self._basetemp, self.db_name)))
221 221
222 222
223 223 class MySQLDBBackend(DBBackend):
224 224 _type = 'mysql'
225 225
226 226 def get_default_connection_string(self):
227 227 return 'mysql://root:qweqwe@127.0.0.1/{}'.format(self.db_name)
228 228
229 229 def setup_db(self):
230 230 # dump schema for tests
231 231 # mysqldump -uroot -pqweqwe $TEST_DB_NAME
232 232 self._db_url = [{'app:main': {
233 233 'sqlalchemy.db1.url': self.connection_string}}]
234 234 self.execute("mysql -v -u{} -p{} -e 'create database '{}';'".format(
235 235 self.user, self.password, self.db_name))
236 236
237 237 def import_dump(self, dumpname):
238 238 dump = os.path.join(self.fixture_store, dumpname)
239 239 self.execute("mysql -u{} -p{} {} < {}".format(
240 240 self.user, self.password, self.db_name, dump))
241 241
242 242 def teardown_db(self):
243 243 self.execute("mysql -v -u{} -p{} -e 'drop database '{}';'".format(
244 244 self.user, self.password, self.db_name))
245 245
246 246
247 247 class PostgresDBBackend(DBBackend):
248 248 _type = 'postgres'
249 249
250 250 def get_default_connection_string(self):
251 251 return 'postgresql://postgres:qweqwe@localhost/{}'.format(self.db_name)
252 252
253 253 def setup_db(self):
254 254 # dump schema for tests
255 255 # pg_dump -U postgres -h localhost $TEST_DB_NAME
256 256 self._db_url = [{'app:main': {
257 257 'sqlalchemy.db1.url':
258 258 self.connection_string}}]
259 259 self.execute("PGPASSWORD={} psql -U {} -h localhost "
260 260 "-c 'create database '{}';'".format(
261 261 self.password, self.user, self.db_name))
262 262
263 263 def teardown_db(self):
264 264 self.execute("PGPASSWORD={} psql -U {} -h localhost "
265 265 "-c 'drop database if exists '{}';'".format(
266 266 self.password, self.user, self.db_name))
267 267
268 268 def import_dump(self, dumpname):
269 269 dump = os.path.join(self.fixture_store, dumpname)
270 270 self.execute(
271 271 "PGPASSWORD={} psql -U {} -h localhost -d {} -1 "
272 272 "-f {}".format(
273 273 self.password, self.user, self.db_name, dump))
@@ -1,136 +1,136 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2016-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Checking the chunked data transfer via HTTP
23 23 """
24 24
25 25 import os
26 26 import time
27 import subprocess
27 import subprocess32
28 28
29 29 import pytest
30 30 import requests
31 31
32 32 from rhodecode.lib.middleware.utils import scm_app_http
33 33 from rhodecode.tests.utils import wait_for_url
34 34
35 35
36 36 def test_does_chunked_end_to_end_transfer(scm_app):
37 37 response = requests.post(scm_app, data='', stream=True)
38 38 assert response.headers['Transfer-Encoding'] == 'chunked'
39 39 times = [time.time() for chunk in response.raw.read_chunked()]
40 40 assert times[1] - times[0] > 0.1, "Chunks arrived at the same time"
41 41
42 42
43 43 @pytest.fixture
44 44 def echo_app_chunking(request, available_port_factory):
45 45 """
46 46 Run the EchoApp via Waitress in a subprocess.
47 47
48 48 Return the URL endpoint to reach the app.
49 49 """
50 50 port = available_port_factory()
51 51 command = (
52 52 'waitress-serve --send-bytes 1 --port {port} --call '
53 53 'rhodecode.tests.lib.middleware.utils.test_scm_app_http_chunking'
54 54 ':create_echo_app')
55 55 command = command.format(port=port)
56 proc = subprocess.Popen(command.split(' '), bufsize=0)
56 proc = subprocess32.Popen(command.split(' '), bufsize=0)
57 57 echo_app_url = 'http://localhost:' + str(port)
58 58
59 59 @request.addfinalizer
60 60 def stop_echo_app():
61 61 proc.kill()
62 62
63 63 return echo_app_url
64 64
65 65
66 66 @pytest.fixture
67 67 def scm_app(request, available_port_factory, echo_app_chunking):
68 68 """
69 69 Run the scm_app in Waitress.
70 70
71 71 Returns the URL endpoint where this app can be reached.
72 72 """
73 73 port = available_port_factory()
74 74 command = (
75 75 'waitress-serve --send-bytes 1 --port {port} --call '
76 76 'rhodecode.tests.lib.middleware.utils.test_scm_app_http_chunking'
77 77 ':create_scm_app')
78 78 command = command.format(port=port)
79 79 env = os.environ.copy()
80 80 env["RC_ECHO_URL"] = echo_app_chunking
81 proc = subprocess.Popen(command.split(' '), bufsize=0, env=env)
81 proc = subprocess32.Popen(command.split(' '), bufsize=0, env=env)
82 82 scm_app_url = 'http://localhost:' + str(port)
83 83 wait_for_url(scm_app_url)
84 84
85 85 @request.addfinalizer
86 86 def stop_echo_app():
87 87 proc.kill()
88 88
89 89 return scm_app_url
90 90
91 91
92 92 class EchoApp(object):
93 93 """
94 94 Stub WSGI application which returns a chunked response to every request.
95 95 """
96 96
97 97 def __init__(self, repo_path, repo_name, config):
98 98 self._repo_path = repo_path
99 99
100 100 def __call__(self, environ, start_response):
101 101 environ['wsgi.input'].read()
102 102 status = '200 OK'
103 103 headers = []
104 104 start_response(status, headers)
105 105 return result_generator()
106 106
107 107
108 108 def result_generator():
109 109 """
110 110 Simulate chunked results.
111 111
112 112 The intended usage is to simulate a chunked response as we would get it
113 113 out of a vcs operation during a call to "hg clone".
114 114 """
115 115 yield 'waiting 2 seconds'
116 116 # Wait long enough so that the first chunk can go out
117 117 time.sleep(2)
118 118 yield 'final chunk'
119 119 # Another small wait, otherwise they go together
120 120 time.sleep(0.1)
121 121
122 122
123 123 def create_echo_app():
124 124 """
125 125 Create EchoApp filled with stub data.
126 126 """
127 127 return EchoApp('stub_path', 'repo_name', {})
128 128
129 129
130 130 def create_scm_app():
131 131 """
132 132 Create a scm_app hooked up to speak to EchoApp.
133 133 """
134 134 echo_app_url = os.environ["RC_ECHO_URL"]
135 135 return scm_app_http.VcsHttpProxy(
136 136 echo_app_url, 'stub_path', 'stub_name', None)
@@ -1,463 +1,463 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Module to test the performance of pull, push and clone operations.
23 23
24 24 It works by replaying a group of commits to the repo.
25 25 """
26 26
27 27 import argparse
28 28 import collections
29 29 import ConfigParser
30 30 import functools
31 31 import itertools
32 32 import os
33 33 import pprint
34 34 import shutil
35 import subprocess
35 import subprocess32
36 36 import sys
37 37 import time
38 38
39 39 import api
40 40
41 41
42 42 def mean(container):
43 43 """Return the mean of the container."""
44 44 if not container:
45 45 return -1.0
46 46 return sum(container) / len(container)
47 47
48 48
49 49 def keep_cwd(f):
50 50 """Decorator that keeps track of the starting working directory."""
51 51 @functools.wraps(f)
52 52 def wrapped_f(*args, **kwargs):
53 53 cur_dir = os.getcwd()
54 54 try:
55 55 return f(*args, **kwargs)
56 56 finally:
57 57 os.chdir(cur_dir)
58 58
59 59 return wrapped_f
60 60
61 61
62 62 def timed(f):
63 63 """Decorator that returns the time it took to execute the function."""
64 64 @functools.wraps(f)
65 65 def wrapped_f(*args, **kwargs):
66 66 start_time = time.time()
67 67 try:
68 68 f(*args, **kwargs)
69 69 finally:
70 70 return time.time() - start_time
71 71
72 72 return wrapped_f
73 73
74 74
75 75 def execute(*popenargs, **kwargs):
76 76 """Extension of subprocess.check_output to support writing to stdin."""
77 77 input = kwargs.pop('stdin', None)
78 78 stdin = None
79 79 if input:
80 stdin = subprocess.PIPE
80 stdin = subprocess32.PIPE
81 81 #if 'stderr' not in kwargs:
82 # kwargs['stderr'] = subprocess.PIPE
82 # kwargs['stderr'] = subprocess32.PIPE
83 83 if 'stdout' in kwargs:
84 84 raise ValueError('stdout argument not allowed, it will be overridden.')
85 process = subprocess.Popen(stdin=stdin, stdout=subprocess.PIPE,
85 process = subprocess32.Popen(stdin=stdin, stdout=subprocess32.PIPE,
86 86 *popenargs, **kwargs)
87 87 output, error = process.communicate(input=input)
88 88 retcode = process.poll()
89 89 if retcode:
90 90 cmd = kwargs.get("args")
91 91 if cmd is None:
92 92 cmd = popenargs[0]
93 93 print cmd, output, error
94 raise subprocess.CalledProcessError(retcode, cmd, output=output)
94 raise subprocess32.CalledProcessError(retcode, cmd, output=output)
95 95 return output
96 96
97 97
98 98 def get_repo_name(repo_url):
99 99 """Extract the repo name from its url."""
100 100 repo_url = repo_url.rstrip('/')
101 101 return repo_url.split('/')[-1].split('.')[0]
102 102
103 103
104 104 class TestPerformanceBase(object):
105 105 def __init__(self, base_dir, repo_url, n_commits, max_commits,
106 106 skip_commits):
107 107 self.repo_url = repo_url
108 108 self.repo_name = get_repo_name(self.repo_url)
109 109 self.upstream_repo_name = '%s_upstream' % self.repo_name
110 110 self.base_dir = os.path.abspath(base_dir)
111 111 self.n_commits = n_commits
112 112 self.max_commits = max_commits
113 113 self.skip_commits = skip_commits
114 114 self.push_times = []
115 115 self.pull_times = []
116 116 self.empty_pull_times = []
117 117 self.clone_time = -1.0
118 118 self.last_commit = None
119 119
120 120 self.cloned_repo = ''
121 121 self.pull_repo = ''
122 122 self.orig_repo = ''
123 123
124 124 def run(self):
125 125 try:
126 126 self.test()
127 127 except Exception as error:
128 128 print error
129 129 finally:
130 130 self.cleanup()
131 131
132 132 print 'Clone time :', self.clone_time
133 133 print 'Push time :', mean(self.push_times)
134 134 print 'Pull time :', mean(self.pull_times)
135 135 print 'Empty pull time:', mean(self.empty_pull_times)
136 136
137 137 return {
138 138 'clone': self.clone_time,
139 139 'push': mean(self.push_times),
140 140 'pull': mean(self.pull_times),
141 141 'empty_pull': mean(self.empty_pull_times),
142 142 }
143 143
144 144 @keep_cwd
145 145 def test(self):
146 146 os.chdir(self.base_dir)
147 147
148 148 self.orig_repo = os.path.join(self.base_dir, self.repo_name)
149 149 if not os.path.exists(self.orig_repo):
150 150 self.clone_repo(self.repo_url, default_only=True)
151 151
152 152 upstream_url = self.create_repo(self.upstream_repo_name, self.repo_type)
153 153
154 154 self.add_remote(self.orig_repo, upstream_url)
155 155
156 156 self.pull_repo = os.path.join(self.base_dir, '%s_pull' % self.repo_name)
157 157 self.clone_repo(upstream_url, self.pull_repo)
158 158
159 159 commits = self.get_commits(self.orig_repo)
160 160 self.last_commit = commits[-1]
161 161 if self.skip_commits:
162 162 self.push(
163 163 self.orig_repo, commits[self.skip_commits - 1], 'upstream')
164 164 commits = commits[self.skip_commits:self.max_commits]
165 165
166 166 print 'Working with %d commits' % len(commits)
167 167 for i in xrange(self.n_commits - 1, len(commits), self.n_commits):
168 168 commit = commits[i]
169 169 print 'Processing commit %s (%d)' % (commit, i + 1)
170 170 self.push_times.append(
171 171 self.push(self.orig_repo, commit, 'upstream'))
172 172 self.check_remote_last_commit_is(commit, upstream_url)
173 173
174 174 self.pull_times.append(self.pull(self.pull_repo))
175 175 self.check_local_last_commit_is(commit, self.pull_repo)
176 176
177 177 self.empty_pull_times.append(self.pull(self.pull_repo))
178 178
179 179 self.cloned_repo = os.path.join(self.base_dir,
180 180 '%s_clone' % self.repo_name)
181 181 self.clone_time = self.clone_repo(upstream_url, self.cloned_repo)
182 182
183 183 def cleanup(self):
184 184 try:
185 185 self.delete_repo(self.upstream_repo_name)
186 186 except api.ApiError:
187 187 # Continue in case we could not delete the repo. Maybe we did not
188 188 # create it in the first place.
189 189 pass
190 190
191 191 shutil.rmtree(self.pull_repo, ignore_errors=True)
192 192 shutil.rmtree(self.cloned_repo, ignore_errors=True)
193 193
194 194 if os.path.exists(self.orig_repo):
195 195 self.remove_remote(self.orig_repo)
196 196
197 197
198 198 class RhodeCodeMixin(object):
199 199 """Mixin providing the methods to create and delete repos in RhodeCode."""
200 200 def __init__(self, api_key):
201 201 self.api = api.RCApi(api_key=api_key)
202 202
203 203 def create_repo(self, repo_name, repo_type):
204 204 return self.api.create_repo(repo_name, repo_type,
205 205 'Repo for perfomance testing')
206 206
207 207 def delete_repo(self, repo_name):
208 208 return self.api.delete_repo(repo_name)
209 209
210 210
211 211 class GitMixin(object):
212 212 """Mixin providing the git operations."""
213 213 @timed
214 214 def clone_repo(self, repo_url, destination=None, default_only=False):
215 215 args = ['git', 'clone']
216 216 if default_only:
217 217 args.extend(['--branch', 'master', '--single-branch'])
218 218 args.append(repo_url)
219 219 if destination:
220 220 args.append(destination)
221 221 execute(args)
222 222
223 223 @keep_cwd
224 224 def add_remote(self, repo, remote_url, remote_name='upstream'):
225 225 self.remove_remote(repo, remote_name)
226 226 os.chdir(repo)
227 227 execute(['git', 'remote', 'add', remote_name, remote_url])
228 228
229 229 @keep_cwd
230 230 def remove_remote(self, repo, remote_name='upstream'):
231 231 os.chdir(repo)
232 232 remotes = execute(['git', 'remote']).split('\n')
233 233 if remote_name in remotes:
234 234 execute(['git', 'remote', 'remove', remote_name])
235 235
236 236 @keep_cwd
237 237 def get_commits(self, repo, branch='master'):
238 238 os.chdir(repo)
239 239 commits_list = execute(
240 240 ['git', 'log', '--first-parent', branch, '--pretty=%H'])
241 241 return commits_list.strip().split('\n')[::-1]
242 242
243 243 @timed
244 244 def push(self, repo, commit, remote_name=None):
245 245 os.chdir(repo)
246 246 try:
247 247 execute(['git', 'reset', '--soft', commit])
248 248 args = ['git', 'push']
249 249 if remote_name:
250 250 args.append(remote_name)
251 251 execute(args)
252 252 finally:
253 253 execute(['git', 'reset', '--soft', 'HEAD@{1}'])
254 254
255 255 @timed
256 256 def pull(self, repo):
257 257 os.chdir(repo)
258 258 execute(['git', 'pull'])
259 259
260 260 def _remote_last_commit(self, repo_url):
261 261 output = execute(['git', 'ls-remote', repo_url, 'HEAD'])
262 262 return output.split()[0]
263 263
264 264 def check_remote_last_commit_is(self, commit, repo_url):
265 265 last_remote_commit = self._remote_last_commit(repo_url)
266 266 if last_remote_commit != commit:
267 267 raise Exception('Push did not work, expected commit %s but got %s' %
268 268 (commit, last_remote_commit))
269 269
270 270 @keep_cwd
271 271 def _local_last_commit(self, repo):
272 272 os.chdir(repo)
273 273 return execute(['git', 'rev-parse', 'HEAD']).strip()
274 274
275 275 def check_local_last_commit_is(self, commit, repo):
276 276 last_local_commit = self._local_last_commit(repo)
277 277 if last_local_commit != commit:
278 278 raise Exception('Pull did not work, expected commit %s but got %s' %
279 279 (commit, last_local_commit))
280 280
281 281
282 282 class HgMixin(object):
283 283 """Mixin providing the mercurial operations."""
284 284 @timed
285 285 def clone_repo(self, repo_url, destination=None, default_only=False):
286 286 args = ['hg', 'clone']
287 287 if default_only:
288 288 args.extend(['--branch', 'default'])
289 289 args.append(repo_url)
290 290 if destination:
291 291 args.append(destination)
292 292 execute(args)
293 293
294 294 @keep_cwd
295 295 def add_remote(self, repo, remote_url, remote_name='upstream'):
296 296 self.remove_remote(repo, remote_name)
297 297 os.chdir(repo)
298 298 hgrc = ConfigParser.RawConfigParser()
299 299 hgrc.read('.hg/hgrc')
300 300 hgrc.set('paths', remote_name, remote_url)
301 301 with open('.hg/hgrc', 'w') as f:
302 302 hgrc.write(f)
303 303
304 304 @keep_cwd
305 305 def remove_remote(self, repo, remote_name='upstream'):
306 306 os.chdir(repo)
307 307 hgrc = ConfigParser.RawConfigParser()
308 308 hgrc.read('.hg/hgrc')
309 309 hgrc.remove_option('paths', remote_name)
310 310 with open('.hg/hgrc', 'w') as f:
311 311 hgrc.write(f)
312 312
313 313 @keep_cwd
314 314 def get_commits(self, repo, branch='default'):
315 315 os.chdir(repo)
316 316 # See http://stackoverflow.com/questions/15376649/is-there-a-mercurial-equivalent-to-git-log-first-parent
317 317 commits_list = execute(['hg', 'log', '--branch', branch, '--template',
318 318 '{node}\n', '--follow-first'])
319 319 return commits_list.strip().split('\n')[::-1]
320 320
321 321 @timed
322 322 def push(self, repo, commit, remote_name=None):
323 323 os.chdir(repo)
324 324 args = ['hg', 'push', '--rev', commit, '--new-branch']
325 325 if remote_name:
326 326 args.append(remote_name)
327 327 execute(args)
328 328
329 329 @timed
330 330 def pull(self, repo):
331 331 os.chdir(repo)
332 332 execute(['hg', '--config', 'alias.pull=pull', 'pull', '-u'])
333 333
334 334 def _remote_last_commit(self, repo_url):
335 335 return execute(['hg', 'identify', repo_url])[:12]
336 336
337 337 def check_remote_last_commit_is(self, commit, repo_url):
338 338 last_remote_commit = self._remote_last_commit(repo_url)
339 339 if not commit.startswith(last_remote_commit):
340 340 raise Exception('Push did not work, expected commit %s but got %s' %
341 341 (commit, last_remote_commit))
342 342
343 343 @keep_cwd
344 344 def _local_last_commit(self, repo):
345 345 os.chdir(repo)
346 346 return execute(['hg', 'identify'])[:12]
347 347
348 348 def check_local_last_commit_is(self, commit, repo):
349 349 last_local_commit = self._local_last_commit(repo)
350 350 if not commit.startswith(last_local_commit):
351 351 raise Exception('Pull did not work, expected commit %s but got %s' %
352 352 (commit, last_local_commit))
353 353
354 354
355 355 class GitTestPerformance(GitMixin, RhodeCodeMixin, TestPerformanceBase):
356 356 def __init__(self, base_dir, repo_url, n_commits, max_commits, skip_commits,
357 357 api_key):
358 358 TestPerformanceBase.__init__(self, base_dir, repo_url, n_commits,
359 359 max_commits, skip_commits)
360 360 RhodeCodeMixin.__init__(self, api_key)
361 361 self.repo_type = 'git'
362 362
363 363
364 364 class HgTestPerformance(HgMixin, RhodeCodeMixin, TestPerformanceBase):
365 365 def __init__(self, base_dir, repo_url, n_commits, max_commits, skip_commits,
366 366 api_key):
367 367 TestPerformanceBase.__init__(self, base_dir, repo_url, n_commits,
368 368 max_commits, skip_commits)
369 369 RhodeCodeMixin.__init__(self, api_key)
370 370 self.repo_type = 'hg'
371 371
372 372
373 373 def get_test(base_dir, repo_url, repo_type, step, max_commits, skip_commits,
374 374 api_key):
375 375 max_commits = min(10 * step,
376 376 int((max_commits - skip_commits) / step) * step)
377 377 max_commits += skip_commits
378 378 if repo_type == 'git':
379 379 return GitTestPerformance(
380 380 base_dir, repo_url, step, max_commits, skip_commits, api_key)
381 381 elif repo_type == 'hg':
382 382 return HgTestPerformance(
383 383 base_dir, repo_url, step, max_commits, skip_commits, api_key)
384 384
385 385
386 386 def main(argv):
387 387 parser = argparse.ArgumentParser(
388 388 description='Performance tests for push/pull/clone for git and ' +
389 389 'mercurial repos.')
390 390 parser.add_argument(
391 391 '--tests', dest='tests', action='store', required=False, default='all',
392 392 help='The tests to run. Default: all. But could be any comma ' +
393 393 'separated list with python, hg, kernel or git')
394 394 parser.add_argument(
395 395 '--sizes', dest='sizes', action='store', required=False,
396 396 default='1,10,100,1000,2500',
397 397 help='The sizes to use. Default: 1,10,100,1000,2500')
398 398 parser.add_argument(
399 399 '--dir', dest='dir', action='store', required=True,
400 400 help='The dir where to store the repos')
401 401 parser.add_argument(
402 402 '--api-key', dest='api_key', action='store', required=True,
403 403 help='The api key of RhodeCode')
404 404 options = parser.parse_args(argv[1:])
405 405 print options
406 406
407 407 test_config = {
408 408 'python': {
409 409 'url': 'https://hg.python.org/cpython/',
410 410 'limit': 23322,
411 411 'type': 'hg',
412 412 # Do not time the first commit, as it is HUGE!
413 413 'skip': 1,
414 414 },
415 415 'hg': {
416 416 'url': 'http://selenic.com/hg',
417 417 'limit': 14396,
418 418 'type': 'hg',
419 419 },
420 420 'kernel': {
421 421 'url': 'https://github.com/torvalds/linux.git',
422 422 'limit': 46271,
423 423 'type': 'git',
424 424 },
425 425 'git': {
426 426 'url': 'https://github.com/git/git.git',
427 427 'limit': 13525,
428 428 'type': 'git',
429 429 }
430 430
431 431 }
432 432
433 433 test_names = options.tests.split(',')
434 434 if test_names == ['all']:
435 435 test_names = test_config.keys()
436 436 if not set(test_names) <= set(test_config.keys()):
437 437 print ('Invalid tests: only %s are valid but specified %s' %
438 438 (test_config.keys(), test_names))
439 439 return 1
440 440
441 441 sizes = options.sizes.split(',')
442 442 sizes = map(int, sizes)
443 443
444 444 base_dir = options.dir
445 445 api_key = options.api_key
446 446 results = collections.defaultdict(dict)
447 447 for test_name, size in itertools.product(test_names, sizes):
448 448 test = get_test(base_dir,
449 449 test_config[test_name]['url'],
450 450 test_config[test_name]['type'],
451 451 size,
452 452 test_config[test_name]['limit'],
453 453 test_config[test_name].get('skip', 0),
454 454 api_key)
455 455 print '*' * 80
456 456 print 'Running performance test: %s with size %d' % (test_name, size)
457 457 print '*' * 80
458 458 results[test_name][size] = test.run()
459 459 pprint.pprint(dict(results))
460 460
461 461
462 462 if __name__ == '__main__':
463 463 sys.exit(main(sys.argv))
@@ -1,155 +1,155 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 This is a standalone script which will start VCS and RC.
23 23
24 24 Performance numbers will be written on each interval to:
25 25 vcs_profileX.csv
26 26 rc_profileX.csv
27 27
28 28 To stop the script by press Ctrl-C
29 29 """
30 30
31 31 import datetime
32 32 import os
33 33 import psutil
34 import subprocess
34 import subprocess32
35 35 import sys
36 36 import time
37 37 import traceback
38 38 import urllib
39 39
40 40 PROFILING_INTERVAL = 5
41 41 RC_WEBSITE = "http://localhost:5001/"
42 42
43 43
44 44 def get_file(prefix):
45 45 out_file = None
46 46 for i in xrange(100):
47 47 file_path = "%s_profile%.3d.csv" % (prefix, i)
48 48 if os.path.exists(file_path):
49 49 continue
50 50 out_file = open(file_path, "w")
51 51 out_file.write("Time; CPU %; Memory (MB); Total FDs; Dulwich FDs; Threads\n")
52 52 break
53 53 return out_file
54 54
55 55
56 56 def dump_system():
57 57 print "System Overview..."
58 58 print "\nCPU Count: %d (%d real)" % \
59 59 (psutil.cpu_count(), psutil.cpu_count(logical=False))
60 60 print "\nDisk:"
61 61 print psutil.disk_usage(os.sep)
62 62 print "\nMemory:"
63 63 print psutil.virtual_memory()
64 64 print "\nMemory (swap):"
65 65 print psutil.swap_memory()
66 66
67 67
68 68 def count_dulwich_fds(proc):
69 p = subprocess.Popen(["lsof", "-p", proc.pid], stdout=subprocess.PIPE)
69 p = subprocess32.Popen(["lsof", "-p", proc.pid], stdout=subprocess32.PIPE)
70 70 out, err = p.communicate()
71 71
72 72 count = 0
73 73 for line in out.splitlines():
74 74 content = line.split()
75 75 # http://git-scm.com/book/en/Git-Internals-Packfiles
76 76 if content[-1].endswith(".idx"):
77 77 count += 1
78 78
79 79 return count
80 80
81 81 def dump_process(pid, out_file):
82 82 now = datetime.datetime.now()
83 83 cpu = pid.cpu_percent()
84 84 mem = pid.memory_info()
85 85 fds = pid.num_fds()
86 86 dulwich_fds = count_dulwich_fds(pid)
87 87 threads = pid.num_threads()
88 88
89 89 content = [now.strftime('%m/%d/%y %H:%M:%S'),
90 90 cpu,
91 91 "%.2f" % (mem[0]/1024.0/1024.0),
92 92 fds, dulwich_fds, threads]
93 93 out_file.write("; ".join([str(item) for item in content]))
94 94 out_file.write("\n")
95 95
96 96
97 97 # Open output files
98 98 vcs_out = get_file("vcs")
99 99 if vcs_out is None:
100 100 print "Unable to enumerate output file for VCS"
101 101 sys.exit(1)
102 102 rc_out = get_file("rc")
103 103 if rc_out is None:
104 104 print "Unable to enumerate output file for RC"
105 105 sys.exit(1)
106 106
107 107 # Show system information
108 108 dump_system()
109 109
110 110 print "\nStarting VCS..."
111 111 vcs = psutil.Popen(["vcsserver"])
112 112 time.sleep(1)
113 113 if not vcs.is_running():
114 114 print "VCS - Failed to start"
115 115 sys.exit(1)
116 116 print "VCS - Ok"
117 117
118 118 print "\nStarting RhodeCode..."
119 119 rc = psutil.Popen("RC_VCSSERVER_TEST_DISABLE=1 paster serve test.ini",
120 shell=True, stdin=subprocess.PIPE)
120 shell=True, stdin=subprocess32.PIPE)
121 121 time.sleep(1)
122 122 if not rc.is_running():
123 123 print "RC - Failed to start"
124 124 vcs.terminate()
125 125 sys.exit(1)
126 126
127 127 # Send command to create the databases
128 128 rc.stdin.write("y\n")
129 129
130 130 # Verify that the website is up
131 131 time.sleep(4)
132 132 try:
133 133 urllib.urlopen(RC_WEBSITE)
134 134 except IOError:
135 135 print "RC - Website not started"
136 136 vcs.terminate()
137 137 sys.exit(1)
138 138 print "RC - Ok"
139 139
140 140 print "\nProfiling...\n%s\n" % ("-"*80)
141 141 while True:
142 142 try:
143 143 dump_process(vcs, vcs_out)
144 144 dump_process(rc, rc_out)
145 145 time.sleep(PROFILING_INTERVAL)
146 146 except Exception:
147 147 print traceback.format_exc()
148 148 break
149 149
150 150 # Finalize the profiling
151 151 vcs_out.close()
152 152 rc_out.close()
153 153
154 154 vcs.terminate()
155 155 rc.terminate()
@@ -1,305 +1,305 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2016-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 VCS Performance measurement tool
23 23
24 24 Usage:
25 25
26 26 - Check that required vcs keys can be found in ~/.hgrc and ~/.netrc
27 27
28 28 - Start a local instance of RhodeCode Enterprise
29 29
30 30 - Launch the script:
31 31
32 32 TMPDIR=/tmp python vcs_performance.py \
33 33 --host=http://vm:5000 \
34 34 --api-key=55c4a33688577da24183dcac5fde4dddfdbf18dc \
35 35 --commits=10 --repositories=100 --log-level=info
36 36 """
37 37
38 38 import argparse
39 39 import functools
40 40 import logging
41 41 import os
42 42 import shutil
43 import subprocess
43 import subprocess32
44 44 import tempfile
45 45 import time
46 46 from itertools import chain
47 47
48 48 from api import RCApi, ApiError
49 49
50 50
51 51 log = logging.getLogger(__name__)
52 52
53 53
54 54 def timed(f):
55 55 """Decorator that returns the time it took to execute the function."""
56 56 @functools.wraps(f)
57 57 def wrapped_f(*args, **kwargs):
58 58 start_time = time.time()
59 59 try:
60 60 f(*args, **kwargs)
61 61 finally:
62 62 return time.time() - start_time
63 63
64 64 return wrapped_f
65 65
66 66
67 67 def mean(container):
68 68 """Return the mean of the container."""
69 69 if not container:
70 70 return -1.0
71 71 return sum(container) / len(container)
72 72
73 73
74 74 class Config(object):
75 75 args = None
76 76
77 77 def __init__(self):
78 78 parser = argparse.ArgumentParser(description='Runs VCS load tests')
79 79 parser.add_argument(
80 80 '--host', dest='host', action='store', required=True,
81 81 help='RhodeCode Enterprise host')
82 82 parser.add_argument(
83 83 '--api-key', dest='api_key', action='store', required=True,
84 84 help='API Key')
85 85 parser.add_argument(
86 86 '--file-size', dest='file_size', action='store', required=False,
87 87 default=1, type=int, help='File size in MB')
88 88 parser.add_argument(
89 89 '--repositories', dest='repositories', action='store',
90 90 required=False, default=1, type=int,
91 91 help='Number of repositories')
92 92 parser.add_argument(
93 93 '--commits', dest='commits', action='store', required=False,
94 94 default=1, type=int, help='Number of commits')
95 95 parser.add_argument(
96 96 '--log-level', dest='log_level', action='store', required=False,
97 97 default='error', help='Logging level')
98 98 self.args = parser.parse_args()
99 99
100 100 def __getattr__(self, attr):
101 101 return getattr(self.args, attr)
102 102
103 103
104 104 class Repository(object):
105 105 FILE_NAME_TEMPLATE = "test_{:09d}.bin"
106 106
107 107 def __init__(self, name, base_path, api):
108 108 self.name = name
109 109 self.path = os.path.join(base_path, name)
110 110 self.api = api
111 111
112 112 def create(self):
113 113 self._create_filesystem_repo(self.path)
114 114 try:
115 115 self.url = self.api.create_repo(
116 116 self.name, self.TYPE, 'Performance tests')
117 117 except ApiError as e:
118 118 log.error('api: {}'.format(e))
119 119
120 120 def delete(self):
121 121 self._delete_filesystem_repo()
122 122 try:
123 123 self.api.delete_repo(self.name)
124 124 except ApiError as e:
125 125 log.error('api: {}'.format(e))
126 126
127 127 def create_commits(self, number, file_size):
128 128 for i in xrange(number):
129 129 file_name = self.FILE_NAME_TEMPLATE.format(i)
130 130 log.debug("Create commit {}".format(file_name))
131 131 self._create_file(file_name, file_size)
132 132 self._create_commit(file_name)
133 133
134 134 @timed
135 135 def push(self):
136 136 raise NotImplementedError()
137 137
138 138 @timed
139 139 def clone(self, destination_path):
140 140 raise NotImplementedError()
141 141
142 142 @timed
143 143 def pull(self):
144 144 raise NotImplementedError()
145 145
146 146 def _run(self, *args):
147 147 command = [self.BASE_COMMAND] + list(args)
148 process = subprocess.Popen(
149 command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
148 process = subprocess32.Popen(
149 command, stdout=subprocess32.PIPE, stderr=subprocess32.PIPE)
150 150 return process.communicate()
151 151
152 152 def _create_file(self, name, size):
153 153 file_name = os.path.join(self.path, name)
154 154 with open(file_name, 'wb') as f:
155 155 f.write(os.urandom(1024))
156 156
157 157 def _delete_filesystem_repo(self):
158 158 shutil.rmtree(self.path)
159 159
160 160 def _create_filesystem_repo(self, path):
161 161 raise NotImplementedError()
162 162
163 163 def _create_commit(self, file_name):
164 164 raise NotImplementedError()
165 165
166 166
167 167 class GitRepository(Repository):
168 168 TYPE = 'git'
169 169 BASE_COMMAND = 'git'
170 170
171 171 @timed
172 172 def push(self):
173 173 os.chdir(self.path)
174 174 self._run('push', '--set-upstream', self.url, 'master')
175 175
176 176 @timed
177 177 def clone(self, destination_path):
178 178 self._run('clone', self.url, os.path.join(destination_path, self.name))
179 179
180 180 @timed
181 181 def pull(self, destination_path):
182 182 path = os.path.join(destination_path, self.name)
183 183 self._create_filesystem_repo(path)
184 184 os.chdir(path)
185 185 self._run('remote', 'add', 'origin', self.url)
186 186 self._run('pull', 'origin', 'master')
187 187
188 188 def _create_filesystem_repo(self, path):
189 189 self._run('init', path)
190 190
191 191 def _create_commit(self, file_name):
192 192 os.chdir(self.path)
193 193 self._run('add', file_name)
194 194 self._run('commit', file_name, '-m', '"Add {}"'.format(file_name))
195 195
196 196
197 197 class HgRepository(Repository):
198 198 TYPE = 'hg'
199 199 BASE_COMMAND = 'hg'
200 200
201 201 @timed
202 202 def push(self):
203 203 os.chdir(self.path)
204 204 self._run('push', self.url)
205 205
206 206 @timed
207 207 def clone(self, destination_path):
208 208 self._run('clone', self.url, os.path.join(destination_path, self.name))
209 209
210 210 @timed
211 211 def pull(self, destination_path):
212 212 path = os.path.join(destination_path, self.name)
213 213 self._create_filesystem_repo(path)
214 214 os.chdir(path)
215 215 self._run('pull', '-r', 'tip', self.url)
216 216
217 217 def _create_filesystem_repo(self, path):
218 218 self._run('init', path)
219 219
220 220 def _create_commit(self, file_name):
221 221 os.chdir(self.path)
222 222 self._run('add', file_name)
223 223 self._run('commit', file_name, '-m', '"Add {}"'.format(file_name))
224 224
225 225
226 226 class Benchmark(object):
227 227 REPO_CLASSES = {
228 228 'git': GitRepository,
229 229 'hg': HgRepository
230 230 }
231 231 REPO_NAME = '{}_performance_{:03d}'
232 232
233 233 def __init__(self, config):
234 234 self.api = RCApi(api_key=config.api_key, rc_endpoint=config.host)
235 235 self.source_path = tempfile.mkdtemp(suffix='vcsperformance')
236 236
237 237 self.config = config
238 238 self.git_repos = []
239 239 self.hg_repos = []
240 240
241 241 self._set_log_level()
242 242
243 243 def start(self):
244 244 self._create_repos()
245 245 repos = {
246 246 'git': self.git_repos,
247 247 'hg': self.hg_repos
248 248 }
249 249
250 250 clone_destination_path = tempfile.mkdtemp(suffix='clone')
251 251 pull_destination_path = tempfile.mkdtemp(suffix='pull')
252 252 operations = [
253 253 ('push', ),
254 254 ('clone', clone_destination_path),
255 255 ('pull', pull_destination_path)
256 256 ]
257 257
258 258 for operation in operations:
259 259 for type_ in repos:
260 260 times = self._measure(repos[type_], *operation)
261 261 print("Mean {} {} time: {:.3f} sec.".format(
262 262 type_, operation[0], mean(times)))
263 263
264 264 def cleanup(self):
265 265 log.info("Cleaning up...")
266 266 for repo in chain(self.git_repos, self.hg_repos):
267 267 repo.delete()
268 268
269 269 def _measure(self, repos, operation, *args):
270 270 times = []
271 271 for repo in repos:
272 272 method = getattr(repo, operation)
273 273 times.append(method(*args))
274 274 return times
275 275
276 276 def _create_repos(self):
277 277 log.info("Creating repositories...")
278 278 for i in xrange(self.config.repositories):
279 279 self.git_repos.append(self._create_repo('git', i))
280 280 self.hg_repos.append(self._create_repo('hg', i))
281 281
282 282 def _create_repo(self, type_, id_):
283 283 RepoClass = self.REPO_CLASSES[type_]
284 284 repo = RepoClass(
285 285 self.REPO_NAME.format(type_, id_), self.source_path, self.api)
286 286 repo.create()
287 287 repo.create_commits(self.config.commits, self.config.file_size)
288 288 return repo
289 289
290 290 def _set_log_level(self):
291 291 try:
292 292 log_level = getattr(logging, config.log_level.upper())
293 293 except:
294 294 log_level = logging.ERROR
295 295 handler = logging.StreamHandler()
296 296 log.addHandler(handler)
297 297 log.setLevel(log_level)
298 298
299 299 if __name__ == '__main__':
300 300 config = Config()
301 301 benchmark = Benchmark(config)
302 302 try:
303 303 benchmark.start()
304 304 finally:
305 305 benchmark.cleanup()
@@ -1,147 +1,147 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Base for test suite for making push/pull operations.
23 23
24 24 .. important::
25 25
26 26 You must have git >= 1.8.5 for tests to work fine. With 68b939b git started
27 27 to redirect things to stderr instead of stdout.
28 28 """
29 29
30 30 from os.path import join as jn
31 from subprocess import Popen, PIPE
31 from subprocess32 import Popen, PIPE
32 32 import logging
33 33 import os
34 34 import tempfile
35 35
36 36 from rhodecode.tests import GIT_REPO, HG_REPO
37 37
38 38 DEBUG = True
39 39 RC_LOG = os.path.join(tempfile.gettempdir(), 'rc.log')
40 40 REPO_GROUP = 'a_repo_group'
41 41 HG_REPO_WITH_GROUP = '%s/%s' % (REPO_GROUP, HG_REPO)
42 42 GIT_REPO_WITH_GROUP = '%s/%s' % (REPO_GROUP, GIT_REPO)
43 43
44 44 log = logging.getLogger(__name__)
45 45
46 46
class Command(object):
    """Run shell commands inside a fixed working directory, keeping the
    last spawned process for later return-code assertions."""

    def __init__(self, cwd):
        self.cwd = cwd
        self.process = None

    def execute(self, cmd, *args):
        """
        Runs command on the system with given ``args``.

        :return: ``(stdout, stderr)`` of the finished process.
        """

        command = cmd + ' ' + ' '.join(args)
        if DEBUG:
            log.debug('*** CMD %s ***' % (command,))

        env = dict(os.environ)
        # Delete coverage variables, as they make the test fail for Mercurial.
        # Bug fix: iterate over a snapshot of the keys — deleting from the
        # dict while iterating its live key view raises RuntimeError on
        # Python 3.
        for key in list(env):
            if key.startswith('COV_CORE_'):
                del env[key]

        self.process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE,
                             cwd=self.cwd, env=env)
        stdout, stderr = self.process.communicate()
        if DEBUG:
            log.debug('STDOUT:%s' % (stdout,))
            log.debug('STDERR:%s' % (stderr,))
        return stdout, stderr

    def assert_returncode_success(self):
        """Assert that the last executed command exited with code 0."""
        assert self.process.returncode == 0
78 78
79 79
def _add_files_and_push(vcs, dest, clone_url=None, **kwargs):
    """
    Generate some files, add it to DEST repo and push back
    vcs is git or hg and defines what VCS we want to make those files for

    :param vcs: 'git' or 'hg' — selects the command set used below.
    :param dest: working copy path in which files are created/committed.
    :param clone_url: remote url pushed to; ``None`` pushes to the default.
    :param kwargs: ``files_no`` controls how many commits are made (default 3).
    :return: ``(stdout, stderr)`` of the push command, or ``(None, None)``
        when ``vcs`` matches neither branch.
    """
    # commit some stuff into this repo
    cwd = path = jn(dest)
    # NOTE(review): relies on the private tempfile._RandomNameSequence API
    # and its py2-only ``.next()`` method to get a unique file name.
    added_file = jn(path, '%ssetup.py' % tempfile._RandomNameSequence().next())
    Command(cwd).execute('touch %s' % added_file)
    Command(cwd).execute('%s add %s' % (vcs, added_file))
    author_str = 'Marcin KuΕΊminski <me@email.com>'

    # git takes its identity from repo-local config, not a -u flag.
    git_ident = "git config user.name {} && git config user.email {}".format(
        'Marcin KuΕΊminski', 'me@email.com')

    # One appended line + one commit per iteration.
    for i in xrange(kwargs.get('files_no', 3)):
        cmd = """echo 'added_line%s' >> %s""" % (i, added_file)
        Command(cwd).execute(cmd)
        if vcs == 'hg':
            cmd = """hg commit -m 'commited new %s' -u '%s' %s """ % (
                i, author_str, added_file
            )
        elif vcs == 'git':
            cmd = """%s && git commit -m 'commited new %s' %s""" % (
                git_ident, i, added_file)
        Command(cwd).execute(cmd)

    # PUSH it back
    stdout = stderr = None
    if vcs == 'hg':
        stdout, stderr = Command(cwd).execute(
            'hg push --verbose', clone_url)
    elif vcs == 'git':
        stdout, stderr = Command(cwd).execute(
            """%s && git push --verbose %s master""" % (
                git_ident, clone_url))

    return stdout, stderr
118 118
119 119
120 120 def _check_proper_git_push(
121 121 stdout, stderr, branch='master', should_set_default_branch=False):
122 122 # Note: Git is writing most information to stderr intentionally
123 123 assert 'fatal' not in stderr
124 124 assert 'rejected' not in stderr
125 125 assert 'Pushing to' in stderr
126 126 assert '%s -> %s' % (branch, branch) in stderr
127 127
128 128 if should_set_default_branch:
129 129 assert "Setting default branch to %s" % branch in stderr
130 130 else:
131 131 assert "Setting default branch" not in stderr
132 132
133 133
134 134 def _check_proper_clone(stdout, stderr, vcs):
135 135 if vcs == 'hg':
136 136 assert 'requesting all changes' in stdout
137 137 assert 'adding changesets' in stdout
138 138 assert 'adding manifests' in stdout
139 139 assert 'adding file changes' in stdout
140 140
141 141 assert stderr == ''
142 142
143 143 if vcs == 'git':
144 144 assert '' == stdout
145 145 assert 'Cloning into' in stderr
146 146 assert 'abort:' not in stderr
147 147 assert 'fatal:' not in stderr
@@ -1,257 +1,257 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 py.test config for test suite for making push/pull operations.
23 23
24 24 .. important::
25 25
26 26 You must have git >= 1.8.5 for tests to work fine. With 68b939b git started
27 27 to redirect things to stderr instead of stdout.
28 28 """
29 29
30 30 import ConfigParser
31 31 import os
32 import subprocess
32 import subprocess32
33 33 import tempfile
34 34 import textwrap
35 35 import pytest
36 36
37 37 import rhodecode
38 38 from rhodecode.model.db import Repository
39 39 from rhodecode.model.meta import Session
40 40 from rhodecode.model.settings import SettingsModel
41 41 from rhodecode.tests import (
42 42 GIT_REPO, HG_REPO, TEST_USER_ADMIN_LOGIN, TEST_USER_ADMIN_PASS,)
43 43 from rhodecode.tests.fixture import Fixture
44 44 from rhodecode.tests.utils import (
45 45 set_anonymous_access, is_url_reachable, wait_for_url)
46 46
47 47 RC_LOG = os.path.join(tempfile.gettempdir(), 'rc.log')
48 48 REPO_GROUP = 'a_repo_group'
49 49 HG_REPO_WITH_GROUP = '%s/%s' % (REPO_GROUP, HG_REPO)
50 50 GIT_REPO_WITH_GROUP = '%s/%s' % (REPO_GROUP, GIT_REPO)
51 51
52 52
def assert_no_running_instance(url):
    """Fail the test run when something is already serving ``url``."""
    if not is_url_reachable(url):
        return
    print("Hint: Usually this means another instance of Enterprise "
          "is running in the background.")
    pytest.fail(
        "Port is not free at %s, cannot start web interface" % url)
59 59
60 60
def get_host_url(pylons_config):
    """Construct the host url using the port in the test configuration."""
    parser = ConfigParser.ConfigParser()
    parser.read(pylons_config)
    port = parser.get('server:main', 'port')
    return '127.0.0.1:%s' % port
67 67
68 68
class RcWebServer(object):
    """
    Represents a running RCE web server used as a test fixture.
    """

    def __init__(self, pylons_config):
        self.pylons_config = pylons_config

    def repo_clone_url(self, repo_name, **kwargs):
        """Build an authenticated http clone url for ``repo_name``.

        Keyword arguments override the default admin credentials/host.
        """
        params = {
            'user': TEST_USER_ADMIN_LOGIN,
            'passwd': TEST_USER_ADMIN_PASS,
            'host': get_host_url(self.pylons_config),
            'cloned_repo': repo_name,
        }
        params.update(**kwargs)
        return 'http://%(user)s:%(passwd)s@%(host)s/%(cloned_repo)s' % params
86 86
87 87
@pytest.fixture(scope="module")
def rcextensions(request, pylonsapp, tmpdir_factory):
    """
    Installs a testing rcextensions pack to ensure they work as expected.
    """
    init_content = textwrap.dedent("""
        # Forward import the example rcextensions to make it
        # active for our tests.
        from rhodecode.tests.other.example_rcextensions import *
    """)

    # Note: rcextensions are looked up based on the path of the ini file
    root_path = tmpdir_factory.getbasetemp()
    rcextensions_path = root_path.join('rcextensions')
    init_path = rcextensions_path.join('__init__.py')

    # A leftover pack from a previous (aborted) run would silently shadow
    # this one, so refuse to continue instead.
    if rcextensions_path.check():
        pytest.fail(
            "Path for rcextensions already exists, please clean up before "
            "test run this path: %s" % (rcextensions_path, ))
        return

    request.addfinalizer(rcextensions_path.remove)
    init_path.write_binary(init_content, ensure=True)
112 112
113 113
@pytest.fixture(scope="module")
def repos(request, pylonsapp):
    """Create a copy of each test repo in a repo group."""
    fixture = Fixture()
    repo_group = fixture.create_repo_group(REPO_GROUP)
    repo_group_id = repo_group.group_id
    # Fork both base test repos into the group.
    for source, target in ((HG_REPO, HG_REPO_WITH_GROUP),
                           (GIT_REPO, GIT_REPO_WITH_GROUP)):
        fixture.create_fork(source, source,
                            repo_name_full=target,
                            repo_group=repo_group_id)

    @request.addfinalizer
    def cleanup():
        fixture.destroy_repo(HG_REPO_WITH_GROUP)
        fixture.destroy_repo(GIT_REPO_WITH_GROUP)
        fixture.destroy_repo_group(repo_group_id)
132 132
133 133
@pytest.fixture(scope="module")
def rc_web_server_config(pylons_config):
    """
    Configuration file used for the fixture `rc_web_server`.
    """
    # Currently simply forwards the main test ini file; kept as a separate
    # fixture so individual modules can substitute their own config.
    return pylons_config
140 140
141 141
@pytest.fixture(scope="module")
def rc_web_server(
        request, pylonsapp, rc_web_server_config, repos, rcextensions):
    """
    Run the web server as a subprocess.

    Since we have already a running vcsserver, this is not spawned again.

    :return: :class:`RcWebServer` handle for building clone urls.
    """
    env = os.environ.copy()
    env['RC_NO_TMP_PATH'] = '1'

    server_out = open(RC_LOG, 'w')

    # TODO: Would be great to capture the output and err of the subprocess
    # and make it available in a section of the py.test report in case of an
    # error.

    host_url = 'http://' + get_host_url(rc_web_server_config)
    assert_no_running_instance(host_url)
    command = ['rcserver', rc_web_server_config]

    print('Starting rcserver: {}'.format(host_url))
    print('Command: {}'.format(command))
    print('Logfile: {}'.format(RC_LOG))

    proc = subprocess32.Popen(
        command, bufsize=0, env=env, stdout=server_out, stderr=server_out)

    wait_for_url(host_url, timeout=30)

    @request.addfinalizer
    def stop_web_server():
        # TODO: Find out how to integrate with the reporting of py.test to
        # make this information available.
        # Fix: use the print *function* form for consistency with the prints
        # above (the original used a Python-2-only print statement here).
        print("\nServer log file written to %s" % (RC_LOG, ))
        proc.kill()
        server_out.close()

    return RcWebServer(rc_web_server_config)
181 181
182 182
@pytest.fixture(scope='class', autouse=True)
def disable_anonymous_user_access(pylonsapp):
    # Tests in this module authenticate explicitly; make sure anonymous
    # access cannot mask a missing/broken login.
    set_anonymous_access(False)
186 186
187 187
@pytest.fixture
def disable_locking(pylonsapp):
    """Unlock the test git and hg repos and switch repo locking off."""
    for repo_name in (GIT_REPO, HG_REPO):
        repo = Repository.get_by_repo_name(repo_name)
        Repository.unlock(repo)
        repo.enable_locking = False
        Session().add(repo)
        Session().commit()
201 201
202 202
@pytest.fixture
def enable_auth_plugins(request, pylonsapp, csrf_token):
    """
    Return a factory object that when called, allows to control which
    authentication plugins are enabled.
    """
    def _enable_plugins(plugins_list, override=None):
        override = override or {}
        params = {
            'auth_plugins': ','.join(plugins_list),
            'csrf_token': csrf_token,
        }

        for module in plugins_list:
            plugin = rhodecode.authentication.base.loadplugin(module)
            plugin_name = plugin.name
            enabled_plugin = 'auth_%s_enabled' % plugin_name
            cache_ttl = 'auth_%s_cache_ttl' % plugin_name

            # default params that are needed for each plugin,
            # `enabled` and `cache_ttl`
            params.update({
                enabled_plugin: True,
                cache_ttl: 0
            })
            # Bug fix: the original tested ``override.get`` — the bound
            # method object, which is always truthy. Test the mapping
            # itself; the net effect is identical but the intent is clear.
            if override:
                params.update(override.get(module, {}))

        validated_params = params
        for k, v in validated_params.items():
            setting = SettingsModel().create_or_update_setting(k, v)
            Session().add(setting)
            Session().commit()

    def cleanup():
        # Restore the default built-in auth plugin after the test.
        _enable_plugins(['egg:rhodecode-enterprise-ce#rhodecode'])

    request.addfinalizer(cleanup)

    return _enable_plugins
243 243
244 244
@pytest.fixture
def fs_repo_only(request, rhodecode_fixtures):
    """Factory creating repos that exist on disk but not in the database."""
    def _make_fs_repo(repo_name, repo_type):
        # Create normally, then drop only the DB record so just the
        # filesystem copy remains.
        rhodecode_fixtures.create_repo(repo_name, repo_type=repo_type)
        rhodecode_fixtures.destroy_repo(repo_name, fs_remove=False)

        def cleanup():
            rhodecode_fixtures.destroy_repo(repo_name, fs_remove=True)
            rhodecode_fixtures.destroy_repo_on_filesystem(repo_name)

        request.addfinalizer(cleanup)

    return _make_fs_repo
@@ -1,1791 +1,1791 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import collections
22 22 import datetime
23 23 import hashlib
24 24 import os
25 25 import re
26 26 import pprint
27 27 import shutil
28 28 import socket
29 import subprocess
29 import subprocess32
30 30 import time
31 31 import uuid
32 32
33 33 import mock
34 34 import pyramid.testing
35 35 import pytest
36 36 import colander
37 37 import requests
38 38 from webtest.app import TestApp
39 39
40 40 import rhodecode
41 41 from rhodecode.model.changeset_status import ChangesetStatusModel
42 42 from rhodecode.model.comment import ChangesetCommentsModel
43 43 from rhodecode.model.db import (
44 44 PullRequest, Repository, RhodeCodeSetting, ChangesetStatus, RepoGroup,
45 UserGroup, RepoRhodeCodeUi, RepoRhodeCodeSetting, RhodeCodeUi, Integration)
45 UserGroup, RepoRhodeCodeUi, RepoRhodeCodeSetting, RhodeCodeUi)
46 46 from rhodecode.model.meta import Session
47 47 from rhodecode.model.pull_request import PullRequestModel
48 48 from rhodecode.model.repo import RepoModel
49 49 from rhodecode.model.repo_group import RepoGroupModel
50 50 from rhodecode.model.user import UserModel
51 51 from rhodecode.model.settings import VcsSettingsModel
52 52 from rhodecode.model.user_group import UserGroupModel
53 53 from rhodecode.model.integration import IntegrationModel
54 54 from rhodecode.integrations import integration_type_registry
55 55 from rhodecode.integrations.types.base import IntegrationTypeBase
56 56 from rhodecode.lib.utils import repo2db_mapper
57 57 from rhodecode.lib.vcs import create_vcsserver_proxy
58 58 from rhodecode.lib.vcs.backends import get_backend
59 59 from rhodecode.lib.vcs.nodes import FileNode
60 60 from rhodecode.tests import (
61 61 login_user_session, get_new_dir, utils, TESTS_TMP_PATH,
62 62 TEST_USER_ADMIN_LOGIN, TEST_USER_REGULAR_LOGIN, TEST_USER_REGULAR2_LOGIN,
63 63 TEST_USER_REGULAR_PASS)
64 64 from rhodecode.tests.fixture import Fixture
65 65
66 66
67 67 def _split_comma(value):
68 68 return value.split(',')
69 69
70 70
def pytest_addoption(parser):
    """Register the custom command line options of this test suite."""
    # --- test environment shaping ---
    parser.addoption(
        '--keep-tmp-path', action='store_true',
        help="Keep the test temporary directories")
    parser.addoption(
        '--backends', action='store', type=_split_comma,
        default=['git', 'hg', 'svn'],
        help="Select which backends to test for backend specific tests.")
    parser.addoption(
        '--dbs', action='store', type=_split_comma,
        default=['sqlite'],
        help="Select which database to test for database specific tests. "
        "Possible options are sqlite,postgres,mysql")
    # --- appenlight statistics tracking ---
    parser.addoption(
        '--appenlight', '--ae', action='store_true',
        help="Track statistics in appenlight.")
    parser.addoption(
        '--appenlight-api-key', '--ae-key',
        help="API key for Appenlight.")
    parser.addoption(
        '--appenlight-url', '--ae-url',
        default="https://ae.rhodecode.com",
        help="Appenlight service URL, defaults to https://ae.rhodecode.com")
    # --- connection strings consumed by the `--dbs` variants ---
    parser.addoption(
        '--sqlite-connection-string', action='store',
        default='', help="Connection string for the dbs tests with SQLite")
    parser.addoption(
        '--postgres-connection-string', action='store',
        default='', help="Connection string for the dbs tests with Postgres")
    parser.addoption(
        '--mysql-connection-string', action='store',
        default='', help="Connection string for the dbs tests with MySQL")
    parser.addoption(
        '--repeat', type=int, default=100,
        help="Number of repetitions in performance tests.")
106 106
107 107
def pytest_configure(config):
    # Apply the kombu patch early on; needed for test discovery on
    # Python 2.7.11.
    from rhodecode.config import patches
    patches.kombu_1_5_1_python_2_7_11()
112 112
113 113
def pytest_collection_modifyitems(session, config, items):
    # nottest marked, compare nose, used for transition from nose to pytest
    items[:] = [item for item in items
                if getattr(item.obj, '__test__', True)]
119 119
120 120
def pytest_generate_tests(metafunc):
    # Support test generation based on --backend parameter
    if 'backend_alias' in metafunc.fixturenames:
        backends = get_backends_from_metafunc(metafunc)
        if not backends:
            pytest.skip("Not enabled for any of selected backends")
        metafunc.parametrize('backend_alias', backends, scope=None)
    elif hasattr(metafunc.function, 'backends'):
        if not get_backends_from_metafunc(metafunc):
            pytest.skip("Not enabled for any of selected backends")
133 133
134 134
def get_backends_from_metafunc(metafunc):
    """Intersect the CLI-requested backends with those the test supports."""
    requested_backends = set(metafunc.config.getoption('--backends'))
    if hasattr(metafunc.function, 'backends'):
        # Supported backends by this test function, created from
        # pytest.mark.backends
        supported = metafunc.function.backends.args
    elif hasattr(metafunc.cls, 'backend_alias'):
        # Support class attribute "backend_alias", this is mainly
        # for legacy reasons for tests not yet using pytest.mark.backends
        supported = [metafunc.cls.backend_alias]
    else:
        supported = metafunc.config.getoption('--backends')
    return requested_backends.intersection(supported)
148 148
149 149
@pytest.fixture(scope='session', autouse=True)
def activate_example_rcextensions(request):
    """
    Patch in an example rcextensions module which verifies passed in kwargs.
    """
    from rhodecode.tests.other import example_rcextensions

    old_extensions = rhodecode.EXTENSIONS
    rhodecode.EXTENSIONS = example_rcextensions
    request.addfinalizer(
        lambda: setattr(rhodecode, 'EXTENSIONS', old_extensions))
163 163
164 164
@pytest.fixture
def capture_rcextensions():
    """
    Returns the recorded calls to entry points in rcextensions.
    """
    recorded = rhodecode.EXTENSIONS.calls
    recorded.clear()
    # The dict is empty right now; the test run fills it in place, so
    # handing out the reference is enough for callers to observe calls.
    return recorded
176 176
177 177
@pytest.fixture(scope='session')
def http_environ_session():
    """
    Allow to use "http_environ" in session scope.
    """
    stub = http_host_stub()
    return http_environ(http_host_stub=stub)
185 185
186 186
187 187 @pytest.fixture
188 188 def http_host_stub():
189 189 """
190 190 Value of HTTP_HOST in the test run.
191 191 """
192 192 return 'test.example.com:80'
193 193
194 194
195 195 @pytest.fixture
196 196 def http_environ(http_host_stub):
197 197 """
198 198 HTTP extra environ keys.
199 199
200 200 User by the test application and as well for setting up the pylons
201 201 environment. In the case of the fixture "app" it should be possible
202 202 to override this for a specific test case.
203 203 """
204 204 return {
205 205 'SERVER_NAME': http_host_stub.split(':')[0],
206 206 'SERVER_PORT': http_host_stub.split(':')[1],
207 207 'HTTP_HOST': http_host_stub,
208 208 }
209 209
210 210
@pytest.fixture(scope='function')
def app(request, pylonsapp, http_environ):
    """Wrap the pylons WSGI app in a fresh ``TestApp`` per test."""
    test_app = TestApp(pylonsapp, extra_environ=http_environ)
    if request.cls:
        # Legacy unittest-style test classes expect ``self.app``.
        request.cls.app = test_app
    return test_app
219 219
220 220
@pytest.fixture(scope='session')
def app_settings(pylonsapp, pylons_config):
    """
    Settings dictionary used to create the app.

    Parses the ini file and passes the result through the sanitize and apply
    defaults mechanism in `rhodecode.config.middleware`.
    """
    from paste.deploy.loadwsgi import loadcontext, APP
    from rhodecode.config.middleware import (
        sanitize_settings_and_apply_defaults)
    context = loadcontext(APP, 'config:' + pylons_config)
    return sanitize_settings_and_apply_defaults(context.config())
235 235
236 236
@pytest.fixture(scope='session')
def db(app_settings):
    """
    Initializes the database connection.

    It uses the same settings which are used to create the ``pylonsapp`` or
    ``app`` fixtures.
    """
    from rhodecode.config.utils import initialize_database
    initialize_database(app_settings)
247 247
248 248
# Small record passed back by the autologin fixtures.
LoginData = collections.namedtuple('LoginData', ('csrf_token', 'user'))


def _autologin_user(app, *args):
    """Log in through the web app and return csrf token plus the user."""
    session = login_user_session(app, *args)
    token = rhodecode.lib.auth.get_csrf_token(session)
    return LoginData(csrf_token=token, user=session['rhodecode_user'])
256 256
257 257
@pytest.fixture
def autologin_user(app):
    """
    Utility fixture which makes sure that the admin user is logged in
    """
    # Uses the default (admin) credentials of `_autologin_user`.
    return _autologin_user(app)
264 264
265 265
@pytest.fixture
def autologin_regular_user(app):
    """
    Utility fixture which makes sure that the regular user is logged in
    """
    # Same helper as `autologin_user`, but with non-admin credentials.
    return _autologin_user(
        app, TEST_USER_REGULAR_LOGIN, TEST_USER_REGULAR_PASS)
273 273
274 274
@pytest.fixture(scope='function')
def csrf_token(request, autologin_user):
    # Convenience accessor for the token of the auto-logged-in admin.
    return autologin_user.csrf_token
278 278
279 279
280 280 @pytest.fixture(scope='function')
281 281 def xhr_header(request):
282 282 return {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
283 283
284 284
@pytest.fixture
def real_crypto_backend(monkeypatch):
    """
    Switch the production crypto backend on for this test.

    During the test run the crypto backend is replaced with a faster
    implementation based on the MD5 algorithm.
    """
    # Clearing the test flag makes the application pick the real backend.
    monkeypatch.setattr(rhodecode, 'is_test', False)
294 294
295 295
@pytest.fixture(scope='class')
def index_location(request, pylonsapp):
    """Expose the configured full text search index location."""
    location = pylonsapp.config['app_conf']['search.location']
    if request.cls:
        # Make it reachable as an attribute for unittest-style classes.
        request.cls.index_location = location
    return location
302 302
303 303
@pytest.fixture(scope='session', autouse=True)
def tests_tmp_path(request):
    """
    Create temporary directory to be used during the test session.
    """
    if not os.path.exists(TESTS_TMP_PATH):
        os.makedirs(TESTS_TMP_PATH)

    # Keep the directory around when --keep-tmp-path was given.
    if not request.config.getoption('--keep-tmp-path'):
        request.addfinalizer(lambda: shutil.rmtree(TESTS_TMP_PATH))

    return TESTS_TMP_PATH
318 318
319 319
@pytest.fixture(scope='session', autouse=True)
def patch_pyro_request_scope_proxy_factory(request):
    """
    Patch the pyro proxy factory to always use the same dummy request object
    when under test. This will return the same pyro proxy on every call.
    """
    stub_request = pyramid.testing.DummyRequest()

    def patched_call(self, request=None):
        # Ignore the incoming request and pin the proxy to the stub.
        return self.getProxy(request=stub_request)

    patcher = mock.patch(
        'rhodecode.lib.vcs.client.RequestScopeProxyFactory.__call__',
        new=patched_call)
    patcher.start()
    request.addfinalizer(patcher.stop)
339 339
340 340
@pytest.fixture
def test_repo_group(request):
    """
    Create a temporary repository group, and destroy it after
    usage automatically
    """
    fixture = Fixture()
    group_id = 'test_repo_group_%s' % int(time.time())
    repo_group = fixture.create_repo_group(group_id)
    request.addfinalizer(lambda: fixture.destroy_repo_group(group_id))
    return repo_group
356 356
357 357
@pytest.fixture
def test_user_group(request):
    """
    Create a temporary user group, and destroy it after
    usage automatically
    """
    fixture = Fixture()
    group_id = 'test_user_group_%s' % int(time.time())
    user_group = fixture.create_user_group(group_id)
    request.addfinalizer(lambda: fixture.destroy_user_group(user_group))
    return user_group
373 373
374 374
@pytest.fixture(scope='session')
def test_repo(request):
    # Session-wide container; repos are created lazily on first access and
    # all destroyed once at the end of the session.
    container = TestRepoContainer()
    request.addfinalizer(container._cleanup)
    return container
380 380
381 381
class TestRepoContainer(object):
    """
    Container for test repositories which are used read only.

    Repositories will be created on demand and re-used during the lifetime
    of this object.

    Usage to get the svn test repository "minimal"::

        test_repo = TestContainer()
        repo = test_repo('minimal', 'svn')

    """

    dump_extractors = {
        'git': utils.extract_git_repo_from_dump,
        'hg': utils.extract_hg_repo_from_dump,
        'svn': utils.extract_svn_repo_from_dump,
    }

    def __init__(self):
        self._cleanup_repos = []
        self._fixture = Fixture()
        self._repos = {}

    def __call__(self, dump_name, backend_alias):
        """Return the repo for (dump, backend), creating it on first use."""
        key = (dump_name, backend_alias)
        if key not in self._repos:
            created = self._create_repo(dump_name, backend_alias)
            self._repos[key] = created.repo_id
        return Repository.get(self._repos[key])

    def _create_repo(self, dump_name, backend_alias):
        # Extract the dump to disk, wrap it in a vcs backend instance and
        # register it in the database.
        repo_name = '%s-%s' % (backend_alias, dump_name)
        extract = self.dump_extractors[backend_alias]
        repo_path = extract(dump_name, repo_name)
        vcs_repo = get_backend(backend_alias)(repo_path)
        repo2db_mapper({repo_name: vcs_repo})
        repo = RepoModel().get_by_repo_name(repo_name)
        self._cleanup_repos.append(repo_name)
        return repo

    def _cleanup(self):
        # Destroy in reverse creation order.
        for repo_name in reversed(self._cleanup_repos):
            self._fixture.destroy_repo(repo_name)
428 428
429 429
@pytest.fixture
def backend(request, backend_alias, pylonsapp, test_repo):
    """
    Parametrized fixture which represents a single backend implementation.

    It respects the option `--backends` to focus the test run on specific
    backend implementations.

    It also supports `pytest.mark.xfail_backends` to mark tests as failing
    for specific backends. This is intended as a utility for incremental
    development of a new backend implementation.
    """
    selected = request.config.getoption('--backends')
    if backend_alias not in selected:
        pytest.skip("Backend %s not selected." % (backend_alias, ))

    utils.check_xfail_backends(request.node, backend_alias)
    utils.check_skip_backends(request.node, backend_alias)

    backend_instance = Backend(
        alias=backend_alias,
        repo_name='vcs_test_%s' % (backend_alias, ),
        test_name=request.node.name,
        test_repo_container=test_repo)
    request.addfinalizer(backend_instance.cleanup)
    return backend_instance
456 456
457 457
@pytest.fixture
def backend_git(request, pylonsapp, test_repo):
    # Convenience wrapper: the `backend` fixture pinned to git.
    return backend(request, 'git', pylonsapp, test_repo)
461 461
462 462
@pytest.fixture
def backend_hg(request, pylonsapp, test_repo):
    # Convenience wrapper: the `backend` fixture pinned to hg.
    return backend(request, 'hg', pylonsapp, test_repo)
466 466
467 467
@pytest.fixture
def backend_svn(request, pylonsapp, test_repo):
    # Convenience wrapper: the `backend` fixture pinned to svn.
    return backend(request, 'svn', pylonsapp, test_repo)
471 471
472 472
@pytest.fixture
def backend_random(backend_git):
    """
    Use this to express that your tests need "a backend.

    A few of our tests need a backend, so that we can run the code. This
    fixture is intended to be used for such cases. It will pick one of the
    backends and run the tests.

    The fixture `backend` would run the test multiple times for each
    available backend which is a pure waste of time if the test is
    independent of the backend type.
    """
    # TODO: johbo: Change this to pick a random backend
    return backend_git
488 488
489 489
@pytest.fixture
def backend_stub(backend_git):
    """
    Use this to express that your tests need a backend stub

    TODO: mikhail: Implement a real stub logic instead of returning
    a git backend
    """
    return backend_git
499 499
500 500
501 501 @pytest.fixture
502 502 def repo_stub(backend_stub):
503 503 """
504 504 Use this to express that your tests need a repository stub
505 505 """
506 506 return backend_stub.create_repo()
507 507
508 508
class Backend(object):
    """
    Represents the test configuration for one supported backend

    Provides easy access to different test repositories based on
    `__getitem__`. Such repositories will only be created once per test
    session.
    """

    # Matches every character which is not allowed in a repository name.
    invalid_repo_name = re.compile(r'[^0-9a-zA-Z]+')
    # NOTE(review): class-level attributes — `_commit_ids` is a mutable dict
    # shared by all instances until `_add_commits_to_repo` rebinds it per
    # instance; confirm this sharing is intended.
    _master_repo = None
    _commit_ids = {}

    def __init__(self, alias, repo_name, test_name, test_repo_container):
        self.alias = alias
        self.repo_name = repo_name
        self._cleanup_repos = []
        self._test_name = test_name
        self._test_repo_container = test_repo_container
        # TODO: johbo: Used as a delegate interim. Not yet sure if Backend or
        # Fixture will survive in the end.
        self._fixture = Fixture()

    def __getitem__(self, key):
        # Look up a pre-created test repository by key for this backend alias.
        return self._test_repo_container(key, self.alias)

    @property
    def repo(self):
        """
        Returns the "current" repository. This is the vcs_test repo or the
        last repo which has been created with `create_repo`.
        """
        # Imported locally, presumably to avoid an import cycle at module
        # load time — TODO confirm.
        from rhodecode.model.db import Repository
        return Repository.get_by_repo_name(self.repo_name)

    @property
    def default_branch_name(self):
        VcsRepository = get_backend(self.alias)
        return VcsRepository.DEFAULT_BRANCH_NAME

    @property
    def default_head_id(self):
        """
        Returns the default head id of the underlying backend.

        This will be the default branch name in case the backend does have a
        default branch. In the other cases it will point to a valid head
        which can serve as the base to create a new commit on top of it.
        """
        vcsrepo = self.repo.scm_instance()
        head_id = (
            vcsrepo.DEFAULT_BRANCH_NAME or
            vcsrepo.commit_ids[-1])
        return head_id

    @property
    def commit_ids(self):
        """
        Returns the list of commits for the last created repository
        """
        return self._commit_ids

    def create_master_repo(self, commits):
        """
        Create a repository and remember it as a template.

        This allows to easily create derived repositories to construct
        more complex scenarios for diff, compare and pull requests.

        Returns a commit map which maps from commit message to raw_id.
        """
        self._master_repo = self.create_repo(commits=commits)
        return self._commit_ids

    def create_repo(
            self, commits=None, number_of_commits=0, heads=None,
            name_suffix=u'', **kwargs):
        """
        Create a repository and record it for later cleanup.

        Note that this rebinds `self.repo_name`, so the `repo` property
        points to the new repository afterwards.

        :param commits: Optional. A sequence of dict instances.
            Will add a commit per entry to the new repository.
        :param number_of_commits: Optional. If set to a number, this number of
            commits will be added to the new repository.
        :param heads: Optional. Can be set to a sequence of of commit
            names which shall be pulled in from the master repository.

        """
        self.repo_name = self._next_repo_name() + name_suffix
        repo = self._fixture.create_repo(
            self.repo_name, repo_type=self.alias, **kwargs)
        self._cleanup_repos.append(repo.repo_name)

        commits = commits or [
            {'message': 'Commit %s of %s' % (x, self.repo_name)}
            for x in xrange(number_of_commits)]
        self._add_commits_to_repo(repo.scm_instance(), commits)
        if heads:
            self.pull_heads(repo, heads)

        return repo

    def pull_heads(self, repo, heads):
        """
        Make sure that repo contains all commits mentioned in `heads`
        """
        vcsmaster = self._master_repo.scm_instance()
        vcsrepo = repo.scm_instance()
        # Disable hooks so that pulling does not trigger side effects.
        vcsrepo.config.clear_section('hooks')
        commit_ids = [self._commit_ids[h] for h in heads]
        vcsrepo.pull(vcsmaster.path, commit_ids=commit_ids)

    def create_fork(self):
        # Forks the "current" repository; `repo_name` is rebound to the fork.
        repo_to_fork = self.repo_name
        self.repo_name = self._next_repo_name()
        repo = self._fixture.create_fork(repo_to_fork, self.repo_name)
        self._cleanup_repos.append(self.repo_name)
        return repo

    def new_repo_name(self, suffix=u''):
        # Reserve a fresh repository name and schedule it for cleanup without
        # actually creating the repository.
        self.repo_name = self._next_repo_name() + suffix
        self._cleanup_repos.append(self.repo_name)
        return self.repo_name

    def _next_repo_name(self):
        # Unique name derived from the sanitized test name plus a counter.
        return u"%s_%s" % (
            self.invalid_repo_name.sub(u'_', self._test_name),
            len(self._cleanup_repos))

    def ensure_file(self, filename, content='Test content\n'):
        assert self._cleanup_repos, "Avoid writing into vcs_test repos"
        commits = [
            {'added': [
                FileNode(filename, content=content),
            ]},
        ]
        self._add_commits_to_repo(self.repo.scm_instance(), commits)

    def enable_downloads(self):
        repo = self.repo
        repo.enable_downloads = True
        Session().add(repo)
        Session().commit()

    def cleanup(self):
        # Destroy in reverse creation order, so that forks are removed before
        # the repositories they were forked from.
        for repo_name in reversed(self._cleanup_repos):
            self._fixture.destroy_repo(repo_name)

    def _add_commits_to_repo(self, repo, commits):
        commit_ids = _add_commits_to_repo(repo, commits)
        if not commit_ids:
            return
        self._commit_ids = commit_ids

        # Creating refs for Git to allow fetching them from remote repository
        if self.alias == 'git':
            refs = {}
            for message in self._commit_ids:
                # TODO: mikhail: do more special chars replacements
                ref_name = 'refs/test-refs/{}'.format(
                    message.replace(' ', ''))
                refs[ref_name] = self._commit_ids[message]
            self._create_refs(repo, refs)

    def _create_refs(self, repo, refs):
        for ref_name in refs:
            repo.set_refs(ref_name, refs[ref_name])
676 676
677 677
@pytest.fixture
def vcsbackend(request, backend_alias, tests_tmp_path, pylonsapp, test_repo):
    """
    Parametrized fixture which represents a single vcs backend implementation.

    See the fixture `backend` for more details. This one implements the same
    concept, but on vcs level. So it does not provide model instances etc.

    Parameters are generated dynamically, see :func:`pytest_generate_tests`
    for how this works.
    """
    selected = request.config.getoption('--backends')
    if backend_alias not in selected:
        pytest.skip("Backend %s not selected." % (backend_alias, ))

    utils.check_xfail_backends(request.node, backend_alias)
    utils.check_skip_backends(request.node, backend_alias)

    test_repo_name = 'vcs_test_%s' % (backend_alias, )
    vcs_backend = VcsBackend(
        alias=backend_alias,
        repo_path=os.path.join(tests_tmp_path, test_repo_name),
        test_name=request.node.name,
        test_repo_container=test_repo)
    request.addfinalizer(vcs_backend.cleanup)
    return vcs_backend
704 704
705 705
@pytest.fixture
def vcsbackend_git(request, tests_tmp_path, pylonsapp, test_repo):
    """`vcsbackend` fixture bound to the Git backend."""
    return vcsbackend(request, 'git', tests_tmp_path, pylonsapp, test_repo)


@pytest.fixture
def vcsbackend_hg(request, tests_tmp_path, pylonsapp, test_repo):
    """`vcsbackend` fixture bound to the Mercurial backend."""
    return vcsbackend(request, 'hg', tests_tmp_path, pylonsapp, test_repo)


@pytest.fixture
def vcsbackend_svn(request, tests_tmp_path, pylonsapp, test_repo):
    """`vcsbackend` fixture bound to the Subversion backend."""
    return vcsbackend(request, 'svn', tests_tmp_path, pylonsapp, test_repo)


@pytest.fixture
def vcsbackend_random(vcsbackend_git):
    """
    Use this to express that your tests need "a vcsbackend", no matter which.

    Running such a test once is enough; parametrizing it over every vcs
    backend (as `vcsbackend` does) would only waste time when the test is
    independent of the vcs backend type.
    """
    # TODO: johbo: Change this to pick a random backend
    return vcsbackend_git


@pytest.fixture
def vcsbackend_stub(vcsbackend_git):
    """
    Use this to express that your test just needs a stub of a vcsbackend.

    Plan is to eventually implement an in-memory stub to speed tests up.
    """
    return vcsbackend_git
742 742
743 743
class VcsBackend(object):
    """
    Represents the test configuration for one supported vcs backend.
    """

    # Matches every character which is not allowed in a repository name.
    invalid_repo_name = re.compile(r'[^0-9a-zA-Z]+')

    def __init__(self, alias, repo_path, test_name, test_repo_container):
        self.alias = alias
        self._repo_path = repo_path
        self._cleanup_repos = []
        self._test_name = test_name
        self._test_repo_container = test_repo_container

    def __getitem__(self, key):
        # Look up a pre-created test repository and return its vcs instance.
        return self._test_repo_container(key, self.alias).scm_instance()

    @property
    def repo(self):
        """
        Returns the "current" repository. This is the vcs_test repo of the last
        repo which has been created.
        """
        Repository = get_backend(self.alias)
        return Repository(self._repo_path)

    @property
    def backend(self):
        """
        Returns the backend implementation class.
        """
        return get_backend(self.alias)

    def create_repo(self, commits=None, number_of_commits=0, _clone_repo=None):
        # Create a new vcs-level repository (optionally cloned from
        # `_clone_repo`) and register it for cleanup. Rebinds `_repo_path`,
        # so the `repo` property points to the new repository afterwards.
        repo_name = self._next_repo_name()
        self._repo_path = get_new_dir(repo_name)
        repo_class = get_backend(self.alias)
        src_url = None
        if _clone_repo:
            src_url = _clone_repo.path
        repo = repo_class(self._repo_path, create=True, src_url=src_url)
        self._cleanup_repos.append(repo)

        commits = commits or [
            {'message': 'Commit %s of %s' % (x, repo_name)}
            for x in xrange(number_of_commits)]
        _add_commits_to_repo(repo, commits)
        return repo

    def clone_repo(self, repo):
        return self.create_repo(_clone_repo=repo)

    def cleanup(self):
        # Remove the working directories of all repositories created here.
        for repo in self._cleanup_repos:
            shutil.rmtree(repo.path)

    def new_repo_path(self):
        # Reserve a fresh repository directory without creating a repo in it.
        repo_name = self._next_repo_name()
        self._repo_path = get_new_dir(repo_name)
        return self._repo_path

    def _next_repo_name(self):
        # Unique name derived from the sanitized test name plus a counter.
        return "%s_%s" % (
            self.invalid_repo_name.sub('_', self._test_name),
            len(self._cleanup_repos))

    def add_file(self, repo, filename, content='Test content\n'):
        # Commit a single new file to `repo` via its in-memory commit API.
        imc = repo.in_memory_commit
        imc.add(FileNode(filename, content=content))
        imc.commit(
            message=u'Automatic commit from vcsbackend fixture',
            author=u'Automatic')

    def ensure_file(self, filename, content='Test content\n'):
        assert self._cleanup_repos, "Avoid writing into vcs_test repos"
        self.add_file(self.repo, filename, content)
820 820
821 821
def _add_commits_to_repo(vcs_repo, commits):
    """
    Add the given `commits` to `vcs_repo` using its in-memory commit API.

    Each entry of `commits` is a dict which may carry the keys `message`,
    `added`, `changed`, `removed`, `parents`, `author`, `date` and `branch`.

    :return: dict mapping commit message to raw commit id; empty dict if
        `commits` is falsy.
    """
    commit_ids = {}
    if not commits:
        return commit_ids

    imc = vcs_repo.in_memory_commit
    commit = None

    for idx, commit in enumerate(commits):
        message = unicode(commit.get('message', 'Commit %s' % idx))

        for node in commit.get('added', []):
            imc.add(FileNode(node.path, content=node.content))
        for node in commit.get('changed', []):
            imc.change(FileNode(node.path, content=node.content))
        for node in commit.get('removed', []):
            imc.remove(FileNode(node.path))

        # Parents are referenced by the message of an earlier entry of this
        # same call, so a parent must appear before its children in `commits`.
        parents = [
            vcs_repo.get_commit(commit_id=commit_ids[p])
            for p in commit.get('parents', [])]

        # Ensure every commit touches at least one file by adding a dummy
        # file when no explicit file operation was requested.
        operations = ('added', 'changed', 'removed')
        if not any((commit.get(o) for o in operations)):
            imc.add(FileNode('file_%s' % idx, content=message))

        # NOTE: rebinds the loop variable `commit` from the input dict to the
        # created commit object.
        commit = imc.commit(
            message=message,
            author=unicode(commit.get('author', 'Automatic')),
            date=commit.get('date'),
            branch=commit.get('branch'),
            parents=parents)

        commit_ids[commit.message] = commit.raw_id

    return commit_ids
858 858
859 859
@pytest.fixture
def reposerver(request):
    """
    Allows to serve a backend repository
    """
    server = RepoServer()
    request.addfinalizer(server.cleanup)
    return server
869 869
870 870
class RepoServer(object):
    """
    Utility to serve a local repository for the duration of a test case.

    Supports only Subversion so far.
    """

    # Base URL under which the served repository is reachable; set by `serve`.
    url = None

    def __init__(self):
        self._cleanup_servers = []

    def serve(self, vcsrepo):
        """
        Start an `svnserve` daemon serving `vcsrepo` and set `self.url`.

        :param vcsrepo: a Subversion vcs repository instance.
        :raises TypeError: if the repository's backend is not Subversion.
        """
        if vcsrepo.alias != 'svn':
            raise TypeError("Backend %s not supported" % vcsrepo.alias)

        proc = subprocess32.Popen(
            ['svnserve', '-d', '--foreground', '--listen-host', 'localhost',
             '--root', vcsrepo.path])
        self._cleanup_servers.append(proc)
        self.url = 'svn://localhost'

    def cleanup(self):
        """Terminate all started servers and reap the child processes."""
        for proc in self._cleanup_servers:
            proc.terminate()
        # Wait in a second pass (after signalling everything) so shutdown
        # happens in parallel; without wait() the terminated children linger
        # as zombie processes for the rest of the test session.
        for proc in self._cleanup_servers:
            proc.wait()
896 896
897 897
@pytest.fixture
def pr_util(backend, request):
    """
    Utility for tests of models and for functional tests around pull requests.

    It gives an instance of :class:`PRTestUtility` which provides various
    utility methods around one pull request.

    This fixture uses `backend` and inherits its parameterization.
    """
    util = PRTestUtility(backend)
    request.addfinalizer(util.cleanup)
    return util
916 916
917 917
class PRTestUtility(object):
    """
    Helper around one pull request between two freshly created repositories.

    An instance is created per test by the `pr_util` fixture. The pull
    request itself is created lazily by `create_pull_request` and removed
    again in `cleanup`.
    """

    # Created lazily by `create_pull_request`.
    pull_request = None
    pull_request_id = None
    # Patcher/mock pair installed by `set_mergeable`.
    mergeable_patcher = None
    mergeable_mock = None
    # Patcher installed when notifications are disabled.
    notification_patcher = None

    def __init__(self, backend):
        self.backend = backend

    def create_pull_request(
            self, commits=None, target_head=None, source_head=None,
            revisions=None, approved=False, author=None, mergeable=False,
            enable_notifications=True, name_suffix=u'', reviewers=None,
            title=u"Test", description=u"Description"):
        # Creates the pull request only once; subsequent calls return the
        # already created one.
        self.set_mergeable(mergeable)
        if not enable_notifications:
            # mock notification side effect
            self.notification_patcher = mock.patch(
                'rhodecode.model.notification.NotificationModel.create')
            self.notification_patcher.start()

        if not self.pull_request:
            if not commits:
                # Default scenario: three commits, PR from c2 onto c1.
                commits = [
                    {'message': 'c1'},
                    {'message': 'c2'},
                    {'message': 'c3'},
                ]
                target_head = 'c1'
                source_head = 'c2'
                revisions = ['c2']

            self.commit_ids = self.backend.create_master_repo(commits)
            self.target_repository = self.backend.create_repo(
                heads=[target_head], name_suffix=name_suffix)
            self.source_repository = self.backend.create_repo(
                heads=[source_head], name_suffix=name_suffix)
            self.author = author or UserModel().get_by_username(
                TEST_USER_ADMIN_LOGIN)

            model = PullRequestModel()
            self.create_parameters = {
                'created_by': self.author,
                'source_repo': self.source_repository.repo_name,
                'source_ref': self._default_branch_reference(source_head),
                'target_repo': self.target_repository.repo_name,
                'target_ref': self._default_branch_reference(target_head),
                'revisions': [self.commit_ids[r] for r in revisions],
                'reviewers': reviewers or self._get_reviewers(),
                'title': title,
                'description': description,
            }
            self.pull_request = model.create(**self.create_parameters)
            # A freshly created pull request must not have any versions yet.
            assert model.get_versions(self.pull_request) == []

            self.pull_request_id = self.pull_request.pull_request_id

            if approved:
                self.approve()

            Session().add(self.pull_request)
            Session().commit()

        return self.pull_request

    def approve(self):
        # Let every reviewer vote "approved".
        self.create_status_votes(
            ChangesetStatus.STATUS_APPROVED,
            *self.pull_request.reviewers)

    def close(self):
        PullRequestModel().close_pull_request(self.pull_request, self.author)

    def _default_branch_reference(self, commit_message):
        # Builds a "branch:<name>:<commit_id>" reference string, resolving
        # the commit by its message.
        reference = '%s:%s:%s' % (
            'branch',
            self.backend.default_branch_name,
            self.commit_ids[commit_message])
        return reference

    def _get_reviewers(self):
        model = UserModel()
        return [
            model.get_by_username(TEST_USER_REGULAR_LOGIN),
            model.get_by_username(TEST_USER_REGULAR2_LOGIN),
        ]

    def update_source_repository(self, head=None):
        # Pull an additional head (default 'c3') into the source repository.
        heads = [head or 'c3']
        self.backend.pull_heads(self.source_repository, heads=heads)

    def add_one_commit(self, head=None):
        """Add one commit to the source repo and return its commit id."""
        self.update_source_repository(head=head)
        old_commit_ids = set(self.pull_request.revisions)
        PullRequestModel().update_commits(self.pull_request)
        commit_ids = set(self.pull_request.revisions)
        new_commit_ids = commit_ids - old_commit_ids
        assert len(new_commit_ids) == 1
        return new_commit_ids.pop()

    def remove_one_commit(self):
        """Strip the tip commit of the source repo and return its id."""
        assert len(self.pull_request.revisions) == 2
        source_vcs = self.source_repository.scm_instance()
        removed_commit_id = source_vcs.commit_ids[-1]

        # TODO: johbo: Git and Mercurial have an inconsistent vcs api here,
        # remove the if once that's sorted out.
        if self.backend.alias == "git":
            kwargs = {'branch_name': self.backend.default_branch_name}
        else:
            kwargs = {}
        source_vcs.strip(removed_commit_id, **kwargs)

        PullRequestModel().update_commits(self.pull_request)
        assert len(self.pull_request.revisions) == 1
        return removed_commit_id

    def create_comment(self, linked_to=None):
        # Creates a general (non-inline) comment on the pull request,
        # optionally linking it to the given pull request version.
        comment = ChangesetCommentsModel().create(
            text=u"Test comment",
            repo=self.target_repository.repo_name,
            user=self.author,
            pull_request=self.pull_request)
        assert comment.pull_request_version_id is None

        if linked_to:
            PullRequestModel()._link_comments_to_version(linked_to)

        return comment

    def create_inline_comment(
            self, linked_to=None, line_no=u'n1', file_path='file_1'):
        comment = ChangesetCommentsModel().create(
            text=u"Test comment",
            repo=self.target_repository.repo_name,
            user=self.author,
            line_no=line_no,
            f_path=file_path,
            pull_request=self.pull_request)
        assert comment.pull_request_version_id is None

        if linked_to:
            PullRequestModel()._link_comments_to_version(linked_to)

        return comment

    def create_version_of_pull_request(self):
        pull_request = self.create_pull_request()
        version = PullRequestModel()._create_version_from_snapshot(
            pull_request)
        return version

    def create_status_votes(self, status, *reviewers):
        for reviewer in reviewers:
            ChangesetStatusModel().set_status(
                repo=self.pull_request.target_repo,
                status=status,
                user=reviewer.user_id,
                pull_request=self.pull_request)

    def set_mergeable(self, value):
        # Patches the general vcs settings so that PR merging appears
        # enabled or disabled; the patcher is installed once and only its
        # return value is changed on later calls.
        if not self.mergeable_patcher:
            self.mergeable_patcher = mock.patch.object(
                VcsSettingsModel, 'get_general_settings')
            self.mergeable_mock = self.mergeable_patcher.start()
        self.mergeable_mock.return_value = {
            'rhodecode_pr_merge_enabled': value}

    def cleanup(self):
        # In case the source repository is already cleaned up, the pull
        # request will already be deleted.
        pull_request = PullRequest().get(self.pull_request_id)
        if pull_request:
            PullRequestModel().delete(pull_request)
        Session().commit()

        if self.notification_patcher:
            self.notification_patcher.stop()

        if self.mergeable_patcher:
            self.mergeable_patcher.stop()
1101 1101
1102 1102
@pytest.fixture
def user_admin(pylonsapp):
    """
    Provides the default admin test user as an instance of `db.User`.
    """
    return UserModel().get_by_username(TEST_USER_ADMIN_LOGIN)


@pytest.fixture
def user_regular(pylonsapp):
    """
    Provides the default regular test user as an instance of `db.User`.
    """
    return UserModel().get_by_username(TEST_USER_REGULAR_LOGIN)
1119 1119
1120 1120
@pytest.fixture
def user_util(request, pylonsapp):
    """
    Provides a wired instance of `UserUtility` with integrated cleanup.
    """
    util = UserUtility(test_name=request.node.name)
    request.addfinalizer(util.cleanup)
    return util
1129 1129
1130 1130
# TODO: johbo: Split this up into utilities per domain or something similar
class UserUtility(object):
    """
    Creates users, user groups, repo groups and permissions for a test and
    tears everything down again in `cleanup`.
    """

    def __init__(self, test_name="test"):
        self._test_name = test_name
        self.fixture = Fixture()
        # Ids of created objects, recorded for cleanup.
        self.repo_group_ids = []
        self.user_ids = []
        self.user_group_ids = []
        # (object_id, subject_id) pairs of granted permissions, recorded so
        # they can be revoked again in `cleanup`.
        self.user_repo_permission_ids = []
        self.user_group_repo_permission_ids = []
        self.user_repo_group_permission_ids = []
        self.user_group_repo_group_permission_ids = []
        self.user_user_group_permission_ids = []
        self.user_group_user_group_permission_ids = []
        self.user_permissions = []

    def create_repo_group(
            self, owner=TEST_USER_ADMIN_LOGIN, auto_cleanup=True):
        # Group name is unique per test: sanitized test name plus a counter.
        group_name = "{prefix}_repogroup_{count}".format(
            prefix=self._test_name,
            count=len(self.repo_group_ids))
        repo_group = self.fixture.create_repo_group(
            group_name, cur_user=owner)
        if auto_cleanup:
            self.repo_group_ids.append(repo_group.group_id)
        return repo_group

    def create_user(self, auto_cleanup=True, **kwargs):
        user_name = "{prefix}_user_{count}".format(
            prefix=self._test_name,
            count=len(self.user_ids))
        user = self.fixture.create_user(user_name, **kwargs)
        if auto_cleanup:
            self.user_ids.append(user.user_id)
        return user

    def create_user_with_group(self):
        """Return a (user, user_group) pair with the user as a member."""
        user = self.create_user()
        user_group = self.create_user_group(members=[user])
        return user, user_group

    def create_user_group(self, members=None, auto_cleanup=True, **kwargs):
        group_name = "{prefix}_usergroup_{count}".format(
            prefix=self._test_name,
            count=len(self.user_group_ids))
        user_group = self.fixture.create_user_group(group_name, **kwargs)
        if auto_cleanup:
            self.user_group_ids.append(user_group.users_group_id)
        if members:
            for user in members:
                UserGroupModel().add_user_to_group(user_group, user)
        return user_group

    def grant_user_permission(self, user_name, permission_name):
        # NOTE(review): this only disables permission inheritance and records
        # the pair for revocation in `cleanup`; it does not appear to grant
        # the permission itself — confirm the grant happens elsewhere.
        self._inherit_default_user_permissions(user_name, False)
        self.user_permissions.append((user_name, permission_name))

    def grant_user_permission_to_repo_group(
            self, repo_group, user, permission_name):
        permission = RepoGroupModel().grant_user_permission(
            repo_group, user, permission_name)
        self.user_repo_group_permission_ids.append(
            (repo_group.group_id, user.user_id))
        return permission

    def grant_user_group_permission_to_repo_group(
            self, repo_group, user_group, permission_name):
        permission = RepoGroupModel().grant_user_group_permission(
            repo_group, user_group, permission_name)
        self.user_group_repo_group_permission_ids.append(
            (repo_group.group_id, user_group.users_group_id))
        return permission

    def grant_user_permission_to_repo(
            self, repo, user, permission_name):
        permission = RepoModel().grant_user_permission(
            repo, user, permission_name)
        self.user_repo_permission_ids.append(
            (repo.repo_id, user.user_id))
        return permission

    def grant_user_group_permission_to_repo(
            self, repo, user_group, permission_name):
        permission = RepoModel().grant_user_group_permission(
            repo, user_group, permission_name)
        self.user_group_repo_permission_ids.append(
            (repo.repo_id, user_group.users_group_id))
        return permission

    def grant_user_permission_to_user_group(
            self, target_user_group, user, permission_name):
        permission = UserGroupModel().grant_user_permission(
            target_user_group, user, permission_name)
        self.user_user_group_permission_ids.append(
            (target_user_group.users_group_id, user.user_id))
        return permission

    def grant_user_group_permission_to_user_group(
            self, target_user_group, user_group, permission_name):
        permission = UserGroupModel().grant_user_group_permission(
            target_user_group, user_group, permission_name)
        self.user_group_user_group_permission_ids.append(
            (target_user_group.users_group_id, user_group.users_group_id))
        return permission

    def revoke_user_permission(self, user_name, permission_name):
        # Re-enable inheritance of default permissions before revoking.
        self._inherit_default_user_permissions(user_name, True)
        UserModel().revoke_perm(user_name, permission_name)

    def _inherit_default_user_permissions(self, user_name, value):
        user = UserModel().get_by_username(user_name)
        user.inherit_default_permissions = value
        Session().add(user)
        Session().commit()

    def cleanup(self):
        # Order matters: permissions first, then the objects they refer to;
        # users last, since groups may still reference them.
        self._cleanup_permissions()
        self._cleanup_repo_groups()
        self._cleanup_user_groups()
        self._cleanup_users()

    def _cleanup_permissions(self):
        if self.user_permissions:
            for user_name, permission_name in self.user_permissions:
                self.revoke_user_permission(user_name, permission_name)

        for permission in self.user_repo_permission_ids:
            RepoModel().revoke_user_permission(*permission)

        for permission in self.user_group_repo_permission_ids:
            RepoModel().revoke_user_group_permission(*permission)

        for permission in self.user_repo_group_permission_ids:
            RepoGroupModel().revoke_user_permission(*permission)

        for permission in self.user_group_repo_group_permission_ids:
            RepoGroupModel().revoke_user_group_permission(*permission)

        for permission in self.user_user_group_permission_ids:
            UserGroupModel().revoke_user_permission(*permission)

        for permission in self.user_group_user_group_permission_ids:
            UserGroupModel().revoke_user_group_permission(*permission)

    def _cleanup_repo_groups(self):
        def _repo_group_compare(first_group_id, second_group_id):
            """
            Gives higher priority to the groups with the most complex paths
            """
            # Python 2 cmp-style comparator: nested (deeper-path) groups are
            # sorted first so they are destroyed before their parents.
            first_group = RepoGroup.get(first_group_id)
            second_group = RepoGroup.get(second_group_id)
            first_group_parts = (
                len(first_group.group_name.split('/')) if first_group else 0)
            second_group_parts = (
                len(second_group.group_name.split('/')) if second_group else 0)
            return cmp(second_group_parts, first_group_parts)

        sorted_repo_group_ids = sorted(
            self.repo_group_ids, cmp=_repo_group_compare)
        for repo_group_id in sorted_repo_group_ids:
            self.fixture.destroy_repo_group(repo_group_id)

    def _cleanup_user_groups(self):
        def _user_group_compare(first_group_id, second_group_id):
            """
            Gives higher priority to the groups with the most complex paths
            """
            # Same deepest-first ordering as for repo groups above.
            first_group = UserGroup.get(first_group_id)
            second_group = UserGroup.get(second_group_id)
            first_group_parts = (
                len(first_group.users_group_name.split('/'))
                if first_group else 0)
            second_group_parts = (
                len(second_group.users_group_name.split('/'))
                if second_group else 0)
            return cmp(second_group_parts, first_group_parts)

        sorted_user_group_ids = sorted(
            self.user_group_ids, cmp=_user_group_compare)
        for user_group_id in sorted_user_group_ids:
            self.fixture.destroy_user_group(user_group_id)

    def _cleanup_users(self):
        for user_id in self.user_ids:
            self.fixture.destroy_user(user_id)
1317 1317
1318 1318
# TODO: Think about moving this into a pytest-pyro package and make it a
# pytest plugin
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    Adding the remote traceback if the exception has this information.

    Pyro4 attaches this information as the attribute `_pyroTraceback`
    to the exception instance.
    """
    # hookwrapper protocol: `yield` hands control to the other hook
    # implementations; `outcome` carries their combined result.
    outcome = yield
    report = outcome.get_result()
    if call.excinfo:
        _add_pyro_remote_traceback(report, call.excinfo.value)
1333 1333
1334 1334
1335 1335 def _add_pyro_remote_traceback(report, exc):
1336 1336 pyro_traceback = getattr(exc, '_pyroTraceback', None)
1337 1337
1338 1338 if pyro_traceback:
1339 1339 traceback = ''.join(pyro_traceback)
1340 1340 section = 'Pyro4 remote traceback ' + report.when
1341 1341 report.sections.append((section, traceback))
1342 1342
1343 1343
@pytest.fixture(scope='session')
def testrun():
    """Session-wide metadata identifying this test run."""
    started = datetime.datetime.utcnow()
    return {
        'uuid': uuid.uuid4(),
        'start': started.isoformat(),
        'timestamp': int(time.time()),
    }
1351 1351
1352 1352
1353 1353 @pytest.fixture(autouse=True)
1354 1354 def collect_appenlight_stats(request, testrun):
1355 1355 """
1356 1356 This fixture reports memory consumtion of single tests.
1357 1357
1358 1358 It gathers data based on `psutil` and sends them to Appenlight. The option
1359 1359 ``--ae`` has to be used to enable this fixture and the API key for your
1360 1360 application has to be provided in ``--ae-key``.
1361 1361 """
1362 1362 try:
1363 1363 # cygwin does not yet have psutil support.
1364 1364 import psutil
1365 1365 except ImportError:
1366 1366 return
1367 1367
1368 1368 if not request.config.getoption('--appenlight'):
1369 1369 return
1370 1370 else:
1371 1371 # Only request the pylonsapp fixture if appenlight tracking is
1372 1372 # enabled. This will speed up a test run of unit tests by 2 to 3
1373 1373 # seconds if appenlight is not enabled.
1374 1374 pylonsapp = request.getfuncargvalue("pylonsapp")
1375 1375 url = '{}/api/logs'.format(request.config.getoption('--appenlight-url'))
1376 1376 client = AppenlightClient(
1377 1377 url=url,
1378 1378 api_key=request.config.getoption('--appenlight-api-key'),
1379 1379 namespace=request.node.nodeid,
1380 1380 request=str(testrun['uuid']),
1381 1381 testrun=testrun)
1382 1382
1383 1383 client.collect({
1384 1384 'message': "Starting",
1385 1385 })
1386 1386
1387 1387 server_and_port = pylonsapp.config['vcs.server']
1388 1388 server = create_vcsserver_proxy(server_and_port)
1389 1389 with server:
1390 1390 vcs_pid = server.get_pid()
1391 1391 server.run_gc()
1392 1392 vcs_process = psutil.Process(vcs_pid)
1393 1393 mem = vcs_process.memory_info()
1394 1394 client.tag_before('vcsserver.rss', mem.rss)
1395 1395 client.tag_before('vcsserver.vms', mem.vms)
1396 1396
1397 1397 test_process = psutil.Process()
1398 1398 mem = test_process.memory_info()
1399 1399 client.tag_before('test.rss', mem.rss)
1400 1400 client.tag_before('test.vms', mem.vms)
1401 1401
1402 1402 client.tag_before('time', time.time())
1403 1403
1404 1404 @request.addfinalizer
1405 1405 def send_stats():
1406 1406 client.tag_after('time', time.time())
1407 1407 with server:
1408 1408 gc_stats = server.run_gc()
1409 1409 for tag, value in gc_stats.items():
1410 1410 client.tag_after(tag, value)
1411 1411 mem = vcs_process.memory_info()
1412 1412 client.tag_after('vcsserver.rss', mem.rss)
1413 1413 client.tag_after('vcsserver.vms', mem.vms)
1414 1414
1415 1415 mem = test_process.memory_info()
1416 1416 client.tag_after('test.rss', mem.rss)
1417 1417 client.tag_after('test.vms', mem.vms)
1418 1418
1419 1419 client.collect({
1420 1420 'message': "Finished",
1421 1421 })
1422 1422 client.send_stats()
1423 1423
1424 1424 return client
1425 1425
1426 1426
1427 1427 class AppenlightClient():
1428 1428
1429 1429 url_template = '{url}?protocol_version=0.5'
1430 1430
1431 1431 def __init__(
1432 1432 self, url, api_key, add_server=True, add_timestamp=True,
1433 1433 namespace=None, request=None, testrun=None):
1434 1434 self.url = self.url_template.format(url=url)
1435 1435 self.api_key = api_key
1436 1436 self.add_server = add_server
1437 1437 self.add_timestamp = add_timestamp
1438 1438 self.namespace = namespace
1439 1439 self.request = request
1440 1440 self.server = socket.getfqdn(socket.gethostname())
1441 1441 self.tags_before = {}
1442 1442 self.tags_after = {}
1443 1443 self.stats = []
1444 1444 self.testrun = testrun or {}
1445 1445
1446 1446 def tag_before(self, tag, value):
1447 1447 self.tags_before[tag] = value
1448 1448
1449 1449 def tag_after(self, tag, value):
1450 1450 self.tags_after[tag] = value
1451 1451
1452 1452 def collect(self, data):
1453 1453 if self.add_server:
1454 1454 data.setdefault('server', self.server)
1455 1455 if self.add_timestamp:
1456 1456 data.setdefault('date', datetime.datetime.utcnow().isoformat())
1457 1457 if self.namespace:
1458 1458 data.setdefault('namespace', self.namespace)
1459 1459 if self.request:
1460 1460 data.setdefault('request', self.request)
1461 1461 self.stats.append(data)
1462 1462
1463 1463 def send_stats(self):
1464 1464 tags = [
1465 1465 ('testrun', self.request),
1466 1466 ('testrun.start', self.testrun['start']),
1467 1467 ('testrun.timestamp', self.testrun['timestamp']),
1468 1468 ('test', self.namespace),
1469 1469 ]
1470 1470 for key, value in self.tags_before.items():
1471 1471 tags.append((key + '.before', value))
1472 1472 try:
1473 1473 delta = self.tags_after[key] - value
1474 1474 tags.append((key + '.delta', delta))
1475 1475 except Exception:
1476 1476 pass
1477 1477 for key, value in self.tags_after.items():
1478 1478 tags.append((key + '.after', value))
1479 1479 self.collect({
1480 1480 'message': "Collected tags",
1481 1481 'tags': tags,
1482 1482 })
1483 1483
1484 1484 response = requests.post(
1485 1485 self.url,
1486 1486 headers={
1487 1487 'X-appenlight-api-key': self.api_key},
1488 1488 json=self.stats,
1489 1489 )
1490 1490
1491 1491 if not response.status_code == 200:
1492 1492 pprint.pprint(self.stats)
1493 1493 print response.headers
1494 1494 print response.text
1495 1495 raise Exception('Sending to appenlight failed')
1496 1496
1497 1497
1498 1498 @pytest.fixture
1499 1499 def gist_util(request, pylonsapp):
1500 1500 """
1501 1501 Provides a wired instance of `GistUtility` with integrated cleanup.
1502 1502 """
1503 1503 utility = GistUtility()
1504 1504 request.addfinalizer(utility.cleanup)
1505 1505 return utility
1506 1506
1507 1507
1508 1508 class GistUtility(object):
1509 1509 def __init__(self):
1510 1510 self.fixture = Fixture()
1511 1511 self.gist_ids = []
1512 1512
1513 1513 def create_gist(self, **kwargs):
1514 1514 gist = self.fixture.create_gist(**kwargs)
1515 1515 self.gist_ids.append(gist.gist_id)
1516 1516 return gist
1517 1517
1518 1518 def cleanup(self):
1519 1519 for id_ in self.gist_ids:
1520 1520 self.fixture.destroy_gists(str(id_))
1521 1521
1522 1522
1523 1523 @pytest.fixture
1524 1524 def enabled_backends(request):
1525 1525 backends = request.config.option.backends
1526 1526 return backends[:]
1527 1527
1528 1528
1529 1529 @pytest.fixture
1530 1530 def settings_util(request):
1531 1531 """
1532 1532 Provides a wired instance of `SettingsUtility` with integrated cleanup.
1533 1533 """
1534 1534 utility = SettingsUtility()
1535 1535 request.addfinalizer(utility.cleanup)
1536 1536 return utility
1537 1537
1538 1538
1539 1539 class SettingsUtility(object):
1540 1540 def __init__(self):
1541 1541 self.rhodecode_ui_ids = []
1542 1542 self.rhodecode_setting_ids = []
1543 1543 self.repo_rhodecode_ui_ids = []
1544 1544 self.repo_rhodecode_setting_ids = []
1545 1545
1546 1546 def create_repo_rhodecode_ui(
1547 1547 self, repo, section, value, key=None, active=True, cleanup=True):
1548 1548 key = key or hashlib.sha1(
1549 1549 '{}{}{}'.format(section, value, repo.repo_id)).hexdigest()
1550 1550
1551 1551 setting = RepoRhodeCodeUi()
1552 1552 setting.repository_id = repo.repo_id
1553 1553 setting.ui_section = section
1554 1554 setting.ui_value = value
1555 1555 setting.ui_key = key
1556 1556 setting.ui_active = active
1557 1557 Session().add(setting)
1558 1558 Session().commit()
1559 1559
1560 1560 if cleanup:
1561 1561 self.repo_rhodecode_ui_ids.append(setting.ui_id)
1562 1562 return setting
1563 1563
1564 1564 def create_rhodecode_ui(
1565 1565 self, section, value, key=None, active=True, cleanup=True):
1566 1566 key = key or hashlib.sha1('{}{}'.format(section, value)).hexdigest()
1567 1567
1568 1568 setting = RhodeCodeUi()
1569 1569 setting.ui_section = section
1570 1570 setting.ui_value = value
1571 1571 setting.ui_key = key
1572 1572 setting.ui_active = active
1573 1573 Session().add(setting)
1574 1574 Session().commit()
1575 1575
1576 1576 if cleanup:
1577 1577 self.rhodecode_ui_ids.append(setting.ui_id)
1578 1578 return setting
1579 1579
1580 1580 def create_repo_rhodecode_setting(
1581 1581 self, repo, name, value, type_, cleanup=True):
1582 1582 setting = RepoRhodeCodeSetting(
1583 1583 repo.repo_id, key=name, val=value, type=type_)
1584 1584 Session().add(setting)
1585 1585 Session().commit()
1586 1586
1587 1587 if cleanup:
1588 1588 self.repo_rhodecode_setting_ids.append(setting.app_settings_id)
1589 1589 return setting
1590 1590
1591 1591 def create_rhodecode_setting(self, name, value, type_, cleanup=True):
1592 1592 setting = RhodeCodeSetting(key=name, val=value, type=type_)
1593 1593 Session().add(setting)
1594 1594 Session().commit()
1595 1595
1596 1596 if cleanup:
1597 1597 self.rhodecode_setting_ids.append(setting.app_settings_id)
1598 1598
1599 1599 return setting
1600 1600
1601 1601 def cleanup(self):
1602 1602 for id_ in self.rhodecode_ui_ids:
1603 1603 setting = RhodeCodeUi.get(id_)
1604 1604 Session().delete(setting)
1605 1605
1606 1606 for id_ in self.rhodecode_setting_ids:
1607 1607 setting = RhodeCodeSetting.get(id_)
1608 1608 Session().delete(setting)
1609 1609
1610 1610 for id_ in self.repo_rhodecode_ui_ids:
1611 1611 setting = RepoRhodeCodeUi.get(id_)
1612 1612 Session().delete(setting)
1613 1613
1614 1614 for id_ in self.repo_rhodecode_setting_ids:
1615 1615 setting = RepoRhodeCodeSetting.get(id_)
1616 1616 Session().delete(setting)
1617 1617
1618 1618 Session().commit()
1619 1619
1620 1620
1621 1621 @pytest.fixture
1622 1622 def no_notifications(request):
1623 1623 notification_patcher = mock.patch(
1624 1624 'rhodecode.model.notification.NotificationModel.create')
1625 1625 notification_patcher.start()
1626 1626 request.addfinalizer(notification_patcher.stop)
1627 1627
1628 1628
1629 1629 @pytest.fixture
1630 1630 def silence_action_logger(request):
1631 1631 notification_patcher = mock.patch(
1632 1632 'rhodecode.lib.utils.action_logger')
1633 1633 notification_patcher.start()
1634 1634 request.addfinalizer(notification_patcher.stop)
1635 1635
1636 1636
1637 1637 @pytest.fixture(scope='session')
1638 1638 def repeat(request):
1639 1639 """
1640 1640 The number of repetitions is based on this fixture.
1641 1641
1642 1642 Slower calls may divide it by 10 or 100. It is chosen in a way so that the
1643 1643 tests are not too slow in our default test suite.
1644 1644 """
1645 1645 return request.config.getoption('--repeat')
1646 1646
1647 1647
1648 1648 @pytest.fixture
1649 1649 def rhodecode_fixtures():
1650 1650 return Fixture()
1651 1651
1652 1652
1653 1653 @pytest.fixture
1654 1654 def request_stub():
1655 1655 """
1656 1656 Stub request object.
1657 1657 """
1658 1658 request = pyramid.testing.DummyRequest()
1659 1659 request.scheme = 'https'
1660 1660 return request
1661 1661
1662 1662
1663 1663 @pytest.fixture
1664 1664 def config_stub(request, request_stub):
1665 1665 """
1666 1666 Set up pyramid.testing and return the Configurator.
1667 1667 """
1668 1668 config = pyramid.testing.setUp(request=request_stub)
1669 1669
1670 1670 @request.addfinalizer
1671 1671 def cleanup():
1672 1672 pyramid.testing.tearDown()
1673 1673
1674 1674 return config
1675 1675
1676 1676
1677 1677 @pytest.fixture
1678 1678 def StubIntegrationType():
1679 1679 class _StubIntegrationType(IntegrationTypeBase):
1680 1680 """ Test integration type class """
1681 1681
1682 1682 key = 'test'
1683 1683 display_name = 'Test integration type'
1684 1684 description = 'A test integration type for testing'
1685 1685 icon = 'test_icon_html_image'
1686 1686
1687 1687 def __init__(self, settings):
1688 1688 super(_StubIntegrationType, self).__init__(settings)
1689 1689 self.sent_events = [] # for testing
1690 1690
1691 1691 def send_event(self, event):
1692 1692 self.sent_events.append(event)
1693 1693
1694 1694 def settings_schema(self):
1695 1695 class SettingsSchema(colander.Schema):
1696 1696 test_string_field = colander.SchemaNode(
1697 1697 colander.String(),
1698 1698 missing=colander.required,
1699 1699 title='test string field',
1700 1700 )
1701 1701 test_int_field = colander.SchemaNode(
1702 1702 colander.Int(),
1703 1703 title='some integer setting',
1704 1704 )
1705 1705 return SettingsSchema()
1706 1706
1707 1707
1708 1708 integration_type_registry.register_integration_type(_StubIntegrationType)
1709 1709 return _StubIntegrationType
1710 1710
1711 1711 @pytest.fixture
1712 1712 def stub_integration_settings():
1713 1713 return {
1714 1714 'test_string_field': 'some data',
1715 1715 'test_int_field': 100,
1716 1716 }
1717 1717
1718 1718
1719 1719 @pytest.fixture
1720 1720 def repo_integration_stub(request, repo_stub, StubIntegrationType,
1721 1721 stub_integration_settings):
1722 1722 integration = IntegrationModel().create(
1723 1723 StubIntegrationType, settings=stub_integration_settings, enabled=True,
1724 1724 name='test repo integration',
1725 1725 repo=repo_stub, repo_group=None, child_repos_only=None)
1726 1726
1727 1727 @request.addfinalizer
1728 1728 def cleanup():
1729 1729 IntegrationModel().delete(integration)
1730 1730
1731 1731 return integration
1732 1732
1733 1733
1734 1734 @pytest.fixture
1735 1735 def repogroup_integration_stub(request, test_repo_group, StubIntegrationType,
1736 1736 stub_integration_settings):
1737 1737 integration = IntegrationModel().create(
1738 1738 StubIntegrationType, settings=stub_integration_settings, enabled=True,
1739 1739 name='test repogroup integration',
1740 1740 repo=None, repo_group=test_repo_group, child_repos_only=True)
1741 1741
1742 1742 @request.addfinalizer
1743 1743 def cleanup():
1744 1744 IntegrationModel().delete(integration)
1745 1745
1746 1746 return integration
1747 1747
1748 1748
1749 1749 @pytest.fixture
1750 1750 def repogroup_recursive_integration_stub(request, test_repo_group,
1751 1751 StubIntegrationType, stub_integration_settings):
1752 1752 integration = IntegrationModel().create(
1753 1753 StubIntegrationType, settings=stub_integration_settings, enabled=True,
1754 1754 name='test recursive repogroup integration',
1755 1755 repo=None, repo_group=test_repo_group, child_repos_only=False)
1756 1756
1757 1757 @request.addfinalizer
1758 1758 def cleanup():
1759 1759 IntegrationModel().delete(integration)
1760 1760
1761 1761 return integration
1762 1762
1763 1763
1764 1764 @pytest.fixture
1765 1765 def global_integration_stub(request, StubIntegrationType,
1766 1766 stub_integration_settings):
1767 1767 integration = IntegrationModel().create(
1768 1768 StubIntegrationType, settings=stub_integration_settings, enabled=True,
1769 1769 name='test global integration',
1770 1770 repo=None, repo_group=None, child_repos_only=None)
1771 1771
1772 1772 @request.addfinalizer
1773 1773 def cleanup():
1774 1774 IntegrationModel().delete(integration)
1775 1775
1776 1776 return integration
1777 1777
1778 1778
1779 1779 @pytest.fixture
1780 1780 def root_repos_integration_stub(request, StubIntegrationType,
1781 1781 stub_integration_settings):
1782 1782 integration = IntegrationModel().create(
1783 1783 StubIntegrationType, settings=stub_integration_settings, enabled=True,
1784 1784 name='test global integration',
1785 1785 repo=None, repo_group=None, child_repos_only=True)
1786 1786
1787 1787 @request.addfinalizer
1788 1788 def cleanup():
1789 1789 IntegrationModel().delete(integration)
1790 1790
1791 1791 return integration
@@ -1,455 +1,455 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import json
22 22 import logging.config
23 23 import os
24 24 import platform
25 25 import socket
26 import subprocess
26 import subprocess32
27 27 import time
28 28 from urllib2 import urlopen, URLError
29 29
30 30 import configobj
31 31 import pylons
32 32 import pytest
33 33 import webob
34 34 from beaker.session import SessionObject
35 35 from paste.deploy import loadapp
36 36 from pylons.i18n.translation import _get_translator
37 37 from pylons.util import ContextObj
38 38 from Pyro4.errors import CommunicationError
39 39 from routes.util import URLGenerator
40 40
41 41 from rhodecode.lib import vcs
42 42 from rhodecode.tests.fixture import TestINI
43 43 import rhodecode
44 44
45 45
46 46 def _parse_json(value):
47 47 return json.loads(value) if value else None
48 48
49 49
50 50 def pytest_addoption(parser):
51 51 group = parser.getgroup('pylons')
52 52 group.addoption(
53 53 '--with-pylons', dest='pylons_config',
54 54 help="Set up a Pylons environment with the specified config file.")
55 55 group.addoption(
56 56 '--pylons-config-override', action='store', type=_parse_json,
57 57 default=None, dest='pylons_config_override', help=(
58 58 "Overrides the .ini file settings. Should be specified in JSON"
59 59 " format, e.g. '{\"section\": {\"parameter\": \"value\", ...}}'"
60 60 )
61 61 )
62 62 parser.addini(
63 63 'pylons_config',
64 64 "Set up a Pylons environment with the specified config file.")
65 65
66 66 vcsgroup = parser.getgroup('vcs')
67 67 vcsgroup.addoption(
68 68 '--without-vcsserver', dest='with_vcsserver', action='store_false',
69 69 help="Do not start the VCSServer in a background process.")
70 70 vcsgroup.addoption(
71 71 '--with-vcsserver', dest='vcsserver_config_pyro4',
72 72 help="Start the VCSServer with the specified config file.")
73 73 vcsgroup.addoption(
74 74 '--with-vcsserver-http', dest='vcsserver_config_http',
75 75 help="Start the HTTP VCSServer with the specified config file.")
76 76 vcsgroup.addoption(
77 77 '--vcsserver-protocol', dest='vcsserver_protocol',
78 78 help="Start the VCSServer with HTTP / Pyro4 protocol support.")
79 79 vcsgroup.addoption(
80 80 '--vcsserver-config-override', action='store', type=_parse_json,
81 81 default=None, dest='vcsserver_config_override', help=(
82 82 "Overrides the .ini file settings for the VCSServer. "
83 83 "Should be specified in JSON "
84 84 "format, e.g. '{\"section\": {\"parameter\": \"value\", ...}}'"
85 85 )
86 86 )
87 87 vcsgroup.addoption(
88 88 '--vcsserver-port', action='store', type=int,
89 89 default=None, help=(
90 90 "Allows to set the port of the vcsserver. Useful when testing "
91 91 "against an already running server and random ports cause "
92 92 "trouble."))
93 93 parser.addini(
94 94 'vcsserver_config_pyro4',
95 95 "Start the VCSServer with the specified config file.")
96 96 parser.addini(
97 97 'vcsserver_config_http',
98 98 "Start the HTTP VCSServer with the specified config file.")
99 99 parser.addini(
100 100 'vcsserver_protocol',
101 101 "Start the VCSServer with HTTP / Pyro4 protocol support.")
102 102
103 103
104 104 @pytest.fixture(scope='session')
105 105 def vcsserver(request, vcsserver_port, vcsserver_factory):
106 106 """
107 107 Session scope VCSServer.
108 108
109 109 Tests which need the VCSServer have to rely on this fixture in order
110 110 to ensure it will be running.
111 111
112 112 For specific needs, the fixture vcsserver_factory can be used. It allows to
113 113 adjust the configuration file for the test run.
114 114
115 115 Command line args:
116 116
117 117 --without-vcsserver: Allows to switch this fixture off. You have to
118 118 manually start the server.
119 119
120 120 --vcsserver-port: Will expect the VCSServer to listen on this port.
121 121 """
122 122
123 123 if not request.config.getoption('with_vcsserver'):
124 124 return None
125 125
126 126 use_http = _use_vcs_http_server(request.config)
127 127 return vcsserver_factory(
128 128 request, use_http=use_http, vcsserver_port=vcsserver_port)
129 129
130 130
131 131 @pytest.fixture(scope='session')
132 132 def vcsserver_factory(tmpdir_factory):
133 133 """
134 134 Use this if you need a running vcsserver with a special configuration.
135 135 """
136 136
137 137 def factory(request, use_http=True, overrides=(), vcsserver_port=None):
138 138
139 139 if vcsserver_port is None:
140 140 vcsserver_port = get_available_port()
141 141
142 142 overrides = list(overrides)
143 143 if use_http:
144 144 overrides.append({'server:main': {'port': vcsserver_port}})
145 145 else:
146 146 overrides.append({'DEFAULT': {'port': vcsserver_port}})
147 147
148 148 if is_cygwin():
149 149 platform_override = {'DEFAULT': {
150 150 'beaker.cache.repo_object.type': 'nocache'}}
151 151 overrides.append(platform_override)
152 152
153 153 option_name = (
154 154 'vcsserver_config_http' if use_http else 'vcsserver_config_pyro4')
155 155 override_option_name = 'vcsserver_config_override'
156 156 config_file = get_config(
157 157 request.config, option_name=option_name,
158 158 override_option_name=override_option_name, overrides=overrides,
159 159 basetemp=tmpdir_factory.getbasetemp().strpath,
160 160 prefix='test_vcs_')
161 161
162 162 print "Using the VCSServer configuration", config_file
163 163 ServerClass = HttpVCSServer if use_http else Pyro4VCSServer
164 164 server = ServerClass(config_file)
165 165 server.start()
166 166
167 167 @request.addfinalizer
168 168 def cleanup():
169 169 server.shutdown()
170 170
171 171 server.wait_until_ready()
172 172 return server
173 173
174 174 return factory
175 175
176 176
177 177 def is_cygwin():
178 178 return 'cygwin' in platform.system().lower()
179 179
180 180
181 181 def _use_vcs_http_server(config):
182 182 protocol_option = 'vcsserver_protocol'
183 183 protocol = (
184 184 config.getoption(protocol_option) or
185 185 config.getini(protocol_option) or
186 186 'http')
187 187 return protocol == 'http'
188 188
189 189
190 190 class VCSServer(object):
191 191 """
192 192 Represents a running VCSServer instance.
193 193 """
194 194
195 195 _args = []
196 196
197 197 def start(self):
198 198 print("Starting the VCSServer: {}".format(self._args))
199 self.process = subprocess.Popen(self._args)
199 self.process = subprocess32.Popen(self._args)
200 200
201 201 def wait_until_ready(self, timeout=30):
202 202 raise NotImplementedError()
203 203
204 204 def shutdown(self):
205 205 self.process.kill()
206 206
207 207
208 208 class Pyro4VCSServer(VCSServer):
209 209 def __init__(self, config_file):
210 210 """
211 211 :param config_file: The config file to start the server with
212 212 """
213 213
214 214 config_data = configobj.ConfigObj(config_file)
215 215 self._config = config_data['DEFAULT']
216 216
217 217 args = ['vcsserver', '--config', config_file]
218 218 self._args = args
219 219
220 220 def wait_until_ready(self, timeout=30):
221 221 remote_server = vcs.create_vcsserver_proxy(
222 222 self.server_and_port, 'pyro4')
223 223 start = time.time()
224 224 with remote_server:
225 225 while time.time() - start < timeout:
226 226 try:
227 227 remote_server.ping()
228 228 break
229 229 except CommunicationError:
230 230 time.sleep(0.2)
231 231 else:
232 232 pytest.exit(
233 233 "Starting the VCSServer failed or took more than {} "
234 234 "seconds.".format(timeout))
235 235
236 236 @property
237 237 def server_and_port(self):
238 238 return '{host}:{port}'.format(**self._config)
239 239
240 240
241 241 class HttpVCSServer(VCSServer):
242 242 """
243 243 Represents a running VCSServer instance.
244 244 """
245 245 def __init__(self, config_file):
246 246 config_data = configobj.ConfigObj(config_file)
247 247 self._config = config_data['server:main']
248 248
249 249 args = ['pserve', config_file]
250 250 self._args = args
251 251
252 252 @property
253 253 def http_url(self):
254 254 template = 'http://{host}:{port}/'
255 255 return template.format(**self._config)
256 256
257 257 def start(self):
258 self.process = subprocess.Popen(self._args)
258 self.process = subprocess32.Popen(self._args)
259 259
260 260 def wait_until_ready(self, timeout=30):
261 261 host = self._config['host']
262 262 port = self._config['port']
263 263 status_url = 'http://{host}:{port}/status'.format(host=host, port=port)
264 264 start = time.time()
265 265
266 266 while time.time() - start < timeout:
267 267 try:
268 268 urlopen(status_url)
269 269 break
270 270 except URLError:
271 271 time.sleep(0.2)
272 272 else:
273 273 pytest.exit(
274 274 "Starting the VCSServer failed or took more than {} "
275 275 "seconds.".format(timeout))
276 276
277 277 def shutdown(self):
278 278 self.process.kill()
279 279
280 280
281 281 @pytest.fixture(scope='session')
282 282 def pylons_config(request, tmpdir_factory, rcserver_port, vcsserver_port):
283 283 option_name = 'pylons_config'
284 284
285 285 overrides = [
286 286 {'server:main': {'port': rcserver_port}},
287 287 {'app:main': {
288 288 'vcs.server': 'localhost:%s' % vcsserver_port,
289 289 # johbo: We will always start the VCSServer on our own based on the
290 290 # fixtures of the test cases. For the test run it must always be
291 291 # off in the INI file.
292 292 'vcs.start_server': 'false',
293 293 }},
294 294 ]
295 295 if _use_vcs_http_server(request.config):
296 296 overrides.append({
297 297 'app:main': {
298 298 'vcs.server.protocol': 'http',
299 299 'vcs.scm_app_implementation': 'http',
300 300 'vcs.hooks.protocol': 'http',
301 301 }
302 302 })
303 303 else:
304 304 overrides.append({
305 305 'app:main': {
306 306 'vcs.server.protocol': 'pyro4',
307 307 'vcs.scm_app_implementation': 'pyro4',
308 308 'vcs.hooks.protocol': 'pyro4',
309 309 }
310 310 })
311 311
312 312 filename = get_config(
313 313 request.config, option_name=option_name,
314 314 override_option_name='{}_override'.format(option_name),
315 315 overrides=overrides,
316 316 basetemp=tmpdir_factory.getbasetemp().strpath,
317 317 prefix='test_rce_')
318 318 return filename
319 319
320 320
321 321 @pytest.fixture(scope='session')
322 322 def rcserver_port(request):
323 323 port = get_available_port()
324 324 print 'Using rcserver port %s' % (port, )
325 325 return port
326 326
327 327
328 328 @pytest.fixture(scope='session')
329 329 def vcsserver_port(request):
330 330 port = request.config.getoption('--vcsserver-port')
331 331 if port is None:
332 332 port = get_available_port()
333 333 print 'Using vcsserver port %s' % (port, )
334 334 return port
335 335
336 336
337 337 def get_available_port():
338 338 family = socket.AF_INET
339 339 socktype = socket.SOCK_STREAM
340 340 host = '127.0.0.1'
341 341
342 342 mysocket = socket.socket(family, socktype)
343 343 mysocket.bind((host, 0))
344 344 port = mysocket.getsockname()[1]
345 345 mysocket.close()
346 346 del mysocket
347 347 return port
348 348
349 349
350 350 @pytest.fixture(scope='session')
351 351 def available_port_factory():
352 352 """
353 353 Returns a callable which returns free port numbers.
354 354 """
355 355 return get_available_port
356 356
357 357
358 358 @pytest.fixture
359 359 def available_port(available_port_factory):
360 360 """
361 361 Gives you one free port for the current test.
362 362
363 363 Uses "available_port_factory" to retrieve the port.
364 364 """
365 365 return available_port_factory()
366 366
367 367
368 368 @pytest.fixture(scope='session')
369 369 def pylonsapp(pylons_config, vcsserver, http_environ_session):
370 370 print "Using the RhodeCode configuration", pylons_config
371 371 logging.config.fileConfig(
372 372 pylons_config, disable_existing_loggers=False)
373 373 app = _setup_pylons_environment(pylons_config, http_environ_session)
374 374 return app
375 375
376 376
377 377 @pytest.fixture(scope='session')
378 378 def testini_factory(tmpdir_factory, pylons_config):
379 379 """
380 380 Factory to create an INI file based on TestINI.
381 381
382 382 It will make sure to place the INI file in the correct directory.
383 383 """
384 384 basetemp = tmpdir_factory.getbasetemp().strpath
385 385 return TestIniFactory(basetemp, pylons_config)
386 386
387 387
388 388 class TestIniFactory(object):
389 389
390 390 def __init__(self, basetemp, template_ini):
391 391 self._basetemp = basetemp
392 392 self._template_ini = template_ini
393 393
394 394 def __call__(self, ini_params, new_file_prefix='test'):
395 395 ini_file = TestINI(
396 396 self._template_ini, ini_params=ini_params,
397 397 new_file_prefix=new_file_prefix, dir=self._basetemp)
398 398 result = ini_file.create()
399 399 return result
400 400
401 401
402 402 def get_config(
403 403 config, option_name, override_option_name, overrides=None,
404 404 basetemp=None, prefix='test'):
405 405 """
406 406 Find a configuration file and apply overrides for the given `prefix`.
407 407 """
408 408 config_file = (
409 409 config.getoption(option_name) or config.getini(option_name))
410 410 if not config_file:
411 411 pytest.exit(
412 412 "Configuration error, could not extract {}.".format(option_name))
413 413
414 414 overrides = overrides or []
415 415 config_override = config.getoption(override_option_name)
416 416 if config_override:
417 417 overrides.append(config_override)
418 418 temp_ini_file = TestINI(
419 419 config_file, ini_params=overrides, new_file_prefix=prefix,
420 420 dir=basetemp)
421 421
422 422 return temp_ini_file.create()
423 423
424 424
425 425 def _setup_pylons_environment(pylons_config, http_environ):
426 426 current_path = os.getcwd()
427 427 pylonsapp = loadapp(
428 428 'config:' + pylons_config, relative_to=current_path)
429 429
430 430 # Using rhodecode.CONFIG which is assigned during "load_environment".
431 431 # The indirect approach is used, because "pylonsapp" may actually be
432 432 # the Pyramid application.
433 433 pylonsapp_config = rhodecode.CONFIG
434 434 _init_stack(pylonsapp_config, environ=http_environ)
435 435
436 436 # For compatibility add the attribute "config" which would be
437 437 # present on the Pylons application.
438 438 pylonsapp.config = pylonsapp_config
439 439 return pylonsapp
440 440
441 441
442 442 def _init_stack(config=None, environ=None):
443 443 if not config:
444 444 config = pylons.test.pylonsapp.config
445 445 if not environ:
446 446 environ = {}
447 447 pylons.url._push_object(URLGenerator(config['routes.map'], environ or {}))
448 448 pylons.app_globals._push_object(config['pylons.app_globals'])
449 449 pylons.config._push_object(config)
450 450 pylons.tmpl_context._push_object(ContextObj())
451 451 # Initialize a translator for tests that utilize i18n
452 452 translator = _get_translator(pylons.config.get('lang'))
453 453 pylons.translator._push_object(translator)
454 454 pylons.session._push_object(SessionObject(environ or {}))
455 455 pylons.request._push_object(webob.Request.blank('', environ=environ))
@@ -1,217 +1,215 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Test suite for making push/pull operations
23 23 """
24 24
25 25 import os
26 26 import sys
27 27 import shutil
28 28 import logging
29 29 from os.path import join as jn
30 30 from os.path import dirname as dn
31 31
32 32 from tempfile import _RandomNameSequence
33 from subprocess import Popen, PIPE
34
33 from subprocess32 import Popen, PIPE
35 34 from paste.deploy import appconfig
36 from pylons import config
37 35
38 36 from rhodecode.lib.utils import add_cache
39 37 from rhodecode.lib.utils2 import engine_from_config
40 38 from rhodecode.lib.auth import get_crypt_password
41 39 from rhodecode.model import init_model
42 40 from rhodecode.model import meta
43 41 from rhodecode.model.db import User, Repository
44 42
45 from rhodecode.tests import TESTS_TMP_PATH, NEW_HG_REPO, HG_REPO
43 from rhodecode.tests import TESTS_TMP_PATH, HG_REPO
46 44 from rhodecode.config.environment import load_environment
47 45
48 46 rel_path = dn(dn(dn(dn(os.path.abspath(__file__)))))
49 47 conf = appconfig('config:rc.ini', relative_to=rel_path)
50 48 load_environment(conf.global_conf, conf.local_conf)
51 49
52 50 add_cache(conf)
53 51
54 52 USER = 'test_admin'
55 53 PASS = 'test12'
56 54 HOST = 'rc.local'
57 55 METHOD = 'pull'
58 56 DEBUG = True
59 57 log = logging.getLogger(__name__)
60 58
61 59
62 60 class Command(object):
63 61
64 62 def __init__(self, cwd):
65 63 self.cwd = cwd
66 64
67 65 def execute(self, cmd, *args):
68 66 """Runs command on the system with given ``args``.
69 67 """
70 68
71 69 command = cmd + ' ' + ' '.join(args)
72 70 log.debug('Executing %s' % command)
73 71 if DEBUG:
74 72 print command
75 73 p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.cwd)
76 74 stdout, stderr = p.communicate()
77 75 if DEBUG:
78 76 print stdout, stderr
79 77 return stdout, stderr
80 78
81 79
82 80 def get_session():
83 81 engine = engine_from_config(conf, 'sqlalchemy.db1.')
84 82 init_model(engine)
85 83 sa = meta.Session
86 84 return sa
87 85
88 86
89 87 def create_test_user(force=True):
90 88 print 'creating test user'
91 89 sa = get_session()
92 90
93 91 user = sa.query(User).filter(User.username == USER).scalar()
94 92
95 93 if force and user is not None:
96 94 print 'removing current user'
97 95 for repo in sa.query(Repository).filter(Repository.user == user).all():
98 96 sa.delete(repo)
99 97 sa.delete(user)
100 98 sa.commit()
101 99
102 100 if user is None or force:
103 101 print 'creating new one'
104 102 new_usr = User()
105 103 new_usr.username = USER
106 104 new_usr.password = get_crypt_password(PASS)
107 105 new_usr.email = 'mail@mail.com'
108 106 new_usr.name = 'test'
109 107 new_usr.lastname = 'lasttestname'
110 108 new_usr.active = True
111 109 new_usr.admin = True
112 110 sa.add(new_usr)
113 111 sa.commit()
114 112
115 113 print 'done'
116 114
117 115
118 116 def create_test_repo(force=True):
119 117 print 'creating test repo'
120 118 from rhodecode.model.repo import RepoModel
121 119 sa = get_session()
122 120
123 121 user = sa.query(User).filter(User.username == USER).scalar()
124 122 if user is None:
125 123 raise Exception('user not found')
126 124
127 125 repo = sa.query(Repository).filter(Repository.repo_name == HG_REPO).scalar()
128 126
129 127 if repo is None:
130 128 print 'repo not found creating'
131 129
132 130 form_data = {'repo_name': HG_REPO,
133 131 'repo_type': 'hg',
134 132 'private':False,
135 133 'clone_uri': '' }
136 134 rm = RepoModel(sa)
137 135 rm.base_path = '/home/hg'
138 136 rm.create(form_data, user)
139 137
140 138 print 'done'
141 139
142 140
143 141 def set_anonymous_access(enable=True):
144 142 sa = get_session()
145 143 user = sa.query(User).filter(User.username == 'default').one()
146 144 user.active = enable
147 145 sa.add(user)
148 146 sa.commit()
149 147
150 148
151 149 def get_anonymous_access():
152 150 sa = get_session()
153 151 return sa.query(User).filter(User.username == 'default').one().active
154 152
155 153
156 154 #==============================================================================
157 155 # TESTS
158 156 #==============================================================================
159 157 def test_clone_with_credentials(repo=HG_REPO, method=METHOD,
160 158 seq=None, backend='hg', check_output=True):
161 159 cwd = path = jn(TESTS_TMP_PATH, repo)
162 160
163 161 if seq is None:
164 162 seq = _RandomNameSequence().next()
165 163
166 164 try:
167 165 shutil.rmtree(path, ignore_errors=True)
168 166 os.makedirs(path)
169 167 except OSError:
170 168 raise
171 169
172 170 clone_url = 'http://%(user)s:%(pass)s@%(host)s/%(cloned_repo)s' % \
173 171 {'user': USER,
174 172 'pass': PASS,
175 173 'host': HOST,
176 174 'cloned_repo': repo, }
177 175
178 176 dest = path + seq
179 177 if method == 'pull':
180 178 stdout, stderr = Command(cwd).execute(backend, method, '--cwd', dest, clone_url)
181 179 else:
182 180 stdout, stderr = Command(cwd).execute(backend, method, clone_url, dest)
183 181 if check_output:
184 182 if backend == 'hg':
185 183 assert """adding file changes""" in stdout, 'no messages about cloning'
186 184 assert """abort""" not in stderr, 'got error from clone'
187 185 elif backend == 'git':
188 186 assert """Cloning into""" in stdout, 'no messages about cloning'
189 187
190 188 if __name__ == '__main__':
191 189 try:
192 190 create_test_user(force=False)
193 191 seq = None
194 192 import time
195 193
196 194 try:
197 195 METHOD = sys.argv[3]
198 196 except Exception:
199 197 pass
200 198
201 199 try:
202 200 backend = sys.argv[4]
203 201 except Exception:
204 202 backend = 'hg'
205 203
206 204 if METHOD == 'pull':
207 205 seq = _RandomNameSequence().next()
208 206 test_clone_with_credentials(repo=sys.argv[1], method='clone',
209 207 seq=seq, backend=backend)
210 208 s = time.time()
211 209 for i in range(1, int(sys.argv[2]) + 1):
212 210 print 'take', i
213 211 test_clone_with_credentials(repo=sys.argv[1], method=METHOD,
214 212 seq=seq, backend=backend)
215 213 print 'time taken %.3f' % (time.time() - s)
216 214 except Exception as e:
217 215 sys.exit('stop on %s' % e)
@@ -1,293 +1,293 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import threading
22 22 import time
23 23 import logging
24 24 import os.path
25 import subprocess
25 import subprocess32
26 26 import urllib2
27 27 from urlparse import urlparse, parse_qsl
28 28 from urllib import unquote_plus
29 29
30 30 import pytest
31 31 import rc_testdata
32 32 from lxml.html import fromstring, tostring
33 33 from lxml.cssselect import CSSSelector
34 34
35 35 from rhodecode.model.db import User
36 36 from rhodecode.model.meta import Session
37 37 from rhodecode.model.scm import ScmModel
38 38 from rhodecode.lib.vcs.backends.svn.repository import SubversionRepository
39 39
40 40
41 41 log = logging.getLogger(__name__)
42 42
43 43
44 44 def set_anonymous_access(enabled):
45 45 """(Dis)allows anonymous access depending on parameter `enabled`"""
46 46 user = User.get_default_user()
47 47 user.active = enabled
48 48 Session().add(user)
49 49 Session().commit()
50 50 log.info('anonymous access is now: %s', enabled)
51 51 assert enabled == User.get_default_user().active, (
52 52 'Cannot set anonymous access')
53 53
54 54
55 55 def check_xfail_backends(node, backend_alias):
56 56 # Using "xfail_backends" here intentionally, since this marks work
57 57 # which is "to be done" soon.
58 58 skip_marker = node.get_marker('xfail_backends')
59 59 if skip_marker and backend_alias in skip_marker.args:
60 60 msg = "Support for backend %s to be developed." % (backend_alias, )
61 61 msg = skip_marker.kwargs.get('reason', msg)
62 62 pytest.xfail(msg)
63 63
64 64
65 65 def check_skip_backends(node, backend_alias):
66 66 # Using "skip_backends" here intentionally, since this marks work which is
67 67 # not supported.
68 68 skip_marker = node.get_marker('skip_backends')
69 69 if skip_marker and backend_alias in skip_marker.args:
70 70 msg = "Feature not supported for backend %s." % (backend_alias, )
71 71 msg = skip_marker.kwargs.get('reason', msg)
72 72 pytest.skip(msg)
73 73
74 74
75 75 def extract_git_repo_from_dump(dump_name, repo_name):
76 76 """Create git repo `repo_name` from dump `dump_name`."""
77 77 repos_path = ScmModel().repos_path
78 78 target_path = os.path.join(repos_path, repo_name)
79 79 rc_testdata.extract_git_dump(dump_name, target_path)
80 80 return target_path
81 81
82 82
83 83 def extract_hg_repo_from_dump(dump_name, repo_name):
84 84 """Create hg repo `repo_name` from dump `dump_name`."""
85 85 repos_path = ScmModel().repos_path
86 86 target_path = os.path.join(repos_path, repo_name)
87 87 rc_testdata.extract_hg_dump(dump_name, target_path)
88 88 return target_path
89 89
90 90
91 91 def extract_svn_repo_from_dump(dump_name, repo_name):
92 92 """Create a svn repo `repo_name` from dump `dump_name`."""
93 93 repos_path = ScmModel().repos_path
94 94 target_path = os.path.join(repos_path, repo_name)
95 95 SubversionRepository(target_path, create=True)
96 96 _load_svn_dump_into_repo(dump_name, target_path)
97 97 return target_path
98 98
99 99
100 100 def assert_message_in_log(log_records, message, levelno, module):
101 101 messages = [
102 102 r.message for r in log_records
103 103 if r.module == module and r.levelno == levelno
104 104 ]
105 105 assert message in messages
106 106
107 107
108 108 def _load_svn_dump_into_repo(dump_name, repo_path):
109 109 """
110 110 Utility to populate a svn repository with a named dump
111 111
112 112 Currently the dumps are in rc_testdata. They might later on be
113 113 integrated with the main repository once they stabilize more.
114 114 """
115 115 dump = rc_testdata.load_svn_dump(dump_name)
116 load_dump = subprocess.Popen(
116 load_dump = subprocess32.Popen(
117 117 ['svnadmin', 'load', repo_path],
118 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
119 stderr=subprocess.PIPE)
118 stdin=subprocess32.PIPE, stdout=subprocess32.PIPE,
119 stderr=subprocess32.PIPE)
120 120 out, err = load_dump.communicate(dump)
121 121 if load_dump.returncode != 0:
122 122 log.error("Output of load_dump command: %s", out)
123 123 log.error("Error output of load_dump command: %s", err)
124 124 raise Exception(
125 125 'Failed to load dump "%s" into repository at path "%s".'
126 126 % (dump_name, repo_path))
127 127
128 128
129 129 class AssertResponse(object):
130 130 """
131 131 Utility that helps to assert things about a given HTML response.
132 132 """
133 133
134 134 def __init__(self, response):
135 135 self.response = response
136 136
137 137 def one_element_exists(self, css_selector):
138 138 self.get_element(css_selector)
139 139
140 140 def no_element_exists(self, css_selector):
141 141 assert not self._get_elements(css_selector)
142 142
143 143 def element_equals_to(self, css_selector, expected_content):
144 144 element = self.get_element(css_selector)
145 145 element_text = self._element_to_string(element)
146 146 assert expected_content in element_text
147 147
148 148 def element_contains(self, css_selector, expected_content):
149 149 element = self.get_element(css_selector)
150 150 assert expected_content in element.text_content()
151 151
152 152 def contains_one_link(self, link_text, href):
153 153 doc = fromstring(self.response.body)
154 154 sel = CSSSelector('a[href]')
155 155 elements = [
156 156 e for e in sel(doc) if e.text_content().strip() == link_text]
157 157 assert len(elements) == 1, "Did not find link or found multiple links"
158 158 self._ensure_url_equal(elements[0].attrib.get('href'), href)
159 159
160 160 def contains_one_anchor(self, anchor_id):
161 161 doc = fromstring(self.response.body)
162 162 sel = CSSSelector('#' + anchor_id)
163 163 elements = sel(doc)
164 164 assert len(elements) == 1
165 165
166 166 def _ensure_url_equal(self, found, expected):
167 167 assert _Url(found) == _Url(expected)
168 168
169 169 def get_element(self, css_selector):
170 170 elements = self._get_elements(css_selector)
171 171 assert len(elements) == 1
172 172 return elements[0]
173 173
174 174 def get_elements(self, css_selector):
175 175 return self._get_elements(css_selector)
176 176
177 177 def _get_elements(self, css_selector):
178 178 doc = fromstring(self.response.body)
179 179 sel = CSSSelector(css_selector)
180 180 elements = sel(doc)
181 181 return elements
182 182
183 183 def _element_to_string(self, element):
184 184 return tostring(element)
185 185
186 186
187 187 class _Url(object):
188 188 """
189 189 A url object that can be compared with other url orbjects
190 190 without regard to the vagaries of encoding, escaping, and ordering
191 191 of parameters in query strings.
192 192
193 193 Inspired by
194 194 http://stackoverflow.com/questions/5371992/comparing-two-urls-in-python
195 195 """
196 196
197 197 def __init__(self, url):
198 198 parts = urlparse(url)
199 199 _query = frozenset(parse_qsl(parts.query))
200 200 _path = unquote_plus(parts.path)
201 201 parts = parts._replace(query=_query, path=_path)
202 202 self.parts = parts
203 203
204 204 def __eq__(self, other):
205 205 return self.parts == other.parts
206 206
207 207 def __hash__(self):
208 208 return hash(self.parts)
209 209
210 210
211 211 def run_test_concurrently(times, raise_catched_exc=True):
212 212 """
213 213 Add this decorator to small pieces of code that you want to test
214 214 concurrently
215 215
216 216 ex:
217 217
218 218 @test_concurrently(25)
219 219 def my_test_function():
220 220 ...
221 221 """
222 222 def test_concurrently_decorator(test_func):
223 223 def wrapper(*args, **kwargs):
224 224 exceptions = []
225 225
226 226 def call_test_func():
227 227 try:
228 228 test_func(*args, **kwargs)
229 229 except Exception, e:
230 230 exceptions.append(e)
231 231 if raise_catched_exc:
232 232 raise
233 233 threads = []
234 234 for i in range(times):
235 235 threads.append(threading.Thread(target=call_test_func))
236 236 for t in threads:
237 237 t.start()
238 238 for t in threads:
239 239 t.join()
240 240 if exceptions:
241 241 raise Exception(
242 242 'test_concurrently intercepted %s exceptions: %s' % (
243 243 len(exceptions), exceptions))
244 244 return wrapper
245 245 return test_concurrently_decorator
246 246
247 247
248 248 def wait_for_url(url, timeout=10):
249 249 """
250 250 Wait until URL becomes reachable.
251 251
252 252 It polls the URL until the timeout is reached or it became reachable.
253 253 If will call to `py.test.fail` in case the URL is not reachable.
254 254 """
255 255 timeout = time.time() + timeout
256 256 last = 0
257 257 wait = 0.1
258 258
259 259 while (timeout > last):
260 260 last = time.time()
261 261 if is_url_reachable(url):
262 262 break
263 263 elif ((last + wait) > time.time()):
264 264 # Go to sleep because not enough time has passed since last check.
265 265 time.sleep(wait)
266 266 else:
267 267 pytest.fail("Timeout while waiting for URL {}".format(url))
268 268
269 269
270 270 def is_url_reachable(url):
271 271 try:
272 272 urllib2.urlopen(url)
273 273 except urllib2.URLError:
274 274 return False
275 275 return True
276 276
277 277
278 278 def get_session_from_response(response):
279 279 """
280 280 This returns the session from a response object. Pylons has some magic
281 281 to make the session available as `response.session`. But pyramid
282 282 doesn't expose it.
283 283 """
284 284 # TODO: Try to look up the session key also.
285 285 return response.request.environ['beaker.session']
286 286
287 287
288 288 def repo_on_filesystem(repo_name):
289 289 from rhodecode.lib import vcs
290 290 from rhodecode.tests import TESTS_TMP_PATH
291 291 repo = vcs.get_vcs_instance(
292 292 os.path.join(TESTS_TMP_PATH, repo_name), create=False)
293 293 return repo is not None
@@ -1,195 +1,195 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import datetime
22 22 import os
23 import subprocess
23 import subprocess32
24 24
25 25 import pytest
26 26
27 27 from rhodecode.lib.vcs.exceptions import VCSError
28 28 from rhodecode.lib.vcs.utils import author_email, author_name
29 29 from rhodecode.lib.vcs.utils.helpers import get_scm
30 30 from rhodecode.lib.vcs.utils.helpers import get_scms_for_path
31 31 from rhodecode.lib.vcs.utils.helpers import parse_datetime
32 32 from rhodecode.lib.vcs.utils.paths import get_dirs_for_path
33 33
34 34
35 35 @pytest.mark.usefixtures("pylonsapp")
36 36 class TestPaths:
37 37
38 38 def _test_get_dirs_for_path(self, path, expected):
39 39 """
40 40 Tests if get_dirs_for_path returns same as expected.
41 41 """
42 42 expected = sorted(expected)
43 43 result = sorted(get_dirs_for_path(path))
44 44 assert result == expected, (
45 45 "%s != %s which was expected result for path %s"
46 46 % (result, expected, path))
47 47
48 48 def test_get_dirs_for_path(self):
49 49 path = 'foo/bar/baz/file'
50 50 paths_and_results = (
51 51 ('foo/bar/baz/file', ['foo', 'foo/bar', 'foo/bar/baz']),
52 52 ('foo/bar/', ['foo', 'foo/bar']),
53 53 ('foo/bar', ['foo']),
54 54 )
55 55 for path, expected in paths_and_results:
56 56 self._test_get_dirs_for_path(path, expected)
57 57
58 58 def test_get_scms_for_path(self, tmpdir):
59 59 new = tmpdir.strpath
60 60 assert get_scms_for_path(new) == []
61 61
62 62 os.mkdir(os.path.join(new, '.tux'))
63 63 assert get_scms_for_path(new) == []
64 64
65 65 os.mkdir(os.path.join(new, '.git'))
66 66 assert set(get_scms_for_path(new)) == set(['git'])
67 67
68 68 os.mkdir(os.path.join(new, '.hg'))
69 69 assert set(get_scms_for_path(new)) == set(['git', 'hg'])
70 70
71 71
72 72 class TestGetScm:
73 73
74 74 def test_existing_repository(self, vcs_repository_support):
75 75 alias, repo = vcs_repository_support
76 76 assert (alias, repo.path) == get_scm(repo.path)
77 77
78 78 def test_raises_if_path_is_empty(self, tmpdir):
79 79 with pytest.raises(VCSError):
80 80 get_scm(str(tmpdir))
81 81
82 82 def test_get_scm_error_path(self):
83 83 with pytest.raises(VCSError):
84 84 get_scm('err')
85 85
86 86 def test_get_two_scms_for_path(self, tmpdir):
87 87 multialias_repo_path = str(tmpdir)
88 88
89 subprocess.check_call(['hg', 'init', multialias_repo_path])
90 subprocess.check_call(['git', 'init', multialias_repo_path])
89 subprocess32.check_call(['hg', 'init', multialias_repo_path])
90 subprocess32.check_call(['git', 'init', multialias_repo_path])
91 91
92 92 with pytest.raises(VCSError):
93 93 get_scm(multialias_repo_path)
94 94
95 95 def test_ignores_svn_working_copy(self, tmpdir):
96 96 tmpdir.mkdir('.svn')
97 97 with pytest.raises(VCSError):
98 98 get_scm(tmpdir.strpath)
99 99
100 100
101 101 class TestParseDatetime:
102 102
103 103 def test_datetime_text(self):
104 104 assert parse_datetime('2010-04-07 21:29:41') == \
105 105 datetime.datetime(2010, 4, 7, 21, 29, 41)
106 106
107 107 def test_no_seconds(self):
108 108 assert parse_datetime('2010-04-07 21:29') == \
109 109 datetime.datetime(2010, 4, 7, 21, 29)
110 110
111 111 def test_date_only(self):
112 112 assert parse_datetime('2010-04-07') == \
113 113 datetime.datetime(2010, 4, 7)
114 114
115 115 def test_another_format(self):
116 116 assert parse_datetime('04/07/10 21:29:41') == \
117 117 datetime.datetime(2010, 4, 7, 21, 29, 41)
118 118
119 119 def test_now(self):
120 120 assert parse_datetime('now') - datetime.datetime.now() < \
121 121 datetime.timedelta(seconds=1)
122 122
123 123 def test_today(self):
124 124 today = datetime.date.today()
125 125 assert parse_datetime('today') == \
126 126 datetime.datetime(*today.timetuple()[:3])
127 127
128 128 def test_yesterday(self):
129 129 yesterday = datetime.date.today() - datetime.timedelta(days=1)
130 130 assert parse_datetime('yesterday') == \
131 131 datetime.datetime(*yesterday.timetuple()[:3])
132 132
133 133 def test_tomorrow(self):
134 134 tomorrow = datetime.date.today() + datetime.timedelta(days=1)
135 135 args = tomorrow.timetuple()[:3] + (23, 59, 59)
136 136 assert parse_datetime('tomorrow') == datetime.datetime(*args)
137 137
138 138 def test_days(self):
139 139 timestamp = datetime.datetime.today() - datetime.timedelta(days=3)
140 140 args = timestamp.timetuple()[:3] + (0, 0, 0, 0)
141 141 expected = datetime.datetime(*args)
142 142 assert parse_datetime('3d') == expected
143 143 assert parse_datetime('3 d') == expected
144 144 assert parse_datetime('3 day') == expected
145 145 assert parse_datetime('3 days') == expected
146 146
147 147 def test_weeks(self):
148 148 timestamp = datetime.datetime.today() - datetime.timedelta(days=3 * 7)
149 149 args = timestamp.timetuple()[:3] + (0, 0, 0, 0)
150 150 expected = datetime.datetime(*args)
151 151 assert parse_datetime('3w') == expected
152 152 assert parse_datetime('3 w') == expected
153 153 assert parse_datetime('3 week') == expected
154 154 assert parse_datetime('3 weeks') == expected
155 155
156 156 def test_mixed(self):
157 157 timestamp = (
158 158 datetime.datetime.today() - datetime.timedelta(days=2 * 7 + 3))
159 159 args = timestamp.timetuple()[:3] + (0, 0, 0, 0)
160 160 expected = datetime.datetime(*args)
161 161 assert parse_datetime('2w3d') == expected
162 162 assert parse_datetime('2w 3d') == expected
163 163 assert parse_datetime('2w 3 days') == expected
164 164 assert parse_datetime('2 weeks 3 days') == expected
165 165
166 166
167 167 @pytest.mark.parametrize("test_str, name, email", [
168 168 ('Marcin Kuzminski <marcin@python-works.com>',
169 169 'Marcin Kuzminski', 'marcin@python-works.com'),
170 170 ('Marcin Kuzminski Spaces < marcin@python-works.com >',
171 171 'Marcin Kuzminski Spaces', 'marcin@python-works.com'),
172 172 ('Marcin Kuzminski <marcin.kuzminski@python-works.com>',
173 173 'Marcin Kuzminski', 'marcin.kuzminski@python-works.com'),
174 174 ('mrf RFC_SPEC <marcin+kuzminski@python-works.com>',
175 175 'mrf RFC_SPEC', 'marcin+kuzminski@python-works.com'),
176 176 ('username <user@email.com>',
177 177 'username', 'user@email.com'),
178 178 ('username <user@email.com',
179 179 'username', 'user@email.com'),
180 180 ('broken missing@email.com',
181 181 'broken', 'missing@email.com'),
182 182 ('<justemail@mail.com>',
183 183 '', 'justemail@mail.com'),
184 184 ('justname',
185 185 'justname', ''),
186 186 ('Mr Double Name withemail@email.com ',
187 187 'Mr Double Name', 'withemail@email.com'),
188 188 ])
189 189 class TestAuthorExtractors:
190 190
191 191 def test_author_email(self, test_str, name, email):
192 192 assert email == author_email(test_str)
193 193
194 194 def test_author_name(self, test_str, name, email):
195 195 assert name == author_name(test_str)
@@ -1,118 +1,118 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Utilities for tests only. These are not or should not be used normally -
23 23 functions here are crafted as we don't want to use ``vcs`` to verify tests.
24 24 """
25 25
26 26 import os
27 27 import re
28 28 import sys
29 29
30 from subprocess import Popen
30 from subprocess32 import Popen
31 31
32 32
33 33 class VCSTestError(Exception):
34 34 pass
35 35
36 36
37 37 def run_command(cmd, args):
38 38 """
39 39 Runs command on the system with given ``args``.
40 40 """
41 41 command = ' '.join((cmd, args))
42 42 p = Popen(command, shell=True)
43 43 status = os.waitpid(p.pid, 0)[1]
44 44 return status
45 45
46 46
47 47 def eprint(msg):
48 48 """
49 49 Prints given ``msg`` into sys.stderr as nose test runner hides all output
50 50 from sys.stdout by default and if we want to pipe stream somewhere we don't
51 51 need those verbose messages anyway.
52 52 Appends line break.
53 53 """
54 54 sys.stderr.write(msg)
55 55 sys.stderr.write('\n')
56 56
57 57
58 58 # TODO: Revisit once we have CI running, if this is not helping us, remove it
59 59 class SCMFetcher(object):
60 60
61 61 def __init__(self, alias, test_repo_path):
62 62 """
63 63 :param clone_cmd: command which would clone remote repository; pass
64 64 only first bits - remote path and destination would be appended
65 65 using ``remote_repo`` and ``test_repo_path``
66 66 """
67 67 self.alias = alias
68 68 self.test_repo_path = test_repo_path
69 69
70 70 def setup(self):
71 71 if not os.path.isdir(self.test_repo_path):
72 72 self.fetch_repo()
73 73
74 74 def fetch_repo(self):
75 75 """
76 76 Tries to fetch repository from remote path.
77 77 """
78 78 remote = self.remote_repo
79 79 eprint(
80 80 "Fetching repository %s into %s" % (remote, self.test_repo_path))
81 81 run_command(self.clone_cmd, '%s %s' % (remote, self.test_repo_path))
82 82
83 83
84 84 def get_normalized_path(path):
85 85 """
86 86 If given path exists, new path would be generated and returned. Otherwise
87 87 same whats given is returned. Assumes that there would be no more than
88 88 10000 same named files.
89 89 """
90 90 if os.path.exists(path):
91 91 dir, basename = os.path.split(path)
92 92 splitted_name = basename.split('.')
93 93 if len(splitted_name) > 1:
94 94 ext = splitted_name[-1]
95 95 else:
96 96 ext = None
97 97 name = '.'.join(splitted_name[:-1])
98 98 matcher = re.compile(r'^.*-(\d{5})$')
99 99 start = 0
100 100 m = matcher.match(name)
101 101 if not m:
102 102 # Haven't append number yet so return first
103 103 newname = '%s-00000' % name
104 104 newpath = os.path.join(dir, newname)
105 105 if ext:
106 106 newpath = '.'.join((newpath, ext))
107 107 return get_normalized_path(newpath)
108 108 else:
109 109 start = int(m.group(1)[-5:]) + 1
110 110 for x in xrange(start, 10000):
111 111 newname = name[:-5] + str(x).rjust(5, '0')
112 112 newpath = os.path.join(dir, newname)
113 113 if ext:
114 114 newpath = '.'.join((newpath, ext))
115 115 if not os.path.exists(newpath):
116 116 return newpath
117 117 raise VCSTestError("Couldn't compute new path for %s" % path)
118 118 return path
General Comments 0
You need to be logged in to leave comments. Login now