run-tests: fix crash when --json and --blacklist are both used (issue5050)...
Laurent Charignon
r27927:0de4dfc9 stable
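
For context, the crash this changeset addresses is triggered by requesting the JSON report while also supplying a blacklist. A hypothetical reproduction (the blacklist and test file names below are illustrative, not taken from this changeset) would combine the two options:

    ./run-tests.py --json --blacklist blacklist test-example.t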
@@ -1,2375 +1,2381 @@
#!/usr/bin/env python
#
# run-tests.py - Run a set of tests on Mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Modifying this script is tricky because it has many modes:
#   - serial (default) vs parallel (-jN, N > 1)
#   - no coverage (default) vs coverage (-c, -C, -s)
#   - temp install (default) vs specific hg script (--with-hg, --local)
#   - tests are a mix of shell scripts and Python scripts
#
# If you change this script, it is recommended that you ensure you
# haven't broken it by running it in various modes with a representative
# sample of test scripts. For example:
#
#  1) serial, no coverage, temp install:
#      ./run-tests.py test-s*
#  2) serial, no coverage, local hg:
#      ./run-tests.py --local test-s*
#  3) serial, coverage, temp install:
#      ./run-tests.py -c test-s*
#  4) serial, coverage, local hg:
#      ./run-tests.py -c --local test-s*  # unsupported
#  5) parallel, no coverage, temp install:
#      ./run-tests.py -j2 test-s*
#  6) parallel, no coverage, local hg:
#      ./run-tests.py -j2 --local test-s*
#  7) parallel, coverage, temp install:
#      ./run-tests.py -j2 -c test-s*  # currently broken
#  8) parallel, coverage, local install:
#      ./run-tests.py -j2 -c --local test-s*  # unsupported (and broken)
#  9) parallel, custom tmp dir:
#      ./run-tests.py -j2 --tmpdir /tmp/myhgtests
#  10) parallel, pure, tests that call run-tests:
#      ./run-tests.py --pure `grep -l run-tests.py *.t`
#
# (You could use any subset of the tests: test-s* happens to match
# enough that it's worth doing parallel runs, few enough that it
# completes fairly quickly, includes both shell and Python scripts, and
# includes some scripts that run daemon processes.)

from __future__ import print_function

from distutils import version
import difflib
import errno
import optparse
import os
import shutil
import subprocess
import signal
import socket
import sys
import tempfile
import time
import random
import re
import threading
import killdaemons as killmod
try:
    import Queue as queue
except ImportError:
    import queue
from xml.dom import minidom
import unittest

osenvironb = getattr(os, 'environb', os.environ)

try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        json = None

processlock = threading.Lock()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        return p.encode('utf-8')

    def _strpath(p):
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if not exc.errno == errno.EADDRINUSE:
            raise
        return False

closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p

PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}

def parselistfiles(files, listtype, warn=True):
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        for line in f.readlines():
            line = line.split(b'#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries

def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')

    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser

def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results."""
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.expanduser(options.with_hg)
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == 'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
        hgbin = os.path.join(os.path.dirname(testdir), b'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.6 and 2.7')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return (options, args)

def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    shutil.copy(src, dst)
    os.remove(src)

_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
                b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines

verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)

# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')

def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()

def terminate(proc):
    """Terminate subprocess (with fallback for Python versions < 2.6)"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
    except OSError:
        pass

def killdaemons(pidfile):
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)

class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None,
                 slowtimeout=defaults['slowtimeout']):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)

        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest as e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest as e:
                result.addWarn(self, str(e))
            except ReportedTest as e:
                pass
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp, self._threadtmp))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            (br':%d\b' % self._startport, b':$HGPORT'),
            (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
            (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            ]

        if os.name == 'nt':
            r.append(
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in self._testtmp), b'$TESTTMP'))
        else:
            r.append((re.escape(self._testtmp), b'$TESTTMP'))

        return r

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        env["HGPORT"] = str(self._startport)
        env["HGPORT1"] = str(self._startport + 1)
        env["HGPORT2"] = str(self._startport + 2)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        hgrc = open(path, 'wb')
        hgrc.write(b'[ui]\n')
        hgrc.write(b'slash = True\n')
        hgrc.write(b'interactive = False\n')
        hgrc.write(b'mergemarkers = detailed\n')
        hgrc.write(b'promptecho = True\n')
        hgrc.write(b'[defaults]\n')
        hgrc.write(b'backout = -d "0 0"\n')
        hgrc.write(b'commit = -d "0 0"\n')
        hgrc.write(b'shelve = --date "0 0"\n')
        hgrc.write(b'tag = -d "0 0"\n')
        hgrc.write(b'[devel]\n')
        hgrc.write(b'all-warnings = true\n')
        hgrc.write(b'[largefiles]\n')
        hgrc.write(b'usercache = %s\n' %
                   (os.path.join(self._testtmp, b'.cache/largefiles')))

        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write(b'[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)

class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result

# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
checkcodeglobpats = [
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
]

bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])

class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = 'skipped: '
    FAILED_PREFIX = 'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    @property
    def refpath(self):
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
935 self._testtmp, 0, self._getenv())
936 stdout, stderr = proc.communicate()
936 stdout, stderr = proc.communicate()
937 ret = proc.wait()
937 ret = proc.wait()
938 if wifexited(ret):
938 if wifexited(ret):
939 ret = os.WEXITSTATUS(ret)
939 ret = os.WEXITSTATUS(ret)
940 if ret == 2:
940 if ret == 2:
941 print(stdout)
941 print(stdout)
942 sys.exit(1)
942 sys.exit(1)
943
943
944 if ret != 0:
944 if ret != 0:
945 return False, stdout
945 return False, stdout
946
946
947 if 'slow' in reqs:
947 if 'slow' in reqs:
948 self._timeout = self._slowtimeout
948 self._timeout = self._slowtimeout
949 return True, None
949 return True, None
950
950
951 def _parsetest(self, lines):
951 def _parsetest(self, lines):
952 # We generate a shell script which outputs unique markers to line
952 # We generate a shell script which outputs unique markers to line
953 # up script results with our source. These markers include input
953 # up script results with our source. These markers include input
954 # line number and the last return code.
954 # line number and the last return code.
955 salt = b"SALT%d" % time.time()
955 salt = b"SALT%d" % time.time()
956 def addsalt(line, inpython):
956 def addsalt(line, inpython):
957 if inpython:
957 if inpython:
958 script.append(b'%s %d 0\n' % (salt, line))
958 script.append(b'%s %d 0\n' % (salt, line))
959 else:
959 else:
960 script.append(b'echo %s %d $?\n' % (salt, line))
960 script.append(b'echo %s %d $?\n' % (salt, line))
961
961
962 script = []
962 script = []
963
963
964 # After we run the shell script, we re-unify the script output
964 # After we run the shell script, we re-unify the script output
965 # with non-active parts of the source, with synchronization by our
965 # with non-active parts of the source, with synchronization by our
966 # SALT line number markers. The after table contains the non-active
966 # SALT line number markers. The after table contains the non-active
967 # components, ordered by line number.
967 # components, ordered by line number.
968 after = {}
968 after = {}
969
969
970 # Expected shell script output.
970 # Expected shell script output.
971 expected = {}
971 expected = {}
972
972
973 pos = prepos = -1
973 pos = prepos = -1
974
974
975 # True or False when in a true or false conditional section
975 # True or False when in a true or false conditional section
976 skipping = None
976 skipping = None
977
977
978 # We keep track of whether or not we're in a Python block so we
978 # We keep track of whether or not we're in a Python block so we
979 # can generate the surrounding doctest magic.
979 # can generate the surrounding doctest magic.
980 inpython = False
980 inpython = False
981
981
982 if self._debug:
982 if self._debug:
983 script.append(b'set -x\n')
983 script.append(b'set -x\n')
984 if os.getenv('MSYSTEM'):
984 if os.getenv('MSYSTEM'):
985 script.append(b'alias pwd="pwd -W"\n')
985 script.append(b'alias pwd="pwd -W"\n')
986
986
987 for n, l in enumerate(lines):
987 for n, l in enumerate(lines):
988 if not l.endswith(b'\n'):
988 if not l.endswith(b'\n'):
989 l += b'\n'
989 l += b'\n'
990 if l.startswith(b'#require'):
990 if l.startswith(b'#require'):
991 lsplit = l.split()
991 lsplit = l.split()
992 if len(lsplit) < 2 or lsplit[0] != b'#require':
992 if len(lsplit) < 2 or lsplit[0] != b'#require':
993 after.setdefault(pos, []).append(' !!! invalid #require\n')
993 after.setdefault(pos, []).append(' !!! invalid #require\n')
994 haveresult, message = self._hghave(lsplit[1:])
994 haveresult, message = self._hghave(lsplit[1:])
995 if not haveresult:
995 if not haveresult:
996 script = [b'echo "%s"\nexit 80\n' % message]
996 script = [b'echo "%s"\nexit 80\n' % message]
997 break
997 break
998 after.setdefault(pos, []).append(l)
998 after.setdefault(pos, []).append(l)
999 elif l.startswith(b'#if'):
999 elif l.startswith(b'#if'):
1000 lsplit = l.split()
1000 lsplit = l.split()
1001 if len(lsplit) < 2 or lsplit[0] != b'#if':
1001 if len(lsplit) < 2 or lsplit[0] != b'#if':
1002 after.setdefault(pos, []).append(' !!! invalid #if\n')
1002 after.setdefault(pos, []).append(' !!! invalid #if\n')
1003 if skipping is not None:
1003 if skipping is not None:
1004 after.setdefault(pos, []).append(' !!! nested #if\n')
1004 after.setdefault(pos, []).append(' !!! nested #if\n')
1005 skipping = not self._hghave(lsplit[1:])[0]
1005 skipping = not self._hghave(lsplit[1:])[0]
1006 after.setdefault(pos, []).append(l)
1006 after.setdefault(pos, []).append(l)
1007 elif l.startswith(b'#else'):
1007 elif l.startswith(b'#else'):
1008 if skipping is None:
1008 if skipping is None:
1009 after.setdefault(pos, []).append(' !!! missing #if\n')
1009 after.setdefault(pos, []).append(' !!! missing #if\n')
1010 skipping = not skipping
1010 skipping = not skipping
1011 after.setdefault(pos, []).append(l)
1011 after.setdefault(pos, []).append(l)
1012 elif l.startswith(b'#endif'):
1012 elif l.startswith(b'#endif'):
1013 if skipping is None:
1013 if skipping is None:
1014 after.setdefault(pos, []).append(' !!! missing #if\n')
1014 after.setdefault(pos, []).append(' !!! missing #if\n')
1015 skipping = None
1015 skipping = None
1016 after.setdefault(pos, []).append(l)
1016 after.setdefault(pos, []).append(l)
1017 elif skipping:
1017 elif skipping:
1018 after.setdefault(pos, []).append(l)
1018 after.setdefault(pos, []).append(l)
1019 elif l.startswith(b' >>> '): # python inlines
1019 elif l.startswith(b' >>> '): # python inlines
1020 after.setdefault(pos, []).append(l)
1020 after.setdefault(pos, []).append(l)
1021 prepos = pos
1021 prepos = pos
1022 pos = n
1022 pos = n
1023 if not inpython:
1023 if not inpython:
1024 # We've just entered a Python block. Add the header.
1024 # We've just entered a Python block. Add the header.
1025 inpython = True
1025 inpython = True
1026 addsalt(prepos, False) # Make sure we report the exit code.
1026 addsalt(prepos, False) # Make sure we report the exit code.
1027 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1027 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1028 addsalt(n, True)
1028 addsalt(n, True)
1029 script.append(l[2:])
1029 script.append(l[2:])
1030 elif l.startswith(b' ... '): # python inlines
1030 elif l.startswith(b' ... '): # python inlines
1031 after.setdefault(prepos, []).append(l)
1031 after.setdefault(prepos, []).append(l)
1032 script.append(l[2:])
1032 script.append(l[2:])
1033 elif l.startswith(b' $ '): # commands
1033 elif l.startswith(b' $ '): # commands
1034 if inpython:
1034 if inpython:
1035 script.append(b'EOF\n')
1035 script.append(b'EOF\n')
1036 inpython = False
1036 inpython = False
1037 after.setdefault(pos, []).append(l)
1037 after.setdefault(pos, []).append(l)
1038 prepos = pos
1038 prepos = pos
1039 pos = n
1039 pos = n
1040 addsalt(n, False)
1040 addsalt(n, False)
1041 cmd = l[4:].split()
1041 cmd = l[4:].split()
1042 if len(cmd) == 2 and cmd[0] == b'cd':
1042 if len(cmd) == 2 and cmd[0] == b'cd':
1043 l = b' $ cd %s || exit 1\n' % cmd[1]
1043 l = b' $ cd %s || exit 1\n' % cmd[1]
1044 script.append(l[4:])
1044 script.append(l[4:])
1045 elif l.startswith(b' > '): # continuations
1045 elif l.startswith(b' > '): # continuations
1046 after.setdefault(prepos, []).append(l)
1046 after.setdefault(prepos, []).append(l)
1047 script.append(l[4:])
1047 script.append(l[4:])
1048 elif l.startswith(b' '): # results
1048 elif l.startswith(b' '): # results
1049 # Queue up a list of expected results.
1049 # Queue up a list of expected results.
1050 expected.setdefault(pos, []).append(l[2:])
1050 expected.setdefault(pos, []).append(l[2:])
1051 else:
1051 else:
1052 if inpython:
1052 if inpython:
1053 script.append(b'EOF\n')
1053 script.append(b'EOF\n')
1054 inpython = False
1054 inpython = False
1055 # Non-command/result. Queue up for merged output.
1055 # Non-command/result. Queue up for merged output.
1056 after.setdefault(pos, []).append(l)
1056 after.setdefault(pos, []).append(l)
1057
1057
1058 if inpython:
1058 if inpython:
1059 script.append(b'EOF\n')
1059 script.append(b'EOF\n')
1060 if skipping is not None:
1060 if skipping is not None:
1061 after.setdefault(pos, []).append(' !!! missing #endif\n')
1061 after.setdefault(pos, []).append(' !!! missing #endif\n')
1062 addsalt(n + 1, False)
1062 addsalt(n + 1, False)
1063
1063
1064 return salt, script, after, expected
1064 return salt, script, after, expected
1065
1065
1066 def _processoutput(self, exitcode, output, salt, after, expected):
1066 def _processoutput(self, exitcode, output, salt, after, expected):
1067 # Merge the script output back into a unified test.
1067 # Merge the script output back into a unified test.
1068 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1068 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1069 if exitcode != 0:
1069 if exitcode != 0:
1070 warnonly = 3
1070 warnonly = 3
1071
1071
1072 pos = -1
1072 pos = -1
1073 postout = []
1073 postout = []
1074 for l in output:
1074 for l in output:
1075 lout, lcmd = l, None
1075 lout, lcmd = l, None
1076 if salt in l:
1076 if salt in l:
1077 lout, lcmd = l.split(salt, 1)
1077 lout, lcmd = l.split(salt, 1)
1078
1078
1079 while lout:
1079 while lout:
1080 if not lout.endswith(b'\n'):
1080 if not lout.endswith(b'\n'):
1081 lout += b' (no-eol)\n'
1081 lout += b' (no-eol)\n'
1082
1082
1083 # Find the expected output at the current position.
1083 # Find the expected output at the current position.
1084 el = None
1084 el = None
1085 if expected.get(pos, None):
1085 if expected.get(pos, None):
1086 el = expected[pos].pop(0)
1086 el = expected[pos].pop(0)
1087
1087
1088 r = TTest.linematch(el, lout)
1088 r = TTest.linematch(el, lout)
1089 if isinstance(r, str):
1089 if isinstance(r, str):
1090 if r == '+glob':
1090 if r == '+glob':
1091 lout = el[:-1] + ' (glob)\n'
1091 lout = el[:-1] + ' (glob)\n'
1092 r = '' # Warn only this line.
1092 r = '' # Warn only this line.
1093 elif r == '-glob':
1093 elif r == '-glob':
1094 lout = ''.join(el.rsplit(' (glob)', 1))
1094 lout = ''.join(el.rsplit(' (glob)', 1))
1095 r = '' # Warn only this line.
1095 r = '' # Warn only this line.
1096 elif r == "retry":
1096 elif r == "retry":
1097 postout.append(b' ' + el)
1097 postout.append(b' ' + el)
1098 continue
1098 continue
1099 else:
1099 else:
1100 log('\ninfo, unknown linematch result: %r\n' % r)
1100 log('\ninfo, unknown linematch result: %r\n' % r)
1101 r = False
1101 r = False
1102 if r:
1102 if r:
1103 postout.append(b' ' + el)
1103 postout.append(b' ' + el)
1104 else:
1104 else:
1105 if self.NEEDESCAPE(lout):
1105 if self.NEEDESCAPE(lout):
1106 lout = TTest._stringescape(b'%s (esc)\n' %
1106 lout = TTest._stringescape(b'%s (esc)\n' %
1107 lout.rstrip(b'\n'))
1107 lout.rstrip(b'\n'))
1108 postout.append(b' ' + lout) # Let diff deal with it.
1108 postout.append(b' ' + lout) # Let diff deal with it.
1109 if r != '': # If line failed.
1109 if r != '': # If line failed.
1110 warnonly = 3 # for sure not
1110 warnonly = 3 # for sure not
1111 elif warnonly == 1: # Is "not yet" and line is warn only.
1111 elif warnonly == 1: # Is "not yet" and line is warn only.
1112 warnonly = 2 # Yes do warn.
1112 warnonly = 2 # Yes do warn.
1113 break
1113 break
1114
1114
1115 # clean up any optional leftovers
1115 # clean up any optional leftovers
1116 while expected.get(pos, None):
1116 while expected.get(pos, None):
1117 el = expected[pos].pop(0)
1117 el = expected[pos].pop(0)
1118 if not el.endswith(b" (?)\n"):
1118 if not el.endswith(b" (?)\n"):
1119 expected[pos].insert(0, el)
1119 expected[pos].insert(0, el)
1120 break
1120 break
1121 postout.append(b' ' + el)
1121 postout.append(b' ' + el)
1122
1122
1123 if lcmd:
1123 if lcmd:
1124 # Add on last return code.
1124 # Add on last return code.
1125 ret = int(lcmd.split()[1])
1125 ret = int(lcmd.split()[1])
1126 if ret != 0:
1126 if ret != 0:
1127 postout.append(b' [%d]\n' % ret)
1127 postout.append(b' [%d]\n' % ret)
1128 if pos in after:
1128 if pos in after:
1129 # Merge in non-active test bits.
1129 # Merge in non-active test bits.
1130 postout += after.pop(pos)
1130 postout += after.pop(pos)
1131 pos = int(lcmd.split()[0])
1131 pos = int(lcmd.split()[0])
1132
1132
1133 if pos in after:
1133 if pos in after:
1134 postout += after.pop(pos)
1134 postout += after.pop(pos)
1135
1135
1136 if warnonly == 2:
1136 if warnonly == 2:
1137 exitcode = False # Set exitcode to warned.
1137 exitcode = False # Set exitcode to warned.
1138
1138
1139 return exitcode, postout
1139 return exitcode, postout
1140
1140
1141 @staticmethod
1141 @staticmethod
1142 def rematch(el, l):
1142 def rematch(el, l):
1143 try:
1143 try:
1144 # use \Z to ensure that the regex matches to the end of the string
1144 # use \Z to ensure that the regex matches to the end of the string
1145 if os.name == 'nt':
1145 if os.name == 'nt':
1146 return re.match(el + br'\r?\n\Z', l)
1146 return re.match(el + br'\r?\n\Z', l)
1147 return re.match(el + br'\n\Z', l)
1147 return re.match(el + br'\n\Z', l)
1148 except re.error:
1148 except re.error:
1149 # el is an invalid regex
1149 # el is an invalid regex
1150 return False
1150 return False
1151
1151
1152 @staticmethod
1152 @staticmethod
1153 def globmatch(el, l):
1153 def globmatch(el, l):
1154 # The only supported special characters are * and ? plus / which also
1154 # The only supported special characters are * and ? plus / which also
1155 # matches \ on windows. Escaping of these characters is supported.
1155 # matches \ on windows. Escaping of these characters is supported.
1156 if el + b'\n' == l:
1156 if el + b'\n' == l:
1157 if os.altsep:
1157 if os.altsep:
1158 # matching on "/" is not needed for this line
1158 # matching on "/" is not needed for this line
1159 for pat in checkcodeglobpats:
1159 for pat in checkcodeglobpats:
1160 if pat.match(el):
1160 if pat.match(el):
1161 return True
1161 return True
1162 return b'-glob'
1162 return b'-glob'
1163 return True
1163 return True
1164 i, n = 0, len(el)
1164 i, n = 0, len(el)
1165 res = b''
1165 res = b''
1166 while i < n:
1166 while i < n:
1167 c = el[i:i + 1]
1167 c = el[i:i + 1]
1168 i += 1
1168 i += 1
1169 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1169 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1170 res += el[i - 1:i + 1]
1170 res += el[i - 1:i + 1]
1171 i += 1
1171 i += 1
1172 elif c == b'*':
1172 elif c == b'*':
1173 res += b'.*'
1173 res += b'.*'
1174 elif c == b'?':
1174 elif c == b'?':
1175 res += b'.'
1175 res += b'.'
1176 elif c == b'/' and os.altsep:
1176 elif c == b'/' and os.altsep:
1177 res += b'[/\\\\]'
1177 res += b'[/\\\\]'
1178 else:
1178 else:
1179 res += re.escape(c)
1179 res += re.escape(c)
1180 return TTest.rematch(res, l)
1180 return TTest.rematch(res, l)
1181
1181
1182 @staticmethod
1182 @staticmethod
1183 def linematch(el, l):
1183 def linematch(el, l):
1184 retry = False
1184 retry = False
1185 if el == l: # perfect match (fast)
1185 if el == l: # perfect match (fast)
1186 return True
1186 return True
1187 if el:
1187 if el:
1188 if el.endswith(b" (?)\n"):
1188 if el.endswith(b" (?)\n"):
1189 retry = "retry"
1189 retry = "retry"
1190 el = el[:-5] + "\n"
1190 el = el[:-5] + "\n"
1191 if el.endswith(b" (esc)\n"):
1191 if el.endswith(b" (esc)\n"):
1192 if PYTHON3:
1192 if PYTHON3:
1193 el = el[:-7].decode('unicode_escape') + '\n'
1193 el = el[:-7].decode('unicode_escape') + '\n'
1194 el = el.encode('utf-8')
1194 el = el.encode('utf-8')
1195 else:
1195 else:
1196 el = el[:-7].decode('string-escape') + '\n'
1196 el = el[:-7].decode('string-escape') + '\n'
1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1198 return True
1198 return True
1199 if el.endswith(b" (re)\n"):
1199 if el.endswith(b" (re)\n"):
1200 return TTest.rematch(el[:-6], l) or retry
1200 return TTest.rematch(el[:-6], l) or retry
1201 if el.endswith(b" (glob)\n"):
1201 if el.endswith(b" (glob)\n"):
1202 # ignore '(glob)' added to l by 'replacements'
1202 # ignore '(glob)' added to l by 'replacements'
1203 if l.endswith(b" (glob)\n"):
1203 if l.endswith(b" (glob)\n"):
1204 l = l[:-8] + b"\n"
1204 l = l[:-8] + b"\n"
1205 return TTest.globmatch(el[:-8], l)
1205 return TTest.globmatch(el[:-8], l)
1206 if os.altsep and l.replace(b'\\', b'/') == el:
1206 if os.altsep and l.replace(b'\\', b'/') == el:
1207 return b'+glob'
1207 return b'+glob'
1208 return retry
1208 return retry
1209
1209
1210 @staticmethod
1210 @staticmethod
1211 def parsehghaveoutput(lines):
1211 def parsehghaveoutput(lines):
1212 '''Parse hghave log lines.
1212 '''Parse hghave log lines.
1213
1213
1214 Return tuple of lists (missing, failed):
1214 Return tuple of lists (missing, failed):
1215 * the missing/unknown features
1215 * the missing/unknown features
1216 * the features for which existence check failed'''
1216 * the features for which existence check failed'''
1217 missing = []
1217 missing = []
1218 failed = []
1218 failed = []
1219 for line in lines:
1219 for line in lines:
1220 if line.startswith(TTest.SKIPPED_PREFIX):
1220 if line.startswith(TTest.SKIPPED_PREFIX):
1221 line = line.splitlines()[0]
1221 line = line.splitlines()[0]
1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1223 elif line.startswith(TTest.FAILED_PREFIX):
1223 elif line.startswith(TTest.FAILED_PREFIX):
1224 line = line.splitlines()[0]
1224 line = line.splitlines()[0]
1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1226
1226
1227 return missing, failed
1227 return missing, failed
1228
1228
1229 @staticmethod
1229 @staticmethod
1230 def _escapef(m):
1230 def _escapef(m):
1231 return TTest.ESCAPEMAP[m.group(0)]
1231 return TTest.ESCAPEMAP[m.group(0)]
1232
1232
1233 @staticmethod
1233 @staticmethod
1234 def _stringescape(s):
1234 def _stringescape(s):
1235 return TTest.ESCAPESUB(TTest._escapef, s)
1235 return TTest.ESCAPESUB(TTest._escapef, s)
1236
1236
1237 iolock = threading.RLock()
1237 iolock = threading.RLock()
1238
1238
1239 class SkipTest(Exception):
1239 class SkipTest(Exception):
1240 """Raised to indicate that a test is to be skipped."""
1240 """Raised to indicate that a test is to be skipped."""
1241
1241
1242 class IgnoreTest(Exception):
1242 class IgnoreTest(Exception):
1243 """Raised to indicate that a test is to be ignored."""
1243 """Raised to indicate that a test is to be ignored."""
1244
1244
1245 class WarnTest(Exception):
1245 class WarnTest(Exception):
1246 """Raised to indicate that a test warned."""
1246 """Raised to indicate that a test warned."""
1247
1247
1248 class ReportedTest(Exception):
1248 class ReportedTest(Exception):
1249 """Raised to indicate that a test already reported."""
1249 """Raised to indicate that a test already reported."""
1250
1250
1251 class TestResult(unittest._TextTestResult):
1251 class TestResult(unittest._TextTestResult):
1252 """Holds results when executing via unittest."""
1252 """Holds results when executing via unittest."""
1253 # Don't worry too much about accessing the non-public _TextTestResult.
1253 # Don't worry too much about accessing the non-public _TextTestResult.
1254 # It is relatively common in Python testing tools.
1254 # It is relatively common in Python testing tools.
1255 def __init__(self, options, *args, **kwargs):
1255 def __init__(self, options, *args, **kwargs):
1256 super(TestResult, self).__init__(*args, **kwargs)
1256 super(TestResult, self).__init__(*args, **kwargs)
1257
1257
1258 self._options = options
1258 self._options = options
1259
1259
1260 # unittest.TestResult didn't have skipped until 2.7. We need to
1260 # unittest.TestResult didn't have skipped until 2.7. We need to
1261 # polyfill it.
1261 # polyfill it.
1262 self.skipped = []
1262 self.skipped = []
1263
1263
1264 # We have a custom "ignored" result that isn't present in any Python
1264 # We have a custom "ignored" result that isn't present in any Python
1265 # unittest implementation. It is very similar to skipped. It may make
1265 # unittest implementation. It is very similar to skipped. It may make
1266 # sense to map it into skip some day.
1266 # sense to map it into skip some day.
1267 self.ignored = []
1267 self.ignored = []
1268
1268
1269 # We have a custom "warned" result that isn't present in any Python
1269 # We have a custom "warned" result that isn't present in any Python
1270 # unittest implementation. It is very similar to failed. It may make
1270 # unittest implementation. It is very similar to failed. It may make
1271 # sense to map it into fail some day.
1271 # sense to map it into fail some day.
1272 self.warned = []
1272 self.warned = []
1273
1273
1274 self.times = []
1274 self.times = []
1275 self._firststarttime = None
1275 self._firststarttime = None
1276 # Data stored for the benefit of generating xunit reports.
1276 # Data stored for the benefit of generating xunit reports.
1277 self.successes = []
1277 self.successes = []
1278 self.faildata = {}
1278 self.faildata = {}
1279
1279
1280 def addFailure(self, test, reason):
1280 def addFailure(self, test, reason):
1281 self.failures.append((test, reason))
1281 self.failures.append((test, reason))
1282
1282
1283 if self._options.first:
1283 if self._options.first:
1284 self.stop()
1284 self.stop()
1285 else:
1285 else:
1286 with iolock:
1286 with iolock:
1287 if reason == "timed out":
1287 if reason == "timed out":
1288 self.stream.write('t')
1288 self.stream.write('t')
1289 else:
1289 else:
1290 if not self._options.nodiff:
1290 if not self._options.nodiff:
1291 self.stream.write('\nERROR: %s output changed\n' % test)
1291 self.stream.write('\nERROR: %s output changed\n' % test)
1292 self.stream.write('!')
1292 self.stream.write('!')
1293
1293
1294 self.stream.flush()
1294 self.stream.flush()
1295
1295
1296 def addSuccess(self, test):
1296 def addSuccess(self, test):
1297 with iolock:
1297 with iolock:
1298 super(TestResult, self).addSuccess(test)
1298 super(TestResult, self).addSuccess(test)
1299 self.successes.append(test)
1299 self.successes.append(test)
1300
1300
1301 def addError(self, test, err):
1301 def addError(self, test, err):
1302 super(TestResult, self).addError(test, err)
1302 super(TestResult, self).addError(test, err)
1303 if self._options.first:
1303 if self._options.first:
1304 self.stop()
1304 self.stop()
1305
1305
1306 # Polyfill.
1306 # Polyfill.
1307 def addSkip(self, test, reason):
1307 def addSkip(self, test, reason):
1308 self.skipped.append((test, reason))
1308 self.skipped.append((test, reason))
1309 with iolock:
1309 with iolock:
1310 if self.showAll:
1310 if self.showAll:
1311 self.stream.writeln('skipped %s' % reason)
1311 self.stream.writeln('skipped %s' % reason)
1312 else:
1312 else:
1313 self.stream.write('s')
1313 self.stream.write('s')
1314 self.stream.flush()
1314 self.stream.flush()
1315
1315
1316 def addIgnore(self, test, reason):
1316 def addIgnore(self, test, reason):
1317 self.ignored.append((test, reason))
1317 self.ignored.append((test, reason))
1318 with iolock:
1318 with iolock:
1319 if self.showAll:
1319 if self.showAll:
1320 self.stream.writeln('ignored %s' % reason)
1320 self.stream.writeln('ignored %s' % reason)
1321 else:
1321 else:
1322 if reason not in ('not retesting', "doesn't match keyword"):
1322 if reason not in ('not retesting', "doesn't match keyword"):
1323 self.stream.write('i')
1323 self.stream.write('i')
1324 else:
1324 else:
1325 self.testsRun += 1
1325 self.testsRun += 1
1326 self.stream.flush()
1326 self.stream.flush()
1327
1327
1328 def addWarn(self, test, reason):
1328 def addWarn(self, test, reason):
1329 self.warned.append((test, reason))
1329 self.warned.append((test, reason))
1330
1330
1331 if self._options.first:
1331 if self._options.first:
1332 self.stop()
1332 self.stop()
1333
1333
1334 with iolock:
1334 with iolock:
1335 if self.showAll:
1335 if self.showAll:
1336 self.stream.writeln('warned %s' % reason)
1336 self.stream.writeln('warned %s' % reason)
1337 else:
1337 else:
1338 self.stream.write('~')
1338 self.stream.write('~')
1339 self.stream.flush()
1339 self.stream.flush()
1340
1340
1341 def addOutputMismatch(self, test, ret, got, expected):
1341 def addOutputMismatch(self, test, ret, got, expected):
1342 """Record a mismatch in test output for a particular test."""
1342 """Record a mismatch in test output for a particular test."""
1343 if self.shouldStop:
1343 if self.shouldStop:
1344 # Don't print; some other test case already failed and
1344 # Don't print; some other test case already failed and
1345 # printed, so we're just stale and probably failed due to our
1345 # printed, so we're just stale and probably failed due to our
1346 # temp dir getting cleaned up.
1346 # temp dir getting cleaned up.
1347 return
1347 return
1348
1348
1349 accepted = False
1349 accepted = False
1350 failed = False
1350 failed = False
1351 lines = []
1351 lines = []
1352
1352
1353 with iolock:
1353 with iolock:
1354 if self._options.nodiff:
1354 if self._options.nodiff:
1355 pass
1355 pass
1356 elif self._options.view:
1356 elif self._options.view:
1357 v = self._options.view
1357 v = self._options.view
1358 if PYTHON3:
1358 if PYTHON3:
1359 v = _bytespath(v)
1359 v = _bytespath(v)
1360 os.system(b"%s %s %s" %
1360 os.system(b"%s %s %s" %
1361 (v, test.refpath, test.errpath))
1361 (v, test.refpath, test.errpath))
1362 else:
1362 else:
1363 servefail, lines = getdiff(expected, got,
1363 servefail, lines = getdiff(expected, got,
1364 test.refpath, test.errpath)
1364 test.refpath, test.errpath)
1365 if servefail:
1365 if servefail:
1366 self.addFailure(
1366 self.addFailure(
1367 test,
1367 test,
1368 'server failed to start (HGPORT=%s)' % test._startport)
1368 'server failed to start (HGPORT=%s)' % test._startport)
1369 raise ReportedTest('server failed to start')
1369 raise ReportedTest('server failed to start')
1370 else:
1370 else:
1371 self.stream.write('\n')
1371 self.stream.write('\n')
1372 for line in lines:
1372 for line in lines:
1373 if PYTHON3:
1373 if PYTHON3:
1374 self.stream.flush()
1374 self.stream.flush()
1375 self.stream.buffer.write(line)
1375 self.stream.buffer.write(line)
1376 self.stream.buffer.flush()
1376 self.stream.buffer.flush()
1377 else:
1377 else:
1378 self.stream.write(line)
1378 self.stream.write(line)
1379 self.stream.flush()
1379 self.stream.flush()
1380
1380
1381 # handle interactive prompt without releasing iolock
1381 # handle interactive prompt without releasing iolock
1382 if self._options.interactive:
1382 if self._options.interactive:
1383 self.stream.write('Accept this change? [n] ')
1383 self.stream.write('Accept this change? [n] ')
1384 answer = sys.stdin.readline().strip()
1384 answer = sys.stdin.readline().strip()
1385 if answer.lower() in ('y', 'yes'):
1385 if answer.lower() in ('y', 'yes'):
1386 if test.name.endswith('.t'):
1386 if test.name.endswith('.t'):
1387 rename(test.errpath, test.path)
1387 rename(test.errpath, test.path)
1388 else:
1388 else:
1389 rename(test.errpath, '%s.out' % test.path)
1389 rename(test.errpath, '%s.out' % test.path)
1390 accepted = True
1390 accepted = True
1391 if not accepted and not failed:
1391 if not accepted and not failed:
1392 self.faildata[test.name] = b''.join(lines)
1392 self.faildata[test.name] = b''.join(lines)
1393
1393
1394 return accepted
1394 return accepted
1395
1395
1396 def startTest(self, test):
1396 def startTest(self, test):
1397 super(TestResult, self).startTest(test)
1397 super(TestResult, self).startTest(test)
1398
1398
1399 # os.times computes the user time and system time spent by
1399 # os.times computes the user time and system time spent by
1400 # child processes, along with the real elapsed time taken by a process.
1400 # child processes, along with the real elapsed time taken by a process.
1401 # It has one limitation: it only works reliably on Linux/Unix,
1401 # It has one limitation: it only works reliably on Linux/Unix,
1402 # not on Windows.
1402 # not on Windows.
1403 test.started = os.times()
1403 test.started = os.times()
1404 if self._firststarttime is None: # thread racy but irrelevant
1404 if self._firststarttime is None: # thread racy but irrelevant
1405 self._firststarttime = test.started[4]
1405 self._firststarttime = test.started[4]
1406
1406
1407 def stopTest(self, test, interrupted=False):
1407 def stopTest(self, test, interrupted=False):
1408 super(TestResult, self).stopTest(test)
1408 super(TestResult, self).stopTest(test)
1409
1409
1410 test.stopped = os.times()
1410 test.stopped = os.times()
1411
1411
1412 starttime = test.started
1412 starttime = test.started
1413 endtime = test.stopped
1413 endtime = test.stopped
1414 origin = self._firststarttime
1414 origin = self._firststarttime
1415 self.times.append((test.name,
1415 self.times.append((test.name,
1416 endtime[2] - starttime[2], # user space CPU time
1416 endtime[2] - starttime[2], # user space CPU time
1417 endtime[3] - starttime[3], # sys space CPU time
1417 endtime[3] - starttime[3], # sys space CPU time
1418 endtime[4] - starttime[4], # real time
1418 endtime[4] - starttime[4], # real time
1419 starttime[4] - origin, # start date in run context
1419 starttime[4] - origin, # start date in run context
1420 endtime[4] - origin, # end date in run context
1420 endtime[4] - origin, # end date in run context
1421 ))
1421 ))
1422
1422
1423 if interrupted:
1423 if interrupted:
1424 with iolock:
1424 with iolock:
1425 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1425 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1426 test.name, self.times[-1][3]))
1426 test.name, self.times[-1][3]))
1427
1427
1428 class TestSuite(unittest.TestSuite):
1428 class TestSuite(unittest.TestSuite):
1429 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1429 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1430
1430
1431 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1431 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1432 retest=False, keywords=None, loop=False, runs_per_test=1,
1432 retest=False, keywords=None, loop=False, runs_per_test=1,
1433 loadtest=None, showchannels=False,
1433 loadtest=None, showchannels=False,
1434 *args, **kwargs):
1434 *args, **kwargs):
1435 """Create a new instance that can run tests with a configuration.
1435 """Create a new instance that can run tests with a configuration.
1436
1436
1437 testdir specifies the directory where tests are executed from. This
1437 testdir specifies the directory where tests are executed from. This
1438 is typically the ``tests`` directory from Mercurial's source
1438 is typically the ``tests`` directory from Mercurial's source
1439 repository.
1439 repository.
1440
1440
1441 jobs specifies the number of jobs to run concurrently. Each test
1441 jobs specifies the number of jobs to run concurrently. Each test
1442 executes on its own thread. Tests actually spawn new processes, so
1442 executes on its own thread. Tests actually spawn new processes, so
1443 state mutation should not be an issue.
1443 state mutation should not be an issue.
1444
1444
1445 If there is only one job, it will use the main thread.
1445 If there is only one job, it will use the main thread.
1446
1446
1447 whitelist and blacklist denote tests that have been whitelisted and
1447 whitelist and blacklist denote tests that have been whitelisted and
1448 blacklisted, respectively. These arguments don't belong in TestSuite.
1448 blacklisted, respectively. These arguments don't belong in TestSuite.
1449 Instead, whitelist and blacklist should be handled by the thing that
1449 Instead, whitelist and blacklist should be handled by the thing that
1450 populates the TestSuite with tests. They are present to preserve
1450 populates the TestSuite with tests. They are present to preserve
1451 backwards compatible behavior which reports skipped tests as part
1451 backwards compatible behavior which reports skipped tests as part
1452 of the results.
1452 of the results.
1453
1453
1454 retest denotes whether to retest failed tests. This arguably belongs
1454 retest denotes whether to retest failed tests. This arguably belongs
1455 outside of TestSuite.
1455 outside of TestSuite.
1456
1456
1457 keywords denotes key words that will be used to filter which tests
1457 keywords denotes key words that will be used to filter which tests
1458 to execute. This arguably belongs outside of TestSuite.
1458 to execute. This arguably belongs outside of TestSuite.
1459
1459
1460 loop denotes whether to loop over tests forever.
1460 loop denotes whether to loop over tests forever.
1461 """
1461 """
1462 super(TestSuite, self).__init__(*args, **kwargs)
1462 super(TestSuite, self).__init__(*args, **kwargs)
1463
1463
1464 self._jobs = jobs
1464 self._jobs = jobs
1465 self._whitelist = whitelist
1465 self._whitelist = whitelist
1466 self._blacklist = blacklist
1466 self._blacklist = blacklist
1467 self._retest = retest
1467 self._retest = retest
1468 self._keywords = keywords
1468 self._keywords = keywords
1469 self._loop = loop
1469 self._loop = loop
1470 self._runs_per_test = runs_per_test
1470 self._runs_per_test = runs_per_test
1471 self._loadtest = loadtest
1471 self._loadtest = loadtest
1472 self._showchannels = showchannels
1472 self._showchannels = showchannels
1473
1473
1474 def run(self, result):
1474 def run(self, result):
1475 # We have a number of filters that need to be applied. We do this
1475 # We have a number of filters that need to be applied. We do this
1476 # here instead of inside Test because it makes the running logic for
1476 # here instead of inside Test because it makes the running logic for
1477 # Test simpler.
1477 # Test simpler.
1478 tests = []
1478 tests = []
1479 num_tests = [0]
1479 num_tests = [0]
1480 for test in self._tests:
1480 for test in self._tests:
1481 def get():
1481 def get():
1482 num_tests[0] += 1
1482 num_tests[0] += 1
1483 if getattr(test, 'should_reload', False):
1483 if getattr(test, 'should_reload', False):
1484 return self._loadtest(test.bname, num_tests[0])
1484 return self._loadtest(test.bname, num_tests[0])
1485 return test
1485 return test
1486 if not os.path.exists(test.path):
1486 if not os.path.exists(test.path):
1487 result.addSkip(test, "Doesn't exist")
1487 result.addSkip(test, "Doesn't exist")
1488 continue
1488 continue
1489
1489
1490 if not (self._whitelist and test.name in self._whitelist):
1490 if not (self._whitelist and test.name in self._whitelist):
1491 if self._blacklist and test.bname in self._blacklist:
1491 if self._blacklist and test.bname in self._blacklist:
1492 result.addSkip(test, 'blacklisted')
1492 result.addSkip(test, 'blacklisted')
1493 continue
1493 continue
1494
1494
1495 if self._retest and not os.path.exists(test.errpath):
1495 if self._retest and not os.path.exists(test.errpath):
1496 result.addIgnore(test, 'not retesting')
1496 result.addIgnore(test, 'not retesting')
1497 continue
1497 continue
1498
1498
1499 if self._keywords:
1499 if self._keywords:
1500 f = open(test.path, 'rb')
1500 f = open(test.path, 'rb')
1501 t = f.read().lower() + test.bname.lower()
1501 t = f.read().lower() + test.bname.lower()
1502 f.close()
1502 f.close()
1503 ignored = False
1503 ignored = False
1504 for k in self._keywords.lower().split():
1504 for k in self._keywords.lower().split():
1505 if k not in t:
1505 if k not in t:
1506 result.addIgnore(test, "doesn't match keyword")
1506 result.addIgnore(test, "doesn't match keyword")
1507 ignored = True
1507 ignored = True
1508 break
1508 break
1509
1509
1510 if ignored:
1510 if ignored:
1511 continue
1511 continue
1512 for _ in xrange(self._runs_per_test):
1512 for _ in xrange(self._runs_per_test):
1513 tests.append(get())
1513 tests.append(get())
1514
1514
1515 runtests = list(tests)
1515 runtests = list(tests)
1516 done = queue.Queue()
1516 done = queue.Queue()
1517 running = 0
1517 running = 0
1518
1518
1519 channels = [""] * self._jobs
1519 channels = [""] * self._jobs
1520
1520
1521 def job(test, result):
1521 def job(test, result):
1522 for n, v in enumerate(channels):
1522 for n, v in enumerate(channels):
1523 if not v:
1523 if not v:
1524 channel = n
1524 channel = n
1525 break
1525 break
1526 channels[channel] = "=" + test.name[5:].split(".")[0]
1526 channels[channel] = "=" + test.name[5:].split(".")[0]
1527 try:
1527 try:
1528 test(result)
1528 test(result)
1529 channels[channel] = ''
1529 channels[channel] = ''
1530 done.put(None)
1530 done.put(None)
1531 except KeyboardInterrupt:
1531 except KeyboardInterrupt:
1532 channels[channel] = ''
1532 channels[channel] = ''
1533 except: # re-raises
1533 except: # re-raises
1534 done.put(('!', test, 'run-test raised an error, see traceback'))
1534 done.put(('!', test, 'run-test raised an error, see traceback'))
1535 raise
1535 raise
1536
1536
1537 def stat():
1537 def stat():
1538 count = 0
1538 count = 0
1539 while channels:
1539 while channels:
1540 d = '\n%03s ' % count
1540 d = '\n%03s ' % count
1541 for n, v in enumerate(channels):
1541 for n, v in enumerate(channels):
1542 if v:
1542 if v:
1543 d += v[0]
1543 d += v[0]
1544 channels[n] = v[1:] or '.'
1544 channels[n] = v[1:] or '.'
1545 else:
1545 else:
1546 d += ' '
1546 d += ' '
1547 d += ' '
1547 d += ' '
1548 with iolock:
1548 with iolock:
1549 sys.stdout.write(d + ' ')
1549 sys.stdout.write(d + ' ')
1550 sys.stdout.flush()
1550 sys.stdout.flush()
1551 for x in xrange(10):
1551 for x in xrange(10):
1552 if channels:
1552 if channels:
1553 time.sleep(.1)
1553 time.sleep(.1)
1554 count += 1
1554 count += 1
1555
1555
1556 stoppedearly = False
1556 stoppedearly = False
1557
1557
1558 if self._showchannels:
1558 if self._showchannels:
1559 statthread = threading.Thread(target=stat, name="stat")
1559 statthread = threading.Thread(target=stat, name="stat")
1560 statthread.start()
1560 statthread.start()
1561
1561
1562 try:
1562 try:
1563 while tests or running:
1563 while tests or running:
1564 if not done.empty() or running == self._jobs or not tests:
1564 if not done.empty() or running == self._jobs or not tests:
1565 try:
1565 try:
1566 done.get(True, 1)
1566 done.get(True, 1)
1567 running -= 1
1567 running -= 1
1568 if result and result.shouldStop:
1568 if result and result.shouldStop:
1569 stoppedearly = True
1569 stoppedearly = True
1570 break
1570 break
1571 except queue.Empty:
1571 except queue.Empty:
1572 continue
1572 continue
1573 if tests and not running == self._jobs:
1573 if tests and not running == self._jobs:
1574 test = tests.pop(0)
1574 test = tests.pop(0)
1575 if self._loop:
1575 if self._loop:
1576 if getattr(test, 'should_reload', False):
1576 if getattr(test, 'should_reload', False):
1577 num_tests[0] += 1
1577 num_tests[0] += 1
1578 tests.append(
1578 tests.append(
1579 self._loadtest(test.name, num_tests[0]))
1579 self._loadtest(test.name, num_tests[0]))
1580 else:
1580 else:
1581 tests.append(test)
1581 tests.append(test)
1582 if self._jobs == 1:
1582 if self._jobs == 1:
1583 job(test, result)
1583 job(test, result)
1584 else:
1584 else:
1585 t = threading.Thread(target=job, name=test.name,
1585 t = threading.Thread(target=job, name=test.name,
1586 args=(test, result))
1586 args=(test, result))
1587 t.start()
1587 t.start()
1588 running += 1
1588 running += 1
1589
1589
1590 # If we stop early we still need to wait on started tests to
1590 # If we stop early we still need to wait on started tests to
1591 # finish. Otherwise, there is a race between the test completing
1591 # finish. Otherwise, there is a race between the test completing
1592 # and the test's cleanup code running. This could result in the
1592 # and the test's cleanup code running. This could result in the
1593 # test reporting incorrect results.
1593 # test reporting incorrect results.
1594 if stoppedearly:
1594 if stoppedearly:
1595 while running:
1595 while running:
1596 try:
1596 try:
1597 done.get(True, 1)
1597 done.get(True, 1)
1598 running -= 1
1598 running -= 1
1599 except queue.Empty:
1599 except queue.Empty:
1600 continue
1600 continue
1601 except KeyboardInterrupt:
1601 except KeyboardInterrupt:
1602 for test in runtests:
1602 for test in runtests:
1603 test.abort()
1603 test.abort()
1604
1604
1605 channels = []
1605 channels = []
1606
1606
1607 return result
1607 return result
1608
1608
1609 # Save the most recent 5 wall-clock runtimes of each test to a
1609 # Save the most recent 5 wall-clock runtimes of each test to a
1610 # human-readable text file named .testtimes. Tests are sorted
1610 # human-readable text file named .testtimes. Tests are sorted
1611 # alphabetically, while times for each test are listed from oldest to
1611 # alphabetically, while times for each test are listed from oldest to
1612 # newest.
1612 # newest.
1613
1613
1614 def loadtimes(testdir):
1614 def loadtimes(testdir):
1615 times = []
1615 times = []
1616 try:
1616 try:
1617 with open(os.path.join(testdir, '.testtimes-')) as fp:
1617 with open(os.path.join(testdir, '.testtimes-')) as fp:
1618 for line in fp:
1618 for line in fp:
1619 ts = line.split()
1619 ts = line.split()
1620 times.append((ts[0], [float(t) for t in ts[1:]]))
1620 times.append((ts[0], [float(t) for t in ts[1:]]))
1621 except IOError as err:
1621 except IOError as err:
1622 if err.errno != errno.ENOENT:
1622 if err.errno != errno.ENOENT:
1623 raise
1623 raise
1624 return times
1624 return times
1625
1625
1626 def savetimes(testdir, result):
1626 def savetimes(testdir, result):
1627 saved = dict(loadtimes(testdir))
1627 saved = dict(loadtimes(testdir))
1628 maxruns = 5
1628 maxruns = 5
1629 skipped = set([str(t[0]) for t in result.skipped])
1629 skipped = set([str(t[0]) for t in result.skipped])
1630 for tdata in result.times:
1630 for tdata in result.times:
1631 test, real = tdata[0], tdata[3]
1631 test, real = tdata[0], tdata[3]
1632 if test not in skipped:
1632 if test not in skipped:
1633 ts = saved.setdefault(test, [])
1633 ts = saved.setdefault(test, [])
1634 ts.append(real)
1634 ts.append(real)
1635 ts[:] = ts[-maxruns:]
1635 ts[:] = ts[-maxruns:]
1636
1636
1637 fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
1637 fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
1638 dir=testdir, text=True)
1638 dir=testdir, text=True)
1639 with os.fdopen(fd, 'w') as fp:
1639 with os.fdopen(fd, 'w') as fp:
1640 for name, ts in sorted(saved.iteritems()):
1640 for name, ts in sorted(saved.iteritems()):
1641 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1641 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1642 timepath = os.path.join(testdir, '.testtimes')
1642 timepath = os.path.join(testdir, '.testtimes')
1643 try:
1643 try:
1644 os.unlink(timepath)
1644 os.unlink(timepath)
1645 except OSError:
1645 except OSError:
1646 pass
1646 pass
1647 try:
1647 try:
1648 os.rename(tmpname, timepath)
1648 os.rename(tmpname, timepath)
1649 except OSError:
1649 except OSError:
1650 pass
1650 pass
1651
1651
1652 class TextTestRunner(unittest.TextTestRunner):
1652 class TextTestRunner(unittest.TextTestRunner):
1653 """Custom unittest test runner that uses appropriate settings."""
1653 """Custom unittest test runner that uses appropriate settings."""
1654
1654
1655 def __init__(self, runner, *args, **kwargs):
1655 def __init__(self, runner, *args, **kwargs):
1656 super(TextTestRunner, self).__init__(*args, **kwargs)
1656 super(TextTestRunner, self).__init__(*args, **kwargs)
1657
1657
1658 self._runner = runner
1658 self._runner = runner
1659
1659
1660 def run(self, test):
1660 def run(self, test):
1661 result = TestResult(self._runner.options, self.stream,
1661 result = TestResult(self._runner.options, self.stream,
1662 self.descriptions, self.verbosity)
1662 self.descriptions, self.verbosity)
1663
1663
1664 test(result)
1664 test(result)
1665
1665
1666 failed = len(result.failures)
1666 failed = len(result.failures)
1667 warned = len(result.warned)
1667 warned = len(result.warned)
1668 skipped = len(result.skipped)
1668 skipped = len(result.skipped)
1669 ignored = len(result.ignored)
1669 ignored = len(result.ignored)
1670
1670
1671 with iolock:
1671 with iolock:
1672 self.stream.writeln('')
1672 self.stream.writeln('')
1673
1673
1674 if not self._runner.options.noskips:
1674 if not self._runner.options.noskips:
1675 for test, msg in result.skipped:
1675 for test, msg in result.skipped:
1676 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1676 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1677 for test, msg in result.warned:
1677 for test, msg in result.warned:
1678 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1678 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1679 for test, msg in result.failures:
1679 for test, msg in result.failures:
1680 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1680 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1681 for test, msg in result.errors:
1681 for test, msg in result.errors:
1682 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1682 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1683
1683
1684 if self._runner.options.xunit:
1684 if self._runner.options.xunit:
1685 with open(self._runner.options.xunit, 'wb') as xuf:
1685 with open(self._runner.options.xunit, 'wb') as xuf:
1686 timesd = dict((t[0], t[3]) for t in result.times)
1686 timesd = dict((t[0], t[3]) for t in result.times)
1687 doc = minidom.Document()
1687 doc = minidom.Document()
1688 s = doc.createElement('testsuite')
1688 s = doc.createElement('testsuite')
1689 s.setAttribute('name', 'run-tests')
1689 s.setAttribute('name', 'run-tests')
1690 s.setAttribute('tests', str(result.testsRun))
1690 s.setAttribute('tests', str(result.testsRun))
1691 s.setAttribute('errors', "0") # TODO
1691 s.setAttribute('errors', "0") # TODO
1692 s.setAttribute('failures', str(failed))
1692 s.setAttribute('failures', str(failed))
1693 s.setAttribute('skipped', str(skipped + ignored))
1693 s.setAttribute('skipped', str(skipped + ignored))
1694 doc.appendChild(s)
1694 doc.appendChild(s)
1695 for tc in result.successes:
1695 for tc in result.successes:
1696 t = doc.createElement('testcase')
1696 t = doc.createElement('testcase')
1697 t.setAttribute('name', tc.name)
1697 t.setAttribute('name', tc.name)
1698 t.setAttribute('time', '%.3f' % timesd[tc.name])
1698 t.setAttribute('time', '%.3f' % timesd[tc.name])
1699 s.appendChild(t)
1699 s.appendChild(t)
1700 for tc, err in sorted(result.faildata.items()):
1700 for tc, err in sorted(result.faildata.items()):
1701 t = doc.createElement('testcase')
1701 t = doc.createElement('testcase')
1702 t.setAttribute('name', tc)
1702 t.setAttribute('name', tc)
1703 t.setAttribute('time', '%.3f' % timesd[tc])
1703 t.setAttribute('time', '%.3f' % timesd[tc])
1704 # createCDATASection expects a unicode or it will
1704 # createCDATASection expects a unicode or it will
1705 # convert using default conversion rules, which will
1705 # convert using default conversion rules, which will
1706 # fail if string isn't ASCII.
1706 # fail if string isn't ASCII.
1707 err = cdatasafe(err).decode('utf-8', 'replace')
1707 err = cdatasafe(err).decode('utf-8', 'replace')
1708 cd = doc.createCDATASection(err)
1708 cd = doc.createCDATASection(err)
1709 t.appendChild(cd)
1709 t.appendChild(cd)
1710 s.appendChild(t)
1710 s.appendChild(t)
1711 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1711 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1712
1712
1713 if self._runner.options.json:
1713 if self._runner.options.json:
1714 if json is None:
1714 if json is None:
1715 raise ImportError("json module not installed")
1715 raise ImportError("json module not installed")
1716 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1716 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1717 with open(jsonpath, 'w') as fp:
1717 with open(jsonpath, 'w') as fp:
1718 timesd = {}
1718 timesd = {}
1719 for tdata in result.times:
1719 for tdata in result.times:
1720 test = tdata[0]
1720 test = tdata[0]
1721 timesd[test] = tdata[1:]
1721 timesd[test] = tdata[1:]
1722
1722
1723 outcome = {}
1723 outcome = {}
1724 groups = [('success', ((tc, None)
1724 groups = [('success', ((tc, None)
1725 for tc in result.successes)),
1725 for tc in result.successes)),
1726 ('failure', result.failures),
1726 ('failure', result.failures),
1727 ('skip', result.skipped)]
1727 ('skip', result.skipped)]
1728 for res, testcases in groups:
1728 for res, testcases in groups:
1729 for tc, __ in testcases:
1729 for tc, __ in testcases:
1730 if tc.name in timesd:
1730 tres = {'result': res,
1731 tres = {'result': res,
1731 'time': ('%0.3f' % timesd[tc.name][2]),
1732 'time': ('%0.3f' % timesd[tc.name][2]),
1732 'cuser': ('%0.3f' % timesd[tc.name][0]),
1733 'cuser': ('%0.3f' % timesd[tc.name][0]),
1733 'csys': ('%0.3f' % timesd[tc.name][1]),
1734 'csys': ('%0.3f' % timesd[tc.name][1]),
1734 'start': ('%0.3f' % timesd[tc.name][3]),
1735 'start': ('%0.3f' % timesd[tc.name][3]),
1735 'end': ('%0.3f' % timesd[tc.name][4]),
1736 'end': ('%0.3f' % timesd[tc.name][4]),
1736 'diff': result.faildata.get(tc.name, ''),
1737 'diff': result.faildata.get(tc.name,
1738 ''),
1737 }
1739 }
1740 else:
1741 # blacklisted test
1742 tres = {'result': res}
1743
1738 outcome[tc.name] = tres
1744 outcome[tc.name] = tres
1739 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1745 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1740 fp.writelines(("testreport =", jsonout))
1746 fp.writelines(("testreport =", jsonout))
1741
1747
1742 self._runner._checkhglib('Tested')
1748 self._runner._checkhglib('Tested')
1743
1749
1744 savetimes(self._runner._testdir, result)
1750 savetimes(self._runner._testdir, result)
1745 self.stream.writeln(
1751 self.stream.writeln(
1746 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1752 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1747 % (result.testsRun,
1753 % (result.testsRun,
1748 skipped + ignored, warned, failed))
1754 skipped + ignored, warned, failed))
1749 if failed:
1755 if failed:
1750 self.stream.writeln('python hash seed: %s' %
1756 self.stream.writeln('python hash seed: %s' %
1751 os.environ['PYTHONHASHSEED'])
1757 os.environ['PYTHONHASHSEED'])
1752 if self._runner.options.time:
1758 if self._runner.options.time:
1753 self.printtimes(result.times)
1759 self.printtimes(result.times)
1754
1760
1755 return result
1761 return result
1756
1762
1757 def printtimes(self, times):
1763 def printtimes(self, times):
1758 # iolock held by run
1764 # iolock held by run
1759 self.stream.writeln('# Producing time report')
1765 self.stream.writeln('# Producing time report')
1760 times.sort(key=lambda t: (t[3]))
1766 times.sort(key=lambda t: (t[3]))
1761 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1767 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1762 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1768 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1763 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1769 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1764 for tdata in times:
1770 for tdata in times:
1765 test = tdata[0]
1771 test = tdata[0]
1766 cuser, csys, real, start, end = tdata[1:6]
1772 cuser, csys, real, start, end = tdata[1:6]
1767 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1773 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1768
1774
1769 class TestRunner(object):
1775 class TestRunner(object):
1770 """Holds context for executing tests.
1776 """Holds context for executing tests.
1771
1777
1772 Tests rely on a lot of state. This object holds it for them.
1778 Tests rely on a lot of state. This object holds it for them.
1773 """
1779 """
1774
1780
1775 # Programs required to run tests.
1781 # Programs required to run tests.
1776 REQUIREDTOOLS = [
1782 REQUIREDTOOLS = [
1777 os.path.basename(_bytespath(sys.executable)),
1783 os.path.basename(_bytespath(sys.executable)),
1778 b'diff',
1784 b'diff',
1779 b'grep',
1785 b'grep',
1780 b'unzip',
1786 b'unzip',
1781 b'gunzip',
1787 b'gunzip',
1782 b'bunzip2',
1788 b'bunzip2',
1783 b'sed',
1789 b'sed',
1784 ]
1790 ]
1785
1791
1786 # Maps file extensions to test class.
1792 # Maps file extensions to test class.
1787 TESTTYPES = [
1793 TESTTYPES = [
1788 (b'.py', PythonTest),
1794 (b'.py', PythonTest),
1789 (b'.t', TTest),
1795 (b'.t', TTest),
1790 ]
1796 ]
1791
1797
1792 def __init__(self):
1798 def __init__(self):
1793 self.options = None
1799 self.options = None
1794 self._hgroot = None
1800 self._hgroot = None
1795 self._testdir = None
1801 self._testdir = None
1796 self._hgtmp = None
1802 self._hgtmp = None
1797 self._installdir = None
1803 self._installdir = None
1798 self._bindir = None
1804 self._bindir = None
1799 self._tmpbinddir = None
1805 self._tmpbinddir = None
1800 self._pythondir = None
1806 self._pythondir = None
1801 self._coveragefile = None
1807 self._coveragefile = None
1802 self._createdfiles = []
1808 self._createdfiles = []
1803 self._hgpath = None
1809 self._hgpath = None
1804 self._portoffset = 0
1810 self._portoffset = 0
1805 self._ports = {}
1811 self._ports = {}
1806
1812
1807 def run(self, args, parser=None):
1813 def run(self, args, parser=None):
1808 """Run the test suite."""
1814 """Run the test suite."""
1809 oldmask = os.umask(0o22)
1815 oldmask = os.umask(0o22)
1810 try:
1816 try:
1811 parser = parser or getparser()
1817 parser = parser or getparser()
1812 options, args = parseargs(args, parser)
1818 options, args = parseargs(args, parser)
1813 # positional arguments are paths to test files to run, so
1819 # positional arguments are paths to test files to run, so
1814 # we make sure they're all bytestrings
1820 # we make sure they're all bytestrings
1815 args = [_bytespath(a) for a in args]
1821 args = [_bytespath(a) for a in args]
1816 self.options = options
1822 self.options = options
1817
1823
1818 self._checktools()
1824 self._checktools()
1819 tests = self.findtests(args)
1825 tests = self.findtests(args)
1820 if options.profile_runner:
1826 if options.profile_runner:
1821 import statprof
1827 import statprof
1822 statprof.start()
1828 statprof.start()
1823 result = self._run(tests)
1829 result = self._run(tests)
1824 if options.profile_runner:
1830 if options.profile_runner:
1825 statprof.stop()
1831 statprof.stop()
1826 statprof.display()
1832 statprof.display()
1827 return result
1833 return result
1828
1834
1829 finally:
1835 finally:
1830 os.umask(oldmask)
1836 os.umask(oldmask)
1831
1837
1832 def _run(self, tests):
1838 def _run(self, tests):
1833 if self.options.random:
1839 if self.options.random:
1834 random.shuffle(tests)
1840 random.shuffle(tests)
1835 else:
1841 else:
1836 # keywords for slow tests
1842 # keywords for slow tests
1837 slow = {b'svn': 10,
1843 slow = {b'svn': 10,
1838 b'cvs': 10,
1844 b'cvs': 10,
1839 b'hghave': 10,
1845 b'hghave': 10,
1840 b'largefiles-update': 10,
1846 b'largefiles-update': 10,
1841 b'run-tests': 10,
1847 b'run-tests': 10,
1842 b'corruption': 10,
1848 b'corruption': 10,
1843 b'race': 10,
1849 b'race': 10,
1844 b'i18n': 10,
1850 b'i18n': 10,
1845 b'check': 100,
1851 b'check': 100,
1846 b'gendoc': 100,
1852 b'gendoc': 100,
1847 b'contrib-perf': 200,
1853 b'contrib-perf': 200,
1848 }
1854 }
1849 perf = {}
1855 perf = {}
1850 def sortkey(f):
1856 def sortkey(f):
1851 # run largest tests first, as they tend to take the longest
1857 # run largest tests first, as they tend to take the longest
1852 try:
1858 try:
1853 return perf[f]
1859 return perf[f]
1854 except KeyError:
1860 except KeyError:
1855 try:
1861 try:
1856 val = -os.stat(f).st_size
1862 val = -os.stat(f).st_size
1857 except OSError as e:
1863 except OSError as e:
1858 if e.errno != errno.ENOENT:
1864 if e.errno != errno.ENOENT:
1859 raise
1865 raise
1860 perf[f] = -1e9 # file does not exist, tell early
1866 perf[f] = -1e9 # file does not exist, tell early
1861 return -1e9
1867 return -1e9
1862 for kw, mul in slow.items():
1868 for kw, mul in slow.items():
1863 if kw in f:
1869 if kw in f:
1864 val *= mul
1870 val *= mul
1865 if f.endswith('.py'):
1871 if f.endswith('.py'):
1866 val /= 10.0
1872 val /= 10.0
1867 perf[f] = val / 1000.0
1873 perf[f] = val / 1000.0
1868 return perf[f]
1874 return perf[f]
1869 tests.sort(key=sortkey)
1875 tests.sort(key=sortkey)
1870
1876
1871 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1877 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1872 os, 'getcwdb', os.getcwd)()
1878 os, 'getcwdb', os.getcwd)()
1873
1879
1874 if 'PYTHONHASHSEED' not in os.environ:
1880 if 'PYTHONHASHSEED' not in os.environ:
1875 # use a random python hash seed all the time
1881 # use a random python hash seed all the time
1876 # we do the randomness ourselves to know what seed is used
1882 # we do the randomness ourselves to know what seed is used
1877 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1883 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1878
1884
1879 if self.options.tmpdir:
1885 if self.options.tmpdir:
1880 self.options.keep_tmpdir = True
1886 self.options.keep_tmpdir = True
1881 tmpdir = _bytespath(self.options.tmpdir)
1887 tmpdir = _bytespath(self.options.tmpdir)
1882 if os.path.exists(tmpdir):
1888 if os.path.exists(tmpdir):
1883 # Meaning of tmpdir has changed since 1.3: we used to create
1889 # Meaning of tmpdir has changed since 1.3: we used to create
1884 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1890 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1885 # tmpdir already exists.
1891 # tmpdir already exists.
1886 print("error: temp dir %r already exists" % tmpdir)
1892 print("error: temp dir %r already exists" % tmpdir)
1887 return 1
1893 return 1
1888
1894
1889 # Automatically removing tmpdir sounds convenient, but could
1895 # Automatically removing tmpdir sounds convenient, but could
1890 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1896 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1891 # or "--tmpdir=$HOME".
1897 # or "--tmpdir=$HOME".
1892 #vlog("# Removing temp dir", tmpdir)
1898 #vlog("# Removing temp dir", tmpdir)
1893 #shutil.rmtree(tmpdir)
1899 #shutil.rmtree(tmpdir)
1894 os.makedirs(tmpdir)
1900 os.makedirs(tmpdir)
1895 else:
1901 else:
1896 d = None
1902 d = None
1897 if os.name == 'nt':
1903 if os.name == 'nt':
1898 # without this, we get the default temp dir location, but
1904 # without this, we get the default temp dir location, but
1899 # in all lowercase, which causes trouble with paths (issue3490)
1905 # in all lowercase, which causes trouble with paths (issue3490)
1900 d = osenvironb.get(b'TMP', None)
1906 d = osenvironb.get(b'TMP', None)
1901 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1907 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1902
1908
1903 self._hgtmp = osenvironb[b'HGTMP'] = (
1909 self._hgtmp = osenvironb[b'HGTMP'] = (
1904 os.path.realpath(tmpdir))
1910 os.path.realpath(tmpdir))
1905
1911
1906 if self.options.with_hg:
1912 if self.options.with_hg:
1907 self._installdir = None
1913 self._installdir = None
1908 whg = self.options.with_hg
1914 whg = self.options.with_hg
1909 # If --with-hg is not specified, we have bytes already,
1915 # If --with-hg is not specified, we have bytes already,
1910 # but if it was specified in python3 we get a str, so we
1916 # but if it was specified in python3 we get a str, so we
1911 # have to encode it back into a bytes.
1917 # have to encode it back into a bytes.
1912 if PYTHON3:
1918 if PYTHON3:
1913 if not isinstance(whg, bytes):
1919 if not isinstance(whg, bytes):
1914 whg = _bytespath(whg)
1920 whg = _bytespath(whg)
1915 self._bindir = os.path.dirname(os.path.realpath(whg))
1921 self._bindir = os.path.dirname(os.path.realpath(whg))
1916 assert isinstance(self._bindir, bytes)
1922 assert isinstance(self._bindir, bytes)
1917 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1923 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1918 os.makedirs(self._tmpbindir)
1924 os.makedirs(self._tmpbindir)
1919
1925
1920 # This looks redundant with how Python initializes sys.path from
1926 # This looks redundant with how Python initializes sys.path from
1921 # the location of the script being executed. Needed because the
1927 # the location of the script being executed. Needed because the
1922 # "hg" specified by --with-hg is not the only Python script
1928 # "hg" specified by --with-hg is not the only Python script
1923 # executed in the test suite that needs to import 'mercurial'
1929 # executed in the test suite that needs to import 'mercurial'
1924 # ... which means it's not really redundant at all.
1930 # ... which means it's not really redundant at all.
1925 self._pythondir = self._bindir
1931 self._pythondir = self._bindir
1926 else:
1932 else:
1927 self._installdir = os.path.join(self._hgtmp, b"install")
1933 self._installdir = os.path.join(self._hgtmp, b"install")
1928 self._bindir = osenvironb[b"BINDIR"] = \
1934 self._bindir = osenvironb[b"BINDIR"] = \
1929 os.path.join(self._installdir, b"bin")
1935 os.path.join(self._installdir, b"bin")
1930 self._tmpbindir = self._bindir
1936 self._tmpbindir = self._bindir
1931 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1937 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1932
1938
1933 osenvironb[b"BINDIR"] = self._bindir
1939 osenvironb[b"BINDIR"] = self._bindir
1934 osenvironb[b"PYTHON"] = PYTHON
1940 osenvironb[b"PYTHON"] = PYTHON
1935
1941
1936 fileb = _bytespath(__file__)
1942 fileb = _bytespath(__file__)
1937 runtestdir = os.path.abspath(os.path.dirname(fileb))
1943 runtestdir = os.path.abspath(os.path.dirname(fileb))
1938 osenvironb[b'RUNTESTDIR'] = runtestdir
1944 osenvironb[b'RUNTESTDIR'] = runtestdir
1939 if PYTHON3:
1945 if PYTHON3:
1940 sepb = _bytespath(os.pathsep)
1946 sepb = _bytespath(os.pathsep)
1941 else:
1947 else:
1942 sepb = os.pathsep
1948 sepb = os.pathsep
1943 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1949 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1944 if os.path.islink(__file__):
1950 if os.path.islink(__file__):
1945 # test helper will likely be at the end of the symlink
1951 # test helper will likely be at the end of the symlink
1946 realfile = os.path.realpath(fileb)
1952 realfile = os.path.realpath(fileb)
1947 realdir = os.path.abspath(os.path.dirname(realfile))
1953 realdir = os.path.abspath(os.path.dirname(realfile))
1948 path.insert(2, realdir)
1954 path.insert(2, realdir)
1949 if self._testdir != runtestdir:
1955 if self._testdir != runtestdir:
1950 path = [self._testdir] + path
1956 path = [self._testdir] + path
1951 if self._tmpbindir != self._bindir:
1957 if self._tmpbindir != self._bindir:
1952 path = [self._tmpbindir] + path
1958 path = [self._tmpbindir] + path
1953 osenvironb[b"PATH"] = sepb.join(path)
1959 osenvironb[b"PATH"] = sepb.join(path)
1954
1960
1955 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1961 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1956 # can run .../tests/run-tests.py test-foo where test-foo
1962 # can run .../tests/run-tests.py test-foo where test-foo
1957 # adds an extension to HGRC. Also include the run-tests.py directory to
1963 # adds an extension to HGRC. Also include the run-tests.py directory to
1958 # import modules like heredoctest.
1964 # import modules like heredoctest.
1959 pypath = [self._pythondir, self._testdir, runtestdir]
1965 pypath = [self._pythondir, self._testdir, runtestdir]
1960 # We have to augment PYTHONPATH, rather than simply replacing
1966 # We have to augment PYTHONPATH, rather than simply replacing
1961 # it, in case external libraries are only available via current
1967 # it, in case external libraries are only available via current
1962 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1968 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1963 # are in /opt/subversion.)
1969 # are in /opt/subversion.)
1964 oldpypath = osenvironb.get(IMPL_PATH)
1970 oldpypath = osenvironb.get(IMPL_PATH)
1965 if oldpypath:
1971 if oldpypath:
1966 pypath.append(oldpypath)
1972 pypath.append(oldpypath)
1967 osenvironb[IMPL_PATH] = sepb.join(pypath)
1973 osenvironb[IMPL_PATH] = sepb.join(pypath)
1968
1974
1969 if self.options.pure:
1975 if self.options.pure:
1970 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1976 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1971
1977
1972 if self.options.allow_slow_tests:
1978 if self.options.allow_slow_tests:
1973 os.environ["HGTEST_SLOW"] = "slow"
1979 os.environ["HGTEST_SLOW"] = "slow"
1974 elif 'HGTEST_SLOW' in os.environ:
1980 elif 'HGTEST_SLOW' in os.environ:
1975 del os.environ['HGTEST_SLOW']
1981 del os.environ['HGTEST_SLOW']
1976
1982
1977 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1983 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1978
1984
1979 vlog("# Using TESTDIR", self._testdir)
1985 vlog("# Using TESTDIR", self._testdir)
1980 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1986 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1981 vlog("# Using HGTMP", self._hgtmp)
1987 vlog("# Using HGTMP", self._hgtmp)
1982 vlog("# Using PATH", os.environ["PATH"])
1988 vlog("# Using PATH", os.environ["PATH"])
1983 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1989 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1984
1990
1985 try:
1991 try:
1986 return self._runtests(tests) or 0
1992 return self._runtests(tests) or 0
1987 finally:
1993 finally:
1988 time.sleep(.1)
1994 time.sleep(.1)
1989 self._cleanup()
1995 self._cleanup()
1990
1996
1991 def findtests(self, args):
1997 def findtests(self, args):
1992 """Finds possible test files from arguments.
1998 """Finds possible test files from arguments.
1993
1999
1994 If you wish to inject custom tests into the test harness, this would
2000 If you wish to inject custom tests into the test harness, this would
1995 be a good function to monkeypatch or override in a derived class.
2001 be a good function to monkeypatch or override in a derived class.
1996 """
2002 """
1997 if not args:
2003 if not args:
1998 if self.options.changed:
2004 if self.options.changed:
1999 proc = Popen4('hg st --rev "%s" -man0 .' %
2005 proc = Popen4('hg st --rev "%s" -man0 .' %
2000 self.options.changed, None, 0)
2006 self.options.changed, None, 0)
2001 stdout, stderr = proc.communicate()
2007 stdout, stderr = proc.communicate()
2002 args = stdout.strip(b'\0').split(b'\0')
2008 args = stdout.strip(b'\0').split(b'\0')
2003 else:
2009 else:
2004 args = os.listdir(b'.')
2010 args = os.listdir(b'.')
2005
2011
2006 return [t for t in args
2012 return [t for t in args
2007 if os.path.basename(t).startswith(b'test-')
2013 if os.path.basename(t).startswith(b'test-')
2008 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2014 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2009
2015
2010 def _runtests(self, tests):
2016 def _runtests(self, tests):
2011 try:
2017 try:
2012 if self._installdir:
2018 if self._installdir:
2013 self._installhg()
2019 self._installhg()
2014 self._checkhglib("Testing")
2020 self._checkhglib("Testing")
2015 else:
2021 else:
2016 self._usecorrectpython()
2022 self._usecorrectpython()
2017
2023
2018 if self.options.restart:
2024 if self.options.restart:
2019 orig = list(tests)
2025 orig = list(tests)
2020 while tests:
2026 while tests:
2021 if os.path.exists(tests[0] + ".err"):
2027 if os.path.exists(tests[0] + ".err"):
2022 break
2028 break
2023 tests.pop(0)
2029 tests.pop(0)
2024 if not tests:
2030 if not tests:
2025 print("running all tests")
2031 print("running all tests")
2026 tests = orig
2032 tests = orig
2027
2033
2028 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2034 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2029
2035
2030 failed = False
2036 failed = False
2031 warned = False
2037 warned = False
2032 kws = self.options.keywords
2038 kws = self.options.keywords
2033 if kws is not None and PYTHON3:
2039 if kws is not None and PYTHON3:
2034 kws = kws.encode('utf-8')
2040 kws = kws.encode('utf-8')
2035
2041
2036 suite = TestSuite(self._testdir,
2042 suite = TestSuite(self._testdir,
2037 jobs=self.options.jobs,
2043 jobs=self.options.jobs,
2038 whitelist=self.options.whitelisted,
2044 whitelist=self.options.whitelisted,
2039 blacklist=self.options.blacklist,
2045 blacklist=self.options.blacklist,
2040 retest=self.options.retest,
2046 retest=self.options.retest,
2041 keywords=kws,
2047 keywords=kws,
2042 loop=self.options.loop,
2048 loop=self.options.loop,
2043 runs_per_test=self.options.runs_per_test,
2049 runs_per_test=self.options.runs_per_test,
2044 showchannels=self.options.showchannels,
2050 showchannels=self.options.showchannels,
2045 tests=tests, loadtest=self._gettest)
2051 tests=tests, loadtest=self._gettest)
2046 verbosity = 1
2052 verbosity = 1
2047 if self.options.verbose:
2053 if self.options.verbose:
2048 verbosity = 2
2054 verbosity = 2
2049 runner = TextTestRunner(self, verbosity=verbosity)
2055 runner = TextTestRunner(self, verbosity=verbosity)
2050 result = runner.run(suite)
2056 result = runner.run(suite)
2051
2057
2052 if result.failures:
2058 if result.failures:
2053 failed = True
2059 failed = True
2054 if result.warned:
2060 if result.warned:
2055 warned = True
2061 warned = True
2056
2062
2057 if self.options.anycoverage:
2063 if self.options.anycoverage:
2058 self._outputcoverage()
2064 self._outputcoverage()
2059 except KeyboardInterrupt:
2065 except KeyboardInterrupt:
2060 failed = True
2066 failed = True
2061 print("\ninterrupted!")
2067 print("\ninterrupted!")
2062
2068
2063 if failed:
2069 if failed:
2064 return 1
2070 return 1
2065 if warned:
2071 if warned:
2066 return 80
2072 return 80
2067
2073
2068 def _getport(self, count):
2074 def _getport(self, count):
2069 port = self._ports.get(count) # do we have a cached entry?
2075 port = self._ports.get(count) # do we have a cached entry?
2070 if port is None:
2076 if port is None:
2071 portneeded = 3
2077 portneeded = 3
2072 # after 100 tries we just give up and let the test report failure
2078 # after 100 tries we just give up and let the test report failure
2073 for tries in xrange(100):
2079 for tries in xrange(100):
2074 allfree = True
2080 allfree = True
2075 port = self.options.port + self._portoffset
2081 port = self.options.port + self._portoffset
2076 for idx in xrange(portneeded):
2082 for idx in xrange(portneeded):
2077 if not checkportisavailable(port + idx):
2083 if not checkportisavailable(port + idx):
2078 allfree = False
2084 allfree = False
2079 break
2085 break
2080 self._portoffset += portneeded
2086 self._portoffset += portneeded
2081 if allfree:
2087 if allfree:
2082 break
2088 break
2083 self._ports[count] = port
2089 self._ports[count] = port
2084 return port
2090 return port
2085
2091
2086 def _gettest(self, test, count):
2092 def _gettest(self, test, count):
2087 """Obtain a Test by looking at its filename.
2093 """Obtain a Test by looking at its filename.
2088
2094
2089 Returns a Test instance. The Test may not be runnable if it doesn't
2095 Returns a Test instance. The Test may not be runnable if it doesn't
2090 map to a known type.
2096 map to a known type.
2091 """
2097 """
2092 lctest = test.lower()
2098 lctest = test.lower()
2093 testcls = Test
2099 testcls = Test
2094
2100
2095 for ext, cls in self.TESTTYPES:
2101 for ext, cls in self.TESTTYPES:
2096 if lctest.endswith(ext):
2102 if lctest.endswith(ext):
2097 testcls = cls
2103 testcls = cls
2098 break
2104 break
2099
2105
2100 refpath = os.path.join(self._testdir, test)
2106 refpath = os.path.join(self._testdir, test)
2101 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2107 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2102
2108
2103 t = testcls(refpath, tmpdir,
2109 t = testcls(refpath, tmpdir,
2104 keeptmpdir=self.options.keep_tmpdir,
2110 keeptmpdir=self.options.keep_tmpdir,
2105 debug=self.options.debug,
2111 debug=self.options.debug,
2106 timeout=self.options.timeout,
2112 timeout=self.options.timeout,
2107 startport=self._getport(count),
2113 startport=self._getport(count),
2108 extraconfigopts=self.options.extra_config_opt,
2114 extraconfigopts=self.options.extra_config_opt,
2109 py3kwarnings=self.options.py3k_warnings,
2115 py3kwarnings=self.options.py3k_warnings,
2110 shell=self.options.shell)
2116 shell=self.options.shell)
2111 t.should_reload = True
2117 t.should_reload = True
2112 return t
2118 return t
2113
2119
2114 def _cleanup(self):
2120 def _cleanup(self):
2115 """Clean up state from this test invocation."""
2121 """Clean up state from this test invocation."""
2116
2122
2117 if self.options.keep_tmpdir:
2123 if self.options.keep_tmpdir:
2118 return
2124 return
2119
2125
2120 vlog("# Cleaning up HGTMP", self._hgtmp)
2126 vlog("# Cleaning up HGTMP", self._hgtmp)
2121 shutil.rmtree(self._hgtmp, True)
2127 shutil.rmtree(self._hgtmp, True)
2122 for f in self._createdfiles:
2128 for f in self._createdfiles:
2123 try:
2129 try:
2124 os.remove(f)
2130 os.remove(f)
2125 except OSError:
2131 except OSError:
2126 pass
2132 pass
2127
2133
2128 def _usecorrectpython(self):
2134 def _usecorrectpython(self):
2129 """Configure the environment to use the appropriate Python in tests."""
2135 """Configure the environment to use the appropriate Python in tests."""
2130 # Tests must use the same interpreter as us or bad things will happen.
2136 # Tests must use the same interpreter as us or bad things will happen.
2131 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2137 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2132 if getattr(os, 'symlink', None):
2138 if getattr(os, 'symlink', None):
2133 vlog("# Making python executable in test path a symlink to '%s'" %
2139 vlog("# Making python executable in test path a symlink to '%s'" %
2134 sys.executable)
2140 sys.executable)
2135 mypython = os.path.join(self._tmpbindir, pyexename)
2141 mypython = os.path.join(self._tmpbindir, pyexename)
2136 try:
2142 try:
2137 if os.readlink(mypython) == sys.executable:
2143 if os.readlink(mypython) == sys.executable:
2138 return
2144 return
2139 os.unlink(mypython)
2145 os.unlink(mypython)
2140 except OSError as err:
2146 except OSError as err:
2141 if err.errno != errno.ENOENT:
2147 if err.errno != errno.ENOENT:
2142 raise
2148 raise
2143 if self._findprogram(pyexename) != sys.executable:
2149 if self._findprogram(pyexename) != sys.executable:
2144 try:
2150 try:
2145 os.symlink(sys.executable, mypython)
2151 os.symlink(sys.executable, mypython)
2146 self._createdfiles.append(mypython)
2152 self._createdfiles.append(mypython)
2147 except OSError as err:
2153 except OSError as err:
2148 # child processes may race, which is harmless
2154 # child processes may race, which is harmless
2149 if err.errno != errno.EEXIST:
2155 if err.errno != errno.EEXIST:
2150 raise
2156 raise
2151 else:
2157 else:
2152 exedir, exename = os.path.split(sys.executable)
2158 exedir, exename = os.path.split(sys.executable)
2153 vlog("# Modifying search path to find %s as %s in '%s'" %
2159 vlog("# Modifying search path to find %s as %s in '%s'" %
2154 (exename, pyexename, exedir))
2160 (exename, pyexename, exedir))
2155 path = os.environ['PATH'].split(os.pathsep)
2161 path = os.environ['PATH'].split(os.pathsep)
2156 while exedir in path:
2162 while exedir in path:
2157 path.remove(exedir)
2163 path.remove(exedir)
2158 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2164 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2159 if not self._findprogram(pyexename):
2165 if not self._findprogram(pyexename):
2160 print("WARNING: Cannot find %s in search path" % pyexename)
2166 print("WARNING: Cannot find %s in search path" % pyexename)
2161
2167
2162 def _installhg(self):
2168 def _installhg(self):
2163 """Install hg into the test environment.
2169 """Install hg into the test environment.
2164
2170
2165 This will also configure hg with the appropriate testing settings.
2171 This will also configure hg with the appropriate testing settings.
2166 """
2172 """
2167 vlog("# Performing temporary installation of HG")
2173 vlog("# Performing temporary installation of HG")
2168 installerrs = os.path.join(b"tests", b"install.err")
2174 installerrs = os.path.join(b"tests", b"install.err")
2169 compiler = ''
2175 compiler = ''
2170 if self.options.compiler:
2176 if self.options.compiler:
2171 compiler = '--compiler ' + self.options.compiler
2177 compiler = '--compiler ' + self.options.compiler
2172 if self.options.pure:
2178 if self.options.pure:
2173 pure = b"--pure"
2179 pure = b"--pure"
2174 else:
2180 else:
2175 pure = b""
2181 pure = b""
2176 py3 = ''
2182 py3 = ''
2177
2183
2178 # Run installer in hg root
2184 # Run installer in hg root
2179 script = os.path.realpath(sys.argv[0])
2185 script = os.path.realpath(sys.argv[0])
2180 exe = sys.executable
2186 exe = sys.executable
2181 if PYTHON3:
2187 if PYTHON3:
2182 py3 = b'--c2to3'
2188 py3 = b'--c2to3'
2183 compiler = _bytespath(compiler)
2189 compiler = _bytespath(compiler)
2184 script = _bytespath(script)
2190 script = _bytespath(script)
2185 exe = _bytespath(exe)
2191 exe = _bytespath(exe)
2186 hgroot = os.path.dirname(os.path.dirname(script))
2192 hgroot = os.path.dirname(os.path.dirname(script))
2187 self._hgroot = hgroot
2193 self._hgroot = hgroot
2188 os.chdir(hgroot)
2194 os.chdir(hgroot)
2189 nohome = b'--home=""'
2195 nohome = b'--home=""'
2190 if os.name == 'nt':
2196 if os.name == 'nt':
2191 # The --home="" trick works only on OSes where os.sep == '/'
2197 # The --home="" trick works only on OSes where os.sep == '/'
2192 # because of a distutils convert_path() fast-path. Avoid it at
2198 # because of a distutils convert_path() fast-path. Avoid it at
2193 # least on Windows for now, deal with .pydistutils.cfg bugs
2199 # least on Windows for now, deal with .pydistutils.cfg bugs
2194 # when they happen.
2200 # when they happen.
2195 nohome = b''
2201 nohome = b''
2196 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2202 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2197 b' build %(compiler)s --build-base="%(base)s"'
2203 b' build %(compiler)s --build-base="%(base)s"'
2198 b' install --force --prefix="%(prefix)s"'
2204 b' install --force --prefix="%(prefix)s"'
2199 b' --install-lib="%(libdir)s"'
2205 b' --install-lib="%(libdir)s"'
2200 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2206 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2201 % {b'exe': exe, b'py3': py3, b'pure': pure,
2207 % {b'exe': exe, b'py3': py3, b'pure': pure,
2202 b'compiler': compiler,
2208 b'compiler': compiler,
2203 b'base': os.path.join(self._hgtmp, b"build"),
2209 b'base': os.path.join(self._hgtmp, b"build"),
2204 b'prefix': self._installdir, b'libdir': self._pythondir,
2210 b'prefix': self._installdir, b'libdir': self._pythondir,
2205 b'bindir': self._bindir,
2211 b'bindir': self._bindir,
2206 b'nohome': nohome, b'logfile': installerrs})
2212 b'nohome': nohome, b'logfile': installerrs})
2207
2213
2208 # setuptools requires install directories to exist.
2214 # setuptools requires install directories to exist.
2209 def makedirs(p):
2215 def makedirs(p):
2210 try:
2216 try:
2211 os.makedirs(p)
2217 os.makedirs(p)
2212 except OSError as e:
2218 except OSError as e:
2213 if e.errno != errno.EEXIST:
2219 if e.errno != errno.EEXIST:
2214 raise
2220 raise
2215 makedirs(self._pythondir)
2221 makedirs(self._pythondir)
2216 makedirs(self._bindir)
2222 makedirs(self._bindir)
2217
2223
2218 vlog("# Running", cmd)
2224 vlog("# Running", cmd)
2219 if os.system(cmd) == 0:
2225 if os.system(cmd) == 0:
2220 if not self.options.verbose:
2226 if not self.options.verbose:
2221 try:
2227 try:
2222 os.remove(installerrs)
2228 os.remove(installerrs)
2223 except OSError as e:
2229 except OSError as e:
2224 if e.errno != errno.ENOENT:
2230 if e.errno != errno.ENOENT:
2225 raise
2231 raise
2226 else:
2232 else:
2227 f = open(installerrs, 'rb')
2233 f = open(installerrs, 'rb')
2228 for line in f:
2234 for line in f:
2229 if PYTHON3:
2235 if PYTHON3:
2230 sys.stdout.buffer.write(line)
2236 sys.stdout.buffer.write(line)
2231 else:
2237 else:
2232 sys.stdout.write(line)
2238 sys.stdout.write(line)
2233 f.close()
2239 f.close()
2234 sys.exit(1)
2240 sys.exit(1)
2235 os.chdir(self._testdir)
2241 os.chdir(self._testdir)
2236
2242
2237 self._usecorrectpython()
2243 self._usecorrectpython()
2238
2244
2239 if self.options.py3k_warnings and not self.options.anycoverage:
2245 if self.options.py3k_warnings and not self.options.anycoverage:
2240 vlog("# Updating hg command to enable Py3k Warnings switch")
2246 vlog("# Updating hg command to enable Py3k Warnings switch")
2241 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2247 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2242 lines = [line.rstrip() for line in f]
2248 lines = [line.rstrip() for line in f]
2243 lines[0] += ' -3'
2249 lines[0] += ' -3'
2244 f.close()
2250 f.close()
2245 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2251 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2246 for line in lines:
2252 for line in lines:
2247 f.write(line + '\n')
2253 f.write(line + '\n')
2248 f.close()
2254 f.close()
2249
2255
2250 hgbat = os.path.join(self._bindir, b'hg.bat')
2256 hgbat = os.path.join(self._bindir, b'hg.bat')
2251 if os.path.isfile(hgbat):
2257 if os.path.isfile(hgbat):
2252 # hg.bat expects to be put in bin/scripts while run-tests.py
2258 # hg.bat expects to be put in bin/scripts while run-tests.py
2253 # installation layout puts it in bin/ directly. Fix it.
2259 # installation layout puts it in bin/ directly. Fix it.
2254 f = open(hgbat, 'rb')
2260 f = open(hgbat, 'rb')
2255 data = f.read()
2261 data = f.read()
2256 f.close()
2262 f.close()
2257 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2263 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2258 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2264 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2259 b'"%~dp0python" "%~dp0hg" %*')
2265 b'"%~dp0python" "%~dp0hg" %*')
2260 f = open(hgbat, 'wb')
2266 f = open(hgbat, 'wb')
2261 f.write(data)
2267 f.write(data)
2262 f.close()
2268 f.close()
2263 else:
2269 else:
2264 print('WARNING: cannot fix hg.bat reference to python.exe')
2270 print('WARNING: cannot fix hg.bat reference to python.exe')
2265
2271
2266 if self.options.anycoverage:
2272 if self.options.anycoverage:
2267 custom = os.path.join(self._testdir, 'sitecustomize.py')
2273 custom = os.path.join(self._testdir, 'sitecustomize.py')
2268 target = os.path.join(self._pythondir, 'sitecustomize.py')
2274 target = os.path.join(self._pythondir, 'sitecustomize.py')
2269 vlog('# Installing coverage trigger to %s' % target)
2275 vlog('# Installing coverage trigger to %s' % target)
2270 shutil.copyfile(custom, target)
2276 shutil.copyfile(custom, target)
2271 rc = os.path.join(self._testdir, '.coveragerc')
2277 rc = os.path.join(self._testdir, '.coveragerc')
2272 vlog('# Installing coverage rc to %s' % rc)
2278 vlog('# Installing coverage rc to %s' % rc)
2273 os.environ['COVERAGE_PROCESS_START'] = rc
2279 os.environ['COVERAGE_PROCESS_START'] = rc
2274 covdir = os.path.join(self._installdir, '..', 'coverage')
2280 covdir = os.path.join(self._installdir, '..', 'coverage')
2275 try:
2281 try:
2276 os.mkdir(covdir)
2282 os.mkdir(covdir)
2277 except OSError as e:
2283 except OSError as e:
2278 if e.errno != errno.EEXIST:
2284 if e.errno != errno.EEXIST:
2279 raise
2285 raise
2280
2286
2281 os.environ['COVERAGE_DIR'] = covdir
2287 os.environ['COVERAGE_DIR'] = covdir
2282
2288
2283 def _checkhglib(self, verb):
2289 def _checkhglib(self, verb):
2284 """Ensure that the 'mercurial' package imported by python is
2290 """Ensure that the 'mercurial' package imported by python is
2285 the one we expect it to be. If not, print a warning to stderr."""
2291 the one we expect it to be. If not, print a warning to stderr."""
2286 if ((self._bindir == self._pythondir) and
2292 if ((self._bindir == self._pythondir) and
2287 (self._bindir != self._tmpbindir)):
2293 (self._bindir != self._tmpbindir)):
2288 # The pythondir has been inferred from --with-hg flag.
2294 # The pythondir has been inferred from --with-hg flag.
2289 # We cannot expect anything sensible here.
2295 # We cannot expect anything sensible here.
2290 return
2296 return
2291 expecthg = os.path.join(self._pythondir, b'mercurial')
2297 expecthg = os.path.join(self._pythondir, b'mercurial')
2292 actualhg = self._gethgpath()
2298 actualhg = self._gethgpath()
2293 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2299 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2294 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2300 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2295 ' (expected %s)\n'
2301 ' (expected %s)\n'
2296 % (verb, actualhg, expecthg))
2302 % (verb, actualhg, expecthg))
2297 def _gethgpath(self):
2303 def _gethgpath(self):
2298 """Return the path to the mercurial package that is actually found by
2304 """Return the path to the mercurial package that is actually found by
2299 the current Python interpreter."""
2305 the current Python interpreter."""
2300 if self._hgpath is not None:
2306 if self._hgpath is not None:
2301 return self._hgpath
2307 return self._hgpath
2302
2308
2303 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2309 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2304 cmd = cmd % PYTHON
2310 cmd = cmd % PYTHON
2305 if PYTHON3:
2311 if PYTHON3:
2306 cmd = _strpath(cmd)
2312 cmd = _strpath(cmd)
2307 pipe = os.popen(cmd)
2313 pipe = os.popen(cmd)
2308 try:
2314 try:
2309 self._hgpath = _bytespath(pipe.read().strip())
2315 self._hgpath = _bytespath(pipe.read().strip())
2310 finally:
2316 finally:
2311 pipe.close()
2317 pipe.close()
2312
2318
2313 return self._hgpath
2319 return self._hgpath
2314
2320
2315 def _outputcoverage(self):
2321 def _outputcoverage(self):
2316 """Produce code coverage output."""
2322 """Produce code coverage output."""
2317 from coverage import coverage
2323 from coverage import coverage
2318
2324
2319 vlog('# Producing coverage report')
2325 vlog('# Producing coverage report')
2320 # chdir is the easiest way to get short, relative paths in the
2326 # chdir is the easiest way to get short, relative paths in the
2321 # output.
2327 # output.
2322 os.chdir(self._hgroot)
2328 os.chdir(self._hgroot)
2323 covdir = os.path.join(self._installdir, '..', 'coverage')
2329 covdir = os.path.join(self._installdir, '..', 'coverage')
2324 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2330 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2325
2331
2326 # Map install directory paths back to source directory.
2332 # Map install directory paths back to source directory.
2327 cov.config.paths['srcdir'] = ['.', self._pythondir]
2333 cov.config.paths['srcdir'] = ['.', self._pythondir]
2328
2334
2329 cov.combine()
2335 cov.combine()
2330
2336
2331 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2337 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2332 cov.report(ignore_errors=True, omit=omit)
2338 cov.report(ignore_errors=True, omit=omit)
2333
2339
2334 if self.options.htmlcov:
2340 if self.options.htmlcov:
2335 htmldir = os.path.join(self._testdir, 'htmlcov')
2341 htmldir = os.path.join(self._testdir, 'htmlcov')
2336 cov.html_report(directory=htmldir, omit=omit)
2342 cov.html_report(directory=htmldir, omit=omit)
2337 if self.options.annotate:
2343 if self.options.annotate:
2338 adir = os.path.join(self._testdir, 'annotated')
2344 adir = os.path.join(self._testdir, 'annotated')
2339 if not os.path.isdir(adir):
2345 if not os.path.isdir(adir):
2340 os.mkdir(adir)
2346 os.mkdir(adir)
2341 cov.annotate(directory=adir, omit=omit)
2347 cov.annotate(directory=adir, omit=omit)
2342
2348
2343 def _findprogram(self, program):
2349 def _findprogram(self, program):
2344 """Search PATH for a executable program"""
2350 """Search PATH for a executable program"""
2345 dpb = _bytespath(os.defpath)
2351 dpb = _bytespath(os.defpath)
2346 sepb = _bytespath(os.pathsep)
2352 sepb = _bytespath(os.pathsep)
2347 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2353 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2348 name = os.path.join(p, program)
2354 name = os.path.join(p, program)
2349 if os.name == 'nt' or os.access(name, os.X_OK):
2355 if os.name == 'nt' or os.access(name, os.X_OK):
2350 return name
2356 return name
2351 return None
2357 return None
2352
2358
2353 def _checktools(self):
2359 def _checktools(self):
2354 """Ensure tools required to run tests are present."""
2360 """Ensure tools required to run tests are present."""
2355 for p in self.REQUIREDTOOLS:
2361 for p in self.REQUIREDTOOLS:
2356 if os.name == 'nt' and not p.endswith('.exe'):
2362 if os.name == 'nt' and not p.endswith('.exe'):
2357 p += '.exe'
2363 p += '.exe'
2358 found = self._findprogram(p)
2364 found = self._findprogram(p)
2359 if found:
2365 if found:
2360 vlog("# Found prerequisite", p, "at", found)
2366 vlog("# Found prerequisite", p, "at", found)
2361 else:
2367 else:
2362 print("WARNING: Did not find prerequisite tool: %s " % p)
2368 print("WARNING: Did not find prerequisite tool: %s " % p)
2363
2369
2364 if __name__ == '__main__':
2370 if __name__ == '__main__':
2365 runner = TestRunner()
2371 runner = TestRunner()
2366
2372
2367 try:
2373 try:
2368 import msvcrt
2374 import msvcrt
2369 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2375 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2370 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2376 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2371 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2377 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2372 except ImportError:
2378 except ImportError:
2373 pass
2379 pass
2374
2380
2375 sys.exit(runner.run(sys.argv[1:]))
2381 sys.exit(runner.run(sys.argv[1:]))
@@ -1,695 +1,703 b''
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ unset HGTEST_JOBS
5 $ unset HGTEST_JOBS
6 $ unset HGTEST_TIMEOUT
6 $ unset HGTEST_TIMEOUT
7 $ unset HGTEST_PORT
7 $ unset HGTEST_PORT
8 $ unset HGTEST_SHELL
8 $ unset HGTEST_SHELL
9
9
10 Smoke test with install
10 Smoke test with install
11 ============
11 ============
12
12
13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14
14
15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16
16
17 Define a helper to avoid the install step
17 Define a helper to avoid the install step
18 =============
18 =============
19 $ rt()
19 $ rt()
20 > {
20 > {
21 > run-tests.py --with-hg=`which hg` "$@"
21 > run-tests.py --with-hg=`which hg` "$@"
22 > }
22 > }
23
23
24 a successful test
24 a successful test
25 =======================
25 =======================
26
26
27 $ cat > test-success.t << EOF
27 $ cat > test-success.t << EOF
28 > $ echo babar
28 > $ echo babar
29 > babar
29 > babar
30 > $ echo xyzzy
30 > $ echo xyzzy
31 > never happens (?)
31 > never happens (?)
32 > xyzzy
32 > xyzzy
33 > nor this (?)
33 > nor this (?)
34 > EOF
34 > EOF
35
35
36 $ rt
36 $ rt
37 .
37 .
38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
39
39
40 failing test
40 failing test
41 ==================
41 ==================
42
42
43 $ cat > test-failure.t << EOF
43 $ cat > test-failure.t << EOF
44 > $ echo babar
44 > $ echo babar
45 > rataxes
45 > rataxes
46 > This is a noop statement so that
46 > This is a noop statement so that
47 > this test is still more bytes than success.
47 > this test is still more bytes than success.
48 > EOF
48 > EOF
49
49
50 >>> fh = open('test-failure-unicode.t', 'wb')
50 >>> fh = open('test-failure-unicode.t', 'wb')
51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
53
53
54 $ rt
54 $ rt
55
55
56 --- $TESTTMP/test-failure.t
56 --- $TESTTMP/test-failure.t
57 +++ $TESTTMP/test-failure.t.err
57 +++ $TESTTMP/test-failure.t.err
58 @@ -1,4 +1,4 @@
58 @@ -1,4 +1,4 @@
59 $ echo babar
59 $ echo babar
60 - rataxes
60 - rataxes
61 + babar
61 + babar
62 This is a noop statement so that
62 This is a noop statement so that
63 this test is still more bytes than success.
63 this test is still more bytes than success.
64
64
65 ERROR: test-failure.t output changed
65 ERROR: test-failure.t output changed
66 !.
66 !.
67 --- $TESTTMP/test-failure-unicode.t
67 --- $TESTTMP/test-failure-unicode.t
68 +++ $TESTTMP/test-failure-unicode.t.err
68 +++ $TESTTMP/test-failure-unicode.t.err
69 @@ -1,2 +1,2 @@
69 @@ -1,2 +1,2 @@
70 $ echo babar\xce\xb1 (esc)
70 $ echo babar\xce\xb1 (esc)
71 - l\xce\xb5\xce\xb5t (esc)
71 - l\xce\xb5\xce\xb5t (esc)
72 + babar\xce\xb1 (esc)
72 + babar\xce\xb1 (esc)
73
73
74 ERROR: test-failure-unicode.t output changed
74 ERROR: test-failure-unicode.t output changed
75 !
75 !
76 Failed test-failure.t: output changed
76 Failed test-failure.t: output changed
77 Failed test-failure-unicode.t: output changed
77 Failed test-failure-unicode.t: output changed
78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
79 python hash seed: * (glob)
79 python hash seed: * (glob)
80 [1]
80 [1]
81
81
82 test --xunit support
82 test --xunit support
83 $ rt --xunit=xunit.xml
83 $ rt --xunit=xunit.xml
84
84
85 --- $TESTTMP/test-failure.t
85 --- $TESTTMP/test-failure.t
86 +++ $TESTTMP/test-failure.t.err
86 +++ $TESTTMP/test-failure.t.err
87 @@ -1,4 +1,4 @@
87 @@ -1,4 +1,4 @@
88 $ echo babar
88 $ echo babar
89 - rataxes
89 - rataxes
90 + babar
90 + babar
91 This is a noop statement so that
91 This is a noop statement so that
92 this test is still more bytes than success.
92 this test is still more bytes than success.
93
93
94 ERROR: test-failure.t output changed
94 ERROR: test-failure.t output changed
95 !.
95 !.
96 --- $TESTTMP/test-failure-unicode.t
96 --- $TESTTMP/test-failure-unicode.t
97 +++ $TESTTMP/test-failure-unicode.t.err
97 +++ $TESTTMP/test-failure-unicode.t.err
98 @@ -1,2 +1,2 @@
98 @@ -1,2 +1,2 @@
99 $ echo babar\xce\xb1 (esc)
99 $ echo babar\xce\xb1 (esc)
100 - l\xce\xb5\xce\xb5t (esc)
100 - l\xce\xb5\xce\xb5t (esc)
101 + babar\xce\xb1 (esc)
101 + babar\xce\xb1 (esc)
102
102
103 ERROR: test-failure-unicode.t output changed
103 ERROR: test-failure-unicode.t output changed
104 !
104 !
105 Failed test-failure.t: output changed
105 Failed test-failure.t: output changed
106 Failed test-failure-unicode.t: output changed
106 Failed test-failure-unicode.t: output changed
107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
108 python hash seed: * (glob)
108 python hash seed: * (glob)
109 [1]
109 [1]
110 $ cat xunit.xml
110 $ cat xunit.xml
111 <?xml version="1.0" encoding="utf-8"?>
111 <?xml version="1.0" encoding="utf-8"?>
112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
113 <testcase name="test-success.t" time="*"/> (glob)
113 <testcase name="test-success.t" time="*"/> (glob)
114 <testcase name="test-failure-unicode.t" time="*"> (glob)
114 <testcase name="test-failure-unicode.t" time="*"> (glob)
115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
116 +++ $TESTTMP/test-failure-unicode.t.err
116 +++ $TESTTMP/test-failure-unicode.t.err
117 @@ -1,2 +1,2 @@
117 @@ -1,2 +1,2 @@
118 $ echo babar\xce\xb1 (esc)
118 $ echo babar\xce\xb1 (esc)
119 - l\xce\xb5\xce\xb5t (esc)
119 - l\xce\xb5\xce\xb5t (esc)
120 + babar\xce\xb1 (esc)
120 + babar\xce\xb1 (esc)
121 ]]> </testcase>
121 ]]> </testcase>
122 <testcase name="test-failure.t" time="*"> (glob)
122 <testcase name="test-failure.t" time="*"> (glob)
123 <![CDATA[--- $TESTTMP/test-failure.t
123 <![CDATA[--- $TESTTMP/test-failure.t
124 +++ $TESTTMP/test-failure.t.err
124 +++ $TESTTMP/test-failure.t.err
125 @@ -1,4 +1,4 @@
125 @@ -1,4 +1,4 @@
126 $ echo babar
126 $ echo babar
127 - rataxes
127 - rataxes
128 + babar
128 + babar
129 This is a noop statement so that
129 This is a noop statement so that
130 this test is still more bytes than success.
130 this test is still more bytes than success.
131 ]]> </testcase>
131 ]]> </testcase>
132 </testsuite>
132 </testsuite>
133
133
134 $ rm test-failure-unicode.t
134 $ rm test-failure-unicode.t
135
135
136 test for --retest
136 test for --retest
137 ====================
137 ====================
138
138
139 $ rt --retest
139 $ rt --retest
140
140
141 --- $TESTTMP/test-failure.t
141 --- $TESTTMP/test-failure.t
142 +++ $TESTTMP/test-failure.t.err
142 +++ $TESTTMP/test-failure.t.err
143 @@ -1,4 +1,4 @@
143 @@ -1,4 +1,4 @@
144 $ echo babar
144 $ echo babar
145 - rataxes
145 - rataxes
146 + babar
146 + babar
147 This is a noop statement so that
147 This is a noop statement so that
148 this test is still more bytes than success.
148 this test is still more bytes than success.
149
149
150 ERROR: test-failure.t output changed
150 ERROR: test-failure.t output changed
151 !
151 !
152 Failed test-failure.t: output changed
152 Failed test-failure.t: output changed
153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
154 python hash seed: * (glob)
154 python hash seed: * (glob)
155 [1]
155 [1]
156
156
157 Selecting Tests To Run
157 Selecting Tests To Run
158 ======================
158 ======================
159
159
160 successful
160 successful
161
161
162 $ rt test-success.t
162 $ rt test-success.t
163 .
163 .
164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
165
165
166 success w/ keyword
166 success w/ keyword
167 $ rt -k xyzzy
167 $ rt -k xyzzy
168 .
168 .
169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
170
170
171 failed
171 failed
172
172
173 $ rt test-failure.t
173 $ rt test-failure.t
174
174
175 --- $TESTTMP/test-failure.t
175 --- $TESTTMP/test-failure.t
176 +++ $TESTTMP/test-failure.t.err
176 +++ $TESTTMP/test-failure.t.err
177 @@ -1,4 +1,4 @@
177 @@ -1,4 +1,4 @@
178 $ echo babar
178 $ echo babar
179 - rataxes
179 - rataxes
180 + babar
180 + babar
181 This is a noop statement so that
181 This is a noop statement so that
182 this test is still more bytes than success.
182 this test is still more bytes than success.
183
183
184 ERROR: test-failure.t output changed
184 ERROR: test-failure.t output changed
185 !
185 !
186 Failed test-failure.t: output changed
186 Failed test-failure.t: output changed
187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
188 python hash seed: * (glob)
188 python hash seed: * (glob)
189 [1]
189 [1]
190
190
191 failure w/ keyword
191 failure w/ keyword
192 $ rt -k rataxes
192 $ rt -k rataxes
193
193
194 --- $TESTTMP/test-failure.t
194 --- $TESTTMP/test-failure.t
195 +++ $TESTTMP/test-failure.t.err
195 +++ $TESTTMP/test-failure.t.err
196 @@ -1,4 +1,4 @@
196 @@ -1,4 +1,4 @@
197 $ echo babar
197 $ echo babar
198 - rataxes
198 - rataxes
199 + babar
199 + babar
200 This is a noop statement so that
200 This is a noop statement so that
201 this test is still more bytes than success.
201 this test is still more bytes than success.
202
202
203 ERROR: test-failure.t output changed
203 ERROR: test-failure.t output changed
204 !
204 !
205 Failed test-failure.t: output changed
205 Failed test-failure.t: output changed
206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
207 python hash seed: * (glob)
207 python hash seed: * (glob)
208 [1]
208 [1]
209
209
210 Verify that when a process fails to start we show a useful message
210 Verify that when a process fails to start we show a useful message
211 ==================================================================
211 ==================================================================
212
212
213 $ cat > test-serve-fail.t <<EOF
213 $ cat > test-serve-fail.t <<EOF
214 > $ echo 'abort: child process failed to start blah'
214 > $ echo 'abort: child process failed to start blah'
215 > EOF
215 > EOF
216 $ rt test-serve-fail.t
216 $ rt test-serve-fail.t
217
217
218 ERROR: test-serve-fail.t output changed
218 ERROR: test-serve-fail.t output changed
219 !
219 !
220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
222 python hash seed: * (glob)
222 python hash seed: * (glob)
223 [1]
223 [1]
224 $ rm test-serve-fail.t
224 $ rm test-serve-fail.t
225
225
226 Verify that we can try other ports
226 Verify that we can try other ports
227 ===================================
227 ===================================
228 $ hg init inuse
228 $ hg init inuse
229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
230 $ cat blocks.pid >> $DAEMON_PIDS
230 $ cat blocks.pid >> $DAEMON_PIDS
231 $ cat > test-serve-inuse.t <<EOF
231 $ cat > test-serve-inuse.t <<EOF
232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
233 > $ cat hg.pid >> \$DAEMON_PIDS
233 > $ cat hg.pid >> \$DAEMON_PIDS
234 > EOF
234 > EOF
235 $ rt test-serve-inuse.t
235 $ rt test-serve-inuse.t
236 .
236 .
237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
238 $ rm test-serve-inuse.t
238 $ rm test-serve-inuse.t
239
239
240 Running In Debug Mode
240 Running In Debug Mode
241 ======================
241 ======================
242
242
243 $ rt --debug 2>&1 | grep -v pwd
243 $ rt --debug 2>&1 | grep -v pwd
244 + echo *SALT* 0 0 (glob)
244 + echo *SALT* 0 0 (glob)
245 *SALT* 0 0 (glob)
245 *SALT* 0 0 (glob)
246 + echo babar
246 + echo babar
247 babar
247 babar
248 + echo *SALT* 4 0 (glob)
248 + echo *SALT* 4 0 (glob)
249 *SALT* 4 0 (glob)
249 *SALT* 4 0 (glob)
250 *+ echo *SALT* 0 0 (glob)
250 *+ echo *SALT* 0 0 (glob)
251 *SALT* 0 0 (glob)
251 *SALT* 0 0 (glob)
252 + echo babar
252 + echo babar
253 babar
253 babar
254 + echo *SALT* 2 0 (glob)
254 + echo *SALT* 2 0 (glob)
255 *SALT* 2 0 (glob)
255 *SALT* 2 0 (glob)
256 + echo xyzzy
256 + echo xyzzy
257 xyzzy
257 xyzzy
258 + echo *SALT* 6 0 (glob)
258 + echo *SALT* 6 0 (glob)
259 *SALT* 6 0 (glob)
259 *SALT* 6 0 (glob)
260 .
260 .
261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
262
262
263 Parallel runs
263 Parallel runs
264 ==============
264 ==============
265
265
266 (duplicate the failing test to get predictable output)
266 (duplicate the failing test to get predictable output)
267 $ cp test-failure.t test-failure-copy.t
267 $ cp test-failure.t test-failure-copy.t
268
268
269 $ rt --jobs 2 test-failure*.t -n
269 $ rt --jobs 2 test-failure*.t -n
270 !!
270 !!
271 Failed test-failure*.t: output changed (glob)
271 Failed test-failure*.t: output changed (glob)
272 Failed test-failure*.t: output changed (glob)
272 Failed test-failure*.t: output changed (glob)
273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
274 python hash seed: * (glob)
274 python hash seed: * (glob)
275 [1]
275 [1]
276
276
277 failures in parallel with --first should only print one failure
277 failures in parallel with --first should only print one failure
278 >>> f = open('test-nothing.t', 'w')
278 >>> f = open('test-nothing.t', 'w')
279 >>> f.write('foo\n' * 1024) and None
279 >>> f.write('foo\n' * 1024) and None
280 >>> f.write(' $ sleep 1') and None
280 >>> f.write(' $ sleep 1') and None
281 $ rt --jobs 2 --first
281 $ rt --jobs 2 --first
282
282
283 --- $TESTTMP/test-failure*.t (glob)
283 --- $TESTTMP/test-failure*.t (glob)
284 +++ $TESTTMP/test-failure*.t.err (glob)
284 +++ $TESTTMP/test-failure*.t.err (glob)
285 @@ -1,4 +1,4 @@
285 @@ -1,4 +1,4 @@
286 $ echo babar
286 $ echo babar
287 - rataxes
287 - rataxes
288 + babar
288 + babar
289 This is a noop statement so that
289 This is a noop statement so that
290 this test is still more bytes than success.
290 this test is still more bytes than success.
291
291
292 Failed test-failure*.t: output changed (glob)
292 Failed test-failure*.t: output changed (glob)
293 Failed test-nothing.t: output changed
293 Failed test-nothing.t: output changed
294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
295 python hash seed: * (glob)
295 python hash seed: * (glob)
296 [1]
296 [1]
297
297
298
298
299 (delete the duplicated test file)
299 (delete the duplicated test file)
300 $ rm test-failure-copy.t test-nothing.t
300 $ rm test-failure-copy.t test-nothing.t
301
301
302
302
303 Interactive run
303 Interactive run
304 ===============
304 ===============
305
305
306 (backup the failing test)
306 (backup the failing test)
307 $ cp test-failure.t backup
307 $ cp test-failure.t backup
308
308
309 Refuse the fix
309 Refuse the fix
310
310
311 $ echo 'n' | rt -i
311 $ echo 'n' | rt -i
312
312
313 --- $TESTTMP/test-failure.t
313 --- $TESTTMP/test-failure.t
314 +++ $TESTTMP/test-failure.t.err
314 +++ $TESTTMP/test-failure.t.err
315 @@ -1,4 +1,4 @@
315 @@ -1,4 +1,4 @@
316 $ echo babar
316 $ echo babar
317 - rataxes
317 - rataxes
318 + babar
318 + babar
319 This is a noop statement so that
319 This is a noop statement so that
320 this test is still more bytes than success.
320 this test is still more bytes than success.
321 Accept this change? [n]
321 Accept this change? [n]
322 ERROR: test-failure.t output changed
322 ERROR: test-failure.t output changed
323 !.
323 !.
324 Failed test-failure.t: output changed
324 Failed test-failure.t: output changed
325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
326 python hash seed: * (glob)
326 python hash seed: * (glob)
327 [1]
327 [1]
328
328
329 $ cat test-failure.t
329 $ cat test-failure.t
330 $ echo babar
330 $ echo babar
331 rataxes
331 rataxes
332 This is a noop statement so that
332 This is a noop statement so that
333 this test is still more bytes than success.
333 this test is still more bytes than success.
334
334
335 Interactive with custom view
335 Interactive with custom view
336
336
337 $ echo 'n' | rt -i --view echo
337 $ echo 'n' | rt -i --view echo
338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
339 Accept this change? [n]* (glob)
339 Accept this change? [n]* (glob)
340 ERROR: test-failure.t output changed
340 ERROR: test-failure.t output changed
341 !.
341 !.
342 Failed test-failure.t: output changed
342 Failed test-failure.t: output changed
343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
344 python hash seed: * (glob)
344 python hash seed: * (glob)
345 [1]
345 [1]
346
346
347 View the fix
347 View the fix
348
348
349 $ echo 'y' | rt --view echo
349 $ echo 'y' | rt --view echo
350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
351
351
352 ERROR: test-failure.t output changed
352 ERROR: test-failure.t output changed
353 !.
353 !.
354 Failed test-failure.t: output changed
354 Failed test-failure.t: output changed
355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
356 python hash seed: * (glob)
356 python hash seed: * (glob)
357 [1]
357 [1]
358
358
359 Accept the fix
359 Accept the fix
360
360
361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
367 $ echo 'y' | rt -i 2>&1
367 $ echo 'y' | rt -i 2>&1
368
368
369 --- $TESTTMP/test-failure.t
369 --- $TESTTMP/test-failure.t
370 +++ $TESTTMP/test-failure.t.err
370 +++ $TESTTMP/test-failure.t.err
371 @@ -1,9 +1,9 @@
371 @@ -1,9 +1,9 @@
372 $ echo babar
372 $ echo babar
373 - rataxes
373 - rataxes
374 + babar
374 + babar
375 This is a noop statement so that
375 This is a noop statement so that
376 this test is still more bytes than success.
376 this test is still more bytes than success.
377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
378 - saved backup bundle to $TESTTMP/foo.hg
378 - saved backup bundle to $TESTTMP/foo.hg
379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
381 saved backup bundle to $TESTTMP/foo.hg* (glob)
381 saved backup bundle to $TESTTMP/foo.hg* (glob)
382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
383 Accept this change? [n] ..
383 Accept this change? [n] ..
384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
385
385
386 $ sed -e 's,(glob)$,&<,g' test-failure.t
387 $ echo babar
388 babar
389 This is a noop statement so that
390 this test is still more bytes than success.
391 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
392 saved backup bundle to $TESTTMP/foo.hg (glob)<
393 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
394 saved backup bundle to $TESTTMP/foo.hg (glob)<
395 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
396 saved backup bundle to $TESTTMP/*.hg (glob)<
397
398 (reinstall)
399 $ mv backup test-failure.t
400
401 No Diff
402 ===============
403
404 $ rt --nodiff
405 !.
406 Failed test-failure.t: output changed
407 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
408 python hash seed: * (glob)
409 [1]
410
411 test --tmpdir support
412 $ rt --tmpdir=$TESTTMP/keep test-success.t
413
414 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
415 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
416 .
417 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
418
419 timeouts
420 ========
421 $ cat > test-timeout.t <<EOF
422 > $ sleep 2
423 > $ echo pass
424 > pass
425 > EOF
426 > echo '#require slow' > test-slow-timeout.t
427 > cat test-timeout.t >> test-slow-timeout.t
428 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
429 st
430 Skipped test-slow-timeout.t: missing feature: allow slow tests
431 Failed test-timeout.t: timed out
432 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
433 python hash seed: * (glob)
434 [1]
435 $ rt --timeout=1 --slowtimeout=3 \
436 > test-timeout.t test-slow-timeout.t --allow-slow-tests
437 .t
438 Failed test-timeout.t: timed out
439 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
440 python hash seed: * (glob)
441 [1]
442 $ rm test-timeout.t test-slow-timeout.t
443
444 test for --time
445 ==================
446
447 $ rt test-success.t --time
448 .
449 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
450 # Producing time report
451 start end cuser csys real Test
452 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
453
454 test for --time with --job enabled
455 ====================================
456
457 $ rt test-success.t --time --jobs 2
458 .
459 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
460 # Producing time report
461 start end cuser csys real Test
462 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
463
464 Skips
465 ================
466 $ cat > test-skip.t <<EOF
467 > $ echo xyzzy
468 > #require false
469 > EOF
470 $ rt --nodiff
471 !.s
472 Skipped test-skip.t: missing feature: nail clipper
473 Failed test-failure.t: output changed
474 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
475 python hash seed: * (glob)
476 [1]
477
478 $ rt --keyword xyzzy
479 .s
480 Skipped test-skip.t: missing feature: nail clipper
481 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
482
483 Skips with xml
484 $ rt --keyword xyzzy \
485 > --xunit=xunit.xml
486 .s
487 Skipped test-skip.t: missing feature: nail clipper
488 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
489 $ cat xunit.xml
490 <?xml version="1.0" encoding="utf-8"?>
491 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
492 <testcase name="test-success.t" time="*"/> (glob)
493 </testsuite>
494
495 Missing skips or blacklisted skips don't count as executed:
496 $ echo test-failure.t > blacklist
497 $ rt --blacklist=blacklist --json\
498 > test-failure.t test-bogus.t
499 ss
500 Skipped test-bogus.t: Doesn't exist
501 Skipped test-failure.t: blacklisted
502 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
503 $ cat report.json
504 testreport ={
505 "test-bogus.t": {
506 "result": "skip"
507 },
508 "test-failure.t": {
509 "result": "skip"
510 }
511 } (no-eol)
512 #if json
513
514 test for --json
515 ==================
516
517 $ rt --json
518
519 --- $TESTTMP/test-failure.t
520 +++ $TESTTMP/test-failure.t.err
521 @@ -1,4 +1,4 @@
522 $ echo babar
523 - rataxes
524 + babar
525 This is a noop statement so that
526 this test is still more bytes than success.
527
528 ERROR: test-failure.t output changed
529 !.s
530 Skipped test-skip.t: missing feature: nail clipper
531 Failed test-failure.t: output changed
532 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
533 python hash seed: * (glob)
534 [1]
535
536 $ cat report.json
537 testreport ={
538 "test-failure.t": [\{] (re)
539 "csys": "\s*[\d\.]{4,5}", ? (re)
540 "cuser": "\s*[\d\.]{4,5}", ? (re)
541 "diff": "---.+\+\+\+.+", ? (re)
542 "end": "\s*[\d\.]{4,5}", ? (re)
543 "result": "failure", ? (re)
544 "start": "\s*[\d\.]{4,5}", ? (re)
545 "time": "\s*[\d\.]{4,5}" (re)
546 }, ? (re)
547 "test-skip.t": {
548 "csys": "\s*[\d\.]{4,5}", ? (re)
549 "cuser": "\s*[\d\.]{4,5}", ? (re)
550 "diff": "", ? (re)
551 "end": "\s*[\d\.]{4,5}", ? (re)
552 "result": "skip", ? (re)
553 "start": "\s*[\d\.]{4,5}", ? (re)
554 "time": "\s*[\d\.]{4,5}" (re)
555 }, ? (re)
556 "test-success.t": [\{] (re)
557 "csys": "\s*[\d\.]{4,5}", ? (re)
558 "cuser": "\s*[\d\.]{4,5}", ? (re)
559 "diff": "", ? (re)
560 "end": "\s*[\d\.]{4,5}", ? (re)
561 "result": "success", ? (re)
562 "start": "\s*[\d\.]{4,5}", ? (re)
563 "time": "\s*[\d\.]{4,5}" (re)
564 }
565 } (no-eol)
566
567 Test that failed test accepted through interactive are properly reported:
568
569 $ cp test-failure.t backup
570 $ echo y | rt --json -i
571
572 --- $TESTTMP/test-failure.t
573 +++ $TESTTMP/test-failure.t.err
574 @@ -1,4 +1,4 @@
575 $ echo babar
576 - rataxes
577 + babar
578 This is a noop statement so that
579 this test is still more bytes than success.
580 Accept this change? [n] ..s
581 Skipped test-skip.t: missing feature: nail clipper
582 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
583
584 $ cat report.json
585 testreport ={
586 "test-failure.t": [\{] (re)
587 "csys": "\s*[\d\.]{4,5}", ? (re)
588 "cuser": "\s*[\d\.]{4,5}", ? (re)
589 "diff": "", ? (re)
590 "end": "\s*[\d\.]{4,5}", ? (re)
591 "result": "success", ? (re)
592 "start": "\s*[\d\.]{4,5}", ? (re)
593 "time": "\s*[\d\.]{4,5}" (re)
594 }, ? (re)
595 "test-skip.t": {
596 "csys": "\s*[\d\.]{4,5}", ? (re)
597 "cuser": "\s*[\d\.]{4,5}", ? (re)
598 "diff": "", ? (re)
599 "end": "\s*[\d\.]{4,5}", ? (re)
600 "result": "skip", ? (re)
601 "start": "\s*[\d\.]{4,5}", ? (re)
602 "time": "\s*[\d\.]{4,5}" (re)
603 }, ? (re)
604 "test-success.t": [\{] (re)
605 "csys": "\s*[\d\.]{4,5}", ? (re)
606 "cuser": "\s*[\d\.]{4,5}", ? (re)
607 "diff": "", ? (re)
608 "end": "\s*[\d\.]{4,5}", ? (re)
609 "result": "success", ? (re)
610 "start": "\s*[\d\.]{4,5}", ? (re)
611 "time": "\s*[\d\.]{4,5}" (re)
612 }
613 } (no-eol)
614 $ mv backup test-failure.t
615
616 #endif
617
618 backslash on end of line with glob matching is handled properly
619
620 $ cat > test-glob-backslash.t << EOF
621 > $ echo 'foo bar \\'
622 > foo * \ (glob)
623 > EOF
624
625 $ rt test-glob-backslash.t
626 .
627 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
628
629 $ rm -f test-glob-backslash.t
630
631 Test reusability for third party tools
632 ======================================
633
634 $ mkdir "$TESTTMP"/anothertests
635 $ cd "$TESTTMP"/anothertests
636
637 test that `run-tests.py` can execute hghave, even if it runs not in
638 Mercurial source tree.
639
640 $ cat > test-hghave.t <<EOF
641 > #require true
642 > $ echo foo
643 > foo
644 > EOF
645 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
646 .
647 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
648
649 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
650 running is placed.
651
652 $ cat > test-runtestdir.t <<EOF
653 > - $TESTDIR, in which test-run-tests.t is placed
654 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
655 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
656 >
657 > #if windows
658 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
659 > #else
660 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
661 > #endif
662 > $ test "\$RUNTESTDIR" = "$TESTDIR"
663 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
664 > #!/usr/bin/env python
665 > #
666 > # check-code - a style and portability checker for Mercurial
667 > EOF
668 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
669 .
670 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
671
672 #if execbit
673
674 test that TESTDIR is referred in PATH
675
676 $ cat > custom-command.sh <<EOF
677 > #!/bin/sh
678 > echo "hello world"
679 > EOF
680 $ chmod +x custom-command.sh
681 $ cat > test-testdir-path.t <<EOF
682 > $ custom-command.sh
683 > hello world
684 > EOF
685 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
686 .
687 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
688
689 #endif
690
691 test support for --allow-slow-tests
692 $ cat > test-very-slow-test.t <<EOF
693 > #require slow
694 > $ echo pass
695 > pass
696 > EOF
697 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
698 s
699 Skipped test-very-slow-test.t: missing feature: allow slow tests
700 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
701 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
702 .
703 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
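
For background on the regression test added at lines 503-511: a minimal Python sketch of the failure mode it guards against (issue5050), where a test skipped via --blacklist never runs and therefore has no timing data, so a report writer that assumes every result carries timings crashes when --json is also given. This is an illustration only, not the code in run-tests.py; the TestResult class and write_json_report function below are hypothetical names.

    import json

    class TestResult(object):
        def __init__(self, name, result, times=None):
            self.name = name       # e.g. "test-failure.t"
            self.result = result   # "success", "failure", or "skip"
            self.times = times     # (start, end, cuser, csys, real) or None

    def write_json_report(results, path):
        report = {}
        for res in results:
            entry = {"result": res.result}
            if res.times is not None:
                # Only executed tests have timing data; blacklisted or
                # otherwise skipped tests must not be assumed to have it.
                start, end, cuser, csys, real = res.times
                entry.update({"start": start, "end": end,
                              "cuser": cuser, "csys": csys, "time": real})
            report[res.name] = entry
        with open(path, "w") as fp:
            fp.write("testreport =")
            json.dump(report, fp, indent=4, sort_keys=True)

    # Under these assumptions, a blacklisted test contributes only
    # {"result": "skip"}, matching the report.json content checked above.
    write_json_report([TestResult("test-bogus.t", "skip"),
                       TestResult("test-failure.t", "skip")], "report.json")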