run-tests: add 'diff' entry in json report...
Laurent Charignon
r27686:df142b77 default
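Note: the hunks that actually add the 'diff' entry fall outside the excerpt below; the --json option shown in getparser() stores test result data in 'report.json'. Purely as an illustration of the changeset's intent (field names other than 'diff' are assumptions, not taken from this changeset), a per-test report entry could look roughly like this:

# Illustrative sketch only -- not code from this changeset. It shows the kind
# of per-test entry a JSON test report could carry once a 'diff' field is
# recorded for failing tests.
import json

sample_report = {
    'test-example.t': {           # hypothetical test name
        'result': 'failure',      # assumed field
        'time': '2.75',           # assumed field
        'diff': '--- a/test-example.t\n+++ b/test-example.t.err\n...',  # the new entry
    },
}
print(json.dumps(sample_report, indent=2))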
@@ -1,2374 +1,2376 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import print_function
46 from __future__ import print_function
47
47
48 from distutils import version
48 from distutils import version
49 import difflib
49 import difflib
50 import errno
50 import errno
51 import optparse
51 import optparse
52 import os
52 import os
53 import shutil
53 import shutil
54 import subprocess
54 import subprocess
55 import signal
55 import signal
56 import socket
56 import socket
57 import sys
57 import sys
58 import tempfile
58 import tempfile
59 import time
59 import time
60 import random
60 import random
61 import re
61 import re
62 import threading
62 import threading
63 import killdaemons as killmod
63 import killdaemons as killmod
64 try:
64 try:
65 import Queue as queue
65 import Queue as queue
66 except ImportError:
66 except ImportError:
67 import queue
67 import queue
68 from xml.dom import minidom
68 from xml.dom import minidom
69 import unittest
69 import unittest
70
70
71 osenvironb = getattr(os, 'environb', os.environ)
71 osenvironb = getattr(os, 'environb', os.environ)
72
72
73 try:
73 try:
74 import json
74 import json
75 except ImportError:
75 except ImportError:
76 try:
76 try:
77 import simplejson as json
77 import simplejson as json
78 except ImportError:
78 except ImportError:
79 json = None
79 json = None
80
80
81 processlock = threading.Lock()
81 processlock = threading.Lock()
82
82
83 if sys.version_info > (3, 5, 0):
83 if sys.version_info > (3, 5, 0):
84 PYTHON3 = True
84 PYTHON3 = True
85 xrange = range # we use xrange in one place, and we'd rather not use range
85 xrange = range # we use xrange in one place, and we'd rather not use range
86 def _bytespath(p):
86 def _bytespath(p):
87 return p.encode('utf-8')
87 return p.encode('utf-8')
88
88
89 def _strpath(p):
89 def _strpath(p):
90 return p.decode('utf-8')
90 return p.decode('utf-8')
91
91
92 elif sys.version_info >= (3, 0, 0):
92 elif sys.version_info >= (3, 0, 0):
93 print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
93 print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
94 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
94 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
95 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
95 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
96 else:
96 else:
97 PYTHON3 = False
97 PYTHON3 = False
98
98
99 # In python 2.x, path operations are generally done using
99 # In python 2.x, path operations are generally done using
100 # bytestrings by default, so we don't have to do any extra
100 # bytestrings by default, so we don't have to do any extra
101 # fiddling there. We define the wrapper functions anyway just to
101 # fiddling there. We define the wrapper functions anyway just to
102 # help keep code consistent between platforms.
102 # help keep code consistent between platforms.
103 def _bytespath(p):
103 def _bytespath(p):
104 return p
104 return p
105
105
106 _strpath = _bytespath
106 _strpath = _bytespath
107
107
108 # For Windows support
108 # For Windows support
109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
110
110
111 def checkportisavailable(port):
111 def checkportisavailable(port):
112 """return true if a port seems free to bind on localhost"""
112 """return true if a port seems free to bind on localhost"""
113 try:
113 try:
114 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
114 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
115 s.bind(('localhost', port))
115 s.bind(('localhost', port))
116 s.close()
116 s.close()
117 return True
117 return True
118 except socket.error as exc:
118 except socket.error as exc:
119 if not exc.errno == errno.EADDRINUSE:
119 if not exc.errno == errno.EADDRINUSE:
120 raise
120 raise
121 return False
121 return False
122
122
123 closefds = os.name == 'posix'
123 closefds = os.name == 'posix'
124 def Popen4(cmd, wd, timeout, env=None):
124 def Popen4(cmd, wd, timeout, env=None):
125 processlock.acquire()
125 processlock.acquire()
126 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
126 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
127 close_fds=closefds,
127 close_fds=closefds,
128 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
128 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
129 stderr=subprocess.STDOUT)
129 stderr=subprocess.STDOUT)
130 processlock.release()
130 processlock.release()
131
131
132 p.fromchild = p.stdout
132 p.fromchild = p.stdout
133 p.tochild = p.stdin
133 p.tochild = p.stdin
134 p.childerr = p.stderr
134 p.childerr = p.stderr
135
135
136 p.timeout = False
136 p.timeout = False
137 if timeout:
137 if timeout:
138 def t():
138 def t():
139 start = time.time()
139 start = time.time()
140 while time.time() - start < timeout and p.returncode is None:
140 while time.time() - start < timeout and p.returncode is None:
141 time.sleep(.1)
141 time.sleep(.1)
142 p.timeout = True
142 p.timeout = True
143 if p.returncode is None:
143 if p.returncode is None:
144 terminate(p)
144 terminate(p)
145 threading.Thread(target=t).start()
145 threading.Thread(target=t).start()
146
146
147 return p
147 return p
148
148
149 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
149 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
150 IMPL_PATH = b'PYTHONPATH'
150 IMPL_PATH = b'PYTHONPATH'
151 if 'java' in sys.platform:
151 if 'java' in sys.platform:
152 IMPL_PATH = b'JYTHONPATH'
152 IMPL_PATH = b'JYTHONPATH'
153
153
154 defaults = {
154 defaults = {
155 'jobs': ('HGTEST_JOBS', 1),
155 'jobs': ('HGTEST_JOBS', 1),
156 'timeout': ('HGTEST_TIMEOUT', 180),
156 'timeout': ('HGTEST_TIMEOUT', 180),
157 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
157 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
158 'port': ('HGTEST_PORT', 20059),
158 'port': ('HGTEST_PORT', 20059),
159 'shell': ('HGTEST_SHELL', 'sh'),
159 'shell': ('HGTEST_SHELL', 'sh'),
160 }
160 }
161
161
162 def parselistfiles(files, listtype, warn=True):
162 def parselistfiles(files, listtype, warn=True):
163 entries = dict()
163 entries = dict()
164 for filename in files:
164 for filename in files:
165 try:
165 try:
166 path = os.path.expanduser(os.path.expandvars(filename))
166 path = os.path.expanduser(os.path.expandvars(filename))
167 f = open(path, "rb")
167 f = open(path, "rb")
168 except IOError as err:
168 except IOError as err:
169 if err.errno != errno.ENOENT:
169 if err.errno != errno.ENOENT:
170 raise
170 raise
171 if warn:
171 if warn:
172 print("warning: no such %s file: %s" % (listtype, filename))
172 print("warning: no such %s file: %s" % (listtype, filename))
173 continue
173 continue
174
174
175 for line in f.readlines():
175 for line in f.readlines():
176 line = line.split(b'#', 1)[0].strip()
176 line = line.split(b'#', 1)[0].strip()
177 if line:
177 if line:
178 entries[line] = filename
178 entries[line] = filename
179
179
180 f.close()
180 f.close()
181 return entries
181 return entries
182
182
183 def getparser():
183 def getparser():
184 """Obtain the OptionParser used by the CLI."""
184 """Obtain the OptionParser used by the CLI."""
185 parser = optparse.OptionParser("%prog [options] [tests]")
185 parser = optparse.OptionParser("%prog [options] [tests]")
186
186
187 # keep these sorted
187 # keep these sorted
188 parser.add_option("--blacklist", action="append",
188 parser.add_option("--blacklist", action="append",
189 help="skip tests listed in the specified blacklist file")
189 help="skip tests listed in the specified blacklist file")
190 parser.add_option("--whitelist", action="append",
190 parser.add_option("--whitelist", action="append",
191 help="always run tests listed in the specified whitelist file")
191 help="always run tests listed in the specified whitelist file")
192 parser.add_option("--changed", type="string",
192 parser.add_option("--changed", type="string",
193 help="run tests that are changed in parent rev or working directory")
193 help="run tests that are changed in parent rev or working directory")
194 parser.add_option("-C", "--annotate", action="store_true",
194 parser.add_option("-C", "--annotate", action="store_true",
195 help="output files annotated with coverage")
195 help="output files annotated with coverage")
196 parser.add_option("-c", "--cover", action="store_true",
196 parser.add_option("-c", "--cover", action="store_true",
197 help="print a test coverage report")
197 help="print a test coverage report")
198 parser.add_option("-d", "--debug", action="store_true",
198 parser.add_option("-d", "--debug", action="store_true",
199 help="debug mode: write output of test scripts to console"
199 help="debug mode: write output of test scripts to console"
200 " rather than capturing and diffing it (disables timeout)")
200 " rather than capturing and diffing it (disables timeout)")
201 parser.add_option("-f", "--first", action="store_true",
201 parser.add_option("-f", "--first", action="store_true",
202 help="exit on the first test failure")
202 help="exit on the first test failure")
203 parser.add_option("-H", "--htmlcov", action="store_true",
203 parser.add_option("-H", "--htmlcov", action="store_true",
204 help="create an HTML report of the coverage of the files")
204 help="create an HTML report of the coverage of the files")
205 parser.add_option("-i", "--interactive", action="store_true",
205 parser.add_option("-i", "--interactive", action="store_true",
206 help="prompt to accept changed output")
206 help="prompt to accept changed output")
207 parser.add_option("-j", "--jobs", type="int",
207 parser.add_option("-j", "--jobs", type="int",
208 help="number of jobs to run in parallel"
208 help="number of jobs to run in parallel"
209 " (default: $%s or %d)" % defaults['jobs'])
209 " (default: $%s or %d)" % defaults['jobs'])
210 parser.add_option("--keep-tmpdir", action="store_true",
210 parser.add_option("--keep-tmpdir", action="store_true",
211 help="keep temporary directory after running tests")
211 help="keep temporary directory after running tests")
212 parser.add_option("-k", "--keywords",
212 parser.add_option("-k", "--keywords",
213 help="run tests matching keywords")
213 help="run tests matching keywords")
214 parser.add_option("-l", "--local", action="store_true",
214 parser.add_option("-l", "--local", action="store_true",
215 help="shortcut for --with-hg=<testdir>/../hg")
215 help="shortcut for --with-hg=<testdir>/../hg")
216 parser.add_option("--loop", action="store_true",
216 parser.add_option("--loop", action="store_true",
217 help="loop tests repeatedly")
217 help="loop tests repeatedly")
218 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
218 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
219 help="run each test N times (default=1)", default=1)
219 help="run each test N times (default=1)", default=1)
220 parser.add_option("-n", "--nodiff", action="store_true",
220 parser.add_option("-n", "--nodiff", action="store_true",
221 help="skip showing test changes")
221 help="skip showing test changes")
222 parser.add_option("-p", "--port", type="int",
222 parser.add_option("-p", "--port", type="int",
223 help="port on which servers should listen"
223 help="port on which servers should listen"
224 " (default: $%s or %d)" % defaults['port'])
224 " (default: $%s or %d)" % defaults['port'])
225 parser.add_option("--compiler", type="string",
225 parser.add_option("--compiler", type="string",
226 help="compiler to build with")
226 help="compiler to build with")
227 parser.add_option("--pure", action="store_true",
227 parser.add_option("--pure", action="store_true",
228 help="use pure Python code instead of C extensions")
228 help="use pure Python code instead of C extensions")
229 parser.add_option("-R", "--restart", action="store_true",
229 parser.add_option("-R", "--restart", action="store_true",
230 help="restart at last error")
230 help="restart at last error")
231 parser.add_option("-r", "--retest", action="store_true",
231 parser.add_option("-r", "--retest", action="store_true",
232 help="retest failed tests")
232 help="retest failed tests")
233 parser.add_option("-S", "--noskips", action="store_true",
233 parser.add_option("-S", "--noskips", action="store_true",
234 help="don't report skip tests verbosely")
234 help="don't report skip tests verbosely")
235 parser.add_option("--shell", type="string",
235 parser.add_option("--shell", type="string",
236 help="shell to use (default: $%s or %s)" % defaults['shell'])
236 help="shell to use (default: $%s or %s)" % defaults['shell'])
237 parser.add_option("-t", "--timeout", type="int",
237 parser.add_option("-t", "--timeout", type="int",
238 help="kill errant tests after TIMEOUT seconds"
238 help="kill errant tests after TIMEOUT seconds"
239 " (default: $%s or %d)" % defaults['timeout'])
239 " (default: $%s or %d)" % defaults['timeout'])
240 parser.add_option("--slowtimeout", type="int",
240 parser.add_option("--slowtimeout", type="int",
241 help="kill errant slow tests after SLOWTIMEOUT seconds"
241 help="kill errant slow tests after SLOWTIMEOUT seconds"
242 " (default: $%s or %d)" % defaults['slowtimeout'])
242 " (default: $%s or %d)" % defaults['slowtimeout'])
243 parser.add_option("--time", action="store_true",
243 parser.add_option("--time", action="store_true",
244 help="time how long each test takes")
244 help="time how long each test takes")
245 parser.add_option("--json", action="store_true",
245 parser.add_option("--json", action="store_true",
246 help="store test result data in 'report.json' file")
246 help="store test result data in 'report.json' file")
247 parser.add_option("--tmpdir", type="string",
247 parser.add_option("--tmpdir", type="string",
248 help="run tests in the given temporary directory"
248 help="run tests in the given temporary directory"
249 " (implies --keep-tmpdir)")
249 " (implies --keep-tmpdir)")
250 parser.add_option("-v", "--verbose", action="store_true",
250 parser.add_option("-v", "--verbose", action="store_true",
251 help="output verbose messages")
251 help="output verbose messages")
252 parser.add_option("--xunit", type="string",
252 parser.add_option("--xunit", type="string",
253 help="record xunit results at specified path")
253 help="record xunit results at specified path")
254 parser.add_option("--view", type="string",
254 parser.add_option("--view", type="string",
255 help="external diff viewer")
255 help="external diff viewer")
256 parser.add_option("--with-hg", type="string",
256 parser.add_option("--with-hg", type="string",
257 metavar="HG",
257 metavar="HG",
258 help="test using specified hg script rather than a "
258 help="test using specified hg script rather than a "
259 "temporary installation")
259 "temporary installation")
260 parser.add_option("-3", "--py3k-warnings", action="store_true",
260 parser.add_option("-3", "--py3k-warnings", action="store_true",
261 help="enable Py3k warnings on Python 2.6+")
261 help="enable Py3k warnings on Python 2.6+")
262 parser.add_option('--extra-config-opt', action="append",
262 parser.add_option('--extra-config-opt', action="append",
263 help='set the given config opt in the test hgrc')
263 help='set the given config opt in the test hgrc')
264 parser.add_option('--random', action="store_true",
264 parser.add_option('--random', action="store_true",
265 help='run tests in random order')
265 help='run tests in random order')
266 parser.add_option('--profile-runner', action='store_true',
266 parser.add_option('--profile-runner', action='store_true',
267 help='run statprof on run-tests')
267 help='run statprof on run-tests')
268 parser.add_option('--allow-slow-tests', action='store_true',
268 parser.add_option('--allow-slow-tests', action='store_true',
269 help='allow extremely slow tests')
269 help='allow extremely slow tests')
270 parser.add_option('--showchannels', action='store_true',
270 parser.add_option('--showchannels', action='store_true',
271 help='show scheduling channels')
271 help='show scheduling channels')
272
272
273 for option, (envvar, default) in defaults.items():
273 for option, (envvar, default) in defaults.items():
274 defaults[option] = type(default)(os.environ.get(envvar, default))
274 defaults[option] = type(default)(os.environ.get(envvar, default))
275 parser.set_defaults(**defaults)
275 parser.set_defaults(**defaults)
276
276
277 return parser
277 return parser
278
278
279 def parseargs(args, parser):
279 def parseargs(args, parser):
280 """Parse arguments with our OptionParser and validate results."""
280 """Parse arguments with our OptionParser and validate results."""
281 (options, args) = parser.parse_args(args)
281 (options, args) = parser.parse_args(args)
282
282
283 # jython is always pure
283 # jython is always pure
284 if 'java' in sys.platform or '__pypy__' in sys.modules:
284 if 'java' in sys.platform or '__pypy__' in sys.modules:
285 options.pure = True
285 options.pure = True
286
286
287 if options.with_hg:
287 if options.with_hg:
288 options.with_hg = os.path.expanduser(options.with_hg)
288 options.with_hg = os.path.expanduser(options.with_hg)
289 if not (os.path.isfile(options.with_hg) and
289 if not (os.path.isfile(options.with_hg) and
290 os.access(options.with_hg, os.X_OK)):
290 os.access(options.with_hg, os.X_OK)):
291 parser.error('--with-hg must specify an executable hg script')
291 parser.error('--with-hg must specify an executable hg script')
292 if not os.path.basename(options.with_hg) == 'hg':
292 if not os.path.basename(options.with_hg) == 'hg':
293 sys.stderr.write('warning: --with-hg should specify an hg script\n')
293 sys.stderr.write('warning: --with-hg should specify an hg script\n')
294 if options.local:
294 if options.local:
295 testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
295 testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
296 hgbin = os.path.join(os.path.dirname(testdir), b'hg')
296 hgbin = os.path.join(os.path.dirname(testdir), b'hg')
297 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
297 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
298 parser.error('--local specified, but %r not found or not executable'
298 parser.error('--local specified, but %r not found or not executable'
299 % hgbin)
299 % hgbin)
300 options.with_hg = hgbin
300 options.with_hg = hgbin
301
301
302 options.anycoverage = options.cover or options.annotate or options.htmlcov
302 options.anycoverage = options.cover or options.annotate or options.htmlcov
303 if options.anycoverage:
303 if options.anycoverage:
304 try:
304 try:
305 import coverage
305 import coverage
306 covver = version.StrictVersion(coverage.__version__).version
306 covver = version.StrictVersion(coverage.__version__).version
307 if covver < (3, 3):
307 if covver < (3, 3):
308 parser.error('coverage options require coverage 3.3 or later')
308 parser.error('coverage options require coverage 3.3 or later')
309 except ImportError:
309 except ImportError:
310 parser.error('coverage options now require the coverage package')
310 parser.error('coverage options now require the coverage package')
311
311
312 if options.anycoverage and options.local:
312 if options.anycoverage and options.local:
313 # this needs some path mangling somewhere, I guess
313 # this needs some path mangling somewhere, I guess
314 parser.error("sorry, coverage options do not work when --local "
314 parser.error("sorry, coverage options do not work when --local "
315 "is specified")
315 "is specified")
316
316
317 if options.anycoverage and options.with_hg:
317 if options.anycoverage and options.with_hg:
318 parser.error("sorry, coverage options do not work when --with-hg "
318 parser.error("sorry, coverage options do not work when --with-hg "
319 "is specified")
319 "is specified")
320
320
321 global verbose
321 global verbose
322 if options.verbose:
322 if options.verbose:
323 verbose = ''
323 verbose = ''
324
324
325 if options.tmpdir:
325 if options.tmpdir:
326 options.tmpdir = os.path.expanduser(options.tmpdir)
326 options.tmpdir = os.path.expanduser(options.tmpdir)
327
327
328 if options.jobs < 1:
328 if options.jobs < 1:
329 parser.error('--jobs must be positive')
329 parser.error('--jobs must be positive')
330 if options.interactive and options.debug:
330 if options.interactive and options.debug:
331 parser.error("-i/--interactive and -d/--debug are incompatible")
331 parser.error("-i/--interactive and -d/--debug are incompatible")
332 if options.debug:
332 if options.debug:
333 if options.timeout != defaults['timeout']:
333 if options.timeout != defaults['timeout']:
334 sys.stderr.write(
334 sys.stderr.write(
335 'warning: --timeout option ignored with --debug\n')
335 'warning: --timeout option ignored with --debug\n')
336 if options.slowtimeout != defaults['slowtimeout']:
336 if options.slowtimeout != defaults['slowtimeout']:
337 sys.stderr.write(
337 sys.stderr.write(
338 'warning: --slowtimeout option ignored with --debug\n')
338 'warning: --slowtimeout option ignored with --debug\n')
339 options.timeout = 0
339 options.timeout = 0
340 options.slowtimeout = 0
340 options.slowtimeout = 0
341 if options.py3k_warnings:
341 if options.py3k_warnings:
342 if PYTHON3:
342 if PYTHON3:
343 parser.error(
343 parser.error(
344 '--py3k-warnings can only be used on Python 2.6 and 2.7')
344 '--py3k-warnings can only be used on Python 2.6 and 2.7')
345 if options.blacklist:
345 if options.blacklist:
346 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
346 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
347 if options.whitelist:
347 if options.whitelist:
348 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
348 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
349 else:
349 else:
350 options.whitelisted = {}
350 options.whitelisted = {}
351
351
352 if options.showchannels:
352 if options.showchannels:
353 options.nodiff = True
353 options.nodiff = True
354
354
355 return (options, args)
355 return (options, args)
356
356
357 def rename(src, dst):
357 def rename(src, dst):
358 """Like os.rename(), trade atomicity and opened files friendliness
358 """Like os.rename(), trade atomicity and opened files friendliness
359 for existing destination support.
359 for existing destination support.
360 """
360 """
361 shutil.copy(src, dst)
361 shutil.copy(src, dst)
362 os.remove(src)
362 os.remove(src)
363
363
364 _unified_diff = difflib.unified_diff
364 _unified_diff = difflib.unified_diff
365 if PYTHON3:
365 if PYTHON3:
366 import functools
366 import functools
367 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
367 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
368
368
369 def getdiff(expected, output, ref, err):
369 def getdiff(expected, output, ref, err):
370 servefail = False
370 servefail = False
371 lines = []
371 lines = []
372 for line in _unified_diff(expected, output, ref, err):
372 for line in _unified_diff(expected, output, ref, err):
373 if line.startswith(b'+++') or line.startswith(b'---'):
373 if line.startswith(b'+++') or line.startswith(b'---'):
374 line = line.replace(b'\\', b'/')
374 line = line.replace(b'\\', b'/')
375 if line.endswith(b' \n'):
375 if line.endswith(b' \n'):
376 line = line[:-2] + b'\n'
376 line = line[:-2] + b'\n'
377 lines.append(line)
377 lines.append(line)
378 if not servefail and line.startswith(
378 if not servefail and line.startswith(
379 b'+ abort: child process failed to start'):
379 b'+ abort: child process failed to start'):
380 servefail = True
380 servefail = True
381
381
382 return servefail, lines
382 return servefail, lines
383
383
384 verbose = False
384 verbose = False
385 def vlog(*msg):
385 def vlog(*msg):
386 """Log only when in verbose mode."""
386 """Log only when in verbose mode."""
387 if verbose is False:
387 if verbose is False:
388 return
388 return
389
389
390 return log(*msg)
390 return log(*msg)
391
391
392 # Bytes that break XML even in a CDATA block: control characters 0-31
392 # Bytes that break XML even in a CDATA block: control characters 0-31
393 # sans \t, \n and \r
393 # sans \t, \n and \r
394 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
394 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
395
395
396 def cdatasafe(data):
396 def cdatasafe(data):
397 """Make a string safe to include in a CDATA block.
397 """Make a string safe to include in a CDATA block.
398
398
399 Certain control characters are illegal in a CDATA block, and
399 Certain control characters are illegal in a CDATA block, and
400 there's no way to include a ]]> in a CDATA either. This function
400 there's no way to include a ]]> in a CDATA either. This function
401 replaces illegal bytes with ? and adds a space between the ]] so
401 replaces illegal bytes with ? and adds a space between the ]] so
402 that it won't break the CDATA block.
402 that it won't break the CDATA block.
403 """
403 """
404 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
404 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
405
405
406 def log(*msg):
406 def log(*msg):
407 """Log something to stdout.
407 """Log something to stdout.
408
408
409 Arguments are strings to print.
409 Arguments are strings to print.
410 """
410 """
411 with iolock:
411 with iolock:
412 if verbose:
412 if verbose:
413 print(verbose, end=' ')
413 print(verbose, end=' ')
414 for m in msg:
414 for m in msg:
415 print(m, end=' ')
415 print(m, end=' ')
416 print()
416 print()
417 sys.stdout.flush()
417 sys.stdout.flush()
418
418
419 def terminate(proc):
419 def terminate(proc):
420 """Terminate subprocess (with fallback for Python versions < 2.6)"""
420 """Terminate subprocess (with fallback for Python versions < 2.6)"""
421 vlog('# Terminating process %d' % proc.pid)
421 vlog('# Terminating process %d' % proc.pid)
422 try:
422 try:
423 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
423 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
424 except OSError:
424 except OSError:
425 pass
425 pass
426
426
427 def killdaemons(pidfile):
427 def killdaemons(pidfile):
428 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
428 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
429 logfn=vlog)
429 logfn=vlog)
430
430
431 class Test(unittest.TestCase):
431 class Test(unittest.TestCase):
432 """Encapsulates a single, runnable test.
432 """Encapsulates a single, runnable test.
433
433
434 While this class conforms to the unittest.TestCase API, it differs in that
434 While this class conforms to the unittest.TestCase API, it differs in that
435 instances need to be instantiated manually. (Typically, unittest.TestCase
435 instances need to be instantiated manually. (Typically, unittest.TestCase
436 classes are instantiated automatically by scanning modules.)
436 classes are instantiated automatically by scanning modules.)
437 """
437 """
438
438
439 # Status code reserved for skipped tests (used by hghave).
439 # Status code reserved for skipped tests (used by hghave).
440 SKIPPED_STATUS = 80
440 SKIPPED_STATUS = 80
441
441
442 def __init__(self, path, tmpdir, keeptmpdir=False,
442 def __init__(self, path, tmpdir, keeptmpdir=False,
443 debug=False,
443 debug=False,
444 timeout=defaults['timeout'],
444 timeout=defaults['timeout'],
445 startport=defaults['port'], extraconfigopts=None,
445 startport=defaults['port'], extraconfigopts=None,
446 py3kwarnings=False, shell=None,
446 py3kwarnings=False, shell=None,
447 slowtimeout=defaults['slowtimeout']):
447 slowtimeout=defaults['slowtimeout']):
448 """Create a test from parameters.
448 """Create a test from parameters.
449
449
450 path is the full path to the file defining the test.
450 path is the full path to the file defining the test.
451
451
452 tmpdir is the main temporary directory to use for this test.
452 tmpdir is the main temporary directory to use for this test.
453
453
454 keeptmpdir determines whether to keep the test's temporary directory
454 keeptmpdir determines whether to keep the test's temporary directory
455 after execution. It defaults to removal (False).
455 after execution. It defaults to removal (False).
456
456
457 debug mode will make the test execute verbosely, with unfiltered
457 debug mode will make the test execute verbosely, with unfiltered
458 output.
458 output.
459
459
460 timeout controls the maximum run time of the test. It is ignored when
460 timeout controls the maximum run time of the test. It is ignored when
461 debug is True. See slowtimeout for tests with #require slow.
461 debug is True. See slowtimeout for tests with #require slow.
462
462
463 slowtimeout overrides timeout if the test has #require slow.
463 slowtimeout overrides timeout if the test has #require slow.
464
464
465 startport controls the starting port number to use for this test. Each
465 startport controls the starting port number to use for this test. Each
466 test will reserve 3 port numbers for execution. It is the caller's
466 test will reserve 3 port numbers for execution. It is the caller's
467 responsibility to allocate a non-overlapping port range to Test
467 responsibility to allocate a non-overlapping port range to Test
468 instances.
468 instances.
469
469
470 extraconfigopts is an iterable of extra hgrc config options. Values
470 extraconfigopts is an iterable of extra hgrc config options. Values
471 must have the form "key=value" (something understood by hgrc). Values
471 must have the form "key=value" (something understood by hgrc). Values
472 of the form "foo.key=value" will result in "[foo] key=value".
472 of the form "foo.key=value" will result in "[foo] key=value".
473
473
474 py3kwarnings enables Py3k warnings.
474 py3kwarnings enables Py3k warnings.
475
475
476 shell is the shell to execute tests in.
476 shell is the shell to execute tests in.
477 """
477 """
478 self.path = path
478 self.path = path
479 self.bname = os.path.basename(path)
479 self.bname = os.path.basename(path)
480 self.name = _strpath(self.bname)
480 self.name = _strpath(self.bname)
481 self._testdir = os.path.dirname(path)
481 self._testdir = os.path.dirname(path)
482 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
482 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
483
483
484 self._threadtmp = tmpdir
484 self._threadtmp = tmpdir
485 self._keeptmpdir = keeptmpdir
485 self._keeptmpdir = keeptmpdir
486 self._debug = debug
486 self._debug = debug
487 self._timeout = timeout
487 self._timeout = timeout
488 self._slowtimeout = slowtimeout
488 self._slowtimeout = slowtimeout
489 self._startport = startport
489 self._startport = startport
490 self._extraconfigopts = extraconfigopts or []
490 self._extraconfigopts = extraconfigopts or []
491 self._py3kwarnings = py3kwarnings
491 self._py3kwarnings = py3kwarnings
492 self._shell = _bytespath(shell)
492 self._shell = _bytespath(shell)
493
493
494 self._aborted = False
494 self._aborted = False
495 self._daemonpids = []
495 self._daemonpids = []
496 self._finished = None
496 self._finished = None
497 self._ret = None
497 self._ret = None
498 self._out = None
498 self._out = None
499 self._skipped = None
499 self._skipped = None
500 self._testtmp = None
500 self._testtmp = None
501
501
502 # If we're not in --debug mode and reference output file exists,
502 # If we're not in --debug mode and reference output file exists,
503 # check test output against it.
503 # check test output against it.
504 if debug:
504 if debug:
505 self._refout = None # to match "out is None"
505 self._refout = None # to match "out is None"
506 elif os.path.exists(self.refpath):
506 elif os.path.exists(self.refpath):
507 f = open(self.refpath, 'rb')
507 f = open(self.refpath, 'rb')
508 self._refout = f.read().splitlines(True)
508 self._refout = f.read().splitlines(True)
509 f.close()
509 f.close()
510 else:
510 else:
511 self._refout = []
511 self._refout = []
512
512
513 # needed to get base class __repr__ running
513 # needed to get base class __repr__ running
514 @property
514 @property
515 def _testMethodName(self):
515 def _testMethodName(self):
516 return self.name
516 return self.name
517
517
518 def __str__(self):
518 def __str__(self):
519 return self.name
519 return self.name
520
520
521 def shortDescription(self):
521 def shortDescription(self):
522 return self.name
522 return self.name
523
523
524 def setUp(self):
524 def setUp(self):
525 """Tasks to perform before run()."""
525 """Tasks to perform before run()."""
526 self._finished = False
526 self._finished = False
527 self._ret = None
527 self._ret = None
528 self._out = None
528 self._out = None
529 self._skipped = None
529 self._skipped = None
530
530
531 try:
531 try:
532 os.mkdir(self._threadtmp)
532 os.mkdir(self._threadtmp)
533 except OSError as e:
533 except OSError as e:
534 if e.errno != errno.EEXIST:
534 if e.errno != errno.EEXIST:
535 raise
535 raise
536
536
537 self._testtmp = os.path.join(self._threadtmp,
537 self._testtmp = os.path.join(self._threadtmp,
538 os.path.basename(self.path))
538 os.path.basename(self.path))
539 os.mkdir(self._testtmp)
539 os.mkdir(self._testtmp)
540
540
541 # Remove any previous output files.
541 # Remove any previous output files.
542 if os.path.exists(self.errpath):
542 if os.path.exists(self.errpath):
543 try:
543 try:
544 os.remove(self.errpath)
544 os.remove(self.errpath)
545 except OSError as e:
545 except OSError as e:
546 # We might have raced another test to clean up a .err
546 # We might have raced another test to clean up a .err
547 # file, so ignore ENOENT when removing a previous .err
547 # file, so ignore ENOENT when removing a previous .err
548 # file.
548 # file.
549 if e.errno != errno.ENOENT:
549 if e.errno != errno.ENOENT:
550 raise
550 raise
551
551
552 def run(self, result):
552 def run(self, result):
553 """Run this test and report results against a TestResult instance."""
553 """Run this test and report results against a TestResult instance."""
554 # This function is extremely similar to unittest.TestCase.run(). Once
554 # This function is extremely similar to unittest.TestCase.run(). Once
555 # we require Python 2.7 (or at least its version of unittest), this
555 # we require Python 2.7 (or at least its version of unittest), this
556 # function can largely go away.
556 # function can largely go away.
557 self._result = result
557 self._result = result
558 result.startTest(self)
558 result.startTest(self)
559 try:
559 try:
560 try:
560 try:
561 self.setUp()
561 self.setUp()
562 except (KeyboardInterrupt, SystemExit):
562 except (KeyboardInterrupt, SystemExit):
563 self._aborted = True
563 self._aborted = True
564 raise
564 raise
565 except Exception:
565 except Exception:
566 result.addError(self, sys.exc_info())
566 result.addError(self, sys.exc_info())
567 return
567 return
568
568
569 success = False
569 success = False
570 try:
570 try:
571 self.runTest()
571 self.runTest()
572 except KeyboardInterrupt:
572 except KeyboardInterrupt:
573 self._aborted = True
573 self._aborted = True
574 raise
574 raise
575 except SkipTest as e:
575 except SkipTest as e:
576 result.addSkip(self, str(e))
576 result.addSkip(self, str(e))
577 # The base class will have already counted this as a
577 # The base class will have already counted this as a
578 # test we "ran", but we want to exclude skipped tests
578 # test we "ran", but we want to exclude skipped tests
579 # from those we count towards those run.
579 # from those we count towards those run.
580 result.testsRun -= 1
580 result.testsRun -= 1
581 except IgnoreTest as e:
581 except IgnoreTest as e:
582 result.addIgnore(self, str(e))
582 result.addIgnore(self, str(e))
583 # As with skips, ignores also should be excluded from
583 # As with skips, ignores also should be excluded from
584 # the number of tests executed.
584 # the number of tests executed.
585 result.testsRun -= 1
585 result.testsRun -= 1
586 except WarnTest as e:
586 except WarnTest as e:
587 result.addWarn(self, str(e))
587 result.addWarn(self, str(e))
588 except ReportedTest as e:
588 except ReportedTest as e:
589 pass
589 pass
590 except self.failureException as e:
590 except self.failureException as e:
591 # This differs from unittest in that we don't capture
591 # This differs from unittest in that we don't capture
592 # the stack trace. This is for historical reasons and
592 # the stack trace. This is for historical reasons and
593 # this decision could be revisited in the future,
593 # this decision could be revisited in the future,
594 # especially for PythonTest instances.
594 # especially for PythonTest instances.
595 if result.addFailure(self, str(e)):
595 if result.addFailure(self, str(e)):
596 success = True
596 success = True
597 except Exception:
597 except Exception:
598 result.addError(self, sys.exc_info())
598 result.addError(self, sys.exc_info())
599 else:
599 else:
600 success = True
600 success = True
601
601
602 try:
602 try:
603 self.tearDown()
603 self.tearDown()
604 except (KeyboardInterrupt, SystemExit):
604 except (KeyboardInterrupt, SystemExit):
605 self._aborted = True
605 self._aborted = True
606 raise
606 raise
607 except Exception:
607 except Exception:
608 result.addError(self, sys.exc_info())
608 result.addError(self, sys.exc_info())
609 success = False
609 success = False
610
610
611 if success:
611 if success:
612 result.addSuccess(self)
612 result.addSuccess(self)
613 finally:
613 finally:
614 result.stopTest(self, interrupted=self._aborted)
614 result.stopTest(self, interrupted=self._aborted)
615
615
616 def runTest(self):
616 def runTest(self):
617 """Run this test instance.
617 """Run this test instance.
618
618
619 This will return a tuple describing the result of the test.
619 This will return a tuple describing the result of the test.
620 """
620 """
621 env = self._getenv()
621 env = self._getenv()
622 self._daemonpids.append(env['DAEMON_PIDS'])
622 self._daemonpids.append(env['DAEMON_PIDS'])
623 self._createhgrc(env['HGRCPATH'])
623 self._createhgrc(env['HGRCPATH'])
624
624
625 vlog('# Test', self.name)
625 vlog('# Test', self.name)
626
626
627 ret, out = self._run(env)
627 ret, out = self._run(env)
628 self._finished = True
628 self._finished = True
629 self._ret = ret
629 self._ret = ret
630 self._out = out
630 self._out = out
631
631
632 def describe(ret):
632 def describe(ret):
633 if ret < 0:
633 if ret < 0:
634 return 'killed by signal: %d' % -ret
634 return 'killed by signal: %d' % -ret
635 return 'returned error code %d' % ret
635 return 'returned error code %d' % ret
636
636
637 self._skipped = False
637 self._skipped = False
638
638
639 if ret == self.SKIPPED_STATUS:
639 if ret == self.SKIPPED_STATUS:
640 if out is None: # Debug mode, nothing to parse.
640 if out is None: # Debug mode, nothing to parse.
641 missing = ['unknown']
641 missing = ['unknown']
642 failed = None
642 failed = None
643 else:
643 else:
644 missing, failed = TTest.parsehghaveoutput(out)
644 missing, failed = TTest.parsehghaveoutput(out)
645
645
646 if not missing:
646 if not missing:
647 missing = ['skipped']
647 missing = ['skipped']
648
648
649 if failed:
649 if failed:
650 self.fail('hg have failed checking for %s' % failed[-1])
650 self.fail('hg have failed checking for %s' % failed[-1])
651 else:
651 else:
652 self._skipped = True
652 self._skipped = True
653 raise SkipTest(missing[-1])
653 raise SkipTest(missing[-1])
654 elif ret == 'timeout':
654 elif ret == 'timeout':
655 self.fail('timed out')
655 self.fail('timed out')
656 elif ret is False:
656 elif ret is False:
657 raise WarnTest('no result code from test')
657 raise WarnTest('no result code from test')
658 elif out != self._refout:
658 elif out != self._refout:
659 # Diff generation may rely on written .err file.
659 # Diff generation may rely on written .err file.
660 if (ret != 0 or out != self._refout) and not self._skipped \
660 if (ret != 0 or out != self._refout) and not self._skipped \
661 and not self._debug:
661 and not self._debug:
662 f = open(self.errpath, 'wb')
662 f = open(self.errpath, 'wb')
663 for line in out:
663 for line in out:
664 f.write(line)
664 f.write(line)
665 f.close()
665 f.close()
666
666
667 # The result object handles diff calculation for us.
667 # The result object handles diff calculation for us.
668 if self._result.addOutputMismatch(self, ret, out, self._refout):
668 if self._result.addOutputMismatch(self, ret, out, self._refout):
669 # change was accepted, skip failing
669 # change was accepted, skip failing
670 return
670 return
671
671
672 if ret:
672 if ret:
673 msg = 'output changed and ' + describe(ret)
673 msg = 'output changed and ' + describe(ret)
674 else:
674 else:
675 msg = 'output changed'
675 msg = 'output changed'
676
676
677 self.fail(msg)
677 self.fail(msg)
678 elif ret:
678 elif ret:
679 self.fail(describe(ret))
679 self.fail(describe(ret))
680
680
681 def tearDown(self):
681 def tearDown(self):
682 """Tasks to perform after run()."""
682 """Tasks to perform after run()."""
683 for entry in self._daemonpids:
683 for entry in self._daemonpids:
684 killdaemons(entry)
684 killdaemons(entry)
685 self._daemonpids = []
685 self._daemonpids = []
686
686
687 if self._keeptmpdir:
687 if self._keeptmpdir:
688 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
688 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
689 (self._testtmp, self._threadtmp))
689 (self._testtmp, self._threadtmp))
690 else:
690 else:
691 shutil.rmtree(self._testtmp, True)
691 shutil.rmtree(self._testtmp, True)
692 shutil.rmtree(self._threadtmp, True)
692 shutil.rmtree(self._threadtmp, True)
693
693
694 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
694 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
695 and not self._debug and self._out:
695 and not self._debug and self._out:
696 f = open(self.errpath, 'wb')
696 f = open(self.errpath, 'wb')
697 for line in self._out:
697 for line in self._out:
698 f.write(line)
698 f.write(line)
699 f.close()
699 f.close()
700
700
701 vlog("# Ret was:", self._ret, '(%s)' % self.name)
701 vlog("# Ret was:", self._ret, '(%s)' % self.name)
702
702
703 def _run(self, env):
703 def _run(self, env):
704 # This should be implemented in child classes to run tests.
704 # This should be implemented in child classes to run tests.
705 raise SkipTest('unknown test type')
705 raise SkipTest('unknown test type')
706
706
707 def abort(self):
707 def abort(self):
708 """Terminate execution of this test."""
708 """Terminate execution of this test."""
709 self._aborted = True
709 self._aborted = True
710
710
711 def _getreplacements(self):
711 def _getreplacements(self):
712 """Obtain a mapping of text replacements to apply to test output.
712 """Obtain a mapping of text replacements to apply to test output.
713
713
714 Test output needs to be normalized so it can be compared to expected
714 Test output needs to be normalized so it can be compared to expected
715 output. This function defines how some of that normalization will
715 output. This function defines how some of that normalization will
716 occur.
716 occur.
717 """
717 """
718 r = [
718 r = [
719 (br':%d\b' % self._startport, b':$HGPORT'),
719 (br':%d\b' % self._startport, b':$HGPORT'),
720 (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
720 (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
721 (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
721 (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
722 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
722 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
723 br'\1 (glob)'),
723 br'\1 (glob)'),
724 ]
724 ]
725
725
726 if os.name == 'nt':
726 if os.name == 'nt':
727 r.append(
727 r.append(
728 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
728 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
729 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
729 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
730 for c in self._testtmp), b'$TESTTMP'))
730 for c in self._testtmp), b'$TESTTMP'))
731 else:
731 else:
732 r.append((re.escape(self._testtmp), b'$TESTTMP'))
732 r.append((re.escape(self._testtmp), b'$TESTTMP'))
733
733
734 return r
734 return r
735
735
736 def _getenv(self):
736 def _getenv(self):
737 """Obtain environment variables to use during test execution."""
737 """Obtain environment variables to use during test execution."""
738 env = os.environ.copy()
738 env = os.environ.copy()
739 env['TESTTMP'] = self._testtmp
739 env['TESTTMP'] = self._testtmp
740 env['HOME'] = self._testtmp
740 env['HOME'] = self._testtmp
741 env["HGPORT"] = str(self._startport)
741 env["HGPORT"] = str(self._startport)
742 env["HGPORT1"] = str(self._startport + 1)
742 env["HGPORT1"] = str(self._startport + 1)
743 env["HGPORT2"] = str(self._startport + 2)
743 env["HGPORT2"] = str(self._startport + 2)
744 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
744 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
745 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
745 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
746 env["HGEDITOR"] = ('"' + sys.executable + '"'
746 env["HGEDITOR"] = ('"' + sys.executable + '"'
747 + ' -c "import sys; sys.exit(0)"')
747 + ' -c "import sys; sys.exit(0)"')
748 env["HGMERGE"] = "internal:merge"
748 env["HGMERGE"] = "internal:merge"
749 env["HGUSER"] = "test"
749 env["HGUSER"] = "test"
750 env["HGENCODING"] = "ascii"
750 env["HGENCODING"] = "ascii"
751 env["HGENCODINGMODE"] = "strict"
751 env["HGENCODINGMODE"] = "strict"
752
752
753 # Reset some environment variables to well-known values so that
753 # Reset some environment variables to well-known values so that
754 # the tests produce repeatable output.
754 # the tests produce repeatable output.
755 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
755 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
756 env['TZ'] = 'GMT'
756 env['TZ'] = 'GMT'
757 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
757 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
758 env['COLUMNS'] = '80'
758 env['COLUMNS'] = '80'
759 env['TERM'] = 'xterm'
759 env['TERM'] = 'xterm'
760
760
761 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
761 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
762 'NO_PROXY').split():
762 'NO_PROXY').split():
763 if k in env:
763 if k in env:
764 del env[k]
764 del env[k]
765
765
766 # unset env related to hooks
766 # unset env related to hooks
767 for k in env.keys():
767 for k in env.keys():
768 if k.startswith('HG_'):
768 if k.startswith('HG_'):
769 del env[k]
769 del env[k]
770
770
771 return env
771 return env
772
772
773 def _createhgrc(self, path):
773 def _createhgrc(self, path):
774 """Create an hgrc file for this test."""
774 """Create an hgrc file for this test."""
775 hgrc = open(path, 'wb')
775 hgrc = open(path, 'wb')
776 hgrc.write(b'[ui]\n')
776 hgrc.write(b'[ui]\n')
777 hgrc.write(b'slash = True\n')
777 hgrc.write(b'slash = True\n')
778 hgrc.write(b'interactive = False\n')
778 hgrc.write(b'interactive = False\n')
779 hgrc.write(b'mergemarkers = detailed\n')
779 hgrc.write(b'mergemarkers = detailed\n')
780 hgrc.write(b'promptecho = True\n')
780 hgrc.write(b'promptecho = True\n')
781 hgrc.write(b'[defaults]\n')
781 hgrc.write(b'[defaults]\n')
782 hgrc.write(b'backout = -d "0 0"\n')
782 hgrc.write(b'backout = -d "0 0"\n')
783 hgrc.write(b'commit = -d "0 0"\n')
783 hgrc.write(b'commit = -d "0 0"\n')
784 hgrc.write(b'shelve = --date "0 0"\n')
784 hgrc.write(b'shelve = --date "0 0"\n')
785 hgrc.write(b'tag = -d "0 0"\n')
785 hgrc.write(b'tag = -d "0 0"\n')
786 hgrc.write(b'[devel]\n')
786 hgrc.write(b'[devel]\n')
787 hgrc.write(b'all-warnings = true\n')
787 hgrc.write(b'all-warnings = true\n')
788 hgrc.write(b'[largefiles]\n')
788 hgrc.write(b'[largefiles]\n')
789 hgrc.write(b'usercache = %s\n' %
789 hgrc.write(b'usercache = %s\n' %
790 (os.path.join(self._testtmp, b'.cache/largefiles')))
790 (os.path.join(self._testtmp, b'.cache/largefiles')))
791
791
792 for opt in self._extraconfigopts:
792 for opt in self._extraconfigopts:
793 section, key = opt.split('.', 1)
793 section, key = opt.split('.', 1)
794 assert '=' in key, ('extra config opt %s must '
794 assert '=' in key, ('extra config opt %s must '
795 'have an = for assignment' % opt)
795 'have an = for assignment' % opt)
796 hgrc.write(b'[%s]\n%s\n' % (section, key))
796 hgrc.write(b'[%s]\n%s\n' % (section, key))
797 hgrc.close()
797 hgrc.close()
798
798
799 def fail(self, msg):
799 def fail(self, msg):
800 # unittest differentiates between errored and failed.
800 # unittest differentiates between errored and failed.
801 # Failed is denoted by AssertionError (by default at least).
801 # Failed is denoted by AssertionError (by default at least).
802 raise AssertionError(msg)
802 raise AssertionError(msg)
803
803
804 def _runcommand(self, cmd, env, normalizenewlines=False):
804 def _runcommand(self, cmd, env, normalizenewlines=False):
805 """Run command in a sub-process, capturing the output (stdout and
805 """Run command in a sub-process, capturing the output (stdout and
806 stderr).
806 stderr).
807
807
808 Return a tuple (exitcode, output). output is None in debug mode.
808 Return a tuple (exitcode, output). output is None in debug mode.
809 """
809 """
810 if self._debug:
810 if self._debug:
811 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
811 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
812 env=env)
812 env=env)
813 ret = proc.wait()
813 ret = proc.wait()
814 return (ret, None)
814 return (ret, None)
815
815
816 proc = Popen4(cmd, self._testtmp, self._timeout, env)
816 proc = Popen4(cmd, self._testtmp, self._timeout, env)
817 def cleanup():
817 def cleanup():
818 terminate(proc)
818 terminate(proc)
819 ret = proc.wait()
819 ret = proc.wait()
820 if ret == 0:
820 if ret == 0:
821 ret = signal.SIGTERM << 8
821 ret = signal.SIGTERM << 8
822 killdaemons(env['DAEMON_PIDS'])
822 killdaemons(env['DAEMON_PIDS'])
823 return ret
823 return ret
824
824
825 output = ''
825 output = ''
826 proc.tochild.close()
826 proc.tochild.close()
827
827
828 try:
828 try:
829 output = proc.fromchild.read()
829 output = proc.fromchild.read()
830 except KeyboardInterrupt:
830 except KeyboardInterrupt:
831 vlog('# Handling keyboard interrupt')
831 vlog('# Handling keyboard interrupt')
832 cleanup()
832 cleanup()
833 raise
833 raise
834
834
835 ret = proc.wait()
835 ret = proc.wait()
836 if wifexited(ret):
836 if wifexited(ret):
837 ret = os.WEXITSTATUS(ret)
837 ret = os.WEXITSTATUS(ret)
838
838
839 if proc.timeout:
839 if proc.timeout:
840 ret = 'timeout'
840 ret = 'timeout'
841
841
842 if ret:
842 if ret:
843 killdaemons(env['DAEMON_PIDS'])
843 killdaemons(env['DAEMON_PIDS'])
844
844
845 for s, r in self._getreplacements():
845 for s, r in self._getreplacements():
846 output = re.sub(s, r, output)
846 output = re.sub(s, r, output)
847
847
848 if normalizenewlines:
848 if normalizenewlines:
849 output = output.replace('\r\n', '\n')
849 output = output.replace('\r\n', '\n')
850
850
851 return ret, output.splitlines(True)
851 return ret, output.splitlines(True)
852
852
853 class PythonTest(Test):
853 class PythonTest(Test):
854 """A Python-based test."""
854 """A Python-based test."""
855
855
856 @property
856 @property
857 def refpath(self):
857 def refpath(self):
858 return os.path.join(self._testdir, b'%s.out' % self.bname)
858 return os.path.join(self._testdir, b'%s.out' % self.bname)
859
859
860 def _run(self, env):
860 def _run(self, env):
861 py3kswitch = self._py3kwarnings and b' -3' or b''
861 py3kswitch = self._py3kwarnings and b' -3' or b''
862 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
862 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
863 vlog("# Running", cmd)
863 vlog("# Running", cmd)
864 normalizenewlines = os.name == 'nt'
864 normalizenewlines = os.name == 'nt'
865 result = self._runcommand(cmd, env,
865 result = self._runcommand(cmd, env,
866 normalizenewlines=normalizenewlines)
866 normalizenewlines=normalizenewlines)
867 if self._aborted:
867 if self._aborted:
868 raise KeyboardInterrupt()
868 raise KeyboardInterrupt()
869
869
870 return result
870 return result
871
871
872 # This script may want to drop globs from lines matching these patterns on
872 # This script may want to drop globs from lines matching these patterns on
873 # Windows, but check-code.py wants a glob on these lines unconditionally. Don't
873 # Windows, but check-code.py wants a glob on these lines unconditionally. Don't
874 # warn if that is the case for anything matching these lines.
874 # warn if that is the case for anything matching these lines.
875 checkcodeglobpats = [
875 checkcodeglobpats = [
876 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
876 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
877 re.compile(br'^moving \S+/.*[^)]$'),
877 re.compile(br'^moving \S+/.*[^)]$'),
878 re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
878 re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
879 ]
879 ]
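# Editor's note: an invented illustration of when the patterns above apply.
# An expectation such as
#
#     pushing to $TESTTMP/dest (glob)
#
# may match its output byte for byte on Windows even without the glob, which
# would normally trigger a suggestion to drop the '(glob)'; because
# check-code.py insists on the glob for these lines, globmatch() consults
# this list and keeps quiet instead.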
880
880
881 bchr = chr
881 bchr = chr
882 if PYTHON3:
882 if PYTHON3:
883 bchr = lambda x: bytes([x])
883 bchr = lambda x: bytes([x])
884
884
885 class TTest(Test):
885 class TTest(Test):
886 """A "t test" is a test backed by a .t file."""
886 """A "t test" is a test backed by a .t file."""
887
887
888 SKIPPED_PREFIX = 'skipped: '
888 SKIPPED_PREFIX = 'skipped: '
889 FAILED_PREFIX = 'hghave check failed: '
889 FAILED_PREFIX = 'hghave check failed: '
890 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
890 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
891
891
892 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
892 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
893 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
893 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
894 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
894 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
895
895
896 @property
896 @property
897 def refpath(self):
897 def refpath(self):
898 return os.path.join(self._testdir, self.bname)
898 return os.path.join(self._testdir, self.bname)
899
899
900 def _run(self, env):
900 def _run(self, env):
901 f = open(self.path, 'rb')
901 f = open(self.path, 'rb')
902 lines = f.readlines()
902 lines = f.readlines()
903 f.close()
903 f.close()
904
904
905 salt, script, after, expected = self._parsetest(lines)
905 salt, script, after, expected = self._parsetest(lines)
906
906
907 # Write out the generated script.
907 # Write out the generated script.
908 fname = b'%s.sh' % self._testtmp
908 fname = b'%s.sh' % self._testtmp
909 f = open(fname, 'wb')
909 f = open(fname, 'wb')
910 for l in script:
910 for l in script:
911 f.write(l)
911 f.write(l)
912 f.close()
912 f.close()
913
913
914 cmd = b'%s "%s"' % (self._shell, fname)
914 cmd = b'%s "%s"' % (self._shell, fname)
915 vlog("# Running", cmd)
915 vlog("# Running", cmd)
916
916
917 exitcode, output = self._runcommand(cmd, env)
917 exitcode, output = self._runcommand(cmd, env)
918
918
919 if self._aborted:
919 if self._aborted:
920 raise KeyboardInterrupt()
920 raise KeyboardInterrupt()
921
921
922 # Do not merge output if skipped. Return hghave message instead.
922 # Do not merge output if skipped. Return hghave message instead.
923 # Similarly, with --debug, output is None.
923 # Similarly, with --debug, output is None.
924 if exitcode == self.SKIPPED_STATUS or output is None:
924 if exitcode == self.SKIPPED_STATUS or output is None:
925 return exitcode, output
925 return exitcode, output
926
926
927 return self._processoutput(exitcode, output, salt, after, expected)
927 return self._processoutput(exitcode, output, salt, after, expected)
928
928
929 def _hghave(self, reqs):
929 def _hghave(self, reqs):
930 # TODO do something smarter when all other uses of hghave are gone.
930 # TODO do something smarter when all other uses of hghave are gone.
931 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
931 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
932 tdir = runtestdir.replace(b'\\', b'/')
932 tdir = runtestdir.replace(b'\\', b'/')
933 proc = Popen4(b'%s -c "%s/hghave %s"' %
933 proc = Popen4(b'%s -c "%s/hghave %s"' %
934 (self._shell, tdir, b' '.join(reqs)),
934 (self._shell, tdir, b' '.join(reqs)),
935 self._testtmp, 0, self._getenv())
935 self._testtmp, 0, self._getenv())
936 stdout, stderr = proc.communicate()
936 stdout, stderr = proc.communicate()
937 ret = proc.wait()
937 ret = proc.wait()
938 if wifexited(ret):
938 if wifexited(ret):
939 ret = os.WEXITSTATUS(ret)
939 ret = os.WEXITSTATUS(ret)
940 if ret == 2:
940 if ret == 2:
941 print(stdout)
941 print(stdout)
942 sys.exit(1)
942 sys.exit(1)
943
943
944 if ret != 0:
944 if ret != 0:
945 return False, stdout
945 return False, stdout
946
946
947 if 'slow' in reqs:
947 if 'slow' in reqs:
948 self._timeout = self._slowtimeout
948 self._timeout = self._slowtimeout
949 return True, None
949 return True, None
950
950
951 def _parsetest(self, lines):
951 def _parsetest(self, lines):
952 # We generate a shell script which outputs unique markers to line
952 # We generate a shell script which outputs unique markers to line
953 # up script results with our source. These markers include input
953 # up script results with our source. These markers include input
954 # line number and the last return code.
954 # line number and the last return code.
955 salt = b"SALT%d" % time.time()
955 salt = b"SALT%d" % time.time()
956 def addsalt(line, inpython):
956 def addsalt(line, inpython):
957 if inpython:
957 if inpython:
958 script.append(b'%s %d 0\n' % (salt, line))
958 script.append(b'%s %d 0\n' % (salt, line))
959 else:
959 else:
960 script.append(b'echo %s %d $?\n' % (salt, line))
960 script.append(b'echo %s %d $?\n' % (salt, line))
961
961
962 script = []
962 script = []
963
963
964 # After we run the shell script, we re-unify the script output
964 # After we run the shell script, we re-unify the script output
965 # with non-active parts of the source, with synchronization by our
965 # with non-active parts of the source, with synchronization by our
966 # SALT line number markers. The after table contains the non-active
966 # SALT line number markers. The after table contains the non-active
967 # components, ordered by line number.
967 # components, ordered by line number.
968 after = {}
968 after = {}
969
969
970 # Expected shell script output.
970 # Expected shell script output.
971 expected = {}
971 expected = {}
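# Editor's sketch of the marker scheme described above (salt value and line
# number are invented). A source line such as
#
#     $ hg status
#
# at input line 12 ends up in the generated .sh roughly as
#
#     echo SALT1450000000 12 $?
#     hg status
#
# so every chunk of real output is bracketed by salt lines carrying the
# source line number and the previous command's exit status, which is what
# _processoutput() later splits on to line results back up with the source.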
972
972
973 pos = prepos = -1
973 pos = prepos = -1
974
974
975 # True or False when in a true or false conditional section
975 # True or False when in a true or false conditional section
976 skipping = None
976 skipping = None
977
977
978 # We keep track of whether or not we're in a Python block so we
978 # We keep track of whether or not we're in a Python block so we
979 # can generate the surrounding doctest magic.
979 # can generate the surrounding doctest magic.
980 inpython = False
980 inpython = False
981
981
982 if self._debug:
982 if self._debug:
983 script.append(b'set -x\n')
983 script.append(b'set -x\n')
984 if os.getenv('MSYSTEM'):
984 if os.getenv('MSYSTEM'):
985 script.append(b'alias pwd="pwd -W"\n')
985 script.append(b'alias pwd="pwd -W"\n')
986
986
987 for n, l in enumerate(lines):
987 for n, l in enumerate(lines):
988 if not l.endswith(b'\n'):
988 if not l.endswith(b'\n'):
989 l += b'\n'
989 l += b'\n'
990 if l.startswith(b'#require'):
990 if l.startswith(b'#require'):
991 lsplit = l.split()
991 lsplit = l.split()
992 if len(lsplit) < 2 or lsplit[0] != b'#require':
992 if len(lsplit) < 2 or lsplit[0] != b'#require':
993 after.setdefault(pos, []).append(' !!! invalid #require\n')
993 after.setdefault(pos, []).append(' !!! invalid #require\n')
994 haveresult, message = self._hghave(lsplit[1:])
994 haveresult, message = self._hghave(lsplit[1:])
995 if not haveresult:
995 if not haveresult:
996 script = [b'echo "%s"\nexit 80\n' % message]
996 script = [b'echo "%s"\nexit 80\n' % message]
997 break
997 break
998 after.setdefault(pos, []).append(l)
998 after.setdefault(pos, []).append(l)
999 elif l.startswith(b'#if'):
999 elif l.startswith(b'#if'):
1000 lsplit = l.split()
1000 lsplit = l.split()
1001 if len(lsplit) < 2 or lsplit[0] != b'#if':
1001 if len(lsplit) < 2 or lsplit[0] != b'#if':
1002 after.setdefault(pos, []).append(' !!! invalid #if\n')
1002 after.setdefault(pos, []).append(' !!! invalid #if\n')
1003 if skipping is not None:
1003 if skipping is not None:
1004 after.setdefault(pos, []).append(' !!! nested #if\n')
1004 after.setdefault(pos, []).append(' !!! nested #if\n')
1005 skipping = not self._hghave(lsplit[1:])[0]
1005 skipping = not self._hghave(lsplit[1:])[0]
1006 after.setdefault(pos, []).append(l)
1006 after.setdefault(pos, []).append(l)
1007 elif l.startswith(b'#else'):
1007 elif l.startswith(b'#else'):
1008 if skipping is None:
1008 if skipping is None:
1009 after.setdefault(pos, []).append(' !!! missing #if\n')
1009 after.setdefault(pos, []).append(' !!! missing #if\n')
1010 skipping = not skipping
1010 skipping = not skipping
1011 after.setdefault(pos, []).append(l)
1011 after.setdefault(pos, []).append(l)
1012 elif l.startswith(b'#endif'):
1012 elif l.startswith(b'#endif'):
1013 if skipping is None:
1013 if skipping is None:
1014 after.setdefault(pos, []).append(' !!! missing #if\n')
1014 after.setdefault(pos, []).append(' !!! missing #if\n')
1015 skipping = None
1015 skipping = None
1016 after.setdefault(pos, []).append(l)
1016 after.setdefault(pos, []).append(l)
1017 elif skipping:
1017 elif skipping:
1018 after.setdefault(pos, []).append(l)
1018 after.setdefault(pos, []).append(l)
1019 elif l.startswith(b' >>> '): # python inlines
1019 elif l.startswith(b' >>> '): # python inlines
1020 after.setdefault(pos, []).append(l)
1020 after.setdefault(pos, []).append(l)
1021 prepos = pos
1021 prepos = pos
1022 pos = n
1022 pos = n
1023 if not inpython:
1023 if not inpython:
1024 # We've just entered a Python block. Add the header.
1024 # We've just entered a Python block. Add the header.
1025 inpython = True
1025 inpython = True
1026 addsalt(prepos, False) # Make sure we report the exit code.
1026 addsalt(prepos, False) # Make sure we report the exit code.
1027 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1027 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1028 addsalt(n, True)
1028 addsalt(n, True)
1029 script.append(l[2:])
1029 script.append(l[2:])
1030 elif l.startswith(b' ... '): # python inlines
1030 elif l.startswith(b' ... '): # python inlines
1031 after.setdefault(prepos, []).append(l)
1031 after.setdefault(prepos, []).append(l)
1032 script.append(l[2:])
1032 script.append(l[2:])
1033 elif l.startswith(b' $ '): # commands
1033 elif l.startswith(b' $ '): # commands
1034 if inpython:
1034 if inpython:
1035 script.append(b'EOF\n')
1035 script.append(b'EOF\n')
1036 inpython = False
1036 inpython = False
1037 after.setdefault(pos, []).append(l)
1037 after.setdefault(pos, []).append(l)
1038 prepos = pos
1038 prepos = pos
1039 pos = n
1039 pos = n
1040 addsalt(n, False)
1040 addsalt(n, False)
1041 cmd = l[4:].split()
1041 cmd = l[4:].split()
1042 if len(cmd) == 2 and cmd[0] == b'cd':
1042 if len(cmd) == 2 and cmd[0] == b'cd':
1043 l = b' $ cd %s || exit 1\n' % cmd[1]
1043 l = b' $ cd %s || exit 1\n' % cmd[1]
1044 script.append(l[4:])
1044 script.append(l[4:])
1045 elif l.startswith(b' > '): # continuations
1045 elif l.startswith(b' > '): # continuations
1046 after.setdefault(prepos, []).append(l)
1046 after.setdefault(prepos, []).append(l)
1047 script.append(l[4:])
1047 script.append(l[4:])
1048 elif l.startswith(b' '): # results
1048 elif l.startswith(b' '): # results
1049 # Queue up a list of expected results.
1049 # Queue up a list of expected results.
1050 expected.setdefault(pos, []).append(l[2:])
1050 expected.setdefault(pos, []).append(l[2:])
1051 else:
1051 else:
1052 if inpython:
1052 if inpython:
1053 script.append(b'EOF\n')
1053 script.append(b'EOF\n')
1054 inpython = False
1054 inpython = False
1055 # Non-command/result. Queue up for merged output.
1055 # Non-command/result. Queue up for merged output.
1056 after.setdefault(pos, []).append(l)
1056 after.setdefault(pos, []).append(l)
1057
1057
1058 if inpython:
1058 if inpython:
1059 script.append(b'EOF\n')
1059 script.append(b'EOF\n')
1060 if skipping is not None:
1060 if skipping is not None:
1061 after.setdefault(pos, []).append(' !!! missing #endif\n')
1061 after.setdefault(pos, []).append(' !!! missing #endif\n')
1062 addsalt(n + 1, False)
1062 addsalt(n + 1, False)
1063
1063
1064 return salt, script, after, expected
1064 return salt, script, after, expected
1065
1065
1066 def _processoutput(self, exitcode, output, salt, after, expected):
1066 def _processoutput(self, exitcode, output, salt, after, expected):
1067 # Merge the script output back into a unified test.
1067 # Merge the script output back into a unified test.
1068 warnonly = 1 # 1: not yet decided; 2: yes, warn only; 3: for sure not
1068 warnonly = 1 # 1: not yet decided; 2: yes, warn only; 3: for sure not
1069 if exitcode != 0:
1069 if exitcode != 0:
1070 warnonly = 3
1070 warnonly = 3
1071
1071
1072 pos = -1
1072 pos = -1
1073 postout = []
1073 postout = []
1074 for l in output:
1074 for l in output:
1075 lout, lcmd = l, None
1075 lout, lcmd = l, None
1076 if salt in l:
1076 if salt in l:
1077 lout, lcmd = l.split(salt, 1)
1077 lout, lcmd = l.split(salt, 1)
1078
1078
1079 while lout:
1079 while lout:
1080 if not lout.endswith(b'\n'):
1080 if not lout.endswith(b'\n'):
1081 lout += b' (no-eol)\n'
1081 lout += b' (no-eol)\n'
1082
1082
1083 # Find the expected output at the current position.
1083 # Find the expected output at the current position.
1084 el = None
1084 el = None
1085 if expected.get(pos, None):
1085 if expected.get(pos, None):
1086 el = expected[pos].pop(0)
1086 el = expected[pos].pop(0)
1087
1087
1088 r = TTest.linematch(el, lout)
1088 r = TTest.linematch(el, lout)
1089 if isinstance(r, str):
1089 if isinstance(r, str):
1090 if r == '+glob':
1090 if r == '+glob':
1091 lout = el[:-1] + ' (glob)\n'
1091 lout = el[:-1] + ' (glob)\n'
1092 r = '' # Warn only this line.
1092 r = '' # Warn only this line.
1093 elif r == '-glob':
1093 elif r == '-glob':
1094 lout = ''.join(el.rsplit(' (glob)', 1))
1094 lout = ''.join(el.rsplit(' (glob)', 1))
1095 r = '' # Warn only this line.
1095 r = '' # Warn only this line.
1096 elif r == "retry":
1096 elif r == "retry":
1097 postout.append(b' ' + el)
1097 postout.append(b' ' + el)
1098 continue
1098 continue
1099 else:
1099 else:
1100 log('\ninfo, unknown linematch result: %r\n' % r)
1100 log('\ninfo, unknown linematch result: %r\n' % r)
1101 r = False
1101 r = False
1102 if r:
1102 if r:
1103 postout.append(b' ' + el)
1103 postout.append(b' ' + el)
1104 else:
1104 else:
1105 if self.NEEDESCAPE(lout):
1105 if self.NEEDESCAPE(lout):
1106 lout = TTest._stringescape(b'%s (esc)\n' %
1106 lout = TTest._stringescape(b'%s (esc)\n' %
1107 lout.rstrip(b'\n'))
1107 lout.rstrip(b'\n'))
1108 postout.append(b' ' + lout) # Let diff deal with it.
1108 postout.append(b' ' + lout) # Let diff deal with it.
1109 if r != '': # If line failed.
1109 if r != '': # If line failed.
1110 warnonly = 3 # for sure not
1110 warnonly = 3 # for sure not
1111 elif warnonly == 1: # Is "not yet" and line is warn only.
1111 elif warnonly == 1: # Is "not yet" and line is warn only.
1112 warnonly = 2 # Yes do warn.
1112 warnonly = 2 # Yes do warn.
1113 break
1113 break
1114
1114
1115 # clean up any optional leftovers
1115 # clean up any optional leftovers
1116 while expected.get(pos, None):
1116 while expected.get(pos, None):
1117 el = expected[pos].pop(0)
1117 el = expected[pos].pop(0)
1118 if not el.endswith(b" (?)\n"):
1118 if not el.endswith(b" (?)\n"):
1119 expected[pos].insert(0, el)
1119 expected[pos].insert(0, el)
1120 break
1120 break
1121 postout.append(b' ' + el)
1121 postout.append(b' ' + el)
1122
1122
1123 if lcmd:
1123 if lcmd:
1124 # Add on last return code.
1124 # Add on last return code.
1125 ret = int(lcmd.split()[1])
1125 ret = int(lcmd.split()[1])
1126 if ret != 0:
1126 if ret != 0:
1127 postout.append(b' [%d]\n' % ret)
1127 postout.append(b' [%d]\n' % ret)
1128 if pos in after:
1128 if pos in after:
1129 # Merge in non-active test bits.
1129 # Merge in non-active test bits.
1130 postout += after.pop(pos)
1130 postout += after.pop(pos)
1131 pos = int(lcmd.split()[0])
1131 pos = int(lcmd.split()[0])
1132
1132
1133 if pos in after:
1133 if pos in after:
1134 postout += after.pop(pos)
1134 postout += after.pop(pos)
1135
1135
1136 if warnonly == 2:
1136 if warnonly == 2:
1137 exitcode = False # Set exitcode to warned.
1137 exitcode = False # Set exitcode to warned.
1138
1138
1139 return exitcode, postout
1139 return exitcode, postout
1140
1140
1141 @staticmethod
1141 @staticmethod
1142 def rematch(el, l):
1142 def rematch(el, l):
1143 try:
1143 try:
1144 # use \Z to ensure that the regex matches to the end of the string
1144 # use \Z to ensure that the regex matches to the end of the string
1145 if os.name == 'nt':
1145 if os.name == 'nt':
1146 return re.match(el + br'\r?\n\Z', l)
1146 return re.match(el + br'\r?\n\Z', l)
1147 return re.match(el + br'\n\Z', l)
1147 return re.match(el + br'\n\Z', l)
1148 except re.error:
1148 except re.error:
1149 # el is an invalid regex
1149 # el is an invalid regex
1150 return False
1150 return False
1151
1151
1152 @staticmethod
1152 @staticmethod
1153 def globmatch(el, l):
1153 def globmatch(el, l):
1154 # The only supported special characters are * and ?, plus /, which also
1154 # The only supported special characters are * and ?, plus /, which also
1155 # matches \ on Windows. Escaping of these characters is supported.
1155 # matches \ on Windows. Escaping of these characters is supported.
1156 if el + b'\n' == l:
1156 if el + b'\n' == l:
1157 if os.altsep:
1157 if os.altsep:
1158 # matching on "/" is not needed for this line
1158 # matching on "/" is not needed for this line
1159 for pat in checkcodeglobpats:
1159 for pat in checkcodeglobpats:
1160 if pat.match(el):
1160 if pat.match(el):
1161 return True
1161 return True
1162 return b'-glob'
1162 return b'-glob'
1163 return True
1163 return True
1164 i, n = 0, len(el)
1164 i, n = 0, len(el)
1165 res = b''
1165 res = b''
1166 while i < n:
1166 while i < n:
1167 c = el[i:i + 1]
1167 c = el[i:i + 1]
1168 i += 1
1168 i += 1
1169 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1169 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1170 res += el[i - 1:i + 1]
1170 res += el[i - 1:i + 1]
1171 i += 1
1171 i += 1
1172 elif c == b'*':
1172 elif c == b'*':
1173 res += b'.*'
1173 res += b'.*'
1174 elif c == b'?':
1174 elif c == b'?':
1175 res += b'.'
1175 res += b'.'
1176 elif c == b'/' and os.altsep:
1176 elif c == b'/' and os.altsep:
1177 res += b'[/\\\\]'
1177 res += b'[/\\\\]'
1178 else:
1178 else:
1179 res += re.escape(c)
1179 res += re.escape(c)
1180 return TTest.rematch(res, l)
1180 return TTest.rematch(res, l)
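# Editor's note on the translation above (example values invented): each
# literal character is re.escape()d, '*' becomes '.*', '?' becomes '.', and
# on platforms with os.altsep '/' becomes the character class [/\\]. So an
# expectation like
#
#     adding dir/file-?.txt (glob)
#
# also matches the Windows spelling 'adding dir\file-a.txt'. When the glob
# line happens to match the output exactly, b'-glob' is returned to suggest
# dropping the unnecessary '(glob)', unless the line is covered by
# checkcodeglobpats.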
1181
1181
1182 @staticmethod
1182 @staticmethod
1183 def linematch(el, l):
1183 def linematch(el, l):
1184 retry = False
1184 retry = False
1185 if el == l: # perfect match (fast)
1185 if el == l: # perfect match (fast)
1186 return True
1186 return True
1187 if el:
1187 if el:
1188 if el.endswith(b" (?)\n"):
1188 if el.endswith(b" (?)\n"):
1189 retry = "retry"
1189 retry = "retry"
1190 el = el[:-5] + "\n"
1190 el = el[:-5] + "\n"
1191 if el.endswith(b" (esc)\n"):
1191 if el.endswith(b" (esc)\n"):
1192 if PYTHON3:
1192 if PYTHON3:
1193 el = el[:-7].decode('unicode_escape') + '\n'
1193 el = el[:-7].decode('unicode_escape') + '\n'
1194 el = el.encode('utf-8')
1194 el = el.encode('utf-8')
1195 else:
1195 else:
1196 el = el[:-7].decode('string-escape') + '\n'
1196 el = el[:-7].decode('string-escape') + '\n'
1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1198 return True
1198 return True
1199 if el.endswith(b" (re)\n"):
1199 if el.endswith(b" (re)\n"):
1200 return TTest.rematch(el[:-6], l) or retry
1200 return TTest.rematch(el[:-6], l) or retry
1201 if el.endswith(b" (glob)\n"):
1201 if el.endswith(b" (glob)\n"):
1202 # ignore '(glob)' added to l by 'replacements'
1202 # ignore '(glob)' added to l by 'replacements'
1203 if l.endswith(b" (glob)\n"):
1203 if l.endswith(b" (glob)\n"):
1204 l = l[:-8] + b"\n"
1204 l = l[:-8] + b"\n"
1205 return TTest.globmatch(el[:-8], l)
1205 return TTest.globmatch(el[:-8], l)
1206 if os.altsep and l.replace(b'\\', b'/') == el:
1206 if os.altsep and l.replace(b'\\', b'/') == el:
1207 return b'+glob'
1207 return b'+glob'
1208 return retry
1208 return retry
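# Editor's summary of the expectation suffixes handled above, with invented
# test content:
#
#     $ hg log -r0 -T '{node}\n'
#     [0-9a-f]{40} (re)            <- anchored regular-expression match
#     pulling from */remote (glob) <- wildcard match; '/' also matches '\'
#     \x1b[31mbad\x1b[0m (esc)     <- expectation stores escaped control bytes
#     extra chatter (?)            <- optional line: kept in the output, and
#                                     the real line is retried against the
#                                     next expectation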
1209
1209
1210 @staticmethod
1210 @staticmethod
1211 def parsehghaveoutput(lines):
1211 def parsehghaveoutput(lines):
1212 '''Parse hghave log lines.
1212 '''Parse hghave log lines.
1213
1213
1214 Return tuple of lists (missing, failed):
1214 Return tuple of lists (missing, failed):
1215 * the missing/unknown features
1215 * the missing/unknown features
1216 * the features for which existence check failed'''
1216 * the features for which existence check failed'''
1217 missing = []
1217 missing = []
1218 failed = []
1218 failed = []
1219 for line in lines:
1219 for line in lines:
1220 if line.startswith(TTest.SKIPPED_PREFIX):
1220 if line.startswith(TTest.SKIPPED_PREFIX):
1221 line = line.splitlines()[0]
1221 line = line.splitlines()[0]
1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1223 elif line.startswith(TTest.FAILED_PREFIX):
1223 elif line.startswith(TTest.FAILED_PREFIX):
1224 line = line.splitlines()[0]
1224 line = line.splitlines()[0]
1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1226
1226
1227 return missing, failed
1227 return missing, failed
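# For example (editor's illustration), hghave output containing
#
#     skipped: missing feature: git
#     hghave check failed: svn
#
# is parsed into missing=['missing feature: git'] and failed=['svn'].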
1228
1228
1229 @staticmethod
1229 @staticmethod
1230 def _escapef(m):
1230 def _escapef(m):
1231 return TTest.ESCAPEMAP[m.group(0)]
1231 return TTest.ESCAPEMAP[m.group(0)]
1232
1232
1233 @staticmethod
1233 @staticmethod
1234 def _stringescape(s):
1234 def _stringescape(s):
1235 return TTest.ESCAPESUB(TTest._escapef, s)
1235 return TTest.ESCAPESUB(TTest._escapef, s)
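# Editor's example of the escaping above (bytes invented): a raw output line
#
#     b'\x1b[31mred\x1b[0m\r\n'
#
# becomes the printable
#
#     b'\\x1b[31mred\\x1b[0m\\r\n'
#
# i.e. control bytes and '\r' are spelled out while the trailing newline is
# left alone; this is the form recorded with an '(esc)' suffix.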
1236
1236
1237 iolock = threading.RLock()
1237 iolock = threading.RLock()
1238
1238
1239 class SkipTest(Exception):
1239 class SkipTest(Exception):
1240 """Raised to indicate that a test is to be skipped."""
1240 """Raised to indicate that a test is to be skipped."""
1241
1241
1242 class IgnoreTest(Exception):
1242 class IgnoreTest(Exception):
1243 """Raised to indicate that a test is to be ignored."""
1243 """Raised to indicate that a test is to be ignored."""
1244
1244
1245 class WarnTest(Exception):
1245 class WarnTest(Exception):
1246 """Raised to indicate that a test warned."""
1246 """Raised to indicate that a test warned."""
1247
1247
1248 class ReportedTest(Exception):
1248 class ReportedTest(Exception):
1249 """Raised to indicate that a test already reported."""
1249 """Raised to indicate that a test already reported."""
1250
1250
1251 class TestResult(unittest._TextTestResult):
1251 class TestResult(unittest._TextTestResult):
1252 """Holds results when executing via unittest."""
1252 """Holds results when executing via unittest."""
1253 # Don't worry too much about accessing the non-public _TextTestResult.
1253 # Don't worry too much about accessing the non-public _TextTestResult.
1254 # It is relatively common in Python testing tools.
1254 # It is relatively common in Python testing tools.
1255 def __init__(self, options, *args, **kwargs):
1255 def __init__(self, options, *args, **kwargs):
1256 super(TestResult, self).__init__(*args, **kwargs)
1256 super(TestResult, self).__init__(*args, **kwargs)
1257
1257
1258 self._options = options
1258 self._options = options
1259
1259
1260 # unittest.TestResult didn't have skipped until 2.7. We need to
1260 # unittest.TestResult didn't have skipped until 2.7. We need to
1261 # polyfill it.
1261 # polyfill it.
1262 self.skipped = []
1262 self.skipped = []
1263
1263
1264 # We have a custom "ignored" result that isn't present in any Python
1264 # We have a custom "ignored" result that isn't present in any Python
1265 # unittest implementation. It is very similar to skipped. It may make
1265 # unittest implementation. It is very similar to skipped. It may make
1266 # sense to map it into skip some day.
1266 # sense to map it into skip some day.
1267 self.ignored = []
1267 self.ignored = []
1268
1268
1269 # We have a custom "warned" result that isn't present in any Python
1269 # We have a custom "warned" result that isn't present in any Python
1270 # unittest implementation. It is very similar to failed. It may make
1270 # unittest implementation. It is very similar to failed. It may make
1271 # sense to map it into fail some day.
1271 # sense to map it into fail some day.
1272 self.warned = []
1272 self.warned = []
1273
1273
1274 self.times = []
1274 self.times = []
1275 self._firststarttime = None
1275 self._firststarttime = None
1276 # Data stored for the benefit of generating xunit reports.
1276 # Data stored for the benefit of generating xunit reports.
1277 self.successes = []
1277 self.successes = []
1278 self.faildata = {}
1278 self.faildata = {}
1279
1279
1280 def addFailure(self, test, reason):
1280 def addFailure(self, test, reason):
1281 self.failures.append((test, reason))
1281 self.failures.append((test, reason))
1282
1282
1283 if self._options.first:
1283 if self._options.first:
1284 self.stop()
1284 self.stop()
1285 else:
1285 else:
1286 with iolock:
1286 with iolock:
1287 if reason == "timed out":
1287 if reason == "timed out":
1288 self.stream.write('t')
1288 self.stream.write('t')
1289 else:
1289 else:
1290 if not self._options.nodiff:
1290 if not self._options.nodiff:
1291 self.stream.write('\nERROR: %s output changed\n' % test)
1291 self.stream.write('\nERROR: %s output changed\n' % test)
1292 self.stream.write('!')
1292 self.stream.write('!')
1293
1293
1294 self.stream.flush()
1294 self.stream.flush()
1295
1295
1296 def addSuccess(self, test):
1296 def addSuccess(self, test):
1297 with iolock:
1297 with iolock:
1298 super(TestResult, self).addSuccess(test)
1298 super(TestResult, self).addSuccess(test)
1299 self.successes.append(test)
1299 self.successes.append(test)
1300
1300
1301 def addError(self, test, err):
1301 def addError(self, test, err):
1302 super(TestResult, self).addError(test, err)
1302 super(TestResult, self).addError(test, err)
1303 if self._options.first:
1303 if self._options.first:
1304 self.stop()
1304 self.stop()
1305
1305
1306 # Polyfill.
1306 # Polyfill.
1307 def addSkip(self, test, reason):
1307 def addSkip(self, test, reason):
1308 self.skipped.append((test, reason))
1308 self.skipped.append((test, reason))
1309 with iolock:
1309 with iolock:
1310 if self.showAll:
1310 if self.showAll:
1311 self.stream.writeln('skipped %s' % reason)
1311 self.stream.writeln('skipped %s' % reason)
1312 else:
1312 else:
1313 self.stream.write('s')
1313 self.stream.write('s')
1314 self.stream.flush()
1314 self.stream.flush()
1315
1315
1316 def addIgnore(self, test, reason):
1316 def addIgnore(self, test, reason):
1317 self.ignored.append((test, reason))
1317 self.ignored.append((test, reason))
1318 with iolock:
1318 with iolock:
1319 if self.showAll:
1319 if self.showAll:
1320 self.stream.writeln('ignored %s' % reason)
1320 self.stream.writeln('ignored %s' % reason)
1321 else:
1321 else:
1322 if reason not in ('not retesting', "doesn't match keyword"):
1322 if reason not in ('not retesting', "doesn't match keyword"):
1323 self.stream.write('i')
1323 self.stream.write('i')
1324 else:
1324 else:
1325 self.testsRun += 1
1325 self.testsRun += 1
1326 self.stream.flush()
1326 self.stream.flush()
1327
1327
1328 def addWarn(self, test, reason):
1328 def addWarn(self, test, reason):
1329 self.warned.append((test, reason))
1329 self.warned.append((test, reason))
1330
1330
1331 if self._options.first:
1331 if self._options.first:
1332 self.stop()
1332 self.stop()
1333
1333
1334 with iolock:
1334 with iolock:
1335 if self.showAll:
1335 if self.showAll:
1336 self.stream.writeln('warned %s' % reason)
1336 self.stream.writeln('warned %s' % reason)
1337 else:
1337 else:
1338 self.stream.write('~')
1338 self.stream.write('~')
1339 self.stream.flush()
1339 self.stream.flush()
1340
1340
1341 def addOutputMismatch(self, test, ret, got, expected):
1341 def addOutputMismatch(self, test, ret, got, expected):
1342 """Record a mismatch in test output for a particular test."""
1342 """Record a mismatch in test output for a particular test."""
1343 if self.shouldStop:
1343 if self.shouldStop:
1344 # don't print, some other test case already failed and
1344 # don't print, some other test case already failed and
1345 # printed, we're just stale and probably failed due to our
1345 # printed, we're just stale and probably failed due to our
1346 # temp dir getting cleaned up.
1346 # temp dir getting cleaned up.
1347 return
1347 return
1348
1348
1349 accepted = False
1349 accepted = False
1350 failed = False
1350 failed = False
1351 lines = []
1351 lines = []
1352
1352
1353 with iolock:
1353 with iolock:
1354 if self._options.nodiff:
1354 if self._options.nodiff:
1355 pass
1355 pass
1356 elif self._options.view:
1356 elif self._options.view:
1357 v = self._options.view
1357 v = self._options.view
1358 if PYTHON3:
1358 if PYTHON3:
1359 v = _bytespath(v)
1359 v = _bytespath(v)
1360 os.system(b"%s %s %s" %
1360 os.system(b"%s %s %s" %
1361 (v, test.refpath, test.errpath))
1361 (v, test.refpath, test.errpath))
1362 else:
1362 else:
1363 servefail, lines = getdiff(expected, got,
1363 servefail, lines = getdiff(expected, got,
1364 test.refpath, test.errpath)
1364 test.refpath, test.errpath)
1365 if servefail:
1365 if servefail:
1366 self.addFailure(
1366 self.addFailure(
1367 test,
1367 test,
1368 'server failed to start (HGPORT=%s)' % test._startport)
1368 'server failed to start (HGPORT=%s)' % test._startport)
1369 raise ReportedTest('server failed to start')
1369 raise ReportedTest('server failed to start')
1370 else:
1370 else:
1371 self.stream.write('\n')
1371 self.stream.write('\n')
1372 for line in lines:
1372 for line in lines:
1373 if PYTHON3:
1373 if PYTHON3:
1374 self.stream.flush()
1374 self.stream.flush()
1375 self.stream.buffer.write(line)
1375 self.stream.buffer.write(line)
1376 self.stream.buffer.flush()
1376 self.stream.buffer.flush()
1377 else:
1377 else:
1378 self.stream.write(line)
1378 self.stream.write(line)
1379 self.stream.flush()
1379 self.stream.flush()
1380
1380
1381 # handle interactive prompt without releasing iolock
1381 # handle interactive prompt without releasing iolock
1382 if self._options.interactive:
1382 if self._options.interactive:
1383 self.stream.write('Accept this change? [n] ')
1383 self.stream.write('Accept this change? [n] ')
1384 answer = sys.stdin.readline().strip()
1384 answer = sys.stdin.readline().strip()
1385 if answer.lower() in ('y', 'yes'):
1385 if answer.lower() in ('y', 'yes'):
1386 if test.name.endswith('.t'):
1386 if test.name.endswith('.t'):
1387 rename(test.errpath, test.path)
1387 rename(test.errpath, test.path)
1388 else:
1388 else:
1389 rename(test.errpath, '%s.out' % test.path)
1389 rename(test.errpath, '%s.out' % test.path)
1390 accepted = True
1390 accepted = True
1391 if not accepted and not failed:
1391 if not accepted and not failed:
1392 self.faildata[test.name] = b''.join(lines)
1392 self.faildata[test.name] = b''.join(lines)
1393
1393
1394 return accepted
1394 return accepted
1395
1395
1396 def startTest(self, test):
1396 def startTest(self, test):
1397 super(TestResult, self).startTest(test)
1397 super(TestResult, self).startTest(test)
1398
1398
1399 # os.times() returns the user and system CPU time consumed by this
1399 # os.times() returns the user and system CPU time consumed by this
1400 # process and by its children, along with the elapsed real time.
1400 # process and by its children, along with the elapsed real time.
1401 # One limitation: the child-process and elapsed-time fields are only
1401 # One limitation: the child-process and elapsed-time fields are only
1402 # meaningful on Unix-like systems, not on Windows.
1402 # meaningful on Unix-like systems, not on Windows.
1403 test.started = os.times()
1403 test.started = os.times()
1404 if self._firststarttime is None: # thread racy but irrelevant
1404 if self._firststarttime is None: # thread racy but irrelevant
1405 self._firststarttime = test.started[4]
1405 self._firststarttime = test.started[4]
1406
1406
1407 def stopTest(self, test, interrupted=False):
1407 def stopTest(self, test, interrupted=False):
1408 super(TestResult, self).stopTest(test)
1408 super(TestResult, self).stopTest(test)
1409
1409
1410 test.stopped = os.times()
1410 test.stopped = os.times()
1411
1411
1412 starttime = test.started
1412 starttime = test.started
1413 endtime = test.stopped
1413 endtime = test.stopped
1414 origin = self._firststarttime
1414 origin = self._firststarttime
1415 self.times.append((test.name,
1415 self.times.append((test.name,
1416 endtime[2] - starttime[2], # user space CPU time
1416 endtime[2] - starttime[2], # user space CPU time
1417 endtime[3] - starttime[3], # sys space CPU time
1417 endtime[3] - starttime[3], # sys space CPU time
1418 endtime[4] - starttime[4], # real time
1418 endtime[4] - starttime[4], # real time
1419 starttime[4] - origin, # start date in run context
1419 starttime[4] - origin, # start date in run context
1420 endtime[4] - origin, # end date in run context
1420 endtime[4] - origin, # end date in run context
1421 ))
1421 ))
1422
1422
1423 if interrupted:
1423 if interrupted:
1424 with iolock:
1424 with iolock:
1425 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1425 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1426 test.name, self.times[-1][3]))
1426 test.name, self.times[-1][3]))
1427
1427
1428 class TestSuite(unittest.TestSuite):
1428 class TestSuite(unittest.TestSuite):
1429 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1429 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1430
1430
1431 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1431 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1432 retest=False, keywords=None, loop=False, runs_per_test=1,
1432 retest=False, keywords=None, loop=False, runs_per_test=1,
1433 loadtest=None, showchannels=False,
1433 loadtest=None, showchannels=False,
1434 *args, **kwargs):
1434 *args, **kwargs):
1435 """Create a new instance that can run tests with a configuration.
1435 """Create a new instance that can run tests with a configuration.
1436
1436
1437 testdir specifies the directory where tests are executed from. This
1437 testdir specifies the directory where tests are executed from. This
1438 is typically the ``tests`` directory from Mercurial's source
1438 is typically the ``tests`` directory from Mercurial's source
1439 repository.
1439 repository.
1440
1440
1441 jobs specifies the number of jobs to run concurrently. Each test
1441 jobs specifies the number of jobs to run concurrently. Each test
1442 executes on its own thread. Tests actually spawn new processes, so
1442 executes on its own thread. Tests actually spawn new processes, so
1443 state mutation should not be an issue.
1443 state mutation should not be an issue.
1444
1444
1445 whitelist and blacklist denote tests that have been whitelisted and
1445 whitelist and blacklist denote tests that have been whitelisted and
1446 blacklisted, respectively. These arguments don't belong in TestSuite.
1446 blacklisted, respectively. These arguments don't belong in TestSuite.
1447 Instead, whitelist and blacklist should be handled by the thing that
1447 Instead, whitelist and blacklist should be handled by the thing that
1448 populates the TestSuite with tests. They are present to preserve
1448 populates the TestSuite with tests. They are present to preserve
1449 backwards compatible behavior which reports skipped tests as part
1449 backwards compatible behavior which reports skipped tests as part
1450 of the results.
1450 of the results.
1451
1451
1452 retest denotes whether to retest failed tests. This arguably belongs
1452 retest denotes whether to retest failed tests. This arguably belongs
1453 outside of TestSuite.
1453 outside of TestSuite.
1454
1454
1455 keywords denotes key words that will be used to filter which tests
1455 keywords denotes key words that will be used to filter which tests
1456 to execute. This arguably belongs outside of TestSuite.
1456 to execute. This arguably belongs outside of TestSuite.
1457
1457
1458 loop denotes whether to loop over tests forever.
1458 loop denotes whether to loop over tests forever.
1459 """
1459 """
1460 super(TestSuite, self).__init__(*args, **kwargs)
1460 super(TestSuite, self).__init__(*args, **kwargs)
1461
1461
1462 self._jobs = jobs
1462 self._jobs = jobs
1463 self._whitelist = whitelist
1463 self._whitelist = whitelist
1464 self._blacklist = blacklist
1464 self._blacklist = blacklist
1465 self._retest = retest
1465 self._retest = retest
1466 self._keywords = keywords
1466 self._keywords = keywords
1467 self._loop = loop
1467 self._loop = loop
1468 self._runs_per_test = runs_per_test
1468 self._runs_per_test = runs_per_test
1469 self._loadtest = loadtest
1469 self._loadtest = loadtest
1470 self._showchannels = showchannels
1470 self._showchannels = showchannels
1471
1471
1472 def run(self, result):
1472 def run(self, result):
1473 # We have a number of filters that need to be applied. We do this
1473 # We have a number of filters that need to be applied. We do this
1474 # here instead of inside Test because it makes the running logic for
1474 # here instead of inside Test because it makes the running logic for
1475 # Test simpler.
1475 # Test simpler.
1476 tests = []
1476 tests = []
1477 num_tests = [0]
1477 num_tests = [0]
1478 for test in self._tests:
1478 for test in self._tests:
1479 def get():
1479 def get():
1480 num_tests[0] += 1
1480 num_tests[0] += 1
1481 if getattr(test, 'should_reload', False):
1481 if getattr(test, 'should_reload', False):
1482 return self._loadtest(test.bname, num_tests[0])
1482 return self._loadtest(test.bname, num_tests[0])
1483 return test
1483 return test
1484 if not os.path.exists(test.path):
1484 if not os.path.exists(test.path):
1485 result.addSkip(test, "Doesn't exist")
1485 result.addSkip(test, "Doesn't exist")
1486 continue
1486 continue
1487
1487
1488 if not (self._whitelist and test.name in self._whitelist):
1488 if not (self._whitelist and test.name in self._whitelist):
1489 if self._blacklist and test.bname in self._blacklist:
1489 if self._blacklist and test.bname in self._blacklist:
1490 result.addSkip(test, 'blacklisted')
1490 result.addSkip(test, 'blacklisted')
1491 continue
1491 continue
1492
1492
1493 if self._retest and not os.path.exists(test.errpath):
1493 if self._retest and not os.path.exists(test.errpath):
1494 result.addIgnore(test, 'not retesting')
1494 result.addIgnore(test, 'not retesting')
1495 continue
1495 continue
1496
1496
1497 if self._keywords:
1497 if self._keywords:
1498 f = open(test.path, 'rb')
1498 f = open(test.path, 'rb')
1499 t = f.read().lower() + test.bname.lower()
1499 t = f.read().lower() + test.bname.lower()
1500 f.close()
1500 f.close()
1501 ignored = False
1501 ignored = False
1502 for k in self._keywords.lower().split():
1502 for k in self._keywords.lower().split():
1503 if k not in t:
1503 if k not in t:
1504 result.addIgnore(test, "doesn't match keyword")
1504 result.addIgnore(test, "doesn't match keyword")
1505 ignored = True
1505 ignored = True
1506 break
1506 break
1507
1507
1508 if ignored:
1508 if ignored:
1509 continue
1509 continue
1510 for _ in xrange(self._runs_per_test):
1510 for _ in xrange(self._runs_per_test):
1511 tests.append(get())
1511 tests.append(get())
1512
1512
1513 runtests = list(tests)
1513 runtests = list(tests)
1514 done = queue.Queue()
1514 done = queue.Queue()
1515 running = 0
1515 running = 0
1516
1516
1517 channels = [""] * self._jobs
1517 channels = [""] * self._jobs
1518
1518
1519 def job(test, result):
1519 def job(test, result):
1520 for n, v in enumerate(channels):
1520 for n, v in enumerate(channels):
1521 if not v:
1521 if not v:
1522 channel = n
1522 channel = n
1523 break
1523 break
1524 channels[channel] = "=" + test.name[5:].split(".")[0]
1524 channels[channel] = "=" + test.name[5:].split(".")[0]
1525 try:
1525 try:
1526 test(result)
1526 test(result)
1527 done.put(None)
1527 done.put(None)
1528 except KeyboardInterrupt:
1528 except KeyboardInterrupt:
1529 pass
1529 pass
1530 except: # re-raises
1530 except: # re-raises
1531 done.put(('!', test, 'run-test raised an error, see traceback'))
1531 done.put(('!', test, 'run-test raised an error, see traceback'))
1532 raise
1532 raise
1533 channels[channel] = ''
1533 channels[channel] = ''
1534
1534
1535 def stat():
1535 def stat():
1536 count = 0
1536 count = 0
1537 while channels:
1537 while channels:
1538 d = '\n%03s ' % count
1538 d = '\n%03s ' % count
1539 for n, v in enumerate(channels):
1539 for n, v in enumerate(channels):
1540 if v:
1540 if v:
1541 d += v[0]
1541 d += v[0]
1542 channels[n] = v[1:] or '.'
1542 channels[n] = v[1:] or '.'
1543 else:
1543 else:
1544 d += ' '
1544 d += ' '
1545 d += ' '
1545 d += ' '
1546 with iolock:
1546 with iolock:
1547 sys.stdout.write(d + ' ')
1547 sys.stdout.write(d + ' ')
1548 sys.stdout.flush()
1548 sys.stdout.flush()
1549 for x in xrange(10):
1549 for x in xrange(10):
1550 if channels:
1550 if channels:
1551 time.sleep(.1)
1551 time.sleep(.1)
1552 count += 1
1552 count += 1
1553
1553
1554 stoppedearly = False
1554 stoppedearly = False
1555
1555
1556 if self._showchannels:
1556 if self._showchannels:
1557 statthread = threading.Thread(target=stat, name="stat")
1557 statthread = threading.Thread(target=stat, name="stat")
1558 statthread.start()
1558 statthread.start()
1559
1559
1560 try:
1560 try:
1561 while tests or running:
1561 while tests or running:
1562 if not done.empty() or running == self._jobs or not tests:
1562 if not done.empty() or running == self._jobs or not tests:
1563 try:
1563 try:
1564 done.get(True, 1)
1564 done.get(True, 1)
1565 running -= 1
1565 running -= 1
1566 if result and result.shouldStop:
1566 if result and result.shouldStop:
1567 stoppedearly = True
1567 stoppedearly = True
1568 break
1568 break
1569 except queue.Empty:
1569 except queue.Empty:
1570 continue
1570 continue
1571 if tests and not running == self._jobs:
1571 if tests and not running == self._jobs:
1572 test = tests.pop(0)
1572 test = tests.pop(0)
1573 if self._loop:
1573 if self._loop:
1574 if getattr(test, 'should_reload', False):
1574 if getattr(test, 'should_reload', False):
1575 num_tests[0] += 1
1575 num_tests[0] += 1
1576 tests.append(
1576 tests.append(
1577 self._loadtest(test.name, num_tests[0]))
1577 self._loadtest(test.name, num_tests[0]))
1578 else:
1578 else:
1579 tests.append(test)
1579 tests.append(test)
1580 t = threading.Thread(target=job, name=test.name,
1580 t = threading.Thread(target=job, name=test.name,
1581 args=(test, result))
1581 args=(test, result))
1582 t.start()
1582 t.start()
1583 running += 1
1583 running += 1
1584
1584
1585 # If we stop early we still need to wait on started tests to
1585 # If we stop early we still need to wait on started tests to
1586 # finish. Otherwise, there is a race between the test completing
1586 # finish. Otherwise, there is a race between the test completing
1587 # and the test's cleanup code running. This could result in the
1587 # and the test's cleanup code running. This could result in the
1588 # test being reported incorrectly.
1588 # test being reported incorrectly.
1589 if stoppedearly:
1589 if stoppedearly:
1590 while running:
1590 while running:
1591 try:
1591 try:
1592 done.get(True, 1)
1592 done.get(True, 1)
1593 running -= 1
1593 running -= 1
1594 except queue.Empty:
1594 except queue.Empty:
1595 continue
1595 continue
1596 except KeyboardInterrupt:
1596 except KeyboardInterrupt:
1597 for test in runtests:
1597 for test in runtests:
1598 test.abort()
1598 test.abort()
1599
1599
1600 channels = []
1600 channels = []
1601
1601
1602 return result
1602 return result
1603
1603
1604 # Save the most recent 5 wall-clock runtimes of each test to a
1604 # Save the most recent 5 wall-clock runtimes of each test to a
1605 # human-readable text file named .testtimes. Tests are sorted
1605 # human-readable text file named .testtimes. Tests are sorted
1606 # alphabetically, while times for each test are listed from oldest to
1606 # alphabetically, while times for each test are listed from oldest to
1607 # newest.
1607 # newest.
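# Editor's illustration of that format (timings invented):
#
#     test-add.t 1.234 1.198 1.305
#     test-annotate.t 12.410 11.982
#
# one test per line, followed by up to five space-separated wall-clock
# durations in seconds, oldest first.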
1608
1608
1609 def loadtimes(testdir):
1609 def loadtimes(testdir):
1610 times = []
1610 times = []
1611 try:
1611 try:
1612 with open(os.path.join(testdir, '.testtimes')) as fp:
1612 with open(os.path.join(testdir, '.testtimes')) as fp:
1613 for line in fp:
1613 for line in fp:
1614 ts = line.split()
1614 ts = line.split()
1615 times.append((ts[0], [float(t) for t in ts[1:]]))
1615 times.append((ts[0], [float(t) for t in ts[1:]]))
1616 except IOError as err:
1616 except IOError as err:
1617 if err.errno != errno.ENOENT:
1617 if err.errno != errno.ENOENT:
1618 raise
1618 raise
1619 return times
1619 return times
1620
1620
1621 def savetimes(testdir, result):
1621 def savetimes(testdir, result):
1622 saved = dict(loadtimes(testdir))
1622 saved = dict(loadtimes(testdir))
1623 maxruns = 5
1623 maxruns = 5
1624 skipped = set([str(t[0]) for t in result.skipped])
1624 skipped = set([str(t[0]) for t in result.skipped])
1625 for tdata in result.times:
1625 for tdata in result.times:
1626 test, real = tdata[0], tdata[3]
1626 test, real = tdata[0], tdata[3]
1627 if test not in skipped:
1627 if test not in skipped:
1628 ts = saved.setdefault(test, [])
1628 ts = saved.setdefault(test, [])
1629 ts.append(real)
1629 ts.append(real)
1630 ts[:] = ts[-maxruns:]
1630 ts[:] = ts[-maxruns:]
1631
1631
1632 fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
1632 fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
1633 dir=testdir, text=True)
1633 dir=testdir, text=True)
1634 with os.fdopen(fd, 'w') as fp:
1634 with os.fdopen(fd, 'w') as fp:
1635 for name, ts in sorted(saved.iteritems()):
1635 for name, ts in sorted(saved.iteritems()):
1636 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1636 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1637 timepath = os.path.join(testdir, '.testtimes')
1637 timepath = os.path.join(testdir, '.testtimes')
1638 try:
1638 try:
1639 os.unlink(timepath)
1639 os.unlink(timepath)
1640 except OSError:
1640 except OSError:
1641 pass
1641 pass
1642 try:
1642 try:
1643 os.rename(tmpname, timepath)
1643 os.rename(tmpname, timepath)
1644 except OSError:
1644 except OSError:
1645 pass
1645 pass
1646
1646
1647 class TextTestRunner(unittest.TextTestRunner):
1647 class TextTestRunner(unittest.TextTestRunner):
1648 """Custom unittest test runner that uses appropriate settings."""
1648 """Custom unittest test runner that uses appropriate settings."""
1649
1649
1650 def __init__(self, runner, *args, **kwargs):
1650 def __init__(self, runner, *args, **kwargs):
1651 super(TextTestRunner, self).__init__(*args, **kwargs)
1651 super(TextTestRunner, self).__init__(*args, **kwargs)
1652
1652
1653 self._runner = runner
1653 self._runner = runner
1654
1654
1655 def run(self, test):
1655 def run(self, test):
1656 result = TestResult(self._runner.options, self.stream,
1656 result = TestResult(self._runner.options, self.stream,
1657 self.descriptions, self.verbosity)
1657 self.descriptions, self.verbosity)
1658
1658
1659 test(result)
1659 test(result)
1660
1660
1661 failed = len(result.failures)
1661 failed = len(result.failures)
1662 warned = len(result.warned)
1662 warned = len(result.warned)
1663 skipped = len(result.skipped)
1663 skipped = len(result.skipped)
1664 ignored = len(result.ignored)
1664 ignored = len(result.ignored)
1665
1665
1666 with iolock:
1666 with iolock:
1667 self.stream.writeln('')
1667 self.stream.writeln('')
1668
1668
1669 if not self._runner.options.noskips:
1669 if not self._runner.options.noskips:
1670 for test, msg in result.skipped:
1670 for test, msg in result.skipped:
1671 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1671 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1672 for test, msg in result.warned:
1672 for test, msg in result.warned:
1673 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1673 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1674 for test, msg in result.failures:
1674 for test, msg in result.failures:
1675 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1675 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1676 for test, msg in result.errors:
1676 for test, msg in result.errors:
1677 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1677 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1678
1678
1679 if self._runner.options.xunit:
1679 if self._runner.options.xunit:
1680 xuf = open(self._runner.options.xunit, 'wb')
1680 xuf = open(self._runner.options.xunit, 'wb')
1681 try:
1681 try:
1682 timesd = dict((t[0], t[3]) for t in result.times)
1682 timesd = dict((t[0], t[3]) for t in result.times)
1683 doc = minidom.Document()
1683 doc = minidom.Document()
1684 s = doc.createElement('testsuite')
1684 s = doc.createElement('testsuite')
1685 s.setAttribute('name', 'run-tests')
1685 s.setAttribute('name', 'run-tests')
1686 s.setAttribute('tests', str(result.testsRun))
1686 s.setAttribute('tests', str(result.testsRun))
1687 s.setAttribute('errors', "0") # TODO
1687 s.setAttribute('errors', "0") # TODO
1688 s.setAttribute('failures', str(failed))
1688 s.setAttribute('failures', str(failed))
1689 s.setAttribute('skipped', str(skipped + ignored))
1689 s.setAttribute('skipped', str(skipped + ignored))
1690 doc.appendChild(s)
1690 doc.appendChild(s)
1691 for tc in result.successes:
1691 for tc in result.successes:
1692 t = doc.createElement('testcase')
1692 t = doc.createElement('testcase')
1693 t.setAttribute('name', tc.name)
1693 t.setAttribute('name', tc.name)
1694 t.setAttribute('time', '%.3f' % timesd[tc.name])
1694 t.setAttribute('time', '%.3f' % timesd[tc.name])
1695 s.appendChild(t)
1695 s.appendChild(t)
1696 for tc, err in sorted(result.faildata.items()):
1696 for tc, err in sorted(result.faildata.items()):
1697 t = doc.createElement('testcase')
1697 t = doc.createElement('testcase')
1698 t.setAttribute('name', tc)
1698 t.setAttribute('name', tc)
1699 t.setAttribute('time', '%.3f' % timesd[tc])
1699 t.setAttribute('time', '%.3f' % timesd[tc])
1700 # createCDATASection expects a unicode or it will
1700 # createCDATASection expects a unicode or it will
1701 # convert using default conversion rules, which will
1701 # convert using default conversion rules, which will
1702 # fail if the string isn't ASCII.
1702 # fail if the string isn't ASCII.
1703 err = cdatasafe(err).decode('utf-8', 'replace')
1703 err = cdatasafe(err).decode('utf-8', 'replace')
1704 cd = doc.createCDATASection(err)
1704 cd = doc.createCDATASection(err)
1705 t.appendChild(cd)
1705 t.appendChild(cd)
1706 s.appendChild(t)
1706 s.appendChild(t)
1707 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1707 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1708 finally:
1708 finally:
1709 xuf.close()
1709 xuf.close()
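# Editor's sketch of the xunit file written above (names, counts and the
# CDATA payload are invented; minidom also emits an XML declaration and its
# own attribute ordering):
#
#     <testsuite name="run-tests" tests="2" errors="0" failures="1" skipped="0">
#       <testcase name="test-add.t" time="1.340"/>
#       <testcase name="test-bad.t" time="0.210">
#         <![CDATA[--- expected output ... +++ actual output ...]]>
#       </testcase>
#     </testsuite>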
1710
1710
1711 if self._runner.options.json:
1711 if self._runner.options.json:
1712 if json is None:
1712 if json is None:
1713 raise ImportError("json module not installed")
1713 raise ImportError("json module not installed")
1714 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1714 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1715 fp = open(jsonpath, 'w')
1715 fp = open(jsonpath, 'w')
1716 try:
1716 try:
1717 timesd = {}
1717 timesd = {}
1718 for tdata in result.times:
1718 for tdata in result.times:
1719 test = tdata[0]
1719 test = tdata[0]
1720 timesd[test] = tdata[1:]
1720 timesd[test] = tdata[1:]
1721
1721
1722 outcome = {}
1722 outcome = {}
1723 groups = [('success', ((tc, None)
1723 groups = [('success', ((tc, None)
1724 for tc in result.successes)),
1724 for tc in result.successes)),
1725 ('failure', result.failures),
1725 ('failure', result.failures),
1726 ('skip', result.skipped)]
1726 ('skip', result.skipped)]
1727 for res, testcases in groups:
1727 for res, testcases in groups:
1728 for tc, __ in testcases:
1728 for tc, __ in testcases:
1729 tres = {'result': res,
1729 tres = {'result': res,
1730 'time': ('%0.3f' % timesd[tc.name][2]),
1730 'time': ('%0.3f' % timesd[tc.name][2]),
1731 'cuser': ('%0.3f' % timesd[tc.name][0]),
1731 'cuser': ('%0.3f' % timesd[tc.name][0]),
1732 'csys': ('%0.3f' % timesd[tc.name][1]),
1732 'csys': ('%0.3f' % timesd[tc.name][1]),
1733 'start': ('%0.3f' % timesd[tc.name][3]),
1733 'start': ('%0.3f' % timesd[tc.name][3]),
1734 'end': ('%0.3f' % timesd[tc.name][4])}
1734 'end': ('%0.3f' % timesd[tc.name][4]),
1735 'diff': result.faildata.get(tc.name, ''),
1736 }
1735 outcome[tc.name] = tres
1737 outcome[tc.name] = tres
1736 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1738 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1737 fp.writelines(("testreport =", jsonout))
1739 fp.writelines(("testreport =", jsonout))
1738 finally:
1740 finally:
1739 fp.close()
1741 fp.close()
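# Editor's sketch of the resulting report.json (values invented). The file is
# a JavaScript-style assignment rather than bare JSON, and the 'diff' field
# added above is empty for passing tests and carries the failure diff
# otherwise:
#
#     testreport ={
#         "test-example.t": {
#             "csys": "0.160",
#             "cuser": "0.720",
#             "diff": "",
#             "end": "1.350",
#             "result": "success",
#             "start": "0.010",
#             "time": "1.340"
#         }
#     }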
1740
1742
1741 self._runner._checkhglib('Tested')
1743 self._runner._checkhglib('Tested')
1742
1744
1743 savetimes(self._runner._testdir, result)
1745 savetimes(self._runner._testdir, result)
1744 self.stream.writeln(
1746 self.stream.writeln(
1745 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1747 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1746 % (result.testsRun,
1748 % (result.testsRun,
1747 skipped + ignored, warned, failed))
1749 skipped + ignored, warned, failed))
1748 if failed:
1750 if failed:
1749 self.stream.writeln('python hash seed: %s' %
1751 self.stream.writeln('python hash seed: %s' %
1750 os.environ['PYTHONHASHSEED'])
1752 os.environ['PYTHONHASHSEED'])
1751 if self._runner.options.time:
1753 if self._runner.options.time:
1752 self.printtimes(result.times)
1754 self.printtimes(result.times)
1753
1755
1754 return result
1756 return result
1755
1757
1756 def printtimes(self, times):
1758 def printtimes(self, times):
1757 # iolock held by run
1759 # iolock held by run
1758 self.stream.writeln('# Producing time report')
1760 self.stream.writeln('# Producing time report')
1759 times.sort(key=lambda t: (t[3]))
1761 times.sort(key=lambda t: (t[3]))
1760 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1762 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1761 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1763 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1762 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1764 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1763 for tdata in times:
1765 for tdata in times:
1764 test = tdata[0]
1766 test = tdata[0]
1765 cuser, csys, real, start, end = tdata[1:6]
1767 cuser, csys, real, start, end = tdata[1:6]
1766 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1768 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1767
1769
1768 class TestRunner(object):
1770 class TestRunner(object):
1769 """Holds context for executing tests.
1771 """Holds context for executing tests.
1770
1772
1771 Tests rely on a lot of state. This object holds it for them.
1773 Tests rely on a lot of state. This object holds it for them.
1772 """
1774 """
1773
1775
1774 # Programs required to run tests.
1776 # Programs required to run tests.
1775 REQUIREDTOOLS = [
1777 REQUIREDTOOLS = [
1776 os.path.basename(_bytespath(sys.executable)),
1778 os.path.basename(_bytespath(sys.executable)),
1777 b'diff',
1779 b'diff',
1778 b'grep',
1780 b'grep',
1779 b'unzip',
1781 b'unzip',
1780 b'gunzip',
1782 b'gunzip',
1781 b'bunzip2',
1783 b'bunzip2',
1782 b'sed',
1784 b'sed',
1783 ]
1785 ]
1784
1786
1785 # Maps file extensions to test class.
1787 # Maps file extensions to test class.
1786 TESTTYPES = [
1788 TESTTYPES = [
1787 (b'.py', PythonTest),
1789 (b'.py', PythonTest),
1788 (b'.t', TTest),
1790 (b'.t', TTest),
1789 ]
1791 ]
1790
1792
1791 def __init__(self):
1793 def __init__(self):
1792 self.options = None
1794 self.options = None
1793 self._hgroot = None
1795 self._hgroot = None
1794 self._testdir = None
1796 self._testdir = None
1795 self._hgtmp = None
1797 self._hgtmp = None
1796 self._installdir = None
1798 self._installdir = None
1797 self._bindir = None
1799 self._bindir = None
1798 self._tmpbinddir = None
1800 self._tmpbinddir = None
1799 self._pythondir = None
1801 self._pythondir = None
1800 self._coveragefile = None
1802 self._coveragefile = None
1801 self._createdfiles = []
1803 self._createdfiles = []
1802 self._hgpath = None
1804 self._hgpath = None
1803 self._portoffset = 0
1805 self._portoffset = 0
1804 self._ports = {}
1806 self._ports = {}
1805
1807
1806 def run(self, args, parser=None):
1808 def run(self, args, parser=None):
1807 """Run the test suite."""
1809 """Run the test suite."""
1808 oldmask = os.umask(0o22)
1810 oldmask = os.umask(0o22)
1809 try:
1811 try:
1810 parser = parser or getparser()
1812 parser = parser or getparser()
1811 options, args = parseargs(args, parser)
1813 options, args = parseargs(args, parser)
1812 # positional arguments are paths to test files to run, so
1814 # positional arguments are paths to test files to run, so
1813 # we make sure they're all bytestrings
1815 # we make sure they're all bytestrings
1814 args = [_bytespath(a) for a in args]
1816 args = [_bytespath(a) for a in args]
1815 self.options = options
1817 self.options = options
1816
1818
1817 self._checktools()
1819 self._checktools()
1818 tests = self.findtests(args)
1820 tests = self.findtests(args)
1819 if options.profile_runner:
1821 if options.profile_runner:
1820 import statprof
1822 import statprof
1821 statprof.start()
1823 statprof.start()
1822 result = self._run(tests)
1824 result = self._run(tests)
1823 if options.profile_runner:
1825 if options.profile_runner:
1824 statprof.stop()
1826 statprof.stop()
1825 statprof.display()
1827 statprof.display()
1826 return result
1828 return result
1827
1829
1828 finally:
1830 finally:
1829 os.umask(oldmask)
1831 os.umask(oldmask)
1830
1832
1831 def _run(self, tests):
1833 def _run(self, tests):
1832 if self.options.random:
1834 if self.options.random:
1833 random.shuffle(tests)
1835 random.shuffle(tests)
1834 else:
1836 else:
1835 # keywords for slow tests
1837 # keywords for slow tests
1836 slow = {b'svn': 10,
1838 slow = {b'svn': 10,
1837 b'cvs': 10,
1839 b'cvs': 10,
1838 b'hghave': 10,
1840 b'hghave': 10,
1839 b'largefiles-update': 10,
1841 b'largefiles-update': 10,
1840 b'run-tests': 10,
1842 b'run-tests': 10,
1841 b'corruption': 10,
1843 b'corruption': 10,
1842 b'race': 10,
1844 b'race': 10,
1843 b'i18n': 10,
1845 b'i18n': 10,
1844 b'check': 100,
1846 b'check': 100,
1845 b'gendoc': 100,
1847 b'gendoc': 100,
1846 b'contrib-perf': 200,
1848 b'contrib-perf': 200,
1847 }
1849 }
1848 perf = {}
1850 perf = {}
1849 def sortkey(f):
1851 def sortkey(f):
1850 # run largest tests first, as they tend to take the longest
1852 # run largest tests first, as they tend to take the longest
1851 try:
1853 try:
1852 return perf[f]
1854 return perf[f]
1853 except KeyError:
1855 except KeyError:
1854 try:
1856 try:
1855 val = -os.stat(f).st_size
1857 val = -os.stat(f).st_size
1856 except OSError as e:
1858 except OSError as e:
1857 if e.errno != errno.ENOENT:
1859 if e.errno != errno.ENOENT:
1858 raise
1860 raise
1859 perf[f] = -1e9 # file does not exist, report it early
1861 perf[f] = -1e9 # file does not exist, report it early
1860 return -1e9
1862 return -1e9
1861 for kw, mul in slow.items():
1863 for kw, mul in slow.items():
1862 if kw in f:
1864 if kw in f:
1863 val *= mul
1865 val *= mul
1864 if f.endswith('.py'):
1866 if f.endswith('.py'):
1865 val /= 10.0
1867 val /= 10.0
1866 perf[f] = val / 1000.0
1868 perf[f] = val / 1000.0
1867 return perf[f]
1869 return perf[f]
1868 tests.sort(key=sortkey)
1870 tests.sort(key=sortkey)
1869
1871
1870 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1872 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1871 os, 'getcwdb', os.getcwd)()
1873 os, 'getcwdb', os.getcwd)()
1872
1874
1873 if 'PYTHONHASHSEED' not in os.environ:
1875 if 'PYTHONHASHSEED' not in os.environ:
1874 # use a random python hash seed all the time
1876 # use a random python hash seed all the time
1875 # we do the randomness ourselves so we know what seed is used
1877 # we do the randomness ourselves so we know what seed is used
1876 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1878 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1877
1879
1878 if self.options.tmpdir:
1880 if self.options.tmpdir:
1879 self.options.keep_tmpdir = True
1881 self.options.keep_tmpdir = True
1880 tmpdir = _bytespath(self.options.tmpdir)
1882 tmpdir = _bytespath(self.options.tmpdir)
1881 if os.path.exists(tmpdir):
1883 if os.path.exists(tmpdir):
1882 # Meaning of tmpdir has changed since 1.3: we used to create
1884 # Meaning of tmpdir has changed since 1.3: we used to create
1883 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1885 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1884 # tmpdir already exists.
1886 # tmpdir already exists.
1885 print("error: temp dir %r already exists" % tmpdir)
1887 print("error: temp dir %r already exists" % tmpdir)
1886 return 1
1888 return 1
1887
1889
1888 # Automatically removing tmpdir sounds convenient, but could
1890 # Automatically removing tmpdir sounds convenient, but could
1889 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1891 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1890 # or "--tmpdir=$HOME".
1892 # or "--tmpdir=$HOME".
1891 #vlog("# Removing temp dir", tmpdir)
1893 #vlog("# Removing temp dir", tmpdir)
1892 #shutil.rmtree(tmpdir)
1894 #shutil.rmtree(tmpdir)
1893 os.makedirs(tmpdir)
1895 os.makedirs(tmpdir)
1894 else:
1896 else:
1895 d = None
1897 d = None
1896 if os.name == 'nt':
1898 if os.name == 'nt':
1897 # without this, we get the default temp dir location, but
1899 # without this, we get the default temp dir location, but
1898 # in all lowercase, which causes trouble with paths (issue3490)
1900 # in all lowercase, which causes trouble with paths (issue3490)
1899 d = osenvironb.get(b'TMP', None)
1901 d = osenvironb.get(b'TMP', None)
1900 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1902 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1901
1903
1902 self._hgtmp = osenvironb[b'HGTMP'] = (
1904 self._hgtmp = osenvironb[b'HGTMP'] = (
1903 os.path.realpath(tmpdir))
1905 os.path.realpath(tmpdir))
1904
1906
1905 if self.options.with_hg:
1907 if self.options.with_hg:
1906 self._installdir = None
1908 self._installdir = None
1907 whg = self.options.with_hg
1909 whg = self.options.with_hg
1908 # If --with-hg is not specified, we have bytes already,
1910 # If --with-hg is not specified, we have bytes already,
1909 # but if it was specified in python3 we get a str, so we
1911 # but if it was specified in python3 we get a str, so we
1910 # have to encode it back into a bytes.
1912 # have to encode it back into a bytes.
1911 if PYTHON3:
1913 if PYTHON3:
1912 if not isinstance(whg, bytes):
1914 if not isinstance(whg, bytes):
1913 whg = _bytespath(whg)
1915 whg = _bytespath(whg)
1914 self._bindir = os.path.dirname(os.path.realpath(whg))
1916 self._bindir = os.path.dirname(os.path.realpath(whg))
1915 assert isinstance(self._bindir, bytes)
1917 assert isinstance(self._bindir, bytes)
1916 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1918 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1917 os.makedirs(self._tmpbindir)
1919 os.makedirs(self._tmpbindir)
1918
1920
1919 # This looks redundant with how Python initializes sys.path from
1921 # This looks redundant with how Python initializes sys.path from
1920 # the location of the script being executed. Needed because the
1922 # the location of the script being executed. Needed because the
1921 # "hg" specified by --with-hg is not the only Python script
1923 # "hg" specified by --with-hg is not the only Python script
1922 # executed in the test suite that needs to import 'mercurial'
1924 # executed in the test suite that needs to import 'mercurial'
1923 # ... which means it's not really redundant at all.
1925 # ... which means it's not really redundant at all.
1924 self._pythondir = self._bindir
1926 self._pythondir = self._bindir
1925 else:
1927 else:
1926 self._installdir = os.path.join(self._hgtmp, b"install")
1928 self._installdir = os.path.join(self._hgtmp, b"install")
1927 self._bindir = osenvironb[b"BINDIR"] = \
1929 self._bindir = osenvironb[b"BINDIR"] = \
1928 os.path.join(self._installdir, b"bin")
1930 os.path.join(self._installdir, b"bin")
1929 self._tmpbindir = self._bindir
1931 self._tmpbindir = self._bindir
1930 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1932 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1931
1933
1932 osenvironb[b"BINDIR"] = self._bindir
1934 osenvironb[b"BINDIR"] = self._bindir
1933 osenvironb[b"PYTHON"] = PYTHON
1935 osenvironb[b"PYTHON"] = PYTHON
1934
1936
1935 fileb = _bytespath(__file__)
1937 fileb = _bytespath(__file__)
1936 runtestdir = os.path.abspath(os.path.dirname(fileb))
1938 runtestdir = os.path.abspath(os.path.dirname(fileb))
1937 osenvironb[b'RUNTESTDIR'] = runtestdir
1939 osenvironb[b'RUNTESTDIR'] = runtestdir
1938 if PYTHON3:
1940 if PYTHON3:
1939 sepb = _bytespath(os.pathsep)
1941 sepb = _bytespath(os.pathsep)
1940 else:
1942 else:
1941 sepb = os.pathsep
1943 sepb = os.pathsep
1942 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1944 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1943 if os.path.islink(__file__):
1945 if os.path.islink(__file__):
1944 # test helper will likely be at the end of the symlink
1946 # test helper will likely be at the end of the symlink
1945 realfile = os.path.realpath(fileb)
1947 realfile = os.path.realpath(fileb)
1946 realdir = os.path.abspath(os.path.dirname(realfile))
1948 realdir = os.path.abspath(os.path.dirname(realfile))
1947 path.insert(2, realdir)
1949 path.insert(2, realdir)
1948 if self._testdir != runtestdir:
1950 if self._testdir != runtestdir:
1949 path = [self._testdir] + path
1951 path = [self._testdir] + path
1950 if self._tmpbindir != self._bindir:
1952 if self._tmpbindir != self._bindir:
1951 path = [self._tmpbindir] + path
1953 path = [self._tmpbindir] + path
1952 osenvironb[b"PATH"] = sepb.join(path)
1954 osenvironb[b"PATH"] = sepb.join(path)
1953
1955
1954 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1956 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1955 # can run .../tests/run-tests.py test-foo where test-foo
1957 # can run .../tests/run-tests.py test-foo where test-foo
1956 # adds an extension to HGRC. Also include the run-tests.py directory to
1958 # adds an extension to HGRC. Also include the run-tests.py directory to
1957 # import modules like heredoctest.
1959 # import modules like heredoctest.
1958 pypath = [self._pythondir, self._testdir, runtestdir]
1960 pypath = [self._pythondir, self._testdir, runtestdir]
1959 # We have to augment PYTHONPATH, rather than simply replacing
1961 # We have to augment PYTHONPATH, rather than simply replacing
1960 # it, in case external libraries are only available via current
1962 # it, in case external libraries are only available via current
1961 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1963 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1962 # are in /opt/subversion.)
1964 # are in /opt/subversion.)
1963 oldpypath = osenvironb.get(IMPL_PATH)
1965 oldpypath = osenvironb.get(IMPL_PATH)
1964 if oldpypath:
1966 if oldpypath:
1965 pypath.append(oldpypath)
1967 pypath.append(oldpypath)
1966 osenvironb[IMPL_PATH] = sepb.join(pypath)
1968 osenvironb[IMPL_PATH] = sepb.join(pypath)
1967
1969
1968 if self.options.pure:
1970 if self.options.pure:
1969 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1971 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1970
1972
1971 if self.options.allow_slow_tests:
1973 if self.options.allow_slow_tests:
1972 os.environ["HGTEST_SLOW"] = "slow"
1974 os.environ["HGTEST_SLOW"] = "slow"
1973 elif 'HGTEST_SLOW' in os.environ:
1975 elif 'HGTEST_SLOW' in os.environ:
1974 del os.environ['HGTEST_SLOW']
1976 del os.environ['HGTEST_SLOW']
1975
1977
1976 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1978 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1977
1979
1978 vlog("# Using TESTDIR", self._testdir)
1980 vlog("# Using TESTDIR", self._testdir)
1979 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1981 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1980 vlog("# Using HGTMP", self._hgtmp)
1982 vlog("# Using HGTMP", self._hgtmp)
1981 vlog("# Using PATH", os.environ["PATH"])
1983 vlog("# Using PATH", os.environ["PATH"])
1982 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1984 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1983
1985
1984 try:
1986 try:
1985 return self._runtests(tests) or 0
1987 return self._runtests(tests) or 0
1986 finally:
1988 finally:
1987 time.sleep(.1)
1989 time.sleep(.1)
1988 self._cleanup()
1990 self._cleanup()
1989
1991
1990 def findtests(self, args):
1992 def findtests(self, args):
1991 """Finds possible test files from arguments.
1993 """Finds possible test files from arguments.
1992
1994
1993 If you wish to inject custom tests into the test harness, this would
1995 If you wish to inject custom tests into the test harness, this would
1994 be a good function to monkeypatch or override in a derived class.
1996 be a good function to monkeypatch or override in a derived class.
1995 """
1997 """
1996 if not args:
1998 if not args:
1997 if self.options.changed:
1999 if self.options.changed:
1998 proc = Popen4('hg st --rev "%s" -man0 .' %
2000 proc = Popen4('hg st --rev "%s" -man0 .' %
1999 self.options.changed, None, 0)
2001 self.options.changed, None, 0)
2000 stdout, stderr = proc.communicate()
2002 stdout, stderr = proc.communicate()
2001 args = stdout.strip(b'\0').split(b'\0')
2003 args = stdout.strip(b'\0').split(b'\0')
2002 else:
2004 else:
2003 args = os.listdir(b'.')
2005 args = os.listdir(b'.')
2004
2006
2005 return [t for t in args
2007 return [t for t in args
2006 if os.path.basename(t).startswith(b'test-')
2008 if os.path.basename(t).startswith(b'test-')
2007 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2009 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2008
2010
2009 def _runtests(self, tests):
2011 def _runtests(self, tests):
2010 try:
2012 try:
2011 if self._installdir:
2013 if self._installdir:
2012 self._installhg()
2014 self._installhg()
2013 self._checkhglib("Testing")
2015 self._checkhglib("Testing")
2014 else:
2016 else:
2015 self._usecorrectpython()
2017 self._usecorrectpython()
2016
2018
2017 if self.options.restart:
2019 if self.options.restart:
2018 orig = list(tests)
2020 orig = list(tests)
2019 while tests:
2021 while tests:
2020 if os.path.exists(tests[0] + ".err"):
2022 if os.path.exists(tests[0] + ".err"):
2021 break
2023 break
2022 tests.pop(0)
2024 tests.pop(0)
2023 if not tests:
2025 if not tests:
2024 print("running all tests")
2026 print("running all tests")
2025 tests = orig
2027 tests = orig
2026
2028
2027 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2029 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2028
2030
2029 failed = False
2031 failed = False
2030 warned = False
2032 warned = False
2031 kws = self.options.keywords
2033 kws = self.options.keywords
2032 if kws is not None and PYTHON3:
2034 if kws is not None and PYTHON3:
2033 kws = kws.encode('utf-8')
2035 kws = kws.encode('utf-8')
2034
2036
2035 suite = TestSuite(self._testdir,
2037 suite = TestSuite(self._testdir,
2036 jobs=self.options.jobs,
2038 jobs=self.options.jobs,
2037 whitelist=self.options.whitelisted,
2039 whitelist=self.options.whitelisted,
2038 blacklist=self.options.blacklist,
2040 blacklist=self.options.blacklist,
2039 retest=self.options.retest,
2041 retest=self.options.retest,
2040 keywords=kws,
2042 keywords=kws,
2041 loop=self.options.loop,
2043 loop=self.options.loop,
2042 runs_per_test=self.options.runs_per_test,
2044 runs_per_test=self.options.runs_per_test,
2043 showchannels=self.options.showchannels,
2045 showchannels=self.options.showchannels,
2044 tests=tests, loadtest=self._gettest)
2046 tests=tests, loadtest=self._gettest)
2045 verbosity = 1
2047 verbosity = 1
2046 if self.options.verbose:
2048 if self.options.verbose:
2047 verbosity = 2
2049 verbosity = 2
2048 runner = TextTestRunner(self, verbosity=verbosity)
2050 runner = TextTestRunner(self, verbosity=verbosity)
2049 result = runner.run(suite)
2051 result = runner.run(suite)
2050
2052
2051 if result.failures:
2053 if result.failures:
2052 failed = True
2054 failed = True
2053 if result.warned:
2055 if result.warned:
2054 warned = True
2056 warned = True
2055
2057
2056 if self.options.anycoverage:
2058 if self.options.anycoverage:
2057 self._outputcoverage()
2059 self._outputcoverage()
2058 except KeyboardInterrupt:
2060 except KeyboardInterrupt:
2059 failed = True
2061 failed = True
2060 print("\ninterrupted!")
2062 print("\ninterrupted!")
2061
2063
2062 if failed:
2064 if failed:
2063 return 1
2065 return 1
2064 if warned:
2066 if warned:
2065 return 80
2067 return 80
2066
2068
2067 def _getport(self, count):
2069 def _getport(self, count):
2068 port = self._ports.get(count) # do we have a cached entry?
2070 port = self._ports.get(count) # do we have a cached entry?
2069 if port is None:
2071 if port is None:
2070 portneeded = 3
2072 portneeded = 3
2071 # after 100 tries we just give up and let the test report the failure
2073 # after 100 tries we just give up and let the test report the failure
2072 for tries in xrange(100):
2074 for tries in xrange(100):
2073 allfree = True
2075 allfree = True
2074 port = self.options.port + self._portoffset
2076 port = self.options.port + self._portoffset
2075 for idx in xrange(portneeded):
2077 for idx in xrange(portneeded):
2076 if not checkportisavailable(port + idx):
2078 if not checkportisavailable(port + idx):
2077 allfree = False
2079 allfree = False
2078 break
2080 break
2079 self._portoffset += portneeded
2081 self._portoffset += portneeded
2080 if allfree:
2082 if allfree:
2081 break
2083 break
2082 self._ports[count] = port
2084 self._ports[count] = port
2083 return port
2085 return port
2084
2086
2085 def _gettest(self, test, count):
2087 def _gettest(self, test, count):
2086 """Obtain a Test by looking at its filename.
2088 """Obtain a Test by looking at its filename.
2087
2089
2088 Returns a Test instance. The Test may not be runnable if it doesn't
2090 Returns a Test instance. The Test may not be runnable if it doesn't
2089 map to a known type.
2091 map to a known type.
2090 """
2092 """
2091 lctest = test.lower()
2093 lctest = test.lower()
2092 testcls = Test
2094 testcls = Test
2093
2095
2094 for ext, cls in self.TESTTYPES:
2096 for ext, cls in self.TESTTYPES:
2095 if lctest.endswith(ext):
2097 if lctest.endswith(ext):
2096 testcls = cls
2098 testcls = cls
2097 break
2099 break
2098
2100
2099 refpath = os.path.join(self._testdir, test)
2101 refpath = os.path.join(self._testdir, test)
2100 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2102 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2101
2103
2102 t = testcls(refpath, tmpdir,
2104 t = testcls(refpath, tmpdir,
2103 keeptmpdir=self.options.keep_tmpdir,
2105 keeptmpdir=self.options.keep_tmpdir,
2104 debug=self.options.debug,
2106 debug=self.options.debug,
2105 timeout=self.options.timeout,
2107 timeout=self.options.timeout,
2106 startport=self._getport(count),
2108 startport=self._getport(count),
2107 extraconfigopts=self.options.extra_config_opt,
2109 extraconfigopts=self.options.extra_config_opt,
2108 py3kwarnings=self.options.py3k_warnings,
2110 py3kwarnings=self.options.py3k_warnings,
2109 shell=self.options.shell)
2111 shell=self.options.shell)
2110 t.should_reload = True
2112 t.should_reload = True
2111 return t
2113 return t
2112
2114
2113 def _cleanup(self):
2115 def _cleanup(self):
2114 """Clean up state from this test invocation."""
2116 """Clean up state from this test invocation."""
2115
2117
2116 if self.options.keep_tmpdir:
2118 if self.options.keep_tmpdir:
2117 return
2119 return
2118
2120
2119 vlog("# Cleaning up HGTMP", self._hgtmp)
2121 vlog("# Cleaning up HGTMP", self._hgtmp)
2120 shutil.rmtree(self._hgtmp, True)
2122 shutil.rmtree(self._hgtmp, True)
2121 for f in self._createdfiles:
2123 for f in self._createdfiles:
2122 try:
2124 try:
2123 os.remove(f)
2125 os.remove(f)
2124 except OSError:
2126 except OSError:
2125 pass
2127 pass
2126
2128
2127 def _usecorrectpython(self):
2129 def _usecorrectpython(self):
2128 """Configure the environment to use the appropriate Python in tests."""
2130 """Configure the environment to use the appropriate Python in tests."""
2129 # Tests must use the same interpreter as us or bad things will happen.
2131 # Tests must use the same interpreter as us or bad things will happen.
2130 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2132 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2131 if getattr(os, 'symlink', None):
2133 if getattr(os, 'symlink', None):
2132 vlog("# Making python executable in test path a symlink to '%s'" %
2134 vlog("# Making python executable in test path a symlink to '%s'" %
2133 sys.executable)
2135 sys.executable)
2134 mypython = os.path.join(self._tmpbindir, pyexename)
2136 mypython = os.path.join(self._tmpbindir, pyexename)
2135 try:
2137 try:
2136 if os.readlink(mypython) == sys.executable:
2138 if os.readlink(mypython) == sys.executable:
2137 return
2139 return
2138 os.unlink(mypython)
2140 os.unlink(mypython)
2139 except OSError as err:
2141 except OSError as err:
2140 if err.errno != errno.ENOENT:
2142 if err.errno != errno.ENOENT:
2141 raise
2143 raise
2142 if self._findprogram(pyexename) != sys.executable:
2144 if self._findprogram(pyexename) != sys.executable:
2143 try:
2145 try:
2144 os.symlink(sys.executable, mypython)
2146 os.symlink(sys.executable, mypython)
2145 self._createdfiles.append(mypython)
2147 self._createdfiles.append(mypython)
2146 except OSError as err:
2148 except OSError as err:
2147 # child processes may race, which is harmless
2149 # child processes may race, which is harmless
2148 if err.errno != errno.EEXIST:
2150 if err.errno != errno.EEXIST:
2149 raise
2151 raise
2150 else:
2152 else:
2151 exedir, exename = os.path.split(sys.executable)
2153 exedir, exename = os.path.split(sys.executable)
2152 vlog("# Modifying search path to find %s as %s in '%s'" %
2154 vlog("# Modifying search path to find %s as %s in '%s'" %
2153 (exename, pyexename, exedir))
2155 (exename, pyexename, exedir))
2154 path = os.environ['PATH'].split(os.pathsep)
2156 path = os.environ['PATH'].split(os.pathsep)
2155 while exedir in path:
2157 while exedir in path:
2156 path.remove(exedir)
2158 path.remove(exedir)
2157 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2159 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2158 if not self._findprogram(pyexename):
2160 if not self._findprogram(pyexename):
2159 print("WARNING: Cannot find %s in search path" % pyexename)
2161 print("WARNING: Cannot find %s in search path" % pyexename)
2160
2162
2161 def _installhg(self):
2163 def _installhg(self):
2162 """Install hg into the test environment.
2164 """Install hg into the test environment.
2163
2165
2164 This will also configure hg with the appropriate testing settings.
2166 This will also configure hg with the appropriate testing settings.
2165 """
2167 """
2166 vlog("# Performing temporary installation of HG")
2168 vlog("# Performing temporary installation of HG")
2167 installerrs = os.path.join(b"tests", b"install.err")
2169 installerrs = os.path.join(b"tests", b"install.err")
2168 compiler = ''
2170 compiler = ''
2169 if self.options.compiler:
2171 if self.options.compiler:
2170 compiler = '--compiler ' + self.options.compiler
2172 compiler = '--compiler ' + self.options.compiler
2171 if self.options.pure:
2173 if self.options.pure:
2172 pure = b"--pure"
2174 pure = b"--pure"
2173 else:
2175 else:
2174 pure = b""
2176 pure = b""
2175 py3 = ''
2177 py3 = ''
2176
2178
2177 # Run installer in hg root
2179 # Run installer in hg root
2178 script = os.path.realpath(sys.argv[0])
2180 script = os.path.realpath(sys.argv[0])
2179 exe = sys.executable
2181 exe = sys.executable
2180 if PYTHON3:
2182 if PYTHON3:
2181 py3 = b'--c2to3'
2183 py3 = b'--c2to3'
2182 compiler = _bytespath(compiler)
2184 compiler = _bytespath(compiler)
2183 script = _bytespath(script)
2185 script = _bytespath(script)
2184 exe = _bytespath(exe)
2186 exe = _bytespath(exe)
2185 hgroot = os.path.dirname(os.path.dirname(script))
2187 hgroot = os.path.dirname(os.path.dirname(script))
2186 self._hgroot = hgroot
2188 self._hgroot = hgroot
2187 os.chdir(hgroot)
2189 os.chdir(hgroot)
2188 nohome = b'--home=""'
2190 nohome = b'--home=""'
2189 if os.name == 'nt':
2191 if os.name == 'nt':
2190 # The --home="" trick works only on OS where os.sep == '/'
2192 # The --home="" trick works only on OS where os.sep == '/'
2191 # because of a distutils convert_path() fast-path. Avoid it at
2193 # because of a distutils convert_path() fast-path. Avoid it at
2192 # least on Windows for now, deal with .pydistutils.cfg bugs
2194 # least on Windows for now, deal with .pydistutils.cfg bugs
2193 # when they happen.
2195 # when they happen.
2194 nohome = b''
2196 nohome = b''
2195 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2197 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2196 b' build %(compiler)s --build-base="%(base)s"'
2198 b' build %(compiler)s --build-base="%(base)s"'
2197 b' install --force --prefix="%(prefix)s"'
2199 b' install --force --prefix="%(prefix)s"'
2198 b' --install-lib="%(libdir)s"'
2200 b' --install-lib="%(libdir)s"'
2199 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2201 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2200 % {b'exe': exe, b'py3': py3, b'pure': pure,
2202 % {b'exe': exe, b'py3': py3, b'pure': pure,
2201 b'compiler': compiler,
2203 b'compiler': compiler,
2202 b'base': os.path.join(self._hgtmp, b"build"),
2204 b'base': os.path.join(self._hgtmp, b"build"),
2203 b'prefix': self._installdir, b'libdir': self._pythondir,
2205 b'prefix': self._installdir, b'libdir': self._pythondir,
2204 b'bindir': self._bindir,
2206 b'bindir': self._bindir,
2205 b'nohome': nohome, b'logfile': installerrs})
2207 b'nohome': nohome, b'logfile': installerrs})
2206
2208
2207 # setuptools requires install directories to exist.
2209 # setuptools requires install directories to exist.
2208 def makedirs(p):
2210 def makedirs(p):
2209 try:
2211 try:
2210 os.makedirs(p)
2212 os.makedirs(p)
2211 except OSError as e:
2213 except OSError as e:
2212 if e.errno != errno.EEXIST:
2214 if e.errno != errno.EEXIST:
2213 raise
2215 raise
2214 makedirs(self._pythondir)
2216 makedirs(self._pythondir)
2215 makedirs(self._bindir)
2217 makedirs(self._bindir)
2216
2218
2217 vlog("# Running", cmd)
2219 vlog("# Running", cmd)
2218 if os.system(cmd) == 0:
2220 if os.system(cmd) == 0:
2219 if not self.options.verbose:
2221 if not self.options.verbose:
2220 try:
2222 try:
2221 os.remove(installerrs)
2223 os.remove(installerrs)
2222 except OSError as e:
2224 except OSError as e:
2223 if e.errno != errno.ENOENT:
2225 if e.errno != errno.ENOENT:
2224 raise
2226 raise
2225 else:
2227 else:
2226 f = open(installerrs, 'rb')
2228 f = open(installerrs, 'rb')
2227 for line in f:
2229 for line in f:
2228 if PYTHON3:
2230 if PYTHON3:
2229 sys.stdout.buffer.write(line)
2231 sys.stdout.buffer.write(line)
2230 else:
2232 else:
2231 sys.stdout.write(line)
2233 sys.stdout.write(line)
2232 f.close()
2234 f.close()
2233 sys.exit(1)
2235 sys.exit(1)
2234 os.chdir(self._testdir)
2236 os.chdir(self._testdir)
2235
2237
2236 self._usecorrectpython()
2238 self._usecorrectpython()
2237
2239
2238 if self.options.py3k_warnings and not self.options.anycoverage:
2240 if self.options.py3k_warnings and not self.options.anycoverage:
2239 vlog("# Updating hg command to enable Py3k Warnings switch")
2241 vlog("# Updating hg command to enable Py3k Warnings switch")
2240 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2242 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2241 lines = [line.rstrip() for line in f]
2243 lines = [line.rstrip() for line in f]
2242 lines[0] += ' -3'
2244 lines[0] += ' -3'
2243 f.close()
2245 f.close()
2244 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2246 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2245 for line in lines:
2247 for line in lines:
2246 f.write(line + '\n')
2248 f.write(line + '\n')
2247 f.close()
2249 f.close()
2248
2250
2249 hgbat = os.path.join(self._bindir, b'hg.bat')
2251 hgbat = os.path.join(self._bindir, b'hg.bat')
2250 if os.path.isfile(hgbat):
2252 if os.path.isfile(hgbat):
2251 # hg.bat expects to be put in bin/scripts while run-tests.py
2253 # hg.bat expects to be put in bin/scripts while run-tests.py
2252 # installation layout puts it in bin/ directly. Fix it.
2254 # installation layout puts it in bin/ directly. Fix it.
2253 f = open(hgbat, 'rb')
2255 f = open(hgbat, 'rb')
2254 data = f.read()
2256 data = f.read()
2255 f.close()
2257 f.close()
2256 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2258 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2257 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2259 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2258 b'"%~dp0python" "%~dp0hg" %*')
2260 b'"%~dp0python" "%~dp0hg" %*')
2259 f = open(hgbat, 'wb')
2261 f = open(hgbat, 'wb')
2260 f.write(data)
2262 f.write(data)
2261 f.close()
2263 f.close()
2262 else:
2264 else:
2263 print('WARNING: cannot fix hg.bat reference to python.exe')
2265 print('WARNING: cannot fix hg.bat reference to python.exe')
2264
2266
2265 if self.options.anycoverage:
2267 if self.options.anycoverage:
2266 custom = os.path.join(self._testdir, 'sitecustomize.py')
2268 custom = os.path.join(self._testdir, 'sitecustomize.py')
2267 target = os.path.join(self._pythondir, 'sitecustomize.py')
2269 target = os.path.join(self._pythondir, 'sitecustomize.py')
2268 vlog('# Installing coverage trigger to %s' % target)
2270 vlog('# Installing coverage trigger to %s' % target)
2269 shutil.copyfile(custom, target)
2271 shutil.copyfile(custom, target)
2270 rc = os.path.join(self._testdir, '.coveragerc')
2272 rc = os.path.join(self._testdir, '.coveragerc')
2271 vlog('# Installing coverage rc to %s' % rc)
2273 vlog('# Installing coverage rc to %s' % rc)
2272 os.environ['COVERAGE_PROCESS_START'] = rc
2274 os.environ['COVERAGE_PROCESS_START'] = rc
2273 covdir = os.path.join(self._installdir, '..', 'coverage')
2275 covdir = os.path.join(self._installdir, '..', 'coverage')
2274 try:
2276 try:
2275 os.mkdir(covdir)
2277 os.mkdir(covdir)
2276 except OSError as e:
2278 except OSError as e:
2277 if e.errno != errno.EEXIST:
2279 if e.errno != errno.EEXIST:
2278 raise
2280 raise
2279
2281
2280 os.environ['COVERAGE_DIR'] = covdir
2282 os.environ['COVERAGE_DIR'] = covdir
2281
2283
2282 def _checkhglib(self, verb):
2284 def _checkhglib(self, verb):
2283 """Ensure that the 'mercurial' package imported by python is
2285 """Ensure that the 'mercurial' package imported by python is
2284 the one we expect it to be. If not, print a warning to stderr."""
2286 the one we expect it to be. If not, print a warning to stderr."""
2285 if ((self._bindir == self._pythondir) and
2287 if ((self._bindir == self._pythondir) and
2286 (self._bindir != self._tmpbindir)):
2288 (self._bindir != self._tmpbindir)):
2287 # The pythondir has been inferred from --with-hg flag.
2289 # The pythondir has been inferred from --with-hg flag.
2288 # We cannot expect anything sensible here.
2290 # We cannot expect anything sensible here.
2289 return
2291 return
2290 expecthg = os.path.join(self._pythondir, b'mercurial')
2292 expecthg = os.path.join(self._pythondir, b'mercurial')
2291 actualhg = self._gethgpath()
2293 actualhg = self._gethgpath()
2292 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2294 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2293 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2295 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2294 ' (expected %s)\n'
2296 ' (expected %s)\n'
2295 % (verb, actualhg, expecthg))
2297 % (verb, actualhg, expecthg))
2296 def _gethgpath(self):
2298 def _gethgpath(self):
2297 """Return the path to the mercurial package that is actually found by
2299 """Return the path to the mercurial package that is actually found by
2298 the current Python interpreter."""
2300 the current Python interpreter."""
2299 if self._hgpath is not None:
2301 if self._hgpath is not None:
2300 return self._hgpath
2302 return self._hgpath
2301
2303
2302 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2304 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2303 cmd = cmd % PYTHON
2305 cmd = cmd % PYTHON
2304 if PYTHON3:
2306 if PYTHON3:
2305 cmd = _strpath(cmd)
2307 cmd = _strpath(cmd)
2306 pipe = os.popen(cmd)
2308 pipe = os.popen(cmd)
2307 try:
2309 try:
2308 self._hgpath = _bytespath(pipe.read().strip())
2310 self._hgpath = _bytespath(pipe.read().strip())
2309 finally:
2311 finally:
2310 pipe.close()
2312 pipe.close()
2311
2313
2312 return self._hgpath
2314 return self._hgpath
2313
2315
2314 def _outputcoverage(self):
2316 def _outputcoverage(self):
2315 """Produce code coverage output."""
2317 """Produce code coverage output."""
2316 from coverage import coverage
2318 from coverage import coverage
2317
2319
2318 vlog('# Producing coverage report')
2320 vlog('# Producing coverage report')
2319 # chdir is the easiest way to get short, relative paths in the
2321 # chdir is the easiest way to get short, relative paths in the
2320 # output.
2322 # output.
2321 os.chdir(self._hgroot)
2323 os.chdir(self._hgroot)
2322 covdir = os.path.join(self._installdir, '..', 'coverage')
2324 covdir = os.path.join(self._installdir, '..', 'coverage')
2323 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2325 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2324
2326
2325 # Map install directory paths back to source directory.
2327 # Map install directory paths back to source directory.
2326 cov.config.paths['srcdir'] = ['.', self._pythondir]
2328 cov.config.paths['srcdir'] = ['.', self._pythondir]
2327
2329
2328 cov.combine()
2330 cov.combine()
2329
2331
2330 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2332 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2331 cov.report(ignore_errors=True, omit=omit)
2333 cov.report(ignore_errors=True, omit=omit)
2332
2334
2333 if self.options.htmlcov:
2335 if self.options.htmlcov:
2334 htmldir = os.path.join(self._testdir, 'htmlcov')
2336 htmldir = os.path.join(self._testdir, 'htmlcov')
2335 cov.html_report(directory=htmldir, omit=omit)
2337 cov.html_report(directory=htmldir, omit=omit)
2336 if self.options.annotate:
2338 if self.options.annotate:
2337 adir = os.path.join(self._testdir, 'annotated')
2339 adir = os.path.join(self._testdir, 'annotated')
2338 if not os.path.isdir(adir):
2340 if not os.path.isdir(adir):
2339 os.mkdir(adir)
2341 os.mkdir(adir)
2340 cov.annotate(directory=adir, omit=omit)
2342 cov.annotate(directory=adir, omit=omit)
2341
2343
2342 def _findprogram(self, program):
2344 def _findprogram(self, program):
2343 """Search PATH for an executable program."""
2345 """Search PATH for an executable program."""
2344 dpb = _bytespath(os.defpath)
2346 dpb = _bytespath(os.defpath)
2345 sepb = _bytespath(os.pathsep)
2347 sepb = _bytespath(os.pathsep)
2346 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2348 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2347 name = os.path.join(p, program)
2349 name = os.path.join(p, program)
2348 if os.name == 'nt' or os.access(name, os.X_OK):
2350 if os.name == 'nt' or os.access(name, os.X_OK):
2349 return name
2351 return name
2350 return None
2352 return None
2351
2353
2352 def _checktools(self):
2354 def _checktools(self):
2353 """Ensure tools required to run tests are present."""
2355 """Ensure tools required to run tests are present."""
2354 for p in self.REQUIREDTOOLS:
2356 for p in self.REQUIREDTOOLS:
2355 if os.name == 'nt' and not p.endswith('.exe'):
2357 if os.name == 'nt' and not p.endswith('.exe'):
2356 p += '.exe'
2358 p += '.exe'
2357 found = self._findprogram(p)
2359 found = self._findprogram(p)
2358 if found:
2360 if found:
2359 vlog("# Found prerequisite", p, "at", found)
2361 vlog("# Found prerequisite", p, "at", found)
2360 else:
2362 else:
2361 print("WARNING: Did not find prerequisite tool: %s " % p)
2363 print("WARNING: Did not find prerequisite tool: %s " % p)
2362
2364
2363 if __name__ == '__main__':
2365 if __name__ == '__main__':
2364 runner = TestRunner()
2366 runner = TestRunner()
2365
2367
2366 try:
2368 try:
2367 import msvcrt
2369 import msvcrt
2368 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2370 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2369 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2371 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2370 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2372 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2371 except ImportError:
2373 except ImportError:
2372 pass
2374 pass
2373
2375
2374 sys.exit(runner.run(sys.argv[1:]))
2376 sys.exit(runner.run(sys.argv[1:]))
@@ -1,689 +1,695 @@
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ unset HGTEST_JOBS
5 $ unset HGTEST_JOBS
6 $ unset HGTEST_TIMEOUT
6 $ unset HGTEST_TIMEOUT
7 $ unset HGTEST_PORT
7 $ unset HGTEST_PORT
8 $ unset HGTEST_SHELL
8 $ unset HGTEST_SHELL
9
9
10 Smoke test with install
10 Smoke test with install
11 ============
11 ============
12
12
13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14
14
15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16
16
17 Define a helper to avoid the install step
17 Define a helper to avoid the install step
18 =============
18 =============
19 $ rt()
19 $ rt()
20 > {
20 > {
21 > run-tests.py --with-hg=`which hg` "$@"
21 > run-tests.py --with-hg=`which hg` "$@"
22 > }
22 > }
23
23
24 a successful test
24 a successful test
25 =======================
25 =======================
26
26
27 $ cat > test-success.t << EOF
27 $ cat > test-success.t << EOF
28 > $ echo babar
28 > $ echo babar
29 > babar
29 > babar
30 > $ echo xyzzy
30 > $ echo xyzzy
31 > never happens (?)
31 > never happens (?)
32 > xyzzy
32 > xyzzy
33 > nor this (?)
33 > nor this (?)
34 > EOF
34 > EOF
35
35
36 $ rt
36 $ rt
37 .
37 .
38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
39
39
40 failing test
40 failing test
41 ==================
41 ==================
42
42
43 $ cat > test-failure.t << EOF
43 $ cat > test-failure.t << EOF
44 > $ echo babar
44 > $ echo babar
45 > rataxes
45 > rataxes
46 > This is a noop statement so that
46 > This is a noop statement so that
47 > this test is still more bytes than success.
47 > this test is still more bytes than success.
48 > EOF
48 > EOF
49
49
50 >>> fh = open('test-failure-unicode.t', 'wb')
50 >>> fh = open('test-failure-unicode.t', 'wb')
51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
53
53
54 $ rt
54 $ rt
55
55
56 --- $TESTTMP/test-failure.t
56 --- $TESTTMP/test-failure.t
57 +++ $TESTTMP/test-failure.t.err
57 +++ $TESTTMP/test-failure.t.err
58 @@ -1,4 +1,4 @@
58 @@ -1,4 +1,4 @@
59 $ echo babar
59 $ echo babar
60 - rataxes
60 - rataxes
61 + babar
61 + babar
62 This is a noop statement so that
62 This is a noop statement so that
63 this test is still more bytes than success.
63 this test is still more bytes than success.
64
64
65 ERROR: test-failure.t output changed
65 ERROR: test-failure.t output changed
66 !.
66 !.
67 --- $TESTTMP/test-failure-unicode.t
67 --- $TESTTMP/test-failure-unicode.t
68 +++ $TESTTMP/test-failure-unicode.t.err
68 +++ $TESTTMP/test-failure-unicode.t.err
69 @@ -1,2 +1,2 @@
69 @@ -1,2 +1,2 @@
70 $ echo babar\xce\xb1 (esc)
70 $ echo babar\xce\xb1 (esc)
71 - l\xce\xb5\xce\xb5t (esc)
71 - l\xce\xb5\xce\xb5t (esc)
72 + babar\xce\xb1 (esc)
72 + babar\xce\xb1 (esc)
73
73
74 ERROR: test-failure-unicode.t output changed
74 ERROR: test-failure-unicode.t output changed
75 !
75 !
76 Failed test-failure.t: output changed
76 Failed test-failure.t: output changed
77 Failed test-failure-unicode.t: output changed
77 Failed test-failure-unicode.t: output changed
78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
79 python hash seed: * (glob)
79 python hash seed: * (glob)
80 [1]
80 [1]
81
81
82 test --xunit support
82 test --xunit support
83 $ rt --xunit=xunit.xml
83 $ rt --xunit=xunit.xml
84
84
85 --- $TESTTMP/test-failure.t
85 --- $TESTTMP/test-failure.t
86 +++ $TESTTMP/test-failure.t.err
86 +++ $TESTTMP/test-failure.t.err
87 @@ -1,4 +1,4 @@
87 @@ -1,4 +1,4 @@
88 $ echo babar
88 $ echo babar
89 - rataxes
89 - rataxes
90 + babar
90 + babar
91 This is a noop statement so that
91 This is a noop statement so that
92 this test is still more bytes than success.
92 this test is still more bytes than success.
93
93
94 ERROR: test-failure.t output changed
94 ERROR: test-failure.t output changed
95 !.
95 !.
96 --- $TESTTMP/test-failure-unicode.t
96 --- $TESTTMP/test-failure-unicode.t
97 +++ $TESTTMP/test-failure-unicode.t.err
97 +++ $TESTTMP/test-failure-unicode.t.err
98 @@ -1,2 +1,2 @@
98 @@ -1,2 +1,2 @@
99 $ echo babar\xce\xb1 (esc)
99 $ echo babar\xce\xb1 (esc)
100 - l\xce\xb5\xce\xb5t (esc)
100 - l\xce\xb5\xce\xb5t (esc)
101 + babar\xce\xb1 (esc)
101 + babar\xce\xb1 (esc)
102
102
103 ERROR: test-failure-unicode.t output changed
103 ERROR: test-failure-unicode.t output changed
104 !
104 !
105 Failed test-failure.t: output changed
105 Failed test-failure.t: output changed
106 Failed test-failure-unicode.t: output changed
106 Failed test-failure-unicode.t: output changed
107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
108 python hash seed: * (glob)
108 python hash seed: * (glob)
109 [1]
109 [1]
110 $ cat xunit.xml
110 $ cat xunit.xml
111 <?xml version="1.0" encoding="utf-8"?>
111 <?xml version="1.0" encoding="utf-8"?>
112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
113 <testcase name="test-success.t" time="*"/> (glob)
113 <testcase name="test-success.t" time="*"/> (glob)
114 <testcase name="test-failure-unicode.t" time="*"> (glob)
114 <testcase name="test-failure-unicode.t" time="*"> (glob)
115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
116 +++ $TESTTMP/test-failure-unicode.t.err
116 +++ $TESTTMP/test-failure-unicode.t.err
117 @@ -1,2 +1,2 @@
117 @@ -1,2 +1,2 @@
118 $ echo babar\xce\xb1 (esc)
118 $ echo babar\xce\xb1 (esc)
119 - l\xce\xb5\xce\xb5t (esc)
119 - l\xce\xb5\xce\xb5t (esc)
120 + babar\xce\xb1 (esc)
120 + babar\xce\xb1 (esc)
121 ]]> </testcase>
121 ]]> </testcase>
122 <testcase name="test-failure.t" time="*"> (glob)
122 <testcase name="test-failure.t" time="*"> (glob)
123 <![CDATA[--- $TESTTMP/test-failure.t
123 <![CDATA[--- $TESTTMP/test-failure.t
124 +++ $TESTTMP/test-failure.t.err
124 +++ $TESTTMP/test-failure.t.err
125 @@ -1,4 +1,4 @@
125 @@ -1,4 +1,4 @@
126 $ echo babar
126 $ echo babar
127 - rataxes
127 - rataxes
128 + babar
128 + babar
129 This is a noop statement so that
129 This is a noop statement so that
130 this test is still more bytes than success.
130 this test is still more bytes than success.
131 ]]> </testcase>
131 ]]> </testcase>
132 </testsuite>
132 </testsuite>
133
133
134 $ rm test-failure-unicode.t
134 $ rm test-failure-unicode.t
135
135
136 test for --retest
136 test for --retest
137 ====================
137 ====================
138
138
139 $ rt --retest
139 $ rt --retest
140
140
141 --- $TESTTMP/test-failure.t
141 --- $TESTTMP/test-failure.t
142 +++ $TESTTMP/test-failure.t.err
142 +++ $TESTTMP/test-failure.t.err
143 @@ -1,4 +1,4 @@
143 @@ -1,4 +1,4 @@
144 $ echo babar
144 $ echo babar
145 - rataxes
145 - rataxes
146 + babar
146 + babar
147 This is a noop statement so that
147 This is a noop statement so that
148 this test is still more bytes than success.
148 this test is still more bytes than success.
149
149
150 ERROR: test-failure.t output changed
150 ERROR: test-failure.t output changed
151 !
151 !
152 Failed test-failure.t: output changed
152 Failed test-failure.t: output changed
153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
154 python hash seed: * (glob)
154 python hash seed: * (glob)
155 [1]
155 [1]
156
156
157 Selecting Tests To Run
157 Selecting Tests To Run
158 ======================
158 ======================
159
159
160 successful
160 successful
161
161
162 $ rt test-success.t
162 $ rt test-success.t
163 .
163 .
164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
165
165
166 success w/ keyword
166 success w/ keyword
167 $ rt -k xyzzy
167 $ rt -k xyzzy
168 .
168 .
169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
170
170
171 failed
171 failed
172
172
173 $ rt test-failure.t
173 $ rt test-failure.t
174
174
175 --- $TESTTMP/test-failure.t
175 --- $TESTTMP/test-failure.t
176 +++ $TESTTMP/test-failure.t.err
176 +++ $TESTTMP/test-failure.t.err
177 @@ -1,4 +1,4 @@
177 @@ -1,4 +1,4 @@
178 $ echo babar
178 $ echo babar
179 - rataxes
179 - rataxes
180 + babar
180 + babar
181 This is a noop statement so that
181 This is a noop statement so that
182 this test is still more bytes than success.
182 this test is still more bytes than success.
183
183
184 ERROR: test-failure.t output changed
184 ERROR: test-failure.t output changed
185 !
185 !
186 Failed test-failure.t: output changed
186 Failed test-failure.t: output changed
187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
188 python hash seed: * (glob)
188 python hash seed: * (glob)
189 [1]
189 [1]
190
190
191 failure w/ keyword
191 failure w/ keyword
192 $ rt -k rataxes
192 $ rt -k rataxes
193
193
194 --- $TESTTMP/test-failure.t
194 --- $TESTTMP/test-failure.t
195 +++ $TESTTMP/test-failure.t.err
195 +++ $TESTTMP/test-failure.t.err
196 @@ -1,4 +1,4 @@
196 @@ -1,4 +1,4 @@
197 $ echo babar
197 $ echo babar
198 - rataxes
198 - rataxes
199 + babar
199 + babar
200 This is a noop statement so that
200 This is a noop statement so that
201 this test is still more bytes than success.
201 this test is still more bytes than success.
202
202
203 ERROR: test-failure.t output changed
203 ERROR: test-failure.t output changed
204 !
204 !
205 Failed test-failure.t: output changed
205 Failed test-failure.t: output changed
206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
207 python hash seed: * (glob)
207 python hash seed: * (glob)
208 [1]
208 [1]
209
209
210 Verify that when a process fails to start we show a useful message
210 Verify that when a process fails to start we show a useful message
211 ==================================================================
211 ==================================================================
212
212
213 $ cat > test-serve-fail.t <<EOF
213 $ cat > test-serve-fail.t <<EOF
214 > $ echo 'abort: child process failed to start blah'
214 > $ echo 'abort: child process failed to start blah'
215 > EOF
215 > EOF
216 $ rt test-serve-fail.t
216 $ rt test-serve-fail.t
217
217
218 ERROR: test-serve-fail.t output changed
218 ERROR: test-serve-fail.t output changed
219 !
219 !
220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
222 python hash seed: * (glob)
222 python hash seed: * (glob)
223 [1]
223 [1]
224 $ rm test-serve-fail.t
224 $ rm test-serve-fail.t
225
225
226 Verify that we can try other ports
226 Verify that we can try other ports
227 ===================================
227 ===================================
228 $ hg init inuse
228 $ hg init inuse
229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
230 $ cat blocks.pid >> $DAEMON_PIDS
230 $ cat blocks.pid >> $DAEMON_PIDS
231 $ cat > test-serve-inuse.t <<EOF
231 $ cat > test-serve-inuse.t <<EOF
232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
233 > $ cat hg.pid >> \$DAEMON_PIDS
233 > $ cat hg.pid >> \$DAEMON_PIDS
234 > EOF
234 > EOF
235 $ rt test-serve-inuse.t
235 $ rt test-serve-inuse.t
236 .
236 .
237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
238 $ rm test-serve-inuse.t
238 $ rm test-serve-inuse.t
239
239
240 Running In Debug Mode
240 Running In Debug Mode
241 ======================
241 ======================
242
242
243 $ rt --debug 2>&1 | grep -v pwd
243 $ rt --debug 2>&1 | grep -v pwd
244 + echo *SALT* 0 0 (glob)
244 + echo *SALT* 0 0 (glob)
245 *SALT* 0 0 (glob)
245 *SALT* 0 0 (glob)
246 + echo babar
246 + echo babar
247 babar
247 babar
248 + echo *SALT* 4 0 (glob)
248 + echo *SALT* 4 0 (glob)
249 *SALT* 4 0 (glob)
249 *SALT* 4 0 (glob)
250 *+ echo *SALT* 0 0 (glob)
250 *+ echo *SALT* 0 0 (glob)
251 *SALT* 0 0 (glob)
251 *SALT* 0 0 (glob)
252 + echo babar
252 + echo babar
253 babar
253 babar
254 + echo *SALT* 2 0 (glob)
254 + echo *SALT* 2 0 (glob)
255 *SALT* 2 0 (glob)
255 *SALT* 2 0 (glob)
256 + echo xyzzy
256 + echo xyzzy
257 xyzzy
257 xyzzy
258 + echo *SALT* 6 0 (glob)
258 + echo *SALT* 6 0 (glob)
259 *SALT* 6 0 (glob)
259 *SALT* 6 0 (glob)
260 .
260 .
261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
262
262
263 Parallel runs
263 Parallel runs
264 ==============
264 ==============
265
265
266 (duplicate the failing test to get predictable output)
266 (duplicate the failing test to get predictable output)
267 $ cp test-failure.t test-failure-copy.t
267 $ cp test-failure.t test-failure-copy.t
268
268
269 $ rt --jobs 2 test-failure*.t -n
269 $ rt --jobs 2 test-failure*.t -n
270 !!
270 !!
271 Failed test-failure*.t: output changed (glob)
271 Failed test-failure*.t: output changed (glob)
272 Failed test-failure*.t: output changed (glob)
272 Failed test-failure*.t: output changed (glob)
273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
274 python hash seed: * (glob)
274 python hash seed: * (glob)
275 [1]
275 [1]
276
276
277 failures in parallel with --first should only print one failure
277 failures in parallel with --first should only print one failure
278 >>> f = open('test-nothing.t', 'w')
278 >>> f = open('test-nothing.t', 'w')
279 >>> f.write('foo\n' * 1024) and None
279 >>> f.write('foo\n' * 1024) and None
280 >>> f.write(' $ sleep 1') and None
280 >>> f.write(' $ sleep 1') and None
281 $ rt --jobs 2 --first
281 $ rt --jobs 2 --first
282
282
283 --- $TESTTMP/test-failure*.t (glob)
283 --- $TESTTMP/test-failure*.t (glob)
284 +++ $TESTTMP/test-failure*.t.err (glob)
284 +++ $TESTTMP/test-failure*.t.err (glob)
285 @@ -1,4 +1,4 @@
285 @@ -1,4 +1,4 @@
286 $ echo babar
286 $ echo babar
287 - rataxes
287 - rataxes
288 + babar
288 + babar
289 This is a noop statement so that
289 This is a noop statement so that
290 this test is still more bytes than success.
290 this test is still more bytes than success.
291
291
292 Failed test-failure*.t: output changed (glob)
292 Failed test-failure*.t: output changed (glob)
293 Failed test-nothing.t: output changed
293 Failed test-nothing.t: output changed
294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
295 python hash seed: * (glob)
295 python hash seed: * (glob)
296 [1]
296 [1]
297
297
298
298
299 (delete the duplicated test file)
299 (delete the duplicated test file)
300 $ rm test-failure-copy.t test-nothing.t
300 $ rm test-failure-copy.t test-nothing.t
301
301
302
302
303 Interactive run
303 Interactive run
304 ===============
304 ===============
305
305
306 (backup the failing test)
306 (backup the failing test)
307 $ cp test-failure.t backup
307 $ cp test-failure.t backup
308
308
309 Refuse the fix
309 Refuse the fix
310
310
311 $ echo 'n' | rt -i
311 $ echo 'n' | rt -i
312
312
313 --- $TESTTMP/test-failure.t
313 --- $TESTTMP/test-failure.t
314 +++ $TESTTMP/test-failure.t.err
314 +++ $TESTTMP/test-failure.t.err
315 @@ -1,4 +1,4 @@
315 @@ -1,4 +1,4 @@
316 $ echo babar
316 $ echo babar
317 - rataxes
317 - rataxes
318 + babar
318 + babar
319 This is a noop statement so that
319 This is a noop statement so that
320 this test is still more bytes than success.
320 this test is still more bytes than success.
321 Accept this change? [n]
321 Accept this change? [n]
322 ERROR: test-failure.t output changed
322 ERROR: test-failure.t output changed
323 !.
323 !.
324 Failed test-failure.t: output changed
324 Failed test-failure.t: output changed
325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
326 python hash seed: * (glob)
326 python hash seed: * (glob)
327 [1]
327 [1]
328
328
329 $ cat test-failure.t
329 $ cat test-failure.t
330 $ echo babar
330 $ echo babar
331 rataxes
331 rataxes
332 This is a noop statement so that
332 This is a noop statement so that
333 this test is still more bytes than success.
333 this test is still more bytes than success.
334
334
335 Interactive with custom view
335 Interactive with custom view
336
336
337 $ echo 'n' | rt -i --view echo
337 $ echo 'n' | rt -i --view echo
338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
339 Accept this change? [n]* (glob)
339 Accept this change? [n]* (glob)
340 ERROR: test-failure.t output changed
340 ERROR: test-failure.t output changed
341 !.
341 !.
342 Failed test-failure.t: output changed
342 Failed test-failure.t: output changed
343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
344 python hash seed: * (glob)
344 python hash seed: * (glob)
345 [1]
345 [1]
346
346
347 View the fix
347 View the fix
348
348
349 $ echo 'y' | rt --view echo
349 $ echo 'y' | rt --view echo
350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
351
351
352 ERROR: test-failure.t output changed
352 ERROR: test-failure.t output changed
353 !.
353 !.
354 Failed test-failure.t: output changed
354 Failed test-failure.t: output changed
355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
356 python hash seed: * (glob)
356 python hash seed: * (glob)
357 [1]
357 [1]
358
358
359 Accept the fix
359 Accept the fix
360
360
361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
367 $ echo 'y' | rt -i 2>&1
367 $ echo 'y' | rt -i 2>&1
368
368
369 --- $TESTTMP/test-failure.t
369 --- $TESTTMP/test-failure.t
370 +++ $TESTTMP/test-failure.t.err
370 +++ $TESTTMP/test-failure.t.err
371 @@ -1,9 +1,9 @@
371 @@ -1,9 +1,9 @@
372 $ echo babar
372 $ echo babar
373 - rataxes
373 - rataxes
374 + babar
374 + babar
375 This is a noop statement so that
375 This is a noop statement so that
376 this test is still more bytes than success.
376 this test is still more bytes than success.
377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
378 - saved backup bundle to $TESTTMP/foo.hg
378 - saved backup bundle to $TESTTMP/foo.hg
379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
381 saved backup bundle to $TESTTMP/foo.hg* (glob)
381 saved backup bundle to $TESTTMP/foo.hg* (glob)
382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
383 Accept this change? [n] ..
383 Accept this change? [n] ..
384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
385
385
386 $ sed -e 's,(glob)$,&<,g' test-failure.t
386 $ sed -e 's,(glob)$,&<,g' test-failure.t
387 $ echo babar
387 $ echo babar
388 babar
388 babar
389 This is a noop statement so that
389 This is a noop statement so that
390 this test is still more bytes than success.
390 this test is still more bytes than success.
391 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
391 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
392 saved backup bundle to $TESTTMP/foo.hg (glob)<
392 saved backup bundle to $TESTTMP/foo.hg (glob)<
393 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
393 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
394 saved backup bundle to $TESTTMP/foo.hg (glob)<
394 saved backup bundle to $TESTTMP/foo.hg (glob)<
395 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
395 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
396 saved backup bundle to $TESTTMP/*.hg (glob)<
396 saved backup bundle to $TESTTMP/*.hg (glob)<
397
397
398 (reinstall)
398 (reinstall)
399 $ mv backup test-failure.t
399 $ mv backup test-failure.t
400
400
401 No Diff
401 No Diff
402 ===============
402 ===============
403
403
404 $ rt --nodiff
404 $ rt --nodiff
405 !.
405 !.
406 Failed test-failure.t: output changed
406 Failed test-failure.t: output changed
407 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
407 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
408 python hash seed: * (glob)
408 python hash seed: * (glob)
409 [1]
409 [1]
410
410
411 test --tmpdir support
411 test --tmpdir support
412 $ rt --tmpdir=$TESTTMP/keep test-success.t
412 $ rt --tmpdir=$TESTTMP/keep test-success.t
413
413
414 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
414 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
415 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
415 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
416 .
416 .
417 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
417 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
418
418
419 timeouts
419 timeouts
420 ========
420 ========
421 $ cat > test-timeout.t <<EOF
421 $ cat > test-timeout.t <<EOF
422 > $ sleep 2
422 > $ sleep 2
423 > $ echo pass
423 > $ echo pass
424 > pass
424 > pass
425 > EOF
425 > EOF
426 > echo '#require slow' > test-slow-timeout.t
426 > echo '#require slow' > test-slow-timeout.t
427 > cat test-timeout.t >> test-slow-timeout.t
427 > cat test-timeout.t >> test-slow-timeout.t
428 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
428 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
429 st
429 st
430 Skipped test-slow-timeout.t: missing feature: allow slow tests
430 Skipped test-slow-timeout.t: missing feature: allow slow tests
431 Failed test-timeout.t: timed out
431 Failed test-timeout.t: timed out
432 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
432 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
433 python hash seed: * (glob)
433 python hash seed: * (glob)
434 [1]
434 [1]
435 $ rt --timeout=1 --slowtimeout=3 \
435 $ rt --timeout=1 --slowtimeout=3 \
436 > test-timeout.t test-slow-timeout.t --allow-slow-tests
436 > test-timeout.t test-slow-timeout.t --allow-slow-tests
437 .t
437 .t
438 Failed test-timeout.t: timed out
438 Failed test-timeout.t: timed out
439 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
439 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
440 python hash seed: * (glob)
440 python hash seed: * (glob)
441 [1]
441 [1]
442 $ rm test-timeout.t test-slow-timeout.t
442 $ rm test-timeout.t test-slow-timeout.t
443
443
444 test for --time
444 test for --time
445 ==================
445 ==================
446
446
447 $ rt test-success.t --time
447 $ rt test-success.t --time
448 .
448 .
449 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
449 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
450 # Producing time report
450 # Producing time report
451 start end cuser csys real Test
451 start end cuser csys real Test
452 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
452 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
453
453
454 test for --time with --jobs enabled
454 test for --time with --jobs enabled
455 ====================================
455 ====================================
456
456
457 $ rt test-success.t --time --jobs 2
457 $ rt test-success.t --time --jobs 2
458 .
458 .
459 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
459 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
460 # Producing time report
460 # Producing time report
461 start end cuser csys real Test
461 start end cuser csys real Test
462 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
462 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
463
463
464 Skips
464 Skips
465 ================
465 ================
466 $ cat > test-skip.t <<EOF
466 $ cat > test-skip.t <<EOF
467 > $ echo xyzzy
467 > $ echo xyzzy
468 > #require false
468 > #require false
469 > EOF
469 > EOF
470 $ rt --nodiff
470 $ rt --nodiff
471 !.s
471 !.s
472 Skipped test-skip.t: missing feature: nail clipper
472 Skipped test-skip.t: missing feature: nail clipper
473 Failed test-failure.t: output changed
473 Failed test-failure.t: output changed
474 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
474 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
475 python hash seed: * (glob)
475 python hash seed: * (glob)
476 [1]
476 [1]
477
477
478 $ rt --keyword xyzzy
478 $ rt --keyword xyzzy
479 .s
479 .s
480 Skipped test-skip.t: missing feature: nail clipper
480 Skipped test-skip.t: missing feature: nail clipper
481 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
481 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
482
482
483 Skips with xml
483 Skips with xml
484 $ rt --keyword xyzzy \
484 $ rt --keyword xyzzy \
485 > --xunit=xunit.xml
485 > --xunit=xunit.xml
486 .s
486 .s
487 Skipped test-skip.t: missing feature: nail clipper
487 Skipped test-skip.t: missing feature: nail clipper
488 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
488 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
489 $ cat xunit.xml
489 $ cat xunit.xml
490 <?xml version="1.0" encoding="utf-8"?>
490 <?xml version="1.0" encoding="utf-8"?>
491 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
491 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
492 <testcase name="test-success.t" time="*"/> (glob)
492 <testcase name="test-success.t" time="*"/> (glob)
493 </testsuite>
493 </testsuite>
494
494
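The xunit.xml above follows the usual xUnit layout, so generic CI tooling can consume it directly. As a minimal sketch (assuming the file is named xunit.xml in the current directory, as produced by --xunit above), a consumer might read the counts and test cases like this:

  import xml.etree.ElementTree as ET

  # parse the report written by --xunit and summarize the suite attributes
  suite = ET.parse('xunit.xml').getroot()
  print('tests: %s, skipped: %s, failures: %s, errors: %s' % (
      suite.get('tests'), suite.get('skipped'),
      suite.get('failures'), suite.get('errors')))
  for case in suite.findall('testcase'):
      # in the output above only the executed test shows up as a <testcase>
      print('ran %s in %ss' % (case.get('name'), case.get('time')))
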
495 Skips for missing or blacklisted tests don't count as executed:
495 Skips for missing or blacklisted tests don't count as executed:
496 $ echo test-failure.t > blacklist
496 $ echo test-failure.t > blacklist
497 $ rt --blacklist=blacklist \
497 $ rt --blacklist=blacklist \
498 > test-failure.t test-bogus.t
498 > test-failure.t test-bogus.t
499 ss
499 ss
500 Skipped test-bogus.t: Doesn't exist
500 Skipped test-bogus.t: Doesn't exist
501 Skipped test-failure.t: blacklisted
501 Skipped test-failure.t: blacklisted
502 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
502 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
503
503
504 #if json
504 #if json
505
505
506 test for --json
506 test for --json
507 ==================
507 ==================
508
508
509 $ rt --json
509 $ rt --json
510
510
511 --- $TESTTMP/test-failure.t
511 --- $TESTTMP/test-failure.t
512 +++ $TESTTMP/test-failure.t.err
512 +++ $TESTTMP/test-failure.t.err
513 @@ -1,4 +1,4 @@
513 @@ -1,4 +1,4 @@
514 $ echo babar
514 $ echo babar
515 - rataxes
515 - rataxes
516 + babar
516 + babar
517 This is a noop statement so that
517 This is a noop statement so that
518 this test is still more bytes than success.
518 this test is still more bytes than success.
519
519
520 ERROR: test-failure.t output changed
520 ERROR: test-failure.t output changed
521 !.s
521 !.s
522 Skipped test-skip.t: missing feature: nail clipper
522 Skipped test-skip.t: missing feature: nail clipper
523 Failed test-failure.t: output changed
523 Failed test-failure.t: output changed
524 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
524 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
525 python hash seed: * (glob)
525 python hash seed: * (glob)
526 [1]
526 [1]
527
527
528 $ cat report.json
528 $ cat report.json
529 testreport ={
529 testreport ={
530 "test-failure.t": [\{] (re)
530 "test-failure.t": [\{] (re)
531 "csys": "\s*[\d\.]{4,5}", ? (re)
531 "csys": "\s*[\d\.]{4,5}", ? (re)
532 "cuser": "\s*[\d\.]{4,5}", ? (re)
532 "cuser": "\s*[\d\.]{4,5}", ? (re)
533 "diff": "---.+\+\+\+.+", ? (re)
533 "end": "\s*[\d\.]{4,5}", ? (re)
534 "end": "\s*[\d\.]{4,5}", ? (re)
534 "result": "failure", ? (re)
535 "result": "failure", ? (re)
535 "start": "\s*[\d\.]{4,5}", ? (re)
536 "start": "\s*[\d\.]{4,5}", ? (re)
536 "time": "\s*[\d\.]{4,5}" (re)
537 "time": "\s*[\d\.]{4,5}" (re)
537 }, ? (re)
538 }, ? (re)
538 "test-skip.t": {
539 "test-skip.t": {
539 "csys": "\s*[\d\.]{4,5}", ? (re)
540 "csys": "\s*[\d\.]{4,5}", ? (re)
540 "cuser": "\s*[\d\.]{4,5}", ? (re)
541 "cuser": "\s*[\d\.]{4,5}", ? (re)
542 "diff": "", ? (re)
541 "end": "\s*[\d\.]{4,5}", ? (re)
543 "end": "\s*[\d\.]{4,5}", ? (re)
542 "result": "skip", ? (re)
544 "result": "skip", ? (re)
543 "start": "\s*[\d\.]{4,5}", ? (re)
545 "start": "\s*[\d\.]{4,5}", ? (re)
544 "time": "\s*[\d\.]{4,5}" (re)
546 "time": "\s*[\d\.]{4,5}" (re)
545 }, ? (re)
547 }, ? (re)
546 "test-success.t": [\{] (re)
548 "test-success.t": [\{] (re)
547 "csys": "\s*[\d\.]{4,5}", ? (re)
549 "csys": "\s*[\d\.]{4,5}", ? (re)
548 "cuser": "\s*[\d\.]{4,5}", ? (re)
550 "cuser": "\s*[\d\.]{4,5}", ? (re)
551 "diff": "", ? (re)
549 "end": "\s*[\d\.]{4,5}", ? (re)
552 "end": "\s*[\d\.]{4,5}", ? (re)
550 "result": "success", ? (re)
553 "result": "success", ? (re)
551 "start": "\s*[\d\.]{4,5}", ? (re)
554 "start": "\s*[\d\.]{4,5}", ? (re)
552 "time": "\s*[\d\.]{4,5}" (re)
555 "time": "\s*[\d\.]{4,5}" (re)
553 }
556 }
554 } (no-eol)
557 } (no-eol)
555
558
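The point of the new "diff" entry is that the diff of a failing test is now carried in report.json itself, so a tool consuming the report does not need to re-run the test to see what changed. A minimal sketch of such a consumer (assuming report.json is read from the current directory, and keeping in mind that the file starts with the literal prefix "testreport =", as shown above):

  import json

  # the report starts with "testreport =", so strip everything up to the
  # first '=' before handing the rest to the JSON parser
  with open('report.json') as fp:
      report = json.loads(fp.read().split('=', 1)[1])

  for name, entry in sorted(report.items()):
      print('%s: %s' % (name, entry['result']))
      if entry['result'] == 'failure' and entry.get('diff'):
          # the new "diff" field holds the unified diff of the failure
          print(entry['diff'])
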
556 Test that a failed test accepted through interactive mode is properly reported:
559 Test that a failed test accepted through interactive mode is properly reported:
557
560
558 $ cp test-failure.t backup
561 $ cp test-failure.t backup
559 $ echo y | rt --json -i
562 $ echo y | rt --json -i
560
563
561 --- $TESTTMP/test-failure.t
564 --- $TESTTMP/test-failure.t
562 +++ $TESTTMP/test-failure.t.err
565 +++ $TESTTMP/test-failure.t.err
563 @@ -1,4 +1,4 @@
566 @@ -1,4 +1,4 @@
564 $ echo babar
567 $ echo babar
565 - rataxes
568 - rataxes
566 + babar
569 + babar
567 This is a noop statement so that
570 This is a noop statement so that
568 this test is still more bytes than success.
571 this test is still more bytes than success.
569 Accept this change? [n] ..s
572 Accept this change? [n] ..s
570 Skipped test-skip.t: missing feature: nail clipper
573 Skipped test-skip.t: missing feature: nail clipper
571 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
574 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
572
575
573 $ cat report.json
576 $ cat report.json
574 testreport ={
577 testreport ={
575 "test-failure.t": [\{] (re)
578 "test-failure.t": [\{] (re)
576 "csys": "\s*[\d\.]{4,5}", ? (re)
579 "csys": "\s*[\d\.]{4,5}", ? (re)
577 "cuser": "\s*[\d\.]{4,5}", ? (re)
580 "cuser": "\s*[\d\.]{4,5}", ? (re)
581 "diff": "", ? (re)
578 "end": "\s*[\d\.]{4,5}", ? (re)
582 "end": "\s*[\d\.]{4,5}", ? (re)
579 "result": "success", ? (re)
583 "result": "success", ? (re)
580 "start": "\s*[\d\.]{4,5}", ? (re)
584 "start": "\s*[\d\.]{4,5}", ? (re)
581 "time": "\s*[\d\.]{4,5}" (re)
585 "time": "\s*[\d\.]{4,5}" (re)
582 }, ? (re)
586 }, ? (re)
583 "test-skip.t": {
587 "test-skip.t": {
584 "csys": "\s*[\d\.]{4,5}", ? (re)
588 "csys": "\s*[\d\.]{4,5}", ? (re)
585 "cuser": "\s*[\d\.]{4,5}", ? (re)
589 "cuser": "\s*[\d\.]{4,5}", ? (re)
590 "diff": "", ? (re)
586 "end": "\s*[\d\.]{4,5}", ? (re)
591 "end": "\s*[\d\.]{4,5}", ? (re)
587 "result": "skip", ? (re)
592 "result": "skip", ? (re)
588 "start": "\s*[\d\.]{4,5}", ? (re)
593 "start": "\s*[\d\.]{4,5}", ? (re)
589 "time": "\s*[\d\.]{4,5}" (re)
594 "time": "\s*[\d\.]{4,5}" (re)
590 }, ? (re)
595 }, ? (re)
591 "test-success.t": [\{] (re)
596 "test-success.t": [\{] (re)
592 "csys": "\s*[\d\.]{4,5}", ? (re)
597 "csys": "\s*[\d\.]{4,5}", ? (re)
593 "cuser": "\s*[\d\.]{4,5}", ? (re)
598 "cuser": "\s*[\d\.]{4,5}", ? (re)
599 "diff": "", ? (re)
594 "end": "\s*[\d\.]{4,5}", ? (re)
600 "end": "\s*[\d\.]{4,5}", ? (re)
595 "result": "success", ? (re)
601 "result": "success", ? (re)
596 "start": "\s*[\d\.]{4,5}", ? (re)
602 "start": "\s*[\d\.]{4,5}", ? (re)
597 "time": "\s*[\d\.]{4,5}" (re)
603 "time": "\s*[\d\.]{4,5}" (re)
598 }
604 }
599 } (no-eol)
605 } (no-eol)
600 $ mv backup test-failure.t
606 $ mv backup test-failure.t
601
607
602 #endif
608 #endif
603
609
604 a backslash at the end of a line with glob matching is handled properly
610 a backslash at the end of a line with glob matching is handled properly
605
611
606 $ cat > test-glob-backslash.t << EOF
612 $ cat > test-glob-backslash.t << EOF
607 > $ echo 'foo bar \\'
613 > $ echo 'foo bar \\'
608 > foo * \ (glob)
614 > foo * \ (glob)
609 > EOF
615 > EOF
610
616
611 $ rt test-glob-backslash.t
617 $ rt test-glob-backslash.t
612 .
618 .
613 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
619 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
614
620
615 $ rm -f test-glob-backslash.t
621 $ rm -f test-glob-backslash.t
616
622
617 Test reusability for third party tools
623 Test reusability for third party tools
618 ======================================
624 ======================================
619
625
620 $ mkdir "$TESTTMP"/anothertests
626 $ mkdir "$TESTTMP"/anothertests
621 $ cd "$TESTTMP"/anothertests
627 $ cd "$TESTTMP"/anothertests
622
628
623 test that `run-tests.py` can execute hghave, even when it is not run from
629 test that `run-tests.py` can execute hghave, even when it is not run from
624 within the Mercurial source tree.
630 within the Mercurial source tree.
625
631
626 $ cat > test-hghave.t <<EOF
632 $ cat > test-hghave.t <<EOF
627 > #require true
633 > #require true
628 > $ echo foo
634 > $ echo foo
629 > foo
635 > foo
630 > EOF
636 > EOF
631 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
637 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
632 .
638 .
633 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
639 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
634
640
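The test above exercises run-tests.py from outside the Mercurial source tree; a third-party project can drive it the same way. A rough sketch of doing that programmatically (the run-tests.py path and test name below are placeholders, not anything defined above):

  import subprocess
  import sys

  # run a project-local .t test with Mercurial's test runner; the path to
  # run-tests.py is a placeholder for wherever Mercurial is checked out
  cmd = [sys.executable, '/path/to/mercurial/tests/run-tests.py',
         '--json', 'test-myproject.t']
  sys.exit(subprocess.call(cmd))
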
635 test that RUNTESTDIR refers to the directory in which the currently
641 test that RUNTESTDIR refers to the directory in which the currently
636 running `run-tests.py` is placed.
642 running `run-tests.py` is placed.
637
643
638 $ cat > test-runtestdir.t <<EOF
644 $ cat > test-runtestdir.t <<EOF
639 > - $TESTDIR, in which test-run-tests.t is placed
645 > - $TESTDIR, in which test-run-tests.t is placed
640 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
646 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
641 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
647 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
642 >
648 >
643 > #if windows
649 > #if windows
644 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
650 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
645 > #else
651 > #else
646 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
652 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
647 > #endif
653 > #endif
648 > $ test "\$RUNTESTDIR" = "$TESTDIR"
654 > $ test "\$RUNTESTDIR" = "$TESTDIR"
649 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
655 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
650 > #!/usr/bin/env python
656 > #!/usr/bin/env python
651 > #
657 > #
652 > # check-code - a style and portability checker for Mercurial
658 > # check-code - a style and portability checker for Mercurial
653 > EOF
659 > EOF
654 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
660 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
655 .
661 .
656 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
662 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
657
663
658 #if execbit
664 #if execbit
659
665
660 test that TESTDIR is included in PATH
666 test that TESTDIR is included in PATH
661
667
662 $ cat > custom-command.sh <<EOF
668 $ cat > custom-command.sh <<EOF
663 > #!/bin/sh
669 > #!/bin/sh
664 > echo "hello world"
670 > echo "hello world"
665 > EOF
671 > EOF
666 $ chmod +x custom-command.sh
672 $ chmod +x custom-command.sh
667 $ cat > test-testdir-path.t <<EOF
673 $ cat > test-testdir-path.t <<EOF
668 > $ custom-command.sh
674 > $ custom-command.sh
669 > hello world
675 > hello world
670 > EOF
676 > EOF
671 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
677 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
672 .
678 .
673 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
679 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
674
680
675 #endif
681 #endif
676
682
677 test support for --allow-slow-tests
683 test support for --allow-slow-tests
678 $ cat > test-very-slow-test.t <<EOF
684 $ cat > test-very-slow-test.t <<EOF
679 > #require slow
685 > #require slow
680 > $ echo pass
686 > $ echo pass
681 > pass
687 > pass
682 > EOF
688 > EOF
683 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
689 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
684 s
690 s
685 Skipped test-very-slow-test.t: missing feature: allow slow tests
691 Skipped test-very-slow-test.t: missing feature: allow slow tests
686 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
692 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
687 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
693 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
688 .
694 .
689 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
695 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.