run-tests: print number of tests and parallel process count...
Gregory Szorc
r40280:1039404c default
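
Only unchanged context from the top of tests/run-tests.py appears in the hunk below; the lines that implement the change are not part of this excerpt. As a rough illustration of what the commit message describes, a harness might emit one summary line before dispatching work. The function and variable names here are hypothetical and not taken from the patch:

# Illustrative sketch only -- not the actual hunk from r40280.
# Assumes `tests` is the list of selected tests and `jobs` is the -j/--jobs value.
def reportcounts(tests, jobs):
    # Print a single summary line before any per-test output appears.
    print('running %d tests using %d parallel processes' % (len(tests), jobs))

For example, reportcounts(['test-a.t', 'test-b.t'], 2) would print "running 2 tests using 2 parallel processes".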
@@ -1,3234 +1,3239 @@
#!/usr/bin/env python
#
# run-tests.py - Run a set of tests on Mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Modifying this script is tricky because it has many modes:
# - serial (default) vs parallel (-jN, N > 1)
# - no coverage (default) vs coverage (-c, -C, -s)
# - temp install (default) vs specific hg script (--with-hg, --local)
# - tests are a mix of shell scripts and Python scripts
#
# If you change this script, it is recommended that you ensure you
# haven't broken it by running it in various modes with a representative
# sample of test scripts. For example:
#
# 1) serial, no coverage, temp install:
#     ./run-tests.py test-s*
# 2) serial, no coverage, local hg:
#     ./run-tests.py --local test-s*
# 3) serial, coverage, temp install:
#     ./run-tests.py -c test-s*
# 4) serial, coverage, local hg:
#     ./run-tests.py -c --local test-s* # unsupported
# 5) parallel, no coverage, temp install:
#     ./run-tests.py -j2 test-s*
# 6) parallel, no coverage, local hg:
#     ./run-tests.py -j2 --local test-s*
# 7) parallel, coverage, temp install:
#     ./run-tests.py -j2 -c test-s* # currently broken
# 8) parallel, coverage, local install:
#     ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
# 9) parallel, custom tmp dir:
#     ./run-tests.py -j2 --tmpdir /tmp/myhgtests
# 10) parallel, pure, tests that call run-tests:
#     ./run-tests.py --pure `grep -l run-tests.py *.t`
#
# (You could use any subset of the tests: test-s* happens to match
# enough that it's worth doing parallel runs, few enough that it
# completes fairly quickly, includes both shell and Python scripts, and
# includes some scripts that run daemon processes.)

from __future__ import absolute_import, print_function

import argparse
import collections
import difflib
import distutils.version as version
import errno
import json
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import sysconfig
import tempfile
import threading
import time
import unittest
import uuid
import xml.dom.minidom as minidom

try:
    import Queue as queue
except ImportError:
    import queue

try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()

origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            def __init__(self, strenv):
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv
            def __getitem__(self, k):
                v = self._strenv.__getitem__(_strpath(k))
                return _bytespath(v)
            def __setitem__(self, k, v):
                self._strenv.__setitem__(_strpath(k), _strpath(v))
            def __delitem__(self, k):
                self._strenv.__delitem__(_strpath(k))
            def __contains__(self, k):
                return self._strenv.__contains__(_strpath(k))
            def __iter__(self):
                return iter([_bytespath(k) for k in iter(self._strenv)])
            def get(self, k, default=None):
                v = self._strenv.get(_strpath(k), _strpath(default))
                return _bytespath(v)
            def pop(self, k, default=None):
                v = self._strenv.pop(_strpath(k), _strpath(default))
                return _bytespath(v)

        osenvironb = environbytes(os.environ)

    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _bytespath(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
    osenvironb = os.environ
    getcwdb = os.getcwd

# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    else:
        return False

# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False

closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    processlock.acquire()
    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
                         cwd=_strpath(wd), env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p

PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}

def canonpath(path):
    return os.path.realpath(os.path.expanduser(path))

def parselistfiles(files, listtype, warn=True):
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        for line in f.readlines():
            line = line.split(b'#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries

def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = []
    try:
        with open(path, 'rb') as f:
            for l in f:
                if l.startswith(b'#testcases '):
                    cases.append(sorted(l[11:].split()))
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases

def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")

    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser

def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results."""
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options

def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    shutil.copy(src, dst)
    os.remove(src)

_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
                b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines

verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)

# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
622 #
622 #
623 # output..output (feature !)\n
623 # output..output (feature !)\n
624 optline = re.compile(b'(.*) \((.+?) !\)\n$')
624 optline = re.compile(b'(.*) \((.+?) !\)\n$')
625
625
626 def cdatasafe(data):
626 def cdatasafe(data):
627 """Make a string safe to include in a CDATA block.
627 """Make a string safe to include in a CDATA block.
628
628
629 Certain control characters are illegal in a CDATA block, and
629 Certain control characters are illegal in a CDATA block, and
630 there's no way to include a ]]> in a CDATA either. This function
630 there's no way to include a ]]> in a CDATA either. This function
631 replaces illegal bytes with ? and adds a space between the ]] so
631 replaces illegal bytes with ? and adds a space between the ]] so
632 that it won't break the CDATA block.
632 that it won't break the CDATA block.
633 """
633 """
634 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
634 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
635
635
636 def log(*msg):
636 def log(*msg):
637 """Log something to stdout.
637 """Log something to stdout.
638
638
639 Arguments are strings to print.
639 Arguments are strings to print.
640 """
640 """
641 with iolock:
641 with iolock:
642 if verbose:
642 if verbose:
643 print(verbose, end=' ')
643 print(verbose, end=' ')
644 for m in msg:
644 for m in msg:
645 print(m, end=' ')
645 print(m, end=' ')
646 print()
646 print()
647 sys.stdout.flush()
647 sys.stdout.flush()
648
648
649 def highlightdiff(line, color):
649 def highlightdiff(line, color):
650 if not color:
650 if not color:
651 return line
651 return line
652 assert pygmentspresent
652 assert pygmentspresent
653 return pygments.highlight(line.decode('latin1'), difflexer,
653 return pygments.highlight(line.decode('latin1'), difflexer,
654 terminal256formatter).encode('latin1')
654 terminal256formatter).encode('latin1')
655
655
656 def highlightmsg(msg, color):
656 def highlightmsg(msg, color):
657 if not color:
657 if not color:
658 return msg
658 return msg
659 assert pygmentspresent
659 assert pygmentspresent
660 return pygments.highlight(msg, runnerlexer, runnerformatter)
660 return pygments.highlight(msg, runnerlexer, runnerformatter)
661
661
662 def terminate(proc):
662 def terminate(proc):
663 """Terminate subprocess"""
663 """Terminate subprocess"""
664 vlog('# Terminating process %d' % proc.pid)
664 vlog('# Terminating process %d' % proc.pid)
665 try:
665 try:
666 proc.terminate()
666 proc.terminate()
667 except OSError:
667 except OSError:
668 pass
668 pass
669
669
670 def killdaemons(pidfile):
670 def killdaemons(pidfile):
671 import killdaemons as killmod
671 import killdaemons as killmod
672 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
672 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
673 logfn=vlog)
673 logfn=vlog)
674
674
675 class Test(unittest.TestCase):
675 class Test(unittest.TestCase):
676 """Encapsulates a single, runnable test.
676 """Encapsulates a single, runnable test.
677
677
678 While this class conforms to the unittest.TestCase API, it differs in that
678 While this class conforms to the unittest.TestCase API, it differs in that
679 instances need to be instantiated manually. (Typically, unittest.TestCase
679 instances need to be instantiated manually. (Typically, unittest.TestCase
680 classes are instantiated automatically by scanning modules.)
680 classes are instantiated automatically by scanning modules.)
681 """
681 """
682
682
683 # Status code reserved for skipped tests (used by hghave).
683 # Status code reserved for skipped tests (used by hghave).
684 SKIPPED_STATUS = 80
684 SKIPPED_STATUS = 80
685
685
686 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
686 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
687 debug=False,
687 debug=False,
688 first=False,
688 first=False,
689 timeout=None,
689 timeout=None,
690 startport=None, extraconfigopts=None,
690 startport=None, extraconfigopts=None,
691 py3kwarnings=False, shell=None, hgcommand=None,
691 py3kwarnings=False, shell=None, hgcommand=None,
692 slowtimeout=None, usechg=False,
692 slowtimeout=None, usechg=False,
693 useipv6=False):
693 useipv6=False):
694 """Create a test from parameters.
694 """Create a test from parameters.
695
695
696 path is the full path to the file defining the test.
696 path is the full path to the file defining the test.
697
697
698 tmpdir is the main temporary directory to use for this test.
698 tmpdir is the main temporary directory to use for this test.
699
699
700 keeptmpdir determines whether to keep the test's temporary directory
700 keeptmpdir determines whether to keep the test's temporary directory
701 after execution. It defaults to removal (False).
701 after execution. It defaults to removal (False).
702
702
703 debug mode will make the test execute verbosely, with unfiltered
703 debug mode will make the test execute verbosely, with unfiltered
704 output.
704 output.
705
705
706 timeout controls the maximum run time of the test. It is ignored when
706 timeout controls the maximum run time of the test. It is ignored when
707 debug is True. See slowtimeout for tests with #require slow.
707 debug is True. See slowtimeout for tests with #require slow.
708
708
709 slowtimeout overrides timeout if the test has #require slow.
709 slowtimeout overrides timeout if the test has #require slow.
710
710
711 startport controls the starting port number to use for this test. Each
711 startport controls the starting port number to use for this test. Each
712 test will reserve 3 port numbers for execution. It is the caller's
712 test will reserve 3 port numbers for execution. It is the caller's
713 responsibility to allocate a non-overlapping port range to Test
713 responsibility to allocate a non-overlapping port range to Test
714 instances.
714 instances.
715
715
716 extraconfigopts is an iterable of extra hgrc config options. Values
716 extraconfigopts is an iterable of extra hgrc config options. Values
717 must have the form "key=value" (something understood by hgrc). Values
717 must have the form "key=value" (something understood by hgrc). Values
718 of the form "foo.key=value" will result in "[foo] key=value".
718 of the form "foo.key=value" will result in "[foo] key=value".
719
719
720 py3kwarnings enables Py3k warnings.
720 py3kwarnings enables Py3k warnings.
721
721
722 shell is the shell to execute tests in.
722 shell is the shell to execute tests in.
723 """
723 """
724 if timeout is None:
724 if timeout is None:
725 timeout = defaults['timeout']
725 timeout = defaults['timeout']
726 if startport is None:
726 if startport is None:
727 startport = defaults['port']
727 startport = defaults['port']
728 if slowtimeout is None:
728 if slowtimeout is None:
729 slowtimeout = defaults['slowtimeout']
729 slowtimeout = defaults['slowtimeout']
730 self.path = path
730 self.path = path
731 self.bname = os.path.basename(path)
731 self.bname = os.path.basename(path)
732 self.name = _strpath(self.bname)
732 self.name = _strpath(self.bname)
733 self._testdir = os.path.dirname(path)
733 self._testdir = os.path.dirname(path)
734 self._outputdir = outputdir
734 self._outputdir = outputdir
735 self._tmpname = os.path.basename(path)
735 self._tmpname = os.path.basename(path)
736 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
736 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
737
737
738 self._threadtmp = tmpdir
738 self._threadtmp = tmpdir
739 self._keeptmpdir = keeptmpdir
739 self._keeptmpdir = keeptmpdir
740 self._debug = debug
740 self._debug = debug
741 self._first = first
741 self._first = first
742 self._timeout = timeout
742 self._timeout = timeout
743 self._slowtimeout = slowtimeout
743 self._slowtimeout = slowtimeout
744 self._startport = startport
744 self._startport = startport
745 self._extraconfigopts = extraconfigopts or []
745 self._extraconfigopts = extraconfigopts or []
746 self._py3kwarnings = py3kwarnings
746 self._py3kwarnings = py3kwarnings
747 self._shell = _bytespath(shell)
747 self._shell = _bytespath(shell)
748 self._hgcommand = hgcommand or b'hg'
748 self._hgcommand = hgcommand or b'hg'
749 self._usechg = usechg
749 self._usechg = usechg
750 self._useipv6 = useipv6
750 self._useipv6 = useipv6
751
751
752 self._aborted = False
752 self._aborted = False
753 self._daemonpids = []
753 self._daemonpids = []
754 self._finished = None
754 self._finished = None
755 self._ret = None
755 self._ret = None
756 self._out = None
756 self._out = None
757 self._skipped = None
757 self._skipped = None
758 self._testtmp = None
758 self._testtmp = None
759 self._chgsockdir = None
759 self._chgsockdir = None
760
760
761 self._refout = self.readrefout()
761 self._refout = self.readrefout()
762
762
763 def readrefout(self):
763 def readrefout(self):
764 """read reference output"""
764 """read reference output"""
765 # If we're not in --debug mode and reference output file exists,
765 # If we're not in --debug mode and reference output file exists,
766 # check test output against it.
766 # check test output against it.
767 if self._debug:
767 if self._debug:
768 return None # to match "out is None"
768 return None # to match "out is None"
769 elif os.path.exists(self.refpath):
769 elif os.path.exists(self.refpath):
770 with open(self.refpath, 'rb') as f:
770 with open(self.refpath, 'rb') as f:
771 return f.read().splitlines(True)
771 return f.read().splitlines(True)
772 else:
772 else:
773 return []
773 return []
774
774
775 # needed to get base class __repr__ running
775 # needed to get base class __repr__ running
776 @property
776 @property
777 def _testMethodName(self):
777 def _testMethodName(self):
778 return self.name
778 return self.name
779
779
780 def __str__(self):
780 def __str__(self):
781 return self.name
781 return self.name
782
782
783 def shortDescription(self):
783 def shortDescription(self):
784 return self.name
784 return self.name
785
785
786 def setUp(self):
786 def setUp(self):
787 """Tasks to perform before run()."""
787 """Tasks to perform before run()."""
788 self._finished = False
788 self._finished = False
789 self._ret = None
789 self._ret = None
790 self._out = None
790 self._out = None
791 self._skipped = None
791 self._skipped = None
792
792
793 try:
793 try:
794 os.mkdir(self._threadtmp)
794 os.mkdir(self._threadtmp)
795 except OSError as e:
795 except OSError as e:
796 if e.errno != errno.EEXIST:
796 if e.errno != errno.EEXIST:
797 raise
797 raise
798
798
799 name = self._tmpname
799 name = self._tmpname
800 self._testtmp = os.path.join(self._threadtmp, name)
800 self._testtmp = os.path.join(self._threadtmp, name)
801 os.mkdir(self._testtmp)
801 os.mkdir(self._testtmp)
802
802
803 # Remove any previous output files.
803 # Remove any previous output files.
804 if os.path.exists(self.errpath):
804 if os.path.exists(self.errpath):
805 try:
805 try:
806 os.remove(self.errpath)
806 os.remove(self.errpath)
807 except OSError as e:
807 except OSError as e:
808 # We might have raced another test to clean up a .err
808 # We might have raced another test to clean up a .err
809 # file, so ignore ENOENT when removing a previous .err
809 # file, so ignore ENOENT when removing a previous .err
810 # file.
810 # file.
811 if e.errno != errno.ENOENT:
811 if e.errno != errno.ENOENT:
812 raise
812 raise
813
813
814 if self._usechg:
814 if self._usechg:
815 self._chgsockdir = os.path.join(self._threadtmp,
815 self._chgsockdir = os.path.join(self._threadtmp,
816 b'%s.chgsock' % name)
816 b'%s.chgsock' % name)
817 os.mkdir(self._chgsockdir)
817 os.mkdir(self._chgsockdir)
818
818
819 def run(self, result):
819 def run(self, result):
820 """Run this test and report results against a TestResult instance."""
820 """Run this test and report results against a TestResult instance."""
821 # This function is extremely similar to unittest.TestCase.run(). Once
821 # This function is extremely similar to unittest.TestCase.run(). Once
822 # we require Python 2.7 (or at least its version of unittest), this
822 # we require Python 2.7 (or at least its version of unittest), this
823 # function can largely go away.
823 # function can largely go away.
824 self._result = result
824 self._result = result
825 result.startTest(self)
825 result.startTest(self)
826 try:
826 try:
827 try:
827 try:
828 self.setUp()
828 self.setUp()
829 except (KeyboardInterrupt, SystemExit):
829 except (KeyboardInterrupt, SystemExit):
830 self._aborted = True
830 self._aborted = True
831 raise
831 raise
832 except Exception:
832 except Exception:
833 result.addError(self, sys.exc_info())
833 result.addError(self, sys.exc_info())
834 return
834 return
835
835
836 success = False
836 success = False
837 try:
837 try:
838 self.runTest()
838 self.runTest()
839 except KeyboardInterrupt:
839 except KeyboardInterrupt:
840 self._aborted = True
840 self._aborted = True
841 raise
841 raise
842 except unittest.SkipTest as e:
842 except unittest.SkipTest as e:
843 result.addSkip(self, str(e))
843 result.addSkip(self, str(e))
844 # The base class will have already counted this as a
844 # The base class will have already counted this as a
845 # test we "ran", but we want to exclude skipped tests
845 # test we "ran", but we want to exclude skipped tests
846 # from the count of tests run.
846 # from the count of tests run.
847 result.testsRun -= 1
847 result.testsRun -= 1
848 except self.failureException as e:
848 except self.failureException as e:
849 # This differs from unittest in that we don't capture
849 # This differs from unittest in that we don't capture
850 # the stack trace. This is for historical reasons and
850 # the stack trace. This is for historical reasons and
851 # this decision could be revisited in the future,
851 # this decision could be revisited in the future,
852 # especially for PythonTest instances.
852 # especially for PythonTest instances.
853 if result.addFailure(self, str(e)):
853 if result.addFailure(self, str(e)):
854 success = True
854 success = True
855 except Exception:
855 except Exception:
856 result.addError(self, sys.exc_info())
856 result.addError(self, sys.exc_info())
857 else:
857 else:
858 success = True
858 success = True
859
859
860 try:
860 try:
861 self.tearDown()
861 self.tearDown()
862 except (KeyboardInterrupt, SystemExit):
862 except (KeyboardInterrupt, SystemExit):
863 self._aborted = True
863 self._aborted = True
864 raise
864 raise
865 except Exception:
865 except Exception:
866 result.addError(self, sys.exc_info())
866 result.addError(self, sys.exc_info())
867 success = False
867 success = False
868
868
869 if success:
869 if success:
870 result.addSuccess(self)
870 result.addSuccess(self)
871 finally:
871 finally:
872 result.stopTest(self, interrupted=self._aborted)
872 result.stopTest(self, interrupted=self._aborted)
873
873
874 def runTest(self):
874 def runTest(self):
875 """Run this test instance.
875 """Run this test instance.
876
876
877 This will return a tuple describing the result of the test.
877 This will return a tuple describing the result of the test.
878 """
878 """
879 env = self._getenv()
879 env = self._getenv()
880 self._genrestoreenv(env)
880 self._genrestoreenv(env)
881 self._daemonpids.append(env['DAEMON_PIDS'])
881 self._daemonpids.append(env['DAEMON_PIDS'])
882 self._createhgrc(env['HGRCPATH'])
882 self._createhgrc(env['HGRCPATH'])
883
883
884 vlog('# Test', self.name)
884 vlog('# Test', self.name)
885
885
886 ret, out = self._run(env)
886 ret, out = self._run(env)
887 self._finished = True
887 self._finished = True
888 self._ret = ret
888 self._ret = ret
889 self._out = out
889 self._out = out
890
890
891 def describe(ret):
891 def describe(ret):
892 if ret < 0:
892 if ret < 0:
893 return 'killed by signal: %d' % -ret
893 return 'killed by signal: %d' % -ret
894 return 'returned error code %d' % ret
894 return 'returned error code %d' % ret
895
895
896 self._skipped = False
896 self._skipped = False
897
897
898 if ret == self.SKIPPED_STATUS:
898 if ret == self.SKIPPED_STATUS:
899 if out is None: # Debug mode, nothing to parse.
899 if out is None: # Debug mode, nothing to parse.
900 missing = ['unknown']
900 missing = ['unknown']
901 failed = None
901 failed = None
902 else:
902 else:
903 missing, failed = TTest.parsehghaveoutput(out)
903 missing, failed = TTest.parsehghaveoutput(out)
904
904
905 if not missing:
905 if not missing:
906 missing = ['skipped']
906 missing = ['skipped']
907
907
908 if failed:
908 if failed:
909 self.fail('hghave failed checking for %s' % failed[-1])
909 self.fail('hghave failed checking for %s' % failed[-1])
910 else:
910 else:
911 self._skipped = True
911 self._skipped = True
912 raise unittest.SkipTest(missing[-1])
912 raise unittest.SkipTest(missing[-1])
913 elif ret == 'timeout':
913 elif ret == 'timeout':
914 self.fail('timed out')
914 self.fail('timed out')
915 elif ret is False:
915 elif ret is False:
916 self.fail('no result code from test')
916 self.fail('no result code from test')
917 elif out != self._refout:
917 elif out != self._refout:
918 # Diff generation may rely on written .err file.
918 # Diff generation may rely on written .err file.
919 if (ret != 0 or out != self._refout) and not self._skipped \
919 if (ret != 0 or out != self._refout) and not self._skipped \
920 and not self._debug:
920 and not self._debug:
921 with open(self.errpath, 'wb') as f:
921 with open(self.errpath, 'wb') as f:
922 for line in out:
922 for line in out:
923 f.write(line)
923 f.write(line)
924
924
925 # The result object handles diff calculation for us.
925 # The result object handles diff calculation for us.
926 with firstlock:
926 with firstlock:
927 if self._result.addOutputMismatch(self, ret, out, self._refout):
927 if self._result.addOutputMismatch(self, ret, out, self._refout):
928 # change was accepted, skip failing
928 # change was accepted, skip failing
929 return
929 return
930 if self._first:
930 if self._first:
931 global firsterror
931 global firsterror
932 firsterror = True
932 firsterror = True
933
933
934 if ret:
934 if ret:
935 msg = 'output changed and ' + describe(ret)
935 msg = 'output changed and ' + describe(ret)
936 else:
936 else:
937 msg = 'output changed'
937 msg = 'output changed'
938
938
939 self.fail(msg)
939 self.fail(msg)
940 elif ret:
940 elif ret:
941 self.fail(describe(ret))
941 self.fail(describe(ret))
942
942
943 def tearDown(self):
943 def tearDown(self):
944 """Tasks to perform after run()."""
944 """Tasks to perform after run()."""
945 for entry in self._daemonpids:
945 for entry in self._daemonpids:
946 killdaemons(entry)
946 killdaemons(entry)
947 self._daemonpids = []
947 self._daemonpids = []
948
948
949 if self._keeptmpdir:
949 if self._keeptmpdir:
950 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
950 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
951 (self._testtmp.decode('utf-8'),
951 (self._testtmp.decode('utf-8'),
952 self._threadtmp.decode('utf-8')))
952 self._threadtmp.decode('utf-8')))
953 else:
953 else:
954 shutil.rmtree(self._testtmp, True)
954 shutil.rmtree(self._testtmp, True)
955 shutil.rmtree(self._threadtmp, True)
955 shutil.rmtree(self._threadtmp, True)
956
956
957 if self._usechg:
957 if self._usechg:
958 # chgservers will stop automatically after they find the socket
958 # chgservers will stop automatically after they find the socket
959 # files are deleted
959 # files are deleted
960 shutil.rmtree(self._chgsockdir, True)
960 shutil.rmtree(self._chgsockdir, True)
961
961
962 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
962 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
963 and not self._debug and self._out:
963 and not self._debug and self._out:
964 with open(self.errpath, 'wb') as f:
964 with open(self.errpath, 'wb') as f:
965 for line in self._out:
965 for line in self._out:
966 f.write(line)
966 f.write(line)
967
967
968 vlog("# Ret was:", self._ret, '(%s)' % self.name)
968 vlog("# Ret was:", self._ret, '(%s)' % self.name)
969
969
970 def _run(self, env):
970 def _run(self, env):
971 # This should be implemented in child classes to run tests.
971 # This should be implemented in child classes to run tests.
972 raise unittest.SkipTest('unknown test type')
972 raise unittest.SkipTest('unknown test type')
973
973
974 def abort(self):
974 def abort(self):
975 """Terminate execution of this test."""
975 """Terminate execution of this test."""
976 self._aborted = True
976 self._aborted = True
977
977
978 def _portmap(self, i):
978 def _portmap(self, i):
979 offset = b'' if i == 0 else b'%d' % i
979 offset = b'' if i == 0 else b'%d' % i
980 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
980 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
981
981
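# A minimal sketch of what _portmap() produces, assuming a hypothetical
# start port of 20059. The second mapping would then be:
#
#     pattern, repl = (br':20060\b', b':$HGPORT1')
#     re.sub(pattern, repl, b'listening at http://localhost:20060/')
#     # -> b'listening at http://localhost:$HGPORT1/'
#
# i.e. literal port numbers in test output are rewritten to the stable
# $HGPORT/$HGPORT1/$HGPORT2 placeholders before comparison.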
982 def _getreplacements(self):
982 def _getreplacements(self):
983 """Obtain a mapping of text replacements to apply to test output.
983 """Obtain a mapping of text replacements to apply to test output.
984
984
985 Test output needs to be normalized so it can be compared to expected
985 Test output needs to be normalized so it can be compared to expected
986 output. This function defines how some of that normalization will
986 output. This function defines how some of that normalization will
987 occur.
987 occur.
988 """
988 """
989 r = [
989 r = [
990 # This list should be parallel to defineport in _getenv
990 # This list should be parallel to defineport in _getenv
991 self._portmap(0),
991 self._portmap(0),
992 self._portmap(1),
992 self._portmap(1),
993 self._portmap(2),
993 self._portmap(2),
994 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
994 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
995 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
995 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
996 ]
996 ]
997 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
997 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
998
998
999 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
999 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1000
1000
1001 if os.path.exists(replacementfile):
1001 if os.path.exists(replacementfile):
1002 data = {}
1002 data = {}
1003 with open(replacementfile, mode='rb') as source:
1003 with open(replacementfile, mode='rb') as source:
1004 # the intermediate 'compile' step helps with debugging
1004 # the intermediate 'compile' step helps with debugging
1005 code = compile(source.read(), replacementfile, 'exec')
1005 code = compile(source.read(), replacementfile, 'exec')
1006 exec(code, data)
1006 exec(code, data)
1007 for value in data.get('substitutions', ()):
1007 for value in data.get('substitutions', ()):
1008 if len(value) != 2:
1008 if len(value) != 2:
1009 msg = 'malformed substitution in %s: %r'
1009 msg = 'malformed substitution in %s: %r'
1010 msg %= (replacementfile, value)
1010 msg %= (replacementfile, value)
1011 raise ValueError(msg)
1011 raise ValueError(msg)
1012 r.append(value)
1012 r.append(value)
1013 return r
1013 return r
1014
1014
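# A hedged sketch of the tests/common-pattern.py file consulted above
# (the file contents here are hypothetical). The exec()ed source is
# expected to define a module-level 'substitutions' list of
# (pattern, replacement) pairs, each later applied to test output via
# re.sub(); entries of any other length raise ValueError:
#
#     # common-pattern.py (illustrative only)
#     substitutions = [
#         (br'\bdevel-warn: .*', b'devel-warn: * (glob)'),
#     ]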
1015 def _escapepath(self, p):
1015 def _escapepath(self, p):
1016 if os.name == 'nt':
1016 if os.name == 'nt':
1017 return (
1017 return (
1018 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1018 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1019 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1019 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1020 for c in [p[i:i + 1] for i in range(len(p))]))
1020 for c in [p[i:i + 1] for i in range(len(p))]))
1021 )
1021 )
1022 else:
1022 else:
1023 return re.escape(p)
1023 return re.escape(p)
1024
1024
1025 def _localip(self):
1025 def _localip(self):
1026 if self._useipv6:
1026 if self._useipv6:
1027 return b'::1'
1027 return b'::1'
1028 else:
1028 else:
1029 return b'127.0.0.1'
1029 return b'127.0.0.1'
1030
1030
1031 def _genrestoreenv(self, testenv):
1031 def _genrestoreenv(self, testenv):
1032 """Generate a script that can be used by tests to restore the original
1032 """Generate a script that can be used by tests to restore the original
1033 environment."""
1033 environment."""
1034 # Put the restoreenv script inside self._threadtmp
1034 # Put the restoreenv script inside self._threadtmp
1035 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1035 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1036 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1036 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1037
1037
1038 # Only restore environment variable names that the shell allows
1038 # Only restore environment variable names that the shell allows
1039 # us to export.
1039 # us to export.
1040 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1040 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1041
1041
1042 # Do not restore these variables; otherwise tests would fail.
1042 # Do not restore these variables; otherwise tests would fail.
1043 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1043 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1044
1044
1045 with open(scriptpath, 'w') as envf:
1045 with open(scriptpath, 'w') as envf:
1046 for name, value in origenviron.items():
1046 for name, value in origenviron.items():
1047 if not name_regex.match(name):
1047 if not name_regex.match(name):
1048 # Skip environment variables with unusual names not
1048 # Skip environment variables with unusual names not
1049 # allowed by most shells.
1049 # allowed by most shells.
1050 continue
1050 continue
1051 if name in reqnames:
1051 if name in reqnames:
1052 continue
1052 continue
1053 envf.write('%s=%s\n' % (name, shellquote(value)))
1053 envf.write('%s=%s\n' % (name, shellquote(value)))
1054
1054
1055 for name in testenv:
1055 for name in testenv:
1056 if name in origenviron or name in reqnames:
1056 if name in origenviron or name in reqnames:
1057 continue
1057 continue
1058 envf.write('unset %s\n' % (name,))
1058 envf.write('unset %s\n' % (name,))
1059
1059
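# An illustrative sketch (variable names and values are hypothetical) of
# the restoreenv.sh script written above: assignments for the original
# environment plus 'unset' lines for variables the harness introduced.
#
#     LANG='en_US.UTF-8'
#     PATH='/usr/local/bin:/usr/bin:/bin'
#     unset HGPORT
#     unset HGUSER
#
# A test can then return to its caller's environment with:
#
#     $ . "$HGTEST_RESTOREENV"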
1060 def _getenv(self):
1060 def _getenv(self):
1061 """Obtain environment variables to use during test execution."""
1061 """Obtain environment variables to use during test execution."""
1062 def defineport(i):
1062 def defineport(i):
1063 offset = '' if i == 0 else '%s' % i
1063 offset = '' if i == 0 else '%s' % i
1064 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1064 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1065 env = os.environ.copy()
1065 env = os.environ.copy()
1066 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1066 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1067 env['HGEMITWARNINGS'] = '1'
1067 env['HGEMITWARNINGS'] = '1'
1068 env['TESTTMP'] = _strpath(self._testtmp)
1068 env['TESTTMP'] = _strpath(self._testtmp)
1069 env['TESTNAME'] = self.name
1069 env['TESTNAME'] = self.name
1070 env['HOME'] = _strpath(self._testtmp)
1070 env['HOME'] = _strpath(self._testtmp)
1071 # This number should match portneeded in _getport
1071 # This number should match portneeded in _getport
1072 for port in xrange(3):
1072 for port in xrange(3):
1073 # This list should be parallel to _portmap in _getreplacements
1073 # This list should be parallel to _portmap in _getreplacements
1074 defineport(port)
1074 defineport(port)
1075 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1075 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1076 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1076 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1077 b'daemon.pids'))
1077 b'daemon.pids'))
1078 env["HGEDITOR"] = ('"' + sys.executable + '"'
1078 env["HGEDITOR"] = ('"' + sys.executable + '"'
1079 + ' -c "import sys; sys.exit(0)"')
1079 + ' -c "import sys; sys.exit(0)"')
1080 env["HGMERGE"] = "internal:merge"
1080 env["HGMERGE"] = "internal:merge"
1081 env["HGUSER"] = "test"
1081 env["HGUSER"] = "test"
1082 env["HGENCODING"] = "ascii"
1082 env["HGENCODING"] = "ascii"
1083 env["HGENCODINGMODE"] = "strict"
1083 env["HGENCODINGMODE"] = "strict"
1084 env["HGHOSTNAME"] = "test-hostname"
1084 env["HGHOSTNAME"] = "test-hostname"
1085 env['HGIPV6'] = str(int(self._useipv6))
1085 env['HGIPV6'] = str(int(self._useipv6))
1086 if 'HGCATAPULTSERVERPIPE' not in env:
1086 if 'HGCATAPULTSERVERPIPE' not in env:
1087 env['HGCATAPULTSERVERPIPE'] = os.devnull
1087 env['HGCATAPULTSERVERPIPE'] = os.devnull
1088
1088
1089 extraextensions = []
1089 extraextensions = []
1090 for opt in self._extraconfigopts:
1090 for opt in self._extraconfigopts:
1091 section, key = opt.encode('utf-8').split(b'.', 1)
1091 section, key = opt.encode('utf-8').split(b'.', 1)
1092 if section != 'extensions':
1092 if section != 'extensions':
1093 continue
1093 continue
1094 name = key.split(b'=', 1)[0]
1094 name = key.split(b'=', 1)[0]
1095 extraextensions.append(name)
1095 extraextensions.append(name)
1096
1096
1097 if extraextensions:
1097 if extraextensions:
1098 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1098 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1099
1099
1100 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1100 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1101 # IP addresses.
1101 # IP addresses.
1102 env['LOCALIP'] = _strpath(self._localip())
1102 env['LOCALIP'] = _strpath(self._localip())
1103
1103
1104 # Reset some environment variables to well-known values so that
1104 # Reset some environment variables to well-known values so that
1105 # the tests produce repeatable output.
1105 # the tests produce repeatable output.
1106 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1106 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1107 env['TZ'] = 'GMT'
1107 env['TZ'] = 'GMT'
1108 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1108 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1109 env['COLUMNS'] = '80'
1109 env['COLUMNS'] = '80'
1110 env['TERM'] = 'xterm'
1110 env['TERM'] = 'xterm'
1111
1111
1112 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1112 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1113 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1113 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1114 'NO_PROXY CHGDEBUG').split():
1114 'NO_PROXY CHGDEBUG').split():
1115 if k in env:
1115 if k in env:
1116 del env[k]
1116 del env[k]
1117
1117
1118 # unset env related to hooks
1118 # unset env related to hooks
1119 for k in list(env):
1119 for k in list(env):
1120 if k.startswith('HG_'):
1120 if k.startswith('HG_'):
1121 del env[k]
1121 del env[k]
1122
1122
1123 if self._usechg:
1123 if self._usechg:
1124 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1124 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1125
1125
1126 return env
1126 return env
1127
1127
1128 def _createhgrc(self, path):
1128 def _createhgrc(self, path):
1129 """Create an hgrc file for this test."""
1129 """Create an hgrc file for this test."""
1130 with open(path, 'wb') as hgrc:
1130 with open(path, 'wb') as hgrc:
1131 hgrc.write(b'[ui]\n')
1131 hgrc.write(b'[ui]\n')
1132 hgrc.write(b'slash = True\n')
1132 hgrc.write(b'slash = True\n')
1133 hgrc.write(b'interactive = False\n')
1133 hgrc.write(b'interactive = False\n')
1134 hgrc.write(b'mergemarkers = detailed\n')
1134 hgrc.write(b'mergemarkers = detailed\n')
1135 hgrc.write(b'promptecho = True\n')
1135 hgrc.write(b'promptecho = True\n')
1136 hgrc.write(b'[defaults]\n')
1136 hgrc.write(b'[defaults]\n')
1137 hgrc.write(b'[devel]\n')
1137 hgrc.write(b'[devel]\n')
1138 hgrc.write(b'all-warnings = true\n')
1138 hgrc.write(b'all-warnings = true\n')
1139 hgrc.write(b'default-date = 0 0\n')
1139 hgrc.write(b'default-date = 0 0\n')
1140 hgrc.write(b'[largefiles]\n')
1140 hgrc.write(b'[largefiles]\n')
1141 hgrc.write(b'usercache = %s\n' %
1141 hgrc.write(b'usercache = %s\n' %
1142 (os.path.join(self._testtmp, b'.cache/largefiles')))
1142 (os.path.join(self._testtmp, b'.cache/largefiles')))
1143 hgrc.write(b'[lfs]\n')
1143 hgrc.write(b'[lfs]\n')
1144 hgrc.write(b'usercache = %s\n' %
1144 hgrc.write(b'usercache = %s\n' %
1145 (os.path.join(self._testtmp, b'.cache/lfs')))
1145 (os.path.join(self._testtmp, b'.cache/lfs')))
1146 hgrc.write(b'[web]\n')
1146 hgrc.write(b'[web]\n')
1147 hgrc.write(b'address = localhost\n')
1147 hgrc.write(b'address = localhost\n')
1148 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1148 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1149 hgrc.write(b'server-header = testing stub value\n')
1149 hgrc.write(b'server-header = testing stub value\n')
1150
1150
1151 for opt in self._extraconfigopts:
1151 for opt in self._extraconfigopts:
1152 section, key = opt.encode('utf-8').split(b'.', 1)
1152 section, key = opt.encode('utf-8').split(b'.', 1)
1153 assert b'=' in key, ('extra config opt %s must '
1153 assert b'=' in key, ('extra config opt %s must '
1154 'have an = for assignment' % opt)
1154 'have an = for assignment' % opt)
1155 hgrc.write(b'[%s]\n%s\n' % (section, key))
1155 hgrc.write(b'[%s]\n%s\n' % (section, key))
1156
1156
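# A small, hypothetical example of how an extra config option reaches the
# generated hgrc: an entry such as 'phases.publish=False' in
# self._extraconfigopts is split on the first '.' and written out as
#
#     [phases]
#     publish=False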
1157 def fail(self, msg):
1157 def fail(self, msg):
1158 # unittest differentiates between errored and failed.
1158 # unittest differentiates between errored and failed.
1159 # Failed is denoted by AssertionError (by default at least).
1159 # Failed is denoted by AssertionError (by default at least).
1160 raise AssertionError(msg)
1160 raise AssertionError(msg)
1161
1161
1162 def _runcommand(self, cmd, env, normalizenewlines=False):
1162 def _runcommand(self, cmd, env, normalizenewlines=False):
1163 """Run command in a sub-process, capturing the output (stdout and
1163 """Run command in a sub-process, capturing the output (stdout and
1164 stderr).
1164 stderr).
1165
1165
1166 Return a tuple (exitcode, output). output is None in debug mode.
1166 Return a tuple (exitcode, output). output is None in debug mode.
1167 """
1167 """
1168 if self._debug:
1168 if self._debug:
1169 proc = subprocess.Popen(_strpath(cmd), shell=True,
1169 proc = subprocess.Popen(_strpath(cmd), shell=True,
1170 cwd=_strpath(self._testtmp),
1170 cwd=_strpath(self._testtmp),
1171 env=env)
1171 env=env)
1172 ret = proc.wait()
1172 ret = proc.wait()
1173 return (ret, None)
1173 return (ret, None)
1174
1174
1175 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1175 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1176 def cleanup():
1176 def cleanup():
1177 terminate(proc)
1177 terminate(proc)
1178 ret = proc.wait()
1178 ret = proc.wait()
1179 if ret == 0:
1179 if ret == 0:
1180 ret = signal.SIGTERM << 8
1180 ret = signal.SIGTERM << 8
1181 killdaemons(env['DAEMON_PIDS'])
1181 killdaemons(env['DAEMON_PIDS'])
1182 return ret
1182 return ret
1183
1183
1184 output = b''
1184 output = b''
1185 proc.tochild.close()
1185 proc.tochild.close()
1186
1186
1187 try:
1187 try:
1188 output = proc.fromchild.read()
1188 output = proc.fromchild.read()
1189 except KeyboardInterrupt:
1189 except KeyboardInterrupt:
1190 vlog('# Handling keyboard interrupt')
1190 vlog('# Handling keyboard interrupt')
1191 cleanup()
1191 cleanup()
1192 raise
1192 raise
1193
1193
1194 ret = proc.wait()
1194 ret = proc.wait()
1195 if wifexited(ret):
1195 if wifexited(ret):
1196 ret = os.WEXITSTATUS(ret)
1196 ret = os.WEXITSTATUS(ret)
1197
1197
1198 if proc.timeout:
1198 if proc.timeout:
1199 ret = 'timeout'
1199 ret = 'timeout'
1200
1200
1201 if ret:
1201 if ret:
1202 killdaemons(env['DAEMON_PIDS'])
1202 killdaemons(env['DAEMON_PIDS'])
1203
1203
1204 for s, r in self._getreplacements():
1204 for s, r in self._getreplacements():
1205 output = re.sub(s, r, output)
1205 output = re.sub(s, r, output)
1206
1206
1207 if normalizenewlines:
1207 if normalizenewlines:
1208 output = output.replace(b'\r\n', b'\n')
1208 output = output.replace(b'\r\n', b'\n')
1209
1209
1210 return ret, output.splitlines(True)
1210 return ret, output.splitlines(True)
1211
1211
1212 class PythonTest(Test):
1212 class PythonTest(Test):
1213 """A Python-based test."""
1213 """A Python-based test."""
1214
1214
1215 @property
1215 @property
1216 def refpath(self):
1216 def refpath(self):
1217 return os.path.join(self._testdir, b'%s.out' % self.bname)
1217 return os.path.join(self._testdir, b'%s.out' % self.bname)
1218
1218
1219 def _run(self, env):
1219 def _run(self, env):
1220 py3kswitch = self._py3kwarnings and b' -3' or b''
1220 py3kswitch = self._py3kwarnings and b' -3' or b''
1221 cmd = b'"%s"%s "%s"' % (PYTHON, py3kswitch, self.path)
1221 cmd = b'"%s"%s "%s"' % (PYTHON, py3kswitch, self.path)
1222 vlog("# Running", cmd)
1222 vlog("# Running", cmd)
1223 normalizenewlines = os.name == 'nt'
1223 normalizenewlines = os.name == 'nt'
1224 result = self._runcommand(cmd, env,
1224 result = self._runcommand(cmd, env,
1225 normalizenewlines=normalizenewlines)
1225 normalizenewlines=normalizenewlines)
1226 if self._aborted:
1226 if self._aborted:
1227 raise KeyboardInterrupt()
1227 raise KeyboardInterrupt()
1228
1228
1229 return result
1229 return result
1230
1230
1231 # Some glob patterns apply only in some circumstances, so the script
1231 # Some glob patterns apply only in some circumstances, so the script
1232 # might want to remove (glob) annotations that otherwise should be
1232 # might want to remove (glob) annotations that otherwise should be
1233 # retained.
1233 # retained.
1234 checkcodeglobpats = [
1234 checkcodeglobpats = [
1235 # On Windows it looks like \ doesn't require a (glob), but we know
1235 # On Windows it looks like \ doesn't require a (glob), but we know
1236 # better.
1236 # better.
1237 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1237 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1238 re.compile(br'^moving \S+/.*[^)]$'),
1238 re.compile(br'^moving \S+/.*[^)]$'),
1239 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1239 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1240 # Not all platforms have 127.0.0.1 as loopback (though most do),
1240 # Not all platforms have 127.0.0.1 as loopback (though most do),
1241 # so we always glob that too.
1241 # so we always glob that too.
1242 re.compile(br'.*\$LOCALIP.*$'),
1242 re.compile(br'.*\$LOCALIP.*$'),
1243 ]
1243 ]
1244
1244
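# For illustration: these patterns are consulted by TTest.globmatch()
# below. On platforms where os.altsep is set, an exact match would
# normally be flagged as needing a "(glob)" annotation (because "/" must
# also match "\"), but lines such as
#
#   pushing to $TESTTMP/dest
#   pulling from $TESTTMP/src
#
# and anything containing $LOCALIP are exempted, so .t files may keep
# them without a trailing "(glob)".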
1245 bchr = chr
1245 bchr = chr
1246 if PYTHON3:
1246 if PYTHON3:
1247 bchr = lambda x: bytes([x])
1247 bchr = lambda x: bytes([x])
1248
1248
1249 class TTest(Test):
1249 class TTest(Test):
1250 """A "t test" is a test backed by a .t file."""
1250 """A "t test" is a test backed by a .t file."""
1251
1251
1252 SKIPPED_PREFIX = b'skipped: '
1252 SKIPPED_PREFIX = b'skipped: '
1253 FAILED_PREFIX = b'hghave check failed: '
1253 FAILED_PREFIX = b'hghave check failed: '
1254 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1254 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1255
1255
1256 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1256 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1257 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1257 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1258 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1258 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1259
1259
1260 def __init__(self, path, *args, **kwds):
1260 def __init__(self, path, *args, **kwds):
1261 # accept an extra "case" parameter
1261 # accept an extra "case" parameter
1262 case = kwds.pop('case', [])
1262 case = kwds.pop('case', [])
1263 self._case = case
1263 self._case = case
1264 self._allcases = {x for y in parsettestcases(path) for x in y}
1264 self._allcases = {x for y in parsettestcases(path) for x in y}
1265 super(TTest, self).__init__(path, *args, **kwds)
1265 super(TTest, self).__init__(path, *args, **kwds)
1266 if case:
1266 if case:
1267 casepath = b'#'.join(case)
1267 casepath = b'#'.join(case)
1268 self.name = '%s#%s' % (self.name, _strpath(casepath))
1268 self.name = '%s#%s' % (self.name, _strpath(casepath))
1269 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1269 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1270 self._tmpname += b'-%s' % casepath
1270 self._tmpname += b'-%s' % casepath
1271 self._have = {}
1271 self._have = {}
1272
1272
1273 @property
1273 @property
1274 def refpath(self):
1274 def refpath(self):
1275 return os.path.join(self._testdir, self.bname)
1275 return os.path.join(self._testdir, self.bname)
1276
1276
1277 def _run(self, env):
1277 def _run(self, env):
1278 with open(self.path, 'rb') as f:
1278 with open(self.path, 'rb') as f:
1279 lines = f.readlines()
1279 lines = f.readlines()
1280
1280
1281 # .t file is both reference output and the test input, keep reference
1281 # .t file is both reference output and the test input, keep reference
1282 # output updated with the test input. This avoids some race
1282 # output updated with the test input. This avoids some race
1283 # conditions where the reference output does not match the actual test.
1283 # conditions where the reference output does not match the actual test.
1284 if self._refout is not None:
1284 if self._refout is not None:
1285 self._refout = lines
1285 self._refout = lines
1286
1286
1287 salt, script, after, expected = self._parsetest(lines)
1287 salt, script, after, expected = self._parsetest(lines)
1288
1288
1289 # Write out the generated script.
1289 # Write out the generated script.
1290 fname = b'%s.sh' % self._testtmp
1290 fname = b'%s.sh' % self._testtmp
1291 with open(fname, 'wb') as f:
1291 with open(fname, 'wb') as f:
1292 for l in script:
1292 for l in script:
1293 f.write(l)
1293 f.write(l)
1294
1294
1295 cmd = b'%s "%s"' % (self._shell, fname)
1295 cmd = b'%s "%s"' % (self._shell, fname)
1296 vlog("# Running", cmd)
1296 vlog("# Running", cmd)
1297
1297
1298 exitcode, output = self._runcommand(cmd, env)
1298 exitcode, output = self._runcommand(cmd, env)
1299
1299
1300 if self._aborted:
1300 if self._aborted:
1301 raise KeyboardInterrupt()
1301 raise KeyboardInterrupt()
1302
1302
1303 # Do not merge output if skipped. Return hghave message instead.
1303 # Do not merge output if skipped. Return hghave message instead.
1304 # Similarly, with --debug, output is None.
1304 # Similarly, with --debug, output is None.
1305 if exitcode == self.SKIPPED_STATUS or output is None:
1305 if exitcode == self.SKIPPED_STATUS or output is None:
1306 return exitcode, output
1306 return exitcode, output
1307
1307
1308 return self._processoutput(exitcode, output, salt, after, expected)
1308 return self._processoutput(exitcode, output, salt, after, expected)
1309
1309
1310 def _hghave(self, reqs):
1310 def _hghave(self, reqs):
1311 allreqs = b' '.join(reqs)
1311 allreqs = b' '.join(reqs)
1312 if allreqs in self._have:
1312 if allreqs in self._have:
1313 return self._have.get(allreqs)
1313 return self._have.get(allreqs)
1314
1314
1315 # TODO do something smarter when all other uses of hghave are gone.
1315 # TODO do something smarter when all other uses of hghave are gone.
1316 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1316 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1317 tdir = runtestdir.replace(b'\\', b'/')
1317 tdir = runtestdir.replace(b'\\', b'/')
1318 proc = Popen4(b'%s -c "%s/hghave %s"' %
1318 proc = Popen4(b'%s -c "%s/hghave %s"' %
1319 (self._shell, tdir, allreqs),
1319 (self._shell, tdir, allreqs),
1320 self._testtmp, 0, self._getenv())
1320 self._testtmp, 0, self._getenv())
1321 stdout, stderr = proc.communicate()
1321 stdout, stderr = proc.communicate()
1322 ret = proc.wait()
1322 ret = proc.wait()
1323 if wifexited(ret):
1323 if wifexited(ret):
1324 ret = os.WEXITSTATUS(ret)
1324 ret = os.WEXITSTATUS(ret)
1325 if ret == 2:
1325 if ret == 2:
1326 print(stdout.decode('utf-8'))
1326 print(stdout.decode('utf-8'))
1327 sys.exit(1)
1327 sys.exit(1)
1328
1328
1329 if ret != 0:
1329 if ret != 0:
1330 self._have[allreqs] = (False, stdout)
1330 self._have[allreqs] = (False, stdout)
1331 return False, stdout
1331 return False, stdout
1332
1332
1333 if b'slow' in reqs:
1333 if b'slow' in reqs:
1334 self._timeout = self._slowtimeout
1334 self._timeout = self._slowtimeout
1335
1335
1336 self._have[allreqs] = (True, None)
1336 self._have[allreqs] = (True, None)
1337 return True, None
1337 return True, None
1338
1338
1339 def _iftest(self, args):
1339 def _iftest(self, args):
1340 # implements "#if"
1340 # implements "#if"
1341 reqs = []
1341 reqs = []
1342 for arg in args:
1342 for arg in args:
1343 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1343 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1344 if arg[3:] in self._case:
1344 if arg[3:] in self._case:
1345 return False
1345 return False
1346 elif arg in self._allcases:
1346 elif arg in self._allcases:
1347 if arg not in self._case:
1347 if arg not in self._case:
1348 return False
1348 return False
1349 else:
1349 else:
1350 reqs.append(arg)
1350 reqs.append(arg)
1351 return self._hghave(reqs)[0]
1351 return self._hghave(reqs)[0]
1352
1352
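# A brief, hedged illustration of how these checks are driven from a .t
# file. Given
#
#   #require serve
#   #if windows
#     $ echo on-windows-only
#   #endif
#
# _hghave([b'serve']) runs "<shell> -c '<run-tests dir>/hghave serve'";
# a zero exit status means the feature is present, anything else turns
# the whole test into a skip. _iftest([b'windows']) then decides whether
# the conditional block is executed, consulting self._case first for
# names declared with #testcases and falling back to hghave otherwise.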
1353 def _parsetest(self, lines):
1353 def _parsetest(self, lines):
1354 # We generate a shell script which outputs unique markers to line
1354 # We generate a shell script which outputs unique markers to line
1355 # up script results with our source. These markers include input
1355 # up script results with our source. These markers include input
1356 # line number and the last return code.
1356 # line number and the last return code.
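# For example (the salt value below is hypothetical), a command taken
# from source line 12 is preceded in the generated script by
#
#     echo SALT1526000000 12 $?
#
# so the captured output later contains a marker line such as
#
#     SALT1526000000 12 0
#
# telling _processoutput() that subsequent output belongs to the command
# at line 12 and that the previous command exited with status 0.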
1357 salt = b"SALT%d" % time.time()
1357 salt = b"SALT%d" % time.time()
1358 def addsalt(line, inpython):
1358 def addsalt(line, inpython):
1359 if inpython:
1359 if inpython:
1360 script.append(b'%s %d 0\n' % (salt, line))
1360 script.append(b'%s %d 0\n' % (salt, line))
1361 else:
1361 else:
1362 script.append(b'echo %s %d $?\n' % (salt, line))
1362 script.append(b'echo %s %d $?\n' % (salt, line))
1363 active = []
1363 active = []
1364 session = str(uuid.uuid4())
1364 session = str(uuid.uuid4())
1365 if PYTHON3:
1365 if PYTHON3:
1366 session = session.encode('ascii')
1366 session = session.encode('ascii')
1367 def toggletrace(cmd):
1367 def toggletrace(cmd):
1368 if isinstance(cmd, str):
1368 if isinstance(cmd, str):
1369 quoted = shellquote(cmd.strip())
1369 quoted = shellquote(cmd.strip())
1370 else:
1370 else:
1371 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1371 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1372 quoted = quoted.replace(b'\\', b'\\\\')
1372 quoted = quoted.replace(b'\\', b'\\\\')
1373 if active:
1373 if active:
1374 script.append(
1374 script.append(
1375 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1375 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1376 session, active[0]))
1376 session, active[0]))
1377 script.append(
1377 script.append(
1378 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1378 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1379 session, quoted))
1379 session, quoted))
1380 active[0:] = [quoted]
1380 active[0:] = [quoted]
1381
1381
1382 script = []
1382 script = []
1383
1383
1384 # After we run the shell script, we re-unify the script output
1384 # After we run the shell script, we re-unify the script output
1385 # with non-active parts of the source, with synchronization by our
1385 # with non-active parts of the source, with synchronization by our
1386 # SALT line number markers. The after table contains the non-active
1386 # SALT line number markers. The after table contains the non-active
1387 # components, ordered by line number.
1387 # components, ordered by line number.
1388 after = {}
1388 after = {}
1389
1389
1390 # Expected shell script output.
1390 # Expected shell script output.
1391 expected = {}
1391 expected = {}
1392
1392
1393 pos = prepos = -1
1393 pos = prepos = -1
1394
1394
1395 # True or False when in a true or false conditional section
1395 # True or False when in a true or false conditional section
1396 skipping = None
1396 skipping = None
1397
1397
1398 # We keep track of whether or not we're in a Python block so we
1398 # We keep track of whether or not we're in a Python block so we
1399 # can generate the surrounding doctest magic.
1399 # can generate the surrounding doctest magic.
1400 inpython = False
1400 inpython = False
1401
1401
1402 if self._debug:
1402 if self._debug:
1403 script.append(b'set -x\n')
1403 script.append(b'set -x\n')
1404 if self._hgcommand != b'hg':
1404 if self._hgcommand != b'hg':
1405 script.append(b'alias hg="%s"\n' % self._hgcommand)
1405 script.append(b'alias hg="%s"\n' % self._hgcommand)
1406 if os.getenv('MSYSTEM'):
1406 if os.getenv('MSYSTEM'):
1407 script.append(b'alias pwd="pwd -W"\n')
1407 script.append(b'alias pwd="pwd -W"\n')
1408
1408
1409 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1409 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1410 if hgcatapult and hgcatapult != os.devnull:
1410 if hgcatapult and hgcatapult != os.devnull:
1411 # Kludge: use a while loop to keep the pipe from getting
1411 # Kludge: use a while loop to keep the pipe from getting
1412 # closed by our echo commands. The still-running file gets
1412 # closed by our echo commands. The still-running file gets
1413 # reaped at the end of the script, which causes the while
1413 # reaped at the end of the script, which causes the while
1414 # loop to exit and closes the pipe. Sigh.
1414 # loop to exit and closes the pipe. Sigh.
1415 script.append(
1415 script.append(
1416 b'rtendtracing() {\n'
1416 b'rtendtracing() {\n'
1417 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1417 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1418 b' rm -f "$TESTTMP/.still-running"\n'
1418 b' rm -f "$TESTTMP/.still-running"\n'
1419 b'}\n'
1419 b'}\n'
1420 b'trap "rtendtracing" 0\n'
1420 b'trap "rtendtracing" 0\n'
1421 b'touch "$TESTTMP/.still-running"\n'
1421 b'touch "$TESTTMP/.still-running"\n'
1422 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1422 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1423 b'> $HGCATAPULTSERVERPIPE &\n'
1423 b'> $HGCATAPULTSERVERPIPE &\n'
1424 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1424 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1425 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1425 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1426 % {
1426 % {
1427 'name': self.name,
1427 'name': self.name,
1428 'session': session,
1428 'session': session,
1429 }
1429 }
1430 )
1430 )
1431
1431
1432 if self._case:
1432 if self._case:
1433 casestr = b'#'.join(self._case)
1433 casestr = b'#'.join(self._case)
1434 if isinstance(self._case, str):
1434 if isinstance(self._case, str):
1435 quoted = shellquote(casestr)
1435 quoted = shellquote(casestr)
1436 else:
1436 else:
1437 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1437 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1438 script.append(b'TESTCASE=%s\n' % quoted)
1438 script.append(b'TESTCASE=%s\n' % quoted)
1439 script.append(b'export TESTCASE\n')
1439 script.append(b'export TESTCASE\n')
1440
1440
1441 n = 0
1441 n = 0
1442 for n, l in enumerate(lines):
1442 for n, l in enumerate(lines):
1443 if not l.endswith(b'\n'):
1443 if not l.endswith(b'\n'):
1444 l += b'\n'
1444 l += b'\n'
1445 if l.startswith(b'#require'):
1445 if l.startswith(b'#require'):
1446 lsplit = l.split()
1446 lsplit = l.split()
1447 if len(lsplit) < 2 or lsplit[0] != b'#require':
1447 if len(lsplit) < 2 or lsplit[0] != b'#require':
1448 after.setdefault(pos, []).append(' !!! invalid #require\n')
1448 after.setdefault(pos, []).append(' !!! invalid #require\n')
1449 if not skipping:
1449 if not skipping:
1450 haveresult, message = self._hghave(lsplit[1:])
1450 haveresult, message = self._hghave(lsplit[1:])
1451 if not haveresult:
1451 if not haveresult:
1452 script = [b'echo "%s"\nexit 80\n' % message]
1452 script = [b'echo "%s"\nexit 80\n' % message]
1453 break
1453 break
1454 after.setdefault(pos, []).append(l)
1454 after.setdefault(pos, []).append(l)
1455 elif l.startswith(b'#if'):
1455 elif l.startswith(b'#if'):
1456 lsplit = l.split()
1456 lsplit = l.split()
1457 if len(lsplit) < 2 or lsplit[0] != b'#if':
1457 if len(lsplit) < 2 or lsplit[0] != b'#if':
1458 after.setdefault(pos, []).append(' !!! invalid #if\n')
1458 after.setdefault(pos, []).append(' !!! invalid #if\n')
1459 if skipping is not None:
1459 if skipping is not None:
1460 after.setdefault(pos, []).append(' !!! nested #if\n')
1460 after.setdefault(pos, []).append(' !!! nested #if\n')
1461 skipping = not self._iftest(lsplit[1:])
1461 skipping = not self._iftest(lsplit[1:])
1462 after.setdefault(pos, []).append(l)
1462 after.setdefault(pos, []).append(l)
1463 elif l.startswith(b'#else'):
1463 elif l.startswith(b'#else'):
1464 if skipping is None:
1464 if skipping is None:
1465 after.setdefault(pos, []).append(' !!! missing #if\n')
1465 after.setdefault(pos, []).append(' !!! missing #if\n')
1466 skipping = not skipping
1466 skipping = not skipping
1467 after.setdefault(pos, []).append(l)
1467 after.setdefault(pos, []).append(l)
1468 elif l.startswith(b'#endif'):
1468 elif l.startswith(b'#endif'):
1469 if skipping is None:
1469 if skipping is None:
1470 after.setdefault(pos, []).append(' !!! missing #if\n')
1470 after.setdefault(pos, []).append(' !!! missing #if\n')
1471 skipping = None
1471 skipping = None
1472 after.setdefault(pos, []).append(l)
1472 after.setdefault(pos, []).append(l)
1473 elif skipping:
1473 elif skipping:
1474 after.setdefault(pos, []).append(l)
1474 after.setdefault(pos, []).append(l)
1475 elif l.startswith(b' >>> '): # python inlines
1475 elif l.startswith(b' >>> '): # python inlines
1476 after.setdefault(pos, []).append(l)
1476 after.setdefault(pos, []).append(l)
1477 prepos = pos
1477 prepos = pos
1478 pos = n
1478 pos = n
1479 if not inpython:
1479 if not inpython:
1480 # We've just entered a Python block. Add the header.
1480 # We've just entered a Python block. Add the header.
1481 inpython = True
1481 inpython = True
1482 addsalt(prepos, False) # Make sure we report the exit code.
1482 addsalt(prepos, False) # Make sure we report the exit code.
1483 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1483 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1484 addsalt(n, True)
1484 addsalt(n, True)
1485 script.append(l[2:])
1485 script.append(l[2:])
1486 elif l.startswith(b' ... '): # python inlines
1486 elif l.startswith(b' ... '): # python inlines
1487 after.setdefault(prepos, []).append(l)
1487 after.setdefault(prepos, []).append(l)
1488 script.append(l[2:])
1488 script.append(l[2:])
1489 elif l.startswith(b' $ '): # commands
1489 elif l.startswith(b' $ '): # commands
1490 if inpython:
1490 if inpython:
1491 script.append(b'EOF\n')
1491 script.append(b'EOF\n')
1492 inpython = False
1492 inpython = False
1493 after.setdefault(pos, []).append(l)
1493 after.setdefault(pos, []).append(l)
1494 prepos = pos
1494 prepos = pos
1495 pos = n
1495 pos = n
1496 addsalt(n, False)
1496 addsalt(n, False)
1497 rawcmd = l[4:]
1497 rawcmd = l[4:]
1498 cmd = rawcmd.split()
1498 cmd = rawcmd.split()
1499 toggletrace(rawcmd)
1499 toggletrace(rawcmd)
1500 if len(cmd) == 2 and cmd[0] == b'cd':
1500 if len(cmd) == 2 and cmd[0] == b'cd':
1501 l = b' $ cd %s || exit 1\n' % cmd[1]
1501 l = b' $ cd %s || exit 1\n' % cmd[1]
1502 script.append(rawcmd)
1502 script.append(rawcmd)
1503 elif l.startswith(b' > '): # continuations
1503 elif l.startswith(b' > '): # continuations
1504 after.setdefault(prepos, []).append(l)
1504 after.setdefault(prepos, []).append(l)
1505 script.append(l[4:])
1505 script.append(l[4:])
1506 elif l.startswith(b' '): # results
1506 elif l.startswith(b' '): # results
1507 # Queue up a list of expected results.
1507 # Queue up a list of expected results.
1508 expected.setdefault(pos, []).append(l[2:])
1508 expected.setdefault(pos, []).append(l[2:])
1509 else:
1509 else:
1510 if inpython:
1510 if inpython:
1511 script.append(b'EOF\n')
1511 script.append(b'EOF\n')
1512 inpython = False
1512 inpython = False
1513 # Non-command/result. Queue up for merged output.
1513 # Non-command/result. Queue up for merged output.
1514 after.setdefault(pos, []).append(l)
1514 after.setdefault(pos, []).append(l)
1515
1515
1516 if inpython:
1516 if inpython:
1517 script.append(b'EOF\n')
1517 script.append(b'EOF\n')
1518 if skipping is not None:
1518 if skipping is not None:
1519 after.setdefault(pos, []).append(' !!! missing #endif\n')
1519 after.setdefault(pos, []).append(' !!! missing #endif\n')
1520 addsalt(n + 1, False)
1520 addsalt(n + 1, False)
1521 return salt, script, after, expected
1521 return salt, script, after, expected
1522
1522
1523 def _processoutput(self, exitcode, output, salt, after, expected):
1523 def _processoutput(self, exitcode, output, salt, after, expected):
1524 # Merge the script output back into a unified test.
1524 # Merge the script output back into a unified test.
1525 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1525 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1526 if exitcode != 0:
1526 if exitcode != 0:
1527 warnonly = 3
1527 warnonly = 3
1528
1528
1529 pos = -1
1529 pos = -1
1530 postout = []
1530 postout = []
1531 for l in output:
1531 for l in output:
1532 lout, lcmd = l, None
1532 lout, lcmd = l, None
1533 if salt in l:
1533 if salt in l:
1534 lout, lcmd = l.split(salt, 1)
1534 lout, lcmd = l.split(salt, 1)
1535
1535
1536 while lout:
1536 while lout:
1537 if not lout.endswith(b'\n'):
1537 if not lout.endswith(b'\n'):
1538 lout += b' (no-eol)\n'
1538 lout += b' (no-eol)\n'
1539
1539
1540 # Find the expected output at the current position.
1540 # Find the expected output at the current position.
1541 els = [None]
1541 els = [None]
1542 if expected.get(pos, None):
1542 if expected.get(pos, None):
1543 els = expected[pos]
1543 els = expected[pos]
1544
1544
1545 optional = []
1545 optional = []
1546 for i, el in enumerate(els):
1546 for i, el in enumerate(els):
1547 r = False
1547 r = False
1548 if el:
1548 if el:
1549 r, exact = self.linematch(el, lout)
1549 r, exact = self.linematch(el, lout)
1550 if isinstance(r, str):
1550 if isinstance(r, str):
1551 if r == '-glob':
1551 if r == '-glob':
1552 lout = ''.join(el.rsplit(' (glob)', 1))
1552 lout = ''.join(el.rsplit(' (glob)', 1))
1553 r = '' # Warn only this line.
1553 r = '' # Warn only this line.
1554 elif r == "retry":
1554 elif r == "retry":
1555 postout.append(b' ' + el)
1555 postout.append(b' ' + el)
1556 else:
1556 else:
1557 log('\ninfo, unknown linematch result: %r\n' % r)
1557 log('\ninfo, unknown linematch result: %r\n' % r)
1558 r = False
1558 r = False
1559 if r:
1559 if r:
1560 els.pop(i)
1560 els.pop(i)
1561 break
1561 break
1562 if el:
1562 if el:
1563 if el.endswith(b" (?)\n"):
1563 if el.endswith(b" (?)\n"):
1564 optional.append(i)
1564 optional.append(i)
1565 else:
1565 else:
1566 m = optline.match(el)
1566 m = optline.match(el)
1567 if m:
1567 if m:
1568 conditions = [
1568 conditions = [
1569 c for c in m.group(2).split(b' ')]
1569 c for c in m.group(2).split(b' ')]
1570
1570
1571 if not self._iftest(conditions):
1571 if not self._iftest(conditions):
1572 optional.append(i)
1572 optional.append(i)
1573 if exact:
1573 if exact:
1574 # Don't allow line to be matched against a later
1574 # Don't allow line to be matched against a later
1575 # line in the output
1575 # line in the output
1576 els.pop(i)
1576 els.pop(i)
1577 break
1577 break
1578
1578
1579 if r:
1579 if r:
1580 if r == "retry":
1580 if r == "retry":
1581 continue
1581 continue
1582 # clean up any optional leftovers
1582 # clean up any optional leftovers
1583 for i in optional:
1583 for i in optional:
1584 postout.append(b' ' + els[i])
1584 postout.append(b' ' + els[i])
1585 for i in reversed(optional):
1585 for i in reversed(optional):
1586 del els[i]
1586 del els[i]
1587 postout.append(b' ' + el)
1587 postout.append(b' ' + el)
1588 else:
1588 else:
1589 if self.NEEDESCAPE(lout):
1589 if self.NEEDESCAPE(lout):
1590 lout = TTest._stringescape(b'%s (esc)\n' %
1590 lout = TTest._stringescape(b'%s (esc)\n' %
1591 lout.rstrip(b'\n'))
1591 lout.rstrip(b'\n'))
1592 postout.append(b' ' + lout) # Let diff deal with it.
1592 postout.append(b' ' + lout) # Let diff deal with it.
1593 if r != '': # If line failed.
1593 if r != '': # If line failed.
1594 warnonly = 3 # for sure not
1594 warnonly = 3 # for sure not
1595 elif warnonly == 1: # Is "not yet" and line is warn only.
1595 elif warnonly == 1: # Is "not yet" and line is warn only.
1596 warnonly = 2 # Yes do warn.
1596 warnonly = 2 # Yes do warn.
1597 break
1597 break
1598 else:
1598 else:
1599 # clean up any optional leftovers
1599 # clean up any optional leftovers
1600 while expected.get(pos, None):
1600 while expected.get(pos, None):
1601 el = expected[pos].pop(0)
1601 el = expected[pos].pop(0)
1602 if el:
1602 if el:
1603 if not el.endswith(b" (?)\n"):
1603 if not el.endswith(b" (?)\n"):
1604 m = optline.match(el)
1604 m = optline.match(el)
1605 if m:
1605 if m:
1606 conditions = [c for c in m.group(2).split(b' ')]
1606 conditions = [c for c in m.group(2).split(b' ')]
1607
1607
1608 if self._iftest(conditions):
1608 if self._iftest(conditions):
1609 # Don't append as optional line
1609 # Don't append as optional line
1610 continue
1610 continue
1611 else:
1611 else:
1612 continue
1612 continue
1613 postout.append(b' ' + el)
1613 postout.append(b' ' + el)
1614
1614
1615 if lcmd:
1615 if lcmd:
1616 # Add on last return code.
1616 # Add on last return code.
1617 ret = int(lcmd.split()[1])
1617 ret = int(lcmd.split()[1])
1618 if ret != 0:
1618 if ret != 0:
1619 postout.append(b' [%d]\n' % ret)
1619 postout.append(b' [%d]\n' % ret)
1620 if pos in after:
1620 if pos in after:
1621 # Merge in non-active test bits.
1621 # Merge in non-active test bits.
1622 postout += after.pop(pos)
1622 postout += after.pop(pos)
1623 pos = int(lcmd.split()[0])
1623 pos = int(lcmd.split()[0])
1624
1624
1625 if pos in after:
1625 if pos in after:
1626 postout += after.pop(pos)
1626 postout += after.pop(pos)
1627
1627
1628 if warnonly == 2:
1628 if warnonly == 2:
1629 exitcode = False # Set exitcode to warned.
1629 exitcode = False # Set exitcode to warned.
1630
1630
1631 return exitcode, postout
1631 return exitcode, postout
1632
1632
1633 @staticmethod
1633 @staticmethod
1634 def rematch(el, l):
1634 def rematch(el, l):
1635 try:
1635 try:
1636 el = b'(?:' + el + b')'
1636 el = b'(?:' + el + b')'
1637 # use \Z to ensure that the regex matches to the end of the string
1637 # use \Z to ensure that the regex matches to the end of the string
1638 if os.name == 'nt':
1638 if os.name == 'nt':
1639 return re.match(el + br'\r?\n\Z', l)
1639 return re.match(el + br'\r?\n\Z', l)
1640 return re.match(el + br'\n\Z', l)
1640 return re.match(el + br'\n\Z', l)
1641 except re.error:
1641 except re.error:
1642 # el is an invalid regex
1642 # el is an invalid regex
1643 return False
1643 return False
1644
1644
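# A hedged illustration of rematch(): the " (re)" suffix has already been
# stripped by linematch(), so for an expected line
#
#   saved backup bundle to .*\.hg (re)
#
# the call is roughly
#
#     TTest.rematch(br'saved backup bundle to .*\.hg',
#                   b'saved backup bundle to /tmp/x.hg\n')
#
# and the \Z anchor forces the pattern to cover the whole output line.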
1645 @staticmethod
1645 @staticmethod
1646 def globmatch(el, l):
1646 def globmatch(el, l):
1647 # The only supported special characters are * and ? plus / which also
1647 # The only supported special characters are * and ? plus / which also
1648 # matches \ on Windows. Escaping of these characters is supported.
1648 # matches \ on Windows. Escaping of these characters is supported.
1649 if el + b'\n' == l:
1649 if el + b'\n' == l:
1650 if os.altsep:
1650 if os.altsep:
1651 # matching on "/" is not needed for this line
1651 # matching on "/" is not needed for this line
1652 for pat in checkcodeglobpats:
1652 for pat in checkcodeglobpats:
1653 if pat.match(el):
1653 if pat.match(el):
1654 return True
1654 return True
1655 return b'-glob'
1655 return b'-glob'
1656 return True
1656 return True
1657 el = el.replace(b'$LOCALIP', b'*')
1657 el = el.replace(b'$LOCALIP', b'*')
1658 i, n = 0, len(el)
1658 i, n = 0, len(el)
1659 res = b''
1659 res = b''
1660 while i < n:
1660 while i < n:
1661 c = el[i:i + 1]
1661 c = el[i:i + 1]
1662 i += 1
1662 i += 1
1663 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1663 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1664 res += el[i - 1:i + 1]
1664 res += el[i - 1:i + 1]
1665 i += 1
1665 i += 1
1666 elif c == b'*':
1666 elif c == b'*':
1667 res += b'.*'
1667 res += b'.*'
1668 elif c == b'?':
1668 elif c == b'?':
1669 res += b'.'
1669 res += b'.'
1670 elif c == b'/' and os.altsep:
1670 elif c == b'/' and os.altsep:
1671 res += b'[/\\\\]'
1671 res += b'[/\\\\]'
1672 else:
1672 else:
1673 res += re.escape(c)
1673 res += re.escape(c)
1674 return TTest.rematch(res, l)
1674 return TTest.rematch(res, l)
1675
1675
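# A sketch of the glob translation above: "*" becomes ".*", "?" becomes
# ".", and on platforms with os.altsep "/" becomes "[/\\]" so it also
# matches backslashes. An expected line such as
#
#   saved backup bundle to $TESTTMP/*.hg (glob)
#
# is therefore matched (after the "(glob)" suffix is stripped) against
# roughly the regex
#
#   saved backup bundle to \$TESTTMP[/\\].*\.hg
#
# with the remaining characters escaped as needed.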
1676 def linematch(self, el, l):
1676 def linematch(self, el, l):
1677 if el == l: # perfect match (fast)
1677 if el == l: # perfect match (fast)
1678 return True, True
1678 return True, True
1679 retry = False
1679 retry = False
1680 if el.endswith(b" (?)\n"):
1680 if el.endswith(b" (?)\n"):
1681 retry = "retry"
1681 retry = "retry"
1682 el = el[:-5] + b"\n"
1682 el = el[:-5] + b"\n"
1683 else:
1683 else:
1684 m = optline.match(el)
1684 m = optline.match(el)
1685 if m:
1685 if m:
1686 conditions = [c for c in m.group(2).split(b' ')]
1686 conditions = [c for c in m.group(2).split(b' ')]
1687
1687
1688 el = m.group(1) + b"\n"
1688 el = m.group(1) + b"\n"
1689 if not self._iftest(conditions):
1689 if not self._iftest(conditions):
1690 retry = "retry" # Not required by listed features
1690 retry = "retry" # Not required by listed features
1691
1691
1692 if el.endswith(b" (esc)\n"):
1692 if el.endswith(b" (esc)\n"):
1693 if PYTHON3:
1693 if PYTHON3:
1694 el = el[:-7].decode('unicode_escape') + '\n'
1694 el = el[:-7].decode('unicode_escape') + '\n'
1695 el = el.encode('utf-8')
1695 el = el.encode('utf-8')
1696 else:
1696 else:
1697 el = el[:-7].decode('string-escape') + '\n'
1697 el = el[:-7].decode('string-escape') + '\n'
1698 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1698 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1699 return True, True
1699 return True, True
1700 if el.endswith(b" (re)\n"):
1700 if el.endswith(b" (re)\n"):
1701 return (TTest.rematch(el[:-6], l) or retry), False
1701 return (TTest.rematch(el[:-6], l) or retry), False
1702 if el.endswith(b" (glob)\n"):
1702 if el.endswith(b" (glob)\n"):
1703 # ignore '(glob)' added to l by 'replacements'
1703 # ignore '(glob)' added to l by 'replacements'
1704 if l.endswith(b" (glob)\n"):
1704 if l.endswith(b" (glob)\n"):
1705 l = l[:-8] + b"\n"
1705 l = l[:-8] + b"\n"
1706 return (TTest.globmatch(el[:-8], l) or retry), False
1706 return (TTest.globmatch(el[:-8], l) or retry), False
1707 if os.altsep:
1707 if os.altsep:
1708 _l = l.replace(b'\\', b'/')
1708 _l = l.replace(b'\\', b'/')
1709 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1709 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1710 return True, True
1710 return True, True
1711 return retry, True
1711 return retry, True
1712
1712
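# A summary of the expected-line markers handled above (the example text
# is illustrative):
#
#   foo bar (re)        - "foo bar" is a regular expression
#   foo * bar (glob)    - "*" and "?" are shell-style wildcards
#   foo \xe9 (esc)      - the line is backslash-escaped
#   foo bar (?)         - the line is optional
#   foo bar (feature !) - only expected when "feature" applies, as
#                         decided by _iftest() via the optline regex
#
# The second element of the returned tuple marks an exact (non-regex,
# non-glob) comparison; _processoutput() uses it to stop such a line
# from being matched against later output.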
1713 @staticmethod
1713 @staticmethod
1714 def parsehghaveoutput(lines):
1714 def parsehghaveoutput(lines):
1715 '''Parse hghave log lines.
1715 '''Parse hghave log lines.
1716
1716
1717 Return tuple of lists (missing, failed):
1717 Return tuple of lists (missing, failed):
1718 * the missing/unknown features
1718 * the missing/unknown features
1719 * the features for which existence check failed'''
1719 * the features for which existence check failed'''
1720 missing = []
1720 missing = []
1721 failed = []
1721 failed = []
1722 for line in lines:
1722 for line in lines:
1723 if line.startswith(TTest.SKIPPED_PREFIX):
1723 if line.startswith(TTest.SKIPPED_PREFIX):
1724 line = line.splitlines()[0]
1724 line = line.splitlines()[0]
1725 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1725 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1726 elif line.startswith(TTest.FAILED_PREFIX):
1726 elif line.startswith(TTest.FAILED_PREFIX):
1727 line = line.splitlines()[0]
1727 line = line.splitlines()[0]
1728 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1728 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1729
1729
1730 return missing, failed
1730 return missing, failed
1731
1731
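# Illustrative input: given captured output containing
#
#   skipped: missing feature: POSIX shell
#   hghave check failed: docutils
#
# this returns (['missing feature: POSIX shell'], ['docutils']) -- text
# after SKIPPED_PREFIX feeds "missing", text after FAILED_PREFIX feeds
# "failed".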
1732 @staticmethod
1732 @staticmethod
1733 def _escapef(m):
1733 def _escapef(m):
1734 return TTest.ESCAPEMAP[m.group(0)]
1734 return TTest.ESCAPEMAP[m.group(0)]
1735
1735
1736 @staticmethod
1736 @staticmethod
1737 def _stringescape(s):
1737 def _stringescape(s):
1738 return TTest.ESCAPESUB(TTest._escapef, s)
1738 return TTest.ESCAPESUB(TTest._escapef, s)
1739
1739
1740 iolock = threading.RLock()
1740 iolock = threading.RLock()
1741 firstlock = threading.RLock()
1741 firstlock = threading.RLock()
1742 firsterror = False
1742 firsterror = False
1743
1743
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """Can be overridden by a custom TestResult."""

    def onEnd(self):
        """Can be overridden by a custom TestResult."""

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                v = self._options.view
                os.system(r"%s %s %s" %
                          (v, _strpath(test.refpath), _strpath(test.errpath)))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times() returns the user and system CPU time consumed by this
        # process and its children, along with the elapsed real time. Its
        # main limitation is that on Windows only the first two fields are
        # filled in, so the child CPU and elapsed times used below are 0.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))

def getTestResult():
    """Return the TestResult class to use, honoring $CUSTOM_TEST_RESULT."""
    if "CUSTOM_TEST_RESULT" in os.environ:
        testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
        return testresultmodule.TestResult
    else:
        return TestResult

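# getTestResult() allows the reporting class to be swapped out: when the
# CUSTOM_TEST_RESULT environment variable names an importable module, that
# module's TestResult attribute is used instead of the class above. A
# hypothetical invocation (module name chosen purely for illustration):
#
#   CUSTOM_TEST_RESULT=myresult ./run-tests.py test-foo.t
#
# where myresult.py is on the Python path and defines a TestResult with the
# same interface, including the onStart()/onEnd() hooks.
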
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        channels = [""] * self._jobs

        def job(test, result):
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect results.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        channels = []

        return result

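# A brief note on the scheduling in TestSuite.run() above: at most `jobs`
# tests run at a time, each on its own thread, and completions are reported
# back through the `done` queue. With --showchannels, the stat() thread
# prints a status line roughly once per second with one character per job;
# each active slot spells out the name of the test it is running, one
# character per tick, and settles on '.' once the name has been printed.
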
# Save the most recent 5 wall-clock runtimes of each test to a
# human-readable text file named .testtimes. Tests are sorted
# alphabetically, while times for each test are listed from oldest to
# newest.

def loadtimes(outputdir):
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times

def savetimes(outputdir, result):
    saved = dict(loadtimes(outputdir))
    maxruns = 5
    skipped = set([str(t[0]) for t in result.skipped])
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test not in skipped:
            ts = saved.setdefault(test, [])
            ts.append(real)
            ts[:] = ts[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, ts in sorted(saved.items()):
            fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass

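# For reference, savetimes() produces .testtimes entries of the form
# "<test name> <up to 5 space-separated wall-clock times>", e.g. a line such
# as (values invented for illustration):
#
#   test-commit.t 2.512 2.487 2.530
#
# which loadtimes() parses back using the regular expression above.
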
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

        self._result = getTestResult()(self._runner.options, self.stream,
                                       self.descriptions, self.verbosity)

    def listtests(self, test):
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in self._result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in self._result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in self._result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

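    # _bisecttests() above drives `hg bisect` when a run fails and
    # --known-good-rev was given: it marks the working-copy revision bad, the
    # known-good revision good, and then lets `hg bisect --command` re-run
    # this script against the single failing test until the first bad
    # revision is found, finally summarizing it as "<test> broken by <node>".
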
    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))

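    # The xunit report written above has roughly the following shape (values
    # and diff text invented for illustration):
    #
    #   <testsuite name="run-tests" tests="3" errors="0" failures="1" skipped="1">
    #     <testcase name="test-a.t" time="1.234"/>
    #     <testcase name="test-b.t" time="0.512">
    #       <failure message="output changed" type="output-mismatch">
    #         <![CDATA[--- expected ... +++ observed ...]]>
    #       </failure>
    #     </testcase>
    #     <testcase name="test-c.t"><skipped><![CDATA[missing feature]]></skipped></testcase>
    #   </testsuite>
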
    @staticmethod
    def _writejson(result, outf):
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                               for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))

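    # --json asks run() / listtests() to call _writejson(), which emits a
    # report.json of the form (values invented for illustration):
    #
    #   testreport ={
    #       "test-a.t": {
    #           "csys": "0.110", "cuser": "0.730", "diff": "",
    #           "end": "1.241", "result": "success", "start": "0.000",
    #           "time": "1.241"
    #       },
    #       ...
    #   }
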
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests."""
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(f):
            f = f['path']
            if f in previoustimes:
                # Use most recent time as estimate
                return -previoustimes[f][-1]
            else:
                # Default to a rather arbitrary value of 1 second for new tests
                return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        perf = {}

        def sortkey(f):
            # run largest tests first, as they tend to take the longest
            f = f['path']
            try:
                return perf[f]
            except KeyError:
                try:
                    val = -os.stat(f).st_size
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                    perf[f] = -1e9 # file does not exist, tell early
                    return -1e9
                for kw, mul in slow.items():
                    if kw in f:
                        val *= mul
                if f.endswith(b'.py'):
                    val /= 10.0
                perf[f] = val / 1000.0
                return perf[f]

    testdescs.sort(key=sortkey)

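# In the absence of recorded timings, sorttests() approximates "longest
# first" from the file size: the size is negated, multiplied by any matching
# keyword weight from `slow`, and scaled down for .py tests. For example
# (numbers invented), a 20 kB test-check-foo.t gets key
# -20000 * 100 / 1000.0 = -2000.0 and sorts well before an ordinary 5 kB .t
# test whose key is -5.0.
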
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]

    def __init__(self):
        self.options = None
        self._hgroot = None
        self._testdir = None
        self._outputdir = None
        self._hgtmp = None
        self._installdir = None
        self._bindir = None
        self._tmpbindir = None
        self._pythondir = None
        self._coveragefile = None
        self._createdfiles = []
        self._hgcommand = None
        self._hgpath = None
        self._portoffset = 0
        self._ports = {}

    def run(self, args, parser=None):
        """Run the test suite."""
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)

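    # _run() below is where most of the per-run environment is assembled:
    # TESTDIR/RUNTESTDIR/HGTMP, the hg/chg binary selection, PATH and
    # PYTHONPATH adjustments, and the optional exception-logging extension,
    # before the actual test suite is constructed and executed.
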
    def _run(self, testdescs):
        self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourselves to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

2625 fileb = _bytespath(__file__)
2625 fileb = _bytespath(__file__)
2626 runtestdir = os.path.abspath(os.path.dirname(fileb))
2626 runtestdir = os.path.abspath(os.path.dirname(fileb))
2627 osenvironb[b'RUNTESTDIR'] = runtestdir
2627 osenvironb[b'RUNTESTDIR'] = runtestdir
2628 if PYTHON3:
2628 if PYTHON3:
2629 sepb = _bytespath(os.pathsep)
2629 sepb = _bytespath(os.pathsep)
2630 else:
2630 else:
2631 sepb = os.pathsep
2631 sepb = os.pathsep
2632 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2632 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2633 if os.path.islink(__file__):
2633 if os.path.islink(__file__):
2634 # test helper will likely be at the end of the symlink
2634 # test helper will likely be at the end of the symlink
2635 realfile = os.path.realpath(fileb)
2635 realfile = os.path.realpath(fileb)
2636 realdir = os.path.abspath(os.path.dirname(realfile))
2636 realdir = os.path.abspath(os.path.dirname(realfile))
2637 path.insert(2, realdir)
2637 path.insert(2, realdir)
2638 if chgbindir != self._bindir:
2638 if chgbindir != self._bindir:
2639 path.insert(1, chgbindir)
2639 path.insert(1, chgbindir)
2640 if self._testdir != runtestdir:
2640 if self._testdir != runtestdir:
2641 path = [self._testdir] + path
2641 path = [self._testdir] + path
2642 if self._tmpbindir != self._bindir:
2642 if self._tmpbindir != self._bindir:
2643 path = [self._tmpbindir] + path
2643 path = [self._tmpbindir] + path
2644 osenvironb[b"PATH"] = sepb.join(path)
2644 osenvironb[b"PATH"] = sepb.join(path)
2645
2645
2646 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2646 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2647 # can run .../tests/run-tests.py test-foo where test-foo
2647 # can run .../tests/run-tests.py test-foo where test-foo
2648 # adds an extension to HGRC. Also include run-tests.py directory to
2648 # adds an extension to HGRC. Also include run-tests.py directory to
2649 # import modules like heredoctest.
2649 # import modules like heredoctest.
2650 pypath = [self._pythondir, self._testdir, runtestdir]
2650 pypath = [self._pythondir, self._testdir, runtestdir]
2651 # We have to augment PYTHONPATH, rather than simply replacing
2651 # We have to augment PYTHONPATH, rather than simply replacing
2652 # it, in case external libraries are only available via current
2652 # it, in case external libraries are only available via current
2653 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2653 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2654 # are in /opt/subversion.)
2654 # are in /opt/subversion.)
2655 oldpypath = osenvironb.get(IMPL_PATH)
2655 oldpypath = osenvironb.get(IMPL_PATH)
2656 if oldpypath:
2656 if oldpypath:
2657 pypath.append(oldpypath)
2657 pypath.append(oldpypath)
2658 osenvironb[IMPL_PATH] = sepb.join(pypath)
2658 osenvironb[IMPL_PATH] = sepb.join(pypath)
2659
2659
2660 if self.options.pure:
2660 if self.options.pure:
2661 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2661 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2662 os.environ["HGMODULEPOLICY"] = "py"
2662 os.environ["HGMODULEPOLICY"] = "py"
2663
2663
2664 if self.options.allow_slow_tests:
2664 if self.options.allow_slow_tests:
2665 os.environ["HGTEST_SLOW"] = "slow"
2665 os.environ["HGTEST_SLOW"] = "slow"
2666 elif 'HGTEST_SLOW' in os.environ:
2666 elif 'HGTEST_SLOW' in os.environ:
2667 del os.environ['HGTEST_SLOW']
2667 del os.environ['HGTEST_SLOW']
2668
2668
2669 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2669 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2670
2670
2671 if self.options.exceptions:
2671 if self.options.exceptions:
2672 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2672 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2673 try:
2673 try:
2674 os.makedirs(exceptionsdir)
2674 os.makedirs(exceptionsdir)
2675 except OSError as e:
2675 except OSError as e:
2676 if e.errno != errno.EEXIST:
2676 if e.errno != errno.EEXIST:
2677 raise
2677 raise
2678
2678
2679 # Remove all existing exception reports.
2679 # Remove all existing exception reports.
2680 for f in os.listdir(exceptionsdir):
2680 for f in os.listdir(exceptionsdir):
2681 os.unlink(os.path.join(exceptionsdir, f))
2681 os.unlink(os.path.join(exceptionsdir, f))
2682
2682
2683 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2683 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2684 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2684 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2685 self.options.extra_config_opt.append(
2685 self.options.extra_config_opt.append(
2686 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2686 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2687
2687
2688 vlog("# Using TESTDIR", self._testdir)
2688 vlog("# Using TESTDIR", self._testdir)
2689 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2689 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2690 vlog("# Using HGTMP", self._hgtmp)
2690 vlog("# Using HGTMP", self._hgtmp)
2691 vlog("# Using PATH", os.environ["PATH"])
2691 vlog("# Using PATH", os.environ["PATH"])
2692 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2692 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2693 vlog("# Writing to directory", self._outputdir)
2693 vlog("# Writing to directory", self._outputdir)
2694
2694
2695 try:
2695 try:
2696 return self._runtests(testdescs) or 0
2696 return self._runtests(testdescs) or 0
2697 finally:
2697 finally:
2698 time.sleep(.1)
2698 time.sleep(.1)
2699 self._cleanup()
2699 self._cleanup()
2700
2700
2701 def findtests(self, args):
2701 def findtests(self, args):
2702 """Finds possible test files from arguments.
2702 """Finds possible test files from arguments.
2703
2703
2704 If you wish to inject custom tests into the test harness, this would
2704 If you wish to inject custom tests into the test harness, this would
2705 be a good function to monkeypatch or override in a derived class.
2705 be a good function to monkeypatch or override in a derived class.
2706 """
2706 """
2707 if not args:
2707 if not args:
2708 if self.options.changed:
2708 if self.options.changed:
2709 proc = Popen4('hg st --rev "%s" -man0 .' %
2709 proc = Popen4('hg st --rev "%s" -man0 .' %
2710 self.options.changed, None, 0)
2710 self.options.changed, None, 0)
2711 stdout, stderr = proc.communicate()
2711 stdout, stderr = proc.communicate()
2712 args = stdout.strip(b'\0').split(b'\0')
2712 args = stdout.strip(b'\0').split(b'\0')
2713 else:
2713 else:
2714 args = os.listdir(b'.')
2714 args = os.listdir(b'.')
2715
2715
2716 expanded_args = []
2716 expanded_args = []
2717 for arg in args:
2717 for arg in args:
2718 if os.path.isdir(arg):
2718 if os.path.isdir(arg):
2719 if not arg.endswith(b'/'):
2719 if not arg.endswith(b'/'):
2720 arg += b'/'
2720 arg += b'/'
2721 expanded_args.extend([arg + a for a in os.listdir(arg)])
2721 expanded_args.extend([arg + a for a in os.listdir(arg)])
2722 else:
2722 else:
2723 expanded_args.append(arg)
2723 expanded_args.append(arg)
2724 args = expanded_args
2724 args = expanded_args
2725
2725
2726 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2726 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2727 tests = []
2727 tests = []
2728 for t in args:
2728 for t in args:
2729 case = []
2729 case = []
2730
2730
2731 if not (os.path.basename(t).startswith(b'test-')
2731 if not (os.path.basename(t).startswith(b'test-')
2732 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2732 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2733
2733
2734 m = testcasepattern.match(t)
2734 m = testcasepattern.match(t)
2735 if m is not None:
2735 if m is not None:
2736 t, _, casestr = m.groups()
2736 t, _, casestr = m.groups()
2737 if casestr:
2737 if casestr:
2738 case = casestr.split(b'#')
2738 case = casestr.split(b'#')
2739 else:
2739 else:
2740 continue
2740 continue
2741
2741
2742 if t.endswith(b'.t'):
2742 if t.endswith(b'.t'):
2743 # .t file may contain multiple test cases
2743 # .t file may contain multiple test cases
2744 casedimensions = parsettestcases(t)
2744 casedimensions = parsettestcases(t)
2745 if casedimensions:
2745 if casedimensions:
2746 cases = []
2746 cases = []
2747 def addcases(case, casedimensions):
2747 def addcases(case, casedimensions):
2748 if not casedimensions:
2748 if not casedimensions:
2749 cases.append(case)
2749 cases.append(case)
2750 else:
2750 else:
2751 for c in casedimensions[0]:
2751 for c in casedimensions[0]:
2752 addcases(case + [c], casedimensions[1:])
2752 addcases(case + [c], casedimensions[1:])
2753 addcases([], casedimensions)
2753 addcases([], casedimensions)
2754 if case and case in cases:
2754 if case and case in cases:
2755 cases = [case]
2755 cases = [case]
2756 elif case:
2756 elif case:
2757 # Ignore invalid cases
2757 # Ignore invalid cases
2758 cases = []
2758 cases = []
2759 else:
2759 else:
2760 pass
2760 pass
2761 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2761 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2762 else:
2762 else:
2763 tests.append({'path': t})
2763 tests.append({'path': t})
2764 else:
2764 else:
2765 tests.append({'path': t})
2765 tests.append({'path': t})
2766 return tests
2766 return tests
2767
2767
2768 def _runtests(self, testdescs):
2768 def _runtests(self, testdescs):
2769 def _reloadtest(test, i):
2769 def _reloadtest(test, i):
2770 # convert a test back to its description dict
2770 # convert a test back to its description dict
2771 desc = {'path': test.path}
2771 desc = {'path': test.path}
2772 case = getattr(test, '_case', [])
2772 case = getattr(test, '_case', [])
2773 if case:
2773 if case:
2774 desc['case'] = case
2774 desc['case'] = case
2775 return self._gettest(desc, i)
2775 return self._gettest(desc, i)
2776
2776
2777 try:
2777 try:
2778 if self.options.restart:
2778 if self.options.restart:
2779 orig = list(testdescs)
2779 orig = list(testdescs)
2780 while testdescs:
2780 while testdescs:
2781 desc = testdescs[0]
2781 desc = testdescs[0]
2782 # desc['path'] is a relative path
2782 # desc['path'] is a relative path
2783 if 'case' in desc:
2783 if 'case' in desc:
2784 casestr = b'#'.join(desc['case'])
2784 casestr = b'#'.join(desc['case'])
2785 errpath = b'%s#%s.err' % (desc['path'], casestr)
2785 errpath = b'%s#%s.err' % (desc['path'], casestr)
2786 else:
2786 else:
2787 errpath = b'%s.err' % desc['path']
2787 errpath = b'%s.err' % desc['path']
2788 errpath = os.path.join(self._outputdir, errpath)
2788 errpath = os.path.join(self._outputdir, errpath)
2789 if os.path.exists(errpath):
2789 if os.path.exists(errpath):
2790 break
2790 break
2791 testdescs.pop(0)
2791 testdescs.pop(0)
2792 if not testdescs:
2792 if not testdescs:
2793 print("running all tests")
2793 print("running all tests")
2794 testdescs = orig
2794 testdescs = orig
2795
2795
2796 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2796 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2797
2797
2798 jobs = min(len(tests), self.options.jobs)
2799
2798 failed = False
2800 failed = False
2799 kws = self.options.keywords
2801 kws = self.options.keywords
2800 if kws is not None and PYTHON3:
2802 if kws is not None and PYTHON3:
2801 kws = kws.encode('utf-8')
2803 kws = kws.encode('utf-8')
2802
2804
2803 suite = TestSuite(self._testdir,
2805 suite = TestSuite(self._testdir,
2804 jobs=self.options.jobs,
2806 jobs=jobs,
2805 whitelist=self.options.whitelisted,
2807 whitelist=self.options.whitelisted,
2806 blacklist=self.options.blacklist,
2808 blacklist=self.options.blacklist,
2807 retest=self.options.retest,
2809 retest=self.options.retest,
2808 keywords=kws,
2810 keywords=kws,
2809 loop=self.options.loop,
2811 loop=self.options.loop,
2810 runs_per_test=self.options.runs_per_test,
2812 runs_per_test=self.options.runs_per_test,
2811 showchannels=self.options.showchannels,
2813 showchannels=self.options.showchannels,
2812 tests=tests, loadtest=_reloadtest)
2814 tests=tests, loadtest=_reloadtest)
2813 verbosity = 1
2815 verbosity = 1
2814 if self.options.list_tests:
2816 if self.options.list_tests:
2815 verbosity = 0
2817 verbosity = 0
2816 elif self.options.verbose:
2818 elif self.options.verbose:
2817 verbosity = 2
2819 verbosity = 2
2818 runner = TextTestRunner(self, verbosity=verbosity)
2820 runner = TextTestRunner(self, verbosity=verbosity)
2819
2821
2820 if self.options.list_tests:
2822 if self.options.list_tests:
2821 result = runner.listtests(suite)
2823 result = runner.listtests(suite)
2822 else:
2824 else:
2823 if self._installdir:
2825 if self._installdir:
2824 self._installhg()
2826 self._installhg()
2825 self._checkhglib("Testing")
2827 self._checkhglib("Testing")
2826 else:
2828 else:
2827 self._usecorrectpython()
2829 self._usecorrectpython()
2828 if self.options.chg:
2830 if self.options.chg:
2829 assert self._installdir
2831 assert self._installdir
2830 self._installchg()
2832 self._installchg()
2831
2833
2834 log('running %d tests using %d parallel processes' % (
2835 len(tests), jobs))
2836
2832 result = runner.run(suite)
2837 result = runner.run(suite)
2833
2838
2834 if result.failures:
2839 if result.failures:
2835 failed = True
2840 failed = True
2836
2841
2837 result.onEnd()
2842 result.onEnd()
2838
2843
2839 if self.options.anycoverage:
2844 if self.options.anycoverage:
2840 self._outputcoverage()
2845 self._outputcoverage()
2841 except KeyboardInterrupt:
2846 except KeyboardInterrupt:
2842 failed = True
2847 failed = True
2843 print("\ninterrupted!")
2848 print("\ninterrupted!")
2844
2849
2845 if failed:
2850 if failed:
2846 return 1
2851 return 1
2847
2852
2848 def _getport(self, count):
2853 def _getport(self, count):
2849 port = self._ports.get(count) # do we have a cached entry?
2854 port = self._ports.get(count) # do we have a cached entry?
2850 if port is None:
2855 if port is None:
2851 portneeded = 3
2856 portneeded = 3
2852 # after 100 tries we just give up and let the test report failure
2857 # after 100 tries we just give up and let the test report failure
2853 for tries in xrange(100):
2858 for tries in xrange(100):
2854 allfree = True
2859 allfree = True
2855 port = self.options.port + self._portoffset
2860 port = self.options.port + self._portoffset
2856 for idx in xrange(portneeded):
2861 for idx in xrange(portneeded):
2857 if not checkportisavailable(port + idx):
2862 if not checkportisavailable(port + idx):
2858 allfree = False
2863 allfree = False
2859 break
2864 break
2860 self._portoffset += portneeded
2865 self._portoffset += portneeded
2861 if allfree:
2866 if allfree:
2862 break
2867 break
2863 self._ports[count] = port
2868 self._ports[count] = port
2864 return port
2869 return port
2865
2870
2866 def _gettest(self, testdesc, count):
2871 def _gettest(self, testdesc, count):
2867 """Obtain a Test by looking at its filename.
2872 """Obtain a Test by looking at its filename.
2868
2873
2869 Returns a Test instance. The Test may not be runnable if it doesn't
2874 Returns a Test instance. The Test may not be runnable if it doesn't
2870 map to a known type.
2875 map to a known type.
2871 """
2876 """
2872 path = testdesc['path']
2877 path = testdesc['path']
2873 lctest = path.lower()
2878 lctest = path.lower()
2874 testcls = Test
2879 testcls = Test
2875
2880
2876 for ext, cls in self.TESTTYPES:
2881 for ext, cls in self.TESTTYPES:
2877 if lctest.endswith(ext):
2882 if lctest.endswith(ext):
2878 testcls = cls
2883 testcls = cls
2879 break
2884 break
2880
2885
2881 refpath = os.path.join(self._testdir, path)
2886 refpath = os.path.join(self._testdir, path)
2882 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2887 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2883
2888
2884 # extra keyword parameters. 'case' is used by .t tests
2889 # extra keyword parameters. 'case' is used by .t tests
2885 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2890 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2886
2891
2887 t = testcls(refpath, self._outputdir, tmpdir,
2892 t = testcls(refpath, self._outputdir, tmpdir,
2888 keeptmpdir=self.options.keep_tmpdir,
2893 keeptmpdir=self.options.keep_tmpdir,
2889 debug=self.options.debug,
2894 debug=self.options.debug,
2890 first=self.options.first,
2895 first=self.options.first,
2891 timeout=self.options.timeout,
2896 timeout=self.options.timeout,
2892 startport=self._getport(count),
2897 startport=self._getport(count),
2893 extraconfigopts=self.options.extra_config_opt,
2898 extraconfigopts=self.options.extra_config_opt,
2894 py3kwarnings=self.options.py3k_warnings,
2899 py3kwarnings=self.options.py3k_warnings,
2895 shell=self.options.shell,
2900 shell=self.options.shell,
2896 hgcommand=self._hgcommand,
2901 hgcommand=self._hgcommand,
2897 usechg=bool(self.options.with_chg or self.options.chg),
2902 usechg=bool(self.options.with_chg or self.options.chg),
2898 useipv6=useipv6, **kwds)
2903 useipv6=useipv6, **kwds)
2899 t.should_reload = True
2904 t.should_reload = True
2900 return t
2905 return t
2901
2906
2902 def _cleanup(self):
2907 def _cleanup(self):
2903 """Clean up state from this test invocation."""
2908 """Clean up state from this test invocation."""
2904 if self.options.keep_tmpdir:
2909 if self.options.keep_tmpdir:
2905 return
2910 return
2906
2911
2907 vlog("# Cleaning up HGTMP", self._hgtmp)
2912 vlog("# Cleaning up HGTMP", self._hgtmp)
2908 shutil.rmtree(self._hgtmp, True)
2913 shutil.rmtree(self._hgtmp, True)
2909 for f in self._createdfiles:
2914 for f in self._createdfiles:
2910 try:
2915 try:
2911 os.remove(f)
2916 os.remove(f)
2912 except OSError:
2917 except OSError:
2913 pass
2918 pass
2914
2919
2915 def _usecorrectpython(self):
2920 def _usecorrectpython(self):
2916 """Configure the environment to use the appropriate Python in tests."""
2921 """Configure the environment to use the appropriate Python in tests."""
2917 # Tests must use the same interpreter as us or bad things will happen.
2922 # Tests must use the same interpreter as us or bad things will happen.
2918 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2923 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2919
2924
2920 # os.symlink() is available with py3 on Windows, but it requires
2925 # os.symlink() is available with py3 on Windows, but it requires
2921 # Administrator rights.
2926 # Administrator rights.
2922 if getattr(os, 'symlink', None) and os.name != 'nt':
2927 if getattr(os, 'symlink', None) and os.name != 'nt':
2923 vlog("# Making python executable in test path a symlink to '%s'" %
2928 vlog("# Making python executable in test path a symlink to '%s'" %
2924 sys.executable)
2929 sys.executable)
2925 mypython = os.path.join(self._tmpbindir, pyexename)
2930 mypython = os.path.join(self._tmpbindir, pyexename)
2926 try:
2931 try:
2927 if os.readlink(mypython) == sys.executable:
2932 if os.readlink(mypython) == sys.executable:
2928 return
2933 return
2929 os.unlink(mypython)
2934 os.unlink(mypython)
2930 except OSError as err:
2935 except OSError as err:
2931 if err.errno != errno.ENOENT:
2936 if err.errno != errno.ENOENT:
2932 raise
2937 raise
2933 if self._findprogram(pyexename) != sys.executable:
2938 if self._findprogram(pyexename) != sys.executable:
2934 try:
2939 try:
2935 os.symlink(sys.executable, mypython)
2940 os.symlink(sys.executable, mypython)
2936 self._createdfiles.append(mypython)
2941 self._createdfiles.append(mypython)
2937 except OSError as err:
2942 except OSError as err:
2938 # child processes may race, which is harmless
2943 # child processes may race, which is harmless
2939 if err.errno != errno.EEXIST:
2944 if err.errno != errno.EEXIST:
2940 raise
2945 raise
2941 else:
2946 else:
2942 exedir, exename = os.path.split(sys.executable)
2947 exedir, exename = os.path.split(sys.executable)
2943 vlog("# Modifying search path to find %s as %s in '%s'" %
2948 vlog("# Modifying search path to find %s as %s in '%s'" %
2944 (exename, pyexename, exedir))
2949 (exename, pyexename, exedir))
2945 path = os.environ['PATH'].split(os.pathsep)
2950 path = os.environ['PATH'].split(os.pathsep)
2946 while exedir in path:
2951 while exedir in path:
2947 path.remove(exedir)
2952 path.remove(exedir)
2948 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2953 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2949 if not self._findprogram(pyexename):
2954 if not self._findprogram(pyexename):
2950 print("WARNING: Cannot find %s in search path" % pyexename)
2955 print("WARNING: Cannot find %s in search path" % pyexename)
2951
2956
2952 def _installhg(self):
2957 def _installhg(self):
2953 """Install hg into the test environment.
2958 """Install hg into the test environment.
2954
2959
2955 This will also configure hg with the appropriate testing settings.
2960 This will also configure hg with the appropriate testing settings.
2956 """
2961 """
2957 vlog("# Performing temporary installation of HG")
2962 vlog("# Performing temporary installation of HG")
2958 installerrs = os.path.join(self._hgtmp, b"install.err")
2963 installerrs = os.path.join(self._hgtmp, b"install.err")
2959 compiler = ''
2964 compiler = ''
2960 if self.options.compiler:
2965 if self.options.compiler:
2961 compiler = '--compiler ' + self.options.compiler
2966 compiler = '--compiler ' + self.options.compiler
2962 if self.options.pure:
2967 if self.options.pure:
2963 pure = b"--pure"
2968 pure = b"--pure"
2964 else:
2969 else:
2965 pure = b""
2970 pure = b""
2966
2971
2967 # Run installer in hg root
2972 # Run installer in hg root
2968 script = os.path.realpath(sys.argv[0])
2973 script = os.path.realpath(sys.argv[0])
2969 exe = sys.executable
2974 exe = sys.executable
2970 if PYTHON3:
2975 if PYTHON3:
2971 compiler = _bytespath(compiler)
2976 compiler = _bytespath(compiler)
2972 script = _bytespath(script)
2977 script = _bytespath(script)
2973 exe = _bytespath(exe)
2978 exe = _bytespath(exe)
2974 hgroot = os.path.dirname(os.path.dirname(script))
2979 hgroot = os.path.dirname(os.path.dirname(script))
2975 self._hgroot = hgroot
2980 self._hgroot = hgroot
2976 os.chdir(hgroot)
2981 os.chdir(hgroot)
2977 nohome = b'--home=""'
2982 nohome = b'--home=""'
2978 if os.name == 'nt':
2983 if os.name == 'nt':
2979 # The --home="" trick works only on OSes where os.sep == '/'
2984 # The --home="" trick works only on OSes where os.sep == '/'
2980 # because of a distutils convert_path() fast-path. Avoid it at
2985 # because of a distutils convert_path() fast-path. Avoid it at
2981 # least on Windows for now, deal with .pydistutils.cfg bugs
2986 # least on Windows for now, deal with .pydistutils.cfg bugs
2982 # when they happen.
2987 # when they happen.
2983 nohome = b''
2988 nohome = b''
2984 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2989 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2985 b' build %(compiler)s --build-base="%(base)s"'
2990 b' build %(compiler)s --build-base="%(base)s"'
2986 b' install --force --prefix="%(prefix)s"'
2991 b' install --force --prefix="%(prefix)s"'
2987 b' --install-lib="%(libdir)s"'
2992 b' --install-lib="%(libdir)s"'
2988 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2993 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2989 % {b'exe': exe, b'pure': pure,
2994 % {b'exe': exe, b'pure': pure,
2990 b'compiler': compiler,
2995 b'compiler': compiler,
2991 b'base': os.path.join(self._hgtmp, b"build"),
2996 b'base': os.path.join(self._hgtmp, b"build"),
2992 b'prefix': self._installdir, b'libdir': self._pythondir,
2997 b'prefix': self._installdir, b'libdir': self._pythondir,
2993 b'bindir': self._bindir,
2998 b'bindir': self._bindir,
2994 b'nohome': nohome, b'logfile': installerrs})
2999 b'nohome': nohome, b'logfile': installerrs})
2995
3000
2996 # setuptools requires install directories to exist.
3001 # setuptools requires install directories to exist.
2997 def makedirs(p):
3002 def makedirs(p):
2998 try:
3003 try:
2999 os.makedirs(p)
3004 os.makedirs(p)
3000 except OSError as e:
3005 except OSError as e:
3001 if e.errno != errno.EEXIST:
3006 if e.errno != errno.EEXIST:
3002 raise
3007 raise
3003 makedirs(self._pythondir)
3008 makedirs(self._pythondir)
3004 makedirs(self._bindir)
3009 makedirs(self._bindir)
3005
3010
3006 vlog("# Running", cmd)
3011 vlog("# Running", cmd)
3007 if os.system(_strpath(cmd)) == 0:
3012 if os.system(_strpath(cmd)) == 0:
3008 if not self.options.verbose:
3013 if not self.options.verbose:
3009 try:
3014 try:
3010 os.remove(installerrs)
3015 os.remove(installerrs)
3011 except OSError as e:
3016 except OSError as e:
3012 if e.errno != errno.ENOENT:
3017 if e.errno != errno.ENOENT:
3013 raise
3018 raise
3014 else:
3019 else:
3015 with open(installerrs, 'rb') as f:
3020 with open(installerrs, 'rb') as f:
3016 for line in f:
3021 for line in f:
3017 if PYTHON3:
3022 if PYTHON3:
3018 sys.stdout.buffer.write(line)
3023 sys.stdout.buffer.write(line)
3019 else:
3024 else:
3020 sys.stdout.write(line)
3025 sys.stdout.write(line)
3021 sys.exit(1)
3026 sys.exit(1)
3022 os.chdir(self._testdir)
3027 os.chdir(self._testdir)
3023
3028
3024 self._usecorrectpython()
3029 self._usecorrectpython()
3025
3030
3026 if self.options.py3k_warnings and not self.options.anycoverage:
3031 if self.options.py3k_warnings and not self.options.anycoverage:
3027 vlog("# Updating hg command to enable Py3k Warnings switch")
3032 vlog("# Updating hg command to enable Py3k Warnings switch")
3028 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3033 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3029 lines = [line.rstrip() for line in f]
3034 lines = [line.rstrip() for line in f]
3030 lines[0] += ' -3'
3035 lines[0] += ' -3'
3031 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3036 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3032 for line in lines:
3037 for line in lines:
3033 f.write(line + '\n')
3038 f.write(line + '\n')
3034
3039
3035 hgbat = os.path.join(self._bindir, b'hg.bat')
3040 hgbat = os.path.join(self._bindir, b'hg.bat')
3036 if os.path.isfile(hgbat):
3041 if os.path.isfile(hgbat):
3037 # hg.bat expects to be put in bin/scripts while run-tests.py
3042 # hg.bat expects to be put in bin/scripts while run-tests.py
3038 # installation layout puts it in bin/ directly. Fix it
3043 # installation layout puts it in bin/ directly. Fix it
3039 with open(hgbat, 'rb') as f:
3044 with open(hgbat, 'rb') as f:
3040 data = f.read()
3045 data = f.read()
3041 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3046 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3042 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3047 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3043 b'"%~dp0python" "%~dp0hg" %*')
3048 b'"%~dp0python" "%~dp0hg" %*')
3044 with open(hgbat, 'wb') as f:
3049 with open(hgbat, 'wb') as f:
3045 f.write(data)
3050 f.write(data)
3046 else:
3051 else:
3047 print('WARNING: cannot fix hg.bat reference to python.exe')
3052 print('WARNING: cannot fix hg.bat reference to python.exe')
3048
3053
3049 if self.options.anycoverage:
3054 if self.options.anycoverage:
3050 custom = os.path.join(self._testdir, 'sitecustomize.py')
3055 custom = os.path.join(self._testdir, 'sitecustomize.py')
3051 target = os.path.join(self._pythondir, 'sitecustomize.py')
3056 target = os.path.join(self._pythondir, 'sitecustomize.py')
3052 vlog('# Installing coverage trigger to %s' % target)
3057 vlog('# Installing coverage trigger to %s' % target)
3053 shutil.copyfile(custom, target)
3058 shutil.copyfile(custom, target)
3054 rc = os.path.join(self._testdir, '.coveragerc')
3059 rc = os.path.join(self._testdir, '.coveragerc')
3055 vlog('# Installing coverage rc to %s' % rc)
3060 vlog('# Installing coverage rc to %s' % rc)
3056 os.environ['COVERAGE_PROCESS_START'] = rc
3061 os.environ['COVERAGE_PROCESS_START'] = rc
3057 covdir = os.path.join(self._installdir, '..', 'coverage')
3062 covdir = os.path.join(self._installdir, '..', 'coverage')
3058 try:
3063 try:
3059 os.mkdir(covdir)
3064 os.mkdir(covdir)
3060 except OSError as e:
3065 except OSError as e:
3061 if e.errno != errno.EEXIST:
3066 if e.errno != errno.EEXIST:
3062 raise
3067 raise
3063
3068
3064 os.environ['COVERAGE_DIR'] = covdir
3069 os.environ['COVERAGE_DIR'] = covdir
3065
3070
3066 def _checkhglib(self, verb):
3071 def _checkhglib(self, verb):
3067 """Ensure that the 'mercurial' package imported by python is
3072 """Ensure that the 'mercurial' package imported by python is
3068 the one we expect it to be. If not, print a warning to stderr."""
3073 the one we expect it to be. If not, print a warning to stderr."""
3069 if ((self._bindir == self._pythondir) and
3074 if ((self._bindir == self._pythondir) and
3070 (self._bindir != self._tmpbindir)):
3075 (self._bindir != self._tmpbindir)):
3071 # The pythondir has been inferred from --with-hg flag.
3076 # The pythondir has been inferred from --with-hg flag.
3072 # We cannot expect anything sensible here.
3077 # We cannot expect anything sensible here.
3073 return
3078 return
3074 expecthg = os.path.join(self._pythondir, b'mercurial')
3079 expecthg = os.path.join(self._pythondir, b'mercurial')
3075 actualhg = self._gethgpath()
3080 actualhg = self._gethgpath()
3076 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3081 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3077 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3082 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3078 ' (expected %s)\n'
3083 ' (expected %s)\n'
3079 % (verb, actualhg, expecthg))
3084 % (verb, actualhg, expecthg))
3080 def _gethgpath(self):
3085 def _gethgpath(self):
3081 """Return the path to the mercurial package that is actually found by
3086 """Return the path to the mercurial package that is actually found by
3082 the current Python interpreter."""
3087 the current Python interpreter."""
3083 if self._hgpath is not None:
3088 if self._hgpath is not None:
3084 return self._hgpath
3089 return self._hgpath
3085
3090
3086 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3091 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3087 cmd = cmd % PYTHON
3092 cmd = cmd % PYTHON
3088 if PYTHON3:
3093 if PYTHON3:
3089 cmd = _strpath(cmd)
3094 cmd = _strpath(cmd)
3090 pipe = os.popen(cmd)
3095 pipe = os.popen(cmd)
3091 try:
3096 try:
3092 self._hgpath = _bytespath(pipe.read().strip())
3097 self._hgpath = _bytespath(pipe.read().strip())
3093 finally:
3098 finally:
3094 pipe.close()
3099 pipe.close()
3095
3100
3096 return self._hgpath
3101 return self._hgpath
3097
3102
3098 def _installchg(self):
3103 def _installchg(self):
3099 """Install chg into the test environment"""
3104 """Install chg into the test environment"""
3100 vlog('# Performing temporary installation of CHG')
3105 vlog('# Performing temporary installation of CHG')
3101 assert os.path.dirname(self._bindir) == self._installdir
3106 assert os.path.dirname(self._bindir) == self._installdir
3102 assert self._hgroot, 'must be called after _installhg()'
3107 assert self._hgroot, 'must be called after _installhg()'
3103 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3108 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3104 % {b'make': 'make', # TODO: switch by option or environment?
3109 % {b'make': 'make', # TODO: switch by option or environment?
3105 b'prefix': self._installdir})
3110 b'prefix': self._installdir})
3106 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3111 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3107 vlog("# Running", cmd)
3112 vlog("# Running", cmd)
3108 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3113 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3109 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3114 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3110 stderr=subprocess.STDOUT)
3115 stderr=subprocess.STDOUT)
3111 out, _err = proc.communicate()
3116 out, _err = proc.communicate()
3112 if proc.returncode != 0:
3117 if proc.returncode != 0:
3113 if PYTHON3:
3118 if PYTHON3:
3114 sys.stdout.buffer.write(out)
3119 sys.stdout.buffer.write(out)
3115 else:
3120 else:
3116 sys.stdout.write(out)
3121 sys.stdout.write(out)
3117 sys.exit(1)
3122 sys.exit(1)
3118
3123
3119 def _outputcoverage(self):
3124 def _outputcoverage(self):
3120 """Produce code coverage output."""
3125 """Produce code coverage output."""
3121 import coverage
3126 import coverage
3122 coverage = coverage.coverage
3127 coverage = coverage.coverage
3123
3128
3124 vlog('# Producing coverage report')
3129 vlog('# Producing coverage report')
3125 # chdir is the easiest way to get short, relative paths in the
3130 # chdir is the easiest way to get short, relative paths in the
3126 # output.
3131 # output.
3127 os.chdir(self._hgroot)
3132 os.chdir(self._hgroot)
3128 covdir = os.path.join(self._installdir, '..', 'coverage')
3133 covdir = os.path.join(self._installdir, '..', 'coverage')
3129 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3134 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3130
3135
3131 # Map install directory paths back to source directory.
3136 # Map install directory paths back to source directory.
3132 cov.config.paths['srcdir'] = ['.', self._pythondir]
3137 cov.config.paths['srcdir'] = ['.', self._pythondir]
3133
3138
3134 cov.combine()
3139 cov.combine()
3135
3140
3136 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3141 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3137 cov.report(ignore_errors=True, omit=omit)
3142 cov.report(ignore_errors=True, omit=omit)
3138
3143
3139 if self.options.htmlcov:
3144 if self.options.htmlcov:
3140 htmldir = os.path.join(self._outputdir, 'htmlcov')
3145 htmldir = os.path.join(self._outputdir, 'htmlcov')
3141 cov.html_report(directory=htmldir, omit=omit)
3146 cov.html_report(directory=htmldir, omit=omit)
3142 if self.options.annotate:
3147 if self.options.annotate:
3143 adir = os.path.join(self._outputdir, 'annotated')
3148 adir = os.path.join(self._outputdir, 'annotated')
3144 if not os.path.isdir(adir):
3149 if not os.path.isdir(adir):
3145 os.mkdir(adir)
3150 os.mkdir(adir)
3146 cov.annotate(directory=adir, omit=omit)
3151 cov.annotate(directory=adir, omit=omit)
3147
3152
3148 def _findprogram(self, program):
3153 def _findprogram(self, program):
3149 """Search PATH for a executable program"""
3154 """Search PATH for a executable program"""
3150 dpb = _bytespath(os.defpath)
3155 dpb = _bytespath(os.defpath)
3151 sepb = _bytespath(os.pathsep)
3156 sepb = _bytespath(os.pathsep)
3152 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3157 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3153 name = os.path.join(p, program)
3158 name = os.path.join(p, program)
3154 if os.name == 'nt' or os.access(name, os.X_OK):
3159 if os.name == 'nt' or os.access(name, os.X_OK):
3155 return name
3160 return name
3156 return None
3161 return None
3157
3162
3158 def _checktools(self):
3163 def _checktools(self):
3159 """Ensure tools required to run tests are present."""
3164 """Ensure tools required to run tests are present."""
3160 for p in self.REQUIREDTOOLS:
3165 for p in self.REQUIREDTOOLS:
3161 if os.name == 'nt' and not p.endswith(b'.exe'):
3166 if os.name == 'nt' and not p.endswith(b'.exe'):
3162 p += b'.exe'
3167 p += b'.exe'
3163 found = self._findprogram(p)
3168 found = self._findprogram(p)
3164 if found:
3169 if found:
3165 vlog("# Found prerequisite", p, "at", found)
3170 vlog("# Found prerequisite", p, "at", found)
3166 else:
3171 else:
3167 print("WARNING: Did not find prerequisite tool: %s " %
3172 print("WARNING: Did not find prerequisite tool: %s " %
3168 p.decode("utf-8"))
3173 p.decode("utf-8"))
3169
3174
3170 def aggregateexceptions(path):
3175 def aggregateexceptions(path):
3171 exceptioncounts = collections.Counter()
3176 exceptioncounts = collections.Counter()
3172 testsbyfailure = collections.defaultdict(set)
3177 testsbyfailure = collections.defaultdict(set)
3173 failuresbytest = collections.defaultdict(set)
3178 failuresbytest = collections.defaultdict(set)
3174
3179
3175 for f in os.listdir(path):
3180 for f in os.listdir(path):
3176 with open(os.path.join(path, f), 'rb') as fh:
3181 with open(os.path.join(path, f), 'rb') as fh:
3177 data = fh.read().split(b'\0')
3182 data = fh.read().split(b'\0')
3178 if len(data) != 5:
3183 if len(data) != 5:
3179 continue
3184 continue
3180
3185
3181 exc, mainframe, hgframe, hgline, testname = data
3186 exc, mainframe, hgframe, hgline, testname = data
3182 exc = exc.decode('utf-8')
3187 exc = exc.decode('utf-8')
3183 mainframe = mainframe.decode('utf-8')
3188 mainframe = mainframe.decode('utf-8')
3184 hgframe = hgframe.decode('utf-8')
3189 hgframe = hgframe.decode('utf-8')
3185 hgline = hgline.decode('utf-8')
3190 hgline = hgline.decode('utf-8')
3186 testname = testname.decode('utf-8')
3191 testname = testname.decode('utf-8')
3187
3192
3188 key = (hgframe, hgline, exc)
3193 key = (hgframe, hgline, exc)
3189 exceptioncounts[key] += 1
3194 exceptioncounts[key] += 1
3190 testsbyfailure[key].add(testname)
3195 testsbyfailure[key].add(testname)
3191 failuresbytest[testname].add(key)
3196 failuresbytest[testname].add(key)
3192
3197
3193 # Find test having fewest failures for each failure.
3198 # Find test having fewest failures for each failure.
3194 leastfailing = {}
3199 leastfailing = {}
3195 for key, tests in testsbyfailure.items():
3200 for key, tests in testsbyfailure.items():
3196 fewesttest = None
3201 fewesttest = None
3197 fewestcount = 99999999
3202 fewestcount = 99999999
3198 for test in sorted(tests):
3203 for test in sorted(tests):
3199 if len(failuresbytest[test]) < fewestcount:
3204 if len(failuresbytest[test]) < fewestcount:
3200 fewesttest = test
3205 fewesttest = test
3201 fewestcount = len(failuresbytest[test])
3206 fewestcount = len(failuresbytest[test])
3202
3207
3203 leastfailing[key] = (fewestcount, fewesttest)
3208 leastfailing[key] = (fewestcount, fewesttest)
3204
3209
3205 # Create a combined counter so we can sort by total occurrences and
3210 # Create a combined counter so we can sort by total occurrences and
3206 # impacted tests.
3211 # impacted tests.
3207 combined = {}
3212 combined = {}
3208 for key in exceptioncounts:
3213 for key in exceptioncounts:
3209 combined[key] = (exceptioncounts[key],
3214 combined[key] = (exceptioncounts[key],
3210 len(testsbyfailure[key]),
3215 len(testsbyfailure[key]),
3211 leastfailing[key][0],
3216 leastfailing[key][0],
3212 leastfailing[key][1])
3217 leastfailing[key][1])
3213
3218
3214 return {
3219 return {
3215 'exceptioncounts': exceptioncounts,
3220 'exceptioncounts': exceptioncounts,
3216 'total': sum(exceptioncounts.values()),
3221 'total': sum(exceptioncounts.values()),
3217 'combined': combined,
3222 'combined': combined,
3218 'leastfailing': leastfailing,
3223 'leastfailing': leastfailing,
3219 'byfailure': testsbyfailure,
3224 'byfailure': testsbyfailure,
3220 'bytest': failuresbytest,
3225 'bytest': failuresbytest,
3221 }
3226 }
3222
3227
3223 if __name__ == '__main__':
3228 if __name__ == '__main__':
3224 runner = TestRunner()
3229 runner = TestRunner()
3225
3230
3226 try:
3231 try:
3227 import msvcrt
3232 import msvcrt
3228 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3233 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3229 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3234 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3230 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3235 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3231 except ImportError:
3236 except ImportError:
3232 pass
3237 pass
3233
3238
3234 sys.exit(runner.run(sys.argv[1:]))
3239 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1857 +1,1928 b''
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ . "$TESTDIR/helper-runtests.sh"
5 $ . "$TESTDIR/helper-runtests.sh"
6
6
7 Smoke test with install
7 Smoke test with install
8 ============
8 ============
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10
11
11 # Ran 0 tests, 0 skipped, 0 failed.
12 # Ran 0 tests, 0 skipped, 0 failed.
12
13
13 Define a helper to avoid the install step
14 Define a helper to avoid the install step
14 =============
15 =============
15 $ rt()
16 $ rt()
16 > {
17 > {
17 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 > }
19 > }
19
20
20 error paths
21 error paths
21
22
22 #if symlink
23 #if symlink
23 $ ln -s `which true` hg
24 $ ln -s `which true` hg
24 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 warning: --with-hg should specify an hg script
26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
26
28
27 # Ran 0 tests, 0 skipped, 0 failed.
29 # Ran 0 tests, 0 skipped, 0 failed.
28 $ rm hg
30 $ rm hg
29 #endif
31 #endif
30
32
31 #if execbit
33 #if execbit
32 $ touch hg
34 $ touch hg
33 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
34 usage: run-tests.py [options] [tests]
36 usage: run-tests.py [options] [tests]
35 run-tests.py: error: --with-hg must specify an executable hg script
37 run-tests.py: error: --with-hg must specify an executable hg script
36 [2]
38 [2]
37 $ rm hg
39 $ rm hg
38 #endif
40 #endif
39
41
40 Features for testing optional lines
42 Features for testing optional lines
41 ===================================
43 ===================================
42
44
43 $ cat > hghaveaddon.py <<EOF
45 $ cat > hghaveaddon.py <<EOF
44 > import hghave
46 > import hghave
45 > @hghave.check("custom", "custom hghave feature")
47 > @hghave.check("custom", "custom hghave feature")
46 > def has_custom():
48 > def has_custom():
47 > return True
49 > return True
48 > @hghave.check("missing", "missing hghave feature")
50 > @hghave.check("missing", "missing hghave feature")
49 > def has_missing():
51 > def has_missing():
50 > return False
52 > return False
51 > EOF
53 > EOF
52
54
53 an empty test
55 an empty test
54 =======================
56 =======================
55
57
56 $ touch test-empty.t
58 $ touch test-empty.t
57 $ rt
59 $ rt
60 running 1 tests using 1 parallel processes
58 .
61 .
59 # Ran 1 tests, 0 skipped, 0 failed.
62 # Ran 1 tests, 0 skipped, 0 failed.
60 $ rm test-empty.t
63 $ rm test-empty.t
61
64
62 a successful test
65 a successful test
63 =======================
66 =======================
64
67
65 $ cat > test-success.t << EOF
68 $ cat > test-success.t << EOF
66 > $ echo babar
69 > $ echo babar
67 > babar
70 > babar
68 > $ echo xyzzy
71 > $ echo xyzzy
69 > dont_print (?)
72 > dont_print (?)
70 > nothing[42]line (re) (?)
73 > nothing[42]line (re) (?)
71 > never*happens (glob) (?)
74 > never*happens (glob) (?)
72 > more_nothing (?)
75 > more_nothing (?)
73 > xyzzy
76 > xyzzy
74 > nor this (?)
77 > nor this (?)
75 > $ printf 'abc\ndef\nxyz\n'
78 > $ printf 'abc\ndef\nxyz\n'
76 > 123 (?)
79 > 123 (?)
77 > abc
80 > abc
78 > def (?)
81 > def (?)
79 > 456 (?)
82 > 456 (?)
80 > xyz
83 > xyz
81 > $ printf 'zyx\nwvu\ntsr\n'
84 > $ printf 'zyx\nwvu\ntsr\n'
82 > abc (?)
85 > abc (?)
83 > zyx (custom !)
86 > zyx (custom !)
84 > wvu
87 > wvu
85 > no_print (no-custom !)
88 > no_print (no-custom !)
86 > tsr (no-missing !)
89 > tsr (no-missing !)
87 > missing (missing !)
90 > missing (missing !)
88 > EOF
91 > EOF
89
92
90 $ rt
93 $ rt
94 running 1 tests using 1 parallel processes
91 .
95 .
92 # Ran 1 tests, 0 skipped, 0 failed.
96 # Ran 1 tests, 0 skipped, 0 failed.
93
97
94 failing test
98 failing test
95 ==================
99 ==================
96
100
97 test churn with globs
101 test churn with globs
98 $ cat > test-failure.t <<EOF
102 $ cat > test-failure.t <<EOF
99 > $ echo "bar-baz"; echo "bar-bad"; echo foo
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
100 > bar*bad (glob)
104 > bar*bad (glob)
101 > bar*baz (glob)
105 > bar*baz (glob)
102 > | fo (re)
106 > | fo (re)
103 > EOF
107 > EOF
104 $ rt test-failure.t
108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
105
110
106 --- $TESTTMP/test-failure.t
111 --- $TESTTMP/test-failure.t
107 +++ $TESTTMP/test-failure.t.err
112 +++ $TESTTMP/test-failure.t.err
108 @@ -1,4 +1,4 @@
113 @@ -1,4 +1,4 @@
109 $ echo "bar-baz"; echo "bar-bad"; echo foo
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
110 + bar*baz (glob)
115 + bar*baz (glob)
111 bar*bad (glob)
116 bar*bad (glob)
112 - bar*baz (glob)
117 - bar*baz (glob)
113 - | fo (re)
118 - | fo (re)
114 + foo
119 + foo
115
120
116 ERROR: test-failure.t output changed
121 ERROR: test-failure.t output changed
117 !
122 !
118 Failed test-failure.t: output changed
123 Failed test-failure.t: output changed
119 # Ran 1 tests, 0 skipped, 1 failed.
124 # Ran 1 tests, 0 skipped, 1 failed.
120 python hash seed: * (glob)
125 python hash seed: * (glob)
121 [1]
126 [1]
122
127
123 test how multiple globs get matched with lines in output
128 test how multiple globs get matched with lines in output
124 $ cat > test-failure-globs.t <<EOF
129 $ cat > test-failure-globs.t <<EOF
125 > $ echo "context"; echo "context"; \
130 > $ echo "context"; echo "context"; \
126 > echo "key: 1"; echo "value: not a"; \
131 > echo "key: 1"; echo "value: not a"; \
127 > echo "key: 2"; echo "value: not b"; \
132 > echo "key: 2"; echo "value: not b"; \
128 > echo "key: 3"; echo "value: c"; \
133 > echo "key: 3"; echo "value: c"; \
129 > echo "key: 4"; echo "value: d"
134 > echo "key: 4"; echo "value: d"
130 > context
135 > context
131 > context
136 > context
132 > key: 1
137 > key: 1
133 > value: a
138 > value: a
134 > key: 2
139 > key: 2
135 > value: b
140 > value: b
136 > key: 3
141 > key: 3
137 > value: * (glob)
142 > value: * (glob)
138 > key: 4
143 > key: 4
139 > value: * (glob)
144 > value: * (glob)
140 > EOF
145 > EOF
141 $ rt test-failure-globs.t
146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
142
148
143 --- $TESTTMP/test-failure-globs.t
149 --- $TESTTMP/test-failure-globs.t
144 +++ $TESTTMP/test-failure-globs.t.err
150 +++ $TESTTMP/test-failure-globs.t.err
145 @@ -2,9 +2,9 @@
151 @@ -2,9 +2,9 @@
146 context
152 context
147 context
153 context
148 key: 1
154 key: 1
149 - value: a
155 - value: a
150 + value: not a
156 + value: not a
151 key: 2
157 key: 2
152 - value: b
158 - value: b
153 + value: not b
159 + value: not b
154 key: 3
160 key: 3
155 value: * (glob)
161 value: * (glob)
156 key: 4
162 key: 4
157
163
158 ERROR: test-failure-globs.t output changed
164 ERROR: test-failure-globs.t output changed
159 !
165 !
160 Failed test-failure-globs.t: output changed
166 Failed test-failure-globs.t: output changed
161 # Ran 1 tests, 0 skipped, 1 failed.
167 # Ran 1 tests, 0 skipped, 1 failed.
162 python hash seed: * (glob)
168 python hash seed: * (glob)
163 [1]
169 [1]
164 $ rm test-failure-globs.t
170 $ rm test-failure-globs.t
165
171
166 test diff colorisation
172 test diff colorisation
167
173
168 #if no-windows pygments
174 #if no-windows pygments
169 $ rt test-failure.t --color always
175 $ rt test-failure.t --color always
170
176
171 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
177 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
172 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
178 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
173 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
179 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
174 $ echo "bar-baz"; echo "bar-bad"; echo foo
180 $ echo "bar-baz"; echo "bar-bad"; echo foo
175 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
181 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
176 bar*bad (glob)
182 bar*bad (glob)
177 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
183 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
178 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
184 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
179 \x1b[38;5;34m+ foo\x1b[39m (esc)
185 \x1b[38;5;34m+ foo\x1b[39m (esc)
180
186
181 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
187 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
182 !
188 !
183 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
189 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
184 # Ran 1 tests, 0 skipped, 1 failed.
190 # Ran 1 tests, 0 skipped, 1 failed.
185 python hash seed: * (glob)
191 python hash seed: * (glob)
186 [1]
192 [1]
187
193
188 $ rt test-failure.t 2> tmp.log
194 $ rt test-failure.t 2> tmp.log
189 [1]
195 [1]
190 $ cat tmp.log
196 $ cat tmp.log
191
197
192 --- $TESTTMP/test-failure.t
198 --- $TESTTMP/test-failure.t
193 +++ $TESTTMP/test-failure.t.err
199 +++ $TESTTMP/test-failure.t.err
194 @@ -1,4 +1,4 @@
200 @@ -1,4 +1,4 @@
195 $ echo "bar-baz"; echo "bar-bad"; echo foo
201 $ echo "bar-baz"; echo "bar-bad"; echo foo
196 + bar*baz (glob)
202 + bar*baz (glob)
197 bar*bad (glob)
203 bar*bad (glob)
198 - bar*baz (glob)
204 - bar*baz (glob)
199 - | fo (re)
205 - | fo (re)
200 + foo
206 + foo
201
207
202 ERROR: test-failure.t output changed
208 ERROR: test-failure.t output changed
203 !
209 !
204 Failed test-failure.t: output changed
210 Failed test-failure.t: output changed
205 # Ran 1 tests, 0 skipped, 1 failed.
211 # Ran 1 tests, 0 skipped, 1 failed.
206 python hash seed: * (glob)
212 python hash seed: * (glob)
207 #endif
213 #endif
208
214
209 $ cat > test-failure.t << EOF
215 $ cat > test-failure.t << EOF
210 > $ true
216 > $ true
211 > should go away (true !)
217 > should go away (true !)
212 > $ true
218 > $ true
213 > should stay (false !)
219 > should stay (false !)
214 >
220 >
215 > Should remove first line, not second or third
221 > Should remove first line, not second or third
216 > $ echo 'testing'
222 > $ echo 'testing'
217 > baz*foo (glob) (true !)
223 > baz*foo (glob) (true !)
218 > foobar*foo (glob) (false !)
224 > foobar*foo (glob) (false !)
219 > te*ting (glob) (true !)
225 > te*ting (glob) (true !)
220 >
226 >
221 > Should keep first two lines, remove third and last
227 > Should keep first two lines, remove third and last
222 > $ echo 'testing'
228 > $ echo 'testing'
223 > test.ng (re) (true !)
229 > test.ng (re) (true !)
224 > foo.ar (re) (false !)
230 > foo.ar (re) (false !)
225 > b.r (re) (true !)
231 > b.r (re) (true !)
226 > missing (?)
232 > missing (?)
227 > awol (true !)
233 > awol (true !)
228 >
234 >
229 > The "missing" line should stay, even though awol is dropped
235 > The "missing" line should stay, even though awol is dropped
230 > $ echo 'testing'
236 > $ echo 'testing'
231 > test.ng (re) (true !)
237 > test.ng (re) (true !)
232 > foo.ar (?)
238 > foo.ar (?)
233 > awol
239 > awol
234 > missing (?)
240 > missing (?)
235 > EOF
241 > EOF
236 $ rt test-failure.t
242 $ rt test-failure.t
243 running 1 tests using 1 parallel processes
237
244
238 --- $TESTTMP/test-failure.t
245 --- $TESTTMP/test-failure.t
239 +++ $TESTTMP/test-failure.t.err
246 +++ $TESTTMP/test-failure.t.err
240 @@ -1,11 +1,9 @@
247 @@ -1,11 +1,9 @@
241 $ true
248 $ true
242 - should go away (true !)
249 - should go away (true !)
243 $ true
250 $ true
244 should stay (false !)
251 should stay (false !)
245
252
246 Should remove first line, not second or third
253 Should remove first line, not second or third
247 $ echo 'testing'
254 $ echo 'testing'
248 - baz*foo (glob) (true !)
255 - baz*foo (glob) (true !)
249 foobar*foo (glob) (false !)
256 foobar*foo (glob) (false !)
250 te*ting (glob) (true !)
257 te*ting (glob) (true !)
251
258
252 foo.ar (re) (false !)
259 foo.ar (re) (false !)
253 missing (?)
260 missing (?)
254 @@ -13,13 +11,10 @@
261 @@ -13,13 +11,10 @@
255 $ echo 'testing'
262 $ echo 'testing'
256 test.ng (re) (true !)
263 test.ng (re) (true !)
257 foo.ar (re) (false !)
264 foo.ar (re) (false !)
258 - b.r (re) (true !)
265 - b.r (re) (true !)
259 missing (?)
266 missing (?)
260 - awol (true !)
267 - awol (true !)
261
268
262 The "missing" line should stay, even though awol is dropped
269 The "missing" line should stay, even though awol is dropped
263 $ echo 'testing'
270 $ echo 'testing'
264 test.ng (re) (true !)
271 test.ng (re) (true !)
265 foo.ar (?)
272 foo.ar (?)
266 - awol
273 - awol
267 missing (?)
274 missing (?)
268
275
269 ERROR: test-failure.t output changed
276 ERROR: test-failure.t output changed
270 !
277 !
271 Failed test-failure.t: output changed
278 Failed test-failure.t: output changed
272 # Ran 1 tests, 0 skipped, 1 failed.
279 # Ran 1 tests, 0 skipped, 1 failed.
273 python hash seed: * (glob)
280 python hash seed: * (glob)
274 [1]
281 [1]
275
282
276 basic failing test
283 basic failing test
277 $ cat > test-failure.t << EOF
284 $ cat > test-failure.t << EOF
278 > $ echo babar
285 > $ echo babar
279 > rataxes
286 > rataxes
280 > This is a noop statement so that
287 > This is a noop statement so that
281 > this test is still more bytes than success.
288 > this test is still more bytes than success.
282 > pad pad pad pad............................................................
289 > pad pad pad pad............................................................
283 > pad pad pad pad............................................................
290 > pad pad pad pad............................................................
284 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
285 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
286 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
287 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
288 > EOF
295 > EOF
289
296
290 >>> fh = open('test-failure-unicode.t', 'wb')
297 >>> fh = open('test-failure-unicode.t', 'wb')
291 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
298 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
292 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
299 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
293
300
294 $ rt
301 $ rt
302 running 3 tests using 1 parallel processes
295
303
296 --- $TESTTMP/test-failure.t
304 --- $TESTTMP/test-failure.t
297 +++ $TESTTMP/test-failure.t.err
305 +++ $TESTTMP/test-failure.t.err
298 @@ -1,5 +1,5 @@
306 @@ -1,5 +1,5 @@
299 $ echo babar
307 $ echo babar
300 - rataxes
308 - rataxes
301 + babar
309 + babar
302 This is a noop statement so that
310 This is a noop statement so that
303 this test is still more bytes than success.
311 this test is still more bytes than success.
304 pad pad pad pad............................................................
312 pad pad pad pad............................................................
305
313
306 ERROR: test-failure.t output changed
314 ERROR: test-failure.t output changed
307 !.
315 !.
308 --- $TESTTMP/test-failure-unicode.t
316 --- $TESTTMP/test-failure-unicode.t
309 +++ $TESTTMP/test-failure-unicode.t.err
317 +++ $TESTTMP/test-failure-unicode.t.err
310 @@ -1,2 +1,2 @@
318 @@ -1,2 +1,2 @@
311 $ echo babar\xce\xb1 (esc)
319 $ echo babar\xce\xb1 (esc)
312 - l\xce\xb5\xce\xb5t (esc)
320 - l\xce\xb5\xce\xb5t (esc)
313 + babar\xce\xb1 (esc)
321 + babar\xce\xb1 (esc)
314
322
315 ERROR: test-failure-unicode.t output changed
323 ERROR: test-failure-unicode.t output changed
316 !
324 !
317 Failed test-failure.t: output changed
325 Failed test-failure.t: output changed
318 Failed test-failure-unicode.t: output changed
326 Failed test-failure-unicode.t: output changed
319 # Ran 3 tests, 0 skipped, 2 failed.
327 # Ran 3 tests, 0 skipped, 2 failed.
320 python hash seed: * (glob)
328 python hash seed: * (glob)
321 [1]
329 [1]
322
330
323 test --outputdir
331 test --outputdir
324 $ mkdir output
332 $ mkdir output
325 $ rt --outputdir output
333 $ rt --outputdir output
334 running 3 tests using 1 parallel processes
326
335
327 --- $TESTTMP/test-failure.t
336 --- $TESTTMP/test-failure.t
328 +++ $TESTTMP/output/test-failure.t.err
337 +++ $TESTTMP/output/test-failure.t.err
329 @@ -1,5 +1,5 @@
338 @@ -1,5 +1,5 @@
330 $ echo babar
339 $ echo babar
331 - rataxes
340 - rataxes
332 + babar
341 + babar
333 This is a noop statement so that
342 This is a noop statement so that
334 this test is still more bytes than success.
343 this test is still more bytes than success.
335 pad pad pad pad............................................................
344 pad pad pad pad............................................................
336
345
337 ERROR: test-failure.t output changed
346 ERROR: test-failure.t output changed
338 !.
347 !.
339 --- $TESTTMP/test-failure-unicode.t
348 --- $TESTTMP/test-failure-unicode.t
340 +++ $TESTTMP/output/test-failure-unicode.t.err
349 +++ $TESTTMP/output/test-failure-unicode.t.err
341 @@ -1,2 +1,2 @@
350 @@ -1,2 +1,2 @@
342 $ echo babar\xce\xb1 (esc)
351 $ echo babar\xce\xb1 (esc)
343 - l\xce\xb5\xce\xb5t (esc)
352 - l\xce\xb5\xce\xb5t (esc)
344 + babar\xce\xb1 (esc)
353 + babar\xce\xb1 (esc)
345
354
346 ERROR: test-failure-unicode.t output changed
355 ERROR: test-failure-unicode.t output changed
347 !
356 !
348 Failed test-failure.t: output changed
357 Failed test-failure.t: output changed
349 Failed test-failure-unicode.t: output changed
358 Failed test-failure-unicode.t: output changed
350 # Ran 3 tests, 0 skipped, 2 failed.
359 # Ran 3 tests, 0 skipped, 2 failed.
351 python hash seed: * (glob)
360 python hash seed: * (glob)
352 [1]
361 [1]
353 $ ls -a output
362 $ ls -a output
354 .
363 .
355 ..
364 ..
356 .testtimes
365 .testtimes
357 test-failure-unicode.t.err
366 test-failure-unicode.t.err
358 test-failure.t.err
367 test-failure.t.err
359
368
360 test --xunit support
369 test --xunit support
361 $ rt --xunit=xunit.xml
370 $ rt --xunit=xunit.xml
371 running 3 tests using 1 parallel processes
362
372
363 --- $TESTTMP/test-failure.t
373 --- $TESTTMP/test-failure.t
364 +++ $TESTTMP/test-failure.t.err
374 +++ $TESTTMP/test-failure.t.err
365 @@ -1,5 +1,5 @@
375 @@ -1,5 +1,5 @@
366 $ echo babar
376 $ echo babar
367 - rataxes
377 - rataxes
368 + babar
378 + babar
369 This is a noop statement so that
379 This is a noop statement so that
370 this test is still more bytes than success.
380 this test is still more bytes than success.
371 pad pad pad pad............................................................
381 pad pad pad pad............................................................
372
382
373 ERROR: test-failure.t output changed
383 ERROR: test-failure.t output changed
374 !.
384 !.
375 --- $TESTTMP/test-failure-unicode.t
385 --- $TESTTMP/test-failure-unicode.t
376 +++ $TESTTMP/test-failure-unicode.t.err
386 +++ $TESTTMP/test-failure-unicode.t.err
377 @@ -1,2 +1,2 @@
387 @@ -1,2 +1,2 @@
378 $ echo babar\xce\xb1 (esc)
388 $ echo babar\xce\xb1 (esc)
379 - l\xce\xb5\xce\xb5t (esc)
389 - l\xce\xb5\xce\xb5t (esc)
380 + babar\xce\xb1 (esc)
390 + babar\xce\xb1 (esc)
381
391
382 ERROR: test-failure-unicode.t output changed
392 ERROR: test-failure-unicode.t output changed
383 !
393 !
384 Failed test-failure.t: output changed
394 Failed test-failure.t: output changed
385 Failed test-failure-unicode.t: output changed
395 Failed test-failure-unicode.t: output changed
386 # Ran 3 tests, 0 skipped, 2 failed.
396 # Ran 3 tests, 0 skipped, 2 failed.
387 python hash seed: * (glob)
397 python hash seed: * (glob)
388 [1]
398 [1]
389 $ cat xunit.xml
399 $ cat xunit.xml
390 <?xml version="1.0" encoding="utf-8"?>
400 <?xml version="1.0" encoding="utf-8"?>
391 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
401 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
392 <testcase name="test-success.t" time="*"/> (glob)
402 <testcase name="test-success.t" time="*"/> (glob)
393 <testcase name="test-failure-unicode.t" time="*"> (glob)
403 <testcase name="test-failure-unicode.t" time="*"> (glob)
394 <failure message="output changed" type="output-mismatch">
404 <failure message="output changed" type="output-mismatch">
395 <![CDATA[--- $TESTTMP/test-failure-unicode.t
405 <![CDATA[--- $TESTTMP/test-failure-unicode.t
396 +++ $TESTTMP/test-failure-unicode.t.err
406 +++ $TESTTMP/test-failure-unicode.t.err
397 @@ -1,2 +1,2 @@
407 @@ -1,2 +1,2 @@
398 $ echo babar\xce\xb1 (esc)
408 $ echo babar\xce\xb1 (esc)
399 - l\xce\xb5\xce\xb5t (esc)
409 - l\xce\xb5\xce\xb5t (esc)
400 + babar\xce\xb1 (esc)
410 + babar\xce\xb1 (esc)
401 ]]> </failure>
411 ]]> </failure>
402 </testcase>
412 </testcase>
403 <testcase name="test-failure.t" time="*"> (glob)
413 <testcase name="test-failure.t" time="*"> (glob)
404 <failure message="output changed" type="output-mismatch">
414 <failure message="output changed" type="output-mismatch">
405 <![CDATA[--- $TESTTMP/test-failure.t
415 <![CDATA[--- $TESTTMP/test-failure.t
406 +++ $TESTTMP/test-failure.t.err
416 +++ $TESTTMP/test-failure.t.err
407 @@ -1,5 +1,5 @@
417 @@ -1,5 +1,5 @@
408 $ echo babar
418 $ echo babar
409 - rataxes
419 - rataxes
410 + babar
420 + babar
411 This is a noop statement so that
421 This is a noop statement so that
412 this test is still more bytes than success.
422 this test is still more bytes than success.
413 pad pad pad pad............................................................
423 pad pad pad pad............................................................
414 ]]> </failure>
424 ]]> </failure>
415 </testcase>
425 </testcase>
416 </testsuite>
426 </testsuite>
417
427
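The xunit.xml above uses the common JUnit-style layout: a <testsuite> element
carrying tests/failures/skipped counts, one <testcase> per test, and a
<failure> child holding the diff for failing tests. That makes it easy to
post-process with ordinary XML tooling; a minimal sketch using only the
Python standard library (illustrative, not part of this test):

    import xml.etree.ElementTree as ET

    def summarize(path='xunit.xml'):
        # The root <testsuite> element carries the aggregate counters.
        suite = ET.parse(path).getroot()
        print('tests=%s failures=%s skipped=%s' % (
            suite.get('tests'), suite.get('failures'), suite.get('skipped')))
        # Failing <testcase> elements carry a <failure> child with the diff.
        for case in suite.findall('testcase'):
            failure = case.find('failure')
            if failure is not None:
                print('%s: %s' % (case.get('name'), failure.get('message')))

    summarize()
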
418 $ cat .testtimes
428 $ cat .testtimes
419 test-empty.t * (glob)
429 test-empty.t * (glob)
420 test-failure-globs.t * (glob)
430 test-failure-globs.t * (glob)
421 test-failure-unicode.t * (glob)
431 test-failure-unicode.t * (glob)
422 test-failure.t * (glob)
432 test-failure.t * (glob)
423 test-success.t * (glob)
433 test-success.t * (glob)
424
434
425 $ rt --list-tests
435 $ rt --list-tests
426 test-failure-unicode.t
436 test-failure-unicode.t
427 test-failure.t
437 test-failure.t
428 test-success.t
438 test-success.t
429
439
430 $ rt --list-tests --json
440 $ rt --list-tests --json
431 test-failure-unicode.t
441 test-failure-unicode.t
432 test-failure.t
442 test-failure.t
433 test-success.t
443 test-success.t
434 $ cat report.json
444 $ cat report.json
435 testreport ={
445 testreport ={
436 "test-failure-unicode.t": {
446 "test-failure-unicode.t": {
437 "result": "success"
447 "result": "success"
438 },
448 },
439 "test-failure.t": {
449 "test-failure.t": {
440 "result": "success"
450 "result": "success"
441 },
451 },
442 "test-success.t": {
452 "test-success.t": {
443 "result": "success"
453 "result": "success"
444 }
454 }
445 } (no-eol)
455 } (no-eol)
446
456
447 $ rt --list-tests --xunit=xunit.xml
457 $ rt --list-tests --xunit=xunit.xml
448 test-failure-unicode.t
458 test-failure-unicode.t
449 test-failure.t
459 test-failure.t
450 test-success.t
460 test-success.t
451 $ cat xunit.xml
461 $ cat xunit.xml
452 <?xml version="1.0" encoding="utf-8"?>
462 <?xml version="1.0" encoding="utf-8"?>
453 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
463 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
454 <testcase name="test-failure-unicode.t"/>
464 <testcase name="test-failure-unicode.t"/>
455 <testcase name="test-failure.t"/>
465 <testcase name="test-failure.t"/>
456 <testcase name="test-success.t"/>
466 <testcase name="test-success.t"/>
457 </testsuite>
467 </testsuite>
458
468
459 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
469 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
460 test-failure-unicode.t
470 test-failure-unicode.t
461 test-failure.t
471 test-failure.t
462 $ cat output/report.json
472 $ cat output/report.json
463 testreport ={
473 testreport ={
464 "test-failure-unicode.t": {
474 "test-failure-unicode.t": {
465 "result": "success"
475 "result": "success"
466 },
476 },
467 "test-failure.t": {
477 "test-failure.t": {
468 "result": "success"
478 "result": "success"
469 }
479 }
470 } (no-eol)
480 } (no-eol)
471 $ cat xunit.xml
481 $ cat xunit.xml
472 <?xml version="1.0" encoding="utf-8"?>
482 <?xml version="1.0" encoding="utf-8"?>
473 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
483 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
474 <testcase name="test-failure-unicode.t"/>
484 <testcase name="test-failure-unicode.t"/>
475 <testcase name="test-failure.t"/>
485 <testcase name="test-failure.t"/>
476 </testsuite>
486 </testsuite>
477
487
478 $ rm test-failure-unicode.t
488 $ rm test-failure-unicode.t
479
489
480 test for --retest
490 test for --retest
481 ====================
491 ====================
482
492
483 $ rt --retest
493 $ rt --retest
494 running 2 tests using 1 parallel processes
484
495
485 --- $TESTTMP/test-failure.t
496 --- $TESTTMP/test-failure.t
486 +++ $TESTTMP/test-failure.t.err
497 +++ $TESTTMP/test-failure.t.err
487 @@ -1,5 +1,5 @@
498 @@ -1,5 +1,5 @@
488 $ echo babar
499 $ echo babar
489 - rataxes
500 - rataxes
490 + babar
501 + babar
491 This is a noop statement so that
502 This is a noop statement so that
492 this test is still more bytes than success.
503 this test is still more bytes than success.
493 pad pad pad pad............................................................
504 pad pad pad pad............................................................
494
505
495 ERROR: test-failure.t output changed
506 ERROR: test-failure.t output changed
496 !
507 !
497 Failed test-failure.t: output changed
508 Failed test-failure.t: output changed
498 # Ran 2 tests, 1 skipped, 1 failed.
509 # Ran 2 tests, 1 skipped, 1 failed.
499 python hash seed: * (glob)
510 python hash seed: * (glob)
500 [1]
511 [1]
501
512
502 --retest works with --outputdir
513 --retest works with --outputdir
503 $ rm -r output
514 $ rm -r output
504 $ mkdir output
515 $ mkdir output
505 $ mv test-failure.t.err output
516 $ mv test-failure.t.err output
506 $ rt --retest --outputdir output
517 $ rt --retest --outputdir output
518 running 2 tests using 1 parallel processes
507
519
508 --- $TESTTMP/test-failure.t
520 --- $TESTTMP/test-failure.t
509 +++ $TESTTMP/output/test-failure.t.err
521 +++ $TESTTMP/output/test-failure.t.err
510 @@ -1,5 +1,5 @@
522 @@ -1,5 +1,5 @@
511 $ echo babar
523 $ echo babar
512 - rataxes
524 - rataxes
513 + babar
525 + babar
514 This is a noop statement so that
526 This is a noop statement so that
515 this test is still more bytes than success.
527 this test is still more bytes than success.
516 pad pad pad pad............................................................
528 pad pad pad pad............................................................
517
529
518 ERROR: test-failure.t output changed
530 ERROR: test-failure.t output changed
519 !
531 !
520 Failed test-failure.t: output changed
532 Failed test-failure.t: output changed
521 # Ran 2 tests, 1 skipped, 1 failed.
533 # Ran 2 tests, 1 skipped, 1 failed.
522 python hash seed: * (glob)
534 python hash seed: * (glob)
523 [1]
535 [1]
524
536
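The two runs above suggest how --retest selects its work: only tests that
left a .err file behind from a previous failing run (in the current
directory, or in --outputdir when given) are rerun, and everything else is
reported as skipped. A minimal sketch of that selection idea, under that
assumption (not run-tests.py's actual code):

    import os

    def retest_candidates(tests, outputdir='.'):
        # Keep only tests whose previous run left a .err file behind.
        return [t for t in tests
                if os.path.exists(os.path.join(outputdir, t + '.err'))]
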
525 Selecting Tests To Run
537 Selecting Tests To Run
526 ======================
538 ======================
527
539
528 successful
540 successful
529
541
530 $ rt test-success.t
542 $ rt test-success.t
543 running 1 tests using 1 parallel processes
531 .
544 .
532 # Ran 1 tests, 0 skipped, 0 failed.
545 # Ran 1 tests, 0 skipped, 0 failed.
533
546
534 success w/ keyword
547 success w/ keyword
535 $ rt -k xyzzy
548 $ rt -k xyzzy
549 running 2 tests using 1 parallel processes
536 .
550 .
537 # Ran 2 tests, 1 skipped, 0 failed.
551 # Ran 2 tests, 1 skipped, 0 failed.
538
552
539 failed
553 failed
540
554
541 $ rt test-failure.t
555 $ rt test-failure.t
556 running 1 tests using 1 parallel processes
542
557
543 --- $TESTTMP/test-failure.t
558 --- $TESTTMP/test-failure.t
544 +++ $TESTTMP/test-failure.t.err
559 +++ $TESTTMP/test-failure.t.err
545 @@ -1,5 +1,5 @@
560 @@ -1,5 +1,5 @@
546 $ echo babar
561 $ echo babar
547 - rataxes
562 - rataxes
548 + babar
563 + babar
549 This is a noop statement so that
564 This is a noop statement so that
550 this test is still more bytes than success.
565 this test is still more bytes than success.
551 pad pad pad pad............................................................
566 pad pad pad pad............................................................
552
567
553 ERROR: test-failure.t output changed
568 ERROR: test-failure.t output changed
554 !
569 !
555 Failed test-failure.t: output changed
570 Failed test-failure.t: output changed
556 # Ran 1 tests, 0 skipped, 1 failed.
571 # Ran 1 tests, 0 skipped, 1 failed.
557 python hash seed: * (glob)
572 python hash seed: * (glob)
558 [1]
573 [1]
559
574
560 failure w/ keyword
575 failure w/ keyword
561 $ rt -k rataxes
576 $ rt -k rataxes
577 running 2 tests using 1 parallel processes
562
578
563 --- $TESTTMP/test-failure.t
579 --- $TESTTMP/test-failure.t
564 +++ $TESTTMP/test-failure.t.err
580 +++ $TESTTMP/test-failure.t.err
565 @@ -1,5 +1,5 @@
581 @@ -1,5 +1,5 @@
566 $ echo babar
582 $ echo babar
567 - rataxes
583 - rataxes
568 + babar
584 + babar
569 This is a noop statement so that
585 This is a noop statement so that
570 this test is still more bytes than success.
586 this test is still more bytes than success.
571 pad pad pad pad............................................................
587 pad pad pad pad............................................................
572
588
573 ERROR: test-failure.t output changed
589 ERROR: test-failure.t output changed
574 !
590 !
575 Failed test-failure.t: output changed
591 Failed test-failure.t: output changed
576 # Ran 2 tests, 1 skipped, 1 failed.
592 # Ran 2 tests, 1 skipped, 1 failed.
577 python hash seed: * (glob)
593 python hash seed: * (glob)
578 [1]
594 [1]
579
595
580 Verify that when a process fails to start we show a useful message
596 Verify that when a process fails to start we show a useful message
581 ==================================================================
597 ==================================================================
582
598
583 $ cat > test-serve-fail.t <<EOF
599 $ cat > test-serve-fail.t <<EOF
584 > $ echo 'abort: child process failed to start blah'
600 > $ echo 'abort: child process failed to start blah'
585 > EOF
601 > EOF
586 $ rt test-serve-fail.t
602 $ rt test-serve-fail.t
603 running 1 tests using 1 parallel processes
587
604
588 --- $TESTTMP/test-serve-fail.t
605 --- $TESTTMP/test-serve-fail.t
589 +++ $TESTTMP/test-serve-fail.t.err
606 +++ $TESTTMP/test-serve-fail.t.err
590 @@ -1* +1,2 @@ (glob)
607 @@ -1* +1,2 @@ (glob)
591 $ echo 'abort: child process failed to start blah'
608 $ echo 'abort: child process failed to start blah'
592 + abort: child process failed to start blah
609 + abort: child process failed to start blah
593
610
594 ERROR: test-serve-fail.t output changed
611 ERROR: test-serve-fail.t output changed
595 !
612 !
596 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
613 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
597 # Ran 1 tests, 0 skipped, 1 failed.
614 # Ran 1 tests, 0 skipped, 1 failed.
598 python hash seed: * (glob)
615 python hash seed: * (glob)
599 [1]
616 [1]
600 $ rm test-serve-fail.t
617 $ rm test-serve-fail.t
601
618
602 Verify that we can try other ports
619 Verify that we can try other ports
603 ===================================
620 ===================================
604
621
605 Extensions aren't inherited by the invoked run-tests.py. An extension
622 Extensions aren't inherited by the invoked run-tests.py. An extension
606 introducing a repository requirement could cause this to fail. So we force
623 introducing a repository requirement could cause this to fail. So we force
607 HGRCPATH to get a clean environment.
624 HGRCPATH to get a clean environment.
608
625
609 $ HGRCPATH= hg init inuse
626 $ HGRCPATH= hg init inuse
610 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
627 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
611 $ cat blocks.pid >> $DAEMON_PIDS
628 $ cat blocks.pid >> $DAEMON_PIDS
612 $ cat > test-serve-inuse.t <<EOF
629 $ cat > test-serve-inuse.t <<EOF
613 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
630 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
614 > $ cat hg.pid >> \$DAEMON_PIDS
631 > $ cat hg.pid >> \$DAEMON_PIDS
615 > EOF
632 > EOF
616 $ rt test-serve-inuse.t
633 $ rt test-serve-inuse.t
634 running 1 tests using 1 parallel processes
617 .
635 .
618 # Ran 1 tests, 0 skipped, 0 failed.
636 # Ran 1 tests, 0 skipped, 0 failed.
619 $ rm test-serve-inuse.t
637 $ rm test-serve-inuse.t
620 $ killdaemons.py $DAEMON_PIDS
638 $ killdaemons.py $DAEMON_PIDS
621
639
622 Running In Debug Mode
640 Running In Debug Mode
623 ======================
641 ======================
624
642
625 $ rt --debug 2>&1 | grep -v pwd
643 $ rt --debug 2>&1 | grep -v pwd
644 running 2 tests using 1 parallel processes
626 + echo *SALT* 0 0 (glob)
645 + echo *SALT* 0 0 (glob)
627 *SALT* 0 0 (glob)
646 *SALT* 0 0 (glob)
628 + echo babar
647 + echo babar
629 babar
648 babar
630 + echo *SALT* 10 0 (glob)
649 + echo *SALT* 10 0 (glob)
631 *SALT* 10 0 (glob)
650 *SALT* 10 0 (glob)
632 *+ echo *SALT* 0 0 (glob)
651 *+ echo *SALT* 0 0 (glob)
633 *SALT* 0 0 (glob)
652 *SALT* 0 0 (glob)
634 + echo babar
653 + echo babar
635 babar
654 babar
636 + echo *SALT* 2 0 (glob)
655 + echo *SALT* 2 0 (glob)
637 *SALT* 2 0 (glob)
656 *SALT* 2 0 (glob)
638 + echo xyzzy
657 + echo xyzzy
639 xyzzy
658 xyzzy
640 + echo *SALT* 9 0 (glob)
659 + echo *SALT* 9 0 (glob)
641 *SALT* 9 0 (glob)
660 *SALT* 9 0 (glob)
642 + printf *abc\ndef\nxyz\n* (glob)
661 + printf *abc\ndef\nxyz\n* (glob)
643 abc
662 abc
644 def
663 def
645 xyz
664 xyz
646 + echo *SALT* 15 0 (glob)
665 + echo *SALT* 15 0 (glob)
647 *SALT* 15 0 (glob)
666 *SALT* 15 0 (glob)
648 + printf *zyx\nwvu\ntsr\n* (glob)
667 + printf *zyx\nwvu\ntsr\n* (glob)
649 zyx
668 zyx
650 wvu
669 wvu
651 tsr
670 tsr
652 + echo *SALT* 22 0 (glob)
671 + echo *SALT* 22 0 (glob)
653 *SALT* 22 0 (glob)
672 *SALT* 22 0 (glob)
654 .
673 .
655 # Ran 2 tests, 0 skipped, 0 failed.
674 # Ran 2 tests, 0 skipped, 0 failed.
656
675
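In debug mode the generated shell script is simply echoed as it runs. The
*SALT* marker lines are what normally let the runner split the combined
output back into per-command chunks and recover each command's status; a
rough sketch of that splitting idea (the exact marker format is assumed here
for illustration):

    def split_by_salt(lines, salt='SALT'):
        # Group output lines under the most recently seen salt marker.
        chunks, current = {}, None
        for line in lines:
            if line.startswith(salt):
                current = int(line.split()[1])
                chunks[current] = []
            elif current is not None:
                chunks[current].append(line)
        return chunks
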
657 Parallel runs
676 Parallel runs
658 ==============
677 ==============
659
678
660 (duplicate the failing test to get predictable output)
679 (duplicate the failing test to get predictable output)
661 $ cp test-failure.t test-failure-copy.t
680 $ cp test-failure.t test-failure-copy.t
662
681
663 $ rt --jobs 2 test-failure*.t -n
682 $ rt --jobs 2 test-failure*.t -n
683 running 2 tests using 2 parallel processes
664 !!
684 !!
665 Failed test-failure*.t: output changed (glob)
685 Failed test-failure*.t: output changed (glob)
666 Failed test-failure*.t: output changed (glob)
686 Failed test-failure*.t: output changed (glob)
667 # Ran 2 tests, 0 skipped, 2 failed.
687 # Ran 2 tests, 0 skipped, 2 failed.
668 python hash seed: * (glob)
688 python hash seed: * (glob)
669 [1]
689 [1]
670
690
671 failures in parallel with --first should only print one failure
691 failures in parallel with --first should only print one failure
672 $ rt --jobs 2 --first test-failure*.t
692 $ rt --jobs 2 --first test-failure*.t
693 running 2 tests using 2 parallel processes
673
694
674 --- $TESTTMP/test-failure*.t (glob)
695 --- $TESTTMP/test-failure*.t (glob)
675 +++ $TESTTMP/test-failure*.t.err (glob)
696 +++ $TESTTMP/test-failure*.t.err (glob)
676 @@ -1,5 +1,5 @@
697 @@ -1,5 +1,5 @@
677 $ echo babar
698 $ echo babar
678 - rataxes
699 - rataxes
679 + babar
700 + babar
680 This is a noop statement so that
701 This is a noop statement so that
681 this test is still more bytes than success.
702 this test is still more bytes than success.
682 pad pad pad pad............................................................
703 pad pad pad pad............................................................
683
704
684 Failed test-failure*.t: output changed (glob)
705 Failed test-failure*.t: output changed (glob)
685 Failed test-failure*.t: output changed (glob)
706 Failed test-failure*.t: output changed (glob)
686 # Ran 2 tests, 0 skipped, 2 failed.
707 # Ran 2 tests, 0 skipped, 2 failed.
687 python hash seed: * (glob)
708 python hash seed: * (glob)
688 [1]
709 [1]
689
710
690
711
691 (delete the duplicated test file)
712 (delete the duplicated test file)
692 $ rm test-failure-copy.t
713 $ rm test-failure-copy.t
693
714
694
715
695 Interactive run
716 Interactive run
696 ===============
717 ===============
697
718
698 (backup the failing test)
719 (backup the failing test)
699 $ cp test-failure.t backup
720 $ cp test-failure.t backup
700
721
701 Refuse the fix
722 Refuse the fix
702
723
703 $ echo 'n' | rt -i
724 $ echo 'n' | rt -i
725 running 2 tests using 1 parallel processes
704
726
705 --- $TESTTMP/test-failure.t
727 --- $TESTTMP/test-failure.t
706 +++ $TESTTMP/test-failure.t.err
728 +++ $TESTTMP/test-failure.t.err
707 @@ -1,5 +1,5 @@
729 @@ -1,5 +1,5 @@
708 $ echo babar
730 $ echo babar
709 - rataxes
731 - rataxes
710 + babar
732 + babar
711 This is a noop statement so that
733 This is a noop statement so that
712 this test is still more bytes than success.
734 this test is still more bytes than success.
713 pad pad pad pad............................................................
735 pad pad pad pad............................................................
714 Accept this change? [n]
736 Accept this change? [n]
715 ERROR: test-failure.t output changed
737 ERROR: test-failure.t output changed
716 !.
738 !.
717 Failed test-failure.t: output changed
739 Failed test-failure.t: output changed
718 # Ran 2 tests, 0 skipped, 1 failed.
740 # Ran 2 tests, 0 skipped, 1 failed.
719 python hash seed: * (glob)
741 python hash seed: * (glob)
720 [1]
742 [1]
721
743
722 $ cat test-failure.t
744 $ cat test-failure.t
723 $ echo babar
745 $ echo babar
724 rataxes
746 rataxes
725 This is a noop statement so that
747 This is a noop statement so that
726 this test is still more bytes than success.
748 this test is still more bytes than success.
727 pad pad pad pad............................................................
749 pad pad pad pad............................................................
728 pad pad pad pad............................................................
750 pad pad pad pad............................................................
729 pad pad pad pad............................................................
751 pad pad pad pad............................................................
730 pad pad pad pad............................................................
752 pad pad pad pad............................................................
731 pad pad pad pad............................................................
753 pad pad pad pad............................................................
732 pad pad pad pad............................................................
754 pad pad pad pad............................................................
733
755
734 Interactive with custom view
756 Interactive with custom view
735
757
736 $ echo 'n' | rt -i --view echo
758 $ echo 'n' | rt -i --view echo
759 running 2 tests using 1 parallel processes
737 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
760 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
738 Accept this change? [n]* (glob)
761 Accept this change? [n]* (glob)
739 ERROR: test-failure.t output changed
762 ERROR: test-failure.t output changed
740 !.
763 !.
741 Failed test-failure.t: output changed
764 Failed test-failure.t: output changed
742 # Ran 2 tests, 0 skipped, 1 failed.
765 # Ran 2 tests, 0 skipped, 1 failed.
743 python hash seed: * (glob)
766 python hash seed: * (glob)
744 [1]
767 [1]
745
768
746 View the fix
769 View the fix
747
770
748 $ echo 'y' | rt --view echo
771 $ echo 'y' | rt --view echo
772 running 2 tests using 1 parallel processes
749 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
773 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
750
774
751 ERROR: test-failure.t output changed
775 ERROR: test-failure.t output changed
752 !.
776 !.
753 Failed test-failure.t: output changed
777 Failed test-failure.t: output changed
754 # Ran 2 tests, 0 skipped, 1 failed.
778 # Ran 2 tests, 0 skipped, 1 failed.
755 python hash seed: * (glob)
779 python hash seed: * (glob)
756 [1]
780 [1]
757
781
758 Accept the fix
782 Accept the fix
759
783
760 $ cat >> test-failure.t <<EOF
784 $ cat >> test-failure.t <<EOF
761 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
785 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
762 > saved backup bundle to \$TESTTMP/foo.hg
786 > saved backup bundle to \$TESTTMP/foo.hg
763 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
787 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
764 > saved backup bundle to $TESTTMP\\foo.hg
788 > saved backup bundle to $TESTTMP\\foo.hg
765 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
766 > saved backup bundle to \$TESTTMP/*.hg (glob)
790 > saved backup bundle to \$TESTTMP/*.hg (glob)
767 > EOF
791 > EOF
768 $ echo 'y' | rt -i 2>&1
792 $ echo 'y' | rt -i 2>&1
793 running 2 tests using 1 parallel processes
769
794
770 --- $TESTTMP/test-failure.t
795 --- $TESTTMP/test-failure.t
771 +++ $TESTTMP/test-failure.t.err
796 +++ $TESTTMP/test-failure.t.err
772 @@ -1,5 +1,5 @@
797 @@ -1,5 +1,5 @@
773 $ echo babar
798 $ echo babar
774 - rataxes
799 - rataxes
775 + babar
800 + babar
776 This is a noop statement so that
801 This is a noop statement so that
777 this test is still more bytes than success.
802 this test is still more bytes than success.
778 pad pad pad pad............................................................
803 pad pad pad pad............................................................
779 @@ -11,6 +11,6 @@
804 @@ -11,6 +11,6 @@
780 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
781 saved backup bundle to $TESTTMP/foo.hg
806 saved backup bundle to $TESTTMP/foo.hg
782 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
807 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
783 - saved backup bundle to $TESTTMP\foo.hg
808 - saved backup bundle to $TESTTMP\foo.hg
784 + saved backup bundle to $TESTTMP/foo.hg
809 + saved backup bundle to $TESTTMP/foo.hg
785 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
810 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
786 saved backup bundle to $TESTTMP/*.hg (glob)
811 saved backup bundle to $TESTTMP/*.hg (glob)
787 Accept this change? [n] ..
812 Accept this change? [n] ..
788 # Ran 2 tests, 0 skipped, 0 failed.
813 # Ran 2 tests, 0 skipped, 0 failed.
789
814
790 $ sed -e 's,(glob)$,&<,g' test-failure.t
815 $ sed -e 's,(glob)$,&<,g' test-failure.t
791 $ echo babar
816 $ echo babar
792 babar
817 babar
793 This is a noop statement so that
818 This is a noop statement so that
794 this test is still more bytes than success.
819 this test is still more bytes than success.
795 pad pad pad pad............................................................
820 pad pad pad pad............................................................
796 pad pad pad pad............................................................
821 pad pad pad pad............................................................
797 pad pad pad pad............................................................
822 pad pad pad pad............................................................
798 pad pad pad pad............................................................
823 pad pad pad pad............................................................
799 pad pad pad pad............................................................
824 pad pad pad pad............................................................
800 pad pad pad pad............................................................
825 pad pad pad pad............................................................
801 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
826 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
802 saved backup bundle to $TESTTMP/foo.hg
827 saved backup bundle to $TESTTMP/foo.hg
803 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
828 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
804 saved backup bundle to $TESTTMP/foo.hg
829 saved backup bundle to $TESTTMP/foo.hg
805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 saved backup bundle to $TESTTMP/*.hg (glob)<
831 saved backup bundle to $TESTTMP/*.hg (glob)<
807
832
808 Race condition - test file was modified while the test is running
833 Race condition - test file was modified while the test is running
809
834
810 $ TESTRACEDIR=`pwd`
835 $ TESTRACEDIR=`pwd`
811 $ export TESTRACEDIR
836 $ export TESTRACEDIR
812 $ cat > test-race.t <<EOF
837 $ cat > test-race.t <<EOF
813 > $ echo 1
838 > $ echo 1
814 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
839 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
815 > EOF
840 > EOF
816
841
817 $ rt -i test-race.t
842 $ rt -i test-race.t
843 running 1 tests using 1 parallel processes
818
844
819 --- $TESTTMP/test-race.t
845 --- $TESTTMP/test-race.t
820 +++ $TESTTMP/test-race.t.err
846 +++ $TESTTMP/test-race.t.err
821 @@ -1,2 +1,3 @@
847 @@ -1,2 +1,3 @@
822 $ echo 1
848 $ echo 1
823 + 1
849 + 1
824 $ echo "# a new line" >> $TESTTMP/test-race.t
850 $ echo "# a new line" >> $TESTTMP/test-race.t
825 Reference output has changed (run again to prompt changes)
851 Reference output has changed (run again to prompt changes)
826 ERROR: test-race.t output changed
852 ERROR: test-race.t output changed
827 !
853 !
828 Failed test-race.t: output changed
854 Failed test-race.t: output changed
829 # Ran 1 tests, 0 skipped, 1 failed.
855 # Ran 1 tests, 0 skipped, 1 failed.
830 python hash seed: * (glob)
856 python hash seed: * (glob)
831 [1]
857 [1]
832
858
833 $ rm test-race.t
859 $ rm test-race.t
834
860
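The interactive prompt is only safe when the reference file is still the one
the test started from; here the test rewrote its own .t file mid-run, so the
runner refuses and asks for another run. A minimal sketch of that kind of
check, assuming a simple before/after content comparison (not run-tests.py's
actual implementation):

    import hashlib

    def snapshot(path):
        with open(path, 'rb') as fp:
            return hashlib.sha1(fp.read()).hexdigest()

    def changed_while_running(path, run):
        # Compare the reference file before and after executing the test body.
        before = snapshot(path)
        run()
        return snapshot(path) != before
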
835 When "#testcases" is used in .t files
861 When "#testcases" is used in .t files
836
862
837 $ cat >> test-cases.t <<EOF
863 $ cat >> test-cases.t <<EOF
838 > #testcases a b
864 > #testcases a b
839 > #if a
865 > #if a
840 > $ echo 1
866 > $ echo 1
841 > #endif
867 > #endif
842 > #if b
868 > #if b
843 > $ echo 2
869 > $ echo 2
844 > #endif
870 > #endif
845 > EOF
871 > EOF
846
872
847 $ cat <<EOF | rt -i test-cases.t 2>&1
873 $ cat <<EOF | rt -i test-cases.t 2>&1
848 > y
874 > y
849 > y
875 > y
850 > EOF
876 > EOF
877 running 2 tests using 1 parallel processes
851
878
852 --- $TESTTMP/test-cases.t
879 --- $TESTTMP/test-cases.t
853 +++ $TESTTMP/test-cases.t#a.err
880 +++ $TESTTMP/test-cases.t#a.err
854 @@ -1,6 +1,7 @@
881 @@ -1,6 +1,7 @@
855 #testcases a b
882 #testcases a b
856 #if a
883 #if a
857 $ echo 1
884 $ echo 1
858 + 1
885 + 1
859 #endif
886 #endif
860 #if b
887 #if b
861 $ echo 2
888 $ echo 2
862 Accept this change? [n] .
889 Accept this change? [n] .
863 --- $TESTTMP/test-cases.t
890 --- $TESTTMP/test-cases.t
864 +++ $TESTTMP/test-cases.t#b.err
891 +++ $TESTTMP/test-cases.t#b.err
865 @@ -5,4 +5,5 @@
892 @@ -5,4 +5,5 @@
866 #endif
893 #endif
867 #if b
894 #if b
868 $ echo 2
895 $ echo 2
869 + 2
896 + 2
870 #endif
897 #endif
871 Accept this change? [n] .
898 Accept this change? [n] .
872 # Ran 2 tests, 0 skipped, 0 failed.
899 # Ran 2 tests, 0 skipped, 0 failed.
873
900
874 $ cat test-cases.t
901 $ cat test-cases.t
875 #testcases a b
902 #testcases a b
876 #if a
903 #if a
877 $ echo 1
904 $ echo 1
878 1
905 1
879 #endif
906 #endif
880 #if b
907 #if b
881 $ echo 2
908 $ echo 2
882 2
909 2
883 #endif
910 #endif
884
911
885 $ cat >> test-cases.t <<'EOF'
912 $ cat >> test-cases.t <<'EOF'
886 > #if a
913 > #if a
887 > $ NAME=A
914 > $ NAME=A
888 > #else
915 > #else
889 > $ NAME=B
916 > $ NAME=B
890 > #endif
917 > #endif
891 > $ echo $NAME
918 > $ echo $NAME
892 > A (a !)
919 > A (a !)
893 > B (b !)
920 > B (b !)
894 > EOF
921 > EOF
895 $ rt test-cases.t
922 $ rt test-cases.t
923 running 2 tests using 1 parallel processes
896 ..
924 ..
897 # Ran 2 tests, 0 skipped, 0 failed.
925 # Ran 2 tests, 0 skipped, 0 failed.
898
926
899 When using multiple dimensions of "#testcases" in .t files
927 When using multiple dimensions of "#testcases" in .t files
900
928
901 $ cat > test-cases.t <<'EOF'
929 $ cat > test-cases.t <<'EOF'
902 > #testcases a b
930 > #testcases a b
903 > #testcases c d
931 > #testcases c d
904 > #if a d
932 > #if a d
905 > $ echo $TESTCASE
933 > $ echo $TESTCASE
906 > a#d
934 > a#d
907 > #endif
935 > #endif
908 > #if b c
936 > #if b c
909 > $ echo yes
937 > $ echo yes
910 > no
938 > no
911 > #endif
939 > #endif
912 > EOF
940 > EOF
913 $ rt test-cases.t
941 $ rt test-cases.t
942 running 4 tests using 1 parallel processes
914 ..
943 ..
915 --- $TESTTMP/test-cases.t
944 --- $TESTTMP/test-cases.t
916 +++ $TESTTMP/test-cases.t#b#c.err
945 +++ $TESTTMP/test-cases.t#b#c.err
917 @@ -6,5 +6,5 @@
946 @@ -6,5 +6,5 @@
918 #endif
947 #endif
919 #if b c
948 #if b c
920 $ echo yes
949 $ echo yes
921 - no
950 - no
922 + yes
951 + yes
923 #endif
952 #endif
924
953
925 ERROR: test-cases.t#b#c output changed
954 ERROR: test-cases.t#b#c output changed
926 !.
955 !.
927 Failed test-cases.t#b#c: output changed
956 Failed test-cases.t#b#c: output changed
928 # Ran 4 tests, 0 skipped, 1 failed.
957 # Ran 4 tests, 0 skipped, 1 failed.
929 python hash seed: * (glob)
958 python hash seed: * (glob)
930 [1]
959 [1]
931
960
932 $ rm test-cases.t#b#c.err
961 $ rm test-cases.t#b#c.err
933 $ rm test-cases.t
962 $ rm test-cases.t
934
963
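Each "#testcases" line adds one dimension and every combination is run as a
separate variant, which is why the run above reports 4 tests for 2 x 2 cases
and names the failing variant test-cases.t#b#c. A small illustration of that
expansion (sketch only):

    import itertools

    # One list per "#testcases" line in the file above.
    dimensions = [['a', 'b'], ['c', 'd']]
    for combo in itertools.product(*dimensions):
        print('test-cases.t#' + '#'.join(combo))
    # -> test-cases.t#a#c, test-cases.t#a#d, test-cases.t#b#c, test-cases.t#b#d
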
935 (reinstall)
964 (reinstall)
936 $ mv backup test-failure.t
965 $ mv backup test-failure.t
937
966
938 No Diff
967 No Diff
939 ===============
968 ===============
940
969
941 $ rt --nodiff
970 $ rt --nodiff
971 running 2 tests using 1 parallel processes
942 !.
972 !.
943 Failed test-failure.t: output changed
973 Failed test-failure.t: output changed
944 # Ran 2 tests, 0 skipped, 1 failed.
974 # Ran 2 tests, 0 skipped, 1 failed.
945 python hash seed: * (glob)
975 python hash seed: * (glob)
946 [1]
976 [1]
947
977
948 test --tmpdir support
978 test --tmpdir support
949 $ rt --tmpdir=$TESTTMP/keep test-success.t
979 $ rt --tmpdir=$TESTTMP/keep test-success.t
980 running 1 tests using 1 parallel processes
950
981
951 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
982 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
952 Keeping threadtmp dir: $TESTTMP/keep/child1
983 Keeping threadtmp dir: $TESTTMP/keep/child1
953 .
984 .
954 # Ran 1 tests, 0 skipped, 0 failed.
985 # Ran 1 tests, 0 skipped, 0 failed.
955
986
956 timeouts
987 timeouts
957 ========
988 ========
958 $ cat > test-timeout.t <<EOF
989 $ cat > test-timeout.t <<EOF
959 > $ sleep 2
990 > $ sleep 2
960 > $ echo pass
991 > $ echo pass
961 > pass
992 > pass
962 > EOF
993 > EOF
963 > echo '#require slow' > test-slow-timeout.t
994 > echo '#require slow' > test-slow-timeout.t
964 > cat test-timeout.t >> test-slow-timeout.t
995 > cat test-timeout.t >> test-slow-timeout.t
965 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
996 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
997 running 2 tests using 1 parallel processes
966 st
998 st
967 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
999 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
968 Failed test-timeout.t: timed out
1000 Failed test-timeout.t: timed out
969 # Ran 1 tests, 1 skipped, 1 failed.
1001 # Ran 1 tests, 1 skipped, 1 failed.
970 python hash seed: * (glob)
1002 python hash seed: * (glob)
971 [1]
1003 [1]
972 $ rt --timeout=1 --slowtimeout=3 \
1004 $ rt --timeout=1 --slowtimeout=3 \
973 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1005 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1006 running 2 tests using 1 parallel processes
974 .t
1007 .t
975 Failed test-timeout.t: timed out
1008 Failed test-timeout.t: timed out
976 # Ran 2 tests, 0 skipped, 1 failed.
1009 # Ran 2 tests, 0 skipped, 1 failed.
977 python hash seed: * (glob)
1010 python hash seed: * (glob)
978 [1]
1011 [1]
979 $ rm test-timeout.t test-slow-timeout.t
1012 $ rm test-timeout.t test-slow-timeout.t
980
1013
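A test that exceeds --timeout (or --slowtimeout, for "#require slow" tests
run with --allow-slow-tests) is killed and reported as "timed out". A minimal
sketch of that enforcement idea using the Python 3 standard library (not
run-tests.py's actual implementation):

    import subprocess

    def run_with_timeout(cmd, timeout):
        proc = subprocess.Popen(cmd)
        try:
            return proc.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            # Over budget: kill the child and report a timeout.
            proc.kill()
            proc.wait()
            return 'timed out'
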
981 test for --time
1014 test for --time
982 ==================
1015 ==================
983
1016
984 $ rt test-success.t --time
1017 $ rt test-success.t --time
1018 running 1 tests using 1 parallel processes
985 .
1019 .
986 # Ran 1 tests, 0 skipped, 0 failed.
1020 # Ran 1 tests, 0 skipped, 0 failed.
987 # Producing time report
1021 # Producing time report
988 start end cuser csys real Test
1022 start end cuser csys real Test
989 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1023 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
990
1024
991 test for --time with --jobs enabled
1025 test for --time with --jobs enabled
992 ====================================
1026 ====================================
993
1027
994 $ rt test-success.t --time --jobs 2
1028 $ rt test-success.t --time --jobs 2
1029 running 1 tests using 1 parallel processes
995 .
1030 .
996 # Ran 1 tests, 0 skipped, 0 failed.
1031 # Ran 1 tests, 0 skipped, 0 failed.
997 # Producing time report
1032 # Producing time report
998 start end cuser csys real Test
1033 start end cuser csys real Test
999 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1034 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1000
1035
1001 Skips
1036 Skips
1002 ================
1037 ================
1003 $ cat > test-skip.t <<EOF
1038 $ cat > test-skip.t <<EOF
1004 > $ echo xyzzy
1039 > $ echo xyzzy
1005 > #if true
1040 > #if true
1006 > #require false
1041 > #require false
1007 > #end
1042 > #end
1008 > EOF
1043 > EOF
1009 $ cat > test-noskip.t <<EOF
1044 $ cat > test-noskip.t <<EOF
1010 > #if false
1045 > #if false
1011 > #require false
1046 > #require false
1012 > #endif
1047 > #endif
1013 > EOF
1048 > EOF
1014 $ rt --nodiff
1049 $ rt --nodiff
1050 running 4 tests using 1 parallel processes
1015 !.s.
1051 !.s.
1016 Skipped test-skip.t: missing feature: nail clipper
1052 Skipped test-skip.t: missing feature: nail clipper
1017 Failed test-failure.t: output changed
1053 Failed test-failure.t: output changed
1018 # Ran 3 tests, 1 skipped, 1 failed.
1054 # Ran 3 tests, 1 skipped, 1 failed.
1019 python hash seed: * (glob)
1055 python hash seed: * (glob)
1020 [1]
1056 [1]
1021
1057
1022 $ rm test-noskip.t
1058 $ rm test-noskip.t
1023 $ rt --keyword xyzzy
1059 $ rt --keyword xyzzy
1060 running 3 tests using 1 parallel processes
1024 .s
1061 .s
1025 Skipped test-skip.t: missing feature: nail clipper
1062 Skipped test-skip.t: missing feature: nail clipper
1026 # Ran 2 tests, 2 skipped, 0 failed.
1063 # Ran 2 tests, 2 skipped, 0 failed.
1027
1064
1028 Skips with xml
1065 Skips with xml
1029 $ rt --keyword xyzzy \
1066 $ rt --keyword xyzzy \
1030 > --xunit=xunit.xml
1067 > --xunit=xunit.xml
1068 running 3 tests using 1 parallel processes
1031 .s
1069 .s
1032 Skipped test-skip.t: missing feature: nail clipper
1070 Skipped test-skip.t: missing feature: nail clipper
1033 # Ran 2 tests, 2 skipped, 0 failed.
1071 # Ran 2 tests, 2 skipped, 0 failed.
1034 $ cat xunit.xml
1072 $ cat xunit.xml
1035 <?xml version="1.0" encoding="utf-8"?>
1073 <?xml version="1.0" encoding="utf-8"?>
1036 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1074 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1037 <testcase name="test-success.t" time="*"/> (glob)
1075 <testcase name="test-success.t" time="*"/> (glob)
1038 <testcase name="test-skip.t">
1076 <testcase name="test-skip.t">
1039 <skipped>
1077 <skipped>
1040 <![CDATA[missing feature: nail clipper]]> </skipped>
1078 <![CDATA[missing feature: nail clipper]]> </skipped>
1041 </testcase>
1079 </testcase>
1042 </testsuite>
1080 </testsuite>
1043
1081
1044 Tests skipped because they are missing or blacklisted don't count as executed:
1082 Tests skipped because they are missing or blacklisted don't count as executed:
1045 $ echo test-failure.t > blacklist
1083 $ echo test-failure.t > blacklist
1046 $ rt --blacklist=blacklist --json\
1084 $ rt --blacklist=blacklist --json\
1047 > test-failure.t test-bogus.t
1085 > test-failure.t test-bogus.t
1086 running 2 tests using 1 parallel processes
1048 ss
1087 ss
1049 Skipped test-bogus.t: Doesn't exist
1088 Skipped test-bogus.t: Doesn't exist
1050 Skipped test-failure.t: blacklisted
1089 Skipped test-failure.t: blacklisted
1051 # Ran 0 tests, 2 skipped, 0 failed.
1090 # Ran 0 tests, 2 skipped, 0 failed.
1052 $ cat report.json
1091 $ cat report.json
1053 testreport ={
1092 testreport ={
1054 "test-bogus.t": {
1093 "test-bogus.t": {
1055 "result": "skip"
1094 "result": "skip"
1056 },
1095 },
1057 "test-failure.t": {
1096 "test-failure.t": {
1058 "result": "skip"
1097 "result": "skip"
1059 }
1098 }
1060 } (no-eol)
1099 } (no-eol)
1061
1100
1062 Whitelist trumps blacklist
1101 Whitelist trumps blacklist
1063 $ echo test-failure.t > whitelist
1102 $ echo test-failure.t > whitelist
1064 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1103 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1065 > test-failure.t test-bogus.t
1104 > test-failure.t test-bogus.t
1105 running 2 tests using 1 parallel processes
1066 s
1106 s
1067 --- $TESTTMP/test-failure.t
1107 --- $TESTTMP/test-failure.t
1068 +++ $TESTTMP/test-failure.t.err
1108 +++ $TESTTMP/test-failure.t.err
1069 @@ -1,5 +1,5 @@
1109 @@ -1,5 +1,5 @@
1070 $ echo babar
1110 $ echo babar
1071 - rataxes
1111 - rataxes
1072 + babar
1112 + babar
1073 This is a noop statement so that
1113 This is a noop statement so that
1074 this test is still more bytes than success.
1114 this test is still more bytes than success.
1075 pad pad pad pad............................................................
1115 pad pad pad pad............................................................
1076
1116
1077 ERROR: test-failure.t output changed
1117 ERROR: test-failure.t output changed
1078 !
1118 !
1079 Skipped test-bogus.t: Doesn't exist
1119 Skipped test-bogus.t: Doesn't exist
1080 Failed test-failure.t: output changed
1120 Failed test-failure.t: output changed
1081 # Ran 1 tests, 1 skipped, 1 failed.
1121 # Ran 1 tests, 1 skipped, 1 failed.
1082 python hash seed: * (glob)
1122 python hash seed: * (glob)
1083 [1]
1123 [1]
1084
1124
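As the two runs above show, a blacklisted test is skipped, but listing the
same test in a --whitelist file overrides the blacklist and runs it anyway.
A one-line sketch of that precedence (assumed semantics, for illustration):

    def is_blacklisted(test, blacklist, whitelist):
        # Whitelist trumps blacklist: a whitelisted test always runs.
        return test in blacklist and test not in whitelist
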
1085 Ensure that --test-list causes only the tests listed in that file to
1125 Ensure that --test-list causes only the tests listed in that file to
1086 be executed.
1126 be executed.
1087 $ echo test-success.t >> onlytest
1127 $ echo test-success.t >> onlytest
1088 $ rt --test-list=onlytest
1128 $ rt --test-list=onlytest
1129 running 1 tests using 1 parallel processes
1089 .
1130 .
1090 # Ran 1 tests, 0 skipped, 0 failed.
1131 # Ran 1 tests, 0 skipped, 0 failed.
1091 $ echo test-bogus.t >> anothertest
1132 $ echo test-bogus.t >> anothertest
1092 $ rt --test-list=onlytest --test-list=anothertest
1133 $ rt --test-list=onlytest --test-list=anothertest
1134 running 2 tests using 1 parallel processes
1093 s.
1135 s.
1094 Skipped test-bogus.t: Doesn't exist
1136 Skipped test-bogus.t: Doesn't exist
1095 # Ran 1 tests, 1 skipped, 0 failed.
1137 # Ran 1 tests, 1 skipped, 0 failed.
1096 $ rm onlytest anothertest
1138 $ rm onlytest anothertest
1097
1139
1098 test for --json
1140 test for --json
1099 ==================
1141 ==================
1100
1142
1101 $ rt --json
1143 $ rt --json
1144 running 3 tests using 1 parallel processes
1102
1145
1103 --- $TESTTMP/test-failure.t
1146 --- $TESTTMP/test-failure.t
1104 +++ $TESTTMP/test-failure.t.err
1147 +++ $TESTTMP/test-failure.t.err
1105 @@ -1,5 +1,5 @@
1148 @@ -1,5 +1,5 @@
1106 $ echo babar
1149 $ echo babar
1107 - rataxes
1150 - rataxes
1108 + babar
1151 + babar
1109 This is a noop statement so that
1152 This is a noop statement so that
1110 this test is still more bytes than success.
1153 this test is still more bytes than success.
1111 pad pad pad pad............................................................
1154 pad pad pad pad............................................................
1112
1155
1113 ERROR: test-failure.t output changed
1156 ERROR: test-failure.t output changed
1114 !.s
1157 !.s
1115 Skipped test-skip.t: missing feature: nail clipper
1158 Skipped test-skip.t: missing feature: nail clipper
1116 Failed test-failure.t: output changed
1159 Failed test-failure.t: output changed
1117 # Ran 2 tests, 1 skipped, 1 failed.
1160 # Ran 2 tests, 1 skipped, 1 failed.
1118 python hash seed: * (glob)
1161 python hash seed: * (glob)
1119 [1]
1162 [1]
1120
1163
1121 $ cat report.json
1164 $ cat report.json
1122 testreport ={
1165 testreport ={
1123 "test-failure.t": [\{] (re)
1166 "test-failure.t": [\{] (re)
1124 "csys": "\s*[\d\.]{4,5}", ? (re)
1167 "csys": "\s*[\d\.]{4,5}", ? (re)
1125 "cuser": "\s*[\d\.]{4,5}", ? (re)
1168 "cuser": "\s*[\d\.]{4,5}", ? (re)
1126 "diff": "---.+\+\+\+.+", ? (re)
1169 "diff": "---.+\+\+\+.+", ? (re)
1127 "end": "\s*[\d\.]{4,5}", ? (re)
1170 "end": "\s*[\d\.]{4,5}", ? (re)
1128 "result": "failure", ? (re)
1171 "result": "failure", ? (re)
1129 "start": "\s*[\d\.]{4,5}", ? (re)
1172 "start": "\s*[\d\.]{4,5}", ? (re)
1130 "time": "\s*[\d\.]{4,5}" (re)
1173 "time": "\s*[\d\.]{4,5}" (re)
1131 }, ? (re)
1174 }, ? (re)
1132 "test-skip.t": {
1175 "test-skip.t": {
1133 "csys": "\s*[\d\.]{4,5}", ? (re)
1176 "csys": "\s*[\d\.]{4,5}", ? (re)
1134 "cuser": "\s*[\d\.]{4,5}", ? (re)
1177 "cuser": "\s*[\d\.]{4,5}", ? (re)
1135 "diff": "", ? (re)
1178 "diff": "", ? (re)
1136 "end": "\s*[\d\.]{4,5}", ? (re)
1179 "end": "\s*[\d\.]{4,5}", ? (re)
1137 "result": "skip", ? (re)
1180 "result": "skip", ? (re)
1138 "start": "\s*[\d\.]{4,5}", ? (re)
1181 "start": "\s*[\d\.]{4,5}", ? (re)
1139 "time": "\s*[\d\.]{4,5}" (re)
1182 "time": "\s*[\d\.]{4,5}" (re)
1140 }, ? (re)
1183 }, ? (re)
1141 "test-success.t": [\{] (re)
1184 "test-success.t": [\{] (re)
1142 "csys": "\s*[\d\.]{4,5}", ? (re)
1185 "csys": "\s*[\d\.]{4,5}", ? (re)
1143 "cuser": "\s*[\d\.]{4,5}", ? (re)
1186 "cuser": "\s*[\d\.]{4,5}", ? (re)
1144 "diff": "", ? (re)
1187 "diff": "", ? (re)
1145 "end": "\s*[\d\.]{4,5}", ? (re)
1188 "end": "\s*[\d\.]{4,5}", ? (re)
1146 "result": "success", ? (re)
1189 "result": "success", ? (re)
1147 "start": "\s*[\d\.]{4,5}", ? (re)
1190 "start": "\s*[\d\.]{4,5}", ? (re)
1148 "time": "\s*[\d\.]{4,5}" (re)
1191 "time": "\s*[\d\.]{4,5}" (re)
1149 }
1192 }
1150 } (no-eol)
1193 } (no-eol)
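
The report.json written by --json is a JSON object prefixed with
"testreport =" and written without a trailing newline, so strip the prefix
before decoding it. A minimal loader sketch (illustrative only):

    import json

    def load_report(path='report.json'):
        with open(path) as fp:
            text = fp.read()
        # Drop the leading "testreport =" prefix and decode the JSON payload.
        _, _, payload = text.partition('=')
        return json.loads(payload)

    report = load_report()
    failed = sorted(t for t, r in report.items() if r['result'] == 'failure')
    print('%d failed: %s' % (len(failed), ', '.join(failed)))
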
1151 --json with --outputdir
1194 --json with --outputdir
1152
1195
1153 $ rm report.json
1196 $ rm report.json
1154 $ rm -r output
1197 $ rm -r output
1155 $ mkdir output
1198 $ mkdir output
1156 $ rt --json --outputdir output
1199 $ rt --json --outputdir output
1200 running 3 tests using 1 parallel processes
1157
1201
1158 --- $TESTTMP/test-failure.t
1202 --- $TESTTMP/test-failure.t
1159 +++ $TESTTMP/output/test-failure.t.err
1203 +++ $TESTTMP/output/test-failure.t.err
1160 @@ -1,5 +1,5 @@
1204 @@ -1,5 +1,5 @@
1161 $ echo babar
1205 $ echo babar
1162 - rataxes
1206 - rataxes
1163 + babar
1207 + babar
1164 This is a noop statement so that
1208 This is a noop statement so that
1165 this test is still more bytes than success.
1209 this test is still more bytes than success.
1166 pad pad pad pad............................................................
1210 pad pad pad pad............................................................
1167
1211
1168 ERROR: test-failure.t output changed
1212 ERROR: test-failure.t output changed
1169 !.s
1213 !.s
1170 Skipped test-skip.t: missing feature: nail clipper
1214 Skipped test-skip.t: missing feature: nail clipper
1171 Failed test-failure.t: output changed
1215 Failed test-failure.t: output changed
1172 # Ran 2 tests, 1 skipped, 1 failed.
1216 # Ran 2 tests, 1 skipped, 1 failed.
1173 python hash seed: * (glob)
1217 python hash seed: * (glob)
1174 [1]
1218 [1]
1175 $ f report.json
1219 $ f report.json
1176 report.json: file not found
1220 report.json: file not found
1177 $ cat output/report.json
1221 $ cat output/report.json
1178 testreport ={
1222 testreport ={
1179 "test-failure.t": [\{] (re)
1223 "test-failure.t": [\{] (re)
1180 "csys": "\s*[\d\.]{4,5}", ? (re)
1224 "csys": "\s*[\d\.]{4,5}", ? (re)
1181 "cuser": "\s*[\d\.]{4,5}", ? (re)
1225 "cuser": "\s*[\d\.]{4,5}", ? (re)
1182 "diff": "---.+\+\+\+.+", ? (re)
1226 "diff": "---.+\+\+\+.+", ? (re)
1183 "end": "\s*[\d\.]{4,5}", ? (re)
1227 "end": "\s*[\d\.]{4,5}", ? (re)
1184 "result": "failure", ? (re)
1228 "result": "failure", ? (re)
1185 "start": "\s*[\d\.]{4,5}", ? (re)
1229 "start": "\s*[\d\.]{4,5}", ? (re)
1186 "time": "\s*[\d\.]{4,5}" (re)
1230 "time": "\s*[\d\.]{4,5}" (re)
1187 }, ? (re)
1231 }, ? (re)
1188 "test-skip.t": {
1232 "test-skip.t": {
1189 "csys": "\s*[\d\.]{4,5}", ? (re)
1233 "csys": "\s*[\d\.]{4,5}", ? (re)
1190 "cuser": "\s*[\d\.]{4,5}", ? (re)
1234 "cuser": "\s*[\d\.]{4,5}", ? (re)
1191 "diff": "", ? (re)
1235 "diff": "", ? (re)
1192 "end": "\s*[\d\.]{4,5}", ? (re)
1236 "end": "\s*[\d\.]{4,5}", ? (re)
1193 "result": "skip", ? (re)
1237 "result": "skip", ? (re)
1194 "start": "\s*[\d\.]{4,5}", ? (re)
1238 "start": "\s*[\d\.]{4,5}", ? (re)
1195 "time": "\s*[\d\.]{4,5}" (re)
1239 "time": "\s*[\d\.]{4,5}" (re)
1196 }, ? (re)
1240 }, ? (re)
1197 "test-success.t": [\{] (re)
1241 "test-success.t": [\{] (re)
1198 "csys": "\s*[\d\.]{4,5}", ? (re)
1242 "csys": "\s*[\d\.]{4,5}", ? (re)
1199 "cuser": "\s*[\d\.]{4,5}", ? (re)
1243 "cuser": "\s*[\d\.]{4,5}", ? (re)
1200 "diff": "", ? (re)
1244 "diff": "", ? (re)
1201 "end": "\s*[\d\.]{4,5}", ? (re)
1245 "end": "\s*[\d\.]{4,5}", ? (re)
1202 "result": "success", ? (re)
1246 "result": "success", ? (re)
1203 "start": "\s*[\d\.]{4,5}", ? (re)
1247 "start": "\s*[\d\.]{4,5}", ? (re)
1204 "time": "\s*[\d\.]{4,5}" (re)
1248 "time": "\s*[\d\.]{4,5}" (re)
1205 }
1249 }
1206 } (no-eol)
1250 } (no-eol)
1207 $ ls -a output
1251 $ ls -a output
1208 .
1252 .
1209 ..
1253 ..
1210 .testtimes
1254 .testtimes
1211 report.json
1255 report.json
1212 test-failure.t.err
1256 test-failure.t.err
1213
1257
1214 Test that a failed test accepted through interactive mode is properly reported:
1258 Test that a failed test accepted through interactive mode is properly reported:
1215
1259
1216 $ cp test-failure.t backup
1260 $ cp test-failure.t backup
1217 $ echo y | rt --json -i
1261 $ echo y | rt --json -i
1262 running 3 tests using 1 parallel processes
1218
1263
1219 --- $TESTTMP/test-failure.t
1264 --- $TESTTMP/test-failure.t
1220 +++ $TESTTMP/test-failure.t.err
1265 +++ $TESTTMP/test-failure.t.err
1221 @@ -1,5 +1,5 @@
1266 @@ -1,5 +1,5 @@
1222 $ echo babar
1267 $ echo babar
1223 - rataxes
1268 - rataxes
1224 + babar
1269 + babar
1225 This is a noop statement so that
1270 This is a noop statement so that
1226 this test is still more bytes than success.
1271 this test is still more bytes than success.
1227 pad pad pad pad............................................................
1272 pad pad pad pad............................................................
1228 Accept this change? [n] ..s
1273 Accept this change? [n] ..s
1229 Skipped test-skip.t: missing feature: nail clipper
1274 Skipped test-skip.t: missing feature: nail clipper
1230 # Ran 2 tests, 1 skipped, 0 failed.
1275 # Ran 2 tests, 1 skipped, 0 failed.
1231
1276
1232 $ cat report.json
1277 $ cat report.json
1233 testreport ={
1278 testreport ={
1234 "test-failure.t": [\{] (re)
1279 "test-failure.t": [\{] (re)
1235 "csys": "\s*[\d\.]{4,5}", ? (re)
1280 "csys": "\s*[\d\.]{4,5}", ? (re)
1236 "cuser": "\s*[\d\.]{4,5}", ? (re)
1281 "cuser": "\s*[\d\.]{4,5}", ? (re)
1237 "diff": "", ? (re)
1282 "diff": "", ? (re)
1238 "end": "\s*[\d\.]{4,5}", ? (re)
1283 "end": "\s*[\d\.]{4,5}", ? (re)
1239 "result": "success", ? (re)
1284 "result": "success", ? (re)
1240 "start": "\s*[\d\.]{4,5}", ? (re)
1285 "start": "\s*[\d\.]{4,5}", ? (re)
1241 "time": "\s*[\d\.]{4,5}" (re)
1286 "time": "\s*[\d\.]{4,5}" (re)
1242 }, ? (re)
1287 }, ? (re)
1243 "test-skip.t": {
1288 "test-skip.t": {
1244 "csys": "\s*[\d\.]{4,5}", ? (re)
1289 "csys": "\s*[\d\.]{4,5}", ? (re)
1245 "cuser": "\s*[\d\.]{4,5}", ? (re)
1290 "cuser": "\s*[\d\.]{4,5}", ? (re)
1246 "diff": "", ? (re)
1291 "diff": "", ? (re)
1247 "end": "\s*[\d\.]{4,5}", ? (re)
1292 "end": "\s*[\d\.]{4,5}", ? (re)
1248 "result": "skip", ? (re)
1293 "result": "skip", ? (re)
1249 "start": "\s*[\d\.]{4,5}", ? (re)
1294 "start": "\s*[\d\.]{4,5}", ? (re)
1250 "time": "\s*[\d\.]{4,5}" (re)
1295 "time": "\s*[\d\.]{4,5}" (re)
1251 }, ? (re)
1296 }, ? (re)
1252 "test-success.t": [\{] (re)
1297 "test-success.t": [\{] (re)
1253 "csys": "\s*[\d\.]{4,5}", ? (re)
1298 "csys": "\s*[\d\.]{4,5}", ? (re)
1254 "cuser": "\s*[\d\.]{4,5}", ? (re)
1299 "cuser": "\s*[\d\.]{4,5}", ? (re)
1255 "diff": "", ? (re)
1300 "diff": "", ? (re)
1256 "end": "\s*[\d\.]{4,5}", ? (re)
1301 "end": "\s*[\d\.]{4,5}", ? (re)
1257 "result": "success", ? (re)
1302 "result": "success", ? (re)
1258 "start": "\s*[\d\.]{4,5}", ? (re)
1303 "start": "\s*[\d\.]{4,5}", ? (re)
1259 "time": "\s*[\d\.]{4,5}" (re)
1304 "time": "\s*[\d\.]{4,5}" (re)
1260 }
1305 }
1261 } (no-eol)
1306 } (no-eol)
1262 $ mv backup test-failure.t
1307 $ mv backup test-failure.t
1263
1308
1264 backslash at the end of a line with glob matching is handled properly
1309 backslash at the end of a line with glob matching is handled properly
1265
1310
1266 $ cat > test-glob-backslash.t << EOF
1311 $ cat > test-glob-backslash.t << EOF
1267 > $ echo 'foo bar \\'
1312 > $ echo 'foo bar \\'
1268 > foo * \ (glob)
1313 > foo * \ (glob)
1269 > EOF
1314 > EOF
1270
1315
1271 $ rt test-glob-backslash.t
1316 $ rt test-glob-backslash.t
1317 running 1 tests using 1 parallel processes
1272 .
1318 .
1273 # Ran 1 tests, 0 skipped, 0 failed.
1319 # Ran 1 tests, 0 skipped, 0 failed.
1274
1320
1275 $ rm -f test-glob-backslash.t
1321 $ rm -f test-glob-backslash.t
1276
1322
1277 Test globbing of local IP addresses
1323 Test globbing of local IP addresses
1278 $ echo 172.16.18.1
1324 $ echo 172.16.18.1
1279 $LOCALIP (glob)
1325 $LOCALIP (glob)
1280 $ echo dead:beef::1
1326 $ echo dead:beef::1
1281 $LOCALIP (glob)
1327 $LOCALIP (glob)
1282
1328
1283 Add support for external test formatter
1329 Add support for external test formatter
1284 =======================================
1330 =======================================
1285
1331
1286 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1333 running 2 tests using 1 parallel processes
1287
1334
1288 # Ran 2 tests, 0 skipped, 0 failed.
1335 # Ran 2 tests, 0 skipped, 0 failed.
1289 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1336 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1290 FAILURE! test-failure.t output changed
1337 FAILURE! test-failure.t output changed
1291 SUCCESS! test-success.t
1338 SUCCESS! test-success.t
1292 ON_END!
1339 ON_END!
1293
1340
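The CUSTOM_TEST_RESULT hook above lets an external module (here the
basic_test_result module named in the environment variable) observe the run;
the ON_START/SUCCESS/FAILURE/ON_END lines come from it. The object it
supplies behaves like a unittest result; a rough sketch of such a formatter,
using the standard unittest API as an assumed stand-in for the exact
interface run-tests.py expects:

    import unittest

    class LoggingResult(unittest.TestResult):
        def startTestRun(self):
            print('ON_START!')

        def addSuccess(self, test):
            super(LoggingResult, self).addSuccess(test)
            print('SUCCESS! %s' % test)

        def addFailure(self, test, err):
            super(LoggingResult, self).addFailure(test, err)
            print('FAILURE! %s' % test)

        def stopTestRun(self):
            print('ON_END!')
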
1294 Test reusability for third party tools
1341 Test reusability for third party tools
1295 ======================================
1342 ======================================
1296
1343
1297 $ mkdir "$TESTTMP"/anothertests
1344 $ mkdir "$TESTTMP"/anothertests
1298 $ cd "$TESTTMP"/anothertests
1345 $ cd "$TESTTMP"/anothertests
1299
1346
1300 test that `run-tests.py` can execute hghave, even if it is not run from
1347 test that `run-tests.py` can execute hghave, even if it is not run from
1301 within the Mercurial source tree.
1348 within the Mercurial source tree.
1302
1349
1303 $ cat > test-hghave.t <<EOF
1350 $ cat > test-hghave.t <<EOF
1304 > #require true
1351 > #require true
1305 > $ echo foo
1352 > $ echo foo
1306 > foo
1353 > foo
1307 > EOF
1354 > EOF
1308 $ rt test-hghave.t
1355 $ rt test-hghave.t
1356 running 1 tests using 1 parallel processes
1309 .
1357 .
1310 # Ran 1 tests, 0 skipped, 0 failed.
1358 # Ran 1 tests, 0 skipped, 0 failed.
1311
1359
1312 test that RUNTESTDIR refers to the directory in which the currently
1360 test that RUNTESTDIR refers to the directory in which the currently
1313 running `run-tests.py` is placed.
1361 running `run-tests.py` is placed.
1314
1362
1315 $ cat > test-runtestdir.t <<EOF
1363 $ cat > test-runtestdir.t <<EOF
1316 > - $TESTDIR, in which test-run-tests.t is placed
1364 > - $TESTDIR, in which test-run-tests.t is placed
1317 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1365 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1318 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1366 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1319 >
1367 >
1320 > #if windows
1368 > #if windows
1321 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1369 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1322 > #else
1370 > #else
1323 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1371 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1324 > #endif
1372 > #endif
1325 > If this prints a path, that means RUNTESTDIR didn't equal
1373 > If this prints a path, that means RUNTESTDIR didn't equal
1326 > TESTDIR as it should have.
1374 > TESTDIR as it should have.
1327 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1375 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1328 > This should print the start of check-code. If this passes but the
1376 > This should print the start of check-code. If this passes but the
1329 > previous check failed, that means we found a copy of check-code at whatever
1377 > previous check failed, that means we found a copy of check-code at whatever
1330 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1378 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1331 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1379 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1332 > #!USRBINENVPY
1380 > #!USRBINENVPY
1333 > #
1381 > #
1334 > # check-code - a style and portability checker for Mercurial
1382 > # check-code - a style and portability checker for Mercurial
1335 > EOF
1383 > EOF
1336 $ rt test-runtestdir.t
1384 $ rt test-runtestdir.t
1385 running 1 tests using 1 parallel processes
1337 .
1386 .
1338 # Ran 1 tests, 0 skipped, 0 failed.
1387 # Ran 1 tests, 0 skipped, 0 failed.
1339
1388
1340 #if execbit
1389 #if execbit
1341
1390
1342 test that TESTDIR is included in PATH
1391 test that TESTDIR is included in PATH
1343
1392
1344 $ cat > custom-command.sh <<EOF
1393 $ cat > custom-command.sh <<EOF
1345 > #!/bin/sh
1394 > #!/bin/sh
1346 > echo "hello world"
1395 > echo "hello world"
1347 > EOF
1396 > EOF
1348 $ chmod +x custom-command.sh
1397 $ chmod +x custom-command.sh
1349 $ cat > test-testdir-path.t <<EOF
1398 $ cat > test-testdir-path.t <<EOF
1350 > $ custom-command.sh
1399 > $ custom-command.sh
1351 > hello world
1400 > hello world
1352 > EOF
1401 > EOF
1353 $ rt test-testdir-path.t
1402 $ rt test-testdir-path.t
1403 running 1 tests using 1 parallel processes
1354 .
1404 .
1355 # Ran 1 tests, 0 skipped, 0 failed.
1405 # Ran 1 tests, 0 skipped, 0 failed.
1356
1406
1357 #endif
1407 #endif
1358
1408
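Because run-tests.py puts the test directory on PATH, helper scripts shipped next to a .t file can be invoked by bare name, which is what custom-command.sh demonstrates above. A sketch of the same idea with illustrative names:

  $ cat > helper-report.sh <<'EOF'
  > #!/bin/sh
  > echo "helper found via PATH"
  > EOF
  $ chmod +x helper-report.sh
  $ cat > test-uses-helper.t <<'EOF'
  >   $ helper-report.sh
  >   helper found via PATH
  > EOF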
1359 test support for --allow-slow-tests
1409 test support for --allow-slow-tests
1360 $ cat > test-very-slow-test.t <<EOF
1410 $ cat > test-very-slow-test.t <<EOF
1361 > #require slow
1411 > #require slow
1362 > $ echo pass
1412 > $ echo pass
1363 > pass
1413 > pass
1364 > EOF
1414 > EOF
1365 $ rt test-very-slow-test.t
1415 $ rt test-very-slow-test.t
1416 running 1 tests using 1 parallel processes
1366 s
1417 s
1367 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1418 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1368 # Ran 0 tests, 1 skipped, 0 failed.
1419 # Ran 0 tests, 1 skipped, 0 failed.
1369 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1420 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1421 running 1 tests using 1 parallel processes
1370 .
1422 .
1371 # Ran 1 tests, 0 skipped, 0 failed.
1423 # Ran 1 tests, 0 skipped, 0 failed.
1372
1424
1373 support for running a test outside the current directory
1425 support for running a test outside the current directory
1374 $ mkdir nonlocal
1426 $ mkdir nonlocal
1375 $ cat > nonlocal/test-is-not-here.t << EOF
1427 $ cat > nonlocal/test-is-not-here.t << EOF
1376 > $ echo pass
1428 > $ echo pass
1377 > pass
1429 > pass
1378 > EOF
1430 > EOF
1379 $ rt nonlocal/test-is-not-here.t
1431 $ rt nonlocal/test-is-not-here.t
1432 running 1 tests using 1 parallel processes
1380 .
1433 .
1381 # Ran 1 tests, 0 skipped, 0 failed.
1434 # Ran 1 tests, 0 skipped, 0 failed.
1382
1435
1383 support for automatically discovering tests when the argument is a folder
1436 support for automatically discovering tests when the argument is a folder
1384 $ mkdir tmp && cd tmp
1437 $ mkdir tmp && cd tmp
1385
1438
1386 $ cat > test-uno.t << EOF
1439 $ cat > test-uno.t << EOF
1387 > $ echo line
1440 > $ echo line
1388 > line
1441 > line
1389 > EOF
1442 > EOF
1390
1443
1391 $ cp test-uno.t test-dos.t
1444 $ cp test-uno.t test-dos.t
1392 $ cd ..
1445 $ cd ..
1393 $ cp -R tmp tmpp
1446 $ cp -R tmp tmpp
1394 $ cp tmp/test-uno.t test-solo.t
1447 $ cp tmp/test-uno.t test-solo.t
1395
1448
1396 $ rt tmp/ test-solo.t tmpp
1449 $ rt tmp/ test-solo.t tmpp
1450 running 5 tests using 1 parallel processes
1397 .....
1451 .....
1398 # Ran 5 tests, 0 skipped, 0 failed.
1452 # Ran 5 tests, 0 skipped, 0 failed.
1399 $ rm -rf tmp tmpp
1453 $ rm -rf tmp tmpp
1400
1454
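Directory arguments are expanded to the test files they contain, so folders and individual .t files can be mixed freely in one invocation, as the `rt tmp/ test-solo.t tmpp` run shows. Before the temporary copies were removed, the explicit equivalent would have looked roughly like:

  $ rt tmp/test-uno.t tmp/test-dos.t test-solo.t tmpp/test-uno.t tmpp/test-dos.t
  running 5 tests using 1 parallel processes
  .....
  # Ran 5 tests, 0 skipped, 0 failed.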
1401 support for running run-tests.py from another directory
1455 support for running run-tests.py from another directory
1402 $ mkdir tmp && cd tmp
1456 $ mkdir tmp && cd tmp
1403
1457
1404 $ cat > useful-file.sh << EOF
1458 $ cat > useful-file.sh << EOF
1405 > important command
1459 > important command
1406 > EOF
1460 > EOF
1407
1461
1408 $ cat > test-folder.t << EOF
1462 $ cat > test-folder.t << EOF
1409 > $ cat \$TESTDIR/useful-file.sh
1463 > $ cat \$TESTDIR/useful-file.sh
1410 > important command
1464 > important command
1411 > EOF
1465 > EOF
1412
1466
1413 $ cat > test-folder-fail.t << EOF
1467 $ cat > test-folder-fail.t << EOF
1414 > $ cat \$TESTDIR/useful-file.sh
1468 > $ cat \$TESTDIR/useful-file.sh
1415 > important commando
1469 > important commando
1416 > EOF
1470 > EOF
1417
1471
1418 $ cd ..
1472 $ cd ..
1419 $ rt tmp/test-*.t
1473 $ rt tmp/test-*.t
1474 running 2 tests using 1 parallel processes
1420
1475
1421 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1476 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1422 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1477 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1423 @@ -1,2 +1,2 @@
1478 @@ -1,2 +1,2 @@
1424 $ cat $TESTDIR/useful-file.sh
1479 $ cat $TESTDIR/useful-file.sh
1425 - important commando
1480 - important commando
1426 + important command
1481 + important command
1427
1482
1428 ERROR: test-folder-fail.t output changed
1483 ERROR: test-folder-fail.t output changed
1429 !.
1484 !.
1430 Failed test-folder-fail.t: output changed
1485 Failed test-folder-fail.t: output changed
1431 # Ran 2 tests, 0 skipped, 1 failed.
1486 # Ran 2 tests, 0 skipped, 1 failed.
1432 python hash seed: * (glob)
1487 python hash seed: * (glob)
1433 [1]
1488 [1]
1434
1489
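When a test fails, the observed output is written alongside the test as `<name>.t.err` (or into --outputdir, as a later block shows), so the failure can be inspected offline or the new output accepted wholesale. A sketch, assuming the failing run above left tmp/test-folder-fail.t.err behind:

  $ diff -u tmp/test-folder-fail.t tmp/test-folder-fail.t.err   # inspect what changed
  $ cp tmp/test-folder-fail.t.err tmp/test-folder-fail.t        # accept the new output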
1435 support for bisecting failed tests automatically
1490 support for bisecting failed tests automatically
1436 $ hg init bisect
1491 $ hg init bisect
1437 $ cd bisect
1492 $ cd bisect
1438 $ cat >> test-bisect.t <<EOF
1493 $ cat >> test-bisect.t <<EOF
1439 > $ echo pass
1494 > $ echo pass
1440 > pass
1495 > pass
1441 > EOF
1496 > EOF
1442 $ hg add test-bisect.t
1497 $ hg add test-bisect.t
1443 $ hg ci -m 'good'
1498 $ hg ci -m 'good'
1444 $ cat >> test-bisect.t <<EOF
1499 $ cat >> test-bisect.t <<EOF
1445 > $ echo pass
1500 > $ echo pass
1446 > fail
1501 > fail
1447 > EOF
1502 > EOF
1448 $ hg ci -m 'bad'
1503 $ hg ci -m 'bad'
1449 $ rt --known-good-rev=0 test-bisect.t
1504 $ rt --known-good-rev=0 test-bisect.t
1505 running 1 tests using 1 parallel processes
1450
1506
1451 --- $TESTTMP/anothertests/bisect/test-bisect.t
1507 --- $TESTTMP/anothertests/bisect/test-bisect.t
1452 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1508 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1453 @@ -1,4 +1,4 @@
1509 @@ -1,4 +1,4 @@
1454 $ echo pass
1510 $ echo pass
1455 pass
1511 pass
1456 $ echo pass
1512 $ echo pass
1457 - fail
1513 - fail
1458 + pass
1514 + pass
1459
1515
1460 ERROR: test-bisect.t output changed
1516 ERROR: test-bisect.t output changed
1461 !
1517 !
1462 Failed test-bisect.t: output changed
1518 Failed test-bisect.t: output changed
1463 test-bisect.t broken by 72cbf122d116 (bad)
1519 test-bisect.t broken by 72cbf122d116 (bad)
1464 # Ran 1 tests, 0 skipped, 1 failed.
1520 # Ran 1 tests, 0 skipped, 1 failed.
1465 python hash seed: * (glob)
1521 python hash seed: * (glob)
1466 [1]
1522 [1]
1467
1523
1468 $ cd ..
1524 $ cd ..
1469
1525
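With --known-good-rev, run-tests.py drives a bisection of the repository containing the test between the given good revision and the current one, re-running the failing test at each step until it can name the first bad changeset ("broken by ..."). Roughly the manual equivalent, as a sketch using hg bisect directly (paths assume the layout created above):

  $ cd bisect
  $ hg bisect --reset
  $ hg bisect --good 0
  $ hg bisect --bad tip
  $ hg bisect --command "$RUNTESTDIR/run-tests.py test-bisect.t"
  $ cd ..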
1470 support bisecting a separate repo
1526 support bisecting a separate repo
1471
1527
1472 $ hg init bisect-dependent
1528 $ hg init bisect-dependent
1473 $ cd bisect-dependent
1529 $ cd bisect-dependent
1474 $ cat > test-bisect-dependent.t <<EOF
1530 $ cat > test-bisect-dependent.t <<EOF
1475 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1531 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1476 > pass
1532 > pass
1477 > EOF
1533 > EOF
1478 $ hg commit -Am dependent test-bisect-dependent.t
1534 $ hg commit -Am dependent test-bisect-dependent.t
1479
1535
1480 $ rt --known-good-rev=0 test-bisect-dependent.t
1536 $ rt --known-good-rev=0 test-bisect-dependent.t
1537 running 1 tests using 1 parallel processes
1481
1538
1482 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1539 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1483 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1540 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1484 @@ -1,2 +1,2 @@
1541 @@ -1,2 +1,2 @@
1485 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1542 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1486 - pass
1543 - pass
1487 + fail
1544 + fail
1488
1545
1489 ERROR: test-bisect-dependent.t output changed
1546 ERROR: test-bisect-dependent.t output changed
1490 !
1547 !
1491 Failed test-bisect-dependent.t: output changed
1548 Failed test-bisect-dependent.t: output changed
1492 Failed to identify failure point for test-bisect-dependent.t
1549 Failed to identify failure point for test-bisect-dependent.t
1493 # Ran 1 tests, 0 skipped, 1 failed.
1550 # Ran 1 tests, 0 skipped, 1 failed.
1494 python hash seed: * (glob)
1551 python hash seed: * (glob)
1495 [1]
1552 [1]
1496
1553
1497 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1554 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1498 usage: run-tests.py [options] [tests]
1555 usage: run-tests.py [options] [tests]
1499 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1556 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1500 [2]
1557 [2]
1501
1558
1502 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1559 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1560 running 1 tests using 1 parallel processes
1503
1561
1504 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1562 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1505 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1563 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1506 @@ -1,2 +1,2 @@
1564 @@ -1,2 +1,2 @@
1507 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1565 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1508 - pass
1566 - pass
1509 + fail
1567 + fail
1510
1568
1511 ERROR: test-bisect-dependent.t output changed
1569 ERROR: test-bisect-dependent.t output changed
1512 !
1570 !
1513 Failed test-bisect-dependent.t: output changed
1571 Failed test-bisect-dependent.t: output changed
1514 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1572 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1515 # Ran 1 tests, 0 skipped, 1 failed.
1573 # Ran 1 tests, 0 skipped, 1 failed.
1516 python hash seed: * (glob)
1574 python hash seed: * (glob)
1517 [1]
1575 [1]
1518
1576
1519 $ cd ..
1577 $ cd ..
1520
1578
1521 Test a broken #if statement doesn't break run-tests threading.
1579 Test a broken #if statement doesn't break run-tests threading.
1522 ==============================================================
1580 ==============================================================
1523 $ mkdir broken
1581 $ mkdir broken
1524 $ cd broken
1582 $ cd broken
1525 $ cat > test-broken.t <<EOF
1583 $ cat > test-broken.t <<EOF
1526 > true
1584 > true
1527 > #if notarealhghavefeature
1585 > #if notarealhghavefeature
1528 > $ false
1586 > $ false
1529 > #endif
1587 > #endif
1530 > EOF
1588 > EOF
1531 $ for f in 1 2 3 4 ; do
1589 $ for f in 1 2 3 4 ; do
1532 > cat > test-works-$f.t <<EOF
1590 > cat > test-works-$f.t <<EOF
1533 > This is test case $f
1591 > This is test case $f
1534 > $ sleep 1
1592 > $ sleep 1
1535 > EOF
1593 > EOF
1536 > done
1594 > done
1537 $ rt -j 2
1595 $ rt -j 2
1596 running 5 tests using 2 parallel processes
1538 ....
1597 ....
1539 # Ran 5 tests, 0 skipped, 0 failed.
1598 # Ran 5 tests, 0 skipped, 0 failed.
1540 skipped: unknown feature: notarealhghavefeature
1599 skipped: unknown feature: notarealhghavefeature
1541
1600
1542 $ cd ..
1601 $ cd ..
1543 $ rm -rf broken
1602 $ rm -rf broken
1544
1603
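A feature name that hghave does not recognize only disables the enclosing #if block and is reported as "skipped: unknown feature"; it does not abort the run or wedge the worker processes, which is the point of running the batch above with -j 2. The new header line also makes the chosen parallelism visible. A self-contained sketch (directory and file names are illustrative):

  $ mkdir paralleldemo && cd paralleldemo
  $ for f in 1 2 3 4; do printf '  $ true\n' > test-par-$f.t; done
  $ rt -j 4
  running 4 tests using 4 parallel processes
  ....
  # Ran 4 tests, 0 skipped, 0 failed.
  $ cd .. && rm -rf paralleldemo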
1545 Test cases in .t files
1604 Test cases in .t files
1546 ======================
1605 ======================
1547 $ mkdir cases
1606 $ mkdir cases
1548 $ cd cases
1607 $ cd cases
1549 $ cat > test-cases-abc.t <<'EOF'
1608 $ cat > test-cases-abc.t <<'EOF'
1550 > #testcases A B C
1609 > #testcases A B C
1551 > $ V=B
1610 > $ V=B
1552 > #if A
1611 > #if A
1553 > $ V=A
1612 > $ V=A
1554 > #endif
1613 > #endif
1555 > #if C
1614 > #if C
1556 > $ V=C
1615 > $ V=C
1557 > #endif
1616 > #endif
1558 > $ echo $V | sed 's/A/C/'
1617 > $ echo $V | sed 's/A/C/'
1559 > C
1618 > C
1560 > #if C
1619 > #if C
1561 > $ [ $V = C ]
1620 > $ [ $V = C ]
1562 > #endif
1621 > #endif
1563 > #if A
1622 > #if A
1564 > $ [ $V = C ]
1623 > $ [ $V = C ]
1565 > [1]
1624 > [1]
1566 > #endif
1625 > #endif
1567 > #if no-C
1626 > #if no-C
1568 > $ [ $V = C ]
1627 > $ [ $V = C ]
1569 > [1]
1628 > [1]
1570 > #endif
1629 > #endif
1571 > $ [ $V = D ]
1630 > $ [ $V = D ]
1572 > [1]
1631 > [1]
1573 > EOF
1632 > EOF
1574 $ rt
1633 $ rt
1634 running 3 tests using 1 parallel processes
1575 .
1635 .
1576 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1636 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1577 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1637 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1578 @@ -7,7 +7,7 @@
1638 @@ -7,7 +7,7 @@
1579 $ V=C
1639 $ V=C
1580 #endif
1640 #endif
1581 $ echo $V | sed 's/A/C/'
1641 $ echo $V | sed 's/A/C/'
1582 - C
1642 - C
1583 + B
1643 + B
1584 #if C
1644 #if C
1585 $ [ $V = C ]
1645 $ [ $V = C ]
1586 #endif
1646 #endif
1587
1647
1588 ERROR: test-cases-abc.t#B output changed
1648 ERROR: test-cases-abc.t#B output changed
1589 !.
1649 !.
1590 Failed test-cases-abc.t#B: output changed
1650 Failed test-cases-abc.t#B: output changed
1591 # Ran 3 tests, 0 skipped, 1 failed.
1651 # Ran 3 tests, 0 skipped, 1 failed.
1592 python hash seed: * (glob)
1652 python hash seed: * (glob)
1593 [1]
1653 [1]
1594
1654
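Case B fails by design: V only changes inside the #if A and #if C blocks, so under B it keeps its initial value and `sed 's/A/C/'` prints `B` instead of the expected `C`. For reference, a minimal passing sketch of the #testcases directive (file and case names are illustrative; this is a standalone sketch, not part of the recorded run):

  $ cat > test-cases-minimal.t <<'EOF'
  > #testcases alpha beta
  > #if alpha
  >   $ echo alpha
  >   alpha
  > #endif
  > #if beta
  >   $ echo beta
  >   beta
  > #endif
  > EOF
  $ rt test-cases-minimal.t
  running 2 tests using 1 parallel processes
  ..
  # Ran 2 tests, 0 skipped, 0 failed.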
1595 --restart works
1655 --restart works
1596
1656
1597 $ rt --restart
1657 $ rt --restart
1658 running 2 tests using 1 parallel processes
1598
1659
1599 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1660 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1600 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1661 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1601 @@ -7,7 +7,7 @@
1662 @@ -7,7 +7,7 @@
1602 $ V=C
1663 $ V=C
1603 #endif
1664 #endif
1604 $ echo $V | sed 's/A/C/'
1665 $ echo $V | sed 's/A/C/'
1605 - C
1666 - C
1606 + B
1667 + B
1607 #if C
1668 #if C
1608 $ [ $V = C ]
1669 $ [ $V = C ]
1609 #endif
1670 #endif
1610
1671
1611 ERROR: test-cases-abc.t#B output changed
1672 ERROR: test-cases-abc.t#B output changed
1612 !.
1673 !.
1613 Failed test-cases-abc.t#B: output changed
1674 Failed test-cases-abc.t#B: output changed
1614 # Ran 2 tests, 0 skipped, 1 failed.
1675 # Ran 2 tests, 0 skipped, 1 failed.
1615 python hash seed: * (glob)
1676 python hash seed: * (glob)
1616 [1]
1677 [1]
1617
1678
1618 --restart works with outputdir
1679 --restart works with outputdir
1619
1680
1620 $ mkdir output
1681 $ mkdir output
1621 $ mv test-cases-abc.t#B.err output
1682 $ mv test-cases-abc.t#B.err output
1622 $ rt --restart --outputdir output
1683 $ rt --restart --outputdir output
1684 running 2 tests using 1 parallel processes
1623
1685
1624 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1686 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1625 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1687 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1626 @@ -7,7 +7,7 @@
1688 @@ -7,7 +7,7 @@
1627 $ V=C
1689 $ V=C
1628 #endif
1690 #endif
1629 $ echo $V | sed 's/A/C/'
1691 $ echo $V | sed 's/A/C/'
1630 - C
1692 - C
1631 + B
1693 + B
1632 #if C
1694 #if C
1633 $ [ $V = C ]
1695 $ [ $V = C ]
1634 #endif
1696 #endif
1635
1697
1636 ERROR: test-cases-abc.t#B output changed
1698 ERROR: test-cases-abc.t#B output changed
1637 !.
1699 !.
1638 Failed test-cases-abc.t#B: output changed
1700 Failed test-cases-abc.t#B: output changed
1639 # Ran 2 tests, 0 skipped, 1 failed.
1701 # Ran 2 tests, 0 skipped, 1 failed.
1640 python hash seed: * (glob)
1702 python hash seed: * (glob)
1641 [1]
1703 [1]
1642
1704
1643 Test TESTCASE variable
1705 Test TESTCASE variable
1644
1706
1645 $ cat > test-cases-ab.t <<'EOF'
1707 $ cat > test-cases-ab.t <<'EOF'
1646 > $ dostuff() {
1708 > $ dostuff() {
1647 > > echo "In case $TESTCASE"
1709 > > echo "In case $TESTCASE"
1648 > > }
1710 > > }
1649 > #testcases A B
1711 > #testcases A B
1650 > #if A
1712 > #if A
1651 > $ dostuff
1713 > $ dostuff
1652 > In case A
1714 > In case A
1653 > #endif
1715 > #endif
1654 > #if B
1716 > #if B
1655 > $ dostuff
1717 > $ dostuff
1656 > In case B
1718 > In case B
1657 > #endif
1719 > #endif
1658 > EOF
1720 > EOF
1659 $ rt test-cases-ab.t
1721 $ rt test-cases-ab.t
1722 running 2 tests using 1 parallel processes
1660 ..
1723 ..
1661 # Ran 2 tests, 0 skipped, 0 failed.
1724 # Ran 2 tests, 0 skipped, 0 failed.
1662
1725
1663 Support running a specific test case
1726 Support running a specific test case
1664
1727
1665 $ rt "test-cases-abc.t#B"
1728 $ rt "test-cases-abc.t#B"
1729 running 1 tests using 1 parallel processes
1666
1730
1667 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1731 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1668 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1732 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1669 @@ -7,7 +7,7 @@
1733 @@ -7,7 +7,7 @@
1670 $ V=C
1734 $ V=C
1671 #endif
1735 #endif
1672 $ echo $V | sed 's/A/C/'
1736 $ echo $V | sed 's/A/C/'
1673 - C
1737 - C
1674 + B
1738 + B
1675 #if C
1739 #if C
1676 $ [ $V = C ]
1740 $ [ $V = C ]
1677 #endif
1741 #endif
1678
1742
1679 ERROR: test-cases-abc.t#B output changed
1743 ERROR: test-cases-abc.t#B output changed
1680 !
1744 !
1681 Failed test-cases-abc.t#B: output changed
1745 Failed test-cases-abc.t#B: output changed
1682 # Ran 1 tests, 0 skipped, 1 failed.
1746 # Ran 1 tests, 0 skipped, 1 failed.
1683 python hash seed: * (glob)
1747 python hash seed: * (glob)
1684 [1]
1748 [1]
1685
1749
1686 Support running multiple test cases in the same file
1750 Support running multiple test cases in the same file
1687
1751
1688 $ rt test-cases-abc.t#B test-cases-abc.t#C
1752 $ rt test-cases-abc.t#B test-cases-abc.t#C
1753 running 2 tests using 1 parallel processes
1689
1754
1690 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1755 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1691 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1756 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1692 @@ -7,7 +7,7 @@
1757 @@ -7,7 +7,7 @@
1693 $ V=C
1758 $ V=C
1694 #endif
1759 #endif
1695 $ echo $V | sed 's/A/C/'
1760 $ echo $V | sed 's/A/C/'
1696 - C
1761 - C
1697 + B
1762 + B
1698 #if C
1763 #if C
1699 $ [ $V = C ]
1764 $ [ $V = C ]
1700 #endif
1765 #endif
1701
1766
1702 ERROR: test-cases-abc.t#B output changed
1767 ERROR: test-cases-abc.t#B output changed
1703 !.
1768 !.
1704 Failed test-cases-abc.t#B: output changed
1769 Failed test-cases-abc.t#B: output changed
1705 # Ran 2 tests, 0 skipped, 1 failed.
1770 # Ran 2 tests, 0 skipped, 1 failed.
1706 python hash seed: * (glob)
1771 python hash seed: * (glob)
1707 [1]
1772 [1]
1708
1773
1709 Support ignoring invalid test cases
1774 Support ignoring invalid test cases
1710
1775
1711 $ rt test-cases-abc.t#B test-cases-abc.t#D
1776 $ rt test-cases-abc.t#B test-cases-abc.t#D
1777 running 1 tests using 1 parallel processes
1712
1778
1713 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1779 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1714 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1780 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1715 @@ -7,7 +7,7 @@
1781 @@ -7,7 +7,7 @@
1716 $ V=C
1782 $ V=C
1717 #endif
1783 #endif
1718 $ echo $V | sed 's/A/C/'
1784 $ echo $V | sed 's/A/C/'
1719 - C
1785 - C
1720 + B
1786 + B
1721 #if C
1787 #if C
1722 $ [ $V = C ]
1788 $ [ $V = C ]
1723 #endif
1789 #endif
1724
1790
1725 ERROR: test-cases-abc.t#B output changed
1791 ERROR: test-cases-abc.t#B output changed
1726 !
1792 !
1727 Failed test-cases-abc.t#B: output changed
1793 Failed test-cases-abc.t#B: output changed
1728 # Ran 1 tests, 0 skipped, 1 failed.
1794 # Ran 1 tests, 0 skipped, 1 failed.
1729 python hash seed: * (glob)
1795 python hash seed: * (glob)
1730 [1]
1796 [1]
1731
1797
1732 Support running complex test case names
1798 Support running complex test case names
1733
1799
1734 $ cat > test-cases-advanced-cases.t <<'EOF'
1800 $ cat > test-cases-advanced-cases.t <<'EOF'
1735 > #testcases simple case-with-dashes casewith_-.chars
1801 > #testcases simple case-with-dashes casewith_-.chars
1736 > $ echo $TESTCASE
1802 > $ echo $TESTCASE
1737 > simple
1803 > simple
1738 > EOF
1804 > EOF
1739
1805
1740 $ cat test-cases-advanced-cases.t
1806 $ cat test-cases-advanced-cases.t
1741 #testcases simple case-with-dashes casewith_-.chars
1807 #testcases simple case-with-dashes casewith_-.chars
1742 $ echo $TESTCASE
1808 $ echo $TESTCASE
1743 simple
1809 simple
1744
1810
1745 $ rt test-cases-advanced-cases.t
1811 $ rt test-cases-advanced-cases.t
1812 running 3 tests using 1 parallel processes
1746
1813
1747 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1814 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1748 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1815 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1749 @@ -1,3 +1,3 @@
1816 @@ -1,3 +1,3 @@
1750 #testcases simple case-with-dashes casewith_-.chars
1817 #testcases simple case-with-dashes casewith_-.chars
1751 $ echo $TESTCASE
1818 $ echo $TESTCASE
1752 - simple
1819 - simple
1753 + case-with-dashes
1820 + case-with-dashes
1754
1821
1755 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1822 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1756 !
1823 !
1757 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1758 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1759 @@ -1,3 +1,3 @@
1826 @@ -1,3 +1,3 @@
1760 #testcases simple case-with-dashes casewith_-.chars
1827 #testcases simple case-with-dashes casewith_-.chars
1761 $ echo $TESTCASE
1828 $ echo $TESTCASE
1762 - simple
1829 - simple
1763 + casewith_-.chars
1830 + casewith_-.chars
1764
1831
1765 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1832 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1766 !.
1833 !.
1767 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1834 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1768 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1835 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1769 # Ran 3 tests, 0 skipped, 2 failed.
1836 # Ran 3 tests, 0 skipped, 2 failed.
1770 python hash seed: * (glob)
1837 python hash seed: * (glob)
1771 [1]
1838 [1]
1772
1839
1773 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1840 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1841 running 1 tests using 1 parallel processes
1774
1842
1775 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1843 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1776 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1844 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1777 @@ -1,3 +1,3 @@
1845 @@ -1,3 +1,3 @@
1778 #testcases simple case-with-dashes casewith_-.chars
1846 #testcases simple case-with-dashes casewith_-.chars
1779 $ echo $TESTCASE
1847 $ echo $TESTCASE
1780 - simple
1848 - simple
1781 + case-with-dashes
1849 + case-with-dashes
1782
1850
1783 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1851 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1784 !
1852 !
1785 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1853 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1786 # Ran 1 tests, 0 skipped, 1 failed.
1854 # Ran 1 tests, 0 skipped, 1 failed.
1787 python hash seed: * (glob)
1855 python hash seed: * (glob)
1788 [1]
1856 [1]
1789
1857
1790 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1858 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1859 running 1 tests using 1 parallel processes
1791
1860
1792 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1861 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1793 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1862 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1794 @@ -1,3 +1,3 @@
1863 @@ -1,3 +1,3 @@
1795 #testcases simple case-with-dashes casewith_-.chars
1864 #testcases simple case-with-dashes casewith_-.chars
1796 $ echo $TESTCASE
1865 $ echo $TESTCASE
1797 - simple
1866 - simple
1798 + casewith_-.chars
1867 + casewith_-.chars
1799
1868
1800 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1869 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1801 !
1870 !
1802 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1803 # Ran 1 tests, 0 skipped, 1 failed.
1872 # Ran 1 tests, 0 skipped, 1 failed.
1804 python hash seed: * (glob)
1873 python hash seed: * (glob)
1805 [1]
1874 [1]
1806
1875
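Individual cases are selected with the `<file>#<case>` form used above, and such selectors can be mixed with plain file names in a single invocation. A sketch that combines passing cases from the files created earlier:

  $ rt test-cases-ab.t#A test-cases-ab.t#B test-cases-advanced-cases.t#simple
  running 3 tests using 1 parallel processes
  ...
  # Ran 3 tests, 0 skipped, 0 failed.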
1807 Test automatic pattern replacement
1876 Test automatic pattern replacement
1808 ==================================
1877 ==================================
1809
1878
1810 $ cat << EOF >> common-pattern.py
1879 $ cat << EOF >> common-pattern.py
1811 > substitutions = [
1880 > substitutions = [
1812 > (br'foo-(.*)\\b',
1881 > (br'foo-(.*)\\b',
1813 > br'\$XXX=\\1\$'),
1882 > br'\$XXX=\\1\$'),
1814 > (br'bar\\n',
1883 > (br'bar\\n',
1815 > br'\$YYY$\\n'),
1884 > br'\$YYY$\\n'),
1816 > ]
1885 > ]
1817 > EOF
1886 > EOF
1818
1887
1819 $ cat << EOF >> test-substitution.t
1888 $ cat << EOF >> test-substitution.t
1820 > $ echo foo-12
1889 > $ echo foo-12
1821 > \$XXX=12$
1890 > \$XXX=12$
1822 > $ echo foo-42
1891 > $ echo foo-42
1823 > \$XXX=42$
1892 > \$XXX=42$
1824 > $ echo bar prior
1893 > $ echo bar prior
1825 > bar prior
1894 > bar prior
1826 > $ echo lastbar
1895 > $ echo lastbar
1827 > last\$YYY$
1896 > last\$YYY$
1828 > $ echo foo-bar foo-baz
1897 > $ echo foo-bar foo-baz
1829 > EOF
1898 > EOF
1830
1899
1831 $ rt test-substitution.t
1900 $ rt test-substitution.t
1901 running 1 tests using 1 parallel processes
1832
1902
1833 --- $TESTTMP/anothertests/cases/test-substitution.t
1903 --- $TESTTMP/anothertests/cases/test-substitution.t
1834 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1904 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1835 @@ -7,3 +7,4 @@
1905 @@ -7,3 +7,4 @@
1836 $ echo lastbar
1906 $ echo lastbar
1837 last$YYY$
1907 last$YYY$
1838 $ echo foo-bar foo-baz
1908 $ echo foo-bar foo-baz
1839 + $XXX=bar foo-baz$
1909 + $XXX=bar foo-baz$
1840
1910
1841 ERROR: test-substitution.t output changed
1911 ERROR: test-substitution.t output changed
1842 !
1912 !
1843 Failed test-substitution.t: output changed
1913 Failed test-substitution.t: output changed
1844 # Ran 1 tests, 0 skipped, 1 failed.
1914 # Ran 1 tests, 0 skipped, 1 failed.
1845 python hash seed: * (glob)
1915 python hash seed: * (glob)
1846 [1]
1916 [1]
1847
1917
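Each entry in common-pattern.py is a (bytes regex, bytes replacement) pair that run-tests.py applies to test output before comparing it, which is why `foo-12` matches the expected `$XXX=12$` above; the one failure is simply the last `echo`, whose output has no expected line and therefore surfaces after being rewritten by the greedy `foo-(.*)\b` pattern. A sketch of appending one more, purely illustrative, entry:

  $ cat >> common-pattern.py <<'EOF'
  > substitutions += [
  >     # hypothetical entry: normalize port numbers in test output
  >     (br'port \d+', b'port $PORT$'),
  > ]
  > EOF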
1848 --extra-config-opt works
1918 --extra-config-opt works
1849
1919
1850 $ cat << EOF >> test-config-opt.t
1920 $ cat << EOF >> test-config-opt.t
1851 > $ hg init test-config-opt
1921 > $ hg init test-config-opt
1852 > $ hg -R test-config-opt purge
1922 > $ hg -R test-config-opt purge
1853 > EOF
1923 > EOF
1854
1924
1855 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1925 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1926 running 1 tests using 1 parallel processes
1856 .
1927 .
1857 # Ran 1 tests, 0 skipped, 0 failed.
1928 # Ran 1 tests, 0 skipped, 0 failed.
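--extra-config-opt injects `section.name=value` pairs into the configuration seen by every hg invocation inside the tests, which is why purge is available above without touching any hgrc. The option can be repeated; a sketch with a second, illustrative pair that should not change the expected output:

  $ rt --extra-config-opt extensions.purge= \
  >      --extra-config-opt extensions.rebase= test-config-opt.t
  running 1 tests using 1 parallel processes
  .
  # Ran 1 tests, 0 skipped, 0 failed.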