run-tests: run tests with as many processes as cores by default...
Gregory Szorc
r40281:e7e70c03 default
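
This commit replaces the hard-coded default of a single test job with the host's core count. As a rough, hypothetical sketch (not part of the commit itself), the snippet below mirrors the defaults-resolution loop visible in getparser() further down the diff, to show that an explicit HGTEST_JOBS environment variable still overrides the new CPU-count fallback:

import multiprocessing
import os

# Sketch of run-tests.py's defaults table: option -> (env var, fallback).
defaults = {
    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
}

for option, (envvar, fallback) in defaults.items():
    # A set $HGTEST_JOBS wins; otherwise the CPU count is used.
    defaults[option] = type(fallback)(os.environ.get(envvar, fallback))

print('running with %d parallel jobs' % defaults['jobs'])

In effect, a plain ./run-tests.py now behaves roughly like passing -j<number of cores> on a machine where HGTEST_JOBS is unset.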
@@ -1,3239 +1,3240 @@
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import difflib
50 import difflib
51 import distutils.version as version
51 import distutils.version as version
52 import errno
52 import errno
53 import json
53 import json
54 import multiprocessing
54 import os
55 import os
55 import random
56 import random
56 import re
57 import re
57 import shutil
58 import shutil
58 import signal
59 import signal
59 import socket
60 import socket
60 import subprocess
61 import subprocess
61 import sys
62 import sys
62 import sysconfig
63 import sysconfig
63 import tempfile
64 import tempfile
64 import threading
65 import threading
65 import time
66 import time
66 import unittest
67 import unittest
67 import uuid
68 import uuid
68 import xml.dom.minidom as minidom
69 import xml.dom.minidom as minidom
69
70
70 try:
71 try:
71 import Queue as queue
72 import Queue as queue
72 except ImportError:
73 except ImportError:
73 import queue
74 import queue
74
75
75 try:
76 try:
76 import shlex
77 import shlex
77 shellquote = shlex.quote
78 shellquote = shlex.quote
78 except (ImportError, AttributeError):
79 except (ImportError, AttributeError):
79 import pipes
80 import pipes
80 shellquote = pipes.quote
81 shellquote = pipes.quote
81
82
82 if os.environ.get('RTUNICODEPEDANTRY', False):
83 if os.environ.get('RTUNICODEPEDANTRY', False):
83 try:
84 try:
84 reload(sys)
85 reload(sys)
85 sys.setdefaultencoding("undefined")
86 sys.setdefaultencoding("undefined")
86 except NameError:
87 except NameError:
87 pass
88 pass
88
89
89 processlock = threading.Lock()
90 processlock = threading.Lock()
90
91
91 pygmentspresent = False
92 pygmentspresent = False
92 # ANSI color is unsupported prior to Windows 10
93 # ANSI color is unsupported prior to Windows 10
93 if os.name != 'nt':
94 if os.name != 'nt':
94 try: # is pygments installed
95 try: # is pygments installed
95 import pygments
96 import pygments
96 import pygments.lexers as lexers
97 import pygments.lexers as lexers
97 import pygments.lexer as lexer
98 import pygments.lexer as lexer
98 import pygments.formatters as formatters
99 import pygments.formatters as formatters
99 import pygments.token as token
100 import pygments.token as token
100 import pygments.style as style
101 import pygments.style as style
101 pygmentspresent = True
102 pygmentspresent = True
102 difflexer = lexers.DiffLexer()
103 difflexer = lexers.DiffLexer()
103 terminal256formatter = formatters.Terminal256Formatter()
104 terminal256formatter = formatters.Terminal256Formatter()
104 except ImportError:
105 except ImportError:
105 pass
106 pass
106
107
107 if pygmentspresent:
108 if pygmentspresent:
108 class TestRunnerStyle(style.Style):
109 class TestRunnerStyle(style.Style):
109 default_style = ""
110 default_style = ""
110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 failed = token.string_to_tokentype("Token.Generic.Failed")
112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 failedname = token.string_to_tokentype("Token.Generic.FName")
114 styles = {
115 styles = {
115 skipped: '#e5e5e5',
116 skipped: '#e5e5e5',
116 skippedname: '#00ffff',
117 skippedname: '#00ffff',
117 failed: '#7f0000',
118 failed: '#7f0000',
118 failedname: '#ff0000',
119 failedname: '#ff0000',
119 }
120 }
120
121
121 class TestRunnerLexer(lexer.RegexLexer):
122 class TestRunnerLexer(lexer.RegexLexer):
122 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
123 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
123 tokens = {
124 tokens = {
124 'root': [
125 'root': [
125 (r'^Skipped', token.Generic.Skipped, 'skipped'),
126 (r'^Skipped', token.Generic.Skipped, 'skipped'),
126 (r'^Failed ', token.Generic.Failed, 'failed'),
127 (r'^Failed ', token.Generic.Failed, 'failed'),
127 (r'^ERROR: ', token.Generic.Failed, 'failed'),
128 (r'^ERROR: ', token.Generic.Failed, 'failed'),
128 ],
129 ],
129 'skipped': [
130 'skipped': [
130 (testpattern, token.Generic.SName),
131 (testpattern, token.Generic.SName),
131 (r':.*', token.Generic.Skipped),
132 (r':.*', token.Generic.Skipped),
132 ],
133 ],
133 'failed': [
134 'failed': [
134 (testpattern, token.Generic.FName),
135 (testpattern, token.Generic.FName),
135 (r'(:| ).*', token.Generic.Failed),
136 (r'(:| ).*', token.Generic.Failed),
136 ]
137 ]
137 }
138 }
138
139
139 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
140 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
140 runnerlexer = TestRunnerLexer()
141 runnerlexer = TestRunnerLexer()
141
142
142 origenviron = os.environ.copy()
143 origenviron = os.environ.copy()
143
144
144 if sys.version_info > (3, 5, 0):
145 if sys.version_info > (3, 5, 0):
145 PYTHON3 = True
146 PYTHON3 = True
146 xrange = range # we use xrange in one place, and we'd rather not use range
147 xrange = range # we use xrange in one place, and we'd rather not use range
147 def _bytespath(p):
148 def _bytespath(p):
148 if p is None:
149 if p is None:
149 return p
150 return p
150 return p.encode('utf-8')
151 return p.encode('utf-8')
151
152
152 def _strpath(p):
153 def _strpath(p):
153 if p is None:
154 if p is None:
154 return p
155 return p
155 return p.decode('utf-8')
156 return p.decode('utf-8')
156
157
157 osenvironb = getattr(os, 'environb', None)
158 osenvironb = getattr(os, 'environb', None)
158 if osenvironb is None:
159 if osenvironb is None:
159 # Windows lacks os.environb, for instance. A proxy over the real thing
160 # Windows lacks os.environb, for instance. A proxy over the real thing
160 # instead of a copy allows the environment to be updated via bytes on
161 # instead of a copy allows the environment to be updated via bytes on
161 # all platforms.
162 # all platforms.
162 class environbytes(object):
163 class environbytes(object):
163 def __init__(self, strenv):
164 def __init__(self, strenv):
164 self.__len__ = strenv.__len__
165 self.__len__ = strenv.__len__
165 self.clear = strenv.clear
166 self.clear = strenv.clear
166 self._strenv = strenv
167 self._strenv = strenv
167 def __getitem__(self, k):
168 def __getitem__(self, k):
168 v = self._strenv.__getitem__(_strpath(k))
169 v = self._strenv.__getitem__(_strpath(k))
169 return _bytespath(v)
170 return _bytespath(v)
170 def __setitem__(self, k, v):
171 def __setitem__(self, k, v):
171 self._strenv.__setitem__(_strpath(k), _strpath(v))
172 self._strenv.__setitem__(_strpath(k), _strpath(v))
172 def __delitem__(self, k):
173 def __delitem__(self, k):
173 self._strenv.__delitem__(_strpath(k))
174 self._strenv.__delitem__(_strpath(k))
174 def __contains__(self, k):
175 def __contains__(self, k):
175 return self._strenv.__contains__(_strpath(k))
176 return self._strenv.__contains__(_strpath(k))
176 def __iter__(self):
177 def __iter__(self):
177 return iter([_bytespath(k) for k in iter(self._strenv)])
178 return iter([_bytespath(k) for k in iter(self._strenv)])
178 def get(self, k, default=None):
179 def get(self, k, default=None):
179 v = self._strenv.get(_strpath(k), _strpath(default))
180 v = self._strenv.get(_strpath(k), _strpath(default))
180 return _bytespath(v)
181 return _bytespath(v)
181 def pop(self, k, default=None):
182 def pop(self, k, default=None):
182 v = self._strenv.pop(_strpath(k), _strpath(default))
183 v = self._strenv.pop(_strpath(k), _strpath(default))
183 return _bytespath(v)
184 return _bytespath(v)
184
185
185 osenvironb = environbytes(os.environ)
186 osenvironb = environbytes(os.environ)
186
187
187 getcwdb = getattr(os, 'getcwdb')
188 getcwdb = getattr(os, 'getcwdb')
188 if not getcwdb or os.name == 'nt':
189 if not getcwdb or os.name == 'nt':
189 getcwdb = lambda: _bytespath(os.getcwd())
190 getcwdb = lambda: _bytespath(os.getcwd())
190
191
191 elif sys.version_info >= (3, 0, 0):
192 elif sys.version_info >= (3, 0, 0):
192 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
193 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
193 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
194 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
194 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
195 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
195 else:
196 else:
196 PYTHON3 = False
197 PYTHON3 = False
197
198
198 # In python 2.x, path operations are generally done using
199 # In python 2.x, path operations are generally done using
199 # bytestrings by default, so we don't have to do any extra
200 # bytestrings by default, so we don't have to do any extra
200 # fiddling there. We define the wrapper functions anyway just to
201 # fiddling there. We define the wrapper functions anyway just to
201 # help keep code consistent between platforms.
202 # help keep code consistent between platforms.
202 def _bytespath(p):
203 def _bytespath(p):
203 return p
204 return p
204
205
205 _strpath = _bytespath
206 _strpath = _bytespath
206 osenvironb = os.environ
207 osenvironb = os.environ
207 getcwdb = os.getcwd
208 getcwdb = os.getcwd
208
209
209 # For Windows support
210 # For Windows support
210 wifexited = getattr(os, "WIFEXITED", lambda x: False)
211 wifexited = getattr(os, "WIFEXITED", lambda x: False)
211
212
212 # Whether to use IPv6
213 # Whether to use IPv6
213 def checksocketfamily(name, port=20058):
214 def checksocketfamily(name, port=20058):
214 """return true if we can listen on localhost using family=name
215 """return true if we can listen on localhost using family=name
215
216
216 name should be either 'AF_INET', or 'AF_INET6'.
217 name should be either 'AF_INET', or 'AF_INET6'.
217 port being used is okay - EADDRINUSE is considered as successful.
218 port being used is okay - EADDRINUSE is considered as successful.
218 """
219 """
219 family = getattr(socket, name, None)
220 family = getattr(socket, name, None)
220 if family is None:
221 if family is None:
221 return False
222 return False
222 try:
223 try:
223 s = socket.socket(family, socket.SOCK_STREAM)
224 s = socket.socket(family, socket.SOCK_STREAM)
224 s.bind(('localhost', port))
225 s.bind(('localhost', port))
225 s.close()
226 s.close()
226 return True
227 return True
227 except socket.error as exc:
228 except socket.error as exc:
228 if exc.errno == errno.EADDRINUSE:
229 if exc.errno == errno.EADDRINUSE:
229 return True
230 return True
230 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
231 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
231 return False
232 return False
232 else:
233 else:
233 raise
234 raise
234 else:
235 else:
235 return False
236 return False
236
237
237 # useipv6 will be set by parseargs
238 # useipv6 will be set by parseargs
238 useipv6 = None
239 useipv6 = None
239
240
240 def checkportisavailable(port):
241 def checkportisavailable(port):
241 """return true if a port seems free to bind on localhost"""
242 """return true if a port seems free to bind on localhost"""
242 if useipv6:
243 if useipv6:
243 family = socket.AF_INET6
244 family = socket.AF_INET6
244 else:
245 else:
245 family = socket.AF_INET
246 family = socket.AF_INET
246 try:
247 try:
247 s = socket.socket(family, socket.SOCK_STREAM)
248 s = socket.socket(family, socket.SOCK_STREAM)
248 s.bind(('localhost', port))
249 s.bind(('localhost', port))
249 s.close()
250 s.close()
250 return True
251 return True
251 except socket.error as exc:
252 except socket.error as exc:
252 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
253 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
253 errno.EPROTONOSUPPORT):
254 errno.EPROTONOSUPPORT):
254 raise
255 raise
255 return False
256 return False
256
257
257 closefds = os.name == 'posix'
258 closefds = os.name == 'posix'
258 def Popen4(cmd, wd, timeout, env=None):
259 def Popen4(cmd, wd, timeout, env=None):
259 processlock.acquire()
260 processlock.acquire()
260 p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
261 p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
261 cwd=_strpath(wd), env=env,
262 cwd=_strpath(wd), env=env,
262 close_fds=closefds,
263 close_fds=closefds,
263 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
264 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
264 stderr=subprocess.STDOUT)
265 stderr=subprocess.STDOUT)
265 processlock.release()
266 processlock.release()
266
267
267 p.fromchild = p.stdout
268 p.fromchild = p.stdout
268 p.tochild = p.stdin
269 p.tochild = p.stdin
269 p.childerr = p.stderr
270 p.childerr = p.stderr
270
271
271 p.timeout = False
272 p.timeout = False
272 if timeout:
273 if timeout:
273 def t():
274 def t():
274 start = time.time()
275 start = time.time()
275 while time.time() - start < timeout and p.returncode is None:
276 while time.time() - start < timeout and p.returncode is None:
276 time.sleep(.1)
277 time.sleep(.1)
277 p.timeout = True
278 p.timeout = True
278 if p.returncode is None:
279 if p.returncode is None:
279 terminate(p)
280 terminate(p)
280 threading.Thread(target=t).start()
281 threading.Thread(target=t).start()
281
282
282 return p
283 return p
283
284
284 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
285 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
285 IMPL_PATH = b'PYTHONPATH'
286 IMPL_PATH = b'PYTHONPATH'
286 if 'java' in sys.platform:
287 if 'java' in sys.platform:
287 IMPL_PATH = b'JYTHONPATH'
288 IMPL_PATH = b'JYTHONPATH'
288
289
289 defaults = {
290 defaults = {
290 'jobs': ('HGTEST_JOBS', 1),
291 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
291 'timeout': ('HGTEST_TIMEOUT', 180),
292 'timeout': ('HGTEST_TIMEOUT', 180),
292 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
293 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
293 'port': ('HGTEST_PORT', 20059),
294 'port': ('HGTEST_PORT', 20059),
294 'shell': ('HGTEST_SHELL', 'sh'),
295 'shell': ('HGTEST_SHELL', 'sh'),
295 }
296 }
296
297
297 def canonpath(path):
298 def canonpath(path):
298 return os.path.realpath(os.path.expanduser(path))
299 return os.path.realpath(os.path.expanduser(path))
299
300
300 def parselistfiles(files, listtype, warn=True):
301 def parselistfiles(files, listtype, warn=True):
301 entries = dict()
302 entries = dict()
302 for filename in files:
303 for filename in files:
303 try:
304 try:
304 path = os.path.expanduser(os.path.expandvars(filename))
305 path = os.path.expanduser(os.path.expandvars(filename))
305 f = open(path, "rb")
306 f = open(path, "rb")
306 except IOError as err:
307 except IOError as err:
307 if err.errno != errno.ENOENT:
308 if err.errno != errno.ENOENT:
308 raise
309 raise
309 if warn:
310 if warn:
310 print("warning: no such %s file: %s" % (listtype, filename))
311 print("warning: no such %s file: %s" % (listtype, filename))
311 continue
312 continue
312
313
313 for line in f.readlines():
314 for line in f.readlines():
314 line = line.split(b'#', 1)[0].strip()
315 line = line.split(b'#', 1)[0].strip()
315 if line:
316 if line:
316 entries[line] = filename
317 entries[line] = filename
317
318
318 f.close()
319 f.close()
319 return entries
320 return entries
320
321
321 def parsettestcases(path):
322 def parsettestcases(path):
322 """read a .t test file, return a set of test case names
323 """read a .t test file, return a set of test case names
323
324
324 If path does not exist, return an empty set.
325 If path does not exist, return an empty set.
325 """
326 """
326 cases = []
327 cases = []
327 try:
328 try:
328 with open(path, 'rb') as f:
329 with open(path, 'rb') as f:
329 for l in f:
330 for l in f:
330 if l.startswith(b'#testcases '):
331 if l.startswith(b'#testcases '):
331 cases.append(sorted(l[11:].split()))
332 cases.append(sorted(l[11:].split()))
332 except IOError as ex:
333 except IOError as ex:
333 if ex.errno != errno.ENOENT:
334 if ex.errno != errno.ENOENT:
334 raise
335 raise
335 return cases
336 return cases
336
337
337 def getparser():
338 def getparser():
338 """Obtain the OptionParser used by the CLI."""
339 """Obtain the OptionParser used by the CLI."""
339 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
340 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
340
341
341 selection = parser.add_argument_group('Test Selection')
342 selection = parser.add_argument_group('Test Selection')
342 selection.add_argument('--allow-slow-tests', action='store_true',
343 selection.add_argument('--allow-slow-tests', action='store_true',
343 help='allow extremely slow tests')
344 help='allow extremely slow tests')
344 selection.add_argument("--blacklist", action="append",
345 selection.add_argument("--blacklist", action="append",
345 help="skip tests listed in the specified blacklist file")
346 help="skip tests listed in the specified blacklist file")
346 selection.add_argument("--changed",
347 selection.add_argument("--changed",
347 help="run tests that are changed in parent rev or working directory")
348 help="run tests that are changed in parent rev or working directory")
348 selection.add_argument("-k", "--keywords",
349 selection.add_argument("-k", "--keywords",
349 help="run tests matching keywords")
350 help="run tests matching keywords")
350 selection.add_argument("-r", "--retest", action="store_true",
351 selection.add_argument("-r", "--retest", action="store_true",
351 help = "retest failed tests")
352 help = "retest failed tests")
352 selection.add_argument("--test-list", action="append",
353 selection.add_argument("--test-list", action="append",
353 help="read tests to run from the specified file")
354 help="read tests to run from the specified file")
354 selection.add_argument("--whitelist", action="append",
355 selection.add_argument("--whitelist", action="append",
355 help="always run tests listed in the specified whitelist file")
356 help="always run tests listed in the specified whitelist file")
356 selection.add_argument('tests', metavar='TESTS', nargs='*',
357 selection.add_argument('tests', metavar='TESTS', nargs='*',
357 help='Tests to run')
358 help='Tests to run')
358
359
359 harness = parser.add_argument_group('Test Harness Behavior')
360 harness = parser.add_argument_group('Test Harness Behavior')
360 harness.add_argument('--bisect-repo',
361 harness.add_argument('--bisect-repo',
361 metavar='bisect_repo',
362 metavar='bisect_repo',
362 help=("Path of a repo to bisect. Use together with "
363 help=("Path of a repo to bisect. Use together with "
363 "--known-good-rev"))
364 "--known-good-rev"))
364 harness.add_argument("-d", "--debug", action="store_true",
365 harness.add_argument("-d", "--debug", action="store_true",
365 help="debug mode: write output of test scripts to console"
366 help="debug mode: write output of test scripts to console"
366 " rather than capturing and diffing it (disables timeout)")
367 " rather than capturing and diffing it (disables timeout)")
367 harness.add_argument("-f", "--first", action="store_true",
368 harness.add_argument("-f", "--first", action="store_true",
368 help="exit on the first test failure")
369 help="exit on the first test failure")
369 harness.add_argument("-i", "--interactive", action="store_true",
370 harness.add_argument("-i", "--interactive", action="store_true",
370 help="prompt to accept changed output")
371 help="prompt to accept changed output")
371 harness.add_argument("-j", "--jobs", type=int,
372 harness.add_argument("-j", "--jobs", type=int,
372 help="number of jobs to run in parallel"
373 help="number of jobs to run in parallel"
373 " (default: $%s or %d)" % defaults['jobs'])
374 " (default: $%s or %d)" % defaults['jobs'])
374 harness.add_argument("--keep-tmpdir", action="store_true",
375 harness.add_argument("--keep-tmpdir", action="store_true",
375 help="keep temporary directory after running tests")
376 help="keep temporary directory after running tests")
376 harness.add_argument('--known-good-rev',
377 harness.add_argument('--known-good-rev',
377 metavar="known_good_rev",
378 metavar="known_good_rev",
378 help=("Automatically bisect any failures using this "
379 help=("Automatically bisect any failures using this "
379 "revision as a known-good revision."))
380 "revision as a known-good revision."))
380 harness.add_argument("--list-tests", action="store_true",
381 harness.add_argument("--list-tests", action="store_true",
381 help="list tests instead of running them")
382 help="list tests instead of running them")
382 harness.add_argument("--loop", action="store_true",
383 harness.add_argument("--loop", action="store_true",
383 help="loop tests repeatedly")
384 help="loop tests repeatedly")
384 harness.add_argument('--random', action="store_true",
385 harness.add_argument('--random', action="store_true",
385 help='run tests in random order')
386 help='run tests in random order')
386 harness.add_argument('--order-by-runtime', action="store_true",
387 harness.add_argument('--order-by-runtime', action="store_true",
387 help='run slowest tests first, according to .testtimes')
388 help='run slowest tests first, according to .testtimes')
388 harness.add_argument("-p", "--port", type=int,
389 harness.add_argument("-p", "--port", type=int,
389 help="port on which servers should listen"
390 help="port on which servers should listen"
390 " (default: $%s or %d)" % defaults['port'])
391 " (default: $%s or %d)" % defaults['port'])
391 harness.add_argument('--profile-runner', action='store_true',
392 harness.add_argument('--profile-runner', action='store_true',
392 help='run statprof on run-tests')
393 help='run statprof on run-tests')
393 harness.add_argument("-R", "--restart", action="store_true",
394 harness.add_argument("-R", "--restart", action="store_true",
394 help="restart at last error")
395 help="restart at last error")
395 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
396 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
396 help="run each test N times (default=1)", default=1)
397 help="run each test N times (default=1)", default=1)
397 harness.add_argument("--shell",
398 harness.add_argument("--shell",
398 help="shell to use (default: $%s or %s)" % defaults['shell'])
399 help="shell to use (default: $%s or %s)" % defaults['shell'])
399 harness.add_argument('--showchannels', action='store_true',
400 harness.add_argument('--showchannels', action='store_true',
400 help='show scheduling channels')
401 help='show scheduling channels')
401 harness.add_argument("--slowtimeout", type=int,
402 harness.add_argument("--slowtimeout", type=int,
402 help="kill errant slow tests after SLOWTIMEOUT seconds"
403 help="kill errant slow tests after SLOWTIMEOUT seconds"
403 " (default: $%s or %d)" % defaults['slowtimeout'])
404 " (default: $%s or %d)" % defaults['slowtimeout'])
404 harness.add_argument("-t", "--timeout", type=int,
405 harness.add_argument("-t", "--timeout", type=int,
405 help="kill errant tests after TIMEOUT seconds"
406 help="kill errant tests after TIMEOUT seconds"
406 " (default: $%s or %d)" % defaults['timeout'])
407 " (default: $%s or %d)" % defaults['timeout'])
407 harness.add_argument("--tmpdir",
408 harness.add_argument("--tmpdir",
408 help="run tests in the given temporary directory"
409 help="run tests in the given temporary directory"
409 " (implies --keep-tmpdir)")
410 " (implies --keep-tmpdir)")
410 harness.add_argument("-v", "--verbose", action="store_true",
411 harness.add_argument("-v", "--verbose", action="store_true",
411 help="output verbose messages")
412 help="output verbose messages")
412
413
413 hgconf = parser.add_argument_group('Mercurial Configuration')
414 hgconf = parser.add_argument_group('Mercurial Configuration')
414 hgconf.add_argument("--chg", action="store_true",
415 hgconf.add_argument("--chg", action="store_true",
415 help="install and use chg wrapper in place of hg")
416 help="install and use chg wrapper in place of hg")
416 hgconf.add_argument("--compiler",
417 hgconf.add_argument("--compiler",
417 help="compiler to build with")
418 help="compiler to build with")
418 hgconf.add_argument('--extra-config-opt', action="append", default=[],
419 hgconf.add_argument('--extra-config-opt', action="append", default=[],
419 help='set the given config opt in the test hgrc')
420 help='set the given config opt in the test hgrc')
420 hgconf.add_argument("-l", "--local", action="store_true",
421 hgconf.add_argument("-l", "--local", action="store_true",
421 help="shortcut for --with-hg=<testdir>/../hg, "
422 help="shortcut for --with-hg=<testdir>/../hg, "
422 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
423 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
423 hgconf.add_argument("--ipv6", action="store_true",
424 hgconf.add_argument("--ipv6", action="store_true",
424 help="prefer IPv6 to IPv4 for network related tests")
425 help="prefer IPv6 to IPv4 for network related tests")
425 hgconf.add_argument("--pure", action="store_true",
426 hgconf.add_argument("--pure", action="store_true",
426 help="use pure Python code instead of C extensions")
427 help="use pure Python code instead of C extensions")
427 hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
428 hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
428 help="enable Py3k warnings on Python 2.7+")
429 help="enable Py3k warnings on Python 2.7+")
429 hgconf.add_argument("--with-chg", metavar="CHG",
430 hgconf.add_argument("--with-chg", metavar="CHG",
430 help="use specified chg wrapper in place of hg")
431 help="use specified chg wrapper in place of hg")
431 hgconf.add_argument("--with-hg",
432 hgconf.add_argument("--with-hg",
432 metavar="HG",
433 metavar="HG",
433 help="test using specified hg script rather than a "
434 help="test using specified hg script rather than a "
434 "temporary installation")
435 "temporary installation")
435
436
436 reporting = parser.add_argument_group('Results Reporting')
437 reporting = parser.add_argument_group('Results Reporting')
437 reporting.add_argument("-C", "--annotate", action="store_true",
438 reporting.add_argument("-C", "--annotate", action="store_true",
438 help="output files annotated with coverage")
439 help="output files annotated with coverage")
439 reporting.add_argument("--color", choices=["always", "auto", "never"],
440 reporting.add_argument("--color", choices=["always", "auto", "never"],
440 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
441 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
441 help="colorisation: always|auto|never (default: auto)")
442 help="colorisation: always|auto|never (default: auto)")
442 reporting.add_argument("-c", "--cover", action="store_true",
443 reporting.add_argument("-c", "--cover", action="store_true",
443 help="print a test coverage report")
444 help="print a test coverage report")
444 reporting.add_argument('--exceptions', action='store_true',
445 reporting.add_argument('--exceptions', action='store_true',
445 help='log all exceptions and generate an exception report')
446 help='log all exceptions and generate an exception report')
446 reporting.add_argument("-H", "--htmlcov", action="store_true",
447 reporting.add_argument("-H", "--htmlcov", action="store_true",
447 help="create an HTML report of the coverage of the files")
448 help="create an HTML report of the coverage of the files")
448 reporting.add_argument("--json", action="store_true",
449 reporting.add_argument("--json", action="store_true",
449 help="store test result data in 'report.json' file")
450 help="store test result data in 'report.json' file")
450 reporting.add_argument("--outputdir",
451 reporting.add_argument("--outputdir",
451 help="directory to write error logs to (default=test directory)")
452 help="directory to write error logs to (default=test directory)")
452 reporting.add_argument("-n", "--nodiff", action="store_true",
453 reporting.add_argument("-n", "--nodiff", action="store_true",
453 help="skip showing test changes")
454 help="skip showing test changes")
454 reporting.add_argument("-S", "--noskips", action="store_true",
455 reporting.add_argument("-S", "--noskips", action="store_true",
455 help="don't report skip tests verbosely")
456 help="don't report skip tests verbosely")
456 reporting.add_argument("--time", action="store_true",
457 reporting.add_argument("--time", action="store_true",
457 help="time how long each test takes")
458 help="time how long each test takes")
458 reporting.add_argument("--view",
459 reporting.add_argument("--view",
459 help="external diff viewer")
460 help="external diff viewer")
460 reporting.add_argument("--xunit",
461 reporting.add_argument("--xunit",
461 help="record xunit results at specified path")
462 help="record xunit results at specified path")
462
463
463 for option, (envvar, default) in defaults.items():
464 for option, (envvar, default) in defaults.items():
464 defaults[option] = type(default)(os.environ.get(envvar, default))
465 defaults[option] = type(default)(os.environ.get(envvar, default))
465 parser.set_defaults(**defaults)
466 parser.set_defaults(**defaults)
466
467
467 return parser
468 return parser
468
469
469 def parseargs(args, parser):
470 def parseargs(args, parser):
470 """Parse arguments with our OptionParser and validate results."""
471 """Parse arguments with our OptionParser and validate results."""
471 options = parser.parse_args(args)
472 options = parser.parse_args(args)
472
473
473 # jython is always pure
474 # jython is always pure
474 if 'java' in sys.platform or '__pypy__' in sys.modules:
475 if 'java' in sys.platform or '__pypy__' in sys.modules:
475 options.pure = True
476 options.pure = True
476
477
477 if options.with_hg:
478 if options.with_hg:
478 options.with_hg = canonpath(_bytespath(options.with_hg))
479 options.with_hg = canonpath(_bytespath(options.with_hg))
479 if not (os.path.isfile(options.with_hg) and
480 if not (os.path.isfile(options.with_hg) and
480 os.access(options.with_hg, os.X_OK)):
481 os.access(options.with_hg, os.X_OK)):
481 parser.error('--with-hg must specify an executable hg script')
482 parser.error('--with-hg must specify an executable hg script')
482 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
483 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
483 sys.stderr.write('warning: --with-hg should specify an hg script\n')
484 sys.stderr.write('warning: --with-hg should specify an hg script\n')
484 if options.local:
485 if options.local:
485 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
486 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
486 reporootdir = os.path.dirname(testdir)
487 reporootdir = os.path.dirname(testdir)
487 pathandattrs = [(b'hg', 'with_hg')]
488 pathandattrs = [(b'hg', 'with_hg')]
488 if options.chg:
489 if options.chg:
489 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
490 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
490 for relpath, attr in pathandattrs:
491 for relpath, attr in pathandattrs:
491 binpath = os.path.join(reporootdir, relpath)
492 binpath = os.path.join(reporootdir, relpath)
492 if os.name != 'nt' and not os.access(binpath, os.X_OK):
493 if os.name != 'nt' and not os.access(binpath, os.X_OK):
493 parser.error('--local specified, but %r not found or '
494 parser.error('--local specified, but %r not found or '
494 'not executable' % binpath)
495 'not executable' % binpath)
495 setattr(options, attr, binpath)
496 setattr(options, attr, binpath)
496
497
497 if (options.chg or options.with_chg) and os.name == 'nt':
498 if (options.chg or options.with_chg) and os.name == 'nt':
498 parser.error('chg does not work on %s' % os.name)
499 parser.error('chg does not work on %s' % os.name)
499 if options.with_chg:
500 if options.with_chg:
500 options.chg = False # no installation to temporary location
501 options.chg = False # no installation to temporary location
501 options.with_chg = canonpath(_bytespath(options.with_chg))
502 options.with_chg = canonpath(_bytespath(options.with_chg))
502 if not (os.path.isfile(options.with_chg) and
503 if not (os.path.isfile(options.with_chg) and
503 os.access(options.with_chg, os.X_OK)):
504 os.access(options.with_chg, os.X_OK)):
504 parser.error('--with-chg must specify a chg executable')
505 parser.error('--with-chg must specify a chg executable')
505 if options.chg and options.with_hg:
506 if options.chg and options.with_hg:
506 # chg shares installation location with hg
507 # chg shares installation location with hg
507 parser.error('--chg does not work when --with-hg is specified '
508 parser.error('--chg does not work when --with-hg is specified '
508 '(use --with-chg instead)')
509 '(use --with-chg instead)')
509
510
510 if options.color == 'always' and not pygmentspresent:
511 if options.color == 'always' and not pygmentspresent:
511 sys.stderr.write('warning: --color=always ignored because '
512 sys.stderr.write('warning: --color=always ignored because '
512 'pygments is not installed\n')
513 'pygments is not installed\n')
513
514
514 if options.bisect_repo and not options.known_good_rev:
515 if options.bisect_repo and not options.known_good_rev:
515 parser.error("--bisect-repo cannot be used without --known-good-rev")
516 parser.error("--bisect-repo cannot be used without --known-good-rev")
516
517
517 global useipv6
518 global useipv6
518 if options.ipv6:
519 if options.ipv6:
519 useipv6 = checksocketfamily('AF_INET6')
520 useipv6 = checksocketfamily('AF_INET6')
520 else:
521 else:
521 # only use IPv6 if IPv4 is unavailable and IPv6 is available
522 # only use IPv6 if IPv4 is unavailable and IPv6 is available
522 useipv6 = ((not checksocketfamily('AF_INET'))
523 useipv6 = ((not checksocketfamily('AF_INET'))
523 and checksocketfamily('AF_INET6'))
524 and checksocketfamily('AF_INET6'))
524
525
525 options.anycoverage = options.cover or options.annotate or options.htmlcov
526 options.anycoverage = options.cover or options.annotate or options.htmlcov
526 if options.anycoverage:
527 if options.anycoverage:
527 try:
528 try:
528 import coverage
529 import coverage
529 covver = version.StrictVersion(coverage.__version__).version
530 covver = version.StrictVersion(coverage.__version__).version
530 if covver < (3, 3):
531 if covver < (3, 3):
531 parser.error('coverage options require coverage 3.3 or later')
532 parser.error('coverage options require coverage 3.3 or later')
532 except ImportError:
533 except ImportError:
533 parser.error('coverage options now require the coverage package')
534 parser.error('coverage options now require the coverage package')
534
535
535 if options.anycoverage and options.local:
536 if options.anycoverage and options.local:
536 # this needs some path mangling somewhere, I guess
537 # this needs some path mangling somewhere, I guess
537 parser.error("sorry, coverage options do not work when --local "
538 parser.error("sorry, coverage options do not work when --local "
538 "is specified")
539 "is specified")
539
540
540 if options.anycoverage and options.with_hg:
541 if options.anycoverage and options.with_hg:
541 parser.error("sorry, coverage options do not work when --with-hg "
542 parser.error("sorry, coverage options do not work when --with-hg "
542 "is specified")
543 "is specified")
543
544
544 global verbose
545 global verbose
545 if options.verbose:
546 if options.verbose:
546 verbose = ''
547 verbose = ''
547
548
548 if options.tmpdir:
549 if options.tmpdir:
549 options.tmpdir = canonpath(options.tmpdir)
550 options.tmpdir = canonpath(options.tmpdir)
550
551
551 if options.jobs < 1:
552 if options.jobs < 1:
552 parser.error('--jobs must be positive')
553 parser.error('--jobs must be positive')
553 if options.interactive and options.debug:
554 if options.interactive and options.debug:
554 parser.error("-i/--interactive and -d/--debug are incompatible")
555 parser.error("-i/--interactive and -d/--debug are incompatible")
555 if options.debug:
556 if options.debug:
556 if options.timeout != defaults['timeout']:
557 if options.timeout != defaults['timeout']:
557 sys.stderr.write(
558 sys.stderr.write(
558 'warning: --timeout option ignored with --debug\n')
559 'warning: --timeout option ignored with --debug\n')
559 if options.slowtimeout != defaults['slowtimeout']:
560 if options.slowtimeout != defaults['slowtimeout']:
560 sys.stderr.write(
561 sys.stderr.write(
561 'warning: --slowtimeout option ignored with --debug\n')
562 'warning: --slowtimeout option ignored with --debug\n')
562 options.timeout = 0
563 options.timeout = 0
563 options.slowtimeout = 0
564 options.slowtimeout = 0
564 if options.py3k_warnings:
565 if options.py3k_warnings:
565 if PYTHON3:
566 if PYTHON3:
566 parser.error(
567 parser.error(
567 '--py3k-warnings can only be used on Python 2.7')
568 '--py3k-warnings can only be used on Python 2.7')
568
569
569 if options.blacklist:
570 if options.blacklist:
570 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
571 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
571 if options.whitelist:
572 if options.whitelist:
572 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
573 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
573 else:
574 else:
574 options.whitelisted = {}
575 options.whitelisted = {}
575
576
576 if options.showchannels:
577 if options.showchannels:
577 options.nodiff = True
578 options.nodiff = True
578
579
579 return options
580 return options
580
581
581 def rename(src, dst):
582 def rename(src, dst):
582 """Like os.rename(), trade atomicity and opened files friendliness
583 """Like os.rename(), trade atomicity and opened files friendliness
583 for existing destination support.
584 for existing destination support.
584 """
585 """
585 shutil.copy(src, dst)
586 shutil.copy(src, dst)
586 os.remove(src)
587 os.remove(src)
587
588
588 _unified_diff = difflib.unified_diff
589 _unified_diff = difflib.unified_diff
589 if PYTHON3:
590 if PYTHON3:
590 import functools
591 import functools
591 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
592 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
592
593
593 def getdiff(expected, output, ref, err):
594 def getdiff(expected, output, ref, err):
594 servefail = False
595 servefail = False
595 lines = []
596 lines = []
596 for line in _unified_diff(expected, output, ref, err):
597 for line in _unified_diff(expected, output, ref, err):
597 if line.startswith(b'+++') or line.startswith(b'---'):
598 if line.startswith(b'+++') or line.startswith(b'---'):
598 line = line.replace(b'\\', b'/')
599 line = line.replace(b'\\', b'/')
599 if line.endswith(b' \n'):
600 if line.endswith(b' \n'):
600 line = line[:-2] + b'\n'
601 line = line[:-2] + b'\n'
601 lines.append(line)
602 lines.append(line)
602 if not servefail and line.startswith(
603 if not servefail and line.startswith(
603 b'+ abort: child process failed to start'):
604 b'+ abort: child process failed to start'):
604 servefail = True
605 servefail = True
605
606
606 return servefail, lines
607 return servefail, lines
607
608
608 verbose = False
609 verbose = False
609 def vlog(*msg):
610 def vlog(*msg):
610 """Log only when in verbose mode."""
611 """Log only when in verbose mode."""
611 if verbose is False:
612 if verbose is False:
612 return
613 return
613
614
614 return log(*msg)
615 return log(*msg)
615
616
616 # Bytes that break XML even in a CDATA block: control characters 0-31
617 # Bytes that break XML even in a CDATA block: control characters 0-31
617 # sans \t, \n and \r
618 # sans \t, \n and \r
618 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
619 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
619
620
620 # Match feature conditionalized output lines in the form, capturing the feature
621 # Match feature conditionalized output lines in the form, capturing the feature
621 # list in group 2, and the preceeding line output in group 1:
622 # list in group 2, and the preceeding line output in group 1:
622 #
623 #
623 # output..output (feature !)\n
624 # output..output (feature !)\n
624 optline = re.compile(b'(.*) \((.+?) !\)\n$')
625 optline = re.compile(b'(.*) \((.+?) !\)\n$')
625
626
626 def cdatasafe(data):
627 def cdatasafe(data):
627 """Make a string safe to include in a CDATA block.
628 """Make a string safe to include in a CDATA block.
628
629
629 Certain control characters are illegal in a CDATA block, and
630 Certain control characters are illegal in a CDATA block, and
630 there's no way to include a ]]> in a CDATA either. This function
631 there's no way to include a ]]> in a CDATA either. This function
631 replaces illegal bytes with ? and adds a space between the ]] so
632 replaces illegal bytes with ? and adds a space between the ]] so
632 that it won't break the CDATA block.
633 that it won't break the CDATA block.
633 """
634 """
634 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
635 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
635
636
636 def log(*msg):
637 def log(*msg):
637 """Log something to stdout.
638 """Log something to stdout.
638
639
639 Arguments are strings to print.
640 Arguments are strings to print.
640 """
641 """
641 with iolock:
642 with iolock:
642 if verbose:
643 if verbose:
643 print(verbose, end=' ')
644 print(verbose, end=' ')
644 for m in msg:
645 for m in msg:
645 print(m, end=' ')
646 print(m, end=' ')
646 print()
647 print()
647 sys.stdout.flush()
648 sys.stdout.flush()
648
649
649 def highlightdiff(line, color):
650 def highlightdiff(line, color):
650 if not color:
651 if not color:
651 return line
652 return line
652 assert pygmentspresent
653 assert pygmentspresent
653 return pygments.highlight(line.decode('latin1'), difflexer,
654 return pygments.highlight(line.decode('latin1'), difflexer,
654 terminal256formatter).encode('latin1')
655 terminal256formatter).encode('latin1')
655
656
656 def highlightmsg(msg, color):
657 def highlightmsg(msg, color):
657 if not color:
658 if not color:
658 return msg
659 return msg
659 assert pygmentspresent
660 assert pygmentspresent
660 return pygments.highlight(msg, runnerlexer, runnerformatter)
661 return pygments.highlight(msg, runnerlexer, runnerformatter)
661
662
662 def terminate(proc):
663 def terminate(proc):
663 """Terminate subprocess"""
664 """Terminate subprocess"""
664 vlog('# Terminating process %d' % proc.pid)
665 vlog('# Terminating process %d' % proc.pid)
665 try:
666 try:
666 proc.terminate()
667 proc.terminate()
667 except OSError:
668 except OSError:
668 pass
669 pass
669
670
670 def killdaemons(pidfile):
671 def killdaemons(pidfile):
671 import killdaemons as killmod
672 import killdaemons as killmod
672 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
673 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
673 logfn=vlog)
674 logfn=vlog)
674
675
675 class Test(unittest.TestCase):
676 class Test(unittest.TestCase):
676 """Encapsulates a single, runnable test.
677 """Encapsulates a single, runnable test.
677
678
678 While this class conforms to the unittest.TestCase API, it differs in that
679 While this class conforms to the unittest.TestCase API, it differs in that
679 instances need to be instantiated manually. (Typically, unittest.TestCase
680 instances need to be instantiated manually. (Typically, unittest.TestCase
680 classes are instantiated automatically by scanning modules.)
681 classes are instantiated automatically by scanning modules.)
681 """
682 """
682
683
683 # Status code reserved for skipped tests (used by hghave).
684 # Status code reserved for skipped tests (used by hghave).
684 SKIPPED_STATUS = 80
685 SKIPPED_STATUS = 80
685
686
686 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
687 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
687 debug=False,
688 debug=False,
688 first=False,
689 first=False,
689 timeout=None,
690 timeout=None,
690 startport=None, extraconfigopts=None,
691 startport=None, extraconfigopts=None,
691 py3kwarnings=False, shell=None, hgcommand=None,
692 py3kwarnings=False, shell=None, hgcommand=None,
692 slowtimeout=None, usechg=False,
693 slowtimeout=None, usechg=False,
693 useipv6=False):
694 useipv6=False):
694 """Create a test from parameters.
695 """Create a test from parameters.
695
696
696 path is the full path to the file defining the test.
697 path is the full path to the file defining the test.
697
698
698 tmpdir is the main temporary directory to use for this test.
699 tmpdir is the main temporary directory to use for this test.
699
700
700 keeptmpdir determines whether to keep the test's temporary directory
701 keeptmpdir determines whether to keep the test's temporary directory
701 after execution. It defaults to removal (False).
702 after execution. It defaults to removal (False).
702
703
703 debug mode will make the test execute verbosely, with unfiltered
704 debug mode will make the test execute verbosely, with unfiltered
704 output.
705 output.
705
706
706 timeout controls the maximum run time of the test. It is ignored when
707 timeout controls the maximum run time of the test. It is ignored when
707 debug is True. See slowtimeout for tests with #require slow.
708 debug is True. See slowtimeout for tests with #require slow.
708
709
709 slowtimeout overrides timeout if the test has #require slow.
710 slowtimeout overrides timeout if the test has #require slow.
710
711
711 startport controls the starting port number to use for this test. Each
712 startport controls the starting port number to use for this test. Each
712 test will reserve 3 port numbers for execution. It is the caller's
713 test will reserve 3 port numbers for execution. It is the caller's
713 responsibility to allocate a non-overlapping port range to Test
714 responsibility to allocate a non-overlapping port range to Test
714 instances.
715 instances.
715
716
716 extraconfigopts is an iterable of extra hgrc config options. Values
717 extraconfigopts is an iterable of extra hgrc config options. Values
717 must have the form "key=value" (something understood by hgrc). Values
718 must have the form "key=value" (something understood by hgrc). Values
718 of the form "foo.key=value" will result in "[foo] key=value".
719 of the form "foo.key=value" will result in "[foo] key=value".
719
720
720 py3kwarnings enables Py3k warnings.
721 py3kwarnings enables Py3k warnings.
721
722
722 shell is the shell to execute tests in.
723 shell is the shell to execute tests in.
723 """
724 """
724 if timeout is None:
725 if timeout is None:
725 timeout = defaults['timeout']
726 timeout = defaults['timeout']
726 if startport is None:
727 if startport is None:
727 startport = defaults['port']
728 startport = defaults['port']
728 if slowtimeout is None:
729 if slowtimeout is None:
729 slowtimeout = defaults['slowtimeout']
730 slowtimeout = defaults['slowtimeout']
730 self.path = path
731 self.path = path
731 self.bname = os.path.basename(path)
732 self.bname = os.path.basename(path)
732 self.name = _strpath(self.bname)
733 self.name = _strpath(self.bname)
733 self._testdir = os.path.dirname(path)
734 self._testdir = os.path.dirname(path)
734 self._outputdir = outputdir
735 self._outputdir = outputdir
735 self._tmpname = os.path.basename(path)
736 self._tmpname = os.path.basename(path)
736 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
737 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
737
738
738 self._threadtmp = tmpdir
739 self._threadtmp = tmpdir
739 self._keeptmpdir = keeptmpdir
740 self._keeptmpdir = keeptmpdir
740 self._debug = debug
741 self._debug = debug
741 self._first = first
742 self._first = first
742 self._timeout = timeout
743 self._timeout = timeout
743 self._slowtimeout = slowtimeout
744 self._slowtimeout = slowtimeout
744 self._startport = startport
745 self._startport = startport
745 self._extraconfigopts = extraconfigopts or []
746 self._extraconfigopts = extraconfigopts or []
746 self._py3kwarnings = py3kwarnings
747 self._py3kwarnings = py3kwarnings
747 self._shell = _bytespath(shell)
748 self._shell = _bytespath(shell)
748 self._hgcommand = hgcommand or b'hg'
749 self._hgcommand = hgcommand or b'hg'
749 self._usechg = usechg
750 self._usechg = usechg
750 self._useipv6 = useipv6
751 self._useipv6 = useipv6
751
752
752 self._aborted = False
753 self._aborted = False
753 self._daemonpids = []
754 self._daemonpids = []
754 self._finished = None
755 self._finished = None
755 self._ret = None
756 self._ret = None
756 self._out = None
757 self._out = None
757 self._skipped = None
758 self._skipped = None
758 self._testtmp = None
759 self._testtmp = None
759 self._chgsockdir = None
760 self._chgsockdir = None
760
761
761 self._refout = self.readrefout()
762 self._refout = self.readrefout()
762
763
763 def readrefout(self):
764 def readrefout(self):
764 """read reference output"""
765 """read reference output"""
765 # If we're not in --debug mode and reference output file exists,
766 # If we're not in --debug mode and reference output file exists,
766 # check test output against it.
767 # check test output against it.
767 if self._debug:
768 if self._debug:
768 return None # to match "out is None"
769 return None # to match "out is None"
769 elif os.path.exists(self.refpath):
770 elif os.path.exists(self.refpath):
770 with open(self.refpath, 'rb') as f:
771 with open(self.refpath, 'rb') as f:
771 return f.read().splitlines(True)
772 return f.read().splitlines(True)
772 else:
773 else:
773 return []
774 return []
774
775
775 # needed to get base class __repr__ running
776 # needed to get base class __repr__ running
776 @property
777 @property
777 def _testMethodName(self):
778 def _testMethodName(self):
778 return self.name
779 return self.name
779
780
780 def __str__(self):
781 def __str__(self):
781 return self.name
782 return self.name
782
783
783 def shortDescription(self):
784 def shortDescription(self):
784 return self.name
785 return self.name
785
786
786 def setUp(self):
787 def setUp(self):
787 """Tasks to perform before run()."""
788 """Tasks to perform before run()."""
788 self._finished = False
789 self._finished = False
789 self._ret = None
790 self._ret = None
790 self._out = None
791 self._out = None
791 self._skipped = None
792 self._skipped = None
792
793
793 try:
794 try:
794 os.mkdir(self._threadtmp)
795 os.mkdir(self._threadtmp)
795 except OSError as e:
796 except OSError as e:
796 if e.errno != errno.EEXIST:
797 if e.errno != errno.EEXIST:
797 raise
798 raise
798
799
799 name = self._tmpname
800 name = self._tmpname
800 self._testtmp = os.path.join(self._threadtmp, name)
801 self._testtmp = os.path.join(self._threadtmp, name)
801 os.mkdir(self._testtmp)
802 os.mkdir(self._testtmp)
802
803
803 # Remove any previous output files.
804 # Remove any previous output files.
804 if os.path.exists(self.errpath):
805 if os.path.exists(self.errpath):
805 try:
806 try:
806 os.remove(self.errpath)
807 os.remove(self.errpath)
807 except OSError as e:
808 except OSError as e:
808 # We might have raced another test to clean up a .err
809 # We might have raced another test to clean up a .err
809 # file, so ignore ENOENT when removing a previous .err
810 # file, so ignore ENOENT when removing a previous .err
810 # file.
811 # file.
811 if e.errno != errno.ENOENT:
812 if e.errno != errno.ENOENT:
812 raise
813 raise
813
814
814 if self._usechg:
815 if self._usechg:
815 self._chgsockdir = os.path.join(self._threadtmp,
816 self._chgsockdir = os.path.join(self._threadtmp,
816 b'%s.chgsock' % name)
817 b'%s.chgsock' % name)
817 os.mkdir(self._chgsockdir)
818 os.mkdir(self._chgsockdir)
818
819
819 def run(self, result):
820 def run(self, result):
820 """Run this test and report results against a TestResult instance."""
821 """Run this test and report results against a TestResult instance."""
821 # This function is extremely similar to unittest.TestCase.run(). Once
822 # This function is extremely similar to unittest.TestCase.run(). Once
822 # we require Python 2.7 (or at least its version of unittest), this
823 # we require Python 2.7 (or at least its version of unittest), this
823 # function can largely go away.
824 # function can largely go away.
824 self._result = result
825 self._result = result
825 result.startTest(self)
826 result.startTest(self)
826 try:
827 try:
827 try:
828 try:
828 self.setUp()
829 self.setUp()
829 except (KeyboardInterrupt, SystemExit):
830 except (KeyboardInterrupt, SystemExit):
830 self._aborted = True
831 self._aborted = True
831 raise
832 raise
832 except Exception:
833 except Exception:
833 result.addError(self, sys.exc_info())
834 result.addError(self, sys.exc_info())
834 return
835 return
835
836
836 success = False
837 success = False
837 try:
838 try:
838 self.runTest()
839 self.runTest()
839 except KeyboardInterrupt:
840 except KeyboardInterrupt:
840 self._aborted = True
841 self._aborted = True
841 raise
842 raise
842 except unittest.SkipTest as e:
843 except unittest.SkipTest as e:
843 result.addSkip(self, str(e))
844 result.addSkip(self, str(e))
844 # The base class will have already counted this as a
845 # The base class will have already counted this as a
845 # test we "ran", but we want to exclude skipped tests
846 # test we "ran", but we want to exclude skipped tests
846 # from the count of tests run.
847 # from the count of tests run.
847 result.testsRun -= 1
848 result.testsRun -= 1
848 except self.failureException as e:
849 except self.failureException as e:
849 # This differs from unittest in that we don't capture
850 # This differs from unittest in that we don't capture
850 # the stack trace. This is for historical reasons and
851 # the stack trace. This is for historical reasons and
851 # this decision could be revisited in the future,
852 # this decision could be revisited in the future,
852 # especially for PythonTest instances.
853 # especially for PythonTest instances.
853 if result.addFailure(self, str(e)):
854 if result.addFailure(self, str(e)):
854 success = True
855 success = True
855 except Exception:
856 except Exception:
856 result.addError(self, sys.exc_info())
857 result.addError(self, sys.exc_info())
857 else:
858 else:
858 success = True
859 success = True
859
860
860 try:
861 try:
861 self.tearDown()
862 self.tearDown()
862 except (KeyboardInterrupt, SystemExit):
863 except (KeyboardInterrupt, SystemExit):
863 self._aborted = True
864 self._aborted = True
864 raise
865 raise
865 except Exception:
866 except Exception:
866 result.addError(self, sys.exc_info())
867 result.addError(self, sys.exc_info())
867 success = False
868 success = False
868
869
869 if success:
870 if success:
870 result.addSuccess(self)
871 result.addSuccess(self)
871 finally:
872 finally:
872 result.stopTest(self, interrupted=self._aborted)
873 result.stopTest(self, interrupted=self._aborted)
873
874
874 def runTest(self):
875 def runTest(self):
875 """Run this test instance.
876 """Run this test instance.
876
877
877 This will return a tuple describing the result of the test.
878 This will return a tuple describing the result of the test.
878 """
879 """
879 env = self._getenv()
880 env = self._getenv()
880 self._genrestoreenv(env)
881 self._genrestoreenv(env)
881 self._daemonpids.append(env['DAEMON_PIDS'])
882 self._daemonpids.append(env['DAEMON_PIDS'])
882 self._createhgrc(env['HGRCPATH'])
883 self._createhgrc(env['HGRCPATH'])
883
884
884 vlog('# Test', self.name)
885 vlog('# Test', self.name)
885
886
886 ret, out = self._run(env)
887 ret, out = self._run(env)
887 self._finished = True
888 self._finished = True
888 self._ret = ret
889 self._ret = ret
889 self._out = out
890 self._out = out
890
891
891 def describe(ret):
892 def describe(ret):
892 if ret < 0:
893 if ret < 0:
893 return 'killed by signal: %d' % -ret
894 return 'killed by signal: %d' % -ret
894 return 'returned error code %d' % ret
895 return 'returned error code %d' % ret
895
896
896 self._skipped = False
897 self._skipped = False
897
898
898 if ret == self.SKIPPED_STATUS:
899 if ret == self.SKIPPED_STATUS:
899 if out is None: # Debug mode, nothing to parse.
900 if out is None: # Debug mode, nothing to parse.
900 missing = ['unknown']
901 missing = ['unknown']
901 failed = None
902 failed = None
902 else:
903 else:
903 missing, failed = TTest.parsehghaveoutput(out)
904 missing, failed = TTest.parsehghaveoutput(out)
904
905
905 if not missing:
906 if not missing:
906 missing = ['skipped']
907 missing = ['skipped']
907
908
908 if failed:
909 if failed:
909 self.fail('hg have failed checking for %s' % failed[-1])
910 self.fail('hg have failed checking for %s' % failed[-1])
910 else:
911 else:
911 self._skipped = True
912 self._skipped = True
912 raise unittest.SkipTest(missing[-1])
913 raise unittest.SkipTest(missing[-1])
913 elif ret == 'timeout':
914 elif ret == 'timeout':
914 self.fail('timed out')
915 self.fail('timed out')
915 elif ret is False:
916 elif ret is False:
916 self.fail('no result code from test')
917 self.fail('no result code from test')
917 elif out != self._refout:
918 elif out != self._refout:
918 # Diff generation may rely on the written .err file.
919 # Diff generation may rely on the written .err file.
919 if (ret != 0 or out != self._refout) and not self._skipped \
920 if (ret != 0 or out != self._refout) and not self._skipped \
920 and not self._debug:
921 and not self._debug:
921 with open(self.errpath, 'wb') as f:
922 with open(self.errpath, 'wb') as f:
922 for line in out:
923 for line in out:
923 f.write(line)
924 f.write(line)
924
925
925 # The result object handles diff calculation for us.
926 # The result object handles diff calculation for us.
926 with firstlock:
927 with firstlock:
927 if self._result.addOutputMismatch(self, ret, out, self._refout):
928 if self._result.addOutputMismatch(self, ret, out, self._refout):
928 # change was accepted, skip failing
929 # change was accepted, skip failing
929 return
930 return
930 if self._first:
931 if self._first:
931 global firsterror
932 global firsterror
932 firsterror = True
933 firsterror = True
933
934
934 if ret:
935 if ret:
935 msg = 'output changed and ' + describe(ret)
936 msg = 'output changed and ' + describe(ret)
936 else:
937 else:
937 msg = 'output changed'
938 msg = 'output changed'
938
939
939 self.fail(msg)
940 self.fail(msg)
940 elif ret:
941 elif ret:
941 self.fail(describe(ret))
942 self.fail(describe(ret))
942
943
943 def tearDown(self):
944 def tearDown(self):
944 """Tasks to perform after run()."""
945 """Tasks to perform after run()."""
945 for entry in self._daemonpids:
946 for entry in self._daemonpids:
946 killdaemons(entry)
947 killdaemons(entry)
947 self._daemonpids = []
948 self._daemonpids = []
948
949
949 if self._keeptmpdir:
950 if self._keeptmpdir:
950 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
951 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
951 (self._testtmp.decode('utf-8'),
952 (self._testtmp.decode('utf-8'),
952 self._threadtmp.decode('utf-8')))
953 self._threadtmp.decode('utf-8')))
953 else:
954 else:
954 shutil.rmtree(self._testtmp, True)
955 shutil.rmtree(self._testtmp, True)
955 shutil.rmtree(self._threadtmp, True)
956 shutil.rmtree(self._threadtmp, True)
956
957
957 if self._usechg:
958 if self._usechg:
958 # chg servers stop automatically once they notice that their socket
959 # chg servers stop automatically once they notice that their socket
959 # files have been deleted
960 # files have been deleted
960 shutil.rmtree(self._chgsockdir, True)
961 shutil.rmtree(self._chgsockdir, True)
961
962
962 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
963 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
963 and not self._debug and self._out:
964 and not self._debug and self._out:
964 with open(self.errpath, 'wb') as f:
965 with open(self.errpath, 'wb') as f:
965 for line in self._out:
966 for line in self._out:
966 f.write(line)
967 f.write(line)
967
968
968 vlog("# Ret was:", self._ret, '(%s)' % self.name)
969 vlog("# Ret was:", self._ret, '(%s)' % self.name)
969
970
970 def _run(self, env):
971 def _run(self, env):
971 # This should be implemented in child classes to run tests.
972 # This should be implemented in child classes to run tests.
972 raise unittest.SkipTest('unknown test type')
973 raise unittest.SkipTest('unknown test type')
973
974
974 def abort(self):
975 def abort(self):
975 """Terminate execution of this test."""
976 """Terminate execution of this test."""
976 self._aborted = True
977 self._aborted = True
977
978
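# A sketch of what _portmap() yields: assuming an illustrative start port of
# 20059, _portmap(1) returns (br':20060\b', b':$HGPORT1'), so literal port
# numbers in captured output get rewritten to the $HGPORTn placeholders.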
978 def _portmap(self, i):
979 def _portmap(self, i):
979 offset = b'' if i == 0 else b'%d' % i
980 offset = b'' if i == 0 else b'%d' % i
980 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
981 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
981
982
982 def _getreplacements(self):
983 def _getreplacements(self):
983 """Obtain a mapping of text replacements to apply to test output.
984 """Obtain a mapping of text replacements to apply to test output.
984
985
985 Test output needs to be normalized so it can be compared to expected
986 Test output needs to be normalized so it can be compared to expected
986 output. This function defines how some of that normalization will
987 output. This function defines how some of that normalization will
987 occur.
988 occur.
988 """
989 """
989 r = [
990 r = [
990 # This list should be parallel to defineport in _getenv
991 # This list should be parallel to defineport in _getenv
991 self._portmap(0),
992 self._portmap(0),
992 self._portmap(1),
993 self._portmap(1),
993 self._portmap(2),
994 self._portmap(2),
994 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
995 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
995 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
996 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
996 ]
997 ]
997 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
998 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
998
999
999 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1000 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1000
1001
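# If the test directory ships a common-pattern.py, it may define a
# 'substitutions' list of (pattern, replacement) byte-string pairs, e.g.
# (br'\d+ years ago', b'* ago (glob)'); the concrete pair shown here is
# purely illustrative.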
1001 if os.path.exists(replacementfile):
1002 if os.path.exists(replacementfile):
1002 data = {}
1003 data = {}
1003 with open(replacementfile, mode='rb') as source:
1004 with open(replacementfile, mode='rb') as source:
1004 # the intermediate 'compile' step helps with debugging
1005 # the intermediate 'compile' step helps with debugging
1005 code = compile(source.read(), replacementfile, 'exec')
1006 code = compile(source.read(), replacementfile, 'exec')
1006 exec(code, data)
1007 exec(code, data)
1007 for value in data.get('substitutions', ()):
1008 for value in data.get('substitutions', ()):
1008 if len(value) != 2:
1009 if len(value) != 2:
1009 msg = 'malformatted substitution in %s: %r'
1010 msg = 'malformatted substitution in %s: %r'
1010 msg %= (replacementfile, value)
1011 msg %= (replacementfile, value)
1011 raise ValueError(msg)
1012 raise ValueError(msg)
1012 r.append(value)
1013 r.append(value)
1013 return r
1014 return r
1014
1015
1015 def _escapepath(self, p):
1016 def _escapepath(self, p):
1016 if os.name == 'nt':
1017 if os.name == 'nt':
1017 return (
1018 return (
1018 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1019 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1019 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1020 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1020 for c in [p[i:i + 1] for i in range(len(p))]))
1021 for c in [p[i:i + 1] for i in range(len(p))]))
1021 )
1022 )
1022 else:
1023 else:
1023 return re.escape(p)
1024 return re.escape(p)
1024
1025
1025 def _localip(self):
1026 def _localip(self):
1026 if self._useipv6:
1027 if self._useipv6:
1027 return b'::1'
1028 return b'::1'
1028 else:
1029 else:
1029 return b'127.0.0.1'
1030 return b'127.0.0.1'
1030
1031
1031 def _genrestoreenv(self, testenv):
1032 def _genrestoreenv(self, testenv):
1032 """Generate a script that can be used by tests to restore the original
1033 """Generate a script that can be used by tests to restore the original
1033 environment."""
1034 environment."""
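# The generated restoreenv.sh is a plain shell fragment: one shell-quoted
# NAME=value assignment for each original environment variable that is safe
# to restore, followed by an 'unset NAME' line for every variable the test
# environment introduced (see the two loops below).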
1034 # Put the restoreenv script inside self._threadtmp
1035 # Put the restoreenv script inside self._threadtmp
1035 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1036 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1036 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1037 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1037
1038
1038 # Only restore environment variable names that the shell allows
1039 # Only restore environment variable names that the shell allows
1039 # us to export.
1040 # us to export.
1040 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1041 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1041
1042
1042 # Do not restore these variables; otherwise tests would fail.
1043 # Do not restore these variables; otherwise tests would fail.
1043 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1044 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1044
1045
1045 with open(scriptpath, 'w') as envf:
1046 with open(scriptpath, 'w') as envf:
1046 for name, value in origenviron.items():
1047 for name, value in origenviron.items():
1047 if not name_regex.match(name):
1048 if not name_regex.match(name):
1048 # Skip environment variables with unusual names not
1049 # Skip environment variables with unusual names not
1049 # allowed by most shells.
1050 # allowed by most shells.
1050 continue
1051 continue
1051 if name in reqnames:
1052 if name in reqnames:
1052 continue
1053 continue
1053 envf.write('%s=%s\n' % (name, shellquote(value)))
1054 envf.write('%s=%s\n' % (name, shellquote(value)))
1054
1055
1055 for name in testenv:
1056 for name in testenv:
1056 if name in origenviron or name in reqnames:
1057 if name in origenviron or name in reqnames:
1057 continue
1058 continue
1058 envf.write('unset %s\n' % (name,))
1059 envf.write('unset %s\n' % (name,))
1059
1060
1060 def _getenv(self):
1061 def _getenv(self):
1061 """Obtain environment variables to use during test execution."""
1062 """Obtain environment variables to use during test execution."""
1062 def defineport(i):
1063 def defineport(i):
1063 offset = '' if i == 0 else '%s' % i
1064 offset = '' if i == 0 else '%s' % i
1064 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1065 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1065 env = os.environ.copy()
1066 env = os.environ.copy()
1066 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1067 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1067 env['HGEMITWARNINGS'] = '1'
1068 env['HGEMITWARNINGS'] = '1'
1068 env['TESTTMP'] = _strpath(self._testtmp)
1069 env['TESTTMP'] = _strpath(self._testtmp)
1069 env['TESTNAME'] = self.name
1070 env['TESTNAME'] = self.name
1070 env['HOME'] = _strpath(self._testtmp)
1071 env['HOME'] = _strpath(self._testtmp)
1071 # This number should match portneeded in _getport
1072 # This number should match portneeded in _getport
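# With an illustrative start port of 20059, this loop defines HGPORT=20059,
# HGPORT1=20060 and HGPORT2=20061, mirroring the three _portmap()
# replacements above.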
1072 for port in xrange(3):
1073 for port in xrange(3):
1073 # This list should be parallel to _portmap in _getreplacements
1074 # This list should be parallel to _portmap in _getreplacements
1074 defineport(port)
1075 defineport(port)
1075 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1076 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1076 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1077 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1077 b'daemon.pids'))
1078 b'daemon.pids'))
1078 env["HGEDITOR"] = ('"' + sys.executable + '"'
1079 env["HGEDITOR"] = ('"' + sys.executable + '"'
1079 + ' -c "import sys; sys.exit(0)"')
1080 + ' -c "import sys; sys.exit(0)"')
1080 env["HGMERGE"] = "internal:merge"
1081 env["HGMERGE"] = "internal:merge"
1081 env["HGUSER"] = "test"
1082 env["HGUSER"] = "test"
1082 env["HGENCODING"] = "ascii"
1083 env["HGENCODING"] = "ascii"
1083 env["HGENCODINGMODE"] = "strict"
1084 env["HGENCODINGMODE"] = "strict"
1084 env["HGHOSTNAME"] = "test-hostname"
1085 env["HGHOSTNAME"] = "test-hostname"
1085 env['HGIPV6'] = str(int(self._useipv6))
1086 env['HGIPV6'] = str(int(self._useipv6))
1086 if 'HGCATAPULTSERVERPIPE' not in env:
1087 if 'HGCATAPULTSERVERPIPE' not in env:
1087 env['HGCATAPULTSERVERPIPE'] = os.devnull
1088 env['HGCATAPULTSERVERPIPE'] = os.devnull
1088
1089
1089 extraextensions = []
1090 extraextensions = []
1090 for opt in self._extraconfigopts:
1091 for opt in self._extraconfigopts:
1091 section, key = opt.encode('utf-8').split(b'.', 1)
1092 section, key = opt.encode('utf-8').split(b'.', 1)
1092 if section != 'extensions':
1093 if section != 'extensions':
1093 continue
1094 continue
1094 name = key.split(b'=', 1)[0]
1095 name = key.split(b'=', 1)[0]
1095 extraextensions.append(name)
1096 extraextensions.append(name)
1096
1097
1097 if extraextensions:
1098 if extraextensions:
1098 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1099 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1099
1100
1100 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1101 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1101 # IP addresses.
1102 # IP addresses.
1102 env['LOCALIP'] = _strpath(self._localip())
1103 env['LOCALIP'] = _strpath(self._localip())
1103
1104
1104 # Reset some environment variables to well-known values so that
1105 # Reset some environment variables to well-known values so that
1105 # the tests produce repeatable output.
1106 # the tests produce repeatable output.
1106 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1107 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1107 env['TZ'] = 'GMT'
1108 env['TZ'] = 'GMT'
1108 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1109 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1109 env['COLUMNS'] = '80'
1110 env['COLUMNS'] = '80'
1110 env['TERM'] = 'xterm'
1111 env['TERM'] = 'xterm'
1111
1112
1112 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1113 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1113 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1114 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1114 'NO_PROXY CHGDEBUG').split():
1115 'NO_PROXY CHGDEBUG').split():
1115 if k in env:
1116 if k in env:
1116 del env[k]
1117 del env[k]
1117
1118
1118 # unset env related to hooks
1119 # unset env related to hooks
1119 for k in list(env):
1120 for k in list(env):
1120 if k.startswith('HG_'):
1121 if k.startswith('HG_'):
1121 del env[k]
1122 del env[k]
1122
1123
1123 if self._usechg:
1124 if self._usechg:
1124 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1125 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1125
1126
1126 return env
1127 return env
1127
1128
1128 def _createhgrc(self, path):
1129 def _createhgrc(self, path):
1129 """Create an hgrc file for this test."""
1130 """Create an hgrc file for this test."""
1130 with open(path, 'wb') as hgrc:
1131 with open(path, 'wb') as hgrc:
1131 hgrc.write(b'[ui]\n')
1132 hgrc.write(b'[ui]\n')
1132 hgrc.write(b'slash = True\n')
1133 hgrc.write(b'slash = True\n')
1133 hgrc.write(b'interactive = False\n')
1134 hgrc.write(b'interactive = False\n')
1134 hgrc.write(b'mergemarkers = detailed\n')
1135 hgrc.write(b'mergemarkers = detailed\n')
1135 hgrc.write(b'promptecho = True\n')
1136 hgrc.write(b'promptecho = True\n')
1136 hgrc.write(b'[defaults]\n')
1137 hgrc.write(b'[defaults]\n')
1137 hgrc.write(b'[devel]\n')
1138 hgrc.write(b'[devel]\n')
1138 hgrc.write(b'all-warnings = true\n')
1139 hgrc.write(b'all-warnings = true\n')
1139 hgrc.write(b'default-date = 0 0\n')
1140 hgrc.write(b'default-date = 0 0\n')
1140 hgrc.write(b'[largefiles]\n')
1141 hgrc.write(b'[largefiles]\n')
1141 hgrc.write(b'usercache = %s\n' %
1142 hgrc.write(b'usercache = %s\n' %
1142 (os.path.join(self._testtmp, b'.cache/largefiles')))
1143 (os.path.join(self._testtmp, b'.cache/largefiles')))
1143 hgrc.write(b'[lfs]\n')
1144 hgrc.write(b'[lfs]\n')
1144 hgrc.write(b'usercache = %s\n' %
1145 hgrc.write(b'usercache = %s\n' %
1145 (os.path.join(self._testtmp, b'.cache/lfs')))
1146 (os.path.join(self._testtmp, b'.cache/lfs')))
1146 hgrc.write(b'[web]\n')
1147 hgrc.write(b'[web]\n')
1147 hgrc.write(b'address = localhost\n')
1148 hgrc.write(b'address = localhost\n')
1148 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1149 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1149 hgrc.write(b'server-header = testing stub value\n')
1150 hgrc.write(b'server-header = testing stub value\n')
1150
1151
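# As an illustration, an extra config option given as 'ui.interactive=true'
# is appended here as a '[ui]' section header followed by the raw
# 'interactive=true' assignment.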
1151 for opt in self._extraconfigopts:
1152 for opt in self._extraconfigopts:
1152 section, key = opt.encode('utf-8').split(b'.', 1)
1153 section, key = opt.encode('utf-8').split(b'.', 1)
1153 assert b'=' in key, ('extra config opt %s must '
1154 assert b'=' in key, ('extra config opt %s must '
1154 'have an = for assignment' % opt)
1155 'have an = for assignment' % opt)
1155 hgrc.write(b'[%s]\n%s\n' % (section, key))
1156 hgrc.write(b'[%s]\n%s\n' % (section, key))
1156
1157
1157 def fail(self, msg):
1158 def fail(self, msg):
1158 # unittest differentiates between errored and failed.
1159 # unittest differentiates between errored and failed.
1159 # Failed is denoted by AssertionError (by default at least).
1160 # Failed is denoted by AssertionError (by default at least).
1160 raise AssertionError(msg)
1161 raise AssertionError(msg)
1161
1162
1162 def _runcommand(self, cmd, env, normalizenewlines=False):
1163 def _runcommand(self, cmd, env, normalizenewlines=False):
1163 """Run command in a sub-process, capturing the output (stdout and
1164 """Run command in a sub-process, capturing the output (stdout and
1164 stderr).
1165 stderr).
1165
1166
1166 Return a tuple (exitcode, output). output is None in debug mode.
1167 Return a tuple (exitcode, output). output is None in debug mode.
1167 """
1168 """
1168 if self._debug:
1169 if self._debug:
1169 proc = subprocess.Popen(_strpath(cmd), shell=True,
1170 proc = subprocess.Popen(_strpath(cmd), shell=True,
1170 cwd=_strpath(self._testtmp),
1171 cwd=_strpath(self._testtmp),
1171 env=env)
1172 env=env)
1172 ret = proc.wait()
1173 ret = proc.wait()
1173 return (ret, None)
1174 return (ret, None)
1174
1175
1175 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1176 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1176 def cleanup():
1177 def cleanup():
1177 terminate(proc)
1178 terminate(proc)
1178 ret = proc.wait()
1179 ret = proc.wait()
1179 if ret == 0:
1180 if ret == 0:
1180 ret = signal.SIGTERM << 8
1181 ret = signal.SIGTERM << 8
1181 killdaemons(env['DAEMON_PIDS'])
1182 killdaemons(env['DAEMON_PIDS'])
1182 return ret
1183 return ret
1183
1184
1184 output = b''
1185 output = b''
1185 proc.tochild.close()
1186 proc.tochild.close()
1186
1187
1187 try:
1188 try:
1188 output = proc.fromchild.read()
1189 output = proc.fromchild.read()
1189 except KeyboardInterrupt:
1190 except KeyboardInterrupt:
1190 vlog('# Handling keyboard interrupt')
1191 vlog('# Handling keyboard interrupt')
1191 cleanup()
1192 cleanup()
1192 raise
1193 raise
1193
1194
1194 ret = proc.wait()
1195 ret = proc.wait()
1195 if wifexited(ret):
1196 if wifexited(ret):
1196 ret = os.WEXITSTATUS(ret)
1197 ret = os.WEXITSTATUS(ret)
1197
1198
1198 if proc.timeout:
1199 if proc.timeout:
1199 ret = 'timeout'
1200 ret = 'timeout'
1200
1201
1201 if ret:
1202 if ret:
1202 killdaemons(env['DAEMON_PIDS'])
1203 killdaemons(env['DAEMON_PIDS'])
1203
1204
1204 for s, r in self._getreplacements():
1205 for s, r in self._getreplacements():
1205 output = re.sub(s, r, output)
1206 output = re.sub(s, r, output)
1206
1207
1207 if normalizenewlines:
1208 if normalizenewlines:
1208 output = output.replace(b'\r\n', b'\n')
1209 output = output.replace(b'\r\n', b'\n')
1209
1210
1210 return ret, output.splitlines(True)
1211 return ret, output.splitlines(True)
1211
1212
1212 class PythonTest(Test):
1213 class PythonTest(Test):
1213 """A Python-based test."""
1214 """A Python-based test."""
1214
1215
1215 @property
1216 @property
1216 def refpath(self):
1217 def refpath(self):
1217 return os.path.join(self._testdir, b'%s.out' % self.bname)
1218 return os.path.join(self._testdir, b'%s.out' % self.bname)
1218
1219
1219 def _run(self, env):
1220 def _run(self, env):
1220 py3kswitch = self._py3kwarnings and b' -3' or b''
1221 py3kswitch = self._py3kwarnings and b' -3' or b''
1221 cmd = b'"%s"%s "%s"' % (PYTHON, py3kswitch, self.path)
1222 cmd = b'"%s"%s "%s"' % (PYTHON, py3kswitch, self.path)
1222 vlog("# Running", cmd)
1223 vlog("# Running", cmd)
1223 normalizenewlines = os.name == 'nt'
1224 normalizenewlines = os.name == 'nt'
1224 result = self._runcommand(cmd, env,
1225 result = self._runcommand(cmd, env,
1225 normalizenewlines=normalizenewlines)
1226 normalizenewlines=normalizenewlines)
1226 if self._aborted:
1227 if self._aborted:
1227 raise KeyboardInterrupt()
1228 raise KeyboardInterrupt()
1228
1229
1229 return result
1230 return result
1230
1231
1231 # Some glob patterns apply only in some circumstances, so the script
1232 # Some glob patterns apply only in some circumstances, so the script
1232 # might want to remove (glob) annotations that otherwise should be
1233 # might want to remove (glob) annotations that otherwise should be
1233 # retained.
1234 # retained.
1234 checkcodeglobpats = [
1235 checkcodeglobpats = [
1235 # On Windows it looks like \ doesn't require a (glob), but we know
1236 # On Windows it looks like \ doesn't require a (glob), but we know
1236 # better.
1237 # better.
1237 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1238 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1238 re.compile(br'^moving \S+/.*[^)]$'),
1239 re.compile(br'^moving \S+/.*[^)]$'),
1239 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1240 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1240 # Not all platforms have 127.0.0.1 as loopback (though most do),
1241 # Not all platforms have 127.0.0.1 as loopback (though most do),
1241 # so we always glob that too.
1242 # so we always glob that too.
1242 re.compile(br'.*\$LOCALIP.*$'),
1243 re.compile(br'.*\$LOCALIP.*$'),
1243 ]
1244 ]
1244
1245
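# On Python 3, indexing a bytes object yields an int, so bchr converts that
# int back into a one-byte bytes value; on Python 2, chr() already returns a
# single-byte str.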
1245 bchr = chr
1246 bchr = chr
1246 if PYTHON3:
1247 if PYTHON3:
1247 bchr = lambda x: bytes([x])
1248 bchr = lambda x: bytes([x])
1248
1249
1249 class TTest(Test):
1250 class TTest(Test):
1250 """A "t test" is a test backed by a .t file."""
1251 """A "t test" is a test backed by a .t file."""
1251
1252
1252 SKIPPED_PREFIX = b'skipped: '
1253 SKIPPED_PREFIX = b'skipped: '
1253 FAILED_PREFIX = b'hghave check failed: '
1254 FAILED_PREFIX = b'hghave check failed: '
1254 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1255 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1255
1256
1256 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1257 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1257 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1258 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1258 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1259 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1259
1260
1260 def __init__(self, path, *args, **kwds):
1261 def __init__(self, path, *args, **kwds):
1261 # accept an extra "case" parameter
1262 # accept an extra "case" parameter
1262 case = kwds.pop('case', [])
1263 case = kwds.pop('case', [])
1263 self._case = case
1264 self._case = case
1264 self._allcases = {x for y in parsettestcases(path) for x in y}
1265 self._allcases = {x for y in parsettestcases(path) for x in y}
1265 super(TTest, self).__init__(path, *args, **kwds)
1266 super(TTest, self).__init__(path, *args, **kwds)
1266 if case:
1267 if case:
1267 casepath = b'#'.join(case)
1268 casepath = b'#'.join(case)
1268 self.name = '%s#%s' % (self.name, _strpath(casepath))
1269 self.name = '%s#%s' % (self.name, _strpath(casepath))
1269 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1270 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1270 self._tmpname += b'-%s' % casepath
1271 self._tmpname += b'-%s' % casepath
1271 self._have = {}
1272 self._have = {}
1272
1273
1273 @property
1274 @property
1274 def refpath(self):
1275 def refpath(self):
1275 return os.path.join(self._testdir, self.bname)
1276 return os.path.join(self._testdir, self.bname)
1276
1277
1277 def _run(self, env):
1278 def _run(self, env):
1278 with open(self.path, 'rb') as f:
1279 with open(self.path, 'rb') as f:
1279 lines = f.readlines()
1280 lines = f.readlines()
1280
1281
1281 # .t file is both reference output and the test input, keep reference
1282 # .t file is both reference output and the test input, keep reference
1282 # output updated with the test input. This avoids some race
1283 # output updated with the test input. This avoids some race
1283 # conditions where the reference output does not match the actual test.
1284 # conditions where the reference output does not match the actual test.
1284 if self._refout is not None:
1285 if self._refout is not None:
1285 self._refout = lines
1286 self._refout = lines
1286
1287
1287 salt, script, after, expected = self._parsetest(lines)
1288 salt, script, after, expected = self._parsetest(lines)
1288
1289
1289 # Write out the generated script.
1290 # Write out the generated script.
1290 fname = b'%s.sh' % self._testtmp
1291 fname = b'%s.sh' % self._testtmp
1291 with open(fname, 'wb') as f:
1292 with open(fname, 'wb') as f:
1292 for l in script:
1293 for l in script:
1293 f.write(l)
1294 f.write(l)
1294
1295
1295 cmd = b'%s "%s"' % (self._shell, fname)
1296 cmd = b'%s "%s"' % (self._shell, fname)
1296 vlog("# Running", cmd)
1297 vlog("# Running", cmd)
1297
1298
1298 exitcode, output = self._runcommand(cmd, env)
1299 exitcode, output = self._runcommand(cmd, env)
1299
1300
1300 if self._aborted:
1301 if self._aborted:
1301 raise KeyboardInterrupt()
1302 raise KeyboardInterrupt()
1302
1303
1303 # Do not merge output if skipped. Return hghave message instead.
1304 # Do not merge output if skipped. Return hghave message instead.
1304 # Similarly, with --debug, output is None.
1305 # Similarly, with --debug, output is None.
1305 if exitcode == self.SKIPPED_STATUS or output is None:
1306 if exitcode == self.SKIPPED_STATUS or output is None:
1306 return exitcode, output
1307 return exitcode, output
1307
1308
1308 return self._processoutput(exitcode, output, salt, after, expected)
1309 return self._processoutput(exitcode, output, salt, after, expected)
1309
1310
1310 def _hghave(self, reqs):
1311 def _hghave(self, reqs):
1311 allreqs = b' '.join(reqs)
1312 allreqs = b' '.join(reqs)
1312 if allreqs in self._have:
1313 if allreqs in self._have:
1313 return self._have.get(allreqs)
1314 return self._have.get(allreqs)
1314
1315
1315 # TODO do something smarter when all other uses of hghave are gone.
1316 # TODO do something smarter when all other uses of hghave are gone.
1316 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1317 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1317 tdir = runtestdir.replace(b'\\', b'/')
1318 tdir = runtestdir.replace(b'\\', b'/')
1318 proc = Popen4(b'%s -c "%s/hghave %s"' %
1319 proc = Popen4(b'%s -c "%s/hghave %s"' %
1319 (self._shell, tdir, allreqs),
1320 (self._shell, tdir, allreqs),
1320 self._testtmp, 0, self._getenv())
1321 self._testtmp, 0, self._getenv())
1321 stdout, stderr = proc.communicate()
1322 stdout, stderr = proc.communicate()
1322 ret = proc.wait()
1323 ret = proc.wait()
1323 if wifexited(ret):
1324 if wifexited(ret):
1324 ret = os.WEXITSTATUS(ret)
1325 ret = os.WEXITSTATUS(ret)
1325 if ret == 2:
1326 if ret == 2:
1326 print(stdout.decode('utf-8'))
1327 print(stdout.decode('utf-8'))
1327 sys.exit(1)
1328 sys.exit(1)
1328
1329
1329 if ret != 0:
1330 if ret != 0:
1330 self._have[allreqs] = (False, stdout)
1331 self._have[allreqs] = (False, stdout)
1331 return False, stdout
1332 return False, stdout
1332
1333
1333 if b'slow' in reqs:
1334 if b'slow' in reqs:
1334 self._timeout = self._slowtimeout
1335 self._timeout = self._slowtimeout
1335
1336
1336 self._have[allreqs] = (True, None)
1337 self._have[allreqs] = (True, None)
1337 return True, None
1338 return True, None
1338
1339
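# _iftest() evaluates the arguments of an '#if' block: arguments naming a
# test case (or its 'no-' negation) are checked against the case currently
# being run, and everything else is treated as an hghave feature
# requirement, e.g. '#if symlink' (the feature name is illustrative).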
1339 def _iftest(self, args):
1340 def _iftest(self, args):
1340 # implements "#if"
1341 # implements "#if"
1341 reqs = []
1342 reqs = []
1342 for arg in args:
1343 for arg in args:
1343 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1344 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1344 if arg[3:] in self._case:
1345 if arg[3:] in self._case:
1345 return False
1346 return False
1346 elif arg in self._allcases:
1347 elif arg in self._allcases:
1347 if arg not in self._case:
1348 if arg not in self._case:
1348 return False
1349 return False
1349 else:
1350 else:
1350 reqs.append(arg)
1351 reqs.append(arg)
1351 return self._hghave(reqs)[0]
1352 return self._hghave(reqs)[0]
1352
1353
1353 def _parsetest(self, lines):
1354 def _parsetest(self, lines):
1354 # We generate a shell script which outputs unique markers to line
1355 # We generate a shell script which outputs unique markers to line
1355 # up script results with our source. These markers include input
1356 # up script results with our source. These markers include input
1356 # line number and the last return code.
1357 # line number and the last return code.
1357 salt = b"SALT%d" % time.time()
1358 salt = b"SALT%d" % time.time()
1358 def addsalt(line, inpython):
1359 def addsalt(line, inpython):
1359 if inpython:
1360 if inpython:
1360 script.append(b'%s %d 0\n' % (salt, line))
1361 script.append(b'%s %d 0\n' % (salt, line))
1361 else:
1362 else:
1362 script.append(b'echo %s %d $?\n' % (salt, line))
1363 script.append(b'echo %s %d $?\n' % (salt, line))
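# A generated marker line looks roughly like 'echo SALT1540000000 42 $?'
# (timestamp and line number illustrative); _processoutput() later splits
# the captured output on these salt markers to line results back up with
# the source .t file.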
1363 active = []
1364 active = []
1364 session = str(uuid.uuid4())
1365 session = str(uuid.uuid4())
1365 if PYTHON3:
1366 if PYTHON3:
1366 session = session.encode('ascii')
1367 session = session.encode('ascii')
1367 def toggletrace(cmd):
1368 def toggletrace(cmd):
1368 if isinstance(cmd, str):
1369 if isinstance(cmd, str):
1369 quoted = shellquote(cmd.strip())
1370 quoted = shellquote(cmd.strip())
1370 else:
1371 else:
1371 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1372 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1372 quoted = quoted.replace(b'\\', b'\\\\')
1373 quoted = quoted.replace(b'\\', b'\\\\')
1373 if active:
1374 if active:
1374 script.append(
1375 script.append(
1375 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1376 b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1376 session, active[0]))
1377 session, active[0]))
1377 script.append(
1378 script.append(
1378 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1379 b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
1379 session, quoted))
1380 session, quoted))
1380 active[0:] = [quoted]
1381 active[0:] = [quoted]
1381
1382
1382 script = []
1383 script = []
1383
1384
1384 # After we run the shell script, we re-unify the script output
1385 # After we run the shell script, we re-unify the script output
1385 # with non-active parts of the source, with synchronization by our
1386 # with non-active parts of the source, with synchronization by our
1386 # SALT line number markers. The after table contains the non-active
1387 # SALT line number markers. The after table contains the non-active
1387 # components, ordered by line number.
1388 # components, ordered by line number.
1388 after = {}
1389 after = {}
1389
1390
1390 # Expected shell script output.
1391 # Expected shell script output.
1391 expected = {}
1392 expected = {}
1392
1393
1393 pos = prepos = -1
1394 pos = prepos = -1
1394
1395
1395 # True or False when in a true or false conditional section
1396 # True or False when in a true or false conditional section
1396 skipping = None
1397 skipping = None
1397
1398
1398 # We keep track of whether or not we're in a Python block so we
1399 # We keep track of whether or not we're in a Python block so we
1399 # can generate the surrounding doctest magic.
1400 # can generate the surrounding doctest magic.
1400 inpython = False
1401 inpython = False
1401
1402
1402 if self._debug:
1403 if self._debug:
1403 script.append(b'set -x\n')
1404 script.append(b'set -x\n')
1404 if self._hgcommand != b'hg':
1405 if self._hgcommand != b'hg':
1405 script.append(b'alias hg="%s"\n' % self._hgcommand)
1406 script.append(b'alias hg="%s"\n' % self._hgcommand)
1406 if os.getenv('MSYSTEM'):
1407 if os.getenv('MSYSTEM'):
1407 script.append(b'alias pwd="pwd -W"\n')
1408 script.append(b'alias pwd="pwd -W"\n')
1408
1409
1409 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1410 hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
1410 if hgcatapult and hgcatapult != os.devnull:
1411 if hgcatapult and hgcatapult != os.devnull:
1411 # Kludge: use a while loop to keep the pipe from getting
1412 # Kludge: use a while loop to keep the pipe from getting
1412 # closed by our echo commands. The still-running file gets
1413 # closed by our echo commands. The still-running file gets
1413 # reaped at the end of the script, which causes the while
1414 # reaped at the end of the script, which causes the while
1414 # loop to exit and closes the pipe. Sigh.
1415 # loop to exit and closes the pipe. Sigh.
1415 script.append(
1416 script.append(
1416 b'rtendtracing() {\n'
1417 b'rtendtracing() {\n'
1417 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1418 b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1418 b' rm -f "$TESTTMP/.still-running"\n'
1419 b' rm -f "$TESTTMP/.still-running"\n'
1419 b'}\n'
1420 b'}\n'
1420 b'trap "rtendtracing" 0\n'
1421 b'trap "rtendtracing" 0\n'
1421 b'touch "$TESTTMP/.still-running"\n'
1422 b'touch "$TESTTMP/.still-running"\n'
1422 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1423 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1423 b'> $HGCATAPULTSERVERPIPE &\n'
1424 b'> $HGCATAPULTSERVERPIPE &\n'
1424 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1425 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1425 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1426 b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
1426 % {
1427 % {
1427 'name': self.name,
1428 'name': self.name,
1428 'session': session,
1429 'session': session,
1429 }
1430 }
1430 )
1431 )
1431
1432
1432 if self._case:
1433 if self._case:
1433 casestr = b'#'.join(self._case)
1434 casestr = b'#'.join(self._case)
1434 if isinstance(self._case, str):
1435 if isinstance(self._case, str):
1435 quoted = shellquote(casestr)
1436 quoted = shellquote(casestr)
1436 else:
1437 else:
1437 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1438 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1438 script.append(b'TESTCASE=%s\n' % quoted)
1439 script.append(b'TESTCASE=%s\n' % quoted)
1439 script.append(b'export TESTCASE\n')
1440 script.append(b'export TESTCASE\n')
1440
1441
1441 n = 0
1442 n = 0
1442 for n, l in enumerate(lines):
1443 for n, l in enumerate(lines):
1443 if not l.endswith(b'\n'):
1444 if not l.endswith(b'\n'):
1444 l += b'\n'
1445 l += b'\n'
1445 if l.startswith(b'#require'):
1446 if l.startswith(b'#require'):
1446 lsplit = l.split()
1447 lsplit = l.split()
1447 if len(lsplit) < 2 or lsplit[0] != b'#require':
1448 if len(lsplit) < 2 or lsplit[0] != b'#require':
1448 after.setdefault(pos, []).append(' !!! invalid #require\n')
1449 after.setdefault(pos, []).append(' !!! invalid #require\n')
1449 if not skipping:
1450 if not skipping:
1450 haveresult, message = self._hghave(lsplit[1:])
1451 haveresult, message = self._hghave(lsplit[1:])
1451 if not haveresult:
1452 if not haveresult:
1452 script = [b'echo "%s"\nexit 80\n' % message]
1453 script = [b'echo "%s"\nexit 80\n' % message]
1453 break
1454 break
1454 after.setdefault(pos, []).append(l)
1455 after.setdefault(pos, []).append(l)
1455 elif l.startswith(b'#if'):
1456 elif l.startswith(b'#if'):
1456 lsplit = l.split()
1457 lsplit = l.split()
1457 if len(lsplit) < 2 or lsplit[0] != b'#if':
1458 if len(lsplit) < 2 or lsplit[0] != b'#if':
1458 after.setdefault(pos, []).append(' !!! invalid #if\n')
1459 after.setdefault(pos, []).append(' !!! invalid #if\n')
1459 if skipping is not None:
1460 if skipping is not None:
1460 after.setdefault(pos, []).append(' !!! nested #if\n')
1461 after.setdefault(pos, []).append(' !!! nested #if\n')
1461 skipping = not self._iftest(lsplit[1:])
1462 skipping = not self._iftest(lsplit[1:])
1462 after.setdefault(pos, []).append(l)
1463 after.setdefault(pos, []).append(l)
1463 elif l.startswith(b'#else'):
1464 elif l.startswith(b'#else'):
1464 if skipping is None:
1465 if skipping is None:
1465 after.setdefault(pos, []).append(' !!! missing #if\n')
1466 after.setdefault(pos, []).append(' !!! missing #if\n')
1466 skipping = not skipping
1467 skipping = not skipping
1467 after.setdefault(pos, []).append(l)
1468 after.setdefault(pos, []).append(l)
1468 elif l.startswith(b'#endif'):
1469 elif l.startswith(b'#endif'):
1469 if skipping is None:
1470 if skipping is None:
1470 after.setdefault(pos, []).append(' !!! missing #if\n')
1471 after.setdefault(pos, []).append(' !!! missing #if\n')
1471 skipping = None
1472 skipping = None
1472 after.setdefault(pos, []).append(l)
1473 after.setdefault(pos, []).append(l)
1473 elif skipping:
1474 elif skipping:
1474 after.setdefault(pos, []).append(l)
1475 after.setdefault(pos, []).append(l)
1475 elif l.startswith(b' >>> '): # python inlines
1476 elif l.startswith(b' >>> '): # python inlines
1476 after.setdefault(pos, []).append(l)
1477 after.setdefault(pos, []).append(l)
1477 prepos = pos
1478 prepos = pos
1478 pos = n
1479 pos = n
1479 if not inpython:
1480 if not inpython:
1480 # We've just entered a Python block. Add the header.
1481 # We've just entered a Python block. Add the header.
1481 inpython = True
1482 inpython = True
1482 addsalt(prepos, False) # Make sure we report the exit code.
1483 addsalt(prepos, False) # Make sure we report the exit code.
1483 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1484 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1484 addsalt(n, True)
1485 addsalt(n, True)
1485 script.append(l[2:])
1486 script.append(l[2:])
1486 elif l.startswith(b' ... '): # python inlines
1487 elif l.startswith(b' ... '): # python inlines
1487 after.setdefault(prepos, []).append(l)
1488 after.setdefault(prepos, []).append(l)
1488 script.append(l[2:])
1489 script.append(l[2:])
1489 elif l.startswith(b' $ '): # commands
1490 elif l.startswith(b' $ '): # commands
1490 if inpython:
1491 if inpython:
1491 script.append(b'EOF\n')
1492 script.append(b'EOF\n')
1492 inpython = False
1493 inpython = False
1493 after.setdefault(pos, []).append(l)
1494 after.setdefault(pos, []).append(l)
1494 prepos = pos
1495 prepos = pos
1495 pos = n
1496 pos = n
1496 addsalt(n, False)
1497 addsalt(n, False)
1497 rawcmd = l[4:]
1498 rawcmd = l[4:]
1498 cmd = rawcmd.split()
1499 cmd = rawcmd.split()
1499 toggletrace(rawcmd)
1500 toggletrace(rawcmd)
1500 if len(cmd) == 2 and cmd[0] == b'cd':
1501 if len(cmd) == 2 and cmd[0] == b'cd':
1501 l = b' $ cd %s || exit 1\n' % cmd[1]
1502 l = b' $ cd %s || exit 1\n' % cmd[1]
1502 script.append(rawcmd)
1503 script.append(rawcmd)
1503 elif l.startswith(b' > '): # continuations
1504 elif l.startswith(b' > '): # continuations
1504 after.setdefault(prepos, []).append(l)
1505 after.setdefault(prepos, []).append(l)
1505 script.append(l[4:])
1506 script.append(l[4:])
1506 elif l.startswith(b' '): # results
1507 elif l.startswith(b' '): # results
1507 # Queue up a list of expected results.
1508 # Queue up a list of expected results.
1508 expected.setdefault(pos, []).append(l[2:])
1509 expected.setdefault(pos, []).append(l[2:])
1509 else:
1510 else:
1510 if inpython:
1511 if inpython:
1511 script.append(b'EOF\n')
1512 script.append(b'EOF\n')
1512 inpython = False
1513 inpython = False
1513 # Non-command/result. Queue up for merged output.
1514 # Non-command/result. Queue up for merged output.
1514 after.setdefault(pos, []).append(l)
1515 after.setdefault(pos, []).append(l)
1515
1516
1516 if inpython:
1517 if inpython:
1517 script.append(b'EOF\n')
1518 script.append(b'EOF\n')
1518 if skipping is not None:
1519 if skipping is not None:
1519 after.setdefault(pos, []).append(' !!! missing #endif\n')
1520 after.setdefault(pos, []).append(' !!! missing #endif\n')
1520 addsalt(n + 1, False)
1521 addsalt(n + 1, False)
1521 return salt, script, after, expected
1522 return salt, script, after, expected
1522
1523
1523 def _processoutput(self, exitcode, output, salt, after, expected):
1524 def _processoutput(self, exitcode, output, salt, after, expected):
1524 # Merge the script output back into a unified test.
1525 # Merge the script output back into a unified test.
1525 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1526 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1526 if exitcode != 0:
1527 if exitcode != 0:
1527 warnonly = 3
1528 warnonly = 3
1528
1529
1529 pos = -1
1530 pos = -1
1530 postout = []
1531 postout = []
1531 for l in output:
1532 for l in output:
1532 lout, lcmd = l, None
1533 lout, lcmd = l, None
1533 if salt in l:
1534 if salt in l:
1534 lout, lcmd = l.split(salt, 1)
1535 lout, lcmd = l.split(salt, 1)
1535
1536
1536 while lout:
1537 while lout:
1537 if not lout.endswith(b'\n'):
1538 if not lout.endswith(b'\n'):
1538 lout += b' (no-eol)\n'
1539 lout += b' (no-eol)\n'
1539
1540
1540 # Find the expected output at the current position.
1541 # Find the expected output at the current position.
1541 els = [None]
1542 els = [None]
1542 if expected.get(pos, None):
1543 if expected.get(pos, None):
1543 els = expected[pos]
1544 els = expected[pos]
1544
1545
1545 optional = []
1546 optional = []
1546 for i, el in enumerate(els):
1547 for i, el in enumerate(els):
1547 r = False
1548 r = False
1548 if el:
1549 if el:
1549 r, exact = self.linematch(el, lout)
1550 r, exact = self.linematch(el, lout)
1550 if isinstance(r, str):
1551 if isinstance(r, str):
1551 if r == '-glob':
1552 if r == '-glob':
1552 lout = ''.join(el.rsplit(' (glob)', 1))
1553 lout = ''.join(el.rsplit(' (glob)', 1))
1553 r = '' # Warn only this line.
1554 r = '' # Warn only this line.
1554 elif r == "retry":
1555 elif r == "retry":
1555 postout.append(b' ' + el)
1556 postout.append(b' ' + el)
1556 else:
1557 else:
1557 log('\ninfo, unknown linematch result: %r\n' % r)
1558 log('\ninfo, unknown linematch result: %r\n' % r)
1558 r = False
1559 r = False
1559 if r:
1560 if r:
1560 els.pop(i)
1561 els.pop(i)
1561 break
1562 break
1562 if el:
1563 if el:
1563 if el.endswith(b" (?)\n"):
1564 if el.endswith(b" (?)\n"):
1564 optional.append(i)
1565 optional.append(i)
1565 else:
1566 else:
1566 m = optline.match(el)
1567 m = optline.match(el)
1567 if m:
1568 if m:
1568 conditions = [
1569 conditions = [
1569 c for c in m.group(2).split(b' ')]
1570 c for c in m.group(2).split(b' ')]
1570
1571
1571 if not self._iftest(conditions):
1572 if not self._iftest(conditions):
1572 optional.append(i)
1573 optional.append(i)
1573 if exact:
1574 if exact:
1574 # Don't allow the line to be matched against a later
1575 # Don't allow the line to be matched against a later
1575 # line in the output.
1576 # line in the output.
1576 els.pop(i)
1577 els.pop(i)
1577 break
1578 break
1578
1579
1579 if r:
1580 if r:
1580 if r == "retry":
1581 if r == "retry":
1581 continue
1582 continue
1582 # clean up any optional leftovers
1583 # clean up any optional leftovers
1583 for i in optional:
1584 for i in optional:
1584 postout.append(b' ' + els[i])
1585 postout.append(b' ' + els[i])
1585 for i in reversed(optional):
1586 for i in reversed(optional):
1586 del els[i]
1587 del els[i]
1587 postout.append(b' ' + el)
1588 postout.append(b' ' + el)
1588 else:
1589 else:
1589 if self.NEEDESCAPE(lout):
1590 if self.NEEDESCAPE(lout):
1590 lout = TTest._stringescape(b'%s (esc)\n' %
1591 lout = TTest._stringescape(b'%s (esc)\n' %
1591 lout.rstrip(b'\n'))
1592 lout.rstrip(b'\n'))
1592 postout.append(b' ' + lout) # Let diff deal with it.
1593 postout.append(b' ' + lout) # Let diff deal with it.
1593 if r != '': # If line failed.
1594 if r != '': # If line failed.
1594 warnonly = 3 # for sure not
1595 warnonly = 3 # for sure not
1595 elif warnonly == 1: # Is "not yet" and line is warn only.
1596 elif warnonly == 1: # Is "not yet" and line is warn only.
1596 warnonly = 2 # Yes do warn.
1597 warnonly = 2 # Yes do warn.
1597 break
1598 break
1598 else:
1599 else:
1599 # clean up any optional leftovers
1600 # clean up any optional leftovers
1600 while expected.get(pos, None):
1601 while expected.get(pos, None):
1601 el = expected[pos].pop(0)
1602 el = expected[pos].pop(0)
1602 if el:
1603 if el:
1603 if not el.endswith(b" (?)\n"):
1604 if not el.endswith(b" (?)\n"):
1604 m = optline.match(el)
1605 m = optline.match(el)
1605 if m:
1606 if m:
1606 conditions = [c for c in m.group(2).split(b' ')]
1607 conditions = [c for c in m.group(2).split(b' ')]
1607
1608
1608 if self._iftest(conditions):
1609 if self._iftest(conditions):
1609 # Don't append as optional line
1610 # Don't append as optional line
1610 continue
1611 continue
1611 else:
1612 else:
1612 continue
1613 continue
1613 postout.append(b' ' + el)
1614 postout.append(b' ' + el)
1614
1615
1615 if lcmd:
1616 if lcmd:
1616 # Add on last return code.
1617 # Add on last return code.
1617 ret = int(lcmd.split()[1])
1618 ret = int(lcmd.split()[1])
1618 if ret != 0:
1619 if ret != 0:
1619 postout.append(b' [%d]\n' % ret)
1620 postout.append(b' [%d]\n' % ret)
1620 if pos in after:
1621 if pos in after:
1621 # Merge in non-active test bits.
1622 # Merge in non-active test bits.
1622 postout += after.pop(pos)
1623 postout += after.pop(pos)
1623 pos = int(lcmd.split()[0])
1624 pos = int(lcmd.split()[0])
1624
1625
1625 if pos in after:
1626 if pos in after:
1626 postout += after.pop(pos)
1627 postout += after.pop(pos)
1627
1628
1628 if warnonly == 2:
1629 if warnonly == 2:
1629 exitcode = False # Set exitcode to warned.
1630 exitcode = False # Set exitcode to warned.
1630
1631
1631 return exitcode, postout
1632 return exitcode, postout
1632
1633
1633 @staticmethod
1634 @staticmethod
1634 def rematch(el, l):
1635 def rematch(el, l):
1635 try:
1636 try:
1636 el = b'(?:' + el + b')'
1637 el = b'(?:' + el + b')'
1637 # use \Z to ensure that the regex matches to the end of the string
1638 # use \Z to ensure that the regex matches to the end of the string
1638 if os.name == 'nt':
1639 if os.name == 'nt':
1639 return re.match(el + br'\r?\n\Z', l)
1640 return re.match(el + br'\r?\n\Z', l)
1640 return re.match(el + br'\n\Z', l)
1641 return re.match(el + br'\n\Z', l)
1641 except re.error:
1642 except re.error:
1642 # el is an invalid regex
1643 # el is an invalid regex
1643 return False
1644 return False
1644
1645
1645 @staticmethod
1646 @staticmethod
1646 def globmatch(el, l):
1647 def globmatch(el, l):
1647 # The only supported special characters are * and ?, plus /, which also
1648 # The only supported special characters are * and ?, plus /, which also
1648 # matches \ on Windows. Escaping of these characters is supported.
1649 # matches \ on Windows. Escaping of these characters is supported.
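# In the translation below, '*' becomes '.*', '?' becomes '.', and '/'
# additionally matches a backslash on platforms that define os.altsep;
# every other character is passed through re.escape().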
1649 if el + b'\n' == l:
1650 if el + b'\n' == l:
1650 if os.altsep:
1651 if os.altsep:
1651 # matching on "/" is not needed for this line
1652 # matching on "/" is not needed for this line
1652 for pat in checkcodeglobpats:
1653 for pat in checkcodeglobpats:
1653 if pat.match(el):
1654 if pat.match(el):
1654 return True
1655 return True
1655 return b'-glob'
1656 return b'-glob'
1656 return True
1657 return True
1657 el = el.replace(b'$LOCALIP', b'*')
1658 el = el.replace(b'$LOCALIP', b'*')
1658 i, n = 0, len(el)
1659 i, n = 0, len(el)
1659 res = b''
1660 res = b''
1660 while i < n:
1661 while i < n:
1661 c = el[i:i + 1]
1662 c = el[i:i + 1]
1662 i += 1
1663 i += 1
1663 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1664 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1664 res += el[i - 1:i + 1]
1665 res += el[i - 1:i + 1]
1665 i += 1
1666 i += 1
1666 elif c == b'*':
1667 elif c == b'*':
1667 res += b'.*'
1668 res += b'.*'
1668 elif c == b'?':
1669 elif c == b'?':
1669 res += b'.'
1670 res += b'.'
1670 elif c == b'/' and os.altsep:
1671 elif c == b'/' and os.altsep:
1671 res += b'[/\\\\]'
1672 res += b'[/\\\\]'
1672 else:
1673 else:
1673 res += re.escape(c)
1674 res += re.escape(c)
1674 return TTest.rematch(res, l)
1675 return TTest.rematch(res, l)
1675
1676
1676 def linematch(self, el, l):
1677 def linematch(self, el, l):
1677 if el == l: # perfect match (fast)
1678 if el == l: # perfect match (fast)
1678 return True, True
1679 return True, True
1679 retry = False
1680 retry = False
1680 if el.endswith(b" (?)\n"):
1681 if el.endswith(b" (?)\n"):
1681 retry = "retry"
1682 retry = "retry"
1682 el = el[:-5] + b"\n"
1683 el = el[:-5] + b"\n"
1683 else:
1684 else:
1684 m = optline.match(el)
1685 m = optline.match(el)
1685 if m:
1686 if m:
1686 conditions = [c for c in m.group(2).split(b' ')]
1687 conditions = [c for c in m.group(2).split(b' ')]
1687
1688
1688 el = m.group(1) + b"\n"
1689 el = m.group(1) + b"\n"
1689 if not self._iftest(conditions):
1690 if not self._iftest(conditions):
1690 retry = "retry" # Not required by listed features
1691 retry = "retry" # Not required by listed features
1691
1692
1692 if el.endswith(b" (esc)\n"):
1693 if el.endswith(b" (esc)\n"):
1693 if PYTHON3:
1694 if PYTHON3:
1694 el = el[:-7].decode('unicode_escape') + '\n'
1695 el = el[:-7].decode('unicode_escape') + '\n'
1695 el = el.encode('utf-8')
1696 el = el.encode('utf-8')
1696 else:
1697 else:
1697 el = el[:-7].decode('string-escape') + '\n'
1698 el = el[:-7].decode('string-escape') + '\n'
1698 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1699 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1699 return True, True
1700 return True, True
1700 if el.endswith(b" (re)\n"):
1701 if el.endswith(b" (re)\n"):
1701 return (TTest.rematch(el[:-6], l) or retry), False
1702 return (TTest.rematch(el[:-6], l) or retry), False
1702 if el.endswith(b" (glob)\n"):
1703 if el.endswith(b" (glob)\n"):
1703 # ignore '(glob)' added to l by 'replacements'
1704 # ignore '(glob)' added to l by 'replacements'
1704 if l.endswith(b" (glob)\n"):
1705 if l.endswith(b" (glob)\n"):
1705 l = l[:-8] + b"\n"
1706 l = l[:-8] + b"\n"
1706 return (TTest.globmatch(el[:-8], l) or retry), False
1707 return (TTest.globmatch(el[:-8], l) or retry), False
1707 if os.altsep:
1708 if os.altsep:
1708 _l = l.replace(b'\\', b'/')
1709 _l = l.replace(b'\\', b'/')
1709 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1710 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1710 return True, True
1711 return True, True
1711 return retry, True
1712 return retry, True
1712
1713
1713 @staticmethod
1714 @staticmethod
1714 def parsehghaveoutput(lines):
1715 def parsehghaveoutput(lines):
1715 '''Parse hghave log lines.
1716 '''Parse hghave log lines.
1716
1717
1717 Return tuple of lists (missing, failed):
1718 Return tuple of lists (missing, failed):
1718 * the missing/unknown features
1719 * the missing/unknown features
1719 * the features for which existence check failed'''
1720 * the features for which existence check failed'''
1720 missing = []
1721 missing = []
1721 failed = []
1722 failed = []
1722 for line in lines:
1723 for line in lines:
1723 if line.startswith(TTest.SKIPPED_PREFIX):
1724 if line.startswith(TTest.SKIPPED_PREFIX):
1724 line = line.splitlines()[0]
1725 line = line.splitlines()[0]
1725 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1726 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1726 elif line.startswith(TTest.FAILED_PREFIX):
1727 elif line.startswith(TTest.FAILED_PREFIX):
1727 line = line.splitlines()[0]
1728 line = line.splitlines()[0]
1728 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1729 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1729
1730
1730 return missing, failed
1731 return missing, failed
1731
1732
1732 @staticmethod
1733 @staticmethod
1733 def _escapef(m):
1734 def _escapef(m):
1734 return TTest.ESCAPEMAP[m.group(0)]
1735 return TTest.ESCAPEMAP[m.group(0)]
1735
1736
1736 @staticmethod
1737 @staticmethod
1737 def _stringescape(s):
1738 def _stringescape(s):
1738 return TTest.ESCAPESUB(TTest._escapef, s)
1739 return TTest.ESCAPESUB(TTest._escapef, s)
1739
1740
1740 iolock = threading.RLock()
1741 iolock = threading.RLock()
1741 firstlock = threading.RLock()
1742 firstlock = threading.RLock()
1742 firsterror = False
1743 firsterror = False
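# firstlock serializes addOutputMismatch() reporting across test threads;
# firsterror records that a first failure has already been reported (its
# consumer lives outside this excerpt, so this reading is hedged).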
1743
1744
1744 class TestResult(unittest._TextTestResult):
1745 class TestResult(unittest._TextTestResult):
1745 """Holds results when executing via unittest."""
1746 """Holds results when executing via unittest."""
1746 # Don't worry too much about accessing the non-public _TextTestResult.
1747 # Don't worry too much about accessing the non-public _TextTestResult.
1747 # It is relatively common in Python testing tools.
1748 # It is relatively common in Python testing tools.
1748 def __init__(self, options, *args, **kwargs):
1749 def __init__(self, options, *args, **kwargs):
1749 super(TestResult, self).__init__(*args, **kwargs)
1750 super(TestResult, self).__init__(*args, **kwargs)
1750
1751
1751 self._options = options
1752 self._options = options
1752
1753
1753 # unittest.TestResult didn't have skipped until 2.7. We need to
1754 # unittest.TestResult didn't have skipped until 2.7. We need to
1754 # polyfill it.
1755 # polyfill it.
1755 self.skipped = []
1756 self.skipped = []
1756
1757
1757 # We have a custom "ignored" result that isn't present in any Python
1758 # We have a custom "ignored" result that isn't present in any Python
1758 # unittest implementation. It is very similar to skipped. It may make
1759 # unittest implementation. It is very similar to skipped. It may make
1759 # sense to map it into skip some day.
1760 # sense to map it into skip some day.
1760 self.ignored = []
1761 self.ignored = []
1761
1762
1762 self.times = []
1763 self.times = []
1763 self._firststarttime = None
1764 self._firststarttime = None
1764 # Data stored for the benefit of generating xunit reports.
1765 # Data stored for the benefit of generating xunit reports.
1765 self.successes = []
1766 self.successes = []
1766 self.faildata = {}
1767 self.faildata = {}
1767
1768
1768 if options.color == 'auto':
1769 if options.color == 'auto':
1769 self.color = pygmentspresent and self.stream.isatty()
1770 self.color = pygmentspresent and self.stream.isatty()
1770 elif options.color == 'never':
1771 elif options.color == 'never':
1771 self.color = False
1772 self.color = False
1772 else: # 'always', for testing purposes
1773 else: # 'always', for testing purposes
1773 self.color = pygmentspresent
1774 self.color = pygmentspresent
1774
1775
1775 def onStart(self, test):
1776 def onStart(self, test):
1776 """ Can be overriden by custom TestResult
1777 """ Can be overriden by custom TestResult
1777 """
1778 """
1778
1779
1779 def onEnd(self):
1780 def onEnd(self):
1780 """ Can be overriden by custom TestResult
1781 """ Can be overriden by custom TestResult
1781 """
1782 """
1782
1783
1783 def addFailure(self, test, reason):
1784 def addFailure(self, test, reason):
1784 self.failures.append((test, reason))
1785 self.failures.append((test, reason))
1785
1786
1786 if self._options.first:
1787 if self._options.first:
1787 self.stop()
1788 self.stop()
1788 else:
1789 else:
1789 with iolock:
1790 with iolock:
1790 if reason == "timed out":
1791 if reason == "timed out":
1791 self.stream.write('t')
1792 self.stream.write('t')
1792 else:
1793 else:
1793 if not self._options.nodiff:
1794 if not self._options.nodiff:
1794 self.stream.write('\n')
1795 self.stream.write('\n')
1795 # Exclude the '\n' from highlighting to lex correctly
1796 # Exclude the '\n' from highlighting to lex correctly
1796 formatted = 'ERROR: %s output changed\n' % test
1797 formatted = 'ERROR: %s output changed\n' % test
1797 self.stream.write(highlightmsg(formatted, self.color))
1798 self.stream.write(highlightmsg(formatted, self.color))
1798 self.stream.write('!')
1799 self.stream.write('!')
1799
1800
1800 self.stream.flush()
1801 self.stream.flush()
1801
1802
1802 def addSuccess(self, test):
1803 def addSuccess(self, test):
1803 with iolock:
1804 with iolock:
1804 super(TestResult, self).addSuccess(test)
1805 super(TestResult, self).addSuccess(test)
1805 self.successes.append(test)
1806 self.successes.append(test)
1806
1807
1807 def addError(self, test, err):
1808 def addError(self, test, err):
1808 super(TestResult, self).addError(test, err)
1809 super(TestResult, self).addError(test, err)
1809 if self._options.first:
1810 if self._options.first:
1810 self.stop()
1811 self.stop()
1811
1812
1812 # Polyfill.
1813 # Polyfill.
1813 def addSkip(self, test, reason):
1814 def addSkip(self, test, reason):
1814 self.skipped.append((test, reason))
1815 self.skipped.append((test, reason))
1815 with iolock:
1816 with iolock:
1816 if self.showAll:
1817 if self.showAll:
1817 self.stream.writeln('skipped %s' % reason)
1818 self.stream.writeln('skipped %s' % reason)
1818 else:
1819 else:
1819 self.stream.write('s')
1820 self.stream.write('s')
1820 self.stream.flush()
1821 self.stream.flush()
1821
1822
1822 def addIgnore(self, test, reason):
1823 def addIgnore(self, test, reason):
1823 self.ignored.append((test, reason))
1824 self.ignored.append((test, reason))
1824 with iolock:
1825 with iolock:
1825 if self.showAll:
1826 if self.showAll:
1826 self.stream.writeln('ignored %s' % reason)
1827 self.stream.writeln('ignored %s' % reason)
1827 else:
1828 else:
1828 if reason not in ('not retesting', "doesn't match keyword"):
1829 if reason not in ('not retesting', "doesn't match keyword"):
1829 self.stream.write('i')
1830 self.stream.write('i')
1830 else:
1831 else:
1831 self.testsRun += 1
1832 self.testsRun += 1
1832 self.stream.flush()
1833 self.stream.flush()
1833
1834
1834 def addOutputMismatch(self, test, ret, got, expected):
1835 def addOutputMismatch(self, test, ret, got, expected):
1835 """Record a mismatch in test output for a particular test."""
1836 """Record a mismatch in test output for a particular test."""
1836 if self.shouldStop or firsterror:
1837 if self.shouldStop or firsterror:
1837 # don't print, some other test case already failed and
1838 # don't print, some other test case already failed and
1838 # printed, we're just stale and probably failed due to our
1839 # printed, we're just stale and probably failed due to our
1839 # temp dir getting cleaned up.
1840 # temp dir getting cleaned up.
1840 return
1841 return
1841
1842
1842 accepted = False
1843 accepted = False
1843 lines = []
1844 lines = []
1844
1845
1845 with iolock:
1846 with iolock:
1846 if self._options.nodiff:
1847 if self._options.nodiff:
1847 pass
1848 pass
1848 elif self._options.view:
1849 elif self._options.view:
1849 v = self._options.view
1850 v = self._options.view
1850 os.system(r"%s %s %s" %
1851 os.system(r"%s %s %s" %
1851 (v, _strpath(test.refpath), _strpath(test.errpath)))
1852 (v, _strpath(test.refpath), _strpath(test.errpath)))
1852 else:
1853 else:
1853 servefail, lines = getdiff(expected, got,
1854 servefail, lines = getdiff(expected, got,
1854 test.refpath, test.errpath)
1855 test.refpath, test.errpath)
1855 self.stream.write('\n')
1856 self.stream.write('\n')
1856 for line in lines:
1857 for line in lines:
1857 line = highlightdiff(line, self.color)
1858 line = highlightdiff(line, self.color)
1858 if PYTHON3:
1859 if PYTHON3:
1859 self.stream.flush()
1860 self.stream.flush()
1860 self.stream.buffer.write(line)
1861 self.stream.buffer.write(line)
1861 self.stream.buffer.flush()
1862 self.stream.buffer.flush()
1862 else:
1863 else:
1863 self.stream.write(line)
1864 self.stream.write(line)
1864 self.stream.flush()
1865 self.stream.flush()
1865
1866
1866 if servefail:
1867 if servefail:
1867 raise test.failureException(
1868 raise test.failureException(
1868 'server failed to start (HGPORT=%s)' % test._startport)
1869 'server failed to start (HGPORT=%s)' % test._startport)
1869
1870
1870 # handle interactive prompt without releasing iolock
1871 # handle interactive prompt without releasing iolock
1871 if self._options.interactive:
1872 if self._options.interactive:
1872 if test.readrefout() != expected:
1873 if test.readrefout() != expected:
1873 self.stream.write(
1874 self.stream.write(
1874 'Reference output has changed (run again to prompt '
1875 'Reference output has changed (run again to prompt '
1875 'changes)')
1876 'changes)')
1876 else:
1877 else:
1877 self.stream.write('Accept this change? [n] ')
1878 self.stream.write('Accept this change? [n] ')
1878 self.stream.flush()
1879 self.stream.flush()
1879 answer = sys.stdin.readline().strip()
1880 answer = sys.stdin.readline().strip()
1880 if answer.lower() in ('y', 'yes'):
1881 if answer.lower() in ('y', 'yes'):
1881 if test.path.endswith(b'.t'):
1882 if test.path.endswith(b'.t'):
1882 rename(test.errpath, test.path)
1883 rename(test.errpath, test.path)
1883 else:
1884 else:
1884 rename(test.errpath, '%s.out' % test.path)
1885 rename(test.errpath, '%s.out' % test.path)
1885 accepted = True
1886 accepted = True
1886 if not accepted:
1887 if not accepted:
1887 self.faildata[test.name] = b''.join(lines)
1888 self.faildata[test.name] = b''.join(lines)
1888
1889
1889 return accepted
1890 return accepted
1890
1891
1891 def startTest(self, test):
1892 def startTest(self, test):
1892 super(TestResult, self).startTest(test)
1893 super(TestResult, self).startTest(test)
1893
1894
1894 # os.times() reports the user and system CPU time consumed by this process
1895 # os.times() reports the user and system CPU time consumed by this process
1895 # and its child processes, along with the elapsed real time. One limitation:
1896 # and its child processes, along with the elapsed real time. One limitation:
1896 # the child and elapsed fields are only meaningful on Unix-like systems,
1897 # the child and elapsed fields are only meaningful on Unix-like systems,
1897 # not on Windows.
1898 # not on Windows.
1898 test.started = os.times()
1899 test.started = os.times()
1899 if self._firststarttime is None: # thread racy but irrelevant
1900 if self._firststarttime is None: # thread racy but irrelevant
1900 self._firststarttime = test.started[4]
1901 self._firststarttime = test.started[4]
1901
1902
1902 def stopTest(self, test, interrupted=False):
1903 def stopTest(self, test, interrupted=False):
1903 super(TestResult, self).stopTest(test)
1904 super(TestResult, self).stopTest(test)
1904
1905
1905 test.stopped = os.times()
1906 test.stopped = os.times()
1906
1907
1907 starttime = test.started
1908 starttime = test.started
1908 endtime = test.stopped
1909 endtime = test.stopped
1909 origin = self._firststarttime
1910 origin = self._firststarttime
1910 self.times.append((test.name,
1911 self.times.append((test.name,
1911 endtime[2] - starttime[2], # user space CPU time
1912 endtime[2] - starttime[2], # user space CPU time
1912 endtime[3] - starttime[3], # sys space CPU time
1913 endtime[3] - starttime[3], # sys space CPU time
1913 endtime[4] - starttime[4], # real time
1914 endtime[4] - starttime[4], # real time
1914 starttime[4] - origin, # start date in run context
1915 starttime[4] - origin, # start date in run context
1915 endtime[4] - origin, # end date in run context
1916 endtime[4] - origin, # end date in run context
1916 ))
1917 ))
1917
1918
1918 if interrupted:
1919 if interrupted:
1919 with iolock:
1920 with iolock:
1920 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1921 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1921 test.name, self.times[-1][3]))
1922 test.name, self.times[-1][3]))
1922
1923
1923 def getTestResult():
1924 def getTestResult():
1924 """
1925 """
1925 Returns the relevant test result
1926 Returns the relevant test result
1926 """
1927 """
1927 if "CUSTOM_TEST_RESULT" in os.environ:
1928 if "CUSTOM_TEST_RESULT" in os.environ:
1928 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1929 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1929 return testresultmodule.TestResult
1930 return testresultmodule.TestResult
1930 else:
1931 else:
1931 return TestResult
1932 return TestResult
1932
1933
1933 class TestSuite(unittest.TestSuite):
1934 class TestSuite(unittest.TestSuite):
1934 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1935 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1935
1936
1936 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1937 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1937 retest=False, keywords=None, loop=False, runs_per_test=1,
1938 retest=False, keywords=None, loop=False, runs_per_test=1,
1938 loadtest=None, showchannels=False,
1939 loadtest=None, showchannels=False,
1939 *args, **kwargs):
1940 *args, **kwargs):
1940 """Create a new instance that can run tests with a configuration.
1941 """Create a new instance that can run tests with a configuration.
1941
1942
1942 testdir specifies the directory where tests are executed from. This
1943 testdir specifies the directory where tests are executed from. This
1943 is typically the ``tests`` directory from Mercurial's source
1944 is typically the ``tests`` directory from Mercurial's source
1944 repository.
1945 repository.
1945
1946
1946 jobs specifies the number of jobs to run concurrently. Each test
1947 jobs specifies the number of jobs to run concurrently. Each test
1947 executes on its own thread. Tests actually spawn new processes, so
1948 executes on its own thread. Tests actually spawn new processes, so
1948 state mutation should not be an issue.
1949 state mutation should not be an issue.
1949
1950
1950 If there is only one job, it will use the main thread.
1951 If there is only one job, it will use the main thread.
1951
1952
1952 whitelist and blacklist denote tests that have been whitelisted and
1953 whitelist and blacklist denote tests that have been whitelisted and
1953 blacklisted, respectively. These arguments don't belong in TestSuite.
1954 blacklisted, respectively. These arguments don't belong in TestSuite.
1954 Instead, whitelist and blacklist should be handled by the thing that
1955 Instead, whitelist and blacklist should be handled by the thing that
1955 populates the TestSuite with tests. They are present to preserve
1956 populates the TestSuite with tests. They are present to preserve
1956 backwards compatible behavior which reports skipped tests as part
1957 backwards compatible behavior which reports skipped tests as part
1957 of the results.
1958 of the results.
1958
1959
1959 retest denotes whether to retest failed tests. This arguably belongs
1960 retest denotes whether to retest failed tests. This arguably belongs
1960 outside of TestSuite.
1961 outside of TestSuite.
1961
1962
1962 keywords denotes key words that will be used to filter which tests
1963 keywords denotes key words that will be used to filter which tests
1963 to execute. This arguably belongs outside of TestSuite.
1964 to execute. This arguably belongs outside of TestSuite.
1964
1965
1965 loop denotes whether to loop over tests forever.
1966 loop denotes whether to loop over tests forever.
1966 """
1967 """
1967 super(TestSuite, self).__init__(*args, **kwargs)
1968 super(TestSuite, self).__init__(*args, **kwargs)
1968
1969
1969 self._jobs = jobs
1970 self._jobs = jobs
1970 self._whitelist = whitelist
1971 self._whitelist = whitelist
1971 self._blacklist = blacklist
1972 self._blacklist = blacklist
1972 self._retest = retest
1973 self._retest = retest
1973 self._keywords = keywords
1974 self._keywords = keywords
1974 self._loop = loop
1975 self._loop = loop
1975 self._runs_per_test = runs_per_test
1976 self._runs_per_test = runs_per_test
1976 self._loadtest = loadtest
1977 self._loadtest = loadtest
1977 self._showchannels = showchannels
1978 self._showchannels = showchannels
1978
1979
1979 def run(self, result):
1980 def run(self, result):
1980 # We have a number of filters that need to be applied. We do this
1981 # We have a number of filters that need to be applied. We do this
1981 # here instead of inside Test because it makes the running logic for
1982 # here instead of inside Test because it makes the running logic for
1982 # Test simpler.
1983 # Test simpler.
1983 tests = []
1984 tests = []
1984 num_tests = [0]
1985 num_tests = [0]
1985 for test in self._tests:
1986 for test in self._tests:
1986 def get():
1987 def get():
1987 num_tests[0] += 1
1988 num_tests[0] += 1
1988 if getattr(test, 'should_reload', False):
1989 if getattr(test, 'should_reload', False):
1989 return self._loadtest(test, num_tests[0])
1990 return self._loadtest(test, num_tests[0])
1990 return test
1991 return test
1991 if not os.path.exists(test.path):
1992 if not os.path.exists(test.path):
1992 result.addSkip(test, "Doesn't exist")
1993 result.addSkip(test, "Doesn't exist")
1993 continue
1994 continue
1994
1995
1995 if not (self._whitelist and test.bname in self._whitelist):
1996 if not (self._whitelist and test.bname in self._whitelist):
1996 if self._blacklist and test.bname in self._blacklist:
1997 if self._blacklist and test.bname in self._blacklist:
1997 result.addSkip(test, 'blacklisted')
1998 result.addSkip(test, 'blacklisted')
1998 continue
1999 continue
1999
2000
2000 if self._retest and not os.path.exists(test.errpath):
2001 if self._retest and not os.path.exists(test.errpath):
2001 result.addIgnore(test, 'not retesting')
2002 result.addIgnore(test, 'not retesting')
2002 continue
2003 continue
2003
2004
2004 if self._keywords:
2005 if self._keywords:
2005 with open(test.path, 'rb') as f:
2006 with open(test.path, 'rb') as f:
2006 t = f.read().lower() + test.bname.lower()
2007 t = f.read().lower() + test.bname.lower()
2007 ignored = False
2008 ignored = False
2008 for k in self._keywords.lower().split():
2009 for k in self._keywords.lower().split():
2009 if k not in t:
2010 if k not in t:
2010 result.addIgnore(test, "doesn't match keyword")
2011 result.addIgnore(test, "doesn't match keyword")
2011 ignored = True
2012 ignored = True
2012 break
2013 break
2013
2014
2014 if ignored:
2015 if ignored:
2015 continue
2016 continue
2016 for _ in xrange(self._runs_per_test):
2017 for _ in xrange(self._runs_per_test):
2017 tests.append(get())
2018 tests.append(get())
2018
2019
2019 runtests = list(tests)
2020 runtests = list(tests)
2020 done = queue.Queue()
2021 done = queue.Queue()
2021 running = 0
2022 running = 0
2022
2023
2023 channels = [""] * self._jobs
2024 channels = [""] * self._jobs
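# One output "channel" per job: a worker claims the first free slot and stores
# "=" plus a shortened test name in it; when showchannels is enabled, the
# stat() thread below prints one character per slot roughly ten times a second
# to visualize which jobs are busy.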
2024
2025
2025 def job(test, result):
2026 def job(test, result):
2026 for n, v in enumerate(channels):
2027 for n, v in enumerate(channels):
2027 if not v:
2028 if not v:
2028 channel = n
2029 channel = n
2029 break
2030 break
2030 else:
2031 else:
2031 raise ValueError('Could not find output channel')
2032 raise ValueError('Could not find output channel')
2032 channels[channel] = "=" + test.name[5:].split(".")[0]
2033 channels[channel] = "=" + test.name[5:].split(".")[0]
2033 try:
2034 try:
2034 test(result)
2035 test(result)
2035 done.put(None)
2036 done.put(None)
2036 except KeyboardInterrupt:
2037 except KeyboardInterrupt:
2037 pass
2038 pass
2038 except: # re-raises
2039 except: # re-raises
2039 done.put(('!', test, 'run-test raised an error, see traceback'))
2040 done.put(('!', test, 'run-test raised an error, see traceback'))
2040 raise
2041 raise
2041 finally:
2042 finally:
2042 try:
2043 try:
2043 channels[channel] = ''
2044 channels[channel] = ''
2044 except IndexError:
2045 except IndexError:
2045 pass
2046 pass
2046
2047
2047 def stat():
2048 def stat():
2048 count = 0
2049 count = 0
2049 while channels:
2050 while channels:
2050 d = '\n%03s ' % count
2051 d = '\n%03s ' % count
2051 for n, v in enumerate(channels):
2052 for n, v in enumerate(channels):
2052 if v:
2053 if v:
2053 d += v[0]
2054 d += v[0]
2054 channels[n] = v[1:] or '.'
2055 channels[n] = v[1:] or '.'
2055 else:
2056 else:
2056 d += ' '
2057 d += ' '
2057 d += ' '
2058 d += ' '
2058 with iolock:
2059 with iolock:
2059 sys.stdout.write(d + ' ')
2060 sys.stdout.write(d + ' ')
2060 sys.stdout.flush()
2061 sys.stdout.flush()
2061 for x in xrange(10):
2062 for x in xrange(10):
2062 if channels:
2063 if channels:
2063 time.sleep(.1)
2064 time.sleep(.1)
2064 count += 1
2065 count += 1
2065
2066
2066 stoppedearly = False
2067 stoppedearly = False
2067
2068
2068 if self._showchannels:
2069 if self._showchannels:
2069 statthread = threading.Thread(target=stat, name="stat")
2070 statthread = threading.Thread(target=stat, name="stat")
2070 statthread.start()
2071 statthread.start()
2071
2072
2072 try:
2073 try:
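# Simple thread-pool scheduler: start tests until self._jobs are running, then
# block on the `done` queue (with a one-second timeout) and start another test
# each time a worker finishes.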
2073 while tests or running:
2074 while tests or running:
2074 if not done.empty() or running == self._jobs or not tests:
2075 if not done.empty() or running == self._jobs or not tests:
2075 try:
2076 try:
2076 done.get(True, 1)
2077 done.get(True, 1)
2077 running -= 1
2078 running -= 1
2078 if result and result.shouldStop:
2079 if result and result.shouldStop:
2079 stoppedearly = True
2080 stoppedearly = True
2080 break
2081 break
2081 except queue.Empty:
2082 except queue.Empty:
2082 continue
2083 continue
2083 if tests and not running == self._jobs:
2084 if tests and not running == self._jobs:
2084 test = tests.pop(0)
2085 test = tests.pop(0)
2085 if self._loop:
2086 if self._loop:
2086 if getattr(test, 'should_reload', False):
2087 if getattr(test, 'should_reload', False):
2087 num_tests[0] += 1
2088 num_tests[0] += 1
2088 tests.append(
2089 tests.append(
2089 self._loadtest(test, num_tests[0]))
2090 self._loadtest(test, num_tests[0]))
2090 else:
2091 else:
2091 tests.append(test)
2092 tests.append(test)
2092 if self._jobs == 1:
2093 if self._jobs == 1:
2093 job(test, result)
2094 job(test, result)
2094 else:
2095 else:
2095 t = threading.Thread(target=job, name=test.name,
2096 t = threading.Thread(target=job, name=test.name,
2096 args=(test, result))
2097 args=(test, result))
2097 t.start()
2098 t.start()
2098 running += 1
2099 running += 1
2099
2100
2100 # If we stop early we still need to wait on started tests to
2101 # If we stop early we still need to wait on started tests to
2101 # finish. Otherwise, there is a race between the test completing
2102 # finish. Otherwise, there is a race between the test completing
2102 # and the test's cleanup code running. This could result in the
2103 # and the test's cleanup code running. This could result in the
2103 # test reporting incorrect results.
2104 # test reporting incorrect results.
2104 if stoppedearly:
2105 if stoppedearly:
2105 while running:
2106 while running:
2106 try:
2107 try:
2107 done.get(True, 1)
2108 done.get(True, 1)
2108 running -= 1
2109 running -= 1
2109 except queue.Empty:
2110 except queue.Empty:
2110 continue
2111 continue
2111 except KeyboardInterrupt:
2112 except KeyboardInterrupt:
2112 for test in runtests:
2113 for test in runtests:
2113 test.abort()
2114 test.abort()
2114
2115
2115 channels = []
2116 channels = []
2116
2117
2117 return result
2118 return result
2118
2119
2119 # Save the most recent 5 wall-clock runtimes of each test to a
2120 # Save the most recent 5 wall-clock runtimes of each test to a
2120 # human-readable text file named .testtimes. Tests are sorted
2121 # human-readable text file named .testtimes. Tests are sorted
2121 # alphabetically, while times for each test are listed from oldest to
2122 # alphabetically, while times for each test are listed from oldest to
2122 # newest.
2123 # newest.
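# Each line has the form "<test name> <time1> <time2> ...", for example
# (illustrative values only):
#   test-commit.t 1.234 1.198 1.251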
2123
2124
2124 def loadtimes(outputdir):
2125 def loadtimes(outputdir):
2125 times = []
2126 times = []
2126 try:
2127 try:
2127 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2128 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2128 for line in fp:
2129 for line in fp:
2129 m = re.match('(.*?) ([0-9. ]+)', line)
2130 m = re.match('(.*?) ([0-9. ]+)', line)
2130 times.append((m.group(1),
2131 times.append((m.group(1),
2131 [float(t) for t in m.group(2).split()]))
2132 [float(t) for t in m.group(2).split()]))
2132 except IOError as err:
2133 except IOError as err:
2133 if err.errno != errno.ENOENT:
2134 if err.errno != errno.ENOENT:
2134 raise
2135 raise
2135 return times
2136 return times
2136
2137
2137 def savetimes(outputdir, result):
2138 def savetimes(outputdir, result):
2138 saved = dict(loadtimes(outputdir))
2139 saved = dict(loadtimes(outputdir))
2139 maxruns = 5
2140 maxruns = 5
2140 skipped = set([str(t[0]) for t in result.skipped])
2141 skipped = set([str(t[0]) for t in result.skipped])
2141 for tdata in result.times:
2142 for tdata in result.times:
2142 test, real = tdata[0], tdata[3]
2143 test, real = tdata[0], tdata[3]
2143 if test not in skipped:
2144 if test not in skipped:
2144 ts = saved.setdefault(test, [])
2145 ts = saved.setdefault(test, [])
2145 ts.append(real)
2146 ts.append(real)
2146 ts[:] = ts[-maxruns:]
2147 ts[:] = ts[-maxruns:]
2147
2148
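# Write the merged times to a temporary file in the output directory, then
# swap it into place; OSError during the swap is swallowed so a problem saving
# times does not abort the run.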
2148 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2149 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2149 dir=outputdir, text=True)
2150 dir=outputdir, text=True)
2150 with os.fdopen(fd, 'w') as fp:
2151 with os.fdopen(fd, 'w') as fp:
2151 for name, ts in sorted(saved.items()):
2152 for name, ts in sorted(saved.items()):
2152 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2153 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2153 timepath = os.path.join(outputdir, b'.testtimes')
2154 timepath = os.path.join(outputdir, b'.testtimes')
2154 try:
2155 try:
2155 os.unlink(timepath)
2156 os.unlink(timepath)
2156 except OSError:
2157 except OSError:
2157 pass
2158 pass
2158 try:
2159 try:
2159 os.rename(tmpname, timepath)
2160 os.rename(tmpname, timepath)
2160 except OSError:
2161 except OSError:
2161 pass
2162 pass
2162
2163
2163 class TextTestRunner(unittest.TextTestRunner):
2164 class TextTestRunner(unittest.TextTestRunner):
2164 """Custom unittest test runner that uses appropriate settings."""
2165 """Custom unittest test runner that uses appropriate settings."""
2165
2166
2166 def __init__(self, runner, *args, **kwargs):
2167 def __init__(self, runner, *args, **kwargs):
2167 super(TextTestRunner, self).__init__(*args, **kwargs)
2168 super(TextTestRunner, self).__init__(*args, **kwargs)
2168
2169
2169 self._runner = runner
2170 self._runner = runner
2170
2171
2171 self._result = getTestResult()(self._runner.options, self.stream,
2172 self._result = getTestResult()(self._runner.options, self.stream,
2172 self.descriptions, self.verbosity)
2173 self.descriptions, self.verbosity)
2173
2174
2174 def listtests(self, test):
2175 def listtests(self, test):
2175 test = sorted(test, key=lambda t: t.name)
2176 test = sorted(test, key=lambda t: t.name)
2176
2177
2177 self._result.onStart(test)
2178 self._result.onStart(test)
2178
2179
2179 for t in test:
2180 for t in test:
2180 print(t.name)
2181 print(t.name)
2181 self._result.addSuccess(t)
2182 self._result.addSuccess(t)
2182
2183
2183 if self._runner.options.xunit:
2184 if self._runner.options.xunit:
2184 with open(self._runner.options.xunit, "wb") as xuf:
2185 with open(self._runner.options.xunit, "wb") as xuf:
2185 self._writexunit(self._result, xuf)
2186 self._writexunit(self._result, xuf)
2186
2187
2187 if self._runner.options.json:
2188 if self._runner.options.json:
2188 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2189 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2189 with open(jsonpath, 'w') as fp:
2190 with open(jsonpath, 'w') as fp:
2190 self._writejson(self._result, fp)
2191 self._writejson(self._result, fp)
2191
2192
2192 return self._result
2193 return self._result
2193
2194
2194 def run(self, test):
2195 def run(self, test):
2195 self._result.onStart(test)
2196 self._result.onStart(test)
2196 test(self._result)
2197 test(self._result)
2197
2198
2198 failed = len(self._result.failures)
2199 failed = len(self._result.failures)
2199 skipped = len(self._result.skipped)
2200 skipped = len(self._result.skipped)
2200 ignored = len(self._result.ignored)
2201 ignored = len(self._result.ignored)
2201
2202
2202 with iolock:
2203 with iolock:
2203 self.stream.writeln('')
2204 self.stream.writeln('')
2204
2205
2205 if not self._runner.options.noskips:
2206 if not self._runner.options.noskips:
2206 for test, msg in self._result.skipped:
2207 for test, msg in self._result.skipped:
2207 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2208 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2208 msg = highlightmsg(formatted, self._result.color)
2209 msg = highlightmsg(formatted, self._result.color)
2209 self.stream.write(msg)
2210 self.stream.write(msg)
2210 for test, msg in self._result.failures:
2211 for test, msg in self._result.failures:
2211 formatted = 'Failed %s: %s\n' % (test.name, msg)
2212 formatted = 'Failed %s: %s\n' % (test.name, msg)
2212 self.stream.write(highlightmsg(formatted, self._result.color))
2213 self.stream.write(highlightmsg(formatted, self._result.color))
2213 for test, msg in self._result.errors:
2214 for test, msg in self._result.errors:
2214 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2215 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2215
2216
2216 if self._runner.options.xunit:
2217 if self._runner.options.xunit:
2217 with open(self._runner.options.xunit, "wb") as xuf:
2218 with open(self._runner.options.xunit, "wb") as xuf:
2218 self._writexunit(self._result, xuf)
2219 self._writexunit(self._result, xuf)
2219
2220
2220 if self._runner.options.json:
2221 if self._runner.options.json:
2221 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2222 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2222 with open(jsonpath, 'w') as fp:
2223 with open(jsonpath, 'w') as fp:
2223 self._writejson(self._result, fp)
2224 self._writejson(self._result, fp)
2224
2225
2225 self._runner._checkhglib('Tested')
2226 self._runner._checkhglib('Tested')
2226
2227
2227 savetimes(self._runner._outputdir, self._result)
2228 savetimes(self._runner._outputdir, self._result)
2228
2229
2229 if failed and self._runner.options.known_good_rev:
2230 if failed and self._runner.options.known_good_rev:
2230 self._bisecttests(t for t, m in self._result.failures)
2231 self._bisecttests(t for t, m in self._result.failures)
2231 self.stream.writeln(
2232 self.stream.writeln(
2232 '# Ran %d tests, %d skipped, %d failed.'
2233 '# Ran %d tests, %d skipped, %d failed.'
2233 % (self._result.testsRun, skipped + ignored, failed))
2234 % (self._result.testsRun, skipped + ignored, failed))
2234 if failed:
2235 if failed:
2235 self.stream.writeln('python hash seed: %s' %
2236 self.stream.writeln('python hash seed: %s' %
2236 os.environ['PYTHONHASHSEED'])
2237 os.environ['PYTHONHASHSEED'])
2237 if self._runner.options.time:
2238 if self._runner.options.time:
2238 self.printtimes(self._result.times)
2239 self.printtimes(self._result.times)
2239
2240
2240 if self._runner.options.exceptions:
2241 if self._runner.options.exceptions:
2241 exceptions = aggregateexceptions(
2242 exceptions = aggregateexceptions(
2242 os.path.join(self._runner._outputdir, b'exceptions'))
2243 os.path.join(self._runner._outputdir, b'exceptions'))
2243
2244
2244 self.stream.writeln('Exceptions Report:')
2245 self.stream.writeln('Exceptions Report:')
2245 self.stream.writeln('%d total from %d frames' %
2246 self.stream.writeln('%d total from %d frames' %
2246 (exceptions['total'],
2247 (exceptions['total'],
2247 len(exceptions['exceptioncounts'])))
2248 len(exceptions['exceptioncounts'])))
2248 combined = exceptions['combined']
2249 combined = exceptions['combined']
2249 for key in sorted(combined, key=combined.get, reverse=True):
2250 for key in sorted(combined, key=combined.get, reverse=True):
2250 frame, line, exc = key
2251 frame, line, exc = key
2251 totalcount, testcount, leastcount, leasttest = combined[key]
2252 totalcount, testcount, leastcount, leasttest = combined[key]
2252
2253
2253 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2254 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2254 % (totalcount,
2255 % (totalcount,
2255 testcount,
2256 testcount,
2256 frame, exc,
2257 frame, exc,
2257 leasttest, leastcount))
2258 leasttest, leastcount))
2258
2259
2259 self.stream.flush()
2260 self.stream.flush()
2260
2261
2261 return self._result
2262 return self._result
2262
2263
2263 def _bisecttests(self, tests):
2264 def _bisecttests(self, tests):
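# For each failing test, drive `hg bisect` between the configured known-good
# revision and the working copy, re-running this script on the single test as
# the bisection command, then report the changeset that broke (or fixed) it.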
2264 bisectcmd = ['hg', 'bisect']
2265 bisectcmd = ['hg', 'bisect']
2265 bisectrepo = self._runner.options.bisect_repo
2266 bisectrepo = self._runner.options.bisect_repo
2266 if bisectrepo:
2267 if bisectrepo:
2267 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2268 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2268 def pread(args):
2269 def pread(args):
2269 env = os.environ.copy()
2270 env = os.environ.copy()
2270 env['HGPLAIN'] = '1'
2271 env['HGPLAIN'] = '1'
2271 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2272 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2272 stdout=subprocess.PIPE, env=env)
2273 stdout=subprocess.PIPE, env=env)
2273 data = p.stdout.read()
2274 data = p.stdout.read()
2274 p.wait()
2275 p.wait()
2275 return data
2276 return data
2276 for test in tests:
2277 for test in tests:
2277 pread(bisectcmd + ['--reset']),
2278 pread(bisectcmd + ['--reset']),
2278 pread(bisectcmd + ['--bad', '.'])
2279 pread(bisectcmd + ['--bad', '.'])
2279 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2280 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2280 # TODO: we probably need to forward more options
2281 # TODO: we probably need to forward more options
2281 # that alter hg's behavior inside the tests.
2282 # that alter hg's behavior inside the tests.
2282 opts = ''
2283 opts = ''
2283 withhg = self._runner.options.with_hg
2284 withhg = self._runner.options.with_hg
2284 if withhg:
2285 if withhg:
2285 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2286 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2286 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2287 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2287 test)
2288 test)
2288 data = pread(bisectcmd + ['--command', rtc])
2289 data = pread(bisectcmd + ['--command', rtc])
2289 m = re.search(
2290 m = re.search(
2290 (br'\nThe first (?P<goodbad>bad|good) revision '
2291 (br'\nThe first (?P<goodbad>bad|good) revision '
2291 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2292 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2292 br'summary: +(?P<summary>[^\n]+)\n'),
2293 br'summary: +(?P<summary>[^\n]+)\n'),
2293 data, (re.MULTILINE | re.DOTALL))
2294 data, (re.MULTILINE | re.DOTALL))
2294 if m is None:
2295 if m is None:
2295 self.stream.writeln(
2296 self.stream.writeln(
2296 'Failed to identify failure point for %s' % test)
2297 'Failed to identify failure point for %s' % test)
2297 continue
2298 continue
2298 dat = m.groupdict()
2299 dat = m.groupdict()
2299 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2300 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2300 self.stream.writeln(
2301 self.stream.writeln(
2301 '%s %s by %s (%s)' % (
2302 '%s %s by %s (%s)' % (
2302 test, verb, dat['node'].decode('ascii'),
2303 test, verb, dat['node'].decode('ascii'),
2303 dat['summary'].decode('utf8', 'ignore')))
2304 dat['summary'].decode('utf8', 'ignore')))
2304
2305
2305 def printtimes(self, times):
2306 def printtimes(self, times):
2306 # iolock held by run
2307 # iolock held by run
2307 self.stream.writeln('# Producing time report')
2308 self.stream.writeln('# Producing time report')
2308 times.sort(key=lambda t: (t[3]))
2309 times.sort(key=lambda t: (t[3]))
2309 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2310 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2310 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2311 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2311 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2312 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2312 for tdata in times:
2313 for tdata in times:
2313 test = tdata[0]
2314 test = tdata[0]
2314 cuser, csys, real, start, end = tdata[1:6]
2315 cuser, csys, real, start, end = tdata[1:6]
2315 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2316 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2316
2317
2317 @staticmethod
2318 @staticmethod
2318 def _writexunit(result, outf):
2319 def _writexunit(result, outf):
2319 # See http://llg.cubic.org/docs/junit/ for a reference.
2320 # See http://llg.cubic.org/docs/junit/ for a reference.
2320 timesd = dict((t[0], t[3]) for t in result.times)
2321 timesd = dict((t[0], t[3]) for t in result.times)
2321 doc = minidom.Document()
2322 doc = minidom.Document()
2322 s = doc.createElement('testsuite')
2323 s = doc.createElement('testsuite')
2323 s.setAttribute('name', 'run-tests')
2324 s.setAttribute('name', 'run-tests')
2324 s.setAttribute('tests', str(result.testsRun))
2325 s.setAttribute('tests', str(result.testsRun))
2325 s.setAttribute('errors', "0") # TODO
2326 s.setAttribute('errors', "0") # TODO
2326 s.setAttribute('failures', str(len(result.failures)))
2327 s.setAttribute('failures', str(len(result.failures)))
2327 s.setAttribute('skipped', str(len(result.skipped) +
2328 s.setAttribute('skipped', str(len(result.skipped) +
2328 len(result.ignored)))
2329 len(result.ignored)))
2329 doc.appendChild(s)
2330 doc.appendChild(s)
2330 for tc in result.successes:
2331 for tc in result.successes:
2331 t = doc.createElement('testcase')
2332 t = doc.createElement('testcase')
2332 t.setAttribute('name', tc.name)
2333 t.setAttribute('name', tc.name)
2333 tctime = timesd.get(tc.name)
2334 tctime = timesd.get(tc.name)
2334 if tctime is not None:
2335 if tctime is not None:
2335 t.setAttribute('time', '%.3f' % tctime)
2336 t.setAttribute('time', '%.3f' % tctime)
2336 s.appendChild(t)
2337 s.appendChild(t)
2337 for tc, err in sorted(result.faildata.items()):
2338 for tc, err in sorted(result.faildata.items()):
2338 t = doc.createElement('testcase')
2339 t = doc.createElement('testcase')
2339 t.setAttribute('name', tc)
2340 t.setAttribute('name', tc)
2340 tctime = timesd.get(tc)
2341 tctime = timesd.get(tc)
2341 if tctime is not None:
2342 if tctime is not None:
2342 t.setAttribute('time', '%.3f' % tctime)
2343 t.setAttribute('time', '%.3f' % tctime)
2343 # createCDATASection expects a unicode or it will
2344 # createCDATASection expects a unicode or it will
2344 # convert using default conversion rules, which will
2345 # convert using default conversion rules, which will
2345 # fail if string isn't ASCII.
2346 # fail if string isn't ASCII.
2346 err = cdatasafe(err).decode('utf-8', 'replace')
2347 err = cdatasafe(err).decode('utf-8', 'replace')
2347 cd = doc.createCDATASection(err)
2348 cd = doc.createCDATASection(err)
2348 # Use 'failure' here instead of 'error' to match errors = 0,
2349 # Use 'failure' here instead of 'error' to match errors = 0,
2349 # failures = len(result.failures) in the testsuite element.
2350 # failures = len(result.failures) in the testsuite element.
2350 failelem = doc.createElement('failure')
2351 failelem = doc.createElement('failure')
2351 failelem.setAttribute('message', 'output changed')
2352 failelem.setAttribute('message', 'output changed')
2352 failelem.setAttribute('type', 'output-mismatch')
2353 failelem.setAttribute('type', 'output-mismatch')
2353 failelem.appendChild(cd)
2354 failelem.appendChild(cd)
2354 t.appendChild(failelem)
2355 t.appendChild(failelem)
2355 s.appendChild(t)
2356 s.appendChild(t)
2356 for tc, message in result.skipped:
2357 for tc, message in result.skipped:
2357 # According to the schema, 'skipped' has no attributes. So store
2358 # According to the schema, 'skipped' has no attributes. So store
2358 # the skip message as a text node instead.
2359 # the skip message as a text node instead.
2359 t = doc.createElement('testcase')
2360 t = doc.createElement('testcase')
2360 t.setAttribute('name', tc.name)
2361 t.setAttribute('name', tc.name)
2361 binmessage = message.encode('utf-8')
2362 binmessage = message.encode('utf-8')
2362 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2363 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2363 cd = doc.createCDATASection(message)
2364 cd = doc.createCDATASection(message)
2364 skipelem = doc.createElement('skipped')
2365 skipelem = doc.createElement('skipped')
2365 skipelem.appendChild(cd)
2366 skipelem.appendChild(cd)
2366 t.appendChild(skipelem)
2367 t.appendChild(skipelem)
2367 s.appendChild(t)
2368 s.appendChild(t)
2368 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2369 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2369
2370
2370 @staticmethod
2371 @staticmethod
2371 def _writejson(result, outf):
2372 def _writejson(result, outf):
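# Emit the report as "testreport = {...}": one entry per test with its result
# and, when timing data exists, cuser/csys/time/start/end plus the decoded
# diff for failures; blacklisted tests only get a result field.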
2372 timesd = {}
2373 timesd = {}
2373 for tdata in result.times:
2374 for tdata in result.times:
2374 test = tdata[0]
2375 test = tdata[0]
2375 timesd[test] = tdata[1:]
2376 timesd[test] = tdata[1:]
2376
2377
2377 outcome = {}
2378 outcome = {}
2378 groups = [('success', ((tc, None)
2379 groups = [('success', ((tc, None)
2379 for tc in result.successes)),
2380 for tc in result.successes)),
2380 ('failure', result.failures),
2381 ('failure', result.failures),
2381 ('skip', result.skipped)]
2382 ('skip', result.skipped)]
2382 for res, testcases in groups:
2383 for res, testcases in groups:
2383 for tc, __ in testcases:
2384 for tc, __ in testcases:
2384 if tc.name in timesd:
2385 if tc.name in timesd:
2385 diff = result.faildata.get(tc.name, b'')
2386 diff = result.faildata.get(tc.name, b'')
2386 try:
2387 try:
2387 diff = diff.decode('unicode_escape')
2388 diff = diff.decode('unicode_escape')
2388 except UnicodeDecodeError as e:
2389 except UnicodeDecodeError as e:
2389 diff = '%r decoding diff, sorry' % e
2390 diff = '%r decoding diff, sorry' % e
2390 tres = {'result': res,
2391 tres = {'result': res,
2391 'time': ('%0.3f' % timesd[tc.name][2]),
2392 'time': ('%0.3f' % timesd[tc.name][2]),
2392 'cuser': ('%0.3f' % timesd[tc.name][0]),
2393 'cuser': ('%0.3f' % timesd[tc.name][0]),
2393 'csys': ('%0.3f' % timesd[tc.name][1]),
2394 'csys': ('%0.3f' % timesd[tc.name][1]),
2394 'start': ('%0.3f' % timesd[tc.name][3]),
2395 'start': ('%0.3f' % timesd[tc.name][3]),
2395 'end': ('%0.3f' % timesd[tc.name][4]),
2396 'end': ('%0.3f' % timesd[tc.name][4]),
2396 'diff': diff,
2397 'diff': diff,
2397 }
2398 }
2398 else:
2399 else:
2399 # blacklisted test
2400 # blacklisted test
2400 tres = {'result': res}
2401 tres = {'result': res}
2401
2402
2402 outcome[tc.name] = tres
2403 outcome[tc.name] = tres
2403 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2404 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2404 separators=(',', ': '))
2405 separators=(',', ': '))
2405 outf.writelines(("testreport =", jsonout))
2406 outf.writelines(("testreport =", jsonout))
2406
2407
2407 def sorttests(testdescs, previoustimes, shuffle=False):
2408 def sorttests(testdescs, previoustimes, shuffle=False):
2408 """Do an in-place sort of tests."""
2409 """Do an in-place sort of tests."""
2409 if shuffle:
2410 if shuffle:
2410 random.shuffle(testdescs)
2411 random.shuffle(testdescs)
2411 return
2412 return
2412
2413
2413 if previoustimes:
2414 if previoustimes:
2414 def sortkey(f):
2415 def sortkey(f):
2415 f = f['path']
2416 f = f['path']
2416 if f in previoustimes:
2417 if f in previoustimes:
2417 # Use most recent time as estimate
2418 # Use most recent time as estimate
2418 return -previoustimes[f][-1]
2419 return -previoustimes[f][-1]
2419 else:
2420 else:
2420 # Default to a rather arbitrary value of 1 second for new tests
2421 # Default to a rather arbitrary value of 1 second for new tests
2421 return -1.0
2422 return -1.0
2422 else:
2423 else:
2423 # keywords for slow tests
2424 # keywords for slow tests
2424 slow = {b'svn': 10,
2425 slow = {b'svn': 10,
2425 b'cvs': 10,
2426 b'cvs': 10,
2426 b'hghave': 10,
2427 b'hghave': 10,
2427 b'largefiles-update': 10,
2428 b'largefiles-update': 10,
2428 b'run-tests': 10,
2429 b'run-tests': 10,
2429 b'corruption': 10,
2430 b'corruption': 10,
2430 b'race': 10,
2431 b'race': 10,
2431 b'i18n': 10,
2432 b'i18n': 10,
2432 b'check': 100,
2433 b'check': 100,
2433 b'gendoc': 100,
2434 b'gendoc': 100,
2434 b'contrib-perf': 200,
2435 b'contrib-perf': 200,
2435 }
2436 }
2436 perf = {}
2437 perf = {}
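# perf caches each test's scheduling key: roughly the negated file size,
# multiplied by any matching `slow` keyword factors and damped for .py tests,
# so that bigger or known-slow tests are started first.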
2437
2438
2438 def sortkey(f):
2439 def sortkey(f):
2439 # run largest tests first, as they tend to take the longest
2440 # run largest tests first, as they tend to take the longest
2440 f = f['path']
2441 f = f['path']
2441 try:
2442 try:
2442 return perf[f]
2443 return perf[f]
2443 except KeyError:
2444 except KeyError:
2444 try:
2445 try:
2445 val = -os.stat(f).st_size
2446 val = -os.stat(f).st_size
2446 except OSError as e:
2447 except OSError as e:
2447 if e.errno != errno.ENOENT:
2448 if e.errno != errno.ENOENT:
2448 raise
2449 raise
2449 perf[f] = -1e9 # file does not exist, tell early
2450 perf[f] = -1e9 # file does not exist, tell early
2450 return -1e9
2451 return -1e9
2451 for kw, mul in slow.items():
2452 for kw, mul in slow.items():
2452 if kw in f:
2453 if kw in f:
2453 val *= mul
2454 val *= mul
2454 if f.endswith(b'.py'):
2455 if f.endswith(b'.py'):
2455 val /= 10.0
2456 val /= 10.0
2456 perf[f] = val / 1000.0
2457 perf[f] = val / 1000.0
2457 return perf[f]
2458 return perf[f]
2458
2459
2459 testdescs.sort(key=sortkey)
2460 testdescs.sort(key=sortkey)
2460
2461
2461 class TestRunner(object):
2462 class TestRunner(object):
2462 """Holds context for executing tests.
2463 """Holds context for executing tests.
2463
2464
2464 Tests rely on a lot of state. This object holds it for them.
2465 Tests rely on a lot of state. This object holds it for them.
2465 """
2466 """
2466
2467
2467 # Programs required to run tests.
2468 # Programs required to run tests.
2468 REQUIREDTOOLS = [
2469 REQUIREDTOOLS = [
2469 b'diff',
2470 b'diff',
2470 b'grep',
2471 b'grep',
2471 b'unzip',
2472 b'unzip',
2472 b'gunzip',
2473 b'gunzip',
2473 b'bunzip2',
2474 b'bunzip2',
2474 b'sed',
2475 b'sed',
2475 ]
2476 ]
2476
2477
2477 # Maps file extensions to test class.
2478 # Maps file extensions to test class.
2478 TESTTYPES = [
2479 TESTTYPES = [
2479 (b'.py', PythonTest),
2480 (b'.py', PythonTest),
2480 (b'.t', TTest),
2481 (b'.t', TTest),
2481 ]
2482 ]
2482
2483
2483 def __init__(self):
2484 def __init__(self):
2484 self.options = None
2485 self.options = None
2485 self._hgroot = None
2486 self._hgroot = None
2486 self._testdir = None
2487 self._testdir = None
2487 self._outputdir = None
2488 self._outputdir = None
2488 self._hgtmp = None
2489 self._hgtmp = None
2489 self._installdir = None
2490 self._installdir = None
2490 self._bindir = None
2491 self._bindir = None
2491 self._tmpbinddir = None
2492 self._tmpbinddir = None
2492 self._pythondir = None
2493 self._pythondir = None
2493 self._coveragefile = None
2494 self._coveragefile = None
2494 self._createdfiles = []
2495 self._createdfiles = []
2495 self._hgcommand = None
2496 self._hgcommand = None
2496 self._hgpath = None
2497 self._hgpath = None
2497 self._portoffset = 0
2498 self._portoffset = 0
2498 self._ports = {}
2499 self._ports = {}
2499
2500
2500 def run(self, args, parser=None):
2501 def run(self, args, parser=None):
2501 """Run the test suite."""
2502 """Run the test suite."""
2502 oldmask = os.umask(0o22)
2503 oldmask = os.umask(0o22)
2503 try:
2504 try:
2504 parser = parser or getparser()
2505 parser = parser or getparser()
2505 options = parseargs(args, parser)
2506 options = parseargs(args, parser)
2506 tests = [_bytespath(a) for a in options.tests]
2507 tests = [_bytespath(a) for a in options.tests]
2507 if options.test_list is not None:
2508 if options.test_list is not None:
2508 for listfile in options.test_list:
2509 for listfile in options.test_list:
2509 with open(listfile, 'rb') as f:
2510 with open(listfile, 'rb') as f:
2510 tests.extend(t for t in f.read().splitlines() if t)
2511 tests.extend(t for t in f.read().splitlines() if t)
2511 self.options = options
2512 self.options = options
2512
2513
2513 self._checktools()
2514 self._checktools()
2514 testdescs = self.findtests(tests)
2515 testdescs = self.findtests(tests)
2515 if options.profile_runner:
2516 if options.profile_runner:
2516 import statprof
2517 import statprof
2517 statprof.start()
2518 statprof.start()
2518 result = self._run(testdescs)
2519 result = self._run(testdescs)
2519 if options.profile_runner:
2520 if options.profile_runner:
2520 statprof.stop()
2521 statprof.stop()
2521 statprof.display()
2522 statprof.display()
2522 return result
2523 return result
2523
2524
2524 finally:
2525 finally:
2525 os.umask(oldmask)
2526 os.umask(oldmask)
2526
2527
2527 def _run(self, testdescs):
2528 def _run(self, testdescs):
2528 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2529 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2529 # assume all tests in same folder for now
2530 # assume all tests in same folder for now
2530 if testdescs:
2531 if testdescs:
2531 pathname = os.path.dirname(testdescs[0]['path'])
2532 pathname = os.path.dirname(testdescs[0]['path'])
2532 if pathname:
2533 if pathname:
2533 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2534 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2534 pathname)
2535 pathname)
2535 if self.options.outputdir:
2536 if self.options.outputdir:
2536 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2537 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2537 else:
2538 else:
2538 self._outputdir = self._testdir
2539 self._outputdir = self._testdir
2539 if testdescs and pathname:
2540 if testdescs and pathname:
2540 self._outputdir = os.path.join(self._outputdir, pathname)
2541 self._outputdir = os.path.join(self._outputdir, pathname)
2541 previoustimes = {}
2542 previoustimes = {}
2542 if self.options.order_by_runtime:
2543 if self.options.order_by_runtime:
2543 previoustimes = dict(loadtimes(self._outputdir))
2544 previoustimes = dict(loadtimes(self._outputdir))
2544 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2545 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2545
2546
2546 if 'PYTHONHASHSEED' not in os.environ:
2547 if 'PYTHONHASHSEED' not in os.environ:
2547 # use a random python hash seed all the time
2548 # use a random python hash seed all the time
2548 # we generate the randomness ourselves so we know which seed was used
2549 # we generate the randomness ourselves so we know which seed was used
2549 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2550 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2550
2551
2551 if self.options.tmpdir:
2552 if self.options.tmpdir:
2552 self.options.keep_tmpdir = True
2553 self.options.keep_tmpdir = True
2553 tmpdir = _bytespath(self.options.tmpdir)
2554 tmpdir = _bytespath(self.options.tmpdir)
2554 if os.path.exists(tmpdir):
2555 if os.path.exists(tmpdir):
2555 # Meaning of tmpdir has changed since 1.3: we used to create
2556 # Meaning of tmpdir has changed since 1.3: we used to create
2556 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2557 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2557 # tmpdir already exists.
2558 # tmpdir already exists.
2558 print("error: temp dir %r already exists" % tmpdir)
2559 print("error: temp dir %r already exists" % tmpdir)
2559 return 1
2560 return 1
2560
2561
2561 os.makedirs(tmpdir)
2562 os.makedirs(tmpdir)
2562 else:
2563 else:
2563 d = None
2564 d = None
2564 if os.name == 'nt':
2565 if os.name == 'nt':
2565 # without this, we get the default temp dir location, but
2566 # without this, we get the default temp dir location, but
2566 # in all lowercase, which causes trouble with paths (issue3490)
2567 # in all lowercase, which causes trouble with paths (issue3490)
2567 d = osenvironb.get(b'TMP', None)
2568 d = osenvironb.get(b'TMP', None)
2568 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2569 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2569
2570
2570 self._hgtmp = osenvironb[b'HGTMP'] = (
2571 self._hgtmp = osenvironb[b'HGTMP'] = (
2571 os.path.realpath(tmpdir))
2572 os.path.realpath(tmpdir))
2572
2573
2573 if self.options.with_hg:
2574 if self.options.with_hg:
2574 self._installdir = None
2575 self._installdir = None
2575 whg = self.options.with_hg
2576 whg = self.options.with_hg
2576 self._bindir = os.path.dirname(os.path.realpath(whg))
2577 self._bindir = os.path.dirname(os.path.realpath(whg))
2577 assert isinstance(self._bindir, bytes)
2578 assert isinstance(self._bindir, bytes)
2578 self._hgcommand = os.path.basename(whg)
2579 self._hgcommand = os.path.basename(whg)
2579 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2580 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2580 os.makedirs(self._tmpbindir)
2581 os.makedirs(self._tmpbindir)
2581
2582
2582 normbin = os.path.normpath(os.path.abspath(whg))
2583 normbin = os.path.normpath(os.path.abspath(whg))
2583 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2584 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2584
2585
2585 # Other Python scripts in the test harness need to
2586 # Other Python scripts in the test harness need to
2586 # `import mercurial`. If `hg` is a Python script, we assume
2587 # `import mercurial`. If `hg` is a Python script, we assume
2587 # the Mercurial modules are relative to its path and tell the tests
2588 # the Mercurial modules are relative to its path and tell the tests
2588 # to load Python modules from its directory.
2589 # to load Python modules from its directory.
2589 with open(whg, 'rb') as fh:
2590 with open(whg, 'rb') as fh:
2590 initial = fh.read(1024)
2591 initial = fh.read(1024)
2591
2592
2592 if re.match(b'#!.*python', initial):
2593 if re.match(b'#!.*python', initial):
2593 self._pythondir = self._bindir
2594 self._pythondir = self._bindir
2594 # If it looks like our in-repo Rust binary, use the source root.
2595 # If it looks like our in-repo Rust binary, use the source root.
2595 # This is a bit hacky. But rhg is still not supported outside the
2596 # This is a bit hacky. But rhg is still not supported outside the
2596 # source directory. So until it is, do the simple thing.
2597 # source directory. So until it is, do the simple thing.
2597 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2598 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2598 self._pythondir = os.path.dirname(self._testdir)
2599 self._pythondir = os.path.dirname(self._testdir)
2599 # Fall back to the legacy behavior.
2600 # Fall back to the legacy behavior.
2600 else:
2601 else:
2601 self._pythondir = self._bindir
2602 self._pythondir = self._bindir
2602
2603
2603 else:
2604 else:
2604 self._installdir = os.path.join(self._hgtmp, b"install")
2605 self._installdir = os.path.join(self._hgtmp, b"install")
2605 self._bindir = os.path.join(self._installdir, b"bin")
2606 self._bindir = os.path.join(self._installdir, b"bin")
2606 self._hgcommand = b'hg'
2607 self._hgcommand = b'hg'
2607 self._tmpbindir = self._bindir
2608 self._tmpbindir = self._bindir
2608 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2609 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2609
2610
2610 # set CHGHG, then replace "hg" command by "chg"
2611 # set CHGHG, then replace "hg" command by "chg"
2611 chgbindir = self._bindir
2612 chgbindir = self._bindir
2612 if self.options.chg or self.options.with_chg:
2613 if self.options.chg or self.options.with_chg:
2613 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2614 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2614 else:
2615 else:
2615 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2616 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2616 if self.options.chg:
2617 if self.options.chg:
2617 self._hgcommand = b'chg'
2618 self._hgcommand = b'chg'
2618 elif self.options.with_chg:
2619 elif self.options.with_chg:
2619 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2620 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2620 self._hgcommand = os.path.basename(self.options.with_chg)
2621 self._hgcommand = os.path.basename(self.options.with_chg)
2621
2622
2622 osenvironb[b"BINDIR"] = self._bindir
2623 osenvironb[b"BINDIR"] = self._bindir
2623 osenvironb[b"PYTHON"] = PYTHON
2624 osenvironb[b"PYTHON"] = PYTHON
2624
2625
2625 fileb = _bytespath(__file__)
2626 fileb = _bytespath(__file__)
2626 runtestdir = os.path.abspath(os.path.dirname(fileb))
2627 runtestdir = os.path.abspath(os.path.dirname(fileb))
2627 osenvironb[b'RUNTESTDIR'] = runtestdir
2628 osenvironb[b'RUNTESTDIR'] = runtestdir
2628 if PYTHON3:
2629 if PYTHON3:
2629 sepb = _bytespath(os.pathsep)
2630 sepb = _bytespath(os.pathsep)
2630 else:
2631 else:
2631 sepb = os.pathsep
2632 sepb = os.pathsep
2632 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2633 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
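# Put the hg binary directory and this harness's directory at the front of
# PATH so tests pick up the freshly installed (or --with-hg) hg and the helper
# scripts shipped next to run-tests.py; the checks below additionally insert
# the chg directory, the real script location, and the test directory when
# they differ.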
2633 if os.path.islink(__file__):
2634 if os.path.islink(__file__):
2634 # test helper will likely be at the end of the symlink
2635 # test helper will likely be at the end of the symlink
2635 realfile = os.path.realpath(fileb)
2636 realfile = os.path.realpath(fileb)
2636 realdir = os.path.abspath(os.path.dirname(realfile))
2637 realdir = os.path.abspath(os.path.dirname(realfile))
2637 path.insert(2, realdir)
2638 path.insert(2, realdir)
2638 if chgbindir != self._bindir:
2639 if chgbindir != self._bindir:
2639 path.insert(1, chgbindir)
2640 path.insert(1, chgbindir)
2640 if self._testdir != runtestdir:
2641 if self._testdir != runtestdir:
2641 path = [self._testdir] + path
2642 path = [self._testdir] + path
2642 if self._tmpbindir != self._bindir:
2643 if self._tmpbindir != self._bindir:
2643 path = [self._tmpbindir] + path
2644 path = [self._tmpbindir] + path
2644 osenvironb[b"PATH"] = sepb.join(path)
2645 osenvironb[b"PATH"] = sepb.join(path)
2645
2646
2646 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2647 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2647 # can run .../tests/run-tests.py test-foo where test-foo
2648 # can run .../tests/run-tests.py test-foo where test-foo
2648 # adds an extension to HGRC. Also include the run-tests.py directory to
2649 # adds an extension to HGRC. Also include the run-tests.py directory to
2649 # import modules like heredoctest.
2650 # import modules like heredoctest.
2650 pypath = [self._pythondir, self._testdir, runtestdir]
2651 pypath = [self._pythondir, self._testdir, runtestdir]
2651 # We have to augment PYTHONPATH, rather than simply replacing
2652 # We have to augment PYTHONPATH, rather than simply replacing
2652 # it, in case external libraries are only available via current
2653 # it, in case external libraries are only available via current
2653 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2654 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2654 # are in /opt/subversion.)
2655 # are in /opt/subversion.)
2655 oldpypath = osenvironb.get(IMPL_PATH)
2656 oldpypath = osenvironb.get(IMPL_PATH)
2656 if oldpypath:
2657 if oldpypath:
2657 pypath.append(oldpypath)
2658 pypath.append(oldpypath)
2658 osenvironb[IMPL_PATH] = sepb.join(pypath)
2659 osenvironb[IMPL_PATH] = sepb.join(pypath)
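# Illustration only (the exact entries depend on the install layout and any
# pre-existing PYTHONPATH): after the join above, IMPL_PATH reads roughly
#   <pythondir>:<testdir>:<runtestdir>[:<old PYTHONPATH>]
# i.e. the temporary install's lib/python first, then the test and harness
# directories, with the caller's original search path kept at the end.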
2659
2660
2660 if self.options.pure:
2661 if self.options.pure:
2661 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2662 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2662 os.environ["HGMODULEPOLICY"] = "py"
2663 os.environ["HGMODULEPOLICY"] = "py"
2663
2664
2664 if self.options.allow_slow_tests:
2665 if self.options.allow_slow_tests:
2665 os.environ["HGTEST_SLOW"] = "slow"
2666 os.environ["HGTEST_SLOW"] = "slow"
2666 elif 'HGTEST_SLOW' in os.environ:
2667 elif 'HGTEST_SLOW' in os.environ:
2667 del os.environ['HGTEST_SLOW']
2668 del os.environ['HGTEST_SLOW']
2668
2669
2669 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2670 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2670
2671
2671 if self.options.exceptions:
2672 if self.options.exceptions:
2672 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2673 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2673 try:
2674 try:
2674 os.makedirs(exceptionsdir)
2675 os.makedirs(exceptionsdir)
2675 except OSError as e:
2676 except OSError as e:
2676 if e.errno != errno.EEXIST:
2677 if e.errno != errno.EEXIST:
2677 raise
2678 raise
2678
2679
2679 # Remove all existing exception reports.
2680 # Remove all existing exception reports.
2680 for f in os.listdir(exceptionsdir):
2681 for f in os.listdir(exceptionsdir):
2681 os.unlink(os.path.join(exceptionsdir, f))
2682 os.unlink(os.path.join(exceptionsdir, f))
2682
2683
2683 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2684 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2684 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2685 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2685 self.options.extra_config_opt.append(
2686 self.options.extra_config_opt.append(
2686 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2687 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2687
2688
2688 vlog("# Using TESTDIR", self._testdir)
2689 vlog("# Using TESTDIR", self._testdir)
2689 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2690 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2690 vlog("# Using HGTMP", self._hgtmp)
2691 vlog("# Using HGTMP", self._hgtmp)
2691 vlog("# Using PATH", os.environ["PATH"])
2692 vlog("# Using PATH", os.environ["PATH"])
2692 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2693 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2693 vlog("# Writing to directory", self._outputdir)
2694 vlog("# Writing to directory", self._outputdir)
2694
2695
2695 try:
2696 try:
2696 return self._runtests(testdescs) or 0
2697 return self._runtests(testdescs) or 0
2697 finally:
2698 finally:
2698 time.sleep(.1)
2699 time.sleep(.1)
2699 self._cleanup()
2700 self._cleanup()
2700
2701
2701 def findtests(self, args):
2702 def findtests(self, args):
2702 """Finds possible test files from arguments.
2703 """Finds possible test files from arguments.
2703
2704
2704 If you wish to inject custom tests into the test harness, this would
2705 If you wish to inject custom tests into the test harness, this would
2705 be a good function to monkeypatch or override in a derived class.
2706 be a good function to monkeypatch or override in a derived class.
2706 """
2707 """
2707 if not args:
2708 if not args:
2708 if self.options.changed:
2709 if self.options.changed:
2709 proc = Popen4('hg st --rev "%s" -man0 .' %
2710 proc = Popen4('hg st --rev "%s" -man0 .' %
2710 self.options.changed, None, 0)
2711 self.options.changed, None, 0)
2711 stdout, stderr = proc.communicate()
2712 stdout, stderr = proc.communicate()
2712 args = stdout.strip(b'\0').split(b'\0')
2713 args = stdout.strip(b'\0').split(b'\0')
2713 else:
2714 else:
2714 args = os.listdir(b'.')
2715 args = os.listdir(b'.')
2715
2716
2716 expanded_args = []
2717 expanded_args = []
2717 for arg in args:
2718 for arg in args:
2718 if os.path.isdir(arg):
2719 if os.path.isdir(arg):
2719 if not arg.endswith(b'/'):
2720 if not arg.endswith(b'/'):
2720 arg += b'/'
2721 arg += b'/'
2721 expanded_args.extend([arg + a for a in os.listdir(arg)])
2722 expanded_args.extend([arg + a for a in os.listdir(arg)])
2722 else:
2723 else:
2723 expanded_args.append(arg)
2724 expanded_args.append(arg)
2724 args = expanded_args
2725 args = expanded_args
2725
2726
2726 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2727 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
2727 tests = []
2728 tests = []
2728 for t in args:
2729 for t in args:
2729 case = []
2730 case = []
2730
2731
2731 if not (os.path.basename(t).startswith(b'test-')
2732 if not (os.path.basename(t).startswith(b'test-')
2732 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2733 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2733
2734
2734 m = testcasepattern.match(t)
2735 m = testcasepattern.match(t)
2735 if m is not None:
2736 if m is not None:
2736 t, _, casestr = m.groups()
2737 t, _, casestr = m.groups()
2737 if casestr:
2738 if casestr:
2738 case = casestr.split(b'#')
2739 case = casestr.split(b'#')
2739 else:
2740 else:
2740 continue
2741 continue
2741
2742
2742 if t.endswith(b'.t'):
2743 if t.endswith(b'.t'):
2743 # .t file may contain multiple test cases
2744 # .t file may contain multiple test cases
2744 casedimensions = parsettestcases(t)
2745 casedimensions = parsettestcases(t)
2745 if casedimensions:
2746 if casedimensions:
2746 cases = []
2747 cases = []
2747 def addcases(case, casedimensions):
2748 def addcases(case, casedimensions):
2748 if not casedimensions:
2749 if not casedimensions:
2749 cases.append(case)
2750 cases.append(case)
2750 else:
2751 else:
2751 for c in casedimensions[0]:
2752 for c in casedimensions[0]:
2752 addcases(case + [c], casedimensions[1:])
2753 addcases(case + [c], casedimensions[1:])
2753 addcases([], casedimensions)
2754 addcases([], casedimensions)
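# For example (hypothetical dimensions): casedimensions
# [[b'a', b'b'], [b'x', b'y']] expands through the recursion above into
# cases == [[b'a', b'x'], [b'a', b'y'], [b'b', b'x'], [b'b', b'y']],
# i.e. the cartesian product of the declared case dimensions.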
2754 if case and case in cases:
2755 if case and case in cases:
2755 cases = [case]
2756 cases = [case]
2756 elif case:
2757 elif case:
2757 # Ignore invalid cases
2758 # Ignore invalid cases
2758 cases = []
2759 cases = []
2759 else:
2760 else:
2760 pass
2761 pass
2761 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2762 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2762 else:
2763 else:
2763 tests.append({'path': t})
2764 tests.append({'path': t})
2764 else:
2765 else:
2765 tests.append({'path': t})
2766 tests.append({'path': t})
2766 return tests
2767 return tests
2767
2768
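As the findtests() docstring above suggests, a derived harness can override test
discovery. A minimal sketch under that assumption (the subclass name and the extra
test path are hypothetical, not part of run-tests.py):

    class CustomRunner(TestRunner):
        def findtests(self, args):
            # Start from the stock discovery, then append extra descriptions
            # in the same {'path': ..., 'case': ...} shape used above.
            tests = super(CustomRunner, self).findtests(args)
            tests.append({'path': b'test-downstream-feature.t'})  # hypothetical
            return tests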
2768 def _runtests(self, testdescs):
2769 def _runtests(self, testdescs):
2769 def _reloadtest(test, i):
2770 def _reloadtest(test, i):
2770 # convert a test back to its description dict
2771 # convert a test back to its description dict
2771 desc = {'path': test.path}
2772 desc = {'path': test.path}
2772 case = getattr(test, '_case', [])
2773 case = getattr(test, '_case', [])
2773 if case:
2774 if case:
2774 desc['case'] = case
2775 desc['case'] = case
2775 return self._gettest(desc, i)
2776 return self._gettest(desc, i)
2776
2777
2777 try:
2778 try:
2778 if self.options.restart:
2779 if self.options.restart:
2779 orig = list(testdescs)
2780 orig = list(testdescs)
2780 while testdescs:
2781 while testdescs:
2781 desc = testdescs[0]
2782 desc = testdescs[0]
2782 # desc['path'] is a relative path
2783 # desc['path'] is a relative path
2783 if 'case' in desc:
2784 if 'case' in desc:
2784 casestr = b'#'.join(desc['case'])
2785 casestr = b'#'.join(desc['case'])
2785 errpath = b'%s#%s.err' % (desc['path'], casestr)
2786 errpath = b'%s#%s.err' % (desc['path'], casestr)
2786 else:
2787 else:
2787 errpath = b'%s.err' % desc['path']
2788 errpath = b'%s.err' % desc['path']
2788 errpath = os.path.join(self._outputdir, errpath)
2789 errpath = os.path.join(self._outputdir, errpath)
2789 if os.path.exists(errpath):
2790 if os.path.exists(errpath):
2790 break
2791 break
2791 testdescs.pop(0)
2792 testdescs.pop(0)
2792 if not testdescs:
2793 if not testdescs:
2793 print("running all tests")
2794 print("running all tests")
2794 testdescs = orig
2795 testdescs = orig
2795
2796
2796 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2797 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2797
2798
2798 jobs = min(len(tests), self.options.jobs)
2799 jobs = min(len(tests), self.options.jobs)
2799
2800
2800 failed = False
2801 failed = False
2801 kws = self.options.keywords
2802 kws = self.options.keywords
2802 if kws is not None and PYTHON3:
2803 if kws is not None and PYTHON3:
2803 kws = kws.encode('utf-8')
2804 kws = kws.encode('utf-8')
2804
2805
2805 suite = TestSuite(self._testdir,
2806 suite = TestSuite(self._testdir,
2806 jobs=jobs,
2807 jobs=jobs,
2807 whitelist=self.options.whitelisted,
2808 whitelist=self.options.whitelisted,
2808 blacklist=self.options.blacklist,
2809 blacklist=self.options.blacklist,
2809 retest=self.options.retest,
2810 retest=self.options.retest,
2810 keywords=kws,
2811 keywords=kws,
2811 loop=self.options.loop,
2812 loop=self.options.loop,
2812 runs_per_test=self.options.runs_per_test,
2813 runs_per_test=self.options.runs_per_test,
2813 showchannels=self.options.showchannels,
2814 showchannels=self.options.showchannels,
2814 tests=tests, loadtest=_reloadtest)
2815 tests=tests, loadtest=_reloadtest)
2815 verbosity = 1
2816 verbosity = 1
2816 if self.options.list_tests:
2817 if self.options.list_tests:
2817 verbosity = 0
2818 verbosity = 0
2818 elif self.options.verbose:
2819 elif self.options.verbose:
2819 verbosity = 2
2820 verbosity = 2
2820 runner = TextTestRunner(self, verbosity=verbosity)
2821 runner = TextTestRunner(self, verbosity=verbosity)
2821
2822
2822 if self.options.list_tests:
2823 if self.options.list_tests:
2823 result = runner.listtests(suite)
2824 result = runner.listtests(suite)
2824 else:
2825 else:
2825 if self._installdir:
2826 if self._installdir:
2826 self._installhg()
2827 self._installhg()
2827 self._checkhglib("Testing")
2828 self._checkhglib("Testing")
2828 else:
2829 else:
2829 self._usecorrectpython()
2830 self._usecorrectpython()
2830 if self.options.chg:
2831 if self.options.chg:
2831 assert self._installdir
2832 assert self._installdir
2832 self._installchg()
2833 self._installchg()
2833
2834
2834 log('running %d tests using %d parallel processes' % (
2835 log('running %d tests using %d parallel processes' % (
2835 len(tests), jobs))
2836 len(tests), jobs))
2836
2837
2837 result = runner.run(suite)
2838 result = runner.run(suite)
2838
2839
2839 if result.failures:
2840 if result.failures:
2840 failed = True
2841 failed = True
2841
2842
2842 result.onEnd()
2843 result.onEnd()
2843
2844
2844 if self.options.anycoverage:
2845 if self.options.anycoverage:
2845 self._outputcoverage()
2846 self._outputcoverage()
2846 except KeyboardInterrupt:
2847 except KeyboardInterrupt:
2847 failed = True
2848 failed = True
2848 print("\ninterrupted!")
2849 print("\ninterrupted!")
2849
2850
2850 if failed:
2851 if failed:
2851 return 1
2852 return 1
2852
2853
2853 def _getport(self, count):
2854 def _getport(self, count):
2854 port = self._ports.get(count) # do we have a cached entry?
2855 port = self._ports.get(count) # do we have a cached entry?
2855 if port is None:
2856 if port is None:
2856 portneeded = 3
2857 portneeded = 3
2857 # after 100 tries we just give up and let the test report the failure
2858 # after 100 tries we just give up and let the test report the failure
2858 for tries in xrange(100):
2859 for tries in xrange(100):
2859 allfree = True
2860 allfree = True
2860 port = self.options.port + self._portoffset
2861 port = self.options.port + self._portoffset
2861 for idx in xrange(portneeded):
2862 for idx in xrange(portneeded):
2862 if not checkportisavailable(port + idx):
2863 if not checkportisavailable(port + idx):
2863 allfree = False
2864 allfree = False
2864 break
2865 break
2865 self._portoffset += portneeded
2866 self._portoffset += portneeded
2866 if allfree:
2867 if allfree:
2867 break
2868 break
2868 self._ports[count] = port
2869 self._ports[count] = port
2869 return port
2870 return port
2870
2871
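For reference, a sketch of the kind of availability probe _getport() leans on; the
harness uses its own checkportisavailable() helper, so the socket-based version
below is an illustrative assumption rather than the exact implementation:

    import socket

    def probeport(port):
        # Sketch only: True if the TCP port can currently be bound on localhost.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(('localhost', port))
            return True
        except socket.error:
            return False
        finally:
            s.close()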
2871 def _gettest(self, testdesc, count):
2872 def _gettest(self, testdesc, count):
2872 """Obtain a Test by looking at its filename.
2873 """Obtain a Test by looking at its filename.
2873
2874
2874 Returns a Test instance. The Test may not be runnable if it doesn't
2875 Returns a Test instance. The Test may not be runnable if it doesn't
2875 map to a known type.
2876 map to a known type.
2876 """
2877 """
2877 path = testdesc['path']
2878 path = testdesc['path']
2878 lctest = path.lower()
2879 lctest = path.lower()
2879 testcls = Test
2880 testcls = Test
2880
2881
2881 for ext, cls in self.TESTTYPES:
2882 for ext, cls in self.TESTTYPES:
2882 if lctest.endswith(ext):
2883 if lctest.endswith(ext):
2883 testcls = cls
2884 testcls = cls
2884 break
2885 break
2885
2886
2886 refpath = os.path.join(self._testdir, path)
2887 refpath = os.path.join(self._testdir, path)
2887 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2888 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2888
2889
2889 # extra keyword parameters. 'case' is used by .t tests
2890 # extra keyword parameters. 'case' is used by .t tests
2890 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2891 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2891
2892
2892 t = testcls(refpath, self._outputdir, tmpdir,
2893 t = testcls(refpath, self._outputdir, tmpdir,
2893 keeptmpdir=self.options.keep_tmpdir,
2894 keeptmpdir=self.options.keep_tmpdir,
2894 debug=self.options.debug,
2895 debug=self.options.debug,
2895 first=self.options.first,
2896 first=self.options.first,
2896 timeout=self.options.timeout,
2897 timeout=self.options.timeout,
2897 startport=self._getport(count),
2898 startport=self._getport(count),
2898 extraconfigopts=self.options.extra_config_opt,
2899 extraconfigopts=self.options.extra_config_opt,
2899 py3kwarnings=self.options.py3k_warnings,
2900 py3kwarnings=self.options.py3k_warnings,
2900 shell=self.options.shell,
2901 shell=self.options.shell,
2901 hgcommand=self._hgcommand,
2902 hgcommand=self._hgcommand,
2902 usechg=bool(self.options.with_chg or self.options.chg),
2903 usechg=bool(self.options.with_chg or self.options.chg),
2903 useipv6=useipv6, **kwds)
2904 useipv6=useipv6, **kwds)
2904 t.should_reload = True
2905 t.should_reload = True
2905 return t
2906 return t
2906
2907
2907 def _cleanup(self):
2908 def _cleanup(self):
2908 """Clean up state from this test invocation."""
2909 """Clean up state from this test invocation."""
2909 if self.options.keep_tmpdir:
2910 if self.options.keep_tmpdir:
2910 return
2911 return
2911
2912
2912 vlog("# Cleaning up HGTMP", self._hgtmp)
2913 vlog("# Cleaning up HGTMP", self._hgtmp)
2913 shutil.rmtree(self._hgtmp, True)
2914 shutil.rmtree(self._hgtmp, True)
2914 for f in self._createdfiles:
2915 for f in self._createdfiles:
2915 try:
2916 try:
2916 os.remove(f)
2917 os.remove(f)
2917 except OSError:
2918 except OSError:
2918 pass
2919 pass
2919
2920
2920 def _usecorrectpython(self):
2921 def _usecorrectpython(self):
2921 """Configure the environment to use the appropriate Python in tests."""
2922 """Configure the environment to use the appropriate Python in tests."""
2922 # Tests must use the same interpreter as us or bad things will happen.
2923 # Tests must use the same interpreter as us or bad things will happen.
2923 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2924 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2924
2925
2925 # os.symlink() is available with py3 on Windows, but it requires
2926 # os.symlink() is available with py3 on Windows, but it requires
2926 # Administrator rights.
2927 # Administrator rights.
2927 if getattr(os, 'symlink', None) and os.name != 'nt':
2928 if getattr(os, 'symlink', None) and os.name != 'nt':
2928 vlog("# Making python executable in test path a symlink to '%s'" %
2929 vlog("# Making python executable in test path a symlink to '%s'" %
2929 sys.executable)
2930 sys.executable)
2930 mypython = os.path.join(self._tmpbindir, pyexename)
2931 mypython = os.path.join(self._tmpbindir, pyexename)
2931 try:
2932 try:
2932 if os.readlink(mypython) == sys.executable:
2933 if os.readlink(mypython) == sys.executable:
2933 return
2934 return
2934 os.unlink(mypython)
2935 os.unlink(mypython)
2935 except OSError as err:
2936 except OSError as err:
2936 if err.errno != errno.ENOENT:
2937 if err.errno != errno.ENOENT:
2937 raise
2938 raise
2938 if self._findprogram(pyexename) != sys.executable:
2939 if self._findprogram(pyexename) != sys.executable:
2939 try:
2940 try:
2940 os.symlink(sys.executable, mypython)
2941 os.symlink(sys.executable, mypython)
2941 self._createdfiles.append(mypython)
2942 self._createdfiles.append(mypython)
2942 except OSError as err:
2943 except OSError as err:
2943 # child processes may race, which is harmless
2944 # child processes may race, which is harmless
2944 if err.errno != errno.EEXIST:
2945 if err.errno != errno.EEXIST:
2945 raise
2946 raise
2946 else:
2947 else:
2947 exedir, exename = os.path.split(sys.executable)
2948 exedir, exename = os.path.split(sys.executable)
2948 vlog("# Modifying search path to find %s as %s in '%s'" %
2949 vlog("# Modifying search path to find %s as %s in '%s'" %
2949 (exename, pyexename, exedir))
2950 (exename, pyexename, exedir))
2950 path = os.environ['PATH'].split(os.pathsep)
2951 path = os.environ['PATH'].split(os.pathsep)
2951 while exedir in path:
2952 while exedir in path:
2952 path.remove(exedir)
2953 path.remove(exedir)
2953 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2954 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2954 if not self._findprogram(pyexename):
2955 if not self._findprogram(pyexename):
2955 print("WARNING: Cannot find %s in search path" % pyexename)
2956 print("WARNING: Cannot find %s in search path" % pyexename)
2956
2957
2957 def _installhg(self):
2958 def _installhg(self):
2958 """Install hg into the test environment.
2959 """Install hg into the test environment.
2959
2960
2960 This will also configure hg with the appropriate testing settings.
2961 This will also configure hg with the appropriate testing settings.
2961 """
2962 """
2962 vlog("# Performing temporary installation of HG")
2963 vlog("# Performing temporary installation of HG")
2963 installerrs = os.path.join(self._hgtmp, b"install.err")
2964 installerrs = os.path.join(self._hgtmp, b"install.err")
2964 compiler = ''
2965 compiler = ''
2965 if self.options.compiler:
2966 if self.options.compiler:
2966 compiler = '--compiler ' + self.options.compiler
2967 compiler = '--compiler ' + self.options.compiler
2967 if self.options.pure:
2968 if self.options.pure:
2968 pure = b"--pure"
2969 pure = b"--pure"
2969 else:
2970 else:
2970 pure = b""
2971 pure = b""
2971
2972
2972 # Run installer in hg root
2973 # Run installer in hg root
2973 script = os.path.realpath(sys.argv[0])
2974 script = os.path.realpath(sys.argv[0])
2974 exe = sys.executable
2975 exe = sys.executable
2975 if PYTHON3:
2976 if PYTHON3:
2976 compiler = _bytespath(compiler)
2977 compiler = _bytespath(compiler)
2977 script = _bytespath(script)
2978 script = _bytespath(script)
2978 exe = _bytespath(exe)
2979 exe = _bytespath(exe)
2979 hgroot = os.path.dirname(os.path.dirname(script))
2980 hgroot = os.path.dirname(os.path.dirname(script))
2980 self._hgroot = hgroot
2981 self._hgroot = hgroot
2981 os.chdir(hgroot)
2982 os.chdir(hgroot)
2982 nohome = b'--home=""'
2983 nohome = b'--home=""'
2983 if os.name == 'nt':
2984 if os.name == 'nt':
2984 # The --home="" trick works only on OS where os.sep == '/'
2985 # The --home="" trick works only on OS where os.sep == '/'
2985 # because of a distutils convert_path() fast-path. Avoid it at
2986 # because of a distutils convert_path() fast-path. Avoid it at
2986 # least on Windows for now, deal with .pydistutils.cfg bugs
2987 # least on Windows for now, deal with .pydistutils.cfg bugs
2987 # when they happen.
2988 # when they happen.
2988 nohome = b''
2989 nohome = b''
2989 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2990 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2990 b' build %(compiler)s --build-base="%(base)s"'
2991 b' build %(compiler)s --build-base="%(base)s"'
2991 b' install --force --prefix="%(prefix)s"'
2992 b' install --force --prefix="%(prefix)s"'
2992 b' --install-lib="%(libdir)s"'
2993 b' --install-lib="%(libdir)s"'
2993 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2994 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2994 % {b'exe': exe, b'pure': pure,
2995 % {b'exe': exe, b'pure': pure,
2995 b'compiler': compiler,
2996 b'compiler': compiler,
2996 b'base': os.path.join(self._hgtmp, b"build"),
2997 b'base': os.path.join(self._hgtmp, b"build"),
2997 b'prefix': self._installdir, b'libdir': self._pythondir,
2998 b'prefix': self._installdir, b'libdir': self._pythondir,
2998 b'bindir': self._bindir,
2999 b'bindir': self._bindir,
2999 b'nohome': nohome, b'logfile': installerrs})
3000 b'nohome': nohome, b'logfile': installerrs})
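# Illustration only (with --pure and no custom --compiler; the real paths are
# derived from HGTMP and the options handled above), the assembled command is
# along the lines of:
#   python setup.py --pure clean --all build --build-base="$HGTMP/build"
#       install --force --prefix="$HGTMP/install"
#       --install-lib="$HGTMP/install/lib/python"
#       --install-scripts="$HGTMP/install/bin" --home="" >$HGTMP/install.err 2>&1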
3000
3001
3001 # setuptools requires install directories to exist.
3002 # setuptools requires install directories to exist.
3002 def makedirs(p):
3003 def makedirs(p):
3003 try:
3004 try:
3004 os.makedirs(p)
3005 os.makedirs(p)
3005 except OSError as e:
3006 except OSError as e:
3006 if e.errno != errno.EEXIST:
3007 if e.errno != errno.EEXIST:
3007 raise
3008 raise
3008 makedirs(self._pythondir)
3009 makedirs(self._pythondir)
3009 makedirs(self._bindir)
3010 makedirs(self._bindir)
3010
3011
3011 vlog("# Running", cmd)
3012 vlog("# Running", cmd)
3012 if os.system(_strpath(cmd)) == 0:
3013 if os.system(_strpath(cmd)) == 0:
3013 if not self.options.verbose:
3014 if not self.options.verbose:
3014 try:
3015 try:
3015 os.remove(installerrs)
3016 os.remove(installerrs)
3016 except OSError as e:
3017 except OSError as e:
3017 if e.errno != errno.ENOENT:
3018 if e.errno != errno.ENOENT:
3018 raise
3019 raise
3019 else:
3020 else:
3020 with open(installerrs, 'rb') as f:
3021 with open(installerrs, 'rb') as f:
3021 for line in f:
3022 for line in f:
3022 if PYTHON3:
3023 if PYTHON3:
3023 sys.stdout.buffer.write(line)
3024 sys.stdout.buffer.write(line)
3024 else:
3025 else:
3025 sys.stdout.write(line)
3026 sys.stdout.write(line)
3026 sys.exit(1)
3027 sys.exit(1)
3027 os.chdir(self._testdir)
3028 os.chdir(self._testdir)
3028
3029
3029 self._usecorrectpython()
3030 self._usecorrectpython()
3030
3031
3031 if self.options.py3k_warnings and not self.options.anycoverage:
3032 if self.options.py3k_warnings and not self.options.anycoverage:
3032 vlog("# Updating hg command to enable Py3k Warnings switch")
3033 vlog("# Updating hg command to enable Py3k Warnings switch")
3033 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3034 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3034 lines = [line.rstrip() for line in f]
3035 lines = [line.rstrip() for line in f]
3035 lines[0] += ' -3'
3036 lines[0] += ' -3'
3036 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3037 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3037 for line in lines:
3038 for line in lines:
3038 f.write(line + '\n')
3039 f.write(line + '\n')
3039
3040
3040 hgbat = os.path.join(self._bindir, b'hg.bat')
3041 hgbat = os.path.join(self._bindir, b'hg.bat')
3041 if os.path.isfile(hgbat):
3042 if os.path.isfile(hgbat):
3042 # hg.bat expects to be put in bin/scripts while run-tests.py
3043 # hg.bat expects to be put in bin/scripts while run-tests.py
3043 # installation layout puts it in bin/ directly. Fix it.
3044 # installation layout puts it in bin/ directly. Fix it.
3044 with open(hgbat, 'rb') as f:
3045 with open(hgbat, 'rb') as f:
3045 data = f.read()
3046 data = f.read()
3046 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3047 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3047 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3048 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3048 b'"%~dp0python" "%~dp0hg" %*')
3049 b'"%~dp0python" "%~dp0hg" %*')
3049 with open(hgbat, 'wb') as f:
3050 with open(hgbat, 'wb') as f:
3050 f.write(data)
3051 f.write(data)
3051 else:
3052 else:
3052 print('WARNING: cannot fix hg.bat reference to python.exe')
3053 print('WARNING: cannot fix hg.bat reference to python.exe')
3053
3054
3054 if self.options.anycoverage:
3055 if self.options.anycoverage:
3055 custom = os.path.join(self._testdir, 'sitecustomize.py')
3056 custom = os.path.join(self._testdir, 'sitecustomize.py')
3056 target = os.path.join(self._pythondir, 'sitecustomize.py')
3057 target = os.path.join(self._pythondir, 'sitecustomize.py')
3057 vlog('# Installing coverage trigger to %s' % target)
3058 vlog('# Installing coverage trigger to %s' % target)
3058 shutil.copyfile(custom, target)
3059 shutil.copyfile(custom, target)
3059 rc = os.path.join(self._testdir, '.coveragerc')
3060 rc = os.path.join(self._testdir, '.coveragerc')
3060 vlog('# Installing coverage rc to %s' % rc)
3061 vlog('# Installing coverage rc to %s' % rc)
3061 os.environ['COVERAGE_PROCESS_START'] = rc
3062 os.environ['COVERAGE_PROCESS_START'] = rc
3062 covdir = os.path.join(self._installdir, '..', 'coverage')
3063 covdir = os.path.join(self._installdir, '..', 'coverage')
3063 try:
3064 try:
3064 os.mkdir(covdir)
3065 os.mkdir(covdir)
3065 except OSError as e:
3066 except OSError as e:
3066 if e.errno != errno.EEXIST:
3067 if e.errno != errno.EEXIST:
3067 raise
3068 raise
3068
3069
3069 os.environ['COVERAGE_DIR'] = covdir
3070 os.environ['COVERAGE_DIR'] = covdir
3070
3071
3071 def _checkhglib(self, verb):
3072 def _checkhglib(self, verb):
3072 """Ensure that the 'mercurial' package imported by python is
3073 """Ensure that the 'mercurial' package imported by python is
3073 the one we expect it to be. If not, print a warning to stderr."""
3074 the one we expect it to be. If not, print a warning to stderr."""
3074 if ((self._bindir == self._pythondir) and
3075 if ((self._bindir == self._pythondir) and
3075 (self._bindir != self._tmpbindir)):
3076 (self._bindir != self._tmpbindir)):
3076 # The pythondir has been inferred from --with-hg flag.
3077 # The pythondir has been inferred from --with-hg flag.
3077 # We cannot expect anything sensible here.
3078 # We cannot expect anything sensible here.
3078 return
3079 return
3079 expecthg = os.path.join(self._pythondir, b'mercurial')
3080 expecthg = os.path.join(self._pythondir, b'mercurial')
3080 actualhg = self._gethgpath()
3081 actualhg = self._gethgpath()
3081 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3082 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3082 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3083 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3083 ' (expected %s)\n'
3084 ' (expected %s)\n'
3084 % (verb, actualhg, expecthg))
3085 % (verb, actualhg, expecthg))
3085 def _gethgpath(self):
3086 def _gethgpath(self):
3086 """Return the path to the mercurial package that is actually found by
3087 """Return the path to the mercurial package that is actually found by
3087 the current Python interpreter."""
3088 the current Python interpreter."""
3088 if self._hgpath is not None:
3089 if self._hgpath is not None:
3089 return self._hgpath
3090 return self._hgpath
3090
3091
3091 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3092 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3092 cmd = cmd % PYTHON
3093 cmd = cmd % PYTHON
3093 if PYTHON3:
3094 if PYTHON3:
3094 cmd = _strpath(cmd)
3095 cmd = _strpath(cmd)
3095 pipe = os.popen(cmd)
3096 pipe = os.popen(cmd)
3096 try:
3097 try:
3097 self._hgpath = _bytespath(pipe.read().strip())
3098 self._hgpath = _bytespath(pipe.read().strip())
3098 finally:
3099 finally:
3099 pipe.close()
3100 pipe.close()
3100
3101
3101 return self._hgpath
3102 return self._hgpath
3102
3103
3103 def _installchg(self):
3104 def _installchg(self):
3104 """Install chg into the test environment"""
3105 """Install chg into the test environment"""
3105 vlog('# Performing temporary installation of CHG')
3106 vlog('# Performing temporary installation of CHG')
3106 assert os.path.dirname(self._bindir) == self._installdir
3107 assert os.path.dirname(self._bindir) == self._installdir
3107 assert self._hgroot, 'must be called after _installhg()'
3108 assert self._hgroot, 'must be called after _installhg()'
3108 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3109 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3109 % {b'make': 'make', # TODO: switch by option or environment?
3110 % {b'make': 'make', # TODO: switch by option or environment?
3110 b'prefix': self._installdir})
3111 b'prefix': self._installdir})
3111 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3112 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3112 vlog("# Running", cmd)
3113 vlog("# Running", cmd)
3113 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3114 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3114 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3115 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3115 stderr=subprocess.STDOUT)
3116 stderr=subprocess.STDOUT)
3116 out, _err = proc.communicate()
3117 out, _err = proc.communicate()
3117 if proc.returncode != 0:
3118 if proc.returncode != 0:
3118 if PYTHON3:
3119 if PYTHON3:
3119 sys.stdout.buffer.write(out)
3120 sys.stdout.buffer.write(out)
3120 else:
3121 else:
3121 sys.stdout.write(out)
3122 sys.stdout.write(out)
3122 sys.exit(1)
3123 sys.exit(1)
3123
3124
3124 def _outputcoverage(self):
3125 def _outputcoverage(self):
3125 """Produce code coverage output."""
3126 """Produce code coverage output."""
3126 import coverage
3127 import coverage
3127 coverage = coverage.coverage
3128 coverage = coverage.coverage
3128
3129
3129 vlog('# Producing coverage report')
3130 vlog('# Producing coverage report')
3130 # chdir is the easiest way to get short, relative paths in the
3131 # chdir is the easiest way to get short, relative paths in the
3131 # output.
3132 # output.
3132 os.chdir(self._hgroot)
3133 os.chdir(self._hgroot)
3133 covdir = os.path.join(self._installdir, '..', 'coverage')
3134 covdir = os.path.join(self._installdir, '..', 'coverage')
3134 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3135 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3135
3136
3136 # Map install directory paths back to source directory.
3137 # Map install directory paths back to source directory.
3137 cov.config.paths['srcdir'] = ['.', self._pythondir]
3138 cov.config.paths['srcdir'] = ['.', self._pythondir]
3138
3139
3139 cov.combine()
3140 cov.combine()
3140
3141
3141 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3142 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3142 cov.report(ignore_errors=True, omit=omit)
3143 cov.report(ignore_errors=True, omit=omit)
3143
3144
3144 if self.options.htmlcov:
3145 if self.options.htmlcov:
3145 htmldir = os.path.join(self._outputdir, 'htmlcov')
3146 htmldir = os.path.join(self._outputdir, 'htmlcov')
3146 cov.html_report(directory=htmldir, omit=omit)
3147 cov.html_report(directory=htmldir, omit=omit)
3147 if self.options.annotate:
3148 if self.options.annotate:
3148 adir = os.path.join(self._outputdir, 'annotated')
3149 adir = os.path.join(self._outputdir, 'annotated')
3149 if not os.path.isdir(adir):
3150 if not os.path.isdir(adir):
3150 os.mkdir(adir)
3151 os.mkdir(adir)
3151 cov.annotate(directory=adir, omit=omit)
3152 cov.annotate(directory=adir, omit=omit)
3152
3153
3153 def _findprogram(self, program):
3154 def _findprogram(self, program):
3154 """Search PATH for a executable program"""
3155 """Search PATH for a executable program"""
3155 dpb = _bytespath(os.defpath)
3156 dpb = _bytespath(os.defpath)
3156 sepb = _bytespath(os.pathsep)
3157 sepb = _bytespath(os.pathsep)
3157 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3158 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3158 name = os.path.join(p, program)
3159 name = os.path.join(p, program)
3159 if os.name == 'nt' or os.access(name, os.X_OK):
3160 if os.name == 'nt' or os.access(name, os.X_OK):
3160 return name
3161 return name
3161 return None
3162 return None
3162
3163
3163 def _checktools(self):
3164 def _checktools(self):
3164 """Ensure tools required to run tests are present."""
3165 """Ensure tools required to run tests are present."""
3165 for p in self.REQUIREDTOOLS:
3166 for p in self.REQUIREDTOOLS:
3166 if os.name == 'nt' and not p.endswith(b'.exe'):
3167 if os.name == 'nt' and not p.endswith(b'.exe'):
3167 p += b'.exe'
3168 p += b'.exe'
3168 found = self._findprogram(p)
3169 found = self._findprogram(p)
3169 if found:
3170 if found:
3170 vlog("# Found prerequisite", p, "at", found)
3171 vlog("# Found prerequisite", p, "at", found)
3171 else:
3172 else:
3172 print("WARNING: Did not find prerequisite tool: %s " %
3173 print("WARNING: Did not find prerequisite tool: %s " %
3173 p.decode("utf-8"))
3174 p.decode("utf-8"))
3174
3175
3175 def aggregateexceptions(path):
3176 def aggregateexceptions(path):
3176 exceptioncounts = collections.Counter()
3177 exceptioncounts = collections.Counter()
3177 testsbyfailure = collections.defaultdict(set)
3178 testsbyfailure = collections.defaultdict(set)
3178 failuresbytest = collections.defaultdict(set)
3179 failuresbytest = collections.defaultdict(set)
3179
3180
3180 for f in os.listdir(path):
3181 for f in os.listdir(path):
3181 with open(os.path.join(path, f), 'rb') as fh:
3182 with open(os.path.join(path, f), 'rb') as fh:
3182 data = fh.read().split(b'\0')
3183 data = fh.read().split(b'\0')
3183 if len(data) != 5:
3184 if len(data) != 5:
3184 continue
3185 continue
3185
3186
3186 exc, mainframe, hgframe, hgline, testname = data
3187 exc, mainframe, hgframe, hgline, testname = data
3187 exc = exc.decode('utf-8')
3188 exc = exc.decode('utf-8')
3188 mainframe = mainframe.decode('utf-8')
3189 mainframe = mainframe.decode('utf-8')
3189 hgframe = hgframe.decode('utf-8')
3190 hgframe = hgframe.decode('utf-8')
3190 hgline = hgline.decode('utf-8')
3191 hgline = hgline.decode('utf-8')
3191 testname = testname.decode('utf-8')
3192 testname = testname.decode('utf-8')
3192
3193
3193 key = (hgframe, hgline, exc)
3194 key = (hgframe, hgline, exc)
3194 exceptioncounts[key] += 1
3195 exceptioncounts[key] += 1
3195 testsbyfailure[key].add(testname)
3196 testsbyfailure[key].add(testname)
3196 failuresbytest[testname].add(key)
3197 failuresbytest[testname].add(key)
3197
3198
3198 # For each failure, find the test that has the fewest total failures.
3199 # For each failure, find the test that has the fewest total failures.
3199 leastfailing = {}
3200 leastfailing = {}
3200 for key, tests in testsbyfailure.items():
3201 for key, tests in testsbyfailure.items():
3201 fewesttest = None
3202 fewesttest = None
3202 fewestcount = 99999999
3203 fewestcount = 99999999
3203 for test in sorted(tests):
3204 for test in sorted(tests):
3204 if len(failuresbytest[test]) < fewestcount:
3205 if len(failuresbytest[test]) < fewestcount:
3205 fewesttest = test
3206 fewesttest = test
3206 fewestcount = len(failuresbytest[test])
3207 fewestcount = len(failuresbytest[test])
3207
3208
3208 leastfailing[key] = (fewestcount, fewesttest)
3209 leastfailing[key] = (fewestcount, fewesttest)
3209
3210
3210 # Create a combined counter so we can sort by total occurrences and
3211 # Create a combined counter so we can sort by total occurrences and
3211 # impacted tests.
3212 # impacted tests.
3212 combined = {}
3213 combined = {}
3213 for key in exceptioncounts:
3214 for key in exceptioncounts:
3214 combined[key] = (exceptioncounts[key],
3215 combined[key] = (exceptioncounts[key],
3215 len(testsbyfailure[key]),
3216 len(testsbyfailure[key]),
3216 leastfailing[key][0],
3217 leastfailing[key][0],
3217 leastfailing[key][1])
3218 leastfailing[key][1])
3218
3219
3219 return {
3220 return {
3220 'exceptioncounts': exceptioncounts,
3221 'exceptioncounts': exceptioncounts,
3221 'total': sum(exceptioncounts.values()),
3222 'total': sum(exceptioncounts.values()),
3222 'combined': combined,
3223 'combined': combined,
3223 'leastfailing': leastfailing,
3224 'leastfailing': leastfailing,
3224 'byfailure': testsbyfailure,
3225 'byfailure': testsbyfailure,
3225 'bytest': failuresbytest,
3226 'bytest': failuresbytest,
3226 }
3227 }
3227
3228
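A minimal, hypothetical consumer of the summary returned by aggregateexceptions();
the sorting and report format here are assumptions for illustration, not what
run-tests.py itself prints:

    summary = aggregateexceptions(b'/tmp/hgtests-output/exceptions')  # hypothetical path
    print('%d exceptions were logged in total' % summary['total'])
    for key, stats in sorted(summary['combined'].items(),
                             key=lambda kv: kv[1][0], reverse=True):
        frame, _hgline, exc = key
        count, testcount, leastcount, leasttest = stats
        print('%dx %s at %s (hit by %d tests; smallest reproducer: %s)'
              % (count, exc, frame, testcount, leasttest))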
3228 if __name__ == '__main__':
3229 if __name__ == '__main__':
3229 runner = TestRunner()
3230 runner = TestRunner()
3230
3231
3231 try:
3232 try:
3232 import msvcrt
3233 import msvcrt
3233 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3234 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3234 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3235 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3235 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3236 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3236 except ImportError:
3237 except ImportError:
3237 pass
3238 pass
3238
3239
3239 sys.exit(runner.run(sys.argv[1:]))
3240 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1928 +1,1928 b''
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ . "$TESTDIR/helper-runtests.sh"
5 $ . "$TESTDIR/helper-runtests.sh"
6
6
7 Smoke test with install
7 Smoke test with install
8 ============
8 ============
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10 running 0 tests using 0 parallel processes
11
11
12 # Ran 0 tests, 0 skipped, 0 failed.
12 # Ran 0 tests, 0 skipped, 0 failed.
13
13
14 Define a helper to avoid the install step
14 Define a helper to avoid the install step
15 =============
15 =============
16 $ rt()
16 $ rt()
17 > {
17 > {
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 > }
19 > }
20
20
21 error paths
21 error paths
22
22
23 #if symlink
23 #if symlink
24 $ ln -s `which true` hg
24 $ ln -s `which true` hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 warning: --with-hg should specify an hg script
26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
27 running 0 tests using 0 parallel processes
28
28
29 # Ran 0 tests, 0 skipped, 0 failed.
29 # Ran 0 tests, 0 skipped, 0 failed.
30 $ rm hg
30 $ rm hg
31 #endif
31 #endif
32
32
33 #if execbit
33 #if execbit
34 $ touch hg
34 $ touch hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 usage: run-tests.py [options] [tests]
36 usage: run-tests.py [options] [tests]
37 run-tests.py: error: --with-hg must specify an executable hg script
37 run-tests.py: error: --with-hg must specify an executable hg script
38 [2]
38 [2]
39 $ rm hg
39 $ rm hg
40 #endif
40 #endif
41
41
42 Features for testing optional lines
42 Features for testing optional lines
43 ===================================
43 ===================================
44
44
45 $ cat > hghaveaddon.py <<EOF
45 $ cat > hghaveaddon.py <<EOF
46 > import hghave
46 > import hghave
47 > @hghave.check("custom", "custom hghave feature")
47 > @hghave.check("custom", "custom hghave feature")
48 > def has_custom():
48 > def has_custom():
49 > return True
49 > return True
50 > @hghave.check("missing", "missing hghave feature")
50 > @hghave.check("missing", "missing hghave feature")
51 > def has_missing():
51 > def has_missing():
52 > return False
52 > return False
53 > EOF
53 > EOF
54
54
55 an empty test
55 an empty test
56 =======================
56 =======================
57
57
58 $ touch test-empty.t
58 $ touch test-empty.t
59 $ rt
59 $ rt
60 running 1 tests using 1 parallel processes
60 running 1 tests using 1 parallel processes
61 .
61 .
62 # Ran 1 tests, 0 skipped, 0 failed.
62 # Ran 1 tests, 0 skipped, 0 failed.
63 $ rm test-empty.t
63 $ rm test-empty.t
64
64
65 a successful test
65 a successful test
66 =======================
66 =======================
67
67
68 $ cat > test-success.t << EOF
68 $ cat > test-success.t << EOF
69 > $ echo babar
69 > $ echo babar
70 > babar
70 > babar
71 > $ echo xyzzy
71 > $ echo xyzzy
72 > dont_print (?)
72 > dont_print (?)
73 > nothing[42]line (re) (?)
73 > nothing[42]line (re) (?)
74 > never*happens (glob) (?)
74 > never*happens (glob) (?)
75 > more_nothing (?)
75 > more_nothing (?)
76 > xyzzy
76 > xyzzy
77 > nor this (?)
77 > nor this (?)
78 > $ printf 'abc\ndef\nxyz\n'
78 > $ printf 'abc\ndef\nxyz\n'
79 > 123 (?)
79 > 123 (?)
80 > abc
80 > abc
81 > def (?)
81 > def (?)
82 > 456 (?)
82 > 456 (?)
83 > xyz
83 > xyz
84 > $ printf 'zyx\nwvu\ntsr\n'
84 > $ printf 'zyx\nwvu\ntsr\n'
85 > abc (?)
85 > abc (?)
86 > zyx (custom !)
86 > zyx (custom !)
87 > wvu
87 > wvu
88 > no_print (no-custom !)
88 > no_print (no-custom !)
89 > tsr (no-missing !)
89 > tsr (no-missing !)
90 > missing (missing !)
90 > missing (missing !)
91 > EOF
91 > EOF
92
92
93 $ rt
93 $ rt
94 running 1 tests using 1 parallel processes
94 running 1 tests using 1 parallel processes
95 .
95 .
96 # Ran 1 tests, 0 skipped, 0 failed.
96 # Ran 1 tests, 0 skipped, 0 failed.
97
97
98 failing test
98 failing test
99 ==================
99 ==================
100
100
101 test churn with globs
101 test churn with globs
102 $ cat > test-failure.t <<EOF
102 $ cat > test-failure.t <<EOF
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 > bar*bad (glob)
104 > bar*bad (glob)
105 > bar*baz (glob)
105 > bar*baz (glob)
106 > | fo (re)
106 > | fo (re)
107 > EOF
107 > EOF
108 $ rt test-failure.t
108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
109 running 1 tests using 1 parallel processes
110
110
111 --- $TESTTMP/test-failure.t
111 --- $TESTTMP/test-failure.t
112 +++ $TESTTMP/test-failure.t.err
112 +++ $TESTTMP/test-failure.t.err
113 @@ -1,4 +1,4 @@
113 @@ -1,4 +1,4 @@
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 + bar*baz (glob)
115 + bar*baz (glob)
116 bar*bad (glob)
116 bar*bad (glob)
117 - bar*baz (glob)
117 - bar*baz (glob)
118 - | fo (re)
118 - | fo (re)
119 + foo
119 + foo
120
120
121 ERROR: test-failure.t output changed
121 ERROR: test-failure.t output changed
122 !
122 !
123 Failed test-failure.t: output changed
123 Failed test-failure.t: output changed
124 # Ran 1 tests, 0 skipped, 1 failed.
124 # Ran 1 tests, 0 skipped, 1 failed.
125 python hash seed: * (glob)
125 python hash seed: * (glob)
126 [1]
126 [1]
127
127
128 test how multiple globs get matched with lines in output
128 test how multiple globs get matched with lines in output
129 $ cat > test-failure-globs.t <<EOF
129 $ cat > test-failure-globs.t <<EOF
130 > $ echo "context"; echo "context"; \
130 > $ echo "context"; echo "context"; \
131 > echo "key: 1"; echo "value: not a"; \
131 > echo "key: 1"; echo "value: not a"; \
132 > echo "key: 2"; echo "value: not b"; \
132 > echo "key: 2"; echo "value: not b"; \
133 > echo "key: 3"; echo "value: c"; \
133 > echo "key: 3"; echo "value: c"; \
134 > echo "key: 4"; echo "value: d"
134 > echo "key: 4"; echo "value: d"
135 > context
135 > context
136 > context
136 > context
137 > key: 1
137 > key: 1
138 > value: a
138 > value: a
139 > key: 2
139 > key: 2
140 > value: b
140 > value: b
141 > key: 3
141 > key: 3
142 > value: * (glob)
142 > value: * (glob)
143 > key: 4
143 > key: 4
144 > value: * (glob)
144 > value: * (glob)
145 > EOF
145 > EOF
146 $ rt test-failure-globs.t
146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
147 running 1 tests using 1 parallel processes
148
148
149 --- $TESTTMP/test-failure-globs.t
149 --- $TESTTMP/test-failure-globs.t
150 +++ $TESTTMP/test-failure-globs.t.err
150 +++ $TESTTMP/test-failure-globs.t.err
151 @@ -2,9 +2,9 @@
151 @@ -2,9 +2,9 @@
152 context
152 context
153 context
153 context
154 key: 1
154 key: 1
155 - value: a
155 - value: a
156 + value: not a
156 + value: not a
157 key: 2
157 key: 2
158 - value: b
158 - value: b
159 + value: not b
159 + value: not b
160 key: 3
160 key: 3
161 value: * (glob)
161 value: * (glob)
162 key: 4
162 key: 4
163
163
164 ERROR: test-failure-globs.t output changed
164 ERROR: test-failure-globs.t output changed
165 !
165 !
166 Failed test-failure-globs.t: output changed
166 Failed test-failure-globs.t: output changed
167 # Ran 1 tests, 0 skipped, 1 failed.
167 # Ran 1 tests, 0 skipped, 1 failed.
168 python hash seed: * (glob)
168 python hash seed: * (glob)
169 [1]
169 [1]
170 $ rm test-failure-globs.t
170 $ rm test-failure-globs.t
171
171
172 test diff colorisation
172 test diff colorisation
173
173
174 #if no-windows pygments
174 #if no-windows pygments
175 $ rt test-failure.t --color always
175 $ rt test-failure.t --color always
176
176
177 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
177 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
178 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
178 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
179 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
179 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
180 $ echo "bar-baz"; echo "bar-bad"; echo foo
180 $ echo "bar-baz"; echo "bar-bad"; echo foo
181 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
181 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
182 bar*bad (glob)
182 bar*bad (glob)
183 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
183 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
184 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
184 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
185 \x1b[38;5;34m+ foo\x1b[39m (esc)
185 \x1b[38;5;34m+ foo\x1b[39m (esc)
186
186
187 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
187 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
188 !
188 !
189 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
189 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
190 # Ran 1 tests, 0 skipped, 1 failed.
190 # Ran 1 tests, 0 skipped, 1 failed.
191 python hash seed: * (glob)
191 python hash seed: * (glob)
192 [1]
192 [1]
193
193
194 $ rt test-failure.t 2> tmp.log
194 $ rt test-failure.t 2> tmp.log
195 [1]
195 [1]
196 $ cat tmp.log
196 $ cat tmp.log
197
197
198 --- $TESTTMP/test-failure.t
198 --- $TESTTMP/test-failure.t
199 +++ $TESTTMP/test-failure.t.err
199 +++ $TESTTMP/test-failure.t.err
200 @@ -1,4 +1,4 @@
200 @@ -1,4 +1,4 @@
201 $ echo "bar-baz"; echo "bar-bad"; echo foo
201 $ echo "bar-baz"; echo "bar-bad"; echo foo
202 + bar*baz (glob)
202 + bar*baz (glob)
203 bar*bad (glob)
203 bar*bad (glob)
204 - bar*baz (glob)
204 - bar*baz (glob)
205 - | fo (re)
205 - | fo (re)
206 + foo
206 + foo
207
207
208 ERROR: test-failure.t output changed
208 ERROR: test-failure.t output changed
209 !
209 !
210 Failed test-failure.t: output changed
210 Failed test-failure.t: output changed
211 # Ran 1 tests, 0 skipped, 1 failed.
211 # Ran 1 tests, 0 skipped, 1 failed.
212 python hash seed: * (glob)
212 python hash seed: * (glob)
213 #endif
213 #endif
214
214
215 $ cat > test-failure.t << EOF
215 $ cat > test-failure.t << EOF
216 > $ true
216 > $ true
217 > should go away (true !)
217 > should go away (true !)
218 > $ true
218 > $ true
219 > should stay (false !)
219 > should stay (false !)
220 >
220 >
221 > Should remove first line, not second or third
221 > Should remove first line, not second or third
222 > $ echo 'testing'
222 > $ echo 'testing'
223 > baz*foo (glob) (true !)
223 > baz*foo (glob) (true !)
224 > foobar*foo (glob) (false !)
224 > foobar*foo (glob) (false !)
225 > te*ting (glob) (true !)
225 > te*ting (glob) (true !)
226 >
226 >
227 > Should keep first two lines, remove third and last
227 > Should keep first two lines, remove third and last
228 > $ echo 'testing'
228 > $ echo 'testing'
229 > test.ng (re) (true !)
229 > test.ng (re) (true !)
230 > foo.ar (re) (false !)
230 > foo.ar (re) (false !)
231 > b.r (re) (true !)
231 > b.r (re) (true !)
232 > missing (?)
232 > missing (?)
233 > awol (true !)
233 > awol (true !)
234 >
234 >
235 > The "missing" line should stay, even though awol is dropped
235 > The "missing" line should stay, even though awol is dropped
236 > $ echo 'testing'
236 > $ echo 'testing'
237 > test.ng (re) (true !)
237 > test.ng (re) (true !)
238 > foo.ar (?)
238 > foo.ar (?)
239 > awol
239 > awol
240 > missing (?)
240 > missing (?)
241 > EOF
241 > EOF
242 $ rt test-failure.t
242 $ rt test-failure.t
243 running 1 tests using 1 parallel processes
243 running 1 tests using 1 parallel processes
244
244
245 --- $TESTTMP/test-failure.t
245 --- $TESTTMP/test-failure.t
246 +++ $TESTTMP/test-failure.t.err
246 +++ $TESTTMP/test-failure.t.err
247 @@ -1,11 +1,9 @@
247 @@ -1,11 +1,9 @@
248 $ true
248 $ true
249 - should go away (true !)
249 - should go away (true !)
250 $ true
250 $ true
251 should stay (false !)
251 should stay (false !)
252
252
253 Should remove first line, not second or third
253 Should remove first line, not second or third
254 $ echo 'testing'
254 $ echo 'testing'
255 - baz*foo (glob) (true !)
255 - baz*foo (glob) (true !)
256 foobar*foo (glob) (false !)
256 foobar*foo (glob) (false !)
257 te*ting (glob) (true !)
257 te*ting (glob) (true !)
258
258
259 foo.ar (re) (false !)
259 foo.ar (re) (false !)
260 missing (?)
260 missing (?)
261 @@ -13,13 +11,10 @@
261 @@ -13,13 +11,10 @@
262 $ echo 'testing'
262 $ echo 'testing'
263 test.ng (re) (true !)
263 test.ng (re) (true !)
264 foo.ar (re) (false !)
264 foo.ar (re) (false !)
265 - b.r (re) (true !)
265 - b.r (re) (true !)
266 missing (?)
266 missing (?)
267 - awol (true !)
267 - awol (true !)
268
268
269 The "missing" line should stay, even though awol is dropped
269 The "missing" line should stay, even though awol is dropped
270 $ echo 'testing'
270 $ echo 'testing'
271 test.ng (re) (true !)
271 test.ng (re) (true !)
272 foo.ar (?)
272 foo.ar (?)
273 - awol
273 - awol
274 missing (?)
274 missing (?)
275
275
276 ERROR: test-failure.t output changed
276 ERROR: test-failure.t output changed
277 !
277 !
278 Failed test-failure.t: output changed
278 Failed test-failure.t: output changed
279 # Ran 1 tests, 0 skipped, 1 failed.
279 # Ran 1 tests, 0 skipped, 1 failed.
280 python hash seed: * (glob)
280 python hash seed: * (glob)
281 [1]
281 [1]
282
282
283 basic failing test
283 basic failing test
284 $ cat > test-failure.t << EOF
284 $ cat > test-failure.t << EOF
285 > $ echo babar
285 > $ echo babar
286 > rataxes
286 > rataxes
287 > This is a noop statement so that
287 > This is a noop statement so that
288 > this test is still more bytes than success.
288 > this test is still more bytes than success.
289 > pad pad pad pad............................................................
289 > pad pad pad pad............................................................
290 > pad pad pad pad............................................................
290 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
295 > EOF
295 > EOF
296
296
297 >>> fh = open('test-failure-unicode.t', 'wb')
297 >>> fh = open('test-failure-unicode.t', 'wb')
298 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
298 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
299 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
299 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
300
300
301 $ rt
301 $ rt
302 running 3 tests using 1 parallel processes
302 running 3 tests using 1 parallel processes
303
303
304 --- $TESTTMP/test-failure.t
304 --- $TESTTMP/test-failure.t
305 +++ $TESTTMP/test-failure.t.err
305 +++ $TESTTMP/test-failure.t.err
306 @@ -1,5 +1,5 @@
306 @@ -1,5 +1,5 @@
307 $ echo babar
307 $ echo babar
308 - rataxes
308 - rataxes
309 + babar
309 + babar
310 This is a noop statement so that
310 This is a noop statement so that
311 this test is still more bytes than success.
311 this test is still more bytes than success.
312 pad pad pad pad............................................................
312 pad pad pad pad............................................................
313
313
314 ERROR: test-failure.t output changed
314 ERROR: test-failure.t output changed
315 !.
315 !.
316 --- $TESTTMP/test-failure-unicode.t
316 --- $TESTTMP/test-failure-unicode.t
317 +++ $TESTTMP/test-failure-unicode.t.err
317 +++ $TESTTMP/test-failure-unicode.t.err
318 @@ -1,2 +1,2 @@
318 @@ -1,2 +1,2 @@
319 $ echo babar\xce\xb1 (esc)
319 $ echo babar\xce\xb1 (esc)
320 - l\xce\xb5\xce\xb5t (esc)
320 - l\xce\xb5\xce\xb5t (esc)
321 + babar\xce\xb1 (esc)
321 + babar\xce\xb1 (esc)
322
322
323 ERROR: test-failure-unicode.t output changed
323 ERROR: test-failure-unicode.t output changed
324 !
324 !
325 Failed test-failure.t: output changed
325 Failed test-failure.t: output changed
326 Failed test-failure-unicode.t: output changed
326 Failed test-failure-unicode.t: output changed
327 # Ran 3 tests, 0 skipped, 2 failed.
327 # Ran 3 tests, 0 skipped, 2 failed.
328 python hash seed: * (glob)
328 python hash seed: * (glob)
329 [1]
329 [1]
330
330
331 test --outputdir
331 test --outputdir
332 $ mkdir output
332 $ mkdir output
333 $ rt --outputdir output
333 $ rt --outputdir output
334 running 3 tests using 1 parallel processes
334 running 3 tests using 1 parallel processes
335
335
336 --- $TESTTMP/test-failure.t
336 --- $TESTTMP/test-failure.t
337 +++ $TESTTMP/output/test-failure.t.err
337 +++ $TESTTMP/output/test-failure.t.err
338 @@ -1,5 +1,5 @@
338 @@ -1,5 +1,5 @@
339 $ echo babar
339 $ echo babar
340 - rataxes
340 - rataxes
341 + babar
341 + babar
342 This is a noop statement so that
342 This is a noop statement so that
343 this test is still more bytes than success.
343 this test is still more bytes than success.
344 pad pad pad pad............................................................
344 pad pad pad pad............................................................
345
345
346 ERROR: test-failure.t output changed
346 ERROR: test-failure.t output changed
347 !.
347 !.
348 --- $TESTTMP/test-failure-unicode.t
348 --- $TESTTMP/test-failure-unicode.t
349 +++ $TESTTMP/output/test-failure-unicode.t.err
349 +++ $TESTTMP/output/test-failure-unicode.t.err
350 @@ -1,2 +1,2 @@
350 @@ -1,2 +1,2 @@
351 $ echo babar\xce\xb1 (esc)
351 $ echo babar\xce\xb1 (esc)
352 - l\xce\xb5\xce\xb5t (esc)
352 - l\xce\xb5\xce\xb5t (esc)
353 + babar\xce\xb1 (esc)
353 + babar\xce\xb1 (esc)
354
354
355 ERROR: test-failure-unicode.t output changed
355 ERROR: test-failure-unicode.t output changed
356 !
356 !
357 Failed test-failure.t: output changed
357 Failed test-failure.t: output changed
358 Failed test-failure-unicode.t: output changed
358 Failed test-failure-unicode.t: output changed
359 # Ran 3 tests, 0 skipped, 2 failed.
359 # Ran 3 tests, 0 skipped, 2 failed.
360 python hash seed: * (glob)
360 python hash seed: * (glob)
361 [1]
361 [1]
362 $ ls -a output
362 $ ls -a output
363 .
363 .
364 ..
364 ..
365 .testtimes
365 .testtimes
366 test-failure-unicode.t.err
366 test-failure-unicode.t.err
367 test-failure.t.err
367 test-failure.t.err
368
368
369 test --xunit support
369 test --xunit support
370 $ rt --xunit=xunit.xml
370 $ rt --xunit=xunit.xml
371 running 3 tests using 1 parallel processes
371 running 3 tests using 1 parallel processes
372
372
373 --- $TESTTMP/test-failure.t
373 --- $TESTTMP/test-failure.t
374 +++ $TESTTMP/test-failure.t.err
374 +++ $TESTTMP/test-failure.t.err
375 @@ -1,5 +1,5 @@
375 @@ -1,5 +1,5 @@
376 $ echo babar
376 $ echo babar
377 - rataxes
377 - rataxes
378 + babar
378 + babar
379 This is a noop statement so that
379 This is a noop statement so that
380 this test is still more bytes than success.
380 this test is still more bytes than success.
381 pad pad pad pad............................................................
381 pad pad pad pad............................................................
382
382
383 ERROR: test-failure.t output changed
383 ERROR: test-failure.t output changed
384 !.
384 !.
385 --- $TESTTMP/test-failure-unicode.t
385 --- $TESTTMP/test-failure-unicode.t
386 +++ $TESTTMP/test-failure-unicode.t.err
386 +++ $TESTTMP/test-failure-unicode.t.err
387 @@ -1,2 +1,2 @@
387 @@ -1,2 +1,2 @@
388 $ echo babar\xce\xb1 (esc)
388 $ echo babar\xce\xb1 (esc)
389 - l\xce\xb5\xce\xb5t (esc)
389 - l\xce\xb5\xce\xb5t (esc)
390 + babar\xce\xb1 (esc)
390 + babar\xce\xb1 (esc)
391
391
392 ERROR: test-failure-unicode.t output changed
392 ERROR: test-failure-unicode.t output changed
393 !
393 !
394 Failed test-failure.t: output changed
394 Failed test-failure.t: output changed
395 Failed test-failure-unicode.t: output changed
395 Failed test-failure-unicode.t: output changed
396 # Ran 3 tests, 0 skipped, 2 failed.
396 # Ran 3 tests, 0 skipped, 2 failed.
397 python hash seed: * (glob)
397 python hash seed: * (glob)
398 [1]
398 [1]
399 $ cat xunit.xml
399 $ cat xunit.xml
400 <?xml version="1.0" encoding="utf-8"?>
400 <?xml version="1.0" encoding="utf-8"?>
401 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
401 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
402 <testcase name="test-success.t" time="*"/> (glob)
402 <testcase name="test-success.t" time="*"/> (glob)
403 <testcase name="test-failure-unicode.t" time="*"> (glob)
403 <testcase name="test-failure-unicode.t" time="*"> (glob)
404 <failure message="output changed" type="output-mismatch">
404 <failure message="output changed" type="output-mismatch">
405 <![CDATA[--- $TESTTMP/test-failure-unicode.t
405 <![CDATA[--- $TESTTMP/test-failure-unicode.t
406 +++ $TESTTMP/test-failure-unicode.t.err
406 +++ $TESTTMP/test-failure-unicode.t.err
407 @@ -1,2 +1,2 @@
407 @@ -1,2 +1,2 @@
408 $ echo babar\xce\xb1 (esc)
408 $ echo babar\xce\xb1 (esc)
409 - l\xce\xb5\xce\xb5t (esc)
409 - l\xce\xb5\xce\xb5t (esc)
410 + babar\xce\xb1 (esc)
410 + babar\xce\xb1 (esc)
411 ]]> </failure>
411 ]]> </failure>
412 </testcase>
412 </testcase>
413 <testcase name="test-failure.t" time="*"> (glob)
413 <testcase name="test-failure.t" time="*"> (glob)
414 <failure message="output changed" type="output-mismatch">
414 <failure message="output changed" type="output-mismatch">
415 <![CDATA[--- $TESTTMP/test-failure.t
415 <![CDATA[--- $TESTTMP/test-failure.t
416 +++ $TESTTMP/test-failure.t.err
416 +++ $TESTTMP/test-failure.t.err
417 @@ -1,5 +1,5 @@
417 @@ -1,5 +1,5 @@
418 $ echo babar
418 $ echo babar
419 - rataxes
419 - rataxes
420 + babar
420 + babar
421 This is a noop statement so that
421 This is a noop statement so that
422 this test is still more bytes than success.
422 this test is still more bytes than success.
423 pad pad pad pad............................................................
423 pad pad pad pad............................................................
424 ]]> </failure>
424 ]]> </failure>
425 </testcase>
425 </testcase>
426 </testsuite>
426 </testsuite>
427
427
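Not part of the test file: a minimal sketch, using only the Python standard library, of how a consumer (for example a CI job) might read back an xunit.xml laid out like the one printed above. The file name and the element/attribute names are taken from that output; nothing else is assumed.

  # sketch: summarize failures from a run-tests.py xunit.xml (format as shown above)
  import xml.etree.ElementTree as ET

  suite = ET.parse('xunit.xml').getroot()              # the <testsuite> element
  print(suite.get('tests'), 'tests,', suite.get('failures'), 'failures')
  for case in suite.findall('testcase'):
      failure = case.find('failure')
      if failure is not None:
          # the unified diff is stored as the CDATA text of <failure>
          print(case.get('name'), '->', failure.get('message'))
          print(failure.text)
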
428 $ cat .testtimes
428 $ cat .testtimes
429 test-empty.t * (glob)
429 test-empty.t * (glob)
430 test-failure-globs.t * (glob)
430 test-failure-globs.t * (glob)
431 test-failure-unicode.t * (glob)
431 test-failure-unicode.t * (glob)
432 test-failure.t * (glob)
432 test-failure.t * (glob)
433 test-success.t * (glob)
433 test-success.t * (glob)
434
434
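The timing fields in .testtimes are hidden by the glob above. As an editorial aside, here is a minimal sketch that assumes each line is a test name followed by one or more whitespace-separated durations (an assumption about the hidden fields, not something the test guarantees):

  # sketch: read .testtimes, assuming "<test name> <duration> [<duration> ...]" lines
  with open('.testtimes') as fh:
      for line in fh:
          fields = line.split()
          if not fields:
              continue
          name, durations = fields[0], [float(f) for f in fields[1:]]
          print(name, durations)
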
435 $ rt --list-tests
435 $ rt --list-tests
436 test-failure-unicode.t
436 test-failure-unicode.t
437 test-failure.t
437 test-failure.t
438 test-success.t
438 test-success.t
439
439
440 $ rt --list-tests --json
440 $ rt --list-tests --json
441 test-failure-unicode.t
441 test-failure-unicode.t
442 test-failure.t
442 test-failure.t
443 test-success.t
443 test-success.t
444 $ cat report.json
444 $ cat report.json
445 testreport ={
445 testreport ={
446 "test-failure-unicode.t": {
446 "test-failure-unicode.t": {
447 "result": "success"
447 "result": "success"
448 },
448 },
449 "test-failure.t": {
449 "test-failure.t": {
450 "result": "success"
450 "result": "success"
451 },
451 },
452 "test-success.t": {
452 "test-success.t": {
453 "result": "success"
453 "result": "success"
454 }
454 }
455 } (no-eol)
455 } (no-eol)
456
456
457 $ rt --list-tests --xunit=xunit.xml
457 $ rt --list-tests --xunit=xunit.xml
458 test-failure-unicode.t
458 test-failure-unicode.t
459 test-failure.t
459 test-failure.t
460 test-success.t
460 test-success.t
461 $ cat xunit.xml
461 $ cat xunit.xml
462 <?xml version="1.0" encoding="utf-8"?>
462 <?xml version="1.0" encoding="utf-8"?>
463 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
463 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
464 <testcase name="test-failure-unicode.t"/>
464 <testcase name="test-failure-unicode.t"/>
465 <testcase name="test-failure.t"/>
465 <testcase name="test-failure.t"/>
466 <testcase name="test-success.t"/>
466 <testcase name="test-success.t"/>
467 </testsuite>
467 </testsuite>
468
468
469 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
469 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
470 test-failure-unicode.t
470 test-failure-unicode.t
471 test-failure.t
471 test-failure.t
472 $ cat output/report.json
472 $ cat output/report.json
473 testreport ={
473 testreport ={
474 "test-failure-unicode.t": {
474 "test-failure-unicode.t": {
475 "result": "success"
475 "result": "success"
476 },
476 },
477 "test-failure.t": {
477 "test-failure.t": {
478 "result": "success"
478 "result": "success"
479 }
479 }
480 } (no-eol)
480 } (no-eol)
481 $ cat xunit.xml
481 $ cat xunit.xml
482 <?xml version="1.0" encoding="utf-8"?>
482 <?xml version="1.0" encoding="utf-8"?>
483 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
483 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
484 <testcase name="test-failure-unicode.t"/>
484 <testcase name="test-failure-unicode.t"/>
485 <testcase name="test-failure.t"/>
485 <testcase name="test-failure.t"/>
486 </testsuite>
486 </testsuite>
487
487
488 $ rm test-failure-unicode.t
488 $ rm test-failure-unicode.t
489
489
490 test for --retest
490 test for --retest
491 ====================
491 ====================
492
492
493 $ rt --retest
493 $ rt --retest
494 running 2 tests using 1 parallel processes
494 running 2 tests using 1 parallel processes
495
495
496 --- $TESTTMP/test-failure.t
496 --- $TESTTMP/test-failure.t
497 +++ $TESTTMP/test-failure.t.err
497 +++ $TESTTMP/test-failure.t.err
498 @@ -1,5 +1,5 @@
498 @@ -1,5 +1,5 @@
499 $ echo babar
499 $ echo babar
500 - rataxes
500 - rataxes
501 + babar
501 + babar
502 This is a noop statement so that
502 This is a noop statement so that
503 this test is still more bytes than success.
503 this test is still more bytes than success.
504 pad pad pad pad............................................................
504 pad pad pad pad............................................................
505
505
506 ERROR: test-failure.t output changed
506 ERROR: test-failure.t output changed
507 !
507 !
508 Failed test-failure.t: output changed
508 Failed test-failure.t: output changed
509 # Ran 2 tests, 1 skipped, 1 failed.
509 # Ran 2 tests, 1 skipped, 1 failed.
510 python hash seed: * (glob)
510 python hash seed: * (glob)
511 [1]
511 [1]
512
512
513 --retest works with --outputdir
513 --retest works with --outputdir
514 $ rm -r output
514 $ rm -r output
515 $ mkdir output
515 $ mkdir output
516 $ mv test-failure.t.err output
516 $ mv test-failure.t.err output
517 $ rt --retest --outputdir output
517 $ rt --retest --outputdir output
518 running 2 tests using 1 parallel processes
518 running 2 tests using 1 parallel processes
519
519
520 --- $TESTTMP/test-failure.t
520 --- $TESTTMP/test-failure.t
521 +++ $TESTTMP/output/test-failure.t.err
521 +++ $TESTTMP/output/test-failure.t.err
522 @@ -1,5 +1,5 @@
522 @@ -1,5 +1,5 @@
523 $ echo babar
523 $ echo babar
524 - rataxes
524 - rataxes
525 + babar
525 + babar
526 This is a noop statement so that
526 This is a noop statement so that
527 this test is still more bytes than success.
527 this test is still more bytes than success.
528 pad pad pad pad............................................................
528 pad pad pad pad............................................................
529
529
530 ERROR: test-failure.t output changed
530 ERROR: test-failure.t output changed
531 !
531 !
532 Failed test-failure.t: output changed
532 Failed test-failure.t: output changed
533 # Ran 2 tests, 1 skipped, 1 failed.
533 # Ran 2 tests, 1 skipped, 1 failed.
534 python hash seed: * (glob)
534 python hash seed: * (glob)
535 [1]
535 [1]
536
536
537 Selecting Tests To Run
537 Selecting Tests To Run
538 ======================
538 ======================
539
539
540 successful
540 successful
541
541
542 $ rt test-success.t
542 $ rt test-success.t
543 running 1 tests using 1 parallel processes
543 running 1 tests using 1 parallel processes
544 .
544 .
545 # Ran 1 tests, 0 skipped, 0 failed.
545 # Ran 1 tests, 0 skipped, 0 failed.
546
546
547 success w/ keyword
547 success w/ keyword
548 $ rt -k xyzzy
548 $ rt -k xyzzy
549 running 2 tests using 1 parallel processes
549 running 2 tests using 1 parallel processes
550 .
550 .
551 # Ran 2 tests, 1 skipped, 0 failed.
551 # Ran 2 tests, 1 skipped, 0 failed.
552
552
553 failed
553 failed
554
554
555 $ rt test-failure.t
555 $ rt test-failure.t
556 running 1 tests using 1 parallel processes
556 running 1 tests using 1 parallel processes
557
557
558 --- $TESTTMP/test-failure.t
558 --- $TESTTMP/test-failure.t
559 +++ $TESTTMP/test-failure.t.err
559 +++ $TESTTMP/test-failure.t.err
560 @@ -1,5 +1,5 @@
560 @@ -1,5 +1,5 @@
561 $ echo babar
561 $ echo babar
562 - rataxes
562 - rataxes
563 + babar
563 + babar
564 This is a noop statement so that
564 This is a noop statement so that
565 this test is still more bytes than success.
565 this test is still more bytes than success.
566 pad pad pad pad............................................................
566 pad pad pad pad............................................................
567
567
568 ERROR: test-failure.t output changed
568 ERROR: test-failure.t output changed
569 !
569 !
570 Failed test-failure.t: output changed
570 Failed test-failure.t: output changed
571 # Ran 1 tests, 0 skipped, 1 failed.
571 # Ran 1 tests, 0 skipped, 1 failed.
572 python hash seed: * (glob)
572 python hash seed: * (glob)
573 [1]
573 [1]
574
574
575 failure w/ keyword
575 failure w/ keyword
576 $ rt -k rataxes
576 $ rt -k rataxes
577 running 2 tests using 1 parallel processes
577 running 2 tests using 1 parallel processes
578
578
579 --- $TESTTMP/test-failure.t
579 --- $TESTTMP/test-failure.t
580 +++ $TESTTMP/test-failure.t.err
580 +++ $TESTTMP/test-failure.t.err
581 @@ -1,5 +1,5 @@
581 @@ -1,5 +1,5 @@
582 $ echo babar
582 $ echo babar
583 - rataxes
583 - rataxes
584 + babar
584 + babar
585 This is a noop statement so that
585 This is a noop statement so that
586 this test is still more bytes than success.
586 this test is still more bytes than success.
587 pad pad pad pad............................................................
587 pad pad pad pad............................................................
588
588
589 ERROR: test-failure.t output changed
589 ERROR: test-failure.t output changed
590 !
590 !
591 Failed test-failure.t: output changed
591 Failed test-failure.t: output changed
592 # Ran 2 tests, 1 skipped, 1 failed.
592 # Ran 2 tests, 1 skipped, 1 failed.
593 python hash seed: * (glob)
593 python hash seed: * (glob)
594 [1]
594 [1]
595
595
596 Verify that when a process fails to start we show a useful message
596 Verify that when a process fails to start we show a useful message
597 ==================================================================
597 ==================================================================
598
598
599 $ cat > test-serve-fail.t <<EOF
599 $ cat > test-serve-fail.t <<EOF
600 > $ echo 'abort: child process failed to start blah'
600 > $ echo 'abort: child process failed to start blah'
601 > EOF
601 > EOF
602 $ rt test-serve-fail.t
602 $ rt test-serve-fail.t
603 running 1 tests using 1 parallel processes
603 running 1 tests using 1 parallel processes
604
604
605 --- $TESTTMP/test-serve-fail.t
605 --- $TESTTMP/test-serve-fail.t
606 +++ $TESTTMP/test-serve-fail.t.err
606 +++ $TESTTMP/test-serve-fail.t.err
607 @@ -1* +1,2 @@ (glob)
607 @@ -1* +1,2 @@ (glob)
608 $ echo 'abort: child process failed to start blah'
608 $ echo 'abort: child process failed to start blah'
609 + abort: child process failed to start blah
609 + abort: child process failed to start blah
610
610
611 ERROR: test-serve-fail.t output changed
611 ERROR: test-serve-fail.t output changed
612 !
612 !
613 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
613 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
614 # Ran 1 tests, 0 skipped, 1 failed.
614 # Ran 1 tests, 0 skipped, 1 failed.
615 python hash seed: * (glob)
615 python hash seed: * (glob)
616 [1]
616 [1]
617 $ rm test-serve-fail.t
617 $ rm test-serve-fail.t
618
618
619 Verify that we can try other ports
619 Verify that we can try other ports
620 ===================================
620 ===================================
621
621
622 Extensions aren't inherited by the invoked run-tests.py. An extension
622 Extensions aren't inherited by the invoked run-tests.py. An extension
623 introducing a repository requirement could cause this to fail. So we clear
623 introducing a repository requirement could cause this to fail. So we clear
624 HGRCPATH to get a clean environment.
624 HGRCPATH to get a clean environment.
625
625
626 $ HGRCPATH= hg init inuse
626 $ HGRCPATH= hg init inuse
627 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
627 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
628 $ cat blocks.pid >> $DAEMON_PIDS
628 $ cat blocks.pid >> $DAEMON_PIDS
629 $ cat > test-serve-inuse.t <<EOF
629 $ cat > test-serve-inuse.t <<EOF
630 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
630 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
631 > $ cat hg.pid >> \$DAEMON_PIDS
631 > $ cat hg.pid >> \$DAEMON_PIDS
632 > EOF
632 > EOF
633 $ rt test-serve-inuse.t
633 $ rt test-serve-inuse.t
634 running 1 tests using 1 parallel processes
634 running 1 tests using 1 parallel processes
635 .
635 .
636 # Ran 1 tests, 0 skipped, 0 failed.
636 # Ran 1 tests, 0 skipped, 0 failed.
637 $ rm test-serve-inuse.t
637 $ rm test-serve-inuse.t
638 $ killdaemons.py $DAEMON_PIDS
638 $ killdaemons.py $DAEMON_PIDS
639
639
640 Running In Debug Mode
640 Running In Debug Mode
641 ======================
641 ======================
642
642
643 $ rt --debug 2>&1 | grep -v pwd
643 $ rt --debug 2>&1 | grep -v pwd
644 running 2 tests using 1 parallel processes
644 running 2 tests using 1 parallel processes
645 + echo *SALT* 0 0 (glob)
645 + echo *SALT* 0 0 (glob)
646 *SALT* 0 0 (glob)
646 *SALT* 0 0 (glob)
647 + echo babar
647 + echo babar
648 babar
648 babar
649 + echo *SALT* 10 0 (glob)
649 + echo *SALT* 10 0 (glob)
650 *SALT* 10 0 (glob)
650 *SALT* 10 0 (glob)
651 *+ echo *SALT* 0 0 (glob)
651 *+ echo *SALT* 0 0 (glob)
652 *SALT* 0 0 (glob)
652 *SALT* 0 0 (glob)
653 + echo babar
653 + echo babar
654 babar
654 babar
655 + echo *SALT* 2 0 (glob)
655 + echo *SALT* 2 0 (glob)
656 *SALT* 2 0 (glob)
656 *SALT* 2 0 (glob)
657 + echo xyzzy
657 + echo xyzzy
658 xyzzy
658 xyzzy
659 + echo *SALT* 9 0 (glob)
659 + echo *SALT* 9 0 (glob)
660 *SALT* 9 0 (glob)
660 *SALT* 9 0 (glob)
661 + printf *abc\ndef\nxyz\n* (glob)
661 + printf *abc\ndef\nxyz\n* (glob)
662 abc
662 abc
663 def
663 def
664 xyz
664 xyz
665 + echo *SALT* 15 0 (glob)
665 + echo *SALT* 15 0 (glob)
666 *SALT* 15 0 (glob)
666 *SALT* 15 0 (glob)
667 + printf *zyx\nwvu\ntsr\n* (glob)
667 + printf *zyx\nwvu\ntsr\n* (glob)
668 zyx
668 zyx
669 wvu
669 wvu
670 tsr
670 tsr
671 + echo *SALT* 22 0 (glob)
671 + echo *SALT* 22 0 (glob)
672 *SALT* 22 0 (glob)
672 *SALT* 22 0 (glob)
673 .
673 .
674 # Ran 2 tests, 0 skipped, 0 failed.
674 # Ran 2 tests, 0 skipped, 0 failed.
675
675
676 Parallel runs
676 Parallel runs
677 ==============
677 ==============
678
678
679 (duplicate the failing test to get predictable output)
679 (duplicate the failing test to get predictable output)
680 $ cp test-failure.t test-failure-copy.t
680 $ cp test-failure.t test-failure-copy.t
681
681
682 $ rt --jobs 2 test-failure*.t -n
682 $ rt --jobs 2 test-failure*.t -n
683 running 2 tests using 2 parallel processes
683 running 2 tests using 2 parallel processes
684 !!
684 !!
685 Failed test-failure*.t: output changed (glob)
685 Failed test-failure*.t: output changed (glob)
686 Failed test-failure*.t: output changed (glob)
686 Failed test-failure*.t: output changed (glob)
687 # Ran 2 tests, 0 skipped, 2 failed.
687 # Ran 2 tests, 0 skipped, 2 failed.
688 python hash seed: * (glob)
688 python hash seed: * (glob)
689 [1]
689 [1]
690
690
691 failures in parallel with --first should only print one failure
691 failures in parallel with --first should only print one failure
692 $ rt --jobs 2 --first test-failure*.t
692 $ rt --jobs 2 --first test-failure*.t
693 running 2 tests using 2 parallel processes
693 running 2 tests using 2 parallel processes
694
694
695 --- $TESTTMP/test-failure*.t (glob)
695 --- $TESTTMP/test-failure*.t (glob)
696 +++ $TESTTMP/test-failure*.t.err (glob)
696 +++ $TESTTMP/test-failure*.t.err (glob)
697 @@ -1,5 +1,5 @@
697 @@ -1,5 +1,5 @@
698 $ echo babar
698 $ echo babar
699 - rataxes
699 - rataxes
700 + babar
700 + babar
701 This is a noop statement so that
701 This is a noop statement so that
702 this test is still more bytes than success.
702 this test is still more bytes than success.
703 pad pad pad pad............................................................
703 pad pad pad pad............................................................
704
704
705 Failed test-failure*.t: output changed (glob)
705 Failed test-failure*.t: output changed (glob)
706 Failed test-failure*.t: output changed (glob)
706 Failed test-failure*.t: output changed (glob)
707 # Ran 2 tests, 0 skipped, 2 failed.
707 # Ran 2 tests, 0 skipped, 2 failed.
708 python hash seed: * (glob)
708 python hash seed: * (glob)
709 [1]
709 [1]
710
710
711
711
712 (delete the duplicated test file)
712 (delete the duplicated test file)
713 $ rm test-failure-copy.t
713 $ rm test-failure-copy.t
714
714
715
715
716 Interactive run
716 Interactive run
717 ===============
717 ===============
718
718
719 (backup the failing test)
719 (backup the failing test)
720 $ cp test-failure.t backup
720 $ cp test-failure.t backup
721
721
722 Refuse the fix
722 Refuse the fix
723
723
724 $ echo 'n' | rt -i
724 $ echo 'n' | rt -i
725 running 2 tests using 1 parallel processes
725 running 2 tests using 1 parallel processes
726
726
727 --- $TESTTMP/test-failure.t
727 --- $TESTTMP/test-failure.t
728 +++ $TESTTMP/test-failure.t.err
728 +++ $TESTTMP/test-failure.t.err
729 @@ -1,5 +1,5 @@
729 @@ -1,5 +1,5 @@
730 $ echo babar
730 $ echo babar
731 - rataxes
731 - rataxes
732 + babar
732 + babar
733 This is a noop statement so that
733 This is a noop statement so that
734 this test is still more bytes than success.
734 this test is still more bytes than success.
735 pad pad pad pad............................................................
735 pad pad pad pad............................................................
736 Accept this change? [n]
736 Accept this change? [n]
737 ERROR: test-failure.t output changed
737 ERROR: test-failure.t output changed
738 !.
738 !.
739 Failed test-failure.t: output changed
739 Failed test-failure.t: output changed
740 # Ran 2 tests, 0 skipped, 1 failed.
740 # Ran 2 tests, 0 skipped, 1 failed.
741 python hash seed: * (glob)
741 python hash seed: * (glob)
742 [1]
742 [1]
743
743
744 $ cat test-failure.t
744 $ cat test-failure.t
745 $ echo babar
745 $ echo babar
746 rataxes
746 rataxes
747 This is a noop statement so that
747 This is a noop statement so that
748 this test is still more bytes than success.
748 this test is still more bytes than success.
749 pad pad pad pad............................................................
749 pad pad pad pad............................................................
750 pad pad pad pad............................................................
750 pad pad pad pad............................................................
751 pad pad pad pad............................................................
751 pad pad pad pad............................................................
752 pad pad pad pad............................................................
752 pad pad pad pad............................................................
753 pad pad pad pad............................................................
753 pad pad pad pad............................................................
754 pad pad pad pad............................................................
754 pad pad pad pad............................................................
755
755
756 Interactive with custom view
756 Interactive with custom view
757
757
758 $ echo 'n' | rt -i --view echo
758 $ echo 'n' | rt -i --view echo
759 running 2 tests using 1 parallel processes
759 running 2 tests using 1 parallel processes
760 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
760 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
761 Accept this change? [n]* (glob)
761 Accept this change? [n]* (glob)
762 ERROR: test-failure.t output changed
762 ERROR: test-failure.t output changed
763 !.
763 !.
764 Failed test-failure.t: output changed
764 Failed test-failure.t: output changed
765 # Ran 2 tests, 0 skipped, 1 failed.
765 # Ran 2 tests, 0 skipped, 1 failed.
766 python hash seed: * (glob)
766 python hash seed: * (glob)
767 [1]
767 [1]
768
768
769 View the fix
769 View the fix
770
770
771 $ echo 'y' | rt --view echo
771 $ echo 'y' | rt --view echo
772 running 2 tests using 1 parallel processes
772 running 2 tests using 1 parallel processes
773 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
773 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
774
774
775 ERROR: test-failure.t output changed
775 ERROR: test-failure.t output changed
776 !.
776 !.
777 Failed test-failure.t: output changed
777 Failed test-failure.t: output changed
778 # Ran 2 tests, 0 skipped, 1 failed.
778 # Ran 2 tests, 0 skipped, 1 failed.
779 python hash seed: * (glob)
779 python hash seed: * (glob)
780 [1]
780 [1]
781
781
782 Accept the fix
782 Accept the fix
783
783
784 $ cat >> test-failure.t <<EOF
784 $ cat >> test-failure.t <<EOF
785 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
785 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
786 > saved backup bundle to \$TESTTMP/foo.hg
786 > saved backup bundle to \$TESTTMP/foo.hg
787 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
787 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
788 > saved backup bundle to $TESTTMP\\foo.hg
788 > saved backup bundle to $TESTTMP\\foo.hg
789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
789 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
790 > saved backup bundle to \$TESTTMP/*.hg (glob)
790 > saved backup bundle to \$TESTTMP/*.hg (glob)
791 > EOF
791 > EOF
792 $ echo 'y' | rt -i 2>&1
792 $ echo 'y' | rt -i 2>&1
793 running 2 tests using 1 parallel processes
793 running 2 tests using 1 parallel processes
794
794
795 --- $TESTTMP/test-failure.t
795 --- $TESTTMP/test-failure.t
796 +++ $TESTTMP/test-failure.t.err
796 +++ $TESTTMP/test-failure.t.err
797 @@ -1,5 +1,5 @@
797 @@ -1,5 +1,5 @@
798 $ echo babar
798 $ echo babar
799 - rataxes
799 - rataxes
800 + babar
800 + babar
801 This is a noop statement so that
801 This is a noop statement so that
802 this test is still more bytes than success.
802 this test is still more bytes than success.
803 pad pad pad pad............................................................
803 pad pad pad pad............................................................
804 @@ -11,6 +11,6 @@
804 @@ -11,6 +11,6 @@
805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 saved backup bundle to $TESTTMP/foo.hg
806 saved backup bundle to $TESTTMP/foo.hg
807 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
807 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
808 - saved backup bundle to $TESTTMP\foo.hg
808 - saved backup bundle to $TESTTMP\foo.hg
809 + saved backup bundle to $TESTTMP/foo.hg
809 + saved backup bundle to $TESTTMP/foo.hg
810 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
810 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
811 saved backup bundle to $TESTTMP/*.hg (glob)
811 saved backup bundle to $TESTTMP/*.hg (glob)
812 Accept this change? [n] ..
812 Accept this change? [n] ..
813 # Ran 2 tests, 0 skipped, 0 failed.
813 # Ran 2 tests, 0 skipped, 0 failed.
814
814
815 $ sed -e 's,(glob)$,&<,g' test-failure.t
815 $ sed -e 's,(glob)$,&<,g' test-failure.t
816 $ echo babar
816 $ echo babar
817 babar
817 babar
818 This is a noop statement so that
818 This is a noop statement so that
819 this test is still more bytes than success.
819 this test is still more bytes than success.
820 pad pad pad pad............................................................
820 pad pad pad pad............................................................
821 pad pad pad pad............................................................
821 pad pad pad pad............................................................
822 pad pad pad pad............................................................
822 pad pad pad pad............................................................
823 pad pad pad pad............................................................
823 pad pad pad pad............................................................
824 pad pad pad pad............................................................
824 pad pad pad pad............................................................
825 pad pad pad pad............................................................
825 pad pad pad pad............................................................
826 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
826 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
827 saved backup bundle to $TESTTMP/foo.hg
827 saved backup bundle to $TESTTMP/foo.hg
828 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
828 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
829 saved backup bundle to $TESTTMP/foo.hg
829 saved backup bundle to $TESTTMP/foo.hg
830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
830 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
831 saved backup bundle to $TESTTMP/*.hg (glob)<
831 saved backup bundle to $TESTTMP/*.hg (glob)<
832
832
833 Race condition - the test file was modified while the test was running
833 Race condition - the test file was modified while the test was running
834
834
835 $ TESTRACEDIR=`pwd`
835 $ TESTRACEDIR=`pwd`
836 $ export TESTRACEDIR
836 $ export TESTRACEDIR
837 $ cat > test-race.t <<EOF
837 $ cat > test-race.t <<EOF
838 > $ echo 1
838 > $ echo 1
839 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
839 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
840 > EOF
840 > EOF
841
841
842 $ rt -i test-race.t
842 $ rt -i test-race.t
843 running 1 tests using 1 parallel processes
843 running 1 tests using 1 parallel processes
844
844
845 --- $TESTTMP/test-race.t
845 --- $TESTTMP/test-race.t
846 +++ $TESTTMP/test-race.t.err
846 +++ $TESTTMP/test-race.t.err
847 @@ -1,2 +1,3 @@
847 @@ -1,2 +1,3 @@
848 $ echo 1
848 $ echo 1
849 + 1
849 + 1
850 $ echo "# a new line" >> $TESTTMP/test-race.t
850 $ echo "# a new line" >> $TESTTMP/test-race.t
851 Reference output has changed (run again to prompt changes)
851 Reference output has changed (run again to prompt changes)
852 ERROR: test-race.t output changed
852 ERROR: test-race.t output changed
853 !
853 !
854 Failed test-race.t: output changed
854 Failed test-race.t: output changed
855 # Ran 1 tests, 0 skipped, 1 failed.
855 # Ran 1 tests, 0 skipped, 1 failed.
856 python hash seed: * (glob)
856 python hash seed: * (glob)
857 [1]
857 [1]
858
858
859 $ rm test-race.t
859 $ rm test-race.t
860
860
861 When "#testcases" is used in .t files
861 When "#testcases" is used in .t files
862
862
863 $ cat >> test-cases.t <<EOF
863 $ cat >> test-cases.t <<EOF
864 > #testcases a b
864 > #testcases a b
865 > #if a
865 > #if a
866 > $ echo 1
866 > $ echo 1
867 > #endif
867 > #endif
868 > #if b
868 > #if b
869 > $ echo 2
869 > $ echo 2
870 > #endif
870 > #endif
871 > EOF
871 > EOF
872
872
873 $ cat <<EOF | rt -i test-cases.t 2>&1
873 $ cat <<EOF | rt -i test-cases.t 2>&1
874 > y
874 > y
875 > y
875 > y
876 > EOF
876 > EOF
877 running 2 tests using 1 parallel processes
877 running 2 tests using 1 parallel processes
878
878
879 --- $TESTTMP/test-cases.t
879 --- $TESTTMP/test-cases.t
880 +++ $TESTTMP/test-cases.t#a.err
880 +++ $TESTTMP/test-cases.t#a.err
881 @@ -1,6 +1,7 @@
881 @@ -1,6 +1,7 @@
882 #testcases a b
882 #testcases a b
883 #if a
883 #if a
884 $ echo 1
884 $ echo 1
885 + 1
885 + 1
886 #endif
886 #endif
887 #if b
887 #if b
888 $ echo 2
888 $ echo 2
889 Accept this change? [n] .
889 Accept this change? [n] .
890 --- $TESTTMP/test-cases.t
890 --- $TESTTMP/test-cases.t
891 +++ $TESTTMP/test-cases.t#b.err
891 +++ $TESTTMP/test-cases.t#b.err
892 @@ -5,4 +5,5 @@
892 @@ -5,4 +5,5 @@
893 #endif
893 #endif
894 #if b
894 #if b
895 $ echo 2
895 $ echo 2
896 + 2
896 + 2
897 #endif
897 #endif
898 Accept this change? [n] .
898 Accept this change? [n] .
899 # Ran 2 tests, 0 skipped, 0 failed.
899 # Ran 2 tests, 0 skipped, 0 failed.
900
900
901 $ cat test-cases.t
901 $ cat test-cases.t
902 #testcases a b
902 #testcases a b
903 #if a
903 #if a
904 $ echo 1
904 $ echo 1
905 1
905 1
906 #endif
906 #endif
907 #if b
907 #if b
908 $ echo 2
908 $ echo 2
909 2
909 2
910 #endif
910 #endif
911
911
912 $ cat >> test-cases.t <<'EOF'
912 $ cat >> test-cases.t <<'EOF'
913 > #if a
913 > #if a
914 > $ NAME=A
914 > $ NAME=A
915 > #else
915 > #else
916 > $ NAME=B
916 > $ NAME=B
917 > #endif
917 > #endif
918 > $ echo $NAME
918 > $ echo $NAME
919 > A (a !)
919 > A (a !)
920 > B (b !)
920 > B (b !)
921 > EOF
921 > EOF
922 $ rt test-cases.t
922 $ rt test-cases.t
923 running 2 tests using 1 parallel processes
923 running 2 tests using 1 parallel processes
924 ..
924 ..
925 # Ran 2 tests, 0 skipped, 0 failed.
925 # Ran 2 tests, 0 skipped, 0 failed.
926
926
927 When using multiple dimensions of "#testcases" in .t files
927 When using multiple dimensions of "#testcases" in .t files
928
928
929 $ cat > test-cases.t <<'EOF'
929 $ cat > test-cases.t <<'EOF'
930 > #testcases a b
930 > #testcases a b
931 > #testcases c d
931 > #testcases c d
932 > #if a d
932 > #if a d
933 > $ echo $TESTCASE
933 > $ echo $TESTCASE
934 > a#d
934 > a#d
935 > #endif
935 > #endif
936 > #if b c
936 > #if b c
937 > $ echo yes
937 > $ echo yes
938 > no
938 > no
939 > #endif
939 > #endif
940 > EOF
940 > EOF
941 $ rt test-cases.t
941 $ rt test-cases.t
942 running 4 tests using 1 parallel processes
942 running 4 tests using 1 parallel processes
943 ..
943 ..
944 --- $TESTTMP/test-cases.t
944 --- $TESTTMP/test-cases.t
945 +++ $TESTTMP/test-cases.t#b#c.err
945 +++ $TESTTMP/test-cases.t#b#c.err
946 @@ -6,5 +6,5 @@
946 @@ -6,5 +6,5 @@
947 #endif
947 #endif
948 #if b c
948 #if b c
949 $ echo yes
949 $ echo yes
950 - no
950 - no
951 + yes
951 + yes
952 #endif
952 #endif
953
953
954 ERROR: test-cases.t#b#c output changed
954 ERROR: test-cases.t#b#c output changed
955 !.
955 !.
956 Failed test-cases.t#b#c: output changed
956 Failed test-cases.t#b#c: output changed
957 # Ran 4 tests, 0 skipped, 1 failed.
957 # Ran 4 tests, 0 skipped, 1 failed.
958 python hash seed: * (glob)
958 python hash seed: * (glob)
959 [1]
959 [1]
960
960
961 $ rm test-cases.t#b#c.err
961 $ rm test-cases.t#b#c.err
962 $ rm test-cases.t
962 $ rm test-cases.t
963
963
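The four runs above come from the cross product of the two "#testcases" lines, and the "#"-joined names seen in the .err file and in $TESTCASE (test-cases.t#b#c, a#d, ...) follow from that product. As an illustration only, not run-tests.py's actual implementation, the expansion can be pictured like this:

  # illustration: two "#testcases" dimensions expand into four named runs
  import itertools

  dimensions = [['a', 'b'], ['c', 'd']]
  for combo in itertools.product(*dimensions):
      print('test-cases.t#' + '#'.join(combo))
  # prints test-cases.t#a#c, test-cases.t#a#d, test-cases.t#b#c, test-cases.t#b#d
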
964 (reinstall)
964 (reinstall)
965 $ mv backup test-failure.t
965 $ mv backup test-failure.t
966
966
967 No Diff
967 No Diff
968 ===============
968 ===============
969
969
970 $ rt --nodiff
970 $ rt --nodiff
971 running 2 tests using 1 parallel processes
971 running 2 tests using 1 parallel processes
972 !.
972 !.
973 Failed test-failure.t: output changed
973 Failed test-failure.t: output changed
974 # Ran 2 tests, 0 skipped, 1 failed.
974 # Ran 2 tests, 0 skipped, 1 failed.
975 python hash seed: * (glob)
975 python hash seed: * (glob)
976 [1]
976 [1]
977
977
978 test --tmpdir support
978 test --tmpdir support
979 $ rt --tmpdir=$TESTTMP/keep test-success.t
979 $ rt --tmpdir=$TESTTMP/keep test-success.t
980 running 1 tests using 1 parallel processes
980 running 1 tests using 1 parallel processes
981
981
982 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
982 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
983 Keeping threadtmp dir: $TESTTMP/keep/child1
983 Keeping threadtmp dir: $TESTTMP/keep/child1
984 .
984 .
985 # Ran 1 tests, 0 skipped, 0 failed.
985 # Ran 1 tests, 0 skipped, 0 failed.
986
986
987 timeouts
987 timeouts
988 ========
988 ========
989 $ cat > test-timeout.t <<EOF
989 $ cat > test-timeout.t <<EOF
990 > $ sleep 2
990 > $ sleep 2
991 > $ echo pass
991 > $ echo pass
992 > pass
992 > pass
993 > EOF
993 > EOF
994 > echo '#require slow' > test-slow-timeout.t
994 > echo '#require slow' > test-slow-timeout.t
995 > cat test-timeout.t >> test-slow-timeout.t
995 > cat test-timeout.t >> test-slow-timeout.t
996 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
996 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
997 running 2 tests using 1 parallel processes
997 running 2 tests using 1 parallel processes
998 st
998 st
999 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
999 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1000 Failed test-timeout.t: timed out
1000 Failed test-timeout.t: timed out
1001 # Ran 1 tests, 1 skipped, 1 failed.
1001 # Ran 1 tests, 1 skipped, 1 failed.
1002 python hash seed: * (glob)
1002 python hash seed: * (glob)
1003 [1]
1003 [1]
1004 $ rt --timeout=1 --slowtimeout=3 \
1004 $ rt --timeout=1 --slowtimeout=3 \
1005 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1005 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1006 running 2 tests using 1 parallel processes
1006 running 2 tests using 1 parallel processes
1007 .t
1007 .t
1008 Failed test-timeout.t: timed out
1008 Failed test-timeout.t: timed out
1009 # Ran 2 tests, 0 skipped, 1 failed.
1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 python hash seed: * (glob)
1010 python hash seed: * (glob)
1011 [1]
1011 [1]
1012 $ rm test-timeout.t test-slow-timeout.t
1012 $ rm test-timeout.t test-slow-timeout.t
1013
1013
1014 test for --time
1014 test for --time
1015 ==================
1015 ==================
1016
1016
1017 $ rt test-success.t --time
1017 $ rt test-success.t --time
1018 running 1 tests using 1 parallel processes
1018 running 1 tests using 1 parallel processes
1019 .
1019 .
1020 # Ran 1 tests, 0 skipped, 0 failed.
1020 # Ran 1 tests, 0 skipped, 0 failed.
1021 # Producing time report
1021 # Producing time report
1022 start end cuser csys real Test
1022 start end cuser csys real Test
1023 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1023 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1024
1024
1025 test for --time with --jobs enabled
1025 test for --time with --jobs enabled
1026 ====================================
1026 ====================================
1027
1027
1028 $ rt test-success.t --time --jobs 2
1028 $ rt test-success.t --time --jobs 2
1029 running 1 tests using 1 parallel processes
1029 running 1 tests using 1 parallel processes
1030 .
1030 .
1031 # Ran 1 tests, 0 skipped, 0 failed.
1031 # Ran 1 tests, 0 skipped, 0 failed.
1032 # Producing time report
1032 # Producing time report
1033 start end cuser csys real Test
1033 start end cuser csys real Test
1034 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1034 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1035
1035
1036 Skips
1036 Skips
1037 ================
1037 ================
1038 $ cat > test-skip.t <<EOF
1038 $ cat > test-skip.t <<EOF
1039 > $ echo xyzzy
1039 > $ echo xyzzy
1040 > #if true
1040 > #if true
1041 > #require false
1041 > #require false
1042 > #end
1042 > #end
1043 > EOF
1043 > EOF
1044 $ cat > test-noskip.t <<EOF
1044 $ cat > test-noskip.t <<EOF
1045 > #if false
1045 > #if false
1046 > #require false
1046 > #require false
1047 > #endif
1047 > #endif
1048 > EOF
1048 > EOF
1049 $ rt --nodiff
1049 $ rt --nodiff
1050 running 4 tests using 1 parallel processes
1050 running 4 tests using 1 parallel processes
1051 !.s.
1051 !.s.
1052 Skipped test-skip.t: missing feature: nail clipper
1052 Skipped test-skip.t: missing feature: nail clipper
1053 Failed test-failure.t: output changed
1053 Failed test-failure.t: output changed
1054 # Ran 3 tests, 1 skipped, 1 failed.
1054 # Ran 3 tests, 1 skipped, 1 failed.
1055 python hash seed: * (glob)
1055 python hash seed: * (glob)
1056 [1]
1056 [1]
1057
1057
1058 $ rm test-noskip.t
1058 $ rm test-noskip.t
1059 $ rt --keyword xyzzy
1059 $ rt --keyword xyzzy
1060 running 3 tests using 1 parallel processes
1060 running 3 tests using 1 parallel processes
1061 .s
1061 .s
1062 Skipped test-skip.t: missing feature: nail clipper
1062 Skipped test-skip.t: missing feature: nail clipper
1063 # Ran 2 tests, 2 skipped, 0 failed.
1063 # Ran 2 tests, 2 skipped, 0 failed.
1064
1064
1065 Skips with xml
1065 Skips with xml
1066 $ rt --keyword xyzzy \
1066 $ rt --keyword xyzzy \
1067 > --xunit=xunit.xml
1067 > --xunit=xunit.xml
1068 running 3 tests using 1 parallel processes
1068 running 3 tests using 1 parallel processes
1069 .s
1069 .s
1070 Skipped test-skip.t: missing feature: nail clipper
1070 Skipped test-skip.t: missing feature: nail clipper
1071 # Ran 2 tests, 2 skipped, 0 failed.
1071 # Ran 2 tests, 2 skipped, 0 failed.
1072 $ cat xunit.xml
1072 $ cat xunit.xml
1073 <?xml version="1.0" encoding="utf-8"?>
1073 <?xml version="1.0" encoding="utf-8"?>
1074 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1074 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1075 <testcase name="test-success.t" time="*"/> (glob)
1075 <testcase name="test-success.t" time="*"/> (glob)
1076 <testcase name="test-skip.t">
1076 <testcase name="test-skip.t">
1077 <skipped>
1077 <skipped>
1078 <![CDATA[missing feature: nail clipper]]> </skipped>
1078 <![CDATA[missing feature: nail clipper]]> </skipped>
1079 </testcase>
1079 </testcase>
1080 </testsuite>
1080 </testsuite>
1081
1081
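As with the failure case earlier, the skip reason survives in the XML. A minimal companion sketch (same assumptions as the earlier xunit reader) that pulls the skip reasons back out:

  # sketch: list skipped tests and their reasons from the xunit.xml shown above
  import xml.etree.ElementTree as ET

  suite = ET.parse('xunit.xml').getroot()
  for case in suite.findall('testcase'):
      skipped = case.find('skipped')
      if skipped is not None:
          print(case.get('name'), 'skipped:', (skipped.text or '').strip())
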
1082 Tests skipped because they are missing or blacklisted don't count as executed:
1082 Tests skipped because they are missing or blacklisted don't count as executed:
1083 $ echo test-failure.t > blacklist
1083 $ echo test-failure.t > blacklist
1084 $ rt --blacklist=blacklist --json\
1084 $ rt --blacklist=blacklist --json\
1085 > test-failure.t test-bogus.t
1085 > test-failure.t test-bogus.t
1086 running 2 tests using 1 parallel processes
1086 running 2 tests using 1 parallel processes
1087 ss
1087 ss
1088 Skipped test-bogus.t: Doesn't exist
1088 Skipped test-bogus.t: Doesn't exist
1089 Skipped test-failure.t: blacklisted
1089 Skipped test-failure.t: blacklisted
1090 # Ran 0 tests, 2 skipped, 0 failed.
1090 # Ran 0 tests, 2 skipped, 0 failed.
1091 $ cat report.json
1091 $ cat report.json
1092 testreport ={
1092 testreport ={
1093 "test-bogus.t": {
1093 "test-bogus.t": {
1094 "result": "skip"
1094 "result": "skip"
1095 },
1095 },
1096 "test-failure.t": {
1096 "test-failure.t": {
1097 "result": "skip"
1097 "result": "skip"
1098 }
1098 }
1099 } (no-eol)
1099 } (no-eol)
1100
1100
1101 Whitelist trumps blacklist
1101 Whitelist trumps blacklist
1102 $ echo test-failure.t > whitelist
1102 $ echo test-failure.t > whitelist
1103 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1103 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1104 > test-failure.t test-bogus.t
1104 > test-failure.t test-bogus.t
1105 running 2 tests using 1 parallel processes
1105 running 2 tests using 1 parallel processes
1106 s
1106 s
1107 --- $TESTTMP/test-failure.t
1107 --- $TESTTMP/test-failure.t
1108 +++ $TESTTMP/test-failure.t.err
1108 +++ $TESTTMP/test-failure.t.err
1109 @@ -1,5 +1,5 @@
1109 @@ -1,5 +1,5 @@
1110 $ echo babar
1110 $ echo babar
1111 - rataxes
1111 - rataxes
1112 + babar
1112 + babar
1113 This is a noop statement so that
1113 This is a noop statement so that
1114 this test is still more bytes than success.
1114 this test is still more bytes than success.
1115 pad pad pad pad............................................................
1115 pad pad pad pad............................................................
1116
1116
1117 ERROR: test-failure.t output changed
1117 ERROR: test-failure.t output changed
1118 !
1118 !
1119 Skipped test-bogus.t: Doesn't exist
1119 Skipped test-bogus.t: Doesn't exist
1120 Failed test-failure.t: output changed
1120 Failed test-failure.t: output changed
1121 # Ran 1 tests, 1 skipped, 1 failed.
1121 # Ran 1 tests, 1 skipped, 1 failed.
1122 python hash seed: * (glob)
1122 python hash seed: * (glob)
1123 [1]
1123 [1]
1124
1124
1125 Ensure that --test-list causes only the tests listed in that file to
1125 Ensure that --test-list causes only the tests listed in that file to
1126 be executed.
1126 be executed.
1127 $ echo test-success.t >> onlytest
1127 $ echo test-success.t >> onlytest
1128 $ rt --test-list=onlytest
1128 $ rt --test-list=onlytest
1129 running 1 tests using 1 parallel processes
1129 running 1 tests using 1 parallel processes
1130 .
1130 .
1131 # Ran 1 tests, 0 skipped, 0 failed.
1131 # Ran 1 tests, 0 skipped, 0 failed.
1132 $ echo test-bogus.t >> anothertest
1132 $ echo test-bogus.t >> anothertest
1133 $ rt --test-list=onlytest --test-list=anothertest
1133 $ rt --test-list=onlytest --test-list=anothertest
1134 running 2 tests using 1 parallel processes
1134 running 2 tests using 1 parallel processes
1135 s.
1135 s.
1136 Skipped test-bogus.t: Doesn't exist
1136 Skipped test-bogus.t: Doesn't exist
1137 # Ran 1 tests, 1 skipped, 0 failed.
1137 # Ran 1 tests, 1 skipped, 0 failed.
1138 $ rm onlytest anothertest
1138 $ rm onlytest anothertest
1139
1139
1140 test for --json
1140 test for --json
1141 ==================
1141 ==================
1142
1142
1143 $ rt --json
1143 $ rt --json
1144 running 3 tests using 1 parallel processes
1144 running 3 tests using 1 parallel processes
1145
1145
1146 --- $TESTTMP/test-failure.t
1146 --- $TESTTMP/test-failure.t
1147 +++ $TESTTMP/test-failure.t.err
1147 +++ $TESTTMP/test-failure.t.err
1148 @@ -1,5 +1,5 @@
1148 @@ -1,5 +1,5 @@
1149 $ echo babar
1149 $ echo babar
1150 - rataxes
1150 - rataxes
1151 + babar
1151 + babar
1152 This is a noop statement so that
1152 This is a noop statement so that
1153 this test is still more bytes than success.
1153 this test is still more bytes than success.
1154 pad pad pad pad............................................................
1154 pad pad pad pad............................................................
1155
1155
1156 ERROR: test-failure.t output changed
1156 ERROR: test-failure.t output changed
1157 !.s
1157 !.s
1158 Skipped test-skip.t: missing feature: nail clipper
1158 Skipped test-skip.t: missing feature: nail clipper
1159 Failed test-failure.t: output changed
1159 Failed test-failure.t: output changed
1160 # Ran 2 tests, 1 skipped, 1 failed.
1160 # Ran 2 tests, 1 skipped, 1 failed.
1161 python hash seed: * (glob)
1161 python hash seed: * (glob)
1162 [1]
1162 [1]
1163
1163
1164 $ cat report.json
1164 $ cat report.json
1165 testreport ={
1165 testreport ={
1166 "test-failure.t": [\{] (re)
1166 "test-failure.t": [\{] (re)
1167 "csys": "\s*[\d\.]{4,5}", ? (re)
1167 "csys": "\s*[\d\.]{4,5}", ? (re)
1168 "cuser": "\s*[\d\.]{4,5}", ? (re)
1168 "cuser": "\s*[\d\.]{4,5}", ? (re)
1169 "diff": "---.+\+\+\+.+", ? (re)
1169 "diff": "---.+\+\+\+.+", ? (re)
1170 "end": "\s*[\d\.]{4,5}", ? (re)
1170 "end": "\s*[\d\.]{4,5}", ? (re)
1171 "result": "failure", ? (re)
1171 "result": "failure", ? (re)
1172 "start": "\s*[\d\.]{4,5}", ? (re)
1172 "start": "\s*[\d\.]{4,5}", ? (re)
1173 "time": "\s*[\d\.]{4,5}" (re)
1173 "time": "\s*[\d\.]{4,5}" (re)
1174 }, ? (re)
1174 }, ? (re)
1175 "test-skip.t": {
1175 "test-skip.t": {
1176 "csys": "\s*[\d\.]{4,5}", ? (re)
1176 "csys": "\s*[\d\.]{4,5}", ? (re)
1177 "cuser": "\s*[\d\.]{4,5}", ? (re)
1177 "cuser": "\s*[\d\.]{4,5}", ? (re)
1178 "diff": "", ? (re)
1178 "diff": "", ? (re)
1179 "end": "\s*[\d\.]{4,5}", ? (re)
1179 "end": "\s*[\d\.]{4,5}", ? (re)
1180 "result": "skip", ? (re)
1180 "result": "skip", ? (re)
1181 "start": "\s*[\d\.]{4,5}", ? (re)
1181 "start": "\s*[\d\.]{4,5}", ? (re)
1182 "time": "\s*[\d\.]{4,5}" (re)
1182 "time": "\s*[\d\.]{4,5}" (re)
1183 }, ? (re)
1183 }, ? (re)
1184 "test-success.t": [\{] (re)
1184 "test-success.t": [\{] (re)
1185 "csys": "\s*[\d\.]{4,5}", ? (re)
1185 "csys": "\s*[\d\.]{4,5}", ? (re)
1186 "cuser": "\s*[\d\.]{4,5}", ? (re)
1186 "cuser": "\s*[\d\.]{4,5}", ? (re)
1187 "diff": "", ? (re)
1187 "diff": "", ? (re)
1188 "end": "\s*[\d\.]{4,5}", ? (re)
1188 "end": "\s*[\d\.]{4,5}", ? (re)
1189 "result": "success", ? (re)
1189 "result": "success", ? (re)
1190 "start": "\s*[\d\.]{4,5}", ? (re)
1190 "start": "\s*[\d\.]{4,5}", ? (re)
1191 "time": "\s*[\d\.]{4,5}" (re)
1191 "time": "\s*[\d\.]{4,5}" (re)
1192 }
1192 }
1193 } (no-eol)
1193 } (no-eol)
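
Not part of the test: a short sketch of how a tool might consume a report.json written as above. The file is not bare JSON; the literal "testreport =" prefix shown above has to be stripped first (that much is visible in the output; treating the remainder as plain JSON is an assumption).

  # sketch: load a run-tests.py report.json (strip the leading "testreport =" first)
  import json

  with open('report.json') as fh:
      text = fh.read()
  report = json.loads(text.split('=', 1)[1])
  for name, info in sorted(report.items()):
      print(name, info['result'], info.get('time', ''))
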
1194 --json with --outputdir
1194 --json with --outputdir
1195
1195
1196 $ rm report.json
1196 $ rm report.json
1197 $ rm -r output
1197 $ rm -r output
1198 $ mkdir output
1198 $ mkdir output
1199 $ rt --json --outputdir output
1199 $ rt --json --outputdir output
1200 running 3 tests using 1 parallel processes
1200 running 3 tests using 1 parallel processes
1201
1201
1202 --- $TESTTMP/test-failure.t
1202 --- $TESTTMP/test-failure.t
1203 +++ $TESTTMP/output/test-failure.t.err
1203 +++ $TESTTMP/output/test-failure.t.err
1204 @@ -1,5 +1,5 @@
1204 @@ -1,5 +1,5 @@
1205 $ echo babar
1205 $ echo babar
1206 - rataxes
1206 - rataxes
1207 + babar
1207 + babar
1208 This is a noop statement so that
1208 This is a noop statement so that
1209 this test is still more bytes than success.
1209 this test is still more bytes than success.
1210 pad pad pad pad............................................................
1210 pad pad pad pad............................................................
1211
1211
1212 ERROR: test-failure.t output changed
1212 ERROR: test-failure.t output changed
1213 !.s
1213 !.s
1214 Skipped test-skip.t: missing feature: nail clipper
1214 Skipped test-skip.t: missing feature: nail clipper
1215 Failed test-failure.t: output changed
1215 Failed test-failure.t: output changed
1216 # Ran 2 tests, 1 skipped, 1 failed.
1216 # Ran 2 tests, 1 skipped, 1 failed.
1217 python hash seed: * (glob)
1217 python hash seed: * (glob)
1218 [1]
1218 [1]
1219 $ f report.json
1219 $ f report.json
1220 report.json: file not found
1220 report.json: file not found
1221 $ cat output/report.json
1221 $ cat output/report.json
1222 testreport ={
1222 testreport ={
1223 "test-failure.t": [\{] (re)
1223 "test-failure.t": [\{] (re)
1224 "csys": "\s*[\d\.]{4,5}", ? (re)
1224 "csys": "\s*[\d\.]{4,5}", ? (re)
1225 "cuser": "\s*[\d\.]{4,5}", ? (re)
1225 "cuser": "\s*[\d\.]{4,5}", ? (re)
1226 "diff": "---.+\+\+\+.+", ? (re)
1226 "diff": "---.+\+\+\+.+", ? (re)
1227 "end": "\s*[\d\.]{4,5}", ? (re)
1227 "end": "\s*[\d\.]{4,5}", ? (re)
1228 "result": "failure", ? (re)
1228 "result": "failure", ? (re)
1229 "start": "\s*[\d\.]{4,5}", ? (re)
1229 "start": "\s*[\d\.]{4,5}", ? (re)
1230 "time": "\s*[\d\.]{4,5}" (re)
1230 "time": "\s*[\d\.]{4,5}" (re)
1231 }, ? (re)
1231 }, ? (re)
1232 "test-skip.t": {
1232 "test-skip.t": {
1233 "csys": "\s*[\d\.]{4,5}", ? (re)
1233 "csys": "\s*[\d\.]{4,5}", ? (re)
1234 "cuser": "\s*[\d\.]{4,5}", ? (re)
1234 "cuser": "\s*[\d\.]{4,5}", ? (re)
1235 "diff": "", ? (re)
1235 "diff": "", ? (re)
1236 "end": "\s*[\d\.]{4,5}", ? (re)
1236 "end": "\s*[\d\.]{4,5}", ? (re)
1237 "result": "skip", ? (re)
1237 "result": "skip", ? (re)
1238 "start": "\s*[\d\.]{4,5}", ? (re)
1238 "start": "\s*[\d\.]{4,5}", ? (re)
1239 "time": "\s*[\d\.]{4,5}" (re)
1239 "time": "\s*[\d\.]{4,5}" (re)
1240 }, ? (re)
1240 }, ? (re)
1241 "test-success.t": [\{] (re)
1241 "test-success.t": [\{] (re)
1242 "csys": "\s*[\d\.]{4,5}", ? (re)
1242 "csys": "\s*[\d\.]{4,5}", ? (re)
1243 "cuser": "\s*[\d\.]{4,5}", ? (re)
1243 "cuser": "\s*[\d\.]{4,5}", ? (re)
1244 "diff": "", ? (re)
1244 "diff": "", ? (re)
1245 "end": "\s*[\d\.]{4,5}", ? (re)
1245 "end": "\s*[\d\.]{4,5}", ? (re)
1246 "result": "success", ? (re)
1246 "result": "success", ? (re)
1247 "start": "\s*[\d\.]{4,5}", ? (re)
1247 "start": "\s*[\d\.]{4,5}", ? (re)
1248 "time": "\s*[\d\.]{4,5}" (re)
1248 "time": "\s*[\d\.]{4,5}" (re)
1249 }
1249 }
1250 } (no-eol)
1250 } (no-eol)
1251 $ ls -a output
1251 $ ls -a output
1252 .
1252 .
1253 ..
1253 ..
1254 .testtimes
1254 .testtimes
1255 report.json
1255 report.json
1256 test-failure.t.err
1256 test-failure.t.err
1257
1257
1258 Test that a failed test accepted in interactive mode is properly reported:
1258 Test that a failed test accepted in interactive mode is properly reported:
1259
1259
1260 $ cp test-failure.t backup
1260 $ cp test-failure.t backup
1261 $ echo y | rt --json -i
1261 $ echo y | rt --json -i
1262 running 3 tests using 1 parallel processes
1262 running 3 tests using 1 parallel processes
1263
1263
1264 --- $TESTTMP/test-failure.t
1264 --- $TESTTMP/test-failure.t
1265 +++ $TESTTMP/test-failure.t.err
1265 +++ $TESTTMP/test-failure.t.err
1266 @@ -1,5 +1,5 @@
1266 @@ -1,5 +1,5 @@
1267 $ echo babar
1267 $ echo babar
1268 - rataxes
1268 - rataxes
1269 + babar
1269 + babar
1270 This is a noop statement so that
1270 This is a noop statement so that
1271 this test is still more bytes than success.
1271 this test is still more bytes than success.
1272 pad pad pad pad............................................................
1272 pad pad pad pad............................................................
1273 Accept this change? [n] ..s
1273 Accept this change? [n] ..s
1274 Skipped test-skip.t: missing feature: nail clipper
1274 Skipped test-skip.t: missing feature: nail clipper
1275 # Ran 2 tests, 1 skipped, 0 failed.
1275 # Ran 2 tests, 1 skipped, 0 failed.
1276
1276
1277 $ cat report.json
1277 $ cat report.json
1278 testreport ={
1278 testreport ={
1279 "test-failure.t": [\{] (re)
1279 "test-failure.t": [\{] (re)
1280 "csys": "\s*[\d\.]{4,5}", ? (re)
1280 "csys": "\s*[\d\.]{4,5}", ? (re)
1281 "cuser": "\s*[\d\.]{4,5}", ? (re)
1281 "cuser": "\s*[\d\.]{4,5}", ? (re)
1282 "diff": "", ? (re)
1282 "diff": "", ? (re)
1283 "end": "\s*[\d\.]{4,5}", ? (re)
1283 "end": "\s*[\d\.]{4,5}", ? (re)
1284 "result": "success", ? (re)
1284 "result": "success", ? (re)
1285 "start": "\s*[\d\.]{4,5}", ? (re)
1285 "start": "\s*[\d\.]{4,5}", ? (re)
1286 "time": "\s*[\d\.]{4,5}" (re)
1286 "time": "\s*[\d\.]{4,5}" (re)
1287 }, ? (re)
1287 }, ? (re)
1288 "test-skip.t": {
1288 "test-skip.t": {
1289 "csys": "\s*[\d\.]{4,5}", ? (re)
1289 "csys": "\s*[\d\.]{4,5}", ? (re)
1290 "cuser": "\s*[\d\.]{4,5}", ? (re)
1290 "cuser": "\s*[\d\.]{4,5}", ? (re)
1291 "diff": "", ? (re)
1291 "diff": "", ? (re)
1292 "end": "\s*[\d\.]{4,5}", ? (re)
1292 "end": "\s*[\d\.]{4,5}", ? (re)
1293 "result": "skip", ? (re)
1293 "result": "skip", ? (re)
1294 "start": "\s*[\d\.]{4,5}", ? (re)
1294 "start": "\s*[\d\.]{4,5}", ? (re)
1295 "time": "\s*[\d\.]{4,5}" (re)
1295 "time": "\s*[\d\.]{4,5}" (re)
1296 }, ? (re)
1296 }, ? (re)
1297 "test-success.t": [\{] (re)
1297 "test-success.t": [\{] (re)
1298 "csys": "\s*[\d\.]{4,5}", ? (re)
1298 "csys": "\s*[\d\.]{4,5}", ? (re)
1299 "cuser": "\s*[\d\.]{4,5}", ? (re)
1299 "cuser": "\s*[\d\.]{4,5}", ? (re)
1300 "diff": "", ? (re)
1300 "diff": "", ? (re)
1301 "end": "\s*[\d\.]{4,5}", ? (re)
1301 "end": "\s*[\d\.]{4,5}", ? (re)
1302 "result": "success", ? (re)
1302 "result": "success", ? (re)
1303 "start": "\s*[\d\.]{4,5}", ? (re)
1303 "start": "\s*[\d\.]{4,5}", ? (re)
1304 "time": "\s*[\d\.]{4,5}" (re)
1304 "time": "\s*[\d\.]{4,5}" (re)
1305 }
1305 }
1306 } (no-eol)
1306 } (no-eol)
1307 $ mv backup test-failure.t
1307 $ mv backup test-failure.t
1308
1308
1309 backslash at end of line with glob matching is handled properly
1309 backslash at end of line with glob matching is handled properly
1310
1310
1311 $ cat > test-glob-backslash.t << EOF
1311 $ cat > test-glob-backslash.t << EOF
1312 > $ echo 'foo bar \\'
1312 > $ echo 'foo bar \\'
1313 > foo * \ (glob)
1313 > foo * \ (glob)
1314 > EOF
1314 > EOF
1315
1315
1316 $ rt test-glob-backslash.t
1316 $ rt test-glob-backslash.t
1317 running 1 tests using 1 parallel processes
1317 running 1 tests using 1 parallel processes
1318 .
1318 .
1319 # Ran 1 tests, 0 skipped, 0 failed.
1319 # Ran 1 tests, 0 skipped, 0 failed.
1320
1320
1321 $ rm -f test-glob-backslash.t
1321 $ rm -f test-glob-backslash.t
1322
1322
1323 Test globbing of local IP addresses
1323 Test globbing of local IP addresses
1324 $ echo 172.16.18.1
1324 $ echo 172.16.18.1
1325 $LOCALIP (glob)
1325 $LOCALIP (glob)
1326 $ echo dead:beef::1
1326 $ echo dead:beef::1
1327 $LOCALIP (glob)
1327 $LOCALIP (glob)
1328
1328
1329 Test support for an external test formatter
1329 Test support for an external test formatter
1330 ===========================================
1330 ===========================================
1331
1331
1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1332 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1333 running 2 tests using 1 parallel processes
1333 running 2 tests using 1 parallel processes
1334
1334
1335 # Ran 2 tests, 0 skipped, 0 failed.
1335 # Ran 2 tests, 0 skipped, 0 failed.
1336 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1336 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1337 FAILURE! test-failure.t output changed
1337 FAILURE! test-failure.t output changed
1338 SUCCESS! test-success.t
1338 SUCCESS! test-success.t
1339 ON_END!
1339 ON_END!
1340
1340
1341 Test reusability for third party tools
1341 Test reusability for third party tools
1342 ======================================
1342 ======================================
1343
1343
1344 $ mkdir "$TESTTMP"/anothertests
1344 $ mkdir "$TESTTMP"/anothertests
1345 $ cd "$TESTTMP"/anothertests
1345 $ cd "$TESTTMP"/anothertests
1346
1346
1347 test that `run-tests.py` can execute hghave, even when it is not run from
1347 test that `run-tests.py` can execute hghave, even when it is not run from
1348 the Mercurial source tree.
1348 the Mercurial source tree.
1349
1349
1350 $ cat > test-hghave.t <<EOF
1350 $ cat > test-hghave.t <<EOF
1351 > #require true
1351 > #require true
1352 > $ echo foo
1352 > $ echo foo
1353 > foo
1353 > foo
1354 > EOF
1354 > EOF
1355 $ rt test-hghave.t
1355 $ rt test-hghave.t
1356 running 1 tests using 1 parallel processes
1356 running 1 tests using 1 parallel processes
1357 .
1357 .
1358 # Ran 1 tests, 0 skipped, 0 failed.
1358 # Ran 1 tests, 0 skipped, 0 failed.
1359
1359
1360 test that RUNTESTDIR refers to the directory in which the currently
1360 test that RUNTESTDIR refers to the directory in which the currently
1361 running `run-tests.py` is placed.
1361 running `run-tests.py` is placed.
1362
1362
1363 $ cat > test-runtestdir.t <<EOF
1363 $ cat > test-runtestdir.t <<EOF
1364 > - $TESTDIR, in which test-run-tests.t is placed
1364 > - $TESTDIR, in which test-run-tests.t is placed
1365 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1365 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1366 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1366 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1367 >
1367 >
1368 > #if windows
1368 > #if windows
1369 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1369 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1370 > #else
1370 > #else
1371 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1371 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1372 > #endif
1372 > #endif
1373 > If this prints a path, that means RUNTESTDIR didn't equal
1373 > If this prints a path, that means RUNTESTDIR didn't equal
1374 > TESTDIR as it should have.
1374 > TESTDIR as it should have.
1375 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1375 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1376 > This should print the start of check-code. If this passes but the
1376 > This should print the start of check-code. If this passes but the
1377 > previous check failed, that means we found a copy of check-code at whatever
1377 > previous check failed, that means we found a copy of check-code at whatever
1378 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1378 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1379 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1379 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1380 > #!USRBINENVPY
1380 > #!USRBINENVPY
1381 > #
1381 > #
1382 > # check-code - a style and portability checker for Mercurial
1382 > # check-code - a style and portability checker for Mercurial
1383 > EOF
1383 > EOF
1384 $ rt test-runtestdir.t
1384 $ rt test-runtestdir.t
1385 running 1 tests using 1 parallel processes
1385 running 1 tests using 1 parallel processes
1386 .
1386 .
1387 # Ran 1 tests, 0 skipped, 0 failed.
1387 # Ran 1 tests, 0 skipped, 0 failed.
1388
1388
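Put differently: $TESTDIR is the directory containing the .t file being executed, while $RUNTESTDIR is the directory containing the run-tests.py that is executing it, and the two only coincide when tests are run from inside the Mercurial source tree, which is what the scenario above verifies. A purely illustrative way to probe both from a test (the file name is made up; `test -f` succeeds silently in both cases):

  $ cat > test-where-am-i.t <<EOF
  > \$TESTDIR holds the test file itself:
  > $ test -f "\$TESTDIR"/test-where-am-i.t
  > \$RUNTESTDIR holds run-tests.py:
  > $ test -f "\$RUNTESTDIR"/run-tests.py
  > EOF
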
1389 #if execbit
1389 #if execbit
1390
1390
1391 test that TESTDIR is included in PATH
1391 test that TESTDIR is included in PATH
1392
1392
1393 $ cat > custom-command.sh <<EOF
1393 $ cat > custom-command.sh <<EOF
1394 > #!/bin/sh
1394 > #!/bin/sh
1395 > echo "hello world"
1395 > echo "hello world"
1396 > EOF
1396 > EOF
1397 $ chmod +x custom-command.sh
1397 $ chmod +x custom-command.sh
1398 $ cat > test-testdir-path.t <<EOF
1398 $ cat > test-testdir-path.t <<EOF
1399 > $ custom-command.sh
1399 > $ custom-command.sh
1400 > hello world
1400 > hello world
1401 > EOF
1401 > EOF
1402 $ rt test-testdir-path.t
1402 $ rt test-testdir-path.t
1403 running 1 tests using 1 parallel processes
1403 running 1 tests using 1 parallel processes
1404 .
1404 .
1405 # Ran 1 tests, 0 skipped, 0 failed.
1405 # Ran 1 tests, 0 skipped, 0 failed.
1406
1406
1407 #endif
1407 #endif
1408
1408
1409 test support for --allow-slow-tests
1409 test support for --allow-slow-tests
1410 $ cat > test-very-slow-test.t <<EOF
1410 $ cat > test-very-slow-test.t <<EOF
1411 > #require slow
1411 > #require slow
1412 > $ echo pass
1412 > $ echo pass
1413 > pass
1413 > pass
1414 > EOF
1414 > EOF
1415 $ rt test-very-slow-test.t
1415 $ rt test-very-slow-test.t
1416 running 1 tests using 1 parallel processes
1416 running 1 tests using 1 parallel processes
1417 s
1417 s
1418 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1418 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1419 # Ran 0 tests, 1 skipped, 0 failed.
1419 # Ran 0 tests, 1 skipped, 0 failed.
1420 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1420 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1421 running 1 tests using 1 parallel processes
1421 running 1 tests using 1 parallel processes
1422 .
1422 .
1423 # Ran 1 tests, 0 skipped, 0 failed.
1423 # Ran 1 tests, 0 skipped, 0 failed.
1424
1424
1425 support for running a test outside the current directory
1425 support for running a test outside the current directory
1426 $ mkdir nonlocal
1426 $ mkdir nonlocal
1427 $ cat > nonlocal/test-is-not-here.t << EOF
1427 $ cat > nonlocal/test-is-not-here.t << EOF
1428 > $ echo pass
1428 > $ echo pass
1429 > pass
1429 > pass
1430 > EOF
1430 > EOF
1431 $ rt nonlocal/test-is-not-here.t
1431 $ rt nonlocal/test-is-not-here.t
1432 running 1 tests using 1 parallel processes
1432 running 1 tests using 1 parallel processes
1433 .
1433 .
1434 # Ran 1 tests, 0 skipped, 0 failed.
1434 # Ran 1 tests, 0 skipped, 0 failed.
1435
1435
1436 support for automatically discovering tests if the argument is a folder
1436 support for automatically discovering tests if the argument is a folder
1437 $ mkdir tmp && cd tmp
1437 $ mkdir tmp && cd tmp
1438
1438
1439 $ cat > test-uno.t << EOF
1439 $ cat > test-uno.t << EOF
1440 > $ echo line
1440 > $ echo line
1441 > line
1441 > line
1442 > EOF
1442 > EOF
1443
1443
1444 $ cp test-uno.t test-dos.t
1444 $ cp test-uno.t test-dos.t
1445 $ cd ..
1445 $ cd ..
1446 $ cp -R tmp tmpp
1446 $ cp -R tmp tmpp
1447 $ cp tmp/test-uno.t test-solo.t
1447 $ cp tmp/test-uno.t test-solo.t
1448
1448
1449 $ rt tmp/ test-solo.t tmpp
1449 $ rt tmp/ test-solo.t tmpp
1450 running 5 tests using 1 parallel processes
1450 running 5 tests using 1 parallel processes
1451 .....
1451 .....
1452 # Ran 5 tests, 0 skipped, 0 failed.
1452 # Ran 5 tests, 0 skipped, 0 failed.
1453 $ rm -rf tmp tmpp
1453 $ rm -rf tmp tmpp
1454
1454
1455 support for running run-tests.py from another directory
1455 support for running run-tests.py from another directory
1456 $ mkdir tmp && cd tmp
1456 $ mkdir tmp && cd tmp
1457
1457
1458 $ cat > useful-file.sh << EOF
1458 $ cat > useful-file.sh << EOF
1459 > important command
1459 > important command
1460 > EOF
1460 > EOF
1461
1461
1462 $ cat > test-folder.t << EOF
1462 $ cat > test-folder.t << EOF
1463 > $ cat \$TESTDIR/useful-file.sh
1463 > $ cat \$TESTDIR/useful-file.sh
1464 > important command
1464 > important command
1465 > EOF
1465 > EOF
1466
1466
1467 $ cat > test-folder-fail.t << EOF
1467 $ cat > test-folder-fail.t << EOF
1468 > $ cat \$TESTDIR/useful-file.sh
1468 > $ cat \$TESTDIR/useful-file.sh
1469 > important commando
1469 > important commando
1470 > EOF
1470 > EOF
1471
1471
1472 $ cd ..
1472 $ cd ..
1473 $ rt tmp/test-*.t
1473 $ rt tmp/test-*.t
1474 running 2 tests using 1 parallel processes
1474 running 2 tests using 1 parallel processes
1475
1475
1476 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1476 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1477 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1477 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1478 @@ -1,2 +1,2 @@
1478 @@ -1,2 +1,2 @@
1479 $ cat $TESTDIR/useful-file.sh
1479 $ cat $TESTDIR/useful-file.sh
1480 - important commando
1480 - important commando
1481 + important command
1481 + important command
1482
1482
1483 ERROR: test-folder-fail.t output changed
1483 ERROR: test-folder-fail.t output changed
1484 !.
1484 !.
1485 Failed test-folder-fail.t: output changed
1485 Failed test-folder-fail.t: output changed
1486 # Ran 2 tests, 0 skipped, 1 failed.
1486 # Ran 2 tests, 0 skipped, 1 failed.
1487 python hash seed: * (glob)
1487 python hash seed: * (glob)
1488 [1]
1488 [1]
1489
1489
1490 support for bisecting failed tests automatically
1490 support for bisecting failed tests automatically
1491 $ hg init bisect
1491 $ hg init bisect
1492 $ cd bisect
1492 $ cd bisect
1493 $ cat >> test-bisect.t <<EOF
1493 $ cat >> test-bisect.t <<EOF
1494 > $ echo pass
1494 > $ echo pass
1495 > pass
1495 > pass
1496 > EOF
1496 > EOF
1497 $ hg add test-bisect.t
1497 $ hg add test-bisect.t
1498 $ hg ci -m 'good'
1498 $ hg ci -m 'good'
1499 $ cat >> test-bisect.t <<EOF
1499 $ cat >> test-bisect.t <<EOF
1500 > $ echo pass
1500 > $ echo pass
1501 > fail
1501 > fail
1502 > EOF
1502 > EOF
1503 $ hg ci -m 'bad'
1503 $ hg ci -m 'bad'
1504 $ rt --known-good-rev=0 test-bisect.t
1504 $ rt --known-good-rev=0 test-bisect.t
1505 running 1 tests using 1 parallel processes
1505 running 1 tests using 1 parallel processes
1506
1506
1507 --- $TESTTMP/anothertests/bisect/test-bisect.t
1507 --- $TESTTMP/anothertests/bisect/test-bisect.t
1508 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1508 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1509 @@ -1,4 +1,4 @@
1509 @@ -1,4 +1,4 @@
1510 $ echo pass
1510 $ echo pass
1511 pass
1511 pass
1512 $ echo pass
1512 $ echo pass
1513 - fail
1513 - fail
1514 + pass
1514 + pass
1515
1515
1516 ERROR: test-bisect.t output changed
1516 ERROR: test-bisect.t output changed
1517 !
1517 !
1518 Failed test-bisect.t: output changed
1518 Failed test-bisect.t: output changed
1519 test-bisect.t broken by 72cbf122d116 (bad)
1519 test-bisect.t broken by 72cbf122d116 (bad)
1520 # Ran 1 tests, 0 skipped, 1 failed.
1520 # Ran 1 tests, 0 skipped, 1 failed.
1521 python hash seed: * (glob)
1521 python hash seed: * (glob)
1522 [1]
1522 [1]
1523
1523
1524 $ cd ..
1524 $ cd ..
1525
1525
1526 support bisecting a separate repo
1526 support bisecting a separate repo
1527
1527
1528 $ hg init bisect-dependent
1528 $ hg init bisect-dependent
1529 $ cd bisect-dependent
1529 $ cd bisect-dependent
1530 $ cat > test-bisect-dependent.t <<EOF
1530 $ cat > test-bisect-dependent.t <<EOF
1531 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1531 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1532 > pass
1532 > pass
1533 > EOF
1533 > EOF
1534 $ hg commit -Am dependent test-bisect-dependent.t
1534 $ hg commit -Am dependent test-bisect-dependent.t
1535
1535
1536 $ rt --known-good-rev=0 test-bisect-dependent.t
1536 $ rt --known-good-rev=0 test-bisect-dependent.t
1537 running 1 tests using 1 parallel processes
1537 running 1 tests using 1 parallel processes
1538
1538
1539 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1539 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1540 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1540 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1541 @@ -1,2 +1,2 @@
1541 @@ -1,2 +1,2 @@
1542 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1542 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1543 - pass
1543 - pass
1544 + fail
1544 + fail
1545
1545
1546 ERROR: test-bisect-dependent.t output changed
1546 ERROR: test-bisect-dependent.t output changed
1547 !
1547 !
1548 Failed test-bisect-dependent.t: output changed
1548 Failed test-bisect-dependent.t: output changed
1549 Failed to identify failure point for test-bisect-dependent.t
1549 Failed to identify failure point for test-bisect-dependent.t
1550 # Ran 1 tests, 0 skipped, 1 failed.
1550 # Ran 1 tests, 0 skipped, 1 failed.
1551 python hash seed: * (glob)
1551 python hash seed: * (glob)
1552 [1]
1552 [1]
1553
1553
1554 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1554 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1555 usage: run-tests.py [options] [tests]
1555 usage: run-tests.py [options] [tests]
1556 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1556 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1557 [2]
1557 [2]
1558
1558
1559 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1559 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1560 running 1 tests using 1 parallel processes
1560 running 1 tests using 1 parallel processes
1561
1561
1562 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1562 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1563 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1563 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1564 @@ -1,2 +1,2 @@
1564 @@ -1,2 +1,2 @@
1565 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1565 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1566 - pass
1566 - pass
1567 + fail
1567 + fail
1568
1568
1569 ERROR: test-bisect-dependent.t output changed
1569 ERROR: test-bisect-dependent.t output changed
1570 !
1570 !
1571 Failed test-bisect-dependent.t: output changed
1571 Failed test-bisect-dependent.t: output changed
1572 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1572 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1573 # Ran 1 tests, 0 skipped, 1 failed.
1573 # Ran 1 tests, 0 skipped, 1 failed.
1574 python hash seed: * (glob)
1574 python hash seed: * (glob)
1575 [1]
1575 [1]
1576
1576
1577 $ cd ..
1577 $ cd ..
1578
1578
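Both bisect scenarios lean on the same machinery: given --known-good-rev, run-tests bisects the repository containing the test (or the repository named by --bisect-repo), rerunning the failing test at intermediate revisions until it can name the changeset that introduced the failure, as in the `broken by 72cbf122d116 (bad)` lines above. Typical invocations, sketched with an invented test name and repository path:

  $ rt --known-good-rev=0 test-mytest.t
  $ rt --known-good-rev=0 --bisect-repo=../other-repo test-mytest.t
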
1579 Test that a broken #if statement doesn't break run-tests threading.
1579 Test that a broken #if statement doesn't break run-tests threading.
1580 ==============================================================
1580 ==============================================================
1581 $ mkdir broken
1581 $ mkdir broken
1582 $ cd broken
1582 $ cd broken
1583 $ cat > test-broken.t <<EOF
1583 $ cat > test-broken.t <<EOF
1584 > true
1584 > true
1585 > #if notarealhghavefeature
1585 > #if notarealhghavefeature
1586 > $ false
1586 > $ false
1587 > #endif
1587 > #endif
1588 > EOF
1588 > EOF
1589 $ for f in 1 2 3 4 ; do
1589 $ for f in 1 2 3 4 ; do
1590 > cat > test-works-$f.t <<EOF
1590 > cat > test-works-$f.t <<EOF
1591 > This is test case $f
1591 > This is test case $f
1592 > $ sleep 1
1592 > $ sleep 1
1593 > EOF
1593 > EOF
1594 > done
1594 > done
1595 $ rt -j 2
1595 $ rt -j 2
1596 running 5 tests using 2 parallel processes
1596 running 5 tests using 2 parallel processes
1597 ....
1597 ....
1598 # Ran 5 tests, 0 skipped, 0 failed.
1598 # Ran 5 tests, 0 skipped, 0 failed.
1599 skipped: unknown feature: notarealhghavefeature
1599 skipped: unknown feature: notarealhghavefeature
1600
1600
1601 $ cd ..
1601 $ cd ..
1602 $ rm -rf broken
1602 $ rm -rf broken
1603
1603
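The -j 2 run above raises parallelism explicitly, while the external-formatter invocation at line 1332 pins -j1 so that its ON_START/SUCCESS/FAILURE ordering stays deterministic; when -j is omitted, run-tests falls back to a default worker count. Two sketched invocations with invented test names; HGTEST_JOBS is assumed here to be the environment variable behind the -j default:

  $ rt -j 4 test-one.t test-two.t test-three.t test-four.t
  $ HGTEST_JOBS=2 rt test-one.t test-two.t test-three.t test-four.t
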
1604 Test cases in .t files
1604 Test cases in .t files
1605 ======================
1605 ======================
1606 $ mkdir cases
1606 $ mkdir cases
1607 $ cd cases
1607 $ cd cases
1608 $ cat > test-cases-abc.t <<'EOF'
1608 $ cat > test-cases-abc.t <<'EOF'
1609 > #testcases A B C
1609 > #testcases A B C
1610 > $ V=B
1610 > $ V=B
1611 > #if A
1611 > #if A
1612 > $ V=A
1612 > $ V=A
1613 > #endif
1613 > #endif
1614 > #if C
1614 > #if C
1615 > $ V=C
1615 > $ V=C
1616 > #endif
1616 > #endif
1617 > $ echo $V | sed 's/A/C/'
1617 > $ echo $V | sed 's/A/C/'
1618 > C
1618 > C
1619 > #if C
1619 > #if C
1620 > $ [ $V = C ]
1620 > $ [ $V = C ]
1621 > #endif
1621 > #endif
1622 > #if A
1622 > #if A
1623 > $ [ $V = C ]
1623 > $ [ $V = C ]
1624 > [1]
1624 > [1]
1625 > #endif
1625 > #endif
1626 > #if no-C
1626 > #if no-C
1627 > $ [ $V = C ]
1627 > $ [ $V = C ]
1628 > [1]
1628 > [1]
1629 > #endif
1629 > #endif
1630 > $ [ $V = D ]
1630 > $ [ $V = D ]
1631 > [1]
1631 > [1]
1632 > EOF
1632 > EOF
1633 $ rt
1633 $ rt
1634 running 3 tests using 1 parallel processes
1634 running 3 tests using 1 parallel processes
1635 .
1635 .
1636 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1636 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1637 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1637 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1638 @@ -7,7 +7,7 @@
1638 @@ -7,7 +7,7 @@
1639 $ V=C
1639 $ V=C
1640 #endif
1640 #endif
1641 $ echo $V | sed 's/A/C/'
1641 $ echo $V | sed 's/A/C/'
1642 - C
1642 - C
1643 + B
1643 + B
1644 #if C
1644 #if C
1645 $ [ $V = C ]
1645 $ [ $V = C ]
1646 #endif
1646 #endif
1647
1647
1648 ERROR: test-cases-abc.t#B output changed
1648 ERROR: test-cases-abc.t#B output changed
1649 !.
1649 !.
1650 Failed test-cases-abc.t#B: output changed
1650 Failed test-cases-abc.t#B: output changed
1651 # Ran 3 tests, 0 skipped, 1 failed.
1651 # Ran 3 tests, 0 skipped, 1 failed.
1652 python hash seed: * (glob)
1652 python hash seed: * (glob)
1653 [1]
1653 [1]
1654
1654
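To unpack the failure above: #testcases A B C makes run-tests execute the file once per case, and each case name behaves like an hghave feature, so it can be tested with #if A, #if no-C, and so on. Only case B fails because the sed only rewrites A to C, so V stays B while the expected output says C. A hedged sketch of a variant in which every case passes:

  $ cat > test-cases-ok.t <<'EOF'
  > #testcases small big
  > #if small
  > $ echo small
  > small
  > #endif
  > #if no-small
  > $ echo big
  > big
  > #endif
  > EOF
  $ rt test-cases-ok.t
  running 2 tests using 1 parallel processes
  ..
  # Ran 2 tests, 0 skipped, 0 failed.
  $ rm -f test-cases-ok.t
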
1655 --restart works
1655 --restart works
1656
1656
1657 $ rt --restart
1657 $ rt --restart
1658 running 2 tests using 1 parallel processes
1658 running 2 tests using 1 parallel processes
1659
1659
1660 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1660 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1661 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1661 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1662 @@ -7,7 +7,7 @@
1662 @@ -7,7 +7,7 @@
1663 $ V=C
1663 $ V=C
1664 #endif
1664 #endif
1665 $ echo $V | sed 's/A/C/'
1665 $ echo $V | sed 's/A/C/'
1666 - C
1666 - C
1667 + B
1667 + B
1668 #if C
1668 #if C
1669 $ [ $V = C ]
1669 $ [ $V = C ]
1670 #endif
1670 #endif
1671
1671
1672 ERROR: test-cases-abc.t#B output changed
1672 ERROR: test-cases-abc.t#B output changed
1673 !.
1673 !.
1674 Failed test-cases-abc.t#B: output changed
1674 Failed test-cases-abc.t#B: output changed
1675 # Ran 2 tests, 0 skipped, 1 failed.
1675 # Ran 2 tests, 0 skipped, 1 failed.
1676 python hash seed: * (glob)
1676 python hash seed: * (glob)
1677 [1]
1677 [1]
1678
1678
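For context, --restart appears to key off the .err files left behind by the previous run: tests at the front of the schedule with no corresponding .err file are dropped, and execution resumes from the first previously failing test onward, which is why only 2 of the 3 cases are rerun above. The usual cycle, sketched with ordinary shell comments:

  $ rt test-cases-abc.t       # fails, leaving test-cases-abc.t#B.err behind
  $ rt --restart              # resumes from the case that failed
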
1679 --restart works with outputdir
1679 --restart works with outputdir
1680
1680
1681 $ mkdir output
1681 $ mkdir output
1682 $ mv test-cases-abc.t#B.err output
1682 $ mv test-cases-abc.t#B.err output
1683 $ rt --restart --outputdir output
1683 $ rt --restart --outputdir output
1684 running 2 tests using 1 parallel processes
1684 running 2 tests using 1 parallel processes
1685
1685
1686 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1686 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1687 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1687 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1688 @@ -7,7 +7,7 @@
1688 @@ -7,7 +7,7 @@
1689 $ V=C
1689 $ V=C
1690 #endif
1690 #endif
1691 $ echo $V | sed 's/A/C/'
1691 $ echo $V | sed 's/A/C/'
1692 - C
1692 - C
1693 + B
1693 + B
1694 #if C
1694 #if C
1695 $ [ $V = C ]
1695 $ [ $V = C ]
1696 #endif
1696 #endif
1697
1697
1698 ERROR: test-cases-abc.t#B output changed
1698 ERROR: test-cases-abc.t#B output changed
1699 !.
1699 !.
1700 Failed test-cases-abc.t#B: output changed
1700 Failed test-cases-abc.t#B: output changed
1701 # Ran 2 tests, 0 skipped, 1 failed.
1701 # Ran 2 tests, 0 skipped, 1 failed.
1702 python hash seed: * (glob)
1702 python hash seed: * (glob)
1703 [1]
1703 [1]
1704
1704
1705 Test TESTCASE variable
1705 Test TESTCASE variable
1706
1706
1707 $ cat > test-cases-ab.t <<'EOF'
1707 $ cat > test-cases-ab.t <<'EOF'
1708 > $ dostuff() {
1708 > $ dostuff() {
1709 > > echo "In case $TESTCASE"
1709 > > echo "In case $TESTCASE"
1710 > > }
1710 > > }
1711 > #testcases A B
1711 > #testcases A B
1712 > #if A
1712 > #if A
1713 > $ dostuff
1713 > $ dostuff
1714 > In case A
1714 > In case A
1715 > #endif
1715 > #endif
1716 > #if B
1716 > #if B
1717 > $ dostuff
1717 > $ dostuff
1718 > In case B
1718 > In case B
1719 > #endif
1719 > #endif
1720 > EOF
1720 > EOF
1721 $ rt test-cases-ab.t
1721 $ rt test-cases-ab.t
1722 running 2 tests using 1 parallel processes
1722 running 2 tests using 1 parallel processes
1723 ..
1723 ..
1724 # Ran 2 tests, 0 skipped, 0 failed.
1724 # Ran 2 tests, 0 skipped, 0 failed.
1725
1725
1726 Support running a specific test case
1726 Support running a specific test case
1727
1727
1728 $ rt "test-cases-abc.t#B"
1728 $ rt "test-cases-abc.t#B"
1729 running 1 tests using 1 parallel processes
1729 running 1 tests using 1 parallel processes
1730
1730
1731 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1731 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1732 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1732 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1733 @@ -7,7 +7,7 @@
1733 @@ -7,7 +7,7 @@
1734 $ V=C
1734 $ V=C
1735 #endif
1735 #endif
1736 $ echo $V | sed 's/A/C/'
1736 $ echo $V | sed 's/A/C/'
1737 - C
1737 - C
1738 + B
1738 + B
1739 #if C
1739 #if C
1740 $ [ $V = C ]
1740 $ [ $V = C ]
1741 #endif
1741 #endif
1742
1742
1743 ERROR: test-cases-abc.t#B output changed
1743 ERROR: test-cases-abc.t#B output changed
1744 !
1744 !
1745 Failed test-cases-abc.t#B: output changed
1745 Failed test-cases-abc.t#B: output changed
1746 # Ran 1 tests, 0 skipped, 1 failed.
1746 # Ran 1 tests, 0 skipped, 1 failed.
1747 python hash seed: * (glob)
1747 python hash seed: * (glob)
1748 [1]
1748 [1]
1749
1749
1750 Support running multiple test cases in the same file
1750 Support running multiple test cases in the same file
1751
1751
1752 $ rt test-cases-abc.t#B test-cases-abc.t#C
1752 $ rt test-cases-abc.t#B test-cases-abc.t#C
1753 running 2 tests using 1 parallel processes
1753 running 2 tests using 1 parallel processes
1754
1754
1755 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1755 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1756 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1756 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1757 @@ -7,7 +7,7 @@
1757 @@ -7,7 +7,7 @@
1758 $ V=C
1758 $ V=C
1759 #endif
1759 #endif
1760 $ echo $V | sed 's/A/C/'
1760 $ echo $V | sed 's/A/C/'
1761 - C
1761 - C
1762 + B
1762 + B
1763 #if C
1763 #if C
1764 $ [ $V = C ]
1764 $ [ $V = C ]
1765 #endif
1765 #endif
1766
1766
1767 ERROR: test-cases-abc.t#B output changed
1767 ERROR: test-cases-abc.t#B output changed
1768 !.
1768 !.
1769 Failed test-cases-abc.t#B: output changed
1769 Failed test-cases-abc.t#B: output changed
1770 # Ran 2 tests, 0 skipped, 1 failed.
1770 # Ran 2 tests, 0 skipped, 1 failed.
1771 python hash seed: * (glob)
1771 python hash seed: * (glob)
1772 [1]
1772 [1]
1773
1773
1774 Support ignoring invalid test cases
1774 Support ignoring invalid test cases
1775
1775
1776 $ rt test-cases-abc.t#B test-cases-abc.t#D
1776 $ rt test-cases-abc.t#B test-cases-abc.t#D
1777 running 1 tests using 1 parallel processes
1777 running 1 tests using 1 parallel processes
1778
1778
1779 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1779 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1780 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1780 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1781 @@ -7,7 +7,7 @@
1781 @@ -7,7 +7,7 @@
1782 $ V=C
1782 $ V=C
1783 #endif
1783 #endif
1784 $ echo $V | sed 's/A/C/'
1784 $ echo $V | sed 's/A/C/'
1785 - C
1785 - C
1786 + B
1786 + B
1787 #if C
1787 #if C
1788 $ [ $V = C ]
1788 $ [ $V = C ]
1789 #endif
1789 #endif
1790
1790
1791 ERROR: test-cases-abc.t#B output changed
1791 ERROR: test-cases-abc.t#B output changed
1792 !
1792 !
1793 Failed test-cases-abc.t#B: output changed
1793 Failed test-cases-abc.t#B: output changed
1794 # Ran 1 tests, 0 skipped, 1 failed.
1794 # Ran 1 tests, 0 skipped, 1 failed.
1795 python hash seed: * (glob)
1795 python hash seed: * (glob)
1796 [1]
1796 [1]
1797
1797
1798 Support running test cases with complex names
1798 Support running test cases with complex names
1799
1799
1800 $ cat > test-cases-advanced-cases.t <<'EOF'
1800 $ cat > test-cases-advanced-cases.t <<'EOF'
1801 > #testcases simple case-with-dashes casewith_-.chars
1801 > #testcases simple case-with-dashes casewith_-.chars
1802 > $ echo $TESTCASE
1802 > $ echo $TESTCASE
1803 > simple
1803 > simple
1804 > EOF
1804 > EOF
1805
1805
1806 $ cat test-cases-advanced-cases.t
1806 $ cat test-cases-advanced-cases.t
1807 #testcases simple case-with-dashes casewith_-.chars
1807 #testcases simple case-with-dashes casewith_-.chars
1808 $ echo $TESTCASE
1808 $ echo $TESTCASE
1809 simple
1809 simple
1810
1810
1811 $ rt test-cases-advanced-cases.t
1811 $ rt test-cases-advanced-cases.t
1812 running 3 tests using 1 parallel processes
1812 running 3 tests using 1 parallel processes
1813
1813
1814 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1814 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1815 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1815 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1816 @@ -1,3 +1,3 @@
1816 @@ -1,3 +1,3 @@
1817 #testcases simple case-with-dashes casewith_-.chars
1817 #testcases simple case-with-dashes casewith_-.chars
1818 $ echo $TESTCASE
1818 $ echo $TESTCASE
1819 - simple
1819 - simple
1820 + case-with-dashes
1820 + case-with-dashes
1821
1821
1822 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1822 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1823 !
1823 !
1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1826 @@ -1,3 +1,3 @@
1826 @@ -1,3 +1,3 @@
1827 #testcases simple case-with-dashes casewith_-.chars
1827 #testcases simple case-with-dashes casewith_-.chars
1828 $ echo $TESTCASE
1828 $ echo $TESTCASE
1829 - simple
1829 - simple
1830 + casewith_-.chars
1830 + casewith_-.chars
1831
1831
1832 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1832 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1833 !.
1833 !.
1834 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1834 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1835 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1835 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1836 # Ran 3 tests, 0 skipped, 2 failed.
1836 # Ran 3 tests, 0 skipped, 2 failed.
1837 python hash seed: * (glob)
1837 python hash seed: * (glob)
1838 [1]
1838 [1]
1839
1839
1840 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1840 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1841 running 1 tests using 1 parallel processes
1841 running 1 tests using 1 parallel processes
1842
1842
1843 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1843 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1844 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1844 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1845 @@ -1,3 +1,3 @@
1845 @@ -1,3 +1,3 @@
1846 #testcases simple case-with-dashes casewith_-.chars
1846 #testcases simple case-with-dashes casewith_-.chars
1847 $ echo $TESTCASE
1847 $ echo $TESTCASE
1848 - simple
1848 - simple
1849 + case-with-dashes
1849 + case-with-dashes
1850
1850
1851 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1851 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1852 !
1852 !
1853 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1853 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1854 # Ran 1 tests, 0 skipped, 1 failed.
1854 # Ran 1 tests, 0 skipped, 1 failed.
1855 python hash seed: * (glob)
1855 python hash seed: * (glob)
1856 [1]
1856 [1]
1857
1857
1858 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1858 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1859 running 1 tests using 1 parallel processes
1859 running 1 tests using 1 parallel processes
1860
1860
1861 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1861 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1862 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1862 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1863 @@ -1,3 +1,3 @@
1863 @@ -1,3 +1,3 @@
1864 #testcases simple case-with-dashes casewith_-.chars
1864 #testcases simple case-with-dashes casewith_-.chars
1865 $ echo $TESTCASE
1865 $ echo $TESTCASE
1866 - simple
1866 - simple
1867 + casewith_-.chars
1867 + casewith_-.chars
1868
1868
1869 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1869 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1870 !
1870 !
1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1872 # Ran 1 tests, 0 skipped, 1 failed.
1872 # Ran 1 tests, 0 skipped, 1 failed.
1873 python hash seed: * (glob)
1873 python hash seed: * (glob)
1874 [1]
1874 [1]
1875
1875
1876 Test automatic pattern replacement
1876 Test automatic pattern replacement
1877 ==================================
1877 ==================================
1878
1878
1879 $ cat << EOF >> common-pattern.py
1879 $ cat << EOF >> common-pattern.py
1880 > substitutions = [
1880 > substitutions = [
1881 > (br'foo-(.*)\\b',
1881 > (br'foo-(.*)\\b',
1882 > br'\$XXX=\\1\$'),
1882 > br'\$XXX=\\1\$'),
1883 > (br'bar\\n',
1883 > (br'bar\\n',
1884 > br'\$YYY$\\n'),
1884 > br'\$YYY$\\n'),
1885 > ]
1885 > ]
1886 > EOF
1886 > EOF
1887
1887
1888 $ cat << EOF >> test-substitution.t
1888 $ cat << EOF >> test-substitution.t
1889 > $ echo foo-12
1889 > $ echo foo-12
1890 > \$XXX=12$
1890 > \$XXX=12$
1891 > $ echo foo-42
1891 > $ echo foo-42
1892 > \$XXX=42$
1892 > \$XXX=42$
1893 > $ echo bar prior
1893 > $ echo bar prior
1894 > bar prior
1894 > bar prior
1895 > $ echo lastbar
1895 > $ echo lastbar
1896 > last\$YYY$
1896 > last\$YYY$
1897 > $ echo foo-bar foo-baz
1897 > $ echo foo-bar foo-baz
1898 > EOF
1898 > EOF
1899
1899
1900 $ rt test-substitution.t
1900 $ rt test-substitution.t
1901 running 1 tests using 1 parallel processes
1901 running 1 tests using 1 parallel processes
1902
1902
1903 --- $TESTTMP/anothertests/cases/test-substitution.t
1903 --- $TESTTMP/anothertests/cases/test-substitution.t
1904 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1904 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1905 @@ -7,3 +7,4 @@
1905 @@ -7,3 +7,4 @@
1906 $ echo lastbar
1906 $ echo lastbar
1907 last$YYY$
1907 last$YYY$
1908 $ echo foo-bar foo-baz
1908 $ echo foo-bar foo-baz
1909 + $XXX=bar foo-baz$
1909 + $XXX=bar foo-baz$
1910
1910
1911 ERROR: test-substitution.t output changed
1911 ERROR: test-substitution.t output changed
1912 !
1912 !
1913 Failed test-substitution.t: output changed
1913 Failed test-substitution.t: output changed
1914 # Ran 1 tests, 0 skipped, 1 failed.
1914 # Ran 1 tests, 0 skipped, 1 failed.
1915 python hash seed: * (glob)
1915 python hash seed: * (glob)
1916 [1]
1916 [1]
1917
1917
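The common-pattern.py hook shown above is a list named `substitutions` of (bytes regex, bytes replacement) pairs that run-tests applies to the actual output before comparing it against the expected output, which is why `foo-bar foo-baz` in the last command reappears as `$XXX=bar foo-baz$` in the diff. A hedged sketch of a more typical use, masking timestamps; the pattern and the $TIMESTAMP$ marker are invented, and a quoted heredoc avoids the extra escaping needed above:

  $ cat > common-pattern.py <<'EOF'
  > substitutions = [
  >     (br'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}',
  >      br'$TIMESTAMP$'),
  > ]
  > EOF
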
1918 --extra-config-opt works
1918 --extra-config-opt works
1919
1919
1920 $ cat << EOF >> test-config-opt.t
1920 $ cat << EOF >> test-config-opt.t
1921 > $ hg init test-config-opt
1921 > $ hg init test-config-opt
1922 > $ hg -R test-config-opt purge
1922 > $ hg -R test-config-opt purge
1923 > EOF
1923 > EOF
1924
1924
1925 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1925 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1926 running 1 tests using 1 parallel processes
1926 running 1 tests using 1 parallel processes
1927 .
1927 .
1928 # Ran 1 tests, 0 skipped, 0 failed.
1928 # Ran 1 tests, 0 skipped, 0 failed.
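--extra-config-opt injects a section.name=value pair into the configuration seen by every hg command run inside the tests, which is how the purge extension becomes available to test-config-opt.t above without the test enabling it itself. A couple of sketched variants; the option is presumably repeatable, and the specific values here are only illustrative:

  $ rt --extra-config-opt extensions.purge= --extra-config-opt ui.verbose=true test-config-opt.t
  $ rt --extra-config-opt phases.publish=False test-config-opt.t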