run-tests: sort the skip, failure and error lists in the final output...
Matt Harbison
r41627:f9150901 default
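
The commit above changes the summary that run-tests.py prints at the end of a run so that skipped, failed and errored tests are listed in a stable, sorted order. A minimal sketch of the idea (hypothetical variable names; the actual change is in the result-reporting code further down the file, outside the portion shown below):

    # print the final lists in deterministic order
    for test, msg in sorted(skipped):
        print('Skipped %s: %s' % (test, msg))
    for test, msg in sorted(failed):
        print('Failed %s: %s' % (test, msg))
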
@@ -1,3305 +1,3308 @@
#!/usr/bin/env python
#
# run-tests.py - Run a set of tests on Mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Modifying this script is tricky because it has many modes:
#   - serial (default) vs parallel (-jN, N > 1)
#   - no coverage (default) vs coverage (-c, -C, -s)
#   - temp install (default) vs specific hg script (--with-hg, --local)
#   - tests are a mix of shell scripts and Python scripts
#
# If you change this script, it is recommended that you ensure you
# haven't broken it by running it in various modes with a representative
# sample of test scripts. For example:
#
#  1) serial, no coverage, temp install:
#      ./run-tests.py test-s*
#  2) serial, no coverage, local hg:
#      ./run-tests.py --local test-s*
#  3) serial, coverage, temp install:
#      ./run-tests.py -c test-s*
#  4) serial, coverage, local hg:
#      ./run-tests.py -c --local test-s* # unsupported
#  5) parallel, no coverage, temp install:
#      ./run-tests.py -j2 test-s*
#  6) parallel, no coverage, local hg:
#      ./run-tests.py -j2 --local test-s*
#  7) parallel, coverage, temp install:
#      ./run-tests.py -j2 -c test-s* # currently broken
#  8) parallel, coverage, local install:
#      ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
#  9) parallel, custom tmp dir:
#      ./run-tests.py -j2 --tmpdir /tmp/myhgtests
#  10) parallel, pure, tests that call run-tests:
#      ./run-tests.py --pure `grep -l run-tests.py *.t`
#
# (You could use any subset of the tests: test-s* happens to match
# enough that it's worth doing parallel runs, few enough that it
# completes fairly quickly, includes both shell and Python scripts, and
# includes some scripts that run daemon processes.)

from __future__ import absolute_import, print_function

import argparse
import collections
import difflib
import distutils.version as version
import errno
import json
import multiprocessing
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import sysconfig
import tempfile
import threading
import time
import unittest
import uuid
import xml.dom.minidom as minidom

try:
    import Queue as queue
except ImportError:
    import queue

try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()

origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            def __init__(self, strenv):
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv
            def __getitem__(self, k):
                v = self._strenv.__getitem__(_strpath(k))
                return _bytespath(v)
            def __setitem__(self, k, v):
                self._strenv.__setitem__(_strpath(k), _strpath(v))
            def __delitem__(self, k):
                self._strenv.__delitem__(_strpath(k))
            def __contains__(self, k):
                return self._strenv.__contains__(_strpath(k))
            def __iter__(self):
                return iter([_bytespath(k) for k in iter(self._strenv)])
            def get(self, k, default=None):
                v = self._strenv.get(_strpath(k), _strpath(default))
                return _bytespath(v)
            def pop(self, k, default=None):
                v = self._strenv.pop(_strpath(k), _strpath(default))
                return _bytespath(v)

        osenvironb = environbytes(os.environ)

    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _bytespath(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
    osenvironb = os.environ
    getcwdb = os.getcwd

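# Illustrative note (not part of the original script): the helpers above give
# the harness a uniform bytes-based view of paths and the environment on both
# Python 2 and 3, e.g. (hypothetical key, for illustration only):
#
#   osenvironb[b'HGTEST_DEMO'] = b'value'
#   assert osenvironb.get(b'HGTEST_DEMO') == b'value'
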
# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    else:
        return False

# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
    return False

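# Illustrative note (not part of the original script): parseargs() below calls
# checksocketfamily('AF_INET6') to decide whether test servers should listen
# on IPv6; checkportisavailable() is the same kind of probe for a single port,
# e.g. checkportisavailable(20059) is True only if that port can currently be
# bound on localhost.
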
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    processlock.acquire()
    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
                         cwd=_strpath(wd), env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p

PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

defaults = {
    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}

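# Illustrative note (not part of the original script): each default above can
# be overridden from the environment before falling back to the literal value,
# e.g.:
#
#   HGTEST_JOBS=4 HGTEST_TIMEOUT=360 ./run-tests.py test-commit*.t
#
# getparser() below folds these environment values into the argparse defaults.
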
def canonpath(path):
    return os.path.realpath(os.path.expanduser(path))

def parselistfiles(files, listtype, warn=True):
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        for line in f.readlines():
            line = line.split(b'#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries

def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = []
    try:
        with open(path, 'rb') as f:
            for l in f:
                if l.startswith(b'#testcases '):
                    cases.append(sorted(l[11:].split()))
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases

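# Illustrative note (not part of the original script): a .t test can declare
# named variants with a "#testcases" line, e.g. a file containing
#
#   #testcases flat tree
#
# makes parsettestcases() return [[b'flat', b'tree']] (one sorted list of case
# names per #testcases line).
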
def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
                           help='Tests to run')

    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
                         metavar='bisect_repo',
                         help=("Path of a repo to bisect. Use together with "
                               "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
                         metavar="known_good_rev",
                         help=("Automatically bisect any failures using this "
                               "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")

    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser

def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results."""
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
            sys.stderr.flush()
    if options.local:
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3_warnings:
        if PYTHON3:
            parser.error(
                '--py3-warnings can only be used on Python 2.7')

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options

def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    shutil.copy(src, dst)
    os.remove(src)

def makecleanable(path):
    """Try to fix directory permission recursively so that the entire tree
    can be deleted"""
    for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
        for d in dirnames:
            p = os.path.join(dirpath, d)
            try:
                os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
            except OSError:
                pass

_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
        if line.endswith(b' \n'):
            line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines

verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)

# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

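# Illustrative note (not part of the original script): for example, the line
# b'  output line (windows !)\n' matches optline with group(1) ==
# b'  output line' and group(2) == b'windows'.
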
def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')

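# Illustrative note (not part of the original script):
#   cdatasafe(b'ok ]]> \x07') == b'ok ] ]> ?'
# (the control character becomes '?', and ']]>' gains a space).
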
649 def log(*msg):
649 def log(*msg):
650 """Log something to stdout.
650 """Log something to stdout.
651
651
652 Arguments are strings to print.
652 Arguments are strings to print.
653 """
653 """
654 with iolock:
654 with iolock:
655 if verbose:
655 if verbose:
656 print(verbose, end=' ')
656 print(verbose, end=' ')
657 for m in msg:
657 for m in msg:
658 print(m, end=' ')
658 print(m, end=' ')
659 print()
659 print()
660 sys.stdout.flush()
660 sys.stdout.flush()
661
661
662 def highlightdiff(line, color):
662 def highlightdiff(line, color):
663 if not color:
663 if not color:
664 return line
664 return line
665 assert pygmentspresent
665 assert pygmentspresent
666 return pygments.highlight(line.decode('latin1'), difflexer,
666 return pygments.highlight(line.decode('latin1'), difflexer,
667 terminal256formatter).encode('latin1')
667 terminal256formatter).encode('latin1')
668
668
669 def highlightmsg(msg, color):
669 def highlightmsg(msg, color):
670 if not color:
670 if not color:
671 return msg
671 return msg
672 assert pygmentspresent
672 assert pygmentspresent
673 return pygments.highlight(msg, runnerlexer, runnerformatter)
673 return pygments.highlight(msg, runnerlexer, runnerformatter)
674
674
675 def terminate(proc):
675 def terminate(proc):
676 """Terminate subprocess"""
676 """Terminate subprocess"""
677 vlog('# Terminating process %d' % proc.pid)
677 vlog('# Terminating process %d' % proc.pid)
678 try:
678 try:
679 proc.terminate()
679 proc.terminate()
680 except OSError:
680 except OSError:
681 pass
681 pass
682
682
683 def killdaemons(pidfile):
683 def killdaemons(pidfile):
684 import killdaemons as killmod
684 import killdaemons as killmod
685 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
685 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
686 logfn=vlog)
686 logfn=vlog)
687
687
688 class Test(unittest.TestCase):
688 class Test(unittest.TestCase):
689 """Encapsulates a single, runnable test.
689 """Encapsulates a single, runnable test.
690
690
691 While this class conforms to the unittest.TestCase API, it differs in that
691 While this class conforms to the unittest.TestCase API, it differs in that
692 instances need to be instantiated manually. (Typically, unittest.TestCase
692 instances need to be instantiated manually. (Typically, unittest.TestCase
693 classes are instantiated automatically by scanning modules.)
693 classes are instantiated automatically by scanning modules.)
694 """
694 """
695
695
696 # Status code reserved for skipped tests (used by hghave).
696 # Status code reserved for skipped tests (used by hghave).
697 SKIPPED_STATUS = 80
697 SKIPPED_STATUS = 80
698
698
699 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
699 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
700 debug=False,
700 debug=False,
701 first=False,
701 first=False,
702 timeout=None,
702 timeout=None,
703 startport=None, extraconfigopts=None,
703 startport=None, extraconfigopts=None,
704 py3warnings=False, shell=None, hgcommand=None,
704 py3warnings=False, shell=None, hgcommand=None,
705 slowtimeout=None, usechg=False,
705 slowtimeout=None, usechg=False,
706 useipv6=False):
706 useipv6=False):
707 """Create a test from parameters.
707 """Create a test from parameters.
708
708
709 path is the full path to the file defining the test.
709 path is the full path to the file defining the test.
710
710
711 tmpdir is the main temporary directory to use for this test.
711 tmpdir is the main temporary directory to use for this test.
712
712
713 keeptmpdir determines whether to keep the test's temporary directory
713 keeptmpdir determines whether to keep the test's temporary directory
714 after execution. It defaults to removal (False).
714 after execution. It defaults to removal (False).
715
715
716 debug mode will make the test execute verbosely, with unfiltered
716 debug mode will make the test execute verbosely, with unfiltered
717 output.
717 output.
718
718
719 timeout controls the maximum run time of the test. It is ignored when
719 timeout controls the maximum run time of the test. It is ignored when
720 debug is True. See slowtimeout for tests with #require slow.
720 debug is True. See slowtimeout for tests with #require slow.
721
721
722 slowtimeout overrides timeout if the test has #require slow.
722 slowtimeout overrides timeout if the test has #require slow.
723
723
724 startport controls the starting port number to use for this test. Each
724 startport controls the starting port number to use for this test. Each
725 test will reserve 3 port numbers for execution. It is the caller's
725 test will reserve 3 port numbers for execution. It is the caller's
726 responsibility to allocate a non-overlapping port range to Test
726 responsibility to allocate a non-overlapping port range to Test
727 instances.
727 instances.
728
728
729 extraconfigopts is an iterable of extra hgrc config options. Values
729 extraconfigopts is an iterable of extra hgrc config options. Values
730 must have the form "key=value" (something understood by hgrc). Values
730 must have the form "key=value" (something understood by hgrc). Values
731 of the form "foo.key=value" will result in "[foo] key=value".
731 of the form "foo.key=value" will result in "[foo] key=value".
732
732
733 py3warnings enables Py3k warnings.
733 py3warnings enables Py3k warnings.
734
734
735 shell is the shell to execute tests in.
735 shell is the shell to execute tests in.
736 """
736 """
737 if timeout is None:
737 if timeout is None:
738 timeout = defaults['timeout']
738 timeout = defaults['timeout']
739 if startport is None:
739 if startport is None:
740 startport = defaults['port']
740 startport = defaults['port']
741 if slowtimeout is None:
741 if slowtimeout is None:
742 slowtimeout = defaults['slowtimeout']
742 slowtimeout = defaults['slowtimeout']
743 self.path = path
743 self.path = path
744 self.bname = os.path.basename(path)
744 self.bname = os.path.basename(path)
745 self.name = _strpath(self.bname)
745 self.name = _strpath(self.bname)
746 self._testdir = os.path.dirname(path)
746 self._testdir = os.path.dirname(path)
747 self._outputdir = outputdir
747 self._outputdir = outputdir
748 self._tmpname = os.path.basename(path)
748 self._tmpname = os.path.basename(path)
749 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
749 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
750
750
751 self._threadtmp = tmpdir
751 self._threadtmp = tmpdir
752 self._keeptmpdir = keeptmpdir
752 self._keeptmpdir = keeptmpdir
753 self._debug = debug
753 self._debug = debug
754 self._first = first
754 self._first = first
755 self._timeout = timeout
755 self._timeout = timeout
756 self._slowtimeout = slowtimeout
756 self._slowtimeout = slowtimeout
757 self._startport = startport
757 self._startport = startport
758 self._extraconfigopts = extraconfigopts or []
758 self._extraconfigopts = extraconfigopts or []
759 self._py3warnings = py3warnings
759 self._py3warnings = py3warnings
760 self._shell = _bytespath(shell)
760 self._shell = _bytespath(shell)
761 self._hgcommand = hgcommand or b'hg'
761 self._hgcommand = hgcommand or b'hg'
762 self._usechg = usechg
762 self._usechg = usechg
763 self._useipv6 = useipv6
763 self._useipv6 = useipv6
764
764
765 self._aborted = False
765 self._aborted = False
766 self._daemonpids = []
766 self._daemonpids = []
767 self._finished = None
767 self._finished = None
768 self._ret = None
768 self._ret = None
769 self._out = None
769 self._out = None
770 self._skipped = None
770 self._skipped = None
771 self._testtmp = None
771 self._testtmp = None
772 self._chgsockdir = None
772 self._chgsockdir = None
773
773
774 self._refout = self.readrefout()
774 self._refout = self.readrefout()
775
775
776 def readrefout(self):
776 def readrefout(self):
777 """read reference output"""
777 """read reference output"""
778 # If we're not in --debug mode and reference output file exists,
778 # If we're not in --debug mode and reference output file exists,
779 # check test output against it.
779 # check test output against it.
780 if self._debug:
780 if self._debug:
781 return None # to match "out is None"
781 return None # to match "out is None"
782 elif os.path.exists(self.refpath):
782 elif os.path.exists(self.refpath):
783 with open(self.refpath, 'rb') as f:
783 with open(self.refpath, 'rb') as f:
784 return f.read().splitlines(True)
784 return f.read().splitlines(True)
785 else:
785 else:
786 return []
786 return []
787
787
788 # needed to get base class __repr__ running
788 # needed to get base class __repr__ running
789 @property
789 @property
790 def _testMethodName(self):
790 def _testMethodName(self):
791 return self.name
791 return self.name
792
792
793 def __str__(self):
793 def __str__(self):
794 return self.name
794 return self.name
795
795
796 def shortDescription(self):
796 def shortDescription(self):
797 return self.name
797 return self.name
798
798
799 def setUp(self):
799 def setUp(self):
800 """Tasks to perform before run()."""
800 """Tasks to perform before run()."""
801 self._finished = False
801 self._finished = False
802 self._ret = None
802 self._ret = None
803 self._out = None
803 self._out = None
804 self._skipped = None
804 self._skipped = None
805
805
806 try:
806 try:
807 os.mkdir(self._threadtmp)
807 os.mkdir(self._threadtmp)
808 except OSError as e:
808 except OSError as e:
809 if e.errno != errno.EEXIST:
809 if e.errno != errno.EEXIST:
810 raise
810 raise
811
811
812 name = self._tmpname
812 name = self._tmpname
813 self._testtmp = os.path.join(self._threadtmp, name)
813 self._testtmp = os.path.join(self._threadtmp, name)
814 os.mkdir(self._testtmp)
814 os.mkdir(self._testtmp)
815
815
816 # Remove any previous output files.
816 # Remove any previous output files.
817 if os.path.exists(self.errpath):
817 if os.path.exists(self.errpath):
818 try:
818 try:
819 os.remove(self.errpath)
819 os.remove(self.errpath)
820 except OSError as e:
820 except OSError as e:
821 # We might have raced another test to clean up a .err
821 # We might have raced another test to clean up a .err
822 # file, so ignore ENOENT when removing a previous .err
822 # file, so ignore ENOENT when removing a previous .err
823 # file.
823 # file.
824 if e.errno != errno.ENOENT:
824 if e.errno != errno.ENOENT:
825 raise
825 raise
826
826
827 if self._usechg:
827 if self._usechg:
828 self._chgsockdir = os.path.join(self._threadtmp,
828 self._chgsockdir = os.path.join(self._threadtmp,
829 b'%s.chgsock' % name)
829 b'%s.chgsock' % name)
830 os.mkdir(self._chgsockdir)
830 os.mkdir(self._chgsockdir)
831
831
832 def run(self, result):
832 def run(self, result):
833 """Run this test and report results against a TestResult instance."""
833 """Run this test and report results against a TestResult instance."""
834 # This function is extremely similar to unittest.TestCase.run(). Once
834 # This function is extremely similar to unittest.TestCase.run(). Once
835 # we require Python 2.7 (or at least its version of unittest), this
835 # we require Python 2.7 (or at least its version of unittest), this
836 # function can largely go away.
836 # function can largely go away.
837 self._result = result
837 self._result = result
838 result.startTest(self)
838 result.startTest(self)
839 try:
839 try:
840 try:
840 try:
841 self.setUp()
841 self.setUp()
842 except (KeyboardInterrupt, SystemExit):
842 except (KeyboardInterrupt, SystemExit):
843 self._aborted = True
843 self._aborted = True
844 raise
844 raise
845 except Exception:
845 except Exception:
846 result.addError(self, sys.exc_info())
846 result.addError(self, sys.exc_info())
847 return
847 return
848
848
849 success = False
849 success = False
850 try:
850 try:
851 self.runTest()
851 self.runTest()
852 except KeyboardInterrupt:
852 except KeyboardInterrupt:
853 self._aborted = True
853 self._aborted = True
854 raise
854 raise
855 except unittest.SkipTest as e:
855 except unittest.SkipTest as e:
856 result.addSkip(self, str(e))
856 result.addSkip(self, str(e))
857 # The base class will have already counted this as a
857 # The base class will have already counted this as a
858 # test we "ran", but we want to exclude skipped tests
858 # test we "ran", but we want to exclude skipped tests
859 # from the count of tests actually run.
859 # from the count of tests actually run.
860 result.testsRun -= 1
860 result.testsRun -= 1
861 except self.failureException as e:
861 except self.failureException as e:
862 # This differs from unittest in that we don't capture
862 # This differs from unittest in that we don't capture
863 # the stack trace. This is for historical reasons and
863 # the stack trace. This is for historical reasons and
864 # this decision could be revisited in the future,
864 # this decision could be revisited in the future,
865 # especially for PythonTest instances.
865 # especially for PythonTest instances.
866 if result.addFailure(self, str(e)):
866 if result.addFailure(self, str(e)):
867 success = True
867 success = True
868 except Exception:
868 except Exception:
869 result.addError(self, sys.exc_info())
869 result.addError(self, sys.exc_info())
870 else:
870 else:
871 success = True
871 success = True
872
872
873 try:
873 try:
874 self.tearDown()
874 self.tearDown()
875 except (KeyboardInterrupt, SystemExit):
875 except (KeyboardInterrupt, SystemExit):
876 self._aborted = True
876 self._aborted = True
877 raise
877 raise
878 except Exception:
878 except Exception:
879 result.addError(self, sys.exc_info())
879 result.addError(self, sys.exc_info())
880 success = False
880 success = False
881
881
882 if success:
882 if success:
883 result.addSuccess(self)
883 result.addSuccess(self)
884 finally:
884 finally:
885 result.stopTest(self, interrupted=self._aborted)
885 result.stopTest(self, interrupted=self._aborted)
886
886
887 def runTest(self):
887 def runTest(self):
888 """Run this test instance.
888 """Run this test instance.
889
889
890 Failures and skips are reported by raising exceptions; nothing is returned.
890 Failures and skips are reported by raising exceptions; nothing is returned.
891 """
891 """
892 env = self._getenv()
892 env = self._getenv()
893 self._genrestoreenv(env)
893 self._genrestoreenv(env)
894 self._daemonpids.append(env['DAEMON_PIDS'])
894 self._daemonpids.append(env['DAEMON_PIDS'])
895 self._createhgrc(env['HGRCPATH'])
895 self._createhgrc(env['HGRCPATH'])
896
896
897 vlog('# Test', self.name)
897 vlog('# Test', self.name)
898
898
899 ret, out = self._run(env)
899 ret, out = self._run(env)
900 self._finished = True
900 self._finished = True
901 self._ret = ret
901 self._ret = ret
902 self._out = out
902 self._out = out
903
903
904 def describe(ret):
904 def describe(ret):
905 if ret < 0:
905 if ret < 0:
906 return 'killed by signal: %d' % -ret
906 return 'killed by signal: %d' % -ret
907 return 'returned error code %d' % ret
907 return 'returned error code %d' % ret
908
908
909 self._skipped = False
909 self._skipped = False
910
910
911 if ret == self.SKIPPED_STATUS:
911 if ret == self.SKIPPED_STATUS:
912 if out is None: # Debug mode, nothing to parse.
912 if out is None: # Debug mode, nothing to parse.
913 missing = ['unknown']
913 missing = ['unknown']
914 failed = None
914 failed = None
915 else:
915 else:
916 missing, failed = TTest.parsehghaveoutput(out)
916 missing, failed = TTest.parsehghaveoutput(out)
917
917
918 if not missing:
918 if not missing:
919 missing = ['skipped']
919 missing = ['skipped']
920
920
921 if failed:
921 if failed:
922 self.fail('hg have failed checking for %s' % failed[-1])
922 self.fail('hg have failed checking for %s' % failed[-1])
923 else:
923 else:
924 self._skipped = True
924 self._skipped = True
925 raise unittest.SkipTest(missing[-1])
925 raise unittest.SkipTest(missing[-1])
926 elif ret == 'timeout':
926 elif ret == 'timeout':
927 self.fail('timed out')
927 self.fail('timed out')
928 elif ret is False:
928 elif ret is False:
929 self.fail('no result code from test')
929 self.fail('no result code from test')
930 elif out != self._refout:
930 elif out != self._refout:
931 # Diff generation may rely on written .err file.
931 # Diff generation may rely on written .err file.
932 if (ret != 0 or out != self._refout) and not self._skipped \
932 if (ret != 0 or out != self._refout) and not self._skipped \
933 and not self._debug:
933 and not self._debug:
934 with open(self.errpath, 'wb') as f:
934 with open(self.errpath, 'wb') as f:
935 for line in out:
935 for line in out:
936 f.write(line)
936 f.write(line)
937
937
938 # The result object handles diff calculation for us.
938 # The result object handles diff calculation for us.
939 with firstlock:
939 with firstlock:
940 if self._result.addOutputMismatch(self, ret, out, self._refout):
940 if self._result.addOutputMismatch(self, ret, out, self._refout):
941 # change was accepted, skip failing
941 # change was accepted, skip failing
942 return
942 return
943 if self._first:
943 if self._first:
944 global firsterror
944 global firsterror
945 firsterror = True
945 firsterror = True
946
946
947 if ret:
947 if ret:
948 msg = 'output changed and ' + describe(ret)
948 msg = 'output changed and ' + describe(ret)
949 else:
949 else:
950 msg = 'output changed'
950 msg = 'output changed'
951
951
952 self.fail(msg)
952 self.fail(msg)
953 elif ret:
953 elif ret:
954 self.fail(describe(ret))
954 self.fail(describe(ret))
955
955
956 def tearDown(self):
956 def tearDown(self):
957 """Tasks to perform after run()."""
957 """Tasks to perform after run()."""
958 for entry in self._daemonpids:
958 for entry in self._daemonpids:
959 killdaemons(entry)
959 killdaemons(entry)
960 self._daemonpids = []
960 self._daemonpids = []
961
961
962 if self._keeptmpdir:
962 if self._keeptmpdir:
963 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
963 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
964 (self._testtmp.decode('utf-8'),
964 (self._testtmp.decode('utf-8'),
965 self._threadtmp.decode('utf-8')))
965 self._threadtmp.decode('utf-8')))
966 else:
966 else:
967 try:
967 try:
968 shutil.rmtree(self._testtmp)
968 shutil.rmtree(self._testtmp)
969 except OSError:
969 except OSError:
970 # unreadable directory may be left in $TESTTMP; fix permission
970 # unreadable directory may be left in $TESTTMP; fix permission
971 # and try again
971 # and try again
972 makecleanable(self._testtmp)
972 makecleanable(self._testtmp)
973 shutil.rmtree(self._testtmp, True)
973 shutil.rmtree(self._testtmp, True)
974 shutil.rmtree(self._threadtmp, True)
974 shutil.rmtree(self._threadtmp, True)
975
975
976 if self._usechg:
976 if self._usechg:
977 # chgservers will stop automatically after they find the socket
977 # chgservers will stop automatically after they find the socket
978 # files are deleted
978 # files are deleted
979 shutil.rmtree(self._chgsockdir, True)
979 shutil.rmtree(self._chgsockdir, True)
980
980
981 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
981 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
982 and not self._debug and self._out:
982 and not self._debug and self._out:
983 with open(self.errpath, 'wb') as f:
983 with open(self.errpath, 'wb') as f:
984 for line in self._out:
984 for line in self._out:
985 f.write(line)
985 f.write(line)
986
986
987 vlog("# Ret was:", self._ret, '(%s)' % self.name)
987 vlog("# Ret was:", self._ret, '(%s)' % self.name)
988
988
989 def _run(self, env):
989 def _run(self, env):
990 # This should be implemented in child classes to run tests.
990 # This should be implemented in child classes to run tests.
991 raise unittest.SkipTest('unknown test type')
991 raise unittest.SkipTest('unknown test type')
992
992
993 def abort(self):
993 def abort(self):
994 """Terminate execution of this test."""
994 """Terminate execution of this test."""
995 self._aborted = True
995 self._aborted = True
996
996
997 def _portmap(self, i):
997 def _portmap(self, i):
998 offset = b'' if i == 0 else b'%d' % i
998 offset = b'' if i == 0 else b'%d' % i
999 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
999 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1000
1000
1001 def _getreplacements(self):
1001 def _getreplacements(self):
1002 """Obtain a mapping of text replacements to apply to test output.
1002 """Obtain a mapping of text replacements to apply to test output.
1003
1003
1004 Test output needs to be normalized so it can be compared to expected
1004 Test output needs to be normalized so it can be compared to expected
1005 output. This function defines how some of that normalization will
1005 output. This function defines how some of that normalization will
1006 occur.
1006 occur.
1007 """
1007 """
1008 r = [
1008 r = [
1009 # This list should be parallel to defineport in _getenv
1009 # This list should be parallel to defineport in _getenv
1010 self._portmap(0),
1010 self._portmap(0),
1011 self._portmap(1),
1011 self._portmap(1),
1012 self._portmap(2),
1012 self._portmap(2),
1013 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1013 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1014 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1014 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1015 ]
1015 ]
1016 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1016 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1017
1017
1018 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1018 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1019
1019
1020 if os.path.exists(replacementfile):
1020 if os.path.exists(replacementfile):
1021 data = {}
1021 data = {}
1022 with open(replacementfile, mode='rb') as source:
1022 with open(replacementfile, mode='rb') as source:
1023 # the intermediate 'compile' step helps with debugging
1023 # the intermediate 'compile' step helps with debugging
1024 code = compile(source.read(), replacementfile, 'exec')
1024 code = compile(source.read(), replacementfile, 'exec')
1025 exec(code, data)
1025 exec(code, data)
1026 for value in data.get('substitutions', ()):
1026 for value in data.get('substitutions', ()):
1027 if len(value) != 2:
1027 if len(value) != 2:
1028 msg = 'malformatted substitution in %s: %r'
1028 msg = 'malformatted substitution in %s: %r'
1029 msg %= (replacementfile, value)
1029 msg %= (replacementfile, value)
1030 raise ValueError(msg)
1030 raise ValueError(msg)
1031 r.append(value)
1031 r.append(value)
1032 return r
1032 return r
1033
1033
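# Illustrative sketch (not part of the runner): how the replacement pairs
# returned by _getreplacements() are used.  _runcommand() later applies
# "for s, r in self._getreplacements(): output = re.sub(s, r, output)", so
# with an assumed start port of 20059 the list contains pairs roughly like
#
#   (br':20059\b', b':$HGPORT')     # from _portmap(0)
#   (br':20060\b', b':$HGPORT1')    # from _portmap(1)
#   (re.escape(self._testtmp), b'$TESTTMP')
#
# plus any 'substitutions' pairs from tests/common-pattern.py, and a raw
# output line such as
#   b'listening at http://localhost:20059/\n'
# is normalized to
#   b'listening at http://localhost:$HGPORT/\n'
# before being compared against the .t reference output.
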
1034 def _escapepath(self, p):
1034 def _escapepath(self, p):
1035 if os.name == 'nt':
1035 if os.name == 'nt':
1036 return (
1036 return (
1037 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1037 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1038 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1038 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1039 for c in [p[i:i + 1] for i in range(len(p))]))
1039 for c in [p[i:i + 1] for i in range(len(p))]))
1040 )
1040 )
1041 else:
1041 else:
1042 return re.escape(p)
1042 return re.escape(p)
1043
1043
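# Illustrative sketch (not part of the runner): on Windows, _escapepath()
# turns every letter into a case-insensitive character class and lets '/'
# and '\' match each other, so an assumed path like b'c:\\hg1' becomes a
# regex fragment roughly like
#
#   [cC]\:[/\\][hH][gG]1      (shown unescaped)
#
# which is why the $TESTTMP substitution still applies no matter which
# drive-letter case or path-separator style shows up in test output.  On
# other platforms the path is simply re.escape()d.
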
1044 def _localip(self):
1044 def _localip(self):
1045 if self._useipv6:
1045 if self._useipv6:
1046 return b'::1'
1046 return b'::1'
1047 else:
1047 else:
1048 return b'127.0.0.1'
1048 return b'127.0.0.1'
1049
1049
1050 def _genrestoreenv(self, testenv):
1050 def _genrestoreenv(self, testenv):
1051 """Generate a script that can be used by tests to restore the original
1051 """Generate a script that can be used by tests to restore the original
1052 environment."""
1052 environment."""
1053 # Put the restoreenv script inside self._threadtmp
1053 # Put the restoreenv script inside self._threadtmp
1054 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1054 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1055 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1055 testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
1056
1056
1057 # Only restore environment variable names that the shell allows
1057 # Only restore environment variable names that the shell allows
1058 # us to export.
1058 # us to export.
1059 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1059 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1060
1060
1061 # Do not restore these variables; otherwise tests would fail.
1061 # Do not restore these variables; otherwise tests would fail.
1062 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1062 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1063
1063
1064 with open(scriptpath, 'w') as envf:
1064 with open(scriptpath, 'w') as envf:
1065 for name, value in origenviron.items():
1065 for name, value in origenviron.items():
1066 if not name_regex.match(name):
1066 if not name_regex.match(name):
1067 # Skip environment variables with unusual names not
1067 # Skip environment variables with unusual names not
1068 # allowed by most shells.
1068 # allowed by most shells.
1069 continue
1069 continue
1070 if name in reqnames:
1070 if name in reqnames:
1071 continue
1071 continue
1072 envf.write('%s=%s\n' % (name, shellquote(value)))
1072 envf.write('%s=%s\n' % (name, shellquote(value)))
1073
1073
1074 for name in testenv:
1074 for name in testenv:
1075 if name in origenviron or name in reqnames:
1075 if name in origenviron or name in reqnames:
1076 continue
1076 continue
1077 envf.write('unset %s\n' % (name,))
1077 envf.write('unset %s\n' % (name,))
1078
1078
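# Illustrative sketch (not part of the runner): assuming the original
# environment contained PATH=/usr/bin and LANG=en_US.UTF-8, and that the
# test environment additionally defines HGUSER, the generated
# restoreenv.sh would contain lines along these lines:
#
#   PATH=/usr/bin
#   LANG=en_US.UTF-8
#   unset HGUSER
#
# A test can source the file (its path is exported as HGTEST_RESTOREENV)
# to get back the pre-run-tests environment, except for PYTHON, TESTDIR
# and TESTTMP, which are deliberately left alone.
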
1079 def _getenv(self):
1079 def _getenv(self):
1080 """Obtain environment variables to use during test execution."""
1080 """Obtain environment variables to use during test execution."""
1081 def defineport(i):
1081 def defineport(i):
1082 offset = '' if i == 0 else '%s' % i
1082 offset = '' if i == 0 else '%s' % i
1083 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1083 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1084 env = os.environ.copy()
1084 env = os.environ.copy()
1085 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1085 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1086 env['HGEMITWARNINGS'] = '1'
1086 env['HGEMITWARNINGS'] = '1'
1087 env['TESTTMP'] = _strpath(self._testtmp)
1087 env['TESTTMP'] = _strpath(self._testtmp)
1088 env['TESTNAME'] = self.name
1088 env['TESTNAME'] = self.name
1089 env['HOME'] = _strpath(self._testtmp)
1089 env['HOME'] = _strpath(self._testtmp)
1090 # This number should match portneeded in _getport
1090 # This number should match portneeded in _getport
1091 for port in xrange(3):
1091 for port in xrange(3):
1092 # This list should be parallel to _portmap in _getreplacements
1092 # This list should be parallel to _portmap in _getreplacements
1093 defineport(port)
1093 defineport(port)
1094 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1094 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1095 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1095 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1096 b'daemon.pids'))
1096 b'daemon.pids'))
1097 env["HGEDITOR"] = ('"' + sys.executable + '"'
1097 env["HGEDITOR"] = ('"' + sys.executable + '"'
1098 + ' -c "import sys; sys.exit(0)"')
1098 + ' -c "import sys; sys.exit(0)"')
1099 env["HGUSER"] = "test"
1099 env["HGUSER"] = "test"
1100 env["HGENCODING"] = "ascii"
1100 env["HGENCODING"] = "ascii"
1101 env["HGENCODINGMODE"] = "strict"
1101 env["HGENCODINGMODE"] = "strict"
1102 env["HGHOSTNAME"] = "test-hostname"
1102 env["HGHOSTNAME"] = "test-hostname"
1103 env['HGIPV6'] = str(int(self._useipv6))
1103 env['HGIPV6'] = str(int(self._useipv6))
1104 # See contrib/catapipe.py for how to use this functionality.
1104 # See contrib/catapipe.py for how to use this functionality.
1105 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1105 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1106 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1106 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1107 # non-test one in as a default, otherwise set to devnull
1107 # non-test one in as a default, otherwise set to devnull
1108 env['HGTESTCATAPULTSERVERPIPE'] = \
1108 env['HGTESTCATAPULTSERVERPIPE'] = \
1109 env.get('HGCATAPULTSERVERPIPE', os.devnull)
1109 env.get('HGCATAPULTSERVERPIPE', os.devnull)
1110
1110
1111 extraextensions = []
1111 extraextensions = []
1112 for opt in self._extraconfigopts:
1112 for opt in self._extraconfigopts:
1113 section, key = opt.encode('utf-8').split(b'.', 1)
1113 section, key = opt.encode('utf-8').split(b'.', 1)
1114 if section != b'extensions':
1114 if section != b'extensions':
1115 continue
1115 continue
1116 name = key.split(b'=', 1)[0]
1116 name = key.split(b'=', 1)[0]
1117 extraextensions.append(name)
1117 extraextensions.append(name)
1118
1118
1119 if extraextensions:
1119 if extraextensions:
1120 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1120 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1121
1121
1122 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1122 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1123 # IP addresses.
1123 # IP addresses.
1124 env['LOCALIP'] = _strpath(self._localip())
1124 env['LOCALIP'] = _strpath(self._localip())
1125
1125
1126 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1126 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1127 # but this is needed for testing python instances like dummyssh,
1127 # but this is needed for testing python instances like dummyssh,
1128 # dummysmtpd.py, and dumbhttp.py.
1128 # dummysmtpd.py, and dumbhttp.py.
1129 if PYTHON3 and os.name == 'nt':
1129 if PYTHON3 and os.name == 'nt':
1130 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1130 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1131
1131
1132 # Reset some environment variables to well-known values so that
1132 # Reset some environment variables to well-known values so that
1133 # the tests produce repeatable output.
1133 # the tests produce repeatable output.
1134 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1134 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1135 env['TZ'] = 'GMT'
1135 env['TZ'] = 'GMT'
1136 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1136 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1137 env['COLUMNS'] = '80'
1137 env['COLUMNS'] = '80'
1138 env['TERM'] = 'xterm'
1138 env['TERM'] = 'xterm'
1139
1139
1140 dropped = [
1140 dropped = [
1141 'CDPATH',
1141 'CDPATH',
1142 'CHGDEBUG',
1142 'CHGDEBUG',
1143 'EDITOR',
1143 'EDITOR',
1144 'GREP_OPTIONS',
1144 'GREP_OPTIONS',
1145 'HG',
1145 'HG',
1146 'HGMERGE',
1146 'HGMERGE',
1147 'HGPLAIN',
1147 'HGPLAIN',
1148 'HGPLAINEXCEPT',
1148 'HGPLAINEXCEPT',
1149 'HGPROF',
1149 'HGPROF',
1150 'http_proxy',
1150 'http_proxy',
1151 'no_proxy',
1151 'no_proxy',
1152 'NO_PROXY',
1152 'NO_PROXY',
1153 'PAGER',
1153 'PAGER',
1154 'VISUAL',
1154 'VISUAL',
1155 ]
1155 ]
1156
1156
1157 for k in dropped:
1157 for k in dropped:
1158 if k in env:
1158 if k in env:
1159 del env[k]
1159 del env[k]
1160
1160
1161 # unset env related to hooks
1161 # unset env related to hooks
1162 for k in list(env):
1162 for k in list(env):
1163 if k.startswith('HG_'):
1163 if k.startswith('HG_'):
1164 del env[k]
1164 del env[k]
1165
1165
1166 if self._usechg:
1166 if self._usechg:
1167 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1167 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1168
1168
1169 return env
1169 return env
1170
1170
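# Illustrative sketch (not part of the runner): with an assumed start port
# of 20059, the environment handed to a test contains, among other things:
#
#   HGPORT=20059, HGPORT1=20060, HGPORT2=20061   # parallel to _portmap
#   TESTTMP=<per-test dir>, HOME=$TESTTMP, HGRCPATH=<threadtmp>/.hgrc
#   HGUSER=test, HGENCODING=ascii, TZ=GMT, LANG=LC_ALL=LANGUAGE=C
#
# while HG_* hook variables, proxy settings and pager/editor variables
# from the caller's environment are scrubbed so output stays repeatable.
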
1171 def _createhgrc(self, path):
1171 def _createhgrc(self, path):
1172 """Create an hgrc file for this test."""
1172 """Create an hgrc file for this test."""
1173 with open(path, 'wb') as hgrc:
1173 with open(path, 'wb') as hgrc:
1174 hgrc.write(b'[ui]\n')
1174 hgrc.write(b'[ui]\n')
1175 hgrc.write(b'slash = True\n')
1175 hgrc.write(b'slash = True\n')
1176 hgrc.write(b'interactive = False\n')
1176 hgrc.write(b'interactive = False\n')
1177 hgrc.write(b'merge = internal:merge\n')
1177 hgrc.write(b'merge = internal:merge\n')
1178 hgrc.write(b'mergemarkers = detailed\n')
1178 hgrc.write(b'mergemarkers = detailed\n')
1179 hgrc.write(b'promptecho = True\n')
1179 hgrc.write(b'promptecho = True\n')
1180 hgrc.write(b'[defaults]\n')
1180 hgrc.write(b'[defaults]\n')
1181 hgrc.write(b'[devel]\n')
1181 hgrc.write(b'[devel]\n')
1182 hgrc.write(b'all-warnings = true\n')
1182 hgrc.write(b'all-warnings = true\n')
1183 hgrc.write(b'default-date = 0 0\n')
1183 hgrc.write(b'default-date = 0 0\n')
1184 hgrc.write(b'[largefiles]\n')
1184 hgrc.write(b'[largefiles]\n')
1185 hgrc.write(b'usercache = %s\n' %
1185 hgrc.write(b'usercache = %s\n' %
1186 (os.path.join(self._testtmp, b'.cache/largefiles')))
1186 (os.path.join(self._testtmp, b'.cache/largefiles')))
1187 hgrc.write(b'[lfs]\n')
1187 hgrc.write(b'[lfs]\n')
1188 hgrc.write(b'usercache = %s\n' %
1188 hgrc.write(b'usercache = %s\n' %
1189 (os.path.join(self._testtmp, b'.cache/lfs')))
1189 (os.path.join(self._testtmp, b'.cache/lfs')))
1190 hgrc.write(b'[web]\n')
1190 hgrc.write(b'[web]\n')
1191 hgrc.write(b'address = localhost\n')
1191 hgrc.write(b'address = localhost\n')
1192 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1192 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1193 hgrc.write(b'server-header = testing stub value\n')
1193 hgrc.write(b'server-header = testing stub value\n')
1194
1194
1195 for opt in self._extraconfigopts:
1195 for opt in self._extraconfigopts:
1196 section, key = opt.encode('utf-8').split(b'.', 1)
1196 section, key = opt.encode('utf-8').split(b'.', 1)
1197 assert b'=' in key, ('extra config opt %s must '
1197 assert b'=' in key, ('extra config opt %s must '
1198 'have an = for assignment' % opt)
1198 'have an = for assignment' % opt)
1199 hgrc.write(b'[%s]\n%s\n' % (section, key))
1199 hgrc.write(b'[%s]\n%s\n' % (section, key))
1200
1200
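# Illustrative sketch (not part of the runner): the generated .hgrc starts
# with the [ui]/[devel]/[web] defaults written above, and each
# --extra-config-opt is appended as its own section.  For an assumed
# invocation like
#
#   ./run-tests.py --extra-config-opt extensions.rebase= test-foo.t
#
# the option b'extensions.rebase=' is split on the first b'.' into section
# b'extensions' and key b'rebase=', producing:
#
#   [extensions]
#   rebase=
#
# The assert above guarantees every such option carries an '=' sign.
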
1201 def fail(self, msg):
1201 def fail(self, msg):
1202 # unittest differentiates between errored and failed.
1202 # unittest differentiates between errored and failed.
1203 # Failed is denoted by AssertionError (by default at least).
1203 # Failed is denoted by AssertionError (by default at least).
1204 raise AssertionError(msg)
1204 raise AssertionError(msg)
1205
1205
1206 def _runcommand(self, cmd, env, normalizenewlines=False):
1206 def _runcommand(self, cmd, env, normalizenewlines=False):
1207 """Run command in a sub-process, capturing the output (stdout and
1207 """Run command in a sub-process, capturing the output (stdout and
1208 stderr).
1208 stderr).
1209
1209
1210 Return a tuple (exitcode, output). output is None in debug mode.
1210 Return a tuple (exitcode, output). output is None in debug mode.
1211 """
1211 """
1212 if self._debug:
1212 if self._debug:
1213 proc = subprocess.Popen(_strpath(cmd), shell=True,
1213 proc = subprocess.Popen(_strpath(cmd), shell=True,
1214 cwd=_strpath(self._testtmp),
1214 cwd=_strpath(self._testtmp),
1215 env=env)
1215 env=env)
1216 ret = proc.wait()
1216 ret = proc.wait()
1217 return (ret, None)
1217 return (ret, None)
1218
1218
1219 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1219 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1220 def cleanup():
1220 def cleanup():
1221 terminate(proc)
1221 terminate(proc)
1222 ret = proc.wait()
1222 ret = proc.wait()
1223 if ret == 0:
1223 if ret == 0:
1224 ret = signal.SIGTERM << 8
1224 ret = signal.SIGTERM << 8
1225 killdaemons(env['DAEMON_PIDS'])
1225 killdaemons(env['DAEMON_PIDS'])
1226 return ret
1226 return ret
1227
1227
1228 proc.tochild.close()
1228 proc.tochild.close()
1229
1229
1230 try:
1230 try:
1231 output = proc.fromchild.read()
1231 output = proc.fromchild.read()
1232 except KeyboardInterrupt:
1232 except KeyboardInterrupt:
1233 vlog('# Handling keyboard interrupt')
1233 vlog('# Handling keyboard interrupt')
1234 cleanup()
1234 cleanup()
1235 raise
1235 raise
1236
1236
1237 ret = proc.wait()
1237 ret = proc.wait()
1238 if wifexited(ret):
1238 if wifexited(ret):
1239 ret = os.WEXITSTATUS(ret)
1239 ret = os.WEXITSTATUS(ret)
1240
1240
1241 if proc.timeout:
1241 if proc.timeout:
1242 ret = 'timeout'
1242 ret = 'timeout'
1243
1243
1244 if ret:
1244 if ret:
1245 killdaemons(env['DAEMON_PIDS'])
1245 killdaemons(env['DAEMON_PIDS'])
1246
1246
1247 for s, r in self._getreplacements():
1247 for s, r in self._getreplacements():
1248 output = re.sub(s, r, output)
1248 output = re.sub(s, r, output)
1249
1249
1250 if normalizenewlines:
1250 if normalizenewlines:
1251 output = output.replace(b'\r\n', b'\n')
1251 output = output.replace(b'\r\n', b'\n')
1252
1252
1253 return ret, output.splitlines(True)
1253 return ret, output.splitlines(True)
1254
1254
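# Illustrative sketch (not part of the runner): _runcommand() and its
# cleanup() helper traffic in raw wait statuses, where a normal exit code
# lives in the high byte.  That is why cleanup() synthesizes a termination
# status with "signal.SIGTERM << 8" when the terminated child reports 0,
# and why the status is decoded with os.WEXITSTATUS() once wifexited()
# says the child exited normally:
#
#   status = 15 << 8           # what cleanup() fakes (SIGTERM == 15)
#   os.WEXITSTATUS(status)     # -> 15 on POSIX
#
# A 'timeout' marker or any nonzero code then triggers killdaemons() so
# stray server processes do not outlive the test.
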
1255 class PythonTest(Test):
1255 class PythonTest(Test):
1256 """A Python-based test."""
1256 """A Python-based test."""
1257
1257
1258 @property
1258 @property
1259 def refpath(self):
1259 def refpath(self):
1260 return os.path.join(self._testdir, b'%s.out' % self.bname)
1260 return os.path.join(self._testdir, b'%s.out' % self.bname)
1261
1261
1262 def _run(self, env):
1262 def _run(self, env):
1263 py3switch = self._py3warnings and b' -3' or b''
1263 py3switch = self._py3warnings and b' -3' or b''
1264 # Quote the python(3) executable for Windows
1264 # Quote the python(3) executable for Windows
1265 cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
1265 cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
1266 vlog("# Running", cmd)
1266 vlog("# Running", cmd)
1267 normalizenewlines = os.name == 'nt'
1267 normalizenewlines = os.name == 'nt'
1268 result = self._runcommand(cmd, env,
1268 result = self._runcommand(cmd, env,
1269 normalizenewlines=normalizenewlines)
1269 normalizenewlines=normalizenewlines)
1270 if self._aborted:
1270 if self._aborted:
1271 raise KeyboardInterrupt()
1271 raise KeyboardInterrupt()
1272
1272
1273 return result
1273 return result
1274
1274
1275 # Some glob patterns apply only in some circumstances, so the runner
1275 # Some glob patterns apply only in some circumstances, so the runner
1276 # might otherwise strip (glob) annotations that really should be
1276 # might otherwise strip (glob) annotations that really should be
1277 # retained; lines matching these patterns keep their annotation.
1277 # retained; lines matching these patterns keep their annotation.
1278 checkcodeglobpats = [
1278 checkcodeglobpats = [
1279 # On Windows it looks like \ doesn't require a (glob), but we know
1279 # On Windows it looks like \ doesn't require a (glob), but we know
1280 # better.
1280 # better.
1281 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1281 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1282 re.compile(br'^moving \S+/.*[^)]$'),
1282 re.compile(br'^moving \S+/.*[^)]$'),
1283 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1283 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1284 # Not all platforms have 127.0.0.1 as loopback (though most do),
1284 # Not all platforms have 127.0.0.1 as loopback (though most do),
1285 # so we always glob that too.
1285 # so we always glob that too.
1286 re.compile(br'.*\$LOCALIP.*$'),
1286 re.compile(br'.*\$LOCALIP.*$'),
1287 ]
1287 ]
1288
1288
1289 bchr = chr
1289 bchr = chr
1290 if PYTHON3:
1290 if PYTHON3:
1291 bchr = lambda x: bytes([x])
1291 bchr = lambda x: bytes([x])
1292
1292
1293 class TTest(Test):
1293 class TTest(Test):
1294 """A "t test" is a test backed by a .t file."""
1294 """A "t test" is a test backed by a .t file."""
1295
1295
1296 SKIPPED_PREFIX = b'skipped: '
1296 SKIPPED_PREFIX = b'skipped: '
1297 FAILED_PREFIX = b'hghave check failed: '
1297 FAILED_PREFIX = b'hghave check failed: '
1298 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1298 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1299
1299
1300 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1300 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1301 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1301 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1302 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1302 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1303
1303
1304 def __init__(self, path, *args, **kwds):
1304 def __init__(self, path, *args, **kwds):
1305 # accept an extra "case" parameter
1305 # accept an extra "case" parameter
1306 case = kwds.pop('case', [])
1306 case = kwds.pop('case', [])
1307 self._case = case
1307 self._case = case
1308 self._allcases = {x for y in parsettestcases(path) for x in y}
1308 self._allcases = {x for y in parsettestcases(path) for x in y}
1309 super(TTest, self).__init__(path, *args, **kwds)
1309 super(TTest, self).__init__(path, *args, **kwds)
1310 if case:
1310 if case:
1311 casepath = b'#'.join(case)
1311 casepath = b'#'.join(case)
1312 self.name = '%s#%s' % (self.name, _strpath(casepath))
1312 self.name = '%s#%s' % (self.name, _strpath(casepath))
1313 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1313 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1314 self._tmpname += b'-%s' % casepath
1314 self._tmpname += b'-%s' % casepath
1315 self._have = {}
1315 self._have = {}
1316
1316
1317 @property
1317 @property
1318 def refpath(self):
1318 def refpath(self):
1319 return os.path.join(self._testdir, self.bname)
1319 return os.path.join(self._testdir, self.bname)
1320
1320
1321 def _run(self, env):
1321 def _run(self, env):
1322 with open(self.path, 'rb') as f:
1322 with open(self.path, 'rb') as f:
1323 lines = f.readlines()
1323 lines = f.readlines()
1324
1324
1325 # .t file is both reference output and the test input, keep reference
1325 # .t file is both reference output and the test input, keep reference
1326 # output updated with the test input. This avoids some race
1326 # output updated with the test input. This avoids some race
1327 # conditions where the reference output does not match the actual test.
1327 # conditions where the reference output does not match the actual test.
1328 if self._refout is not None:
1328 if self._refout is not None:
1329 self._refout = lines
1329 self._refout = lines
1330
1330
1331 salt, script, after, expected = self._parsetest(lines)
1331 salt, script, after, expected = self._parsetest(lines)
1332
1332
1333 # Write out the generated script.
1333 # Write out the generated script.
1334 fname = b'%s.sh' % self._testtmp
1334 fname = b'%s.sh' % self._testtmp
1335 with open(fname, 'wb') as f:
1335 with open(fname, 'wb') as f:
1336 for l in script:
1336 for l in script:
1337 f.write(l)
1337 f.write(l)
1338
1338
1339 cmd = b'%s "%s"' % (self._shell, fname)
1339 cmd = b'%s "%s"' % (self._shell, fname)
1340 vlog("# Running", cmd)
1340 vlog("# Running", cmd)
1341
1341
1342 exitcode, output = self._runcommand(cmd, env)
1342 exitcode, output = self._runcommand(cmd, env)
1343
1343
1344 if self._aborted:
1344 if self._aborted:
1345 raise KeyboardInterrupt()
1345 raise KeyboardInterrupt()
1346
1346
1347 # Do not merge output if skipped. Return hghave message instead.
1347 # Do not merge output if skipped. Return hghave message instead.
1348 # Similarly, with --debug, output is None.
1348 # Similarly, with --debug, output is None.
1349 if exitcode == self.SKIPPED_STATUS or output is None:
1349 if exitcode == self.SKIPPED_STATUS or output is None:
1350 return exitcode, output
1350 return exitcode, output
1351
1351
1352 return self._processoutput(exitcode, output, salt, after, expected)
1352 return self._processoutput(exitcode, output, salt, after, expected)
1353
1353
1354 def _hghave(self, reqs):
1354 def _hghave(self, reqs):
1355 allreqs = b' '.join(reqs)
1355 allreqs = b' '.join(reqs)
1356 if allreqs in self._have:
1356 if allreqs in self._have:
1357 return self._have.get(allreqs)
1357 return self._have.get(allreqs)
1358
1358
1359 # TODO do something smarter when all other uses of hghave are gone.
1359 # TODO do something smarter when all other uses of hghave are gone.
1360 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1360 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1361 tdir = runtestdir.replace(b'\\', b'/')
1361 tdir = runtestdir.replace(b'\\', b'/')
1362 proc = Popen4(b'%s -c "%s/hghave %s"' %
1362 proc = Popen4(b'%s -c "%s/hghave %s"' %
1363 (self._shell, tdir, allreqs),
1363 (self._shell, tdir, allreqs),
1364 self._testtmp, 0, self._getenv())
1364 self._testtmp, 0, self._getenv())
1365 stdout, stderr = proc.communicate()
1365 stdout, stderr = proc.communicate()
1366 ret = proc.wait()
1366 ret = proc.wait()
1367 if wifexited(ret):
1367 if wifexited(ret):
1368 ret = os.WEXITSTATUS(ret)
1368 ret = os.WEXITSTATUS(ret)
1369 if ret == 2:
1369 if ret == 2:
1370 print(stdout.decode('utf-8'))
1370 print(stdout.decode('utf-8'))
1371 sys.exit(1)
1371 sys.exit(1)
1372
1372
1373 if ret != 0:
1373 if ret != 0:
1374 self._have[allreqs] = (False, stdout)
1374 self._have[allreqs] = (False, stdout)
1375 return False, stdout
1375 return False, stdout
1376
1376
1377 if b'slow' in reqs:
1377 if b'slow' in reqs:
1378 self._timeout = self._slowtimeout
1378 self._timeout = self._slowtimeout
1379
1379
1380 self._have[allreqs] = (True, None)
1380 self._have[allreqs] = (True, None)
1381 return True, None
1381 return True, None
1382
1382
1383 def _iftest(self, args):
1383 def _iftest(self, args):
1384 # implements "#if"
1384 # implements "#if"
1385 reqs = []
1385 reqs = []
1386 for arg in args:
1386 for arg in args:
1387 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1387 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1388 if arg[3:] in self._case:
1388 if arg[3:] in self._case:
1389 return False
1389 return False
1390 elif arg in self._allcases:
1390 elif arg in self._allcases:
1391 if arg not in self._case:
1391 if arg not in self._case:
1392 return False
1392 return False
1393 else:
1393 else:
1394 reqs.append(arg)
1394 reqs.append(arg)
1395 return self._hghave(reqs)[0]
1395 return self._hghave(reqs)[0]
1396
1396
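# Illustrative sketch (not part of the runner): assume a test declares
# "#testcases simple advanced" and is currently running the 'simple'
# case.  For a directive such as
#
#   #if simple symlink no-msys
#
# _iftest() treats 'simple' as a case name (it is active, so it does not
# veto the block), while 'symlink' and 'no-msys' are not case names and
# are therefore forwarded to hghave via _hghave(), whose cached answer
# decides whether the guarded block is executed or skipped.
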
1397 def _parsetest(self, lines):
1397 def _parsetest(self, lines):
1398 # We generate a shell script which outputs unique markers to line
1398 # We generate a shell script which outputs unique markers to line
1399 # up script results with our source. These markers include input
1399 # up script results with our source. These markers include input
1400 # line number and the last return code.
1400 # line number and the last return code.
1401 salt = b"SALT%d" % time.time()
1401 salt = b"SALT%d" % time.time()
1402 def addsalt(line, inpython):
1402 def addsalt(line, inpython):
1403 if inpython:
1403 if inpython:
1404 script.append(b'%s %d 0\n' % (salt, line))
1404 script.append(b'%s %d 0\n' % (salt, line))
1405 else:
1405 else:
1406 script.append(b'echo %s %d $?\n' % (salt, line))
1406 script.append(b'echo %s %d $?\n' % (salt, line))
1407 activetrace = []
1407 activetrace = []
1408 session = str(uuid.uuid4())
1408 session = str(uuid.uuid4())
1409 if PYTHON3:
1409 if PYTHON3:
1410 session = session.encode('ascii')
1410 session = session.encode('ascii')
1411 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or \
1411 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or \
1412 os.getenv('HGCATAPULTSERVERPIPE')
1412 os.getenv('HGCATAPULTSERVERPIPE')
1413 def toggletrace(cmd=None):
1413 def toggletrace(cmd=None):
1414 if not hgcatapult or hgcatapult == os.devnull:
1414 if not hgcatapult or hgcatapult == os.devnull:
1415 return
1415 return
1416
1416
1417 if activetrace:
1417 if activetrace:
1418 script.append(
1418 script.append(
1419 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1419 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1420 session, activetrace[0]))
1420 session, activetrace[0]))
1421 if cmd is None:
1421 if cmd is None:
1422 return
1422 return
1423
1423
1424 if isinstance(cmd, str):
1424 if isinstance(cmd, str):
1425 quoted = shellquote(cmd.strip())
1425 quoted = shellquote(cmd.strip())
1426 else:
1426 else:
1427 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1427 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1428 quoted = quoted.replace(b'\\', b'\\\\')
1428 quoted = quoted.replace(b'\\', b'\\\\')
1429 script.append(
1429 script.append(
1430 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1430 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1431 session, quoted))
1431 session, quoted))
1432 activetrace[0:] = [quoted]
1432 activetrace[0:] = [quoted]
1433
1433
1434 script = []
1434 script = []
1435
1435
1436 # After we run the shell script, we re-unify the script output
1436 # After we run the shell script, we re-unify the script output
1437 # with non-active parts of the source, with synchronization by our
1437 # with non-active parts of the source, with synchronization by our
1438 # SALT line number markers. The after table contains the non-active
1438 # SALT line number markers. The after table contains the non-active
1439 # components, ordered by line number.
1439 # components, ordered by line number.
1440 after = {}
1440 after = {}
1441
1441
1442 # Expected shell script output.
1442 # Expected shell script output.
1443 expected = {}
1443 expected = {}
1444
1444
1445 pos = prepos = -1
1445 pos = prepos = -1
1446
1446
1447 # True or False when in a true or false conditional section
1447 # True or False when in a true or false conditional section
1448 skipping = None
1448 skipping = None
1449
1449
1450 # We keep track of whether or not we're in a Python block so we
1450 # We keep track of whether or not we're in a Python block so we
1451 # can generate the surrounding doctest magic.
1451 # can generate the surrounding doctest magic.
1452 inpython = False
1452 inpython = False
1453
1453
1454 if self._debug:
1454 if self._debug:
1455 script.append(b'set -x\n')
1455 script.append(b'set -x\n')
1456 if self._hgcommand != b'hg':
1456 if self._hgcommand != b'hg':
1457 script.append(b'alias hg="%s"\n' % self._hgcommand)
1457 script.append(b'alias hg="%s"\n' % self._hgcommand)
1458 if os.getenv('MSYSTEM'):
1458 if os.getenv('MSYSTEM'):
1459 script.append(b'alias pwd="pwd -W"\n')
1459 script.append(b'alias pwd="pwd -W"\n')
1460
1460
1461 if hgcatapult and hgcatapult != os.devnull:
1461 if hgcatapult and hgcatapult != os.devnull:
1462 # Kludge: use a while loop to keep the pipe from getting
1462 # Kludge: use a while loop to keep the pipe from getting
1463 # closed by our echo commands. The still-running file gets
1463 # closed by our echo commands. The still-running file gets
1464 # reaped at the end of the script, which causes the while
1464 # reaped at the end of the script, which causes the while
1465 # loop to exit and closes the pipe. Sigh.
1465 # loop to exit and closes the pipe. Sigh.
1466 script.append(
1466 script.append(
1467 b'rtendtracing() {\n'
1467 b'rtendtracing() {\n'
1468 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1468 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1469 b' rm -f "$TESTTMP/.still-running"\n'
1469 b' rm -f "$TESTTMP/.still-running"\n'
1470 b'}\n'
1470 b'}\n'
1471 b'trap "rtendtracing" 0\n'
1471 b'trap "rtendtracing" 0\n'
1472 b'touch "$TESTTMP/.still-running"\n'
1472 b'touch "$TESTTMP/.still-running"\n'
1473 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1473 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1474 b'> %(catapult)s &\n'
1474 b'> %(catapult)s &\n'
1475 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1475 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1476 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1476 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1477 % {
1477 % {
1478 'name': self.name,
1478 'name': self.name,
1479 'session': session,
1479 'session': session,
1480 'catapult': hgcatapult,
1480 'catapult': hgcatapult,
1481 }
1481 }
1482 )
1482 )
1483
1483
1484 if self._case:
1484 if self._case:
1485 casestr = b'#'.join(self._case)
1485 casestr = b'#'.join(self._case)
1486 if isinstance(self._case, str):
1486 if isinstance(self._case, str):
1487 quoted = shellquote(casestr)
1487 quoted = shellquote(casestr)
1488 else:
1488 else:
1489 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1489 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1490 script.append(b'TESTCASE=%s\n' % quoted)
1490 script.append(b'TESTCASE=%s\n' % quoted)
1491 script.append(b'export TESTCASE\n')
1491 script.append(b'export TESTCASE\n')
1492
1492
1493 n = 0
1493 n = 0
1494 for n, l in enumerate(lines):
1494 for n, l in enumerate(lines):
1495 if not l.endswith(b'\n'):
1495 if not l.endswith(b'\n'):
1496 l += b'\n'
1496 l += b'\n'
1497 if l.startswith(b'#require'):
1497 if l.startswith(b'#require'):
1498 lsplit = l.split()
1498 lsplit = l.split()
1499 if len(lsplit) < 2 or lsplit[0] != b'#require':
1499 if len(lsplit) < 2 or lsplit[0] != b'#require':
1500 after.setdefault(pos, []).append(' !!! invalid #require\n')
1500 after.setdefault(pos, []).append(' !!! invalid #require\n')
1501 if not skipping:
1501 if not skipping:
1502 haveresult, message = self._hghave(lsplit[1:])
1502 haveresult, message = self._hghave(lsplit[1:])
1503 if not haveresult:
1503 if not haveresult:
1504 script = [b'echo "%s"\nexit 80\n' % message]
1504 script = [b'echo "%s"\nexit 80\n' % message]
1505 break
1505 break
1506 after.setdefault(pos, []).append(l)
1506 after.setdefault(pos, []).append(l)
1507 elif l.startswith(b'#if'):
1507 elif l.startswith(b'#if'):
1508 lsplit = l.split()
1508 lsplit = l.split()
1509 if len(lsplit) < 2 or lsplit[0] != b'#if':
1509 if len(lsplit) < 2 or lsplit[0] != b'#if':
1510 after.setdefault(pos, []).append(' !!! invalid #if\n')
1510 after.setdefault(pos, []).append(' !!! invalid #if\n')
1511 if skipping is not None:
1511 if skipping is not None:
1512 after.setdefault(pos, []).append(' !!! nested #if\n')
1512 after.setdefault(pos, []).append(' !!! nested #if\n')
1513 skipping = not self._iftest(lsplit[1:])
1513 skipping = not self._iftest(lsplit[1:])
1514 after.setdefault(pos, []).append(l)
1514 after.setdefault(pos, []).append(l)
1515 elif l.startswith(b'#else'):
1515 elif l.startswith(b'#else'):
1516 if skipping is None:
1516 if skipping is None:
1517 after.setdefault(pos, []).append(' !!! missing #if\n')
1517 after.setdefault(pos, []).append(' !!! missing #if\n')
1518 skipping = not skipping
1518 skipping = not skipping
1519 after.setdefault(pos, []).append(l)
1519 after.setdefault(pos, []).append(l)
1520 elif l.startswith(b'#endif'):
1520 elif l.startswith(b'#endif'):
1521 if skipping is None:
1521 if skipping is None:
1522 after.setdefault(pos, []).append(' !!! missing #if\n')
1522 after.setdefault(pos, []).append(' !!! missing #if\n')
1523 skipping = None
1523 skipping = None
1524 after.setdefault(pos, []).append(l)
1524 after.setdefault(pos, []).append(l)
1525 elif skipping:
1525 elif skipping:
1526 after.setdefault(pos, []).append(l)
1526 after.setdefault(pos, []).append(l)
1527 elif l.startswith(b' >>> '): # python inlines
1527 elif l.startswith(b' >>> '): # python inlines
1528 after.setdefault(pos, []).append(l)
1528 after.setdefault(pos, []).append(l)
1529 prepos = pos
1529 prepos = pos
1530 pos = n
1530 pos = n
1531 if not inpython:
1531 if not inpython:
1532 # We've just entered a Python block. Add the header.
1532 # We've just entered a Python block. Add the header.
1533 inpython = True
1533 inpython = True
1534 addsalt(prepos, False) # Make sure we report the exit code.
1534 addsalt(prepos, False) # Make sure we report the exit code.
1535 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1535 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1536 addsalt(n, True)
1536 addsalt(n, True)
1537 script.append(l[2:])
1537 script.append(l[2:])
1538 elif l.startswith(b' ... '): # python inlines
1538 elif l.startswith(b' ... '): # python inlines
1539 after.setdefault(prepos, []).append(l)
1539 after.setdefault(prepos, []).append(l)
1540 script.append(l[2:])
1540 script.append(l[2:])
1541 elif l.startswith(b' $ '): # commands
1541 elif l.startswith(b' $ '): # commands
1542 if inpython:
1542 if inpython:
1543 script.append(b'EOF\n')
1543 script.append(b'EOF\n')
1544 inpython = False
1544 inpython = False
1545 after.setdefault(pos, []).append(l)
1545 after.setdefault(pos, []).append(l)
1546 prepos = pos
1546 prepos = pos
1547 pos = n
1547 pos = n
1548 addsalt(n, False)
1548 addsalt(n, False)
1549 rawcmd = l[4:]
1549 rawcmd = l[4:]
1550 cmd = rawcmd.split()
1550 cmd = rawcmd.split()
1551 toggletrace(rawcmd)
1551 toggletrace(rawcmd)
1552 if len(cmd) == 2 and cmd[0] == b'cd':
1552 if len(cmd) == 2 and cmd[0] == b'cd':
1553 l = b' $ cd %s || exit 1\n' % cmd[1]
1553 l = b' $ cd %s || exit 1\n' % cmd[1]
1554 script.append(rawcmd)
1554 script.append(rawcmd)
1555 elif l.startswith(b' > '): # continuations
1555 elif l.startswith(b' > '): # continuations
1556 after.setdefault(prepos, []).append(l)
1556 after.setdefault(prepos, []).append(l)
1557 script.append(l[4:])
1557 script.append(l[4:])
1558 elif l.startswith(b' '): # results
1558 elif l.startswith(b' '): # results
1559 # Queue up a list of expected results.
1559 # Queue up a list of expected results.
1560 expected.setdefault(pos, []).append(l[2:])
1560 expected.setdefault(pos, []).append(l[2:])
1561 else:
1561 else:
1562 if inpython:
1562 if inpython:
1563 script.append(b'EOF\n')
1563 script.append(b'EOF\n')
1564 inpython = False
1564 inpython = False
1565 # Non-command/result. Queue up for merged output.
1565 # Non-command/result. Queue up for merged output.
1566 after.setdefault(pos, []).append(l)
1566 after.setdefault(pos, []).append(l)
1567
1567
1568 if inpython:
1568 if inpython:
1569 script.append(b'EOF\n')
1569 script.append(b'EOF\n')
1570 if skipping is not None:
1570 if skipping is not None:
1571 after.setdefault(pos, []).append(' !!! missing #endif\n')
1571 after.setdefault(pos, []).append(' !!! missing #endif\n')
1572 addsalt(n + 1, False)
1572 addsalt(n + 1, False)
1573 # Need to end any current per-command trace
1573 # Need to end any current per-command trace
1574 if activetrace:
1574 if activetrace:
1575 toggletrace()
1575 toggletrace()
1576 return salt, script, after, expected
1576 return salt, script, after, expected
1577
1577
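# Illustrative sketch (not part of the runner): for an assumed .t fragment
#
#   $ echo hi
#   hi
#
# _parsetest() emits roughly the following into the generated .sh file
# (with SALT standing for the time-based marker built above):
#
#   echo SALT 0 $?
#   echo hi
#   echo SALT 2 $?
#
# The "SALT <line> <returncode>" lines never appear in the .t file itself;
# _processoutput() splits the captured output on them to pair each chunk
# of real output with the source line that produced it and with the
# command's exit status.
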
1578 def _processoutput(self, exitcode, output, salt, after, expected):
1578 def _processoutput(self, exitcode, output, salt, after, expected):
1579 # Merge the script output back into a unified test.
1579 # Merge the script output back into a unified test.
1580 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1580 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1581 if exitcode != 0:
1581 if exitcode != 0:
1582 warnonly = 3
1582 warnonly = 3
1583
1583
1584 pos = -1
1584 pos = -1
1585 postout = []
1585 postout = []
1586 for l in output:
1586 for l in output:
1587 lout, lcmd = l, None
1587 lout, lcmd = l, None
1588 if salt in l:
1588 if salt in l:
1589 lout, lcmd = l.split(salt, 1)
1589 lout, lcmd = l.split(salt, 1)
1590
1590
1591 while lout:
1591 while lout:
1592 if not lout.endswith(b'\n'):
1592 if not lout.endswith(b'\n'):
1593 lout += b' (no-eol)\n'
1593 lout += b' (no-eol)\n'
1594
1594
1595 # Find the expected output at the current position.
1595 # Find the expected output at the current position.
1596 els = [None]
1596 els = [None]
1597 if expected.get(pos, None):
1597 if expected.get(pos, None):
1598 els = expected[pos]
1598 els = expected[pos]
1599
1599
1600 optional = []
1600 optional = []
1601 for i, el in enumerate(els):
1601 for i, el in enumerate(els):
1602 r = False
1602 r = False
1603 if el:
1603 if el:
1604 r, exact = self.linematch(el, lout)
1604 r, exact = self.linematch(el, lout)
1605 if isinstance(r, str):
1605 if isinstance(r, str):
1606 if r == '-glob':
1606 if r == '-glob':
1607 lout = ''.join(el.rsplit(' (glob)', 1))
1607 lout = ''.join(el.rsplit(' (glob)', 1))
1608 r = '' # Warn only this line.
1608 r = '' # Warn only this line.
1609 elif r == "retry":
1609 elif r == "retry":
1610 postout.append(b' ' + el)
1610 postout.append(b' ' + el)
1611 else:
1611 else:
1612 log('\ninfo, unknown linematch result: %r\n' % r)
1612 log('\ninfo, unknown linematch result: %r\n' % r)
1613 r = False
1613 r = False
1614 if r:
1614 if r:
1615 els.pop(i)
1615 els.pop(i)
1616 break
1616 break
1617 if el:
1617 if el:
1618 if el.endswith(b" (?)\n"):
1618 if el.endswith(b" (?)\n"):
1619 optional.append(i)
1619 optional.append(i)
1620 else:
1620 else:
1621 m = optline.match(el)
1621 m = optline.match(el)
1622 if m:
1622 if m:
1623 conditions = [
1623 conditions = [
1624 c for c in m.group(2).split(b' ')]
1624 c for c in m.group(2).split(b' ')]
1625
1625
1626 if not self._iftest(conditions):
1626 if not self._iftest(conditions):
1627 optional.append(i)
1627 optional.append(i)
1628 if exact:
1628 if exact:
1629 # Don't allow the line to be matched against a later
1629 # Don't allow the line to be matched against a later
1630 # line in the output
1630 # line in the output
1631 els.pop(i)
1631 els.pop(i)
1632 break
1632 break
1633
1633
1634 if r:
1634 if r:
1635 if r == "retry":
1635 if r == "retry":
1636 continue
1636 continue
1637 # clean up any optional leftovers
1637 # clean up any optional leftovers
1638 for i in optional:
1638 for i in optional:
1639 postout.append(b' ' + els[i])
1639 postout.append(b' ' + els[i])
1640 for i in reversed(optional):
1640 for i in reversed(optional):
1641 del els[i]
1641 del els[i]
1642 postout.append(b' ' + el)
1642 postout.append(b' ' + el)
1643 else:
1643 else:
1644 if self.NEEDESCAPE(lout):
1644 if self.NEEDESCAPE(lout):
1645 lout = TTest._stringescape(b'%s (esc)\n' %
1645 lout = TTest._stringescape(b'%s (esc)\n' %
1646 lout.rstrip(b'\n'))
1646 lout.rstrip(b'\n'))
1647 postout.append(b' ' + lout) # Let diff deal with it.
1647 postout.append(b' ' + lout) # Let diff deal with it.
1648 if r != '': # If line failed.
1648 if r != '': # If line failed.
1649 warnonly = 3 # for sure not
1649 warnonly = 3 # for sure not
1650 elif warnonly == 1: # Is "not yet" and line is warn only.
1650 elif warnonly == 1: # Is "not yet" and line is warn only.
1651 warnonly = 2 # Yes do warn.
1651 warnonly = 2 # Yes do warn.
1652 break
1652 break
1653 else:
1653 else:
1654 # clean up any optional leftovers
1654 # clean up any optional leftovers
1655 while expected.get(pos, None):
1655 while expected.get(pos, None):
1656 el = expected[pos].pop(0)
1656 el = expected[pos].pop(0)
1657 if el:
1657 if el:
1658 if not el.endswith(b" (?)\n"):
1658 if not el.endswith(b" (?)\n"):
1659 m = optline.match(el)
1659 m = optline.match(el)
1660 if m:
1660 if m:
1661 conditions = [c for c in m.group(2).split(b' ')]
1661 conditions = [c for c in m.group(2).split(b' ')]
1662
1662
1663 if self._iftest(conditions):
1663 if self._iftest(conditions):
1664 # Don't append as optional line
1664 # Don't append as optional line
1665 continue
1665 continue
1666 else:
1666 else:
1667 continue
1667 continue
1668 postout.append(b' ' + el)
1668 postout.append(b' ' + el)
1669
1669
1670 if lcmd:
1670 if lcmd:
1671 # Add on last return code.
1671 # Add on last return code.
1672 ret = int(lcmd.split()[1])
1672 ret = int(lcmd.split()[1])
1673 if ret != 0:
1673 if ret != 0:
1674 postout.append(b' [%d]\n' % ret)
1674 postout.append(b' [%d]\n' % ret)
1675 if pos in after:
1675 if pos in after:
1676 # Merge in non-active test bits.
1676 # Merge in non-active test bits.
1677 postout += after.pop(pos)
1677 postout += after.pop(pos)
1678 pos = int(lcmd.split()[0])
1678 pos = int(lcmd.split()[0])
1679
1679
1680 if pos in after:
1680 if pos in after:
1681 postout += after.pop(pos)
1681 postout += after.pop(pos)
1682
1682
1683 if warnonly == 2:
1683 if warnonly == 2:
1684 exitcode = False # Set exitcode to warned.
1684 exitcode = False # Set exitcode to warned.
1685
1685
1686 return exitcode, postout
1686 return exitcode, postout
1687
1687
1688 @staticmethod
1688 @staticmethod
1689 def rematch(el, l):
1689 def rematch(el, l):
1690 try:
1690 try:
1691 el = b'(?:' + el + b')'
1691 el = b'(?:' + el + b')'
1692 # use \Z to ensure that the regex matches to the end of the string
1692 # use \Z to ensure that the regex matches to the end of the string
1693 if os.name == 'nt':
1693 if os.name == 'nt':
1694 return re.match(el + br'\r?\n\Z', l)
1694 return re.match(el + br'\r?\n\Z', l)
1695 return re.match(el + br'\n\Z', l)
1695 return re.match(el + br'\n\Z', l)
1696 except re.error:
1696 except re.error:
1697 # el is an invalid regex
1697 # el is an invalid regex
1698 return False
1698 return False
1699
1699
1700 @staticmethod
1700 @staticmethod
1701 def globmatch(el, l):
1701 def globmatch(el, l):
1702 # The only supported special characters are * and ? plus / which also
1702 # The only supported special characters are * and ? plus / which also
1703 # matches \ on Windows. Escaping of these characters is supported.
1703 # matches \ on Windows. Escaping of these characters is supported.
1704 if el + b'\n' == l:
1704 if el + b'\n' == l:
1705 if os.altsep:
1705 if os.altsep:
1706 # matching on "/" is not needed for this line
1706 # matching on "/" is not needed for this line
1707 for pat in checkcodeglobpats:
1707 for pat in checkcodeglobpats:
1708 if pat.match(el):
1708 if pat.match(el):
1709 return True
1709 return True
1710 return b'-glob'
1710 return b'-glob'
1711 return True
1711 return True
1712 el = el.replace(b'$LOCALIP', b'*')
1712 el = el.replace(b'$LOCALIP', b'*')
1713 i, n = 0, len(el)
1713 i, n = 0, len(el)
1714 res = b''
1714 res = b''
1715 while i < n:
1715 while i < n:
1716 c = el[i:i + 1]
1716 c = el[i:i + 1]
1717 i += 1
1717 i += 1
1718 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1718 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1719 res += el[i - 1:i + 1]
1719 res += el[i - 1:i + 1]
1720 i += 1
1720 i += 1
1721 elif c == b'*':
1721 elif c == b'*':
1722 res += b'.*'
1722 res += b'.*'
1723 elif c == b'?':
1723 elif c == b'?':
1724 res += b'.'
1724 res += b'.'
1725 elif c == b'/' and os.altsep:
1725 elif c == b'/' and os.altsep:
1726 res += b'[/\\\\]'
1726 res += b'[/\\\\]'
1727 else:
1727 else:
1728 res += re.escape(c)
1728 res += re.escape(c)
1729 return TTest.rematch(res, l)
1729 return TTest.rematch(res, l)
1730
1730
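# Illustrative sketch (not part of the runner): globmatch() rewrites a
# (glob) expectation into a regex for rematch().  In an assumed expected
# line such as
#
#   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-backup.hg (glob)
#
# every '*' becomes '.*', every '?' becomes '.', '$LOCALIP' is treated as
# a wildcard, '/' also matches '\' on platforms with os.altsep, and all
# other characters are re.escape()d, so the same .t line matches both
# POSIX-style and Windows-style paths in the test output.
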
1731 def linematch(self, el, l):
1731 def linematch(self, el, l):
1732 if el == l: # perfect match (fast)
1732 if el == l: # perfect match (fast)
1733 return True, True
1733 return True, True
1734 retry = False
1734 retry = False
1735 if el.endswith(b" (?)\n"):
1735 if el.endswith(b" (?)\n"):
1736 retry = "retry"
1736 retry = "retry"
1737 el = el[:-5] + b"\n"
1737 el = el[:-5] + b"\n"
1738 else:
1738 else:
1739 m = optline.match(el)
1739 m = optline.match(el)
1740 if m:
1740 if m:
1741 conditions = [c for c in m.group(2).split(b' ')]
1741 conditions = [c for c in m.group(2).split(b' ')]
1742
1742
1743 el = m.group(1) + b"\n"
1743 el = m.group(1) + b"\n"
1744 if not self._iftest(conditions):
1744 if not self._iftest(conditions):
1745 retry = "retry" # Not required by listed features
1745 retry = "retry" # Not required by listed features
1746
1746
1747 if el.endswith(b" (esc)\n"):
1747 if el.endswith(b" (esc)\n"):
1748 if PYTHON3:
1748 if PYTHON3:
1749 el = el[:-7].decode('unicode_escape') + '\n'
1749 el = el[:-7].decode('unicode_escape') + '\n'
1750 el = el.encode('utf-8')
1750 el = el.encode('utf-8')
1751 else:
1751 else:
1752 el = el[:-7].decode('string-escape') + '\n'
1752 el = el[:-7].decode('string-escape') + '\n'
1753 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1753 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1754 return True, True
1754 return True, True
1755 if el.endswith(b" (re)\n"):
1755 if el.endswith(b" (re)\n"):
1756 return (TTest.rematch(el[:-6], l) or retry), False
1756 return (TTest.rematch(el[:-6], l) or retry), False
1757 if el.endswith(b" (glob)\n"):
1757 if el.endswith(b" (glob)\n"):
1758 # ignore '(glob)' added to l by 'replacements'
1758 # ignore '(glob)' added to l by 'replacements'
1759 if l.endswith(b" (glob)\n"):
1759 if l.endswith(b" (glob)\n"):
1760 l = l[:-8] + b"\n"
1760 l = l[:-8] + b"\n"
1761 return (TTest.globmatch(el[:-8], l) or retry), False
1761 return (TTest.globmatch(el[:-8], l) or retry), False
1762 if os.altsep:
1762 if os.altsep:
1763 _l = l.replace(b'\\', b'/')
1763 _l = l.replace(b'\\', b'/')
1764 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1764 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
1765 return True, True
1765 return True, True
1766 return retry, True
1766 return retry, True
1767
1767
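# Descriptive note (not part of the original file): linematch() above
# returns a 2-tuple.  The first element is True on a match, False on a
# mismatch, or the string "retry" when the expected line is optional --
# either it ends in " (?)" or its feature conditions (parsed by optline)
# are not satisfied by this run.  The second element is False when (re) or
# (glob) matching was used and True otherwise.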
1768 @staticmethod
1768 @staticmethod
1769 def parsehghaveoutput(lines):
1769 def parsehghaveoutput(lines):
1770 '''Parse hghave log lines.
1770 '''Parse hghave log lines.
1771
1771
1772 Return tuple of lists (missing, failed):
1772 Return tuple of lists (missing, failed):
1773 * the missing/unknown features
1773 * the missing/unknown features
1774 * the features for which the existence check failed'''
1774 * the features for which the existence check failed'''
1775 missing = []
1775 missing = []
1776 failed = []
1776 failed = []
1777 for line in lines:
1777 for line in lines:
1778 if line.startswith(TTest.SKIPPED_PREFIX):
1778 if line.startswith(TTest.SKIPPED_PREFIX):
1779 line = line.splitlines()[0]
1779 line = line.splitlines()[0]
1780 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1780 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1781 elif line.startswith(TTest.FAILED_PREFIX):
1781 elif line.startswith(TTest.FAILED_PREFIX):
1782 line = line.splitlines()[0]
1782 line = line.splitlines()[0]
1783 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1783 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1784
1784
1785 return missing, failed
1785 return missing, failed
1786
1786
1787 @staticmethod
1787 @staticmethod
1788 def _escapef(m):
1788 def _escapef(m):
1789 return TTest.ESCAPEMAP[m.group(0)]
1789 return TTest.ESCAPEMAP[m.group(0)]
1790
1790
1791 @staticmethod
1791 @staticmethod
1792 def _stringescape(s):
1792 def _stringescape(s):
1793 return TTest.ESCAPESUB(TTest._escapef, s)
1793 return TTest.ESCAPESUB(TTest._escapef, s)
1794
1794
1795 iolock = threading.RLock()
1795 iolock = threading.RLock()
1796 firstlock = threading.RLock()
1796 firstlock = threading.RLock()
1797 firsterror = False
1797 firsterror = False
1798
1798
1799 class TestResult(unittest._TextTestResult):
1799 class TestResult(unittest._TextTestResult):
1800 """Holds results when executing via unittest."""
1800 """Holds results when executing via unittest."""
1801 # Don't worry too much about accessing the non-public _TextTestResult.
1801 # Don't worry too much about accessing the non-public _TextTestResult.
1802 # It is relatively common in Python testing tools.
1802 # It is relatively common in Python testing tools.
1803 def __init__(self, options, *args, **kwargs):
1803 def __init__(self, options, *args, **kwargs):
1804 super(TestResult, self).__init__(*args, **kwargs)
1804 super(TestResult, self).__init__(*args, **kwargs)
1805
1805
1806 self._options = options
1806 self._options = options
1807
1807
1808 # unittest.TestResult didn't have skipped until 2.7. We need to
1808 # unittest.TestResult didn't have skipped until 2.7. We need to
1809 # polyfill it.
1809 # polyfill it.
1810 self.skipped = []
1810 self.skipped = []
1811
1811
1812 # We have a custom "ignored" result that isn't present in any Python
1812 # We have a custom "ignored" result that isn't present in any Python
1813 # unittest implementation. It is very similar to skipped. It may make
1813 # unittest implementation. It is very similar to skipped. It may make
1814 # sense to map it into skip some day.
1814 # sense to map it into skip some day.
1815 self.ignored = []
1815 self.ignored = []
1816
1816
1817 self.times = []
1817 self.times = []
1818 self._firststarttime = None
1818 self._firststarttime = None
1819 # Data stored for the benefit of generating xunit reports.
1819 # Data stored for the benefit of generating xunit reports.
1820 self.successes = []
1820 self.successes = []
1821 self.faildata = {}
1821 self.faildata = {}
1822
1822
1823 if options.color == 'auto':
1823 if options.color == 'auto':
1824 self.color = pygmentspresent and self.stream.isatty()
1824 self.color = pygmentspresent and self.stream.isatty()
1825 elif options.color == 'never':
1825 elif options.color == 'never':
1826 self.color = False
1826 self.color = False
1827 else: # 'always', for testing purposes
1827 else: # 'always', for testing purposes
1828 self.color = pygmentspresent
1828 self.color = pygmentspresent
1829
1829
1830 def onStart(self, test):
1830 def onStart(self, test):
1831 """ Can be overriden by custom TestResult
1831 """ Can be overriden by custom TestResult
1832 """
1832 """
1833
1833
1834 def onEnd(self):
1834 def onEnd(self):
1835 """ Can be overriden by custom TestResult
1835 """ Can be overriden by custom TestResult
1836 """
1836 """
1837
1837
1838 def addFailure(self, test, reason):
1838 def addFailure(self, test, reason):
1839 self.failures.append((test, reason))
1839 self.failures.append((test, reason))
1840
1840
1841 if self._options.first:
1841 if self._options.first:
1842 self.stop()
1842 self.stop()
1843 else:
1843 else:
1844 with iolock:
1844 with iolock:
1845 if reason == "timed out":
1845 if reason == "timed out":
1846 self.stream.write('t')
1846 self.stream.write('t')
1847 else:
1847 else:
1848 if not self._options.nodiff:
1848 if not self._options.nodiff:
1849 self.stream.write('\n')
1849 self.stream.write('\n')
1850 # Exclude the '\n' from highlighting to lex correctly
1850 # Exclude the '\n' from highlighting to lex correctly
1851 formatted = 'ERROR: %s output changed\n' % test
1851 formatted = 'ERROR: %s output changed\n' % test
1852 self.stream.write(highlightmsg(formatted, self.color))
1852 self.stream.write(highlightmsg(formatted, self.color))
1853 self.stream.write('!')
1853 self.stream.write('!')
1854
1854
1855 self.stream.flush()
1855 self.stream.flush()
1856
1856
1857 def addSuccess(self, test):
1857 def addSuccess(self, test):
1858 with iolock:
1858 with iolock:
1859 super(TestResult, self).addSuccess(test)
1859 super(TestResult, self).addSuccess(test)
1860 self.successes.append(test)
1860 self.successes.append(test)
1861
1861
1862 def addError(self, test, err):
1862 def addError(self, test, err):
1863 super(TestResult, self).addError(test, err)
1863 super(TestResult, self).addError(test, err)
1864 if self._options.first:
1864 if self._options.first:
1865 self.stop()
1865 self.stop()
1866
1866
1867 # Polyfill.
1867 # Polyfill.
1868 def addSkip(self, test, reason):
1868 def addSkip(self, test, reason):
1869 self.skipped.append((test, reason))
1869 self.skipped.append((test, reason))
1870 with iolock:
1870 with iolock:
1871 if self.showAll:
1871 if self.showAll:
1872 self.stream.writeln('skipped %s' % reason)
1872 self.stream.writeln('skipped %s' % reason)
1873 else:
1873 else:
1874 self.stream.write('s')
1874 self.stream.write('s')
1875 self.stream.flush()
1875 self.stream.flush()
1876
1876
1877 def addIgnore(self, test, reason):
1877 def addIgnore(self, test, reason):
1878 self.ignored.append((test, reason))
1878 self.ignored.append((test, reason))
1879 with iolock:
1879 with iolock:
1880 if self.showAll:
1880 if self.showAll:
1881 self.stream.writeln('ignored %s' % reason)
1881 self.stream.writeln('ignored %s' % reason)
1882 else:
1882 else:
1883 if reason not in ('not retesting', "doesn't match keyword"):
1883 if reason not in ('not retesting', "doesn't match keyword"):
1884 self.stream.write('i')
1884 self.stream.write('i')
1885 else:
1885 else:
1886 self.testsRun += 1
1886 self.testsRun += 1
1887 self.stream.flush()
1887 self.stream.flush()
1888
1888
1889 def addOutputMismatch(self, test, ret, got, expected):
1889 def addOutputMismatch(self, test, ret, got, expected):
1890 """Record a mismatch in test output for a particular test."""
1890 """Record a mismatch in test output for a particular test."""
1891 if self.shouldStop or firsterror:
1891 if self.shouldStop or firsterror:
1892 # don't print; some other test case already failed and
1892 # don't print; some other test case already failed and
1893 # printed, so we're just stale and probably failed due to our
1893 # printed, so we're just stale and probably failed due to our
1894 # temp dir getting cleaned up.
1894 # temp dir getting cleaned up.
1895 return
1895 return
1896
1896
1897 accepted = False
1897 accepted = False
1898 lines = []
1898 lines = []
1899
1899
1900 with iolock:
1900 with iolock:
1901 if self._options.nodiff:
1901 if self._options.nodiff:
1902 pass
1902 pass
1903 elif self._options.view:
1903 elif self._options.view:
1904 v = self._options.view
1904 v = self._options.view
1905 os.system(r"%s %s %s" %
1905 os.system(r"%s %s %s" %
1906 (v, _strpath(test.refpath), _strpath(test.errpath)))
1906 (v, _strpath(test.refpath), _strpath(test.errpath)))
1907 else:
1907 else:
1908 servefail, lines = getdiff(expected, got,
1908 servefail, lines = getdiff(expected, got,
1909 test.refpath, test.errpath)
1909 test.refpath, test.errpath)
1910 self.stream.write('\n')
1910 self.stream.write('\n')
1911 for line in lines:
1911 for line in lines:
1912 line = highlightdiff(line, self.color)
1912 line = highlightdiff(line, self.color)
1913 if PYTHON3:
1913 if PYTHON3:
1914 self.stream.flush()
1914 self.stream.flush()
1915 self.stream.buffer.write(line)
1915 self.stream.buffer.write(line)
1916 self.stream.buffer.flush()
1916 self.stream.buffer.flush()
1917 else:
1917 else:
1918 self.stream.write(line)
1918 self.stream.write(line)
1919 self.stream.flush()
1919 self.stream.flush()
1920
1920
1921 if servefail:
1921 if servefail:
1922 raise test.failureException(
1922 raise test.failureException(
1923 'server failed to start (HGPORT=%s)' % test._startport)
1923 'server failed to start (HGPORT=%s)' % test._startport)
1924
1924
1925 # handle interactive prompt without releasing iolock
1925 # handle interactive prompt without releasing iolock
1926 if self._options.interactive:
1926 if self._options.interactive:
1927 if test.readrefout() != expected:
1927 if test.readrefout() != expected:
1928 self.stream.write(
1928 self.stream.write(
1929 'Reference output has changed (run again to prompt '
1929 'Reference output has changed (run again to prompt '
1930 'changes)')
1930 'changes)')
1931 else:
1931 else:
1932 self.stream.write('Accept this change? [n] ')
1932 self.stream.write('Accept this change? [n] ')
1933 self.stream.flush()
1933 self.stream.flush()
1934 answer = sys.stdin.readline().strip()
1934 answer = sys.stdin.readline().strip()
1935 if answer.lower() in ('y', 'yes'):
1935 if answer.lower() in ('y', 'yes'):
1936 if test.path.endswith(b'.t'):
1936 if test.path.endswith(b'.t'):
1937 rename(test.errpath, test.path)
1937 rename(test.errpath, test.path)
1938 else:
1938 else:
1939 rename(test.errpath, '%s.out' % test.path)
1939 rename(test.errpath, '%s.out' % test.path)
1940 accepted = True
1940 accepted = True
1941 if not accepted:
1941 if not accepted:
1942 self.faildata[test.name] = b''.join(lines)
1942 self.faildata[test.name] = b''.join(lines)
1943
1943
1944 return accepted
1944 return accepted
1945
1945
1946 def startTest(self, test):
1946 def startTest(self, test):
1947 super(TestResult, self).startTest(test)
1947 super(TestResult, self).startTest(test)
1948
1948
1949 # os.times() reports the user time and system time spent by child
1949 # os.times() reports the user time and system time spent by child
1950 # processes, along with the real elapsed time taken by a process.
1950 # processes, along with the real elapsed time taken by a process.
1951 # One limitation: these values are only meaningful on POSIX systems
1951 # One limitation: these values are only meaningful on POSIX systems
1952 # such as Linux, and not on Windows.
1952 # such as Linux, and not on Windows.
1953 test.started = os.times()
1953 test.started = os.times()
1954 if self._firststarttime is None: # thread racy but irrelevant
1954 if self._firststarttime is None: # thread racy but irrelevant
1955 self._firststarttime = test.started[4]
1955 self._firststarttime = test.started[4]
1956
1956
1957 def stopTest(self, test, interrupted=False):
1957 def stopTest(self, test, interrupted=False):
1958 super(TestResult, self).stopTest(test)
1958 super(TestResult, self).stopTest(test)
1959
1959
1960 test.stopped = os.times()
1960 test.stopped = os.times()
1961
1961
1962 starttime = test.started
1962 starttime = test.started
1963 endtime = test.stopped
1963 endtime = test.stopped
1964 origin = self._firststarttime
1964 origin = self._firststarttime
1965 self.times.append((test.name,
1965 self.times.append((test.name,
1966 endtime[2] - starttime[2], # user space CPU time
1966 endtime[2] - starttime[2], # user space CPU time
1967 endtime[3] - starttime[3], # sys space CPU time
1967 endtime[3] - starttime[3], # sys space CPU time
1968 endtime[4] - starttime[4], # real time
1968 endtime[4] - starttime[4], # real time
1969 starttime[4] - origin, # start date in run context
1969 starttime[4] - origin, # start date in run context
1970 endtime[4] - origin, # end date in run context
1970 endtime[4] - origin, # end date in run context
1971 ))
1971 ))
1972
1972
1973 if interrupted:
1973 if interrupted:
1974 with iolock:
1974 with iolock:
1975 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1975 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1976 test.name, self.times[-1][3]))
1976 test.name, self.times[-1][3]))
1977
1977
1978 def getTestResult():
1978 def getTestResult():
1979 """
1979 """
1980 Returns the relevant test result
1980 Returns the relevant test result
1981 """
1981 """
1982 if "CUSTOM_TEST_RESULT" in os.environ:
1982 if "CUSTOM_TEST_RESULT" in os.environ:
1983 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1983 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
1984 return testresultmodule.TestResult
1984 return testresultmodule.TestResult
1985 else:
1985 else:
1986 return TestResult
1986 return TestResult
1987
1987
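# Illustrative sketch (not part of the original file): the
# CUSTOM_TEST_RESULT hook above imports the named module and uses its
# TestResult attribute in place of the class defined earlier.  The module
# name and contents below are assumptions for illustration only; a real
# replacement would normally subclass the TestResult above so that
# addOutputMismatch() and friends keep working, and its constructor must
# accept (options, stream, descriptions, verbosity):
#
#   # myresult.py (hypothetical), importable from sys.path
#   class TestResult(SomeBaseTestResult):        # e.g. the class above
#       def onStart(self, test):
#           print('custom reporter: starting')
#       def onEnd(self):
#           print('custom reporter: finished')
#
# selected by running:  CUSTOM_TEST_RESULT=myresult ./run-tests.py ...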
1988 class TestSuite(unittest.TestSuite):
1988 class TestSuite(unittest.TestSuite):
1989 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1989 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1990
1990
1991 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1991 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1992 retest=False, keywords=None, loop=False, runs_per_test=1,
1992 retest=False, keywords=None, loop=False, runs_per_test=1,
1993 loadtest=None, showchannels=False,
1993 loadtest=None, showchannels=False,
1994 *args, **kwargs):
1994 *args, **kwargs):
1995 """Create a new instance that can run tests with a configuration.
1995 """Create a new instance that can run tests with a configuration.
1996
1996
1997 testdir specifies the directory where tests are executed from. This
1997 testdir specifies the directory where tests are executed from. This
1998 is typically the ``tests`` directory from Mercurial's source
1998 is typically the ``tests`` directory from Mercurial's source
1999 repository.
1999 repository.
2000
2000
2001 jobs specifies the number of jobs to run concurrently. Each test
2001 jobs specifies the number of jobs to run concurrently. Each test
2002 executes on its own thread. Tests actually spawn new processes, so
2002 executes on its own thread. Tests actually spawn new processes, so
2003 state mutation should not be an issue.
2003 state mutation should not be an issue.
2004
2004
2005 If there is only one job, it will use the main thread.
2005 If there is only one job, it will use the main thread.
2006
2006
2007 whitelist and blacklist denote tests that have been whitelisted and
2007 whitelist and blacklist denote tests that have been whitelisted and
2008 blacklisted, respectively. These arguments don't belong in TestSuite.
2008 blacklisted, respectively. These arguments don't belong in TestSuite.
2009 Instead, whitelist and blacklist should be handled by the thing that
2009 Instead, whitelist and blacklist should be handled by the thing that
2010 populates the TestSuite with tests. They are present to preserve
2010 populates the TestSuite with tests. They are present to preserve
2011 backwards-compatible behavior which reports skipped tests as part
2011 backwards-compatible behavior which reports skipped tests as part
2012 of the results.
2012 of the results.
2013
2013
2014 retest denotes whether to retest failed tests. This arguably belongs
2014 retest denotes whether to retest failed tests. This arguably belongs
2015 outside of TestSuite.
2015 outside of TestSuite.
2016
2016
2017 keywords denotes key words that will be used to filter which tests
2017 keywords denotes key words that will be used to filter which tests
2018 to execute. This arguably belongs outside of TestSuite.
2018 to execute. This arguably belongs outside of TestSuite.
2019
2019
2020 loop denotes whether to loop over tests forever.
2020 loop denotes whether to loop over tests forever.
2021 """
2021 """
2022 super(TestSuite, self).__init__(*args, **kwargs)
2022 super(TestSuite, self).__init__(*args, **kwargs)
2023
2023
2024 self._jobs = jobs
2024 self._jobs = jobs
2025 self._whitelist = whitelist
2025 self._whitelist = whitelist
2026 self._blacklist = blacklist
2026 self._blacklist = blacklist
2027 self._retest = retest
2027 self._retest = retest
2028 self._keywords = keywords
2028 self._keywords = keywords
2029 self._loop = loop
2029 self._loop = loop
2030 self._runs_per_test = runs_per_test
2030 self._runs_per_test = runs_per_test
2031 self._loadtest = loadtest
2031 self._loadtest = loadtest
2032 self._showchannels = showchannels
2032 self._showchannels = showchannels
2033
2033
2034 def run(self, result):
2034 def run(self, result):
2035 # We have a number of filters that need to be applied. We do this
2035 # We have a number of filters that need to be applied. We do this
2036 # here instead of inside Test because it makes the running logic for
2036 # here instead of inside Test because it makes the running logic for
2037 # Test simpler.
2037 # Test simpler.
2038 tests = []
2038 tests = []
2039 num_tests = [0]
2039 num_tests = [0]
2040 for test in self._tests:
2040 for test in self._tests:
2041 def get():
2041 def get():
2042 num_tests[0] += 1
2042 num_tests[0] += 1
2043 if getattr(test, 'should_reload', False):
2043 if getattr(test, 'should_reload', False):
2044 return self._loadtest(test, num_tests[0])
2044 return self._loadtest(test, num_tests[0])
2045 return test
2045 return test
2046 if not os.path.exists(test.path):
2046 if not os.path.exists(test.path):
2047 result.addSkip(test, "Doesn't exist")
2047 result.addSkip(test, "Doesn't exist")
2048 continue
2048 continue
2049
2049
2050 if not (self._whitelist and test.bname in self._whitelist):
2050 if not (self._whitelist and test.bname in self._whitelist):
2051 if self._blacklist and test.bname in self._blacklist:
2051 if self._blacklist and test.bname in self._blacklist:
2052 result.addSkip(test, 'blacklisted')
2052 result.addSkip(test, 'blacklisted')
2053 continue
2053 continue
2054
2054
2055 if self._retest and not os.path.exists(test.errpath):
2055 if self._retest and not os.path.exists(test.errpath):
2056 result.addIgnore(test, 'not retesting')
2056 result.addIgnore(test, 'not retesting')
2057 continue
2057 continue
2058
2058
2059 if self._keywords:
2059 if self._keywords:
2060 with open(test.path, 'rb') as f:
2060 with open(test.path, 'rb') as f:
2061 t = f.read().lower() + test.bname.lower()
2061 t = f.read().lower() + test.bname.lower()
2062 ignored = False
2062 ignored = False
2063 for k in self._keywords.lower().split():
2063 for k in self._keywords.lower().split():
2064 if k not in t:
2064 if k not in t:
2065 result.addIgnore(test, "doesn't match keyword")
2065 result.addIgnore(test, "doesn't match keyword")
2066 ignored = True
2066 ignored = True
2067 break
2067 break
2068
2068
2069 if ignored:
2069 if ignored:
2070 continue
2070 continue
2071 for _ in xrange(self._runs_per_test):
2071 for _ in xrange(self._runs_per_test):
2072 tests.append(get())
2072 tests.append(get())
2073
2073
2074 runtests = list(tests)
2074 runtests = list(tests)
2075 done = queue.Queue()
2075 done = queue.Queue()
2076 running = 0
2076 running = 0
2077
2077
2078 channels = [""] * self._jobs
2078 channels = [""] * self._jobs
2079
2079
2080 def job(test, result):
2080 def job(test, result):
2081 for n, v in enumerate(channels):
2081 for n, v in enumerate(channels):
2082 if not v:
2082 if not v:
2083 channel = n
2083 channel = n
2084 break
2084 break
2085 else:
2085 else:
2086 raise ValueError('Could not find output channel')
2086 raise ValueError('Could not find output channel')
2087 channels[channel] = "=" + test.name[5:].split(".")[0]
2087 channels[channel] = "=" + test.name[5:].split(".")[0]
2088 try:
2088 try:
2089 test(result)
2089 test(result)
2090 done.put(None)
2090 done.put(None)
2091 except KeyboardInterrupt:
2091 except KeyboardInterrupt:
2092 pass
2092 pass
2093 except: # re-raises
2093 except: # re-raises
2094 done.put(('!', test, 'run-test raised an error, see traceback'))
2094 done.put(('!', test, 'run-test raised an error, see traceback'))
2095 raise
2095 raise
2096 finally:
2096 finally:
2097 try:
2097 try:
2098 channels[channel] = ''
2098 channels[channel] = ''
2099 except IndexError:
2099 except IndexError:
2100 pass
2100 pass
2101
2101
2102 def stat():
2102 def stat():
2103 count = 0
2103 count = 0
2104 while channels:
2104 while channels:
2105 d = '\n%03s ' % count
2105 d = '\n%03s ' % count
2106 for n, v in enumerate(channels):
2106 for n, v in enumerate(channels):
2107 if v:
2107 if v:
2108 d += v[0]
2108 d += v[0]
2109 channels[n] = v[1:] or '.'
2109 channels[n] = v[1:] or '.'
2110 else:
2110 else:
2111 d += ' '
2111 d += ' '
2112 d += ' '
2112 d += ' '
2113 with iolock:
2113 with iolock:
2114 sys.stdout.write(d + ' ')
2114 sys.stdout.write(d + ' ')
2115 sys.stdout.flush()
2115 sys.stdout.flush()
2116 for x in xrange(10):
2116 for x in xrange(10):
2117 if channels:
2117 if channels:
2118 time.sleep(.1)
2118 time.sleep(.1)
2119 count += 1
2119 count += 1
2120
2120
2121 stoppedearly = False
2121 stoppedearly = False
2122
2122
2123 if self._showchannels:
2123 if self._showchannels:
2124 statthread = threading.Thread(target=stat, name="stat")
2124 statthread = threading.Thread(target=stat, name="stat")
2125 statthread.start()
2125 statthread.start()
2126
2126
2127 try:
2127 try:
2128 while tests or running:
2128 while tests or running:
2129 if not done.empty() or running == self._jobs or not tests:
2129 if not done.empty() or running == self._jobs or not tests:
2130 try:
2130 try:
2131 done.get(True, 1)
2131 done.get(True, 1)
2132 running -= 1
2132 running -= 1
2133 if result and result.shouldStop:
2133 if result and result.shouldStop:
2134 stoppedearly = True
2134 stoppedearly = True
2135 break
2135 break
2136 except queue.Empty:
2136 except queue.Empty:
2137 continue
2137 continue
2138 if tests and not running == self._jobs:
2138 if tests and not running == self._jobs:
2139 test = tests.pop(0)
2139 test = tests.pop(0)
2140 if self._loop:
2140 if self._loop:
2141 if getattr(test, 'should_reload', False):
2141 if getattr(test, 'should_reload', False):
2142 num_tests[0] += 1
2142 num_tests[0] += 1
2143 tests.append(
2143 tests.append(
2144 self._loadtest(test, num_tests[0]))
2144 self._loadtest(test, num_tests[0]))
2145 else:
2145 else:
2146 tests.append(test)
2146 tests.append(test)
2147 if self._jobs == 1:
2147 if self._jobs == 1:
2148 job(test, result)
2148 job(test, result)
2149 else:
2149 else:
2150 t = threading.Thread(target=job, name=test.name,
2150 t = threading.Thread(target=job, name=test.name,
2151 args=(test, result))
2151 args=(test, result))
2152 t.start()
2152 t.start()
2153 running += 1
2153 running += 1
2154
2154
2155 # If we stop early we still need to wait on started tests to
2155 # If we stop early we still need to wait on started tests to
2156 # finish. Otherwise, there is a race between the test completing
2156 # finish. Otherwise, there is a race between the test completing
2157 # and the test's cleanup code running. This could result in the
2157 # and the test's cleanup code running. This could result in the
2158 # test being reported incorrectly.
2158 # test being reported incorrectly.
2159 if stoppedearly:
2159 if stoppedearly:
2160 while running:
2160 while running:
2161 try:
2161 try:
2162 done.get(True, 1)
2162 done.get(True, 1)
2163 running -= 1
2163 running -= 1
2164 except queue.Empty:
2164 except queue.Empty:
2165 continue
2165 continue
2166 except KeyboardInterrupt:
2166 except KeyboardInterrupt:
2167 for test in runtests:
2167 for test in runtests:
2168 test.abort()
2168 test.abort()
2169
2169
2170 channels = []
2170 channels = []
2171
2171
2172 return result
2172 return result
2173
2173
2174 # Save the most recent 5 wall-clock runtimes of each test to a
2174 # Save the most recent 5 wall-clock runtimes of each test to a
2175 # human-readable text file named .testtimes. Tests are sorted
2175 # human-readable text file named .testtimes. Tests are sorted
2176 # alphabetically, while times for each test are listed from oldest to
2176 # alphabetically, while times for each test are listed from oldest to
2177 # newest.
2177 # newest.
2178
2178
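# For illustration (assumed example data, not real measurements), a
# .testtimes file written by savetimes() below looks like:
#
#   test-annotate.t 12.321 11.987 12.054
#   test-commit.t 3.112 2.998 3.040 3.201 3.155
#
# i.e. one test per line followed by up to five space-separated wall-clock
# times in seconds, oldest first, which loadtimes() parses back with a
# simple regular expression.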
2179 def loadtimes(outputdir):
2179 def loadtimes(outputdir):
2180 times = []
2180 times = []
2181 try:
2181 try:
2182 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2182 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2183 for line in fp:
2183 for line in fp:
2184 m = re.match('(.*?) ([0-9. ]+)', line)
2184 m = re.match('(.*?) ([0-9. ]+)', line)
2185 times.append((m.group(1),
2185 times.append((m.group(1),
2186 [float(t) for t in m.group(2).split()]))
2186 [float(t) for t in m.group(2).split()]))
2187 except IOError as err:
2187 except IOError as err:
2188 if err.errno != errno.ENOENT:
2188 if err.errno != errno.ENOENT:
2189 raise
2189 raise
2190 return times
2190 return times
2191
2191
2192 def savetimes(outputdir, result):
2192 def savetimes(outputdir, result):
2193 saved = dict(loadtimes(outputdir))
2193 saved = dict(loadtimes(outputdir))
2194 maxruns = 5
2194 maxruns = 5
2195 skipped = set([str(t[0]) for t in result.skipped])
2195 skipped = set([str(t[0]) for t in result.skipped])
2196 for tdata in result.times:
2196 for tdata in result.times:
2197 test, real = tdata[0], tdata[3]
2197 test, real = tdata[0], tdata[3]
2198 if test not in skipped:
2198 if test not in skipped:
2199 ts = saved.setdefault(test, [])
2199 ts = saved.setdefault(test, [])
2200 ts.append(real)
2200 ts.append(real)
2201 ts[:] = ts[-maxruns:]
2201 ts[:] = ts[-maxruns:]
2202
2202
2203 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2203 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2204 dir=outputdir, text=True)
2204 dir=outputdir, text=True)
2205 with os.fdopen(fd, 'w') as fp:
2205 with os.fdopen(fd, 'w') as fp:
2206 for name, ts in sorted(saved.items()):
2206 for name, ts in sorted(saved.items()):
2207 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2207 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2208 timepath = os.path.join(outputdir, b'.testtimes')
2208 timepath = os.path.join(outputdir, b'.testtimes')
2209 try:
2209 try:
2210 os.unlink(timepath)
2210 os.unlink(timepath)
2211 except OSError:
2211 except OSError:
2212 pass
2212 pass
2213 try:
2213 try:
2214 os.rename(tmpname, timepath)
2214 os.rename(tmpname, timepath)
2215 except OSError:
2215 except OSError:
2216 pass
2216 pass
2217
2217
2218 class TextTestRunner(unittest.TextTestRunner):
2218 class TextTestRunner(unittest.TextTestRunner):
2219 """Custom unittest test runner that uses appropriate settings."""
2219 """Custom unittest test runner that uses appropriate settings."""
2220
2220
2221 def __init__(self, runner, *args, **kwargs):
2221 def __init__(self, runner, *args, **kwargs):
2222 super(TextTestRunner, self).__init__(*args, **kwargs)
2222 super(TextTestRunner, self).__init__(*args, **kwargs)
2223
2223
2224 self._runner = runner
2224 self._runner = runner
2225
2225
2226 self._result = getTestResult()(self._runner.options, self.stream,
2226 self._result = getTestResult()(self._runner.options, self.stream,
2227 self.descriptions, self.verbosity)
2227 self.descriptions, self.verbosity)
2228
2228
2229 def listtests(self, test):
2229 def listtests(self, test):
2230 test = sorted(test, key=lambda t: t.name)
2230 test = sorted(test, key=lambda t: t.name)
2231
2231
2232 self._result.onStart(test)
2232 self._result.onStart(test)
2233
2233
2234 for t in test:
2234 for t in test:
2235 print(t.name)
2235 print(t.name)
2236 self._result.addSuccess(t)
2236 self._result.addSuccess(t)
2237
2237
2238 if self._runner.options.xunit:
2238 if self._runner.options.xunit:
2239 with open(self._runner.options.xunit, "wb") as xuf:
2239 with open(self._runner.options.xunit, "wb") as xuf:
2240 self._writexunit(self._result, xuf)
2240 self._writexunit(self._result, xuf)
2241
2241
2242 if self._runner.options.json:
2242 if self._runner.options.json:
2243 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2243 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2244 with open(jsonpath, 'w') as fp:
2244 with open(jsonpath, 'w') as fp:
2245 self._writejson(self._result, fp)
2245 self._writejson(self._result, fp)
2246
2246
2247 return self._result
2247 return self._result
2248
2248
2249 def run(self, test):
2249 def run(self, test):
2250 self._result.onStart(test)
2250 self._result.onStart(test)
2251 test(self._result)
2251 test(self._result)
2252
2252
2253 failed = len(self._result.failures)
2253 failed = len(self._result.failures)
2254 skipped = len(self._result.skipped)
2254 skipped = len(self._result.skipped)
2255 ignored = len(self._result.ignored)
2255 ignored = len(self._result.ignored)
2256
2256
2257 with iolock:
2257 with iolock:
2258 self.stream.writeln('')
2258 self.stream.writeln('')
2259
2259
2260 if not self._runner.options.noskips:
2260 if not self._runner.options.noskips:
2261 for test, msg in self._result.skipped:
2261 for test, msg in sorted(self._result.skipped,
2262 key=lambda s: s[0].name):
2262 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2263 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2263 msg = highlightmsg(formatted, self._result.color)
2264 msg = highlightmsg(formatted, self._result.color)
2264 self.stream.write(msg)
2265 self.stream.write(msg)
2265 for test, msg in self._result.failures:
2266 for test, msg in sorted(self._result.failures,
2267 key=lambda f: f[0].name):
2266 formatted = 'Failed %s: %s\n' % (test.name, msg)
2268 formatted = 'Failed %s: %s\n' % (test.name, msg)
2267 self.stream.write(highlightmsg(formatted, self._result.color))
2269 self.stream.write(highlightmsg(formatted, self._result.color))
2268 for test, msg in self._result.errors:
2270 for test, msg in sorted(self._result.errors,
2271 key=lambda e: e[0].name):
2269 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2272 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2270
2273
2271 if self._runner.options.xunit:
2274 if self._runner.options.xunit:
2272 with open(self._runner.options.xunit, "wb") as xuf:
2275 with open(self._runner.options.xunit, "wb") as xuf:
2273 self._writexunit(self._result, xuf)
2276 self._writexunit(self._result, xuf)
2274
2277
2275 if self._runner.options.json:
2278 if self._runner.options.json:
2276 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2279 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2277 with open(jsonpath, 'w') as fp:
2280 with open(jsonpath, 'w') as fp:
2278 self._writejson(self._result, fp)
2281 self._writejson(self._result, fp)
2279
2282
2280 self._runner._checkhglib('Tested')
2283 self._runner._checkhglib('Tested')
2281
2284
2282 savetimes(self._runner._outputdir, self._result)
2285 savetimes(self._runner._outputdir, self._result)
2283
2286
2284 if failed and self._runner.options.known_good_rev:
2287 if failed and self._runner.options.known_good_rev:
2285 self._bisecttests(t for t, m in self._result.failures)
2288 self._bisecttests(t for t, m in self._result.failures)
2286 self.stream.writeln(
2289 self.stream.writeln(
2287 '# Ran %d tests, %d skipped, %d failed.'
2290 '# Ran %d tests, %d skipped, %d failed.'
2288 % (self._result.testsRun, skipped + ignored, failed))
2291 % (self._result.testsRun, skipped + ignored, failed))
2289 if failed:
2292 if failed:
2290 self.stream.writeln('python hash seed: %s' %
2293 self.stream.writeln('python hash seed: %s' %
2291 os.environ['PYTHONHASHSEED'])
2294 os.environ['PYTHONHASHSEED'])
2292 if self._runner.options.time:
2295 if self._runner.options.time:
2293 self.printtimes(self._result.times)
2296 self.printtimes(self._result.times)
2294
2297
2295 if self._runner.options.exceptions:
2298 if self._runner.options.exceptions:
2296 exceptions = aggregateexceptions(
2299 exceptions = aggregateexceptions(
2297 os.path.join(self._runner._outputdir, b'exceptions'))
2300 os.path.join(self._runner._outputdir, b'exceptions'))
2298
2301
2299 self.stream.writeln('Exceptions Report:')
2302 self.stream.writeln('Exceptions Report:')
2300 self.stream.writeln('%d total from %d frames' %
2303 self.stream.writeln('%d total from %d frames' %
2301 (exceptions['total'],
2304 (exceptions['total'],
2302 len(exceptions['exceptioncounts'])))
2305 len(exceptions['exceptioncounts'])))
2303 combined = exceptions['combined']
2306 combined = exceptions['combined']
2304 for key in sorted(combined, key=combined.get, reverse=True):
2307 for key in sorted(combined, key=combined.get, reverse=True):
2305 frame, line, exc = key
2308 frame, line, exc = key
2306 totalcount, testcount, leastcount, leasttest = combined[key]
2309 totalcount, testcount, leastcount, leasttest = combined[key]
2307
2310
2308 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2311 self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
2309 % (totalcount,
2312 % (totalcount,
2310 testcount,
2313 testcount,
2311 frame, exc,
2314 frame, exc,
2312 leasttest, leastcount))
2315 leasttest, leastcount))
2313
2316
2314 self.stream.flush()
2317 self.stream.flush()
2315
2318
2316 return self._result
2319 return self._result
2317
2320
2318 def _bisecttests(self, tests):
2321 def _bisecttests(self, tests):
2319 bisectcmd = ['hg', 'bisect']
2322 bisectcmd = ['hg', 'bisect']
2320 bisectrepo = self._runner.options.bisect_repo
2323 bisectrepo = self._runner.options.bisect_repo
2321 if bisectrepo:
2324 if bisectrepo:
2322 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2325 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2323 def pread(args):
2326 def pread(args):
2324 env = os.environ.copy()
2327 env = os.environ.copy()
2325 env['HGPLAIN'] = '1'
2328 env['HGPLAIN'] = '1'
2326 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2329 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2327 stdout=subprocess.PIPE, env=env)
2330 stdout=subprocess.PIPE, env=env)
2328 data = p.stdout.read()
2331 data = p.stdout.read()
2329 p.wait()
2332 p.wait()
2330 return data
2333 return data
2331 for test in tests:
2334 for test in tests:
2332 pread(bisectcmd + ['--reset']),
2335 pread(bisectcmd + ['--reset']),
2333 pread(bisectcmd + ['--bad', '.'])
2336 pread(bisectcmd + ['--bad', '.'])
2334 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2337 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2335 # TODO: we probably need to forward more options
2338 # TODO: we probably need to forward more options
2336 # that alter hg's behavior inside the tests.
2339 # that alter hg's behavior inside the tests.
2337 opts = ''
2340 opts = ''
2338 withhg = self._runner.options.with_hg
2341 withhg = self._runner.options.with_hg
2339 if withhg:
2342 if withhg:
2340 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2343 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2341 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2344 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2342 test)
2345 test)
2343 data = pread(bisectcmd + ['--command', rtc])
2346 data = pread(bisectcmd + ['--command', rtc])
2344 m = re.search(
2347 m = re.search(
2345 (br'\nThe first (?P<goodbad>bad|good) revision '
2348 (br'\nThe first (?P<goodbad>bad|good) revision '
2346 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2349 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2347 br'summary: +(?P<summary>[^\n]+)\n'),
2350 br'summary: +(?P<summary>[^\n]+)\n'),
2348 data, (re.MULTILINE | re.DOTALL))
2351 data, (re.MULTILINE | re.DOTALL))
2349 if m is None:
2352 if m is None:
2350 self.stream.writeln(
2353 self.stream.writeln(
2351 'Failed to identify failure point for %s' % test)
2354 'Failed to identify failure point for %s' % test)
2352 continue
2355 continue
2353 dat = m.groupdict()
2356 dat = m.groupdict()
2354 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2357 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2355 self.stream.writeln(
2358 self.stream.writeln(
2356 '%s %s by %s (%s)' % (
2359 '%s %s by %s (%s)' % (
2357 test, verb, dat['node'].decode('ascii'),
2360 test, verb, dat['node'].decode('ascii'),
2358 dat['summary'].decode('utf8', 'ignore')))
2361 dat['summary'].decode('utf8', 'ignore')))
2359
2362
2360 def printtimes(self, times):
2363 def printtimes(self, times):
2361 # iolock held by run
2364 # iolock held by run
2362 self.stream.writeln('# Producing time report')
2365 self.stream.writeln('# Producing time report')
2363 times.sort(key=lambda t: (t[3]))
2366 times.sort(key=lambda t: (t[3]))
2364 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2367 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2365 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2368 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2366 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2369 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2367 for tdata in times:
2370 for tdata in times:
2368 test = tdata[0]
2371 test = tdata[0]
2369 cuser, csys, real, start, end = tdata[1:6]
2372 cuser, csys, real, start, end = tdata[1:6]
2370 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2373 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2371
2374
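# For illustration (assumed numbers, not real output), printtimes() above
# emits a report roughly like:
#
#   # Producing time report
#   start   end     cuser   csys    real    Test
#     0.000   3.155   1.020   0.310   3.155 test-commit.t
#     0.000  12.054   4.210   1.730  12.054 test-annotate.t
#
# one row per test, sorted by ascending real time.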
2372 @staticmethod
2375 @staticmethod
2373 def _writexunit(result, outf):
2376 def _writexunit(result, outf):
2374 # See http://llg.cubic.org/docs/junit/ for a reference.
2377 # See http://llg.cubic.org/docs/junit/ for a reference.
2375 timesd = dict((t[0], t[3]) for t in result.times)
2378 timesd = dict((t[0], t[3]) for t in result.times)
2376 doc = minidom.Document()
2379 doc = minidom.Document()
2377 s = doc.createElement('testsuite')
2380 s = doc.createElement('testsuite')
2378 s.setAttribute('name', 'run-tests')
2381 s.setAttribute('name', 'run-tests')
2379 s.setAttribute('tests', str(result.testsRun))
2382 s.setAttribute('tests', str(result.testsRun))
2380 s.setAttribute('errors', "0") # TODO
2383 s.setAttribute('errors', "0") # TODO
2381 s.setAttribute('failures', str(len(result.failures)))
2384 s.setAttribute('failures', str(len(result.failures)))
2382 s.setAttribute('skipped', str(len(result.skipped) +
2385 s.setAttribute('skipped', str(len(result.skipped) +
2383 len(result.ignored)))
2386 len(result.ignored)))
2384 doc.appendChild(s)
2387 doc.appendChild(s)
2385 for tc in result.successes:
2388 for tc in result.successes:
2386 t = doc.createElement('testcase')
2389 t = doc.createElement('testcase')
2387 t.setAttribute('name', tc.name)
2390 t.setAttribute('name', tc.name)
2388 tctime = timesd.get(tc.name)
2391 tctime = timesd.get(tc.name)
2389 if tctime is not None:
2392 if tctime is not None:
2390 t.setAttribute('time', '%.3f' % tctime)
2393 t.setAttribute('time', '%.3f' % tctime)
2391 s.appendChild(t)
2394 s.appendChild(t)
2392 for tc, err in sorted(result.faildata.items()):
2395 for tc, err in sorted(result.faildata.items()):
2393 t = doc.createElement('testcase')
2396 t = doc.createElement('testcase')
2394 t.setAttribute('name', tc)
2397 t.setAttribute('name', tc)
2395 tctime = timesd.get(tc)
2398 tctime = timesd.get(tc)
2396 if tctime is not None:
2399 if tctime is not None:
2397 t.setAttribute('time', '%.3f' % tctime)
2400 t.setAttribute('time', '%.3f' % tctime)
2398 # createCDATASection expects a unicode or it will
2401 # createCDATASection expects a unicode or it will
2399 # convert using default conversion rules, which will
2402 # convert using default conversion rules, which will
2400 # fail if string isn't ASCII.
2403 # fail if string isn't ASCII.
2401 err = cdatasafe(err).decode('utf-8', 'replace')
2404 err = cdatasafe(err).decode('utf-8', 'replace')
2402 cd = doc.createCDATASection(err)
2405 cd = doc.createCDATASection(err)
2403 # Use 'failure' here instead of 'error' to match errors = 0,
2406 # Use 'failure' here instead of 'error' to match errors = 0,
2404 # failures = len(result.failures) in the testsuite element.
2407 # failures = len(result.failures) in the testsuite element.
2405 failelem = doc.createElement('failure')
2408 failelem = doc.createElement('failure')
2406 failelem.setAttribute('message', 'output changed')
2409 failelem.setAttribute('message', 'output changed')
2407 failelem.setAttribute('type', 'output-mismatch')
2410 failelem.setAttribute('type', 'output-mismatch')
2408 failelem.appendChild(cd)
2411 failelem.appendChild(cd)
2409 t.appendChild(failelem)
2412 t.appendChild(failelem)
2410 s.appendChild(t)
2413 s.appendChild(t)
2411 for tc, message in result.skipped:
2414 for tc, message in result.skipped:
2412 # According to the schema, 'skipped' has no attributes. So store
2415 # According to the schema, 'skipped' has no attributes. So store
2413 # the skip message as a text node instead.
2416 # the skip message as a text node instead.
2414 t = doc.createElement('testcase')
2417 t = doc.createElement('testcase')
2415 t.setAttribute('name', tc.name)
2418 t.setAttribute('name', tc.name)
2416 binmessage = message.encode('utf-8')
2419 binmessage = message.encode('utf-8')
2417 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2420 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2418 cd = doc.createCDATASection(message)
2421 cd = doc.createCDATASection(message)
2419 skipelem = doc.createElement('skipped')
2422 skipelem = doc.createElement('skipped')
2420 skipelem.appendChild(cd)
2423 skipelem.appendChild(cd)
2421 t.appendChild(skipelem)
2424 t.appendChild(skipelem)
2422 s.appendChild(t)
2425 s.appendChild(t)
2423 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2426 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2424
2427
2425 @staticmethod
2428 @staticmethod
2426 def _writejson(result, outf):
2429 def _writejson(result, outf):
2427 timesd = {}
2430 timesd = {}
2428 for tdata in result.times:
2431 for tdata in result.times:
2429 test = tdata[0]
2432 test = tdata[0]
2430 timesd[test] = tdata[1:]
2433 timesd[test] = tdata[1:]
2431
2434
2432 outcome = {}
2435 outcome = {}
2433 groups = [('success', ((tc, None)
2436 groups = [('success', ((tc, None)
2434 for tc in result.successes)),
2437 for tc in result.successes)),
2435 ('failure', result.failures),
2438 ('failure', result.failures),
2436 ('skip', result.skipped)]
2439 ('skip', result.skipped)]
2437 for res, testcases in groups:
2440 for res, testcases in groups:
2438 for tc, __ in testcases:
2441 for tc, __ in testcases:
2439 if tc.name in timesd:
2442 if tc.name in timesd:
2440 diff = result.faildata.get(tc.name, b'')
2443 diff = result.faildata.get(tc.name, b'')
2441 try:
2444 try:
2442 diff = diff.decode('unicode_escape')
2445 diff = diff.decode('unicode_escape')
2443 except UnicodeDecodeError as e:
2446 except UnicodeDecodeError as e:
2444 diff = '%r decoding diff, sorry' % e
2447 diff = '%r decoding diff, sorry' % e
2445 tres = {'result': res,
2448 tres = {'result': res,
2446 'time': ('%0.3f' % timesd[tc.name][2]),
2449 'time': ('%0.3f' % timesd[tc.name][2]),
2447 'cuser': ('%0.3f' % timesd[tc.name][0]),
2450 'cuser': ('%0.3f' % timesd[tc.name][0]),
2448 'csys': ('%0.3f' % timesd[tc.name][1]),
2451 'csys': ('%0.3f' % timesd[tc.name][1]),
2449 'start': ('%0.3f' % timesd[tc.name][3]),
2452 'start': ('%0.3f' % timesd[tc.name][3]),
2450 'end': ('%0.3f' % timesd[tc.name][4]),
2453 'end': ('%0.3f' % timesd[tc.name][4]),
2451 'diff': diff,
2454 'diff': diff,
2452 }
2455 }
2453 else:
2456 else:
2454 # blacklisted test
2457 # blacklisted test
2455 tres = {'result': res}
2458 tres = {'result': res}
2456
2459
2457 outcome[tc.name] = tres
2460 outcome[tc.name] = tres
2458 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2461 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2459 separators=(',', ': '))
2462 separators=(',', ': '))
2460 outf.writelines(("testreport =", jsonout))
2463 outf.writelines(("testreport =", jsonout))
2461
2464
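# For illustration (assumed example, not real output), the report.json
# produced by _writejson() above is a JavaScript-style assignment followed
# by an object keyed by test name:
#
#   testreport ={
#       "test-commit.t": {
#           "csys": "0.310",
#           "cuser": "1.020",
#           "diff": "",
#           "end": "3.155",
#           "result": "success",
#           "start": "0.000",
#           "time": "3.155"
#       }
#   }
#
# blacklisted tests only carry the "result" key.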
2462 def sorttests(testdescs, previoustimes, shuffle=False):
2465 def sorttests(testdescs, previoustimes, shuffle=False):
2463 """Do an in-place sort of tests."""
2466 """Do an in-place sort of tests."""
2464 if shuffle:
2467 if shuffle:
2465 random.shuffle(testdescs)
2468 random.shuffle(testdescs)
2466 return
2469 return
2467
2470
2468 if previoustimes:
2471 if previoustimes:
2469 def sortkey(f):
2472 def sortkey(f):
2470 f = f['path']
2473 f = f['path']
2471 if f in previoustimes:
2474 if f in previoustimes:
2472 # Use most recent time as estimate
2475 # Use most recent time as estimate
2473 return -previoustimes[f][-1]
2476 return -previoustimes[f][-1]
2474 else:
2477 else:
2475 # Default to a rather arbitrary value of 1 second for new tests
2478 # Default to a rather arbitrary value of 1 second for new tests
2476 return -1.0
2479 return -1.0
2477 else:
2480 else:
2478 # keywords for slow tests
2481 # keywords for slow tests
2479 slow = {b'svn': 10,
2482 slow = {b'svn': 10,
2480 b'cvs': 10,
2483 b'cvs': 10,
2481 b'hghave': 10,
2484 b'hghave': 10,
2482 b'largefiles-update': 10,
2485 b'largefiles-update': 10,
2483 b'run-tests': 10,
2486 b'run-tests': 10,
2484 b'corruption': 10,
2487 b'corruption': 10,
2485 b'race': 10,
2488 b'race': 10,
2486 b'i18n': 10,
2489 b'i18n': 10,
2487 b'check': 100,
2490 b'check': 100,
2488 b'gendoc': 100,
2491 b'gendoc': 100,
2489 b'contrib-perf': 200,
2492 b'contrib-perf': 200,
2490 }
2493 }
2491 perf = {}
2494 perf = {}
2492
2495
2493 def sortkey(f):
2496 def sortkey(f):
2494 # run largest tests first, as they tend to take the longest
2497 # run largest tests first, as they tend to take the longest
2495 f = f['path']
2498 f = f['path']
2496 try:
2499 try:
2497 return perf[f]
2500 return perf[f]
2498 except KeyError:
2501 except KeyError:
2499 try:
2502 try:
2500 val = -os.stat(f).st_size
2503 val = -os.stat(f).st_size
2501 except OSError as e:
2504 except OSError as e:
2502 if e.errno != errno.ENOENT:
2505 if e.errno != errno.ENOENT:
2503 raise
2506 raise
2504 perf[f] = -1e9 # file does not exist, tell early
2507 perf[f] = -1e9 # file does not exist, tell early
2505 return -1e9
2508 return -1e9
2506 for kw, mul in slow.items():
2509 for kw, mul in slow.items():
2507 if kw in f:
2510 if kw in f:
2508 val *= mul
2511 val *= mul
2509 if f.endswith(b'.py'):
2512 if f.endswith(b'.py'):
2510 val /= 10.0
2513 val /= 10.0
2511 perf[f] = val / 1000.0
2514 perf[f] = val / 1000.0
2512 return perf[f]
2515 return perf[f]
2513
2516
2514 testdescs.sort(key=sortkey)
2517 testdescs.sort(key=sortkey)
2515
2518
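# Descriptive note (not part of the original file): when no previous
# runtimes are available, the fallback sortkey above uses the negated file
# size as a cheap cost estimate, scales it by the keyword weights in
# `slow`, and divides .py tests by ten.  With assumed numbers for
# illustration: a 20 kB test containing 'svn' in its name scores
# -20000 * 10 / 1000.0 = -200.0 and therefore sorts ahead of a plain
# 20 kB .t test scoring -20.0, so the presumably slower test starts first.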
2516 class TestRunner(object):
2519 class TestRunner(object):
2517 """Holds context for executing tests.
2520 """Holds context for executing tests.
2518
2521
2519 Tests rely on a lot of state. This object holds it for them.
2522 Tests rely on a lot of state. This object holds it for them.
2520 """
2523 """
2521
2524
2522 # Programs required to run tests.
2525 # Programs required to run tests.
2523 REQUIREDTOOLS = [
2526 REQUIREDTOOLS = [
2524 b'diff',
2527 b'diff',
2525 b'grep',
2528 b'grep',
2526 b'unzip',
2529 b'unzip',
2527 b'gunzip',
2530 b'gunzip',
2528 b'bunzip2',
2531 b'bunzip2',
2529 b'sed',
2532 b'sed',
2530 ]
2533 ]
2531
2534
2532 # Maps file extensions to test class.
2535 # Maps file extensions to test class.
2533 TESTTYPES = [
2536 TESTTYPES = [
2534 (b'.py', PythonTest),
2537 (b'.py', PythonTest),
2535 (b'.t', TTest),
2538 (b'.t', TTest),
2536 ]
2539 ]
2537
2540
2538 def __init__(self):
2541 def __init__(self):
2539 self.options = None
2542 self.options = None
2540 self._hgroot = None
2543 self._hgroot = None
2541 self._testdir = None
2544 self._testdir = None
2542 self._outputdir = None
2545 self._outputdir = None
2543 self._hgtmp = None
2546 self._hgtmp = None
2544 self._installdir = None
2547 self._installdir = None
2545 self._bindir = None
2548 self._bindir = None
2546 self._tmpbinddir = None
2549 self._tmpbinddir = None
2547 self._pythondir = None
2550 self._pythondir = None
2548 self._coveragefile = None
2551 self._coveragefile = None
2549 self._createdfiles = []
2552 self._createdfiles = []
2550 self._hgcommand = None
2553 self._hgcommand = None
2551 self._hgpath = None
2554 self._hgpath = None
2552 self._portoffset = 0
2555 self._portoffset = 0
2553 self._ports = {}
2556 self._ports = {}
2554
2557
2555 def run(self, args, parser=None):
2558 def run(self, args, parser=None):
2556 """Run the test suite."""
2559 """Run the test suite."""
2557 oldmask = os.umask(0o22)
2560 oldmask = os.umask(0o22)
2558 try:
2561 try:
2559 parser = parser or getparser()
2562 parser = parser or getparser()
2560 options = parseargs(args, parser)
2563 options = parseargs(args, parser)
2561 tests = [_bytespath(a) for a in options.tests]
2564 tests = [_bytespath(a) for a in options.tests]
2562 if options.test_list is not None:
2565 if options.test_list is not None:
2563 for listfile in options.test_list:
2566 for listfile in options.test_list:
2564 with open(listfile, 'rb') as f:
2567 with open(listfile, 'rb') as f:
2565 tests.extend(t for t in f.read().splitlines() if t)
2568 tests.extend(t for t in f.read().splitlines() if t)
2566 self.options = options
2569 self.options = options
2567
2570
2568 self._checktools()
2571 self._checktools()
2569 testdescs = self.findtests(tests)
2572 testdescs = self.findtests(tests)
2570 if options.profile_runner:
2573 if options.profile_runner:
2571 import statprof
2574 import statprof
2572 statprof.start()
2575 statprof.start()
2573 result = self._run(testdescs)
2576 result = self._run(testdescs)
2574 if options.profile_runner:
2577 if options.profile_runner:
2575 statprof.stop()
2578 statprof.stop()
2576 statprof.display()
2579 statprof.display()
2577 return result
2580 return result
2578
2581
2579 finally:
2582 finally:
2580 os.umask(oldmask)
2583 os.umask(oldmask)
2581
2584
2582 def _run(self, testdescs):
2585 def _run(self, testdescs):
2583 testdir = getcwdb()
2586 testdir = getcwdb()
2584 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2587 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
2585 # assume all tests in same folder for now
2588 # assume all tests in same folder for now
2586 if testdescs:
2589 if testdescs:
2587 pathname = os.path.dirname(testdescs[0]['path'])
2590 pathname = os.path.dirname(testdescs[0]['path'])
2588 if pathname:
2591 if pathname:
2589 testdir = os.path.join(testdir, pathname)
2592 testdir = os.path.join(testdir, pathname)
2590 self._testdir = osenvironb[b'TESTDIR'] = testdir
2593 self._testdir = osenvironb[b'TESTDIR'] = testdir
2591 if self.options.outputdir:
2594 if self.options.outputdir:
2592 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2595 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2593 else:
2596 else:
2594 self._outputdir = getcwdb()
2597 self._outputdir = getcwdb()
2595 if testdescs and pathname:
2598 if testdescs and pathname:
2596 self._outputdir = os.path.join(self._outputdir, pathname)
2599 self._outputdir = os.path.join(self._outputdir, pathname)
2597 previoustimes = {}
2600 previoustimes = {}
2598 if self.options.order_by_runtime:
2601 if self.options.order_by_runtime:
2599 previoustimes = dict(loadtimes(self._outputdir))
2602 previoustimes = dict(loadtimes(self._outputdir))
2600 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2603 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2601
2604
2602 if 'PYTHONHASHSEED' not in os.environ:
2605 if 'PYTHONHASHSEED' not in os.environ:
2603 # use a random python hash seed all the time
2606 # use a random python hash seed all the time
2604 # we do the randomness ourselves to know what seed is used
2607 # we do the randomness ourselves to know what seed is used
2605 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2608 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2606
2609
2607 if self.options.tmpdir:
2610 if self.options.tmpdir:
2608 self.options.keep_tmpdir = True
2611 self.options.keep_tmpdir = True
2609 tmpdir = _bytespath(self.options.tmpdir)
2612 tmpdir = _bytespath(self.options.tmpdir)
2610 if os.path.exists(tmpdir):
2613 if os.path.exists(tmpdir):
2611 # Meaning of tmpdir has changed since 1.3: we used to create
2614 # Meaning of tmpdir has changed since 1.3: we used to create
2612 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2615 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2613 # tmpdir already exists.
2616 # tmpdir already exists.
2614 print("error: temp dir %r already exists" % tmpdir)
2617 print("error: temp dir %r already exists" % tmpdir)
2615 return 1
2618 return 1
2616
2619
2617 os.makedirs(tmpdir)
2620 os.makedirs(tmpdir)
2618 else:
2621 else:
2619 d = None
2622 d = None
2620 if os.name == 'nt':
2623 if os.name == 'nt':
2621 # without this, we get the default temp dir location, but
2624 # without this, we get the default temp dir location, but
2622 # in all lowercase, which causes trouble with paths (issue3490)
2625 # in all lowercase, which causes trouble with paths (issue3490)
2623 d = osenvironb.get(b'TMP', None)
2626 d = osenvironb.get(b'TMP', None)
2624 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2627 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2625
2628
2626 self._hgtmp = osenvironb[b'HGTMP'] = (
2629 self._hgtmp = osenvironb[b'HGTMP'] = (
2627 os.path.realpath(tmpdir))
2630 os.path.realpath(tmpdir))
2628
2631
2629 if self.options.with_hg:
2632 if self.options.with_hg:
2630 self._installdir = None
2633 self._installdir = None
2631 whg = self.options.with_hg
2634 whg = self.options.with_hg
2632 self._bindir = os.path.dirname(os.path.realpath(whg))
2635 self._bindir = os.path.dirname(os.path.realpath(whg))
2633 assert isinstance(self._bindir, bytes)
2636 assert isinstance(self._bindir, bytes)
2634 self._hgcommand = os.path.basename(whg)
2637 self._hgcommand = os.path.basename(whg)
2635 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2638 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2636 os.makedirs(self._tmpbindir)
2639 os.makedirs(self._tmpbindir)
2637
2640
2638 normbin = os.path.normpath(os.path.abspath(whg))
2641 normbin = os.path.normpath(os.path.abspath(whg))
2639 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2642 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2640
2643
2641 # Other Python scripts in the test harness need to
2644 # Other Python scripts in the test harness need to
2642 # `import mercurial`. If `hg` is a Python script, we assume
2645 # `import mercurial`. If `hg` is a Python script, we assume
2643 # the Mercurial modules are relative to its path and tell the tests
2646 # the Mercurial modules are relative to its path and tell the tests
2644 # to load Python modules from its directory.
2647 # to load Python modules from its directory.
2645 with open(whg, 'rb') as fh:
2648 with open(whg, 'rb') as fh:
2646 initial = fh.read(1024)
2649 initial = fh.read(1024)
2647
2650
2648 if re.match(b'#!.*python', initial):
2651 if re.match(b'#!.*python', initial):
2649 self._pythondir = self._bindir
2652 self._pythondir = self._bindir
2650 # If it looks like our in-repo Rust binary, use the source root.
2653 # If it looks like our in-repo Rust binary, use the source root.
2651 # This is a bit hacky. But rhg is still not supported outside the
2654 # This is a bit hacky. But rhg is still not supported outside the
2652 # source directory. So until it is, do the simple thing.
2655 # source directory. So until it is, do the simple thing.
2653 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2656 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2654 self._pythondir = os.path.dirname(self._testdir)
2657 self._pythondir = os.path.dirname(self._testdir)
2655 # Fall back to the legacy behavior.
2658 # Fall back to the legacy behavior.
2656 else:
2659 else:
2657 self._pythondir = self._bindir
2660 self._pythondir = self._bindir
2658
2661
2659 else:
2662 else:
2660 self._installdir = os.path.join(self._hgtmp, b"install")
2663 self._installdir = os.path.join(self._hgtmp, b"install")
2661 self._bindir = os.path.join(self._installdir, b"bin")
2664 self._bindir = os.path.join(self._installdir, b"bin")
2662 self._hgcommand = b'hg'
2665 self._hgcommand = b'hg'
2663 self._tmpbindir = self._bindir
2666 self._tmpbindir = self._bindir
2664 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2667 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2665
2668
2666 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
2669 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
2667 # a python script and feed it to python.exe. Legacy stdio is force
2670 # a python script and feed it to python.exe. Legacy stdio is force
2668 # enabled by hg.exe, and this is a more realistic way to launch hg
2671 # enabled by hg.exe, and this is a more realistic way to launch hg
2669 # anyway.
2672 # anyway.
2670 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
2673 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
2671 self._hgcommand += b'.exe'
2674 self._hgcommand += b'.exe'
2672
2675
2673 # set CHGHG, then replace "hg" command by "chg"
2676 # set CHGHG, then replace "hg" command by "chg"
2674 chgbindir = self._bindir
2677 chgbindir = self._bindir
2675 if self.options.chg or self.options.with_chg:
2678 if self.options.chg or self.options.with_chg:
2676 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2679 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2677 else:
2680 else:
2678 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2681 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2679 if self.options.chg:
2682 if self.options.chg:
2680 self._hgcommand = b'chg'
2683 self._hgcommand = b'chg'
2681 elif self.options.with_chg:
2684 elif self.options.with_chg:
2682 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2685 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2683 self._hgcommand = os.path.basename(self.options.with_chg)
2686 self._hgcommand = os.path.basename(self.options.with_chg)
2684
2687
2685 osenvironb[b"BINDIR"] = self._bindir
2688 osenvironb[b"BINDIR"] = self._bindir
2686 osenvironb[b"PYTHON"] = PYTHON
2689 osenvironb[b"PYTHON"] = PYTHON
2687
2690
2688 fileb = _bytespath(__file__)
2691 fileb = _bytespath(__file__)
2689 runtestdir = os.path.abspath(os.path.dirname(fileb))
2692 runtestdir = os.path.abspath(os.path.dirname(fileb))
2690 osenvironb[b'RUNTESTDIR'] = runtestdir
2693 osenvironb[b'RUNTESTDIR'] = runtestdir
2691 if PYTHON3:
2694 if PYTHON3:
2692 sepb = _bytespath(os.pathsep)
2695 sepb = _bytespath(os.pathsep)
2693 else:
2696 else:
2694 sepb = os.pathsep
2697 sepb = os.pathsep
2695 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2698 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2696 if os.path.islink(__file__):
2699 if os.path.islink(__file__):
2697 # test helper will likely be at the end of the symlink
2700 # test helper will likely be at the end of the symlink
2698 realfile = os.path.realpath(fileb)
2701 realfile = os.path.realpath(fileb)
2699 realdir = os.path.abspath(os.path.dirname(realfile))
2702 realdir = os.path.abspath(os.path.dirname(realfile))
2700 path.insert(2, realdir)
2703 path.insert(2, realdir)
2701 if chgbindir != self._bindir:
2704 if chgbindir != self._bindir:
2702 path.insert(1, chgbindir)
2705 path.insert(1, chgbindir)
2703 if self._testdir != runtestdir:
2706 if self._testdir != runtestdir:
2704 path = [self._testdir] + path
2707 path = [self._testdir] + path
2705 if self._tmpbindir != self._bindir:
2708 if self._tmpbindir != self._bindir:
2706 path = [self._tmpbindir] + path
2709 path = [self._tmpbindir] + path
2707 osenvironb[b"PATH"] = sepb.join(path)
2710 osenvironb[b"PATH"] = sepb.join(path)
2708
2711
2709 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2712 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2710 # can run .../tests/run-tests.py test-foo where test-foo
2713 # can run .../tests/run-tests.py test-foo where test-foo
2711 # adds an extension to HGRC. Also include the run-tests.py directory to
2714 # adds an extension to HGRC. Also include the run-tests.py directory to
2712 # import modules like heredoctest.
2715 # import modules like heredoctest.
2713 pypath = [self._pythondir, self._testdir, runtestdir]
2716 pypath = [self._pythondir, self._testdir, runtestdir]
2714 # We have to augment PYTHONPATH, rather than simply replacing
2717 # We have to augment PYTHONPATH, rather than simply replacing
2715 # it, in case external libraries are only available via current
2718 # it, in case external libraries are only available via current
2716 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2719 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2717 # are in /opt/subversion.)
2720 # are in /opt/subversion.)
2718 oldpypath = osenvironb.get(IMPL_PATH)
2721 oldpypath = osenvironb.get(IMPL_PATH)
2719 if oldpypath:
2722 if oldpypath:
2720 pypath.append(oldpypath)
2723 pypath.append(oldpypath)
2721 osenvironb[IMPL_PATH] = sepb.join(pypath)
2724 osenvironb[IMPL_PATH] = sepb.join(pypath)
2722
2725
2723 if self.options.pure:
2726 if self.options.pure:
2724 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2727 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2725 os.environ["HGMODULEPOLICY"] = "py"
2728 os.environ["HGMODULEPOLICY"] = "py"
2726
2729
2727 if self.options.allow_slow_tests:
2730 if self.options.allow_slow_tests:
2728 os.environ["HGTEST_SLOW"] = "slow"
2731 os.environ["HGTEST_SLOW"] = "slow"
2729 elif 'HGTEST_SLOW' in os.environ:
2732 elif 'HGTEST_SLOW' in os.environ:
2730 del os.environ['HGTEST_SLOW']
2733 del os.environ['HGTEST_SLOW']
2731
2734
2732 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2735 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2733
2736
2734 if self.options.exceptions:
2737 if self.options.exceptions:
2735 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2738 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2736 try:
2739 try:
2737 os.makedirs(exceptionsdir)
2740 os.makedirs(exceptionsdir)
2738 except OSError as e:
2741 except OSError as e:
2739 if e.errno != errno.EEXIST:
2742 if e.errno != errno.EEXIST:
2740 raise
2743 raise
2741
2744
2742 # Remove all existing exception reports.
2745 # Remove all existing exception reports.
2743 for f in os.listdir(exceptionsdir):
2746 for f in os.listdir(exceptionsdir):
2744 os.unlink(os.path.join(exceptionsdir, f))
2747 os.unlink(os.path.join(exceptionsdir, f))
2745
2748
2746 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2749 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2747 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2750 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2748 self.options.extra_config_opt.append(
2751 self.options.extra_config_opt.append(
2749 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2752 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2750
2753
2751 vlog("# Using TESTDIR", self._testdir)
2754 vlog("# Using TESTDIR", self._testdir)
2752 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2755 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2753 vlog("# Using HGTMP", self._hgtmp)
2756 vlog("# Using HGTMP", self._hgtmp)
2754 vlog("# Using PATH", os.environ["PATH"])
2757 vlog("# Using PATH", os.environ["PATH"])
2755 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2758 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2756 vlog("# Writing to directory", self._outputdir)
2759 vlog("# Writing to directory", self._outputdir)
2757
2760
2758 try:
2761 try:
2759 return self._runtests(testdescs) or 0
2762 return self._runtests(testdescs) or 0
2760 finally:
2763 finally:
2761 time.sleep(.1)
2764 time.sleep(.1)
2762 self._cleanup()
2765 self._cleanup()
2763
2766
2764 def findtests(self, args):
2767 def findtests(self, args):
2765 """Finds possible test files from arguments.
2768 """Finds possible test files from arguments.
2766
2769
2767 If you wish to inject custom tests into the test harness, this would
2770 If you wish to inject custom tests into the test harness, this would
2768 be a good function to monkeypatch or override in a derived class.
2771 be a good function to monkeypatch or override in a derived class.
2769 """
2772 """
2770 if not args:
2773 if not args:
2771 if self.options.changed:
2774 if self.options.changed:
2772 proc = Popen4('hg st --rev "%s" -man0 .' %
2775 proc = Popen4('hg st --rev "%s" -man0 .' %
2773 self.options.changed, None, 0)
2776 self.options.changed, None, 0)
2774 stdout, stderr = proc.communicate()
2777 stdout, stderr = proc.communicate()
2775 args = stdout.strip(b'\0').split(b'\0')
2778 args = stdout.strip(b'\0').split(b'\0')
2776 else:
2779 else:
2777 args = os.listdir(b'.')
2780 args = os.listdir(b'.')
2778
2781
2779 expanded_args = []
2782 expanded_args = []
2780 for arg in args:
2783 for arg in args:
2781 if os.path.isdir(arg):
2784 if os.path.isdir(arg):
2782 if not arg.endswith(b'/'):
2785 if not arg.endswith(b'/'):
2783 arg += b'/'
2786 arg += b'/'
2784 expanded_args.extend([arg + a for a in os.listdir(arg)])
2787 expanded_args.extend([arg + a for a in os.listdir(arg)])
2785 else:
2788 else:
2786 expanded_args.append(arg)
2789 expanded_args.append(arg)
2787 args = expanded_args
2790 args = expanded_args
2788
2791
2789 testcasepattern = re.compile(
2792 testcasepattern = re.compile(
2790 br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))')
2793 br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))')
2791 tests = []
2794 tests = []
2792 for t in args:
2795 for t in args:
2793 case = []
2796 case = []
2794
2797
2795 if not (os.path.basename(t).startswith(b'test-')
2798 if not (os.path.basename(t).startswith(b'test-')
2796 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2799 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2797
2800
2798 m = testcasepattern.match(os.path.basename(t))
2801 m = testcasepattern.match(os.path.basename(t))
2799 if m is not None:
2802 if m is not None:
2800 t_basename, casestr = m.groups()
2803 t_basename, casestr = m.groups()
2801 t = os.path.join(os.path.dirname(t), t_basename)
2804 t = os.path.join(os.path.dirname(t), t_basename)
2802 if casestr:
2805 if casestr:
2803 case = casestr.split(b'#')
2806 case = casestr.split(b'#')
2804 else:
2807 else:
2805 continue
2808 continue
2806
2809
2807 if t.endswith(b'.t'):
2810 if t.endswith(b'.t'):
2808 # .t file may contain multiple test cases
2811 # .t file may contain multiple test cases
2809 casedimensions = parsettestcases(t)
2812 casedimensions = parsettestcases(t)
2810 if casedimensions:
2813 if casedimensions:
2811 cases = []
2814 cases = []
2812 def addcases(case, casedimensions):
2815 def addcases(case, casedimensions):
2813 if not casedimensions:
2816 if not casedimensions:
2814 cases.append(case)
2817 cases.append(case)
2815 else:
2818 else:
2816 for c in casedimensions[0]:
2819 for c in casedimensions[0]:
2817 addcases(case + [c], casedimensions[1:])
2820 addcases(case + [c], casedimensions[1:])
2818 addcases([], casedimensions)
2821 addcases([], casedimensions)
2819 if case and case in cases:
2822 if case and case in cases:
2820 cases = [case]
2823 cases = [case]
2821 elif case:
2824 elif case:
2822 # Ignore invalid cases
2825 # Ignore invalid cases
2823 cases = []
2826 cases = []
2824 else:
2827 else:
2825 pass
2828 pass
2826 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2829 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2827 else:
2830 else:
2828 tests.append({'path': t})
2831 tests.append({'path': t})
2829 else:
2832 else:
2830 tests.append({'path': t})
2833 tests.append({'path': t})
2831 return tests
2834 return tests
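
The nested addcases() helper above expands the per-file case dimensions into the full cartesian product of case names. A minimal standalone sketch of that expansion, with a hypothetical input, looks like this:

    def expand_cases(casedimensions):
        # Recursively build the cartesian product of the case dimensions,
        # mirroring the addcases() closure in findtests().
        cases = []
        def addcases(case, remaining):
            if not remaining:
                cases.append(case)
            else:
                for c in remaining[0]:
                    addcases(case + [c], remaining[1:])
        addcases([], casedimensions)
        return cases

    # Hypothetical example:
    # expand_cases([[b'a', b'b'], [b'x', b'y']])
    # -> [[b'a', b'x'], [b'a', b'y'], [b'b', b'x'], [b'b', b'y']]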
2832
2835
2833 def _runtests(self, testdescs):
2836 def _runtests(self, testdescs):
2834 def _reloadtest(test, i):
2837 def _reloadtest(test, i):
2835 # convert a test back to its description dict
2838 # convert a test back to its description dict
2836 desc = {'path': test.path}
2839 desc = {'path': test.path}
2837 case = getattr(test, '_case', [])
2840 case = getattr(test, '_case', [])
2838 if case:
2841 if case:
2839 desc['case'] = case
2842 desc['case'] = case
2840 return self._gettest(desc, i)
2843 return self._gettest(desc, i)
2841
2844
2842 try:
2845 try:
2843 if self.options.restart:
2846 if self.options.restart:
2844 orig = list(testdescs)
2847 orig = list(testdescs)
2845 while testdescs:
2848 while testdescs:
2846 desc = testdescs[0]
2849 desc = testdescs[0]
2847 # desc['path'] is a relative path
2850 # desc['path'] is a relative path
2848 if 'case' in desc:
2851 if 'case' in desc:
2849 casestr = b'#'.join(desc['case'])
2852 casestr = b'#'.join(desc['case'])
2850 errpath = b'%s#%s.err' % (desc['path'], casestr)
2853 errpath = b'%s#%s.err' % (desc['path'], casestr)
2851 else:
2854 else:
2852 errpath = b'%s.err' % desc['path']
2855 errpath = b'%s.err' % desc['path']
2853 errpath = os.path.join(self._outputdir, errpath)
2856 errpath = os.path.join(self._outputdir, errpath)
2854 if os.path.exists(errpath):
2857 if os.path.exists(errpath):
2855 break
2858 break
2856 testdescs.pop(0)
2859 testdescs.pop(0)
2857 if not testdescs:
2860 if not testdescs:
2858 print("running all tests")
2861 print("running all tests")
2859 testdescs = orig
2862 testdescs = orig
2860
2863
2861 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2864 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2862 num_tests = len(tests) * self.options.runs_per_test
2865 num_tests = len(tests) * self.options.runs_per_test
2863
2866
2864 jobs = min(num_tests, self.options.jobs)
2867 jobs = min(num_tests, self.options.jobs)
2865
2868
2866 failed = False
2869 failed = False
2867 kws = self.options.keywords
2870 kws = self.options.keywords
2868 if kws is not None and PYTHON3:
2871 if kws is not None and PYTHON3:
2869 kws = kws.encode('utf-8')
2872 kws = kws.encode('utf-8')
2870
2873
2871 suite = TestSuite(self._testdir,
2874 suite = TestSuite(self._testdir,
2872 jobs=jobs,
2875 jobs=jobs,
2873 whitelist=self.options.whitelisted,
2876 whitelist=self.options.whitelisted,
2874 blacklist=self.options.blacklist,
2877 blacklist=self.options.blacklist,
2875 retest=self.options.retest,
2878 retest=self.options.retest,
2876 keywords=kws,
2879 keywords=kws,
2877 loop=self.options.loop,
2880 loop=self.options.loop,
2878 runs_per_test=self.options.runs_per_test,
2881 runs_per_test=self.options.runs_per_test,
2879 showchannels=self.options.showchannels,
2882 showchannels=self.options.showchannels,
2880 tests=tests, loadtest=_reloadtest)
2883 tests=tests, loadtest=_reloadtest)
2881 verbosity = 1
2884 verbosity = 1
2882 if self.options.list_tests:
2885 if self.options.list_tests:
2883 verbosity = 0
2886 verbosity = 0
2884 elif self.options.verbose:
2887 elif self.options.verbose:
2885 verbosity = 2
2888 verbosity = 2
2886 runner = TextTestRunner(self, verbosity=verbosity)
2889 runner = TextTestRunner(self, verbosity=verbosity)
2887
2890
2888 if self.options.list_tests:
2891 if self.options.list_tests:
2889 result = runner.listtests(suite)
2892 result = runner.listtests(suite)
2890 else:
2893 else:
2891 if self._installdir:
2894 if self._installdir:
2892 self._installhg()
2895 self._installhg()
2893 self._checkhglib("Testing")
2896 self._checkhglib("Testing")
2894 else:
2897 else:
2895 self._usecorrectpython()
2898 self._usecorrectpython()
2896 if self.options.chg:
2899 if self.options.chg:
2897 assert self._installdir
2900 assert self._installdir
2898 self._installchg()
2901 self._installchg()
2899
2902
2900 log('running %d tests using %d parallel processes' % (
2903 log('running %d tests using %d parallel processes' % (
2901 num_tests, jobs))
2904 num_tests, jobs))
2902
2905
2903 result = runner.run(suite)
2906 result = runner.run(suite)
2904
2907
2905 if result.failures:
2908 if result.failures:
2906 failed = True
2909 failed = True
2907
2910
2908 result.onEnd()
2911 result.onEnd()
2909
2912
2910 if self.options.anycoverage:
2913 if self.options.anycoverage:
2911 self._outputcoverage()
2914 self._outputcoverage()
2912 except KeyboardInterrupt:
2915 except KeyboardInterrupt:
2913 failed = True
2916 failed = True
2914 print("\ninterrupted!")
2917 print("\ninterrupted!")
2915
2918
2916 if failed:
2919 if failed:
2917 return 1
2920 return 1
2918
2921
2919 def _getport(self, count):
2922 def _getport(self, count):
2920 port = self._ports.get(count) # do we have a cached entry?
2923 port = self._ports.get(count) # do we have a cached entry?
2921 if port is None:
2924 if port is None:
2922 portneeded = 3
2925 portneeded = 3
2925 # after 100 tries we just give up and let the test report the failure
2926 # after 100 tries we just give up and let the test report the failure
2924 for tries in xrange(100):
2927 for tries in xrange(100):
2925 allfree = True
2928 allfree = True
2926 port = self.options.port + self._portoffset
2929 port = self.options.port + self._portoffset
2927 for idx in xrange(portneeded):
2930 for idx in xrange(portneeded):
2928 if not checkportisavailable(port + idx):
2931 if not checkportisavailable(port + idx):
2929 allfree = False
2932 allfree = False
2930 break
2933 break
2931 self._portoffset += portneeded
2934 self._portoffset += portneeded
2932 if allfree:
2935 if allfree:
2933 break
2936 break
2934 self._ports[count] = port
2937 self._ports[count] = port
2935 return port
2938 return port
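
_getport() relies on checkportisavailable(), which is defined earlier in run-tests.py and not shown in this hunk. As a rough illustration only (an assumption, not the actual implementation), such a probe is typically written by attempting to bind the candidate port:

    import socket

    def probe_port_free(port, host='localhost'):
        # Return True if nothing is currently bound to (host, port).
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind((host, port))
            return True
        except OSError:
            return False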
2936
2939
2937 def _gettest(self, testdesc, count):
2940 def _gettest(self, testdesc, count):
2938 """Obtain a Test by looking at its filename.
2941 """Obtain a Test by looking at its filename.
2939
2942
2940 Returns a Test instance. The Test may not be runnable if it doesn't
2943 Returns a Test instance. The Test may not be runnable if it doesn't
2941 map to a known type.
2944 map to a known type.
2942 """
2945 """
2943 path = testdesc['path']
2946 path = testdesc['path']
2944 lctest = path.lower()
2947 lctest = path.lower()
2945 testcls = Test
2948 testcls = Test
2946
2949
2947 for ext, cls in self.TESTTYPES:
2950 for ext, cls in self.TESTTYPES:
2948 if lctest.endswith(ext):
2951 if lctest.endswith(ext):
2949 testcls = cls
2952 testcls = cls
2950 break
2953 break
2951
2954
2952 refpath = os.path.join(getcwdb(), path)
2955 refpath = os.path.join(getcwdb(), path)
2953 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2956 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2954
2957
2955 # extra keyword parameters. 'case' is used by .t tests
2958 # extra keyword parameters. 'case' is used by .t tests
2956 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2959 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2957
2960
2958 t = testcls(refpath, self._outputdir, tmpdir,
2961 t = testcls(refpath, self._outputdir, tmpdir,
2959 keeptmpdir=self.options.keep_tmpdir,
2962 keeptmpdir=self.options.keep_tmpdir,
2960 debug=self.options.debug,
2963 debug=self.options.debug,
2961 first=self.options.first,
2964 first=self.options.first,
2962 timeout=self.options.timeout,
2965 timeout=self.options.timeout,
2963 startport=self._getport(count),
2966 startport=self._getport(count),
2964 extraconfigopts=self.options.extra_config_opt,
2967 extraconfigopts=self.options.extra_config_opt,
2965 py3warnings=self.options.py3_warnings,
2968 py3warnings=self.options.py3_warnings,
2966 shell=self.options.shell,
2969 shell=self.options.shell,
2967 hgcommand=self._hgcommand,
2970 hgcommand=self._hgcommand,
2968 usechg=bool(self.options.with_chg or self.options.chg),
2971 usechg=bool(self.options.with_chg or self.options.chg),
2969 useipv6=useipv6, **kwds)
2972 useipv6=useipv6, **kwds)
2970 t.should_reload = True
2973 t.should_reload = True
2971 return t
2974 return t
2972
2975
2973 def _cleanup(self):
2976 def _cleanup(self):
2974 """Clean up state from this test invocation."""
2977 """Clean up state from this test invocation."""
2975 if self.options.keep_tmpdir:
2978 if self.options.keep_tmpdir:
2976 return
2979 return
2977
2980
2978 vlog("# Cleaning up HGTMP", self._hgtmp)
2981 vlog("# Cleaning up HGTMP", self._hgtmp)
2979 shutil.rmtree(self._hgtmp, True)
2982 shutil.rmtree(self._hgtmp, True)
2980 for f in self._createdfiles:
2983 for f in self._createdfiles:
2981 try:
2984 try:
2982 os.remove(f)
2985 os.remove(f)
2983 except OSError:
2986 except OSError:
2984 pass
2987 pass
2985
2988
2986 def _usecorrectpython(self):
2989 def _usecorrectpython(self):
2987 """Configure the environment to use the appropriate Python in tests."""
2990 """Configure the environment to use the appropriate Python in tests."""
2988 # Tests must use the same interpreter as us or bad things will happen.
2991 # Tests must use the same interpreter as us or bad things will happen.
2989 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2992 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2990
2993
2991 # os.symlink() is a thing with py3 on Windows, but it requires
2994 # os.symlink() is a thing with py3 on Windows, but it requires
2992 # Administrator rights.
2995 # Administrator rights.
2993 if getattr(os, 'symlink', None) and os.name != 'nt':
2996 if getattr(os, 'symlink', None) and os.name != 'nt':
2994 vlog("# Making python executable in test path a symlink to '%s'" %
2997 vlog("# Making python executable in test path a symlink to '%s'" %
2995 sys.executable)
2998 sys.executable)
2996 mypython = os.path.join(self._tmpbindir, pyexename)
2999 mypython = os.path.join(self._tmpbindir, pyexename)
2997 try:
3000 try:
2998 if os.readlink(mypython) == sys.executable:
3001 if os.readlink(mypython) == sys.executable:
2999 return
3002 return
3000 os.unlink(mypython)
3003 os.unlink(mypython)
3001 except OSError as err:
3004 except OSError as err:
3002 if err.errno != errno.ENOENT:
3005 if err.errno != errno.ENOENT:
3003 raise
3006 raise
3004 if self._findprogram(pyexename) != sys.executable:
3007 if self._findprogram(pyexename) != sys.executable:
3005 try:
3008 try:
3006 os.symlink(sys.executable, mypython)
3009 os.symlink(sys.executable, mypython)
3007 self._createdfiles.append(mypython)
3010 self._createdfiles.append(mypython)
3008 except OSError as err:
3011 except OSError as err:
3009 # child processes may race, which is harmless
3012 # child processes may race, which is harmless
3010 if err.errno != errno.EEXIST:
3013 if err.errno != errno.EEXIST:
3011 raise
3014 raise
3012 else:
3015 else:
3013 exedir, exename = os.path.split(sys.executable)
3016 exedir, exename = os.path.split(sys.executable)
3014 vlog("# Modifying search path to find %s as %s in '%s'" %
3017 vlog("# Modifying search path to find %s as %s in '%s'" %
3015 (exename, pyexename, exedir))
3018 (exename, pyexename, exedir))
3016 path = os.environ['PATH'].split(os.pathsep)
3019 path = os.environ['PATH'].split(os.pathsep)
3017 while exedir in path:
3020 while exedir in path:
3018 path.remove(exedir)
3021 path.remove(exedir)
3019 os.environ['PATH'] = os.pathsep.join([exedir] + path)
3022 os.environ['PATH'] = os.pathsep.join([exedir] + path)
3020 if not self._findprogram(pyexename):
3023 if not self._findprogram(pyexename):
3021 print("WARNING: Cannot find %s in search path" % pyexename)
3024 print("WARNING: Cannot find %s in search path" % pyexename)
3022
3025
3023 def _installhg(self):
3026 def _installhg(self):
3024 """Install hg into the test environment.
3027 """Install hg into the test environment.
3025
3028
3026 This will also configure hg with the appropriate testing settings.
3029 This will also configure hg with the appropriate testing settings.
3027 """
3030 """
3028 vlog("# Performing temporary installation of HG")
3031 vlog("# Performing temporary installation of HG")
3029 installerrs = os.path.join(self._hgtmp, b"install.err")
3032 installerrs = os.path.join(self._hgtmp, b"install.err")
3030 compiler = ''
3033 compiler = ''
3031 if self.options.compiler:
3034 if self.options.compiler:
3032 compiler = '--compiler ' + self.options.compiler
3035 compiler = '--compiler ' + self.options.compiler
3033 if self.options.pure:
3036 if self.options.pure:
3034 pure = b"--pure"
3037 pure = b"--pure"
3035 else:
3038 else:
3036 pure = b""
3039 pure = b""
3037
3040
3038 # Run installer in hg root
3041 # Run installer in hg root
3039 script = os.path.realpath(sys.argv[0])
3042 script = os.path.realpath(sys.argv[0])
3040 exe = sys.executable
3043 exe = sys.executable
3041 if PYTHON3:
3044 if PYTHON3:
3042 compiler = _bytespath(compiler)
3045 compiler = _bytespath(compiler)
3043 script = _bytespath(script)
3046 script = _bytespath(script)
3044 exe = _bytespath(exe)
3047 exe = _bytespath(exe)
3045 hgroot = os.path.dirname(os.path.dirname(script))
3048 hgroot = os.path.dirname(os.path.dirname(script))
3046 self._hgroot = hgroot
3049 self._hgroot = hgroot
3047 os.chdir(hgroot)
3050 os.chdir(hgroot)
3048 nohome = b'--home=""'
3051 nohome = b'--home=""'
3049 if os.name == 'nt':
3052 if os.name == 'nt':
3050 # The --home="" trick works only on OSes where os.sep == '/'
3053 # The --home="" trick works only on OSes where os.sep == '/'
3051 # because of a distutils convert_path() fast-path. Avoid it at
3054 # because of a distutils convert_path() fast-path. Avoid it at
3052 # least on Windows for now, deal with .pydistutils.cfg bugs
3055 # least on Windows for now, deal with .pydistutils.cfg bugs
3053 # when they happen.
3056 # when they happen.
3054 nohome = b''
3057 nohome = b''
3055 cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
3058 cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
3056 b' build %(compiler)s --build-base="%(base)s"'
3059 b' build %(compiler)s --build-base="%(base)s"'
3057 b' install --force --prefix="%(prefix)s"'
3060 b' install --force --prefix="%(prefix)s"'
3058 b' --install-lib="%(libdir)s"'
3061 b' --install-lib="%(libdir)s"'
3059 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3062 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3060 % {b'exe': exe, b'pure': pure,
3063 % {b'exe': exe, b'pure': pure,
3061 b'compiler': compiler,
3064 b'compiler': compiler,
3062 b'base': os.path.join(self._hgtmp, b"build"),
3065 b'base': os.path.join(self._hgtmp, b"build"),
3063 b'prefix': self._installdir, b'libdir': self._pythondir,
3066 b'prefix': self._installdir, b'libdir': self._pythondir,
3064 b'bindir': self._bindir,
3067 b'bindir': self._bindir,
3065 b'nohome': nohome, b'logfile': installerrs})
3068 b'nohome': nohome, b'logfile': installerrs})
3066
3069
3067 # setuptools requires install directories to exist.
3070 # setuptools requires install directories to exist.
3068 def makedirs(p):
3071 def makedirs(p):
3069 try:
3072 try:
3070 os.makedirs(p)
3073 os.makedirs(p)
3071 except OSError as e:
3074 except OSError as e:
3072 if e.errno != errno.EEXIST:
3075 if e.errno != errno.EEXIST:
3073 raise
3076 raise
3074 makedirs(self._pythondir)
3077 makedirs(self._pythondir)
3075 makedirs(self._bindir)
3078 makedirs(self._bindir)
3076
3079
3077 vlog("# Running", cmd)
3080 vlog("# Running", cmd)
3078 if subprocess.call(_strpath(cmd), shell=True) == 0:
3081 if subprocess.call(_strpath(cmd), shell=True) == 0:
3079 if not self.options.verbose:
3082 if not self.options.verbose:
3080 try:
3083 try:
3081 os.remove(installerrs)
3084 os.remove(installerrs)
3082 except OSError as e:
3085 except OSError as e:
3083 if e.errno != errno.ENOENT:
3086 if e.errno != errno.ENOENT:
3084 raise
3087 raise
3085 else:
3088 else:
3086 with open(installerrs, 'rb') as f:
3089 with open(installerrs, 'rb') as f:
3087 for line in f:
3090 for line in f:
3088 if PYTHON3:
3091 if PYTHON3:
3089 sys.stdout.buffer.write(line)
3092 sys.stdout.buffer.write(line)
3090 else:
3093 else:
3091 sys.stdout.write(line)
3094 sys.stdout.write(line)
3092 sys.exit(1)
3095 sys.exit(1)
3093 os.chdir(self._testdir)
3096 os.chdir(self._testdir)
3094
3097
3095 self._usecorrectpython()
3098 self._usecorrectpython()
3096
3099
3097 if self.options.py3_warnings and not self.options.anycoverage:
3100 if self.options.py3_warnings and not self.options.anycoverage:
3098 vlog("# Updating hg command to enable Py3k Warnings switch")
3101 vlog("# Updating hg command to enable Py3k Warnings switch")
3099 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3102 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3100 lines = [line.rstrip() for line in f]
3103 lines = [line.rstrip() for line in f]
3101 lines[0] += ' -3'
3104 lines[0] += ' -3'
3102 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3105 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3103 for line in lines:
3106 for line in lines:
3104 f.write(line + '\n')
3107 f.write(line + '\n')
3105
3108
3106 hgbat = os.path.join(self._bindir, b'hg.bat')
3109 hgbat = os.path.join(self._bindir, b'hg.bat')
3107 if os.path.isfile(hgbat):
3110 if os.path.isfile(hgbat):
3108 # hg.bat expects to be put in bin/scripts while run-tests.py
3111 # hg.bat expects to be put in bin/scripts while run-tests.py
3109 # installation layout puts it in bin/ directly. Fix it.
3112 # installation layout puts it in bin/ directly. Fix it.
3110 with open(hgbat, 'rb') as f:
3113 with open(hgbat, 'rb') as f:
3111 data = f.read()
3114 data = f.read()
3112 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3115 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
3113 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3116 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
3114 b'"%~dp0python" "%~dp0hg" %*')
3117 b'"%~dp0python" "%~dp0hg" %*')
3115 with open(hgbat, 'wb') as f:
3118 with open(hgbat, 'wb') as f:
3116 f.write(data)
3119 f.write(data)
3117 else:
3120 else:
3118 print('WARNING: cannot fix hg.bat reference to python.exe')
3121 print('WARNING: cannot fix hg.bat reference to python.exe')
3119
3122
3120 if self.options.anycoverage:
3123 if self.options.anycoverage:
3121 custom = os.path.join(self._testdir, 'sitecustomize.py')
3124 custom = os.path.join(self._testdir, 'sitecustomize.py')
3122 target = os.path.join(self._pythondir, 'sitecustomize.py')
3125 target = os.path.join(self._pythondir, 'sitecustomize.py')
3123 vlog('# Installing coverage trigger to %s' % target)
3126 vlog('# Installing coverage trigger to %s' % target)
3124 shutil.copyfile(custom, target)
3127 shutil.copyfile(custom, target)
3125 rc = os.path.join(self._testdir, '.coveragerc')
3128 rc = os.path.join(self._testdir, '.coveragerc')
3126 vlog('# Installing coverage rc to %s' % rc)
3129 vlog('# Installing coverage rc to %s' % rc)
3127 os.environ['COVERAGE_PROCESS_START'] = rc
3130 os.environ['COVERAGE_PROCESS_START'] = rc
3128 covdir = os.path.join(self._installdir, '..', 'coverage')
3131 covdir = os.path.join(self._installdir, '..', 'coverage')
3129 try:
3132 try:
3130 os.mkdir(covdir)
3133 os.mkdir(covdir)
3131 except OSError as e:
3134 except OSError as e:
3132 if e.errno != errno.EEXIST:
3135 if e.errno != errno.EEXIST:
3133 raise
3136 raise
3134
3137
3135 os.environ['COVERAGE_DIR'] = covdir
3138 os.environ['COVERAGE_DIR'] = covdir
3136
3139
3137 def _checkhglib(self, verb):
3140 def _checkhglib(self, verb):
3138 """Ensure that the 'mercurial' package imported by python is
3141 """Ensure that the 'mercurial' package imported by python is
3139 the one we expect it to be. If not, print a warning to stderr."""
3142 the one we expect it to be. If not, print a warning to stderr."""
3140 if ((self._bindir == self._pythondir) and
3143 if ((self._bindir == self._pythondir) and
3141 (self._bindir != self._tmpbindir)):
3144 (self._bindir != self._tmpbindir)):
3142 # The pythondir has been inferred from --with-hg flag.
3145 # The pythondir has been inferred from --with-hg flag.
3143 # We cannot expect anything sensible here.
3146 # We cannot expect anything sensible here.
3144 return
3147 return
3145 expecthg = os.path.join(self._pythondir, b'mercurial')
3148 expecthg = os.path.join(self._pythondir, b'mercurial')
3146 actualhg = self._gethgpath()
3149 actualhg = self._gethgpath()
3147 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3150 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3148 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3151 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3149 ' (expected %s)\n'
3152 ' (expected %s)\n'
3150 % (verb, actualhg, expecthg))
3153 % (verb, actualhg, expecthg))
3151 def _gethgpath(self):
3154 def _gethgpath(self):
3152 """Return the path to the mercurial package that is actually found by
3155 """Return the path to the mercurial package that is actually found by
3153 the current Python interpreter."""
3156 the current Python interpreter."""
3154 if self._hgpath is not None:
3157 if self._hgpath is not None:
3155 return self._hgpath
3158 return self._hgpath
3156
3159
3157 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3160 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3158 cmd = cmd % PYTHON
3161 cmd = cmd % PYTHON
3159 if PYTHON3:
3162 if PYTHON3:
3160 cmd = _strpath(cmd)
3163 cmd = _strpath(cmd)
3161
3164
3162 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3165 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3163 out, err = p.communicate()
3166 out, err = p.communicate()
3164
3167
3165 self._hgpath = out.strip()
3168 self._hgpath = out.strip()
3166
3169
3167 return self._hgpath
3170 return self._hgpath
3168
3171
3169 def _installchg(self):
3172 def _installchg(self):
3170 """Install chg into the test environment"""
3173 """Install chg into the test environment"""
3171 vlog('# Performing temporary installation of CHG')
3174 vlog('# Performing temporary installation of CHG')
3172 assert os.path.dirname(self._bindir) == self._installdir
3175 assert os.path.dirname(self._bindir) == self._installdir
3173 assert self._hgroot, 'must be called after _installhg()'
3176 assert self._hgroot, 'must be called after _installhg()'
3174 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3177 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3175 % {b'make': 'make', # TODO: switch by option or environment?
3178 % {b'make': 'make', # TODO: switch by option or environment?
3176 b'prefix': self._installdir})
3179 b'prefix': self._installdir})
3177 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3180 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3178 vlog("# Running", cmd)
3181 vlog("# Running", cmd)
3179 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3182 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3180 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3183 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3181 stderr=subprocess.STDOUT)
3184 stderr=subprocess.STDOUT)
3182 out, _err = proc.communicate()
3185 out, _err = proc.communicate()
3183 if proc.returncode != 0:
3186 if proc.returncode != 0:
3184 if PYTHON3:
3187 if PYTHON3:
3185 sys.stdout.buffer.write(out)
3188 sys.stdout.buffer.write(out)
3186 else:
3189 else:
3187 sys.stdout.write(out)
3190 sys.stdout.write(out)
3188 sys.exit(1)
3191 sys.exit(1)
3189
3192
3190 def _outputcoverage(self):
3193 def _outputcoverage(self):
3191 """Produce code coverage output."""
3194 """Produce code coverage output."""
3192 import coverage
3195 import coverage
3193 coverage = coverage.coverage
3196 coverage = coverage.coverage
3194
3197
3195 vlog('# Producing coverage report')
3198 vlog('# Producing coverage report')
3196 # chdir is the easiest way to get short, relative paths in the
3199 # chdir is the easiest way to get short, relative paths in the
3197 # output.
3200 # output.
3198 os.chdir(self._hgroot)
3201 os.chdir(self._hgroot)
3199 covdir = os.path.join(self._installdir, '..', 'coverage')
3202 covdir = os.path.join(self._installdir, '..', 'coverage')
3200 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3203 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3201
3204
3202 # Map install directory paths back to source directory.
3205 # Map install directory paths back to source directory.
3203 cov.config.paths['srcdir'] = ['.', self._pythondir]
3206 cov.config.paths['srcdir'] = ['.', self._pythondir]
3204
3207
3205 cov.combine()
3208 cov.combine()
3206
3209
3207 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3210 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3208 cov.report(ignore_errors=True, omit=omit)
3211 cov.report(ignore_errors=True, omit=omit)
3209
3212
3210 if self.options.htmlcov:
3213 if self.options.htmlcov:
3211 htmldir = os.path.join(self._outputdir, 'htmlcov')
3214 htmldir = os.path.join(self._outputdir, 'htmlcov')
3212 cov.html_report(directory=htmldir, omit=omit)
3215 cov.html_report(directory=htmldir, omit=omit)
3213 if self.options.annotate:
3216 if self.options.annotate:
3214 adir = os.path.join(self._outputdir, 'annotated')
3217 adir = os.path.join(self._outputdir, 'annotated')
3215 if not os.path.isdir(adir):
3218 if not os.path.isdir(adir):
3216 os.mkdir(adir)
3219 os.mkdir(adir)
3217 cov.annotate(directory=adir, omit=omit)
3220 cov.annotate(directory=adir, omit=omit)
3218
3221
3219 def _findprogram(self, program):
3222 def _findprogram(self, program):
3220 """Search PATH for a executable program"""
3223 """Search PATH for a executable program"""
3221 dpb = _bytespath(os.defpath)
3224 dpb = _bytespath(os.defpath)
3222 sepb = _bytespath(os.pathsep)
3225 sepb = _bytespath(os.pathsep)
3223 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3226 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3224 name = os.path.join(p, program)
3227 name = os.path.join(p, program)
3225 if os.name == 'nt' or os.access(name, os.X_OK):
3228 if os.name == 'nt' or os.access(name, os.X_OK):
3226 return name
3229 return name
3227 return None
3230 return None
3228
3231
3229 def _checktools(self):
3232 def _checktools(self):
3230 """Ensure tools required to run tests are present."""
3233 """Ensure tools required to run tests are present."""
3231 for p in self.REQUIREDTOOLS:
3234 for p in self.REQUIREDTOOLS:
3232 if os.name == 'nt' and not p.endswith(b'.exe'):
3235 if os.name == 'nt' and not p.endswith(b'.exe'):
3233 p += b'.exe'
3236 p += b'.exe'
3234 found = self._findprogram(p)
3237 found = self._findprogram(p)
3235 if found:
3238 if found:
3236 vlog("# Found prerequisite", p, "at", found)
3239 vlog("# Found prerequisite", p, "at", found)
3237 else:
3240 else:
3238 print("WARNING: Did not find prerequisite tool: %s " %
3241 print("WARNING: Did not find prerequisite tool: %s " %
3239 p.decode("utf-8"))
3242 p.decode("utf-8"))
3240
3243
3241 def aggregateexceptions(path):
3244 def aggregateexceptions(path):
3242 exceptioncounts = collections.Counter()
3245 exceptioncounts = collections.Counter()
3243 testsbyfailure = collections.defaultdict(set)
3246 testsbyfailure = collections.defaultdict(set)
3244 failuresbytest = collections.defaultdict(set)
3247 failuresbytest = collections.defaultdict(set)
3245
3248
3246 for f in os.listdir(path):
3249 for f in os.listdir(path):
3247 with open(os.path.join(path, f), 'rb') as fh:
3250 with open(os.path.join(path, f), 'rb') as fh:
3248 data = fh.read().split(b'\0')
3251 data = fh.read().split(b'\0')
3249 if len(data) != 5:
3252 if len(data) != 5:
3250 continue
3253 continue
3251
3254
3252 exc, mainframe, hgframe, hgline, testname = data
3255 exc, mainframe, hgframe, hgline, testname = data
3253 exc = exc.decode('utf-8')
3256 exc = exc.decode('utf-8')
3254 mainframe = mainframe.decode('utf-8')
3257 mainframe = mainframe.decode('utf-8')
3255 hgframe = hgframe.decode('utf-8')
3258 hgframe = hgframe.decode('utf-8')
3256 hgline = hgline.decode('utf-8')
3259 hgline = hgline.decode('utf-8')
3257 testname = testname.decode('utf-8')
3260 testname = testname.decode('utf-8')
3258
3261
3259 key = (hgframe, hgline, exc)
3262 key = (hgframe, hgline, exc)
3260 exceptioncounts[key] += 1
3263 exceptioncounts[key] += 1
3261 testsbyfailure[key].add(testname)
3264 testsbyfailure[key].add(testname)
3262 failuresbytest[testname].add(key)
3265 failuresbytest[testname].add(key)
3263
3266
3267 # Find the test having the fewest failures for each failure.
3270 # Find the test having the fewest failures for each failure.
3265 leastfailing = {}
3268 leastfailing = {}
3266 for key, tests in testsbyfailure.items():
3269 for key, tests in testsbyfailure.items():
3267 fewesttest = None
3270 fewesttest = None
3268 fewestcount = 99999999
3271 fewestcount = 99999999
3269 for test in sorted(tests):
3272 for test in sorted(tests):
3270 if len(failuresbytest[test]) < fewestcount:
3273 if len(failuresbytest[test]) < fewestcount:
3271 fewesttest = test
3274 fewesttest = test
3272 fewestcount = len(failuresbytest[test])
3275 fewestcount = len(failuresbytest[test])
3273
3276
3274 leastfailing[key] = (fewestcount, fewesttest)
3277 leastfailing[key] = (fewestcount, fewesttest)
3275
3278
3276 # Create a combined counter so we can sort by total occurrences and
3279 # Create a combined counter so we can sort by total occurrences and
3277 # impacted tests.
3280 # impacted tests.
3278 combined = {}
3281 combined = {}
3279 for key in exceptioncounts:
3282 for key in exceptioncounts:
3280 combined[key] = (exceptioncounts[key],
3283 combined[key] = (exceptioncounts[key],
3281 len(testsbyfailure[key]),
3284 len(testsbyfailure[key]),
3282 leastfailing[key][0],
3285 leastfailing[key][0],
3283 leastfailing[key][1])
3286 leastfailing[key][1])
3284
3287
3285 return {
3288 return {
3286 'exceptioncounts': exceptioncounts,
3289 'exceptioncounts': exceptioncounts,
3287 'total': sum(exceptioncounts.values()),
3290 'total': sum(exceptioncounts.values()),
3288 'combined': combined,
3291 'combined': combined,
3289 'leastfailing': leastfailing,
3292 'leastfailing': leastfailing,
3290 'byfailure': testsbyfailure,
3293 'byfailure': testsbyfailure,
3291 'bytest': failuresbytest,
3294 'bytest': failuresbytest,
3292 }
3295 }
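
aggregateexceptions() returns plain dictionaries, so callers can rank failures however they like. A hedged usage sketch (the directory name below is hypothetical) that sorts by total occurrences:

    report = aggregateexceptions('exceptions-dir')  # hypothetical path
    for key, value in sorted(report['combined'].items(),
                             key=lambda kv: kv[1][0], reverse=True):
        frame, line, exc = key
        count, testcount, leastcount, leasttest = value
        print('%d occurrences in %d tests (simplest: %s): %s'
              % (count, testcount, leasttest, exc))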
3293
3296
3294 if __name__ == '__main__':
3297 if __name__ == '__main__':
3295 runner = TestRunner()
3298 runner = TestRunner()
3296
3299
3297 try:
3300 try:
3298 import msvcrt
3301 import msvcrt
3299 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3302 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3300 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3303 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3301 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3304 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3302 except ImportError:
3305 except ImportError:
3303 pass
3306 pass
3304
3307
3305 sys.exit(runner.run(sys.argv[1:]))
3308 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1938 +1,1938 b''
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ . "$TESTDIR/helper-runtests.sh"
5 $ . "$TESTDIR/helper-runtests.sh"
6
6
7 Smoke test with install
7 Smoke test with install
8 ============
8 ============
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10 running 0 tests using 0 parallel processes
11
11
12 # Ran 0 tests, 0 skipped, 0 failed.
12 # Ran 0 tests, 0 skipped, 0 failed.
13
13
14 Define a helper to avoid the install step
14 Define a helper to avoid the install step
15 =============
15 =============
16 $ rt()
16 $ rt()
17 > {
17 > {
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 > }
19 > }
20
20
21 error paths
21 error paths
22
22
23 #if symlink
23 #if symlink
24 $ ln -s `which true` hg
24 $ ln -s `which true` hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 warning: --with-hg should specify an hg script
26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
27 running 0 tests using 0 parallel processes
28
28
29 # Ran 0 tests, 0 skipped, 0 failed.
29 # Ran 0 tests, 0 skipped, 0 failed.
30 $ rm hg
30 $ rm hg
31 #endif
31 #endif
32
32
33 #if execbit
33 #if execbit
34 $ touch hg
34 $ touch hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 usage: run-tests.py [options] [tests]
36 usage: run-tests.py [options] [tests]
37 run-tests.py: error: --with-hg must specify an executable hg script
37 run-tests.py: error: --with-hg must specify an executable hg script
38 [2]
38 [2]
39 $ rm hg
39 $ rm hg
40 #endif
40 #endif
41
41
42 Features for testing optional lines
42 Features for testing optional lines
43 ===================================
43 ===================================
44
44
45 $ cat > hghaveaddon.py <<EOF
45 $ cat > hghaveaddon.py <<EOF
46 > import hghave
46 > import hghave
47 > @hghave.check("custom", "custom hghave feature")
47 > @hghave.check("custom", "custom hghave feature")
48 > def has_custom():
48 > def has_custom():
49 > return True
49 > return True
50 > @hghave.check("missing", "missing hghave feature")
50 > @hghave.check("missing", "missing hghave feature")
51 > def has_missing():
51 > def has_missing():
52 > return False
52 > return False
53 > EOF
53 > EOF
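
The hghaveaddon.py file above registers two extra feature checks that the "(custom !)" and "(missing !)" annotations below rely on. For illustration only, a minimal registry decorator in the spirit of hghave.check() (the real hghave module in the Mercurial tree differs) could look like:

    checks = {}

    def check(name, desc):
        # Register a feature-probe function under the given name.
        def register(func):
            checks[name] = (func, desc)
            return func
        return register

    @check("custom", "custom hghave feature")
    def has_custom():
        return True

    # checks["custom"][0]() -> True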
54
54
55 an empty test
55 an empty test
56 =======================
56 =======================
57
57
58 $ touch test-empty.t
58 $ touch test-empty.t
59 $ rt
59 $ rt
60 running 1 tests using 1 parallel processes
60 running 1 tests using 1 parallel processes
61 .
61 .
62 # Ran 1 tests, 0 skipped, 0 failed.
62 # Ran 1 tests, 0 skipped, 0 failed.
63 $ rm test-empty.t
63 $ rm test-empty.t
64
64
65 a successful test
65 a successful test
66 =======================
66 =======================
67
67
68 $ cat > test-success.t << EOF
68 $ cat > test-success.t << EOF
69 > $ echo babar
69 > $ echo babar
70 > babar
70 > babar
71 > $ echo xyzzy
71 > $ echo xyzzy
72 > dont_print (?)
72 > dont_print (?)
73 > nothing[42]line (re) (?)
73 > nothing[42]line (re) (?)
74 > never*happens (glob) (?)
74 > never*happens (glob) (?)
75 > more_nothing (?)
75 > more_nothing (?)
76 > xyzzy
76 > xyzzy
77 > nor this (?)
77 > nor this (?)
78 > $ printf 'abc\ndef\nxyz\n'
78 > $ printf 'abc\ndef\nxyz\n'
79 > 123 (?)
79 > 123 (?)
80 > abc
80 > abc
81 > def (?)
81 > def (?)
82 > 456 (?)
82 > 456 (?)
83 > xyz
83 > xyz
84 > $ printf 'zyx\nwvu\ntsr\n'
84 > $ printf 'zyx\nwvu\ntsr\n'
85 > abc (?)
85 > abc (?)
86 > zyx (custom !)
86 > zyx (custom !)
87 > wvu
87 > wvu
88 > no_print (no-custom !)
88 > no_print (no-custom !)
89 > tsr (no-missing !)
89 > tsr (no-missing !)
90 > missing (missing !)
90 > missing (missing !)
91 > EOF
91 > EOF
92
92
93 $ rt
93 $ rt
94 running 1 tests using 1 parallel processes
94 running 1 tests using 1 parallel processes
95 .
95 .
96 # Ran 1 tests, 0 skipped, 0 failed.
96 # Ran 1 tests, 0 skipped, 0 failed.
97
97
98 failing test
98 failing test
99 ==================
99 ==================
100
100
101 test churn with globs
101 test churn with globs
102 $ cat > test-failure.t <<EOF
102 $ cat > test-failure.t <<EOF
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 > bar*bad (glob)
104 > bar*bad (glob)
105 > bar*baz (glob)
105 > bar*baz (glob)
106 > | fo (re)
106 > | fo (re)
107 > EOF
107 > EOF
108 $ rt test-failure.t
108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
109 running 1 tests using 1 parallel processes
110
110
111 --- $TESTTMP/test-failure.t
111 --- $TESTTMP/test-failure.t
112 +++ $TESTTMP/test-failure.t.err
112 +++ $TESTTMP/test-failure.t.err
113 @@ -1,4 +1,4 @@
113 @@ -1,4 +1,4 @@
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 + bar*baz (glob)
115 + bar*baz (glob)
116 bar*bad (glob)
116 bar*bad (glob)
117 - bar*baz (glob)
117 - bar*baz (glob)
118 - | fo (re)
118 - | fo (re)
119 + foo
119 + foo
120
120
121 ERROR: test-failure.t output changed
121 ERROR: test-failure.t output changed
122 !
122 !
123 Failed test-failure.t: output changed
123 Failed test-failure.t: output changed
124 # Ran 1 tests, 0 skipped, 1 failed.
124 # Ran 1 tests, 0 skipped, 1 failed.
125 python hash seed: * (glob)
125 python hash seed: * (glob)
126 [1]
126 [1]
127
127
128 test how multiple globs get matched with lines in output
128 test how multiple globs get matched with lines in output
129 $ cat > test-failure-globs.t <<EOF
129 $ cat > test-failure-globs.t <<EOF
130 > $ echo "context"; echo "context"; \
130 > $ echo "context"; echo "context"; \
131 > echo "key: 1"; echo "value: not a"; \
131 > echo "key: 1"; echo "value: not a"; \
132 > echo "key: 2"; echo "value: not b"; \
132 > echo "key: 2"; echo "value: not b"; \
133 > echo "key: 3"; echo "value: c"; \
133 > echo "key: 3"; echo "value: c"; \
134 > echo "key: 4"; echo "value: d"
134 > echo "key: 4"; echo "value: d"
135 > context
135 > context
136 > context
136 > context
137 > key: 1
137 > key: 1
138 > value: a
138 > value: a
139 > key: 2
139 > key: 2
140 > value: b
140 > value: b
141 > key: 3
141 > key: 3
142 > value: * (glob)
142 > value: * (glob)
143 > key: 4
143 > key: 4
144 > value: * (glob)
144 > value: * (glob)
145 > EOF
145 > EOF
146 $ rt test-failure-globs.t
146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
147 running 1 tests using 1 parallel processes
148
148
149 --- $TESTTMP/test-failure-globs.t
149 --- $TESTTMP/test-failure-globs.t
150 +++ $TESTTMP/test-failure-globs.t.err
150 +++ $TESTTMP/test-failure-globs.t.err
151 @@ -2,9 +2,9 @@
151 @@ -2,9 +2,9 @@
152 context
152 context
153 context
153 context
154 key: 1
154 key: 1
155 - value: a
155 - value: a
156 + value: not a
156 + value: not a
157 key: 2
157 key: 2
158 - value: b
158 - value: b
159 + value: not b
159 + value: not b
160 key: 3
160 key: 3
161 value: * (glob)
161 value: * (glob)
162 key: 4
162 key: 4
163
163
164 ERROR: test-failure-globs.t output changed
164 ERROR: test-failure-globs.t output changed
165 !
165 !
166 Failed test-failure-globs.t: output changed
166 Failed test-failure-globs.t: output changed
167 # Ran 1 tests, 0 skipped, 1 failed.
167 # Ran 1 tests, 0 skipped, 1 failed.
168 python hash seed: * (glob)
168 python hash seed: * (glob)
169 [1]
169 [1]
170 $ rm test-failure-globs.t
170 $ rm test-failure-globs.t
171
171
172 test diff colorisation
172 test diff colorisation
173
173
174 #if no-windows pygments
174 #if no-windows pygments
175 $ rt test-failure.t --color always
175 $ rt test-failure.t --color always
176 running 1 tests using 1 parallel processes
176 running 1 tests using 1 parallel processes
177
177
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 bar*bad (glob)
183 bar*bad (glob)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187
187
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 !
189 !
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 # Ran 1 tests, 0 skipped, 1 failed.
191 # Ran 1 tests, 0 skipped, 1 failed.
192 python hash seed: * (glob)
192 python hash seed: * (glob)
193 [1]
193 [1]
194
194
195 $ rt test-failure.t 2> tmp.log
195 $ rt test-failure.t 2> tmp.log
196 running 1 tests using 1 parallel processes
196 running 1 tests using 1 parallel processes
197 [1]
197 [1]
198 $ cat tmp.log
198 $ cat tmp.log
199
199
200 --- $TESTTMP/test-failure.t
200 --- $TESTTMP/test-failure.t
201 +++ $TESTTMP/test-failure.t.err
201 +++ $TESTTMP/test-failure.t.err
202 @@ -1,4 +1,4 @@
202 @@ -1,4 +1,4 @@
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 + bar*baz (glob)
204 + bar*baz (glob)
205 bar*bad (glob)
205 bar*bad (glob)
206 - bar*baz (glob)
206 - bar*baz (glob)
207 - | fo (re)
207 - | fo (re)
208 + foo
208 + foo
209
209
210 ERROR: test-failure.t output changed
210 ERROR: test-failure.t output changed
211 !
211 !
212 Failed test-failure.t: output changed
212 Failed test-failure.t: output changed
213 # Ran 1 tests, 0 skipped, 1 failed.
213 # Ran 1 tests, 0 skipped, 1 failed.
214 python hash seed: * (glob)
214 python hash seed: * (glob)
215 #endif
215 #endif
216
216
217 $ cat > test-failure.t << EOF
217 $ cat > test-failure.t << EOF
218 > $ true
218 > $ true
219 > should go away (true !)
219 > should go away (true !)
220 > $ true
220 > $ true
221 > should stay (false !)
221 > should stay (false !)
222 >
222 >
223 > Should remove first line, not second or third
223 > Should remove first line, not second or third
224 > $ echo 'testing'
224 > $ echo 'testing'
225 > baz*foo (glob) (true !)
225 > baz*foo (glob) (true !)
226 > foobar*foo (glob) (false !)
226 > foobar*foo (glob) (false !)
227 > te*ting (glob) (true !)
227 > te*ting (glob) (true !)
228 >
228 >
229 > Should keep first two lines, remove third and last
229 > Should keep first two lines, remove third and last
230 > $ echo 'testing'
230 > $ echo 'testing'
231 > test.ng (re) (true !)
231 > test.ng (re) (true !)
232 > foo.ar (re) (false !)
232 > foo.ar (re) (false !)
233 > b.r (re) (true !)
233 > b.r (re) (true !)
234 > missing (?)
234 > missing (?)
235 > awol (true !)
235 > awol (true !)
236 >
236 >
237 > The "missing" line should stay, even though awol is dropped
237 > The "missing" line should stay, even though awol is dropped
238 > $ echo 'testing'
238 > $ echo 'testing'
239 > test.ng (re) (true !)
239 > test.ng (re) (true !)
240 > foo.ar (?)
240 > foo.ar (?)
241 > awol
241 > awol
242 > missing (?)
242 > missing (?)
243 > EOF
243 > EOF
244 $ rt test-failure.t
244 $ rt test-failure.t
245 running 1 tests using 1 parallel processes
245 running 1 tests using 1 parallel processes
246
246
247 --- $TESTTMP/test-failure.t
247 --- $TESTTMP/test-failure.t
248 +++ $TESTTMP/test-failure.t.err
248 +++ $TESTTMP/test-failure.t.err
249 @@ -1,11 +1,9 @@
249 @@ -1,11 +1,9 @@
250 $ true
250 $ true
251 - should go away (true !)
251 - should go away (true !)
252 $ true
252 $ true
253 should stay (false !)
253 should stay (false !)
254
254
255 Should remove first line, not second or third
255 Should remove first line, not second or third
256 $ echo 'testing'
256 $ echo 'testing'
257 - baz*foo (glob) (true !)
257 - baz*foo (glob) (true !)
258 foobar*foo (glob) (false !)
258 foobar*foo (glob) (false !)
259 te*ting (glob) (true !)
259 te*ting (glob) (true !)
260
260
261 foo.ar (re) (false !)
261 foo.ar (re) (false !)
262 missing (?)
262 missing (?)
263 @@ -13,13 +11,10 @@
263 @@ -13,13 +11,10 @@
264 $ echo 'testing'
264 $ echo 'testing'
265 test.ng (re) (true !)
265 test.ng (re) (true !)
266 foo.ar (re) (false !)
266 foo.ar (re) (false !)
267 - b.r (re) (true !)
267 - b.r (re) (true !)
268 missing (?)
268 missing (?)
269 - awol (true !)
269 - awol (true !)
270
270
271 The "missing" line should stay, even though awol is dropped
271 The "missing" line should stay, even though awol is dropped
272 $ echo 'testing'
272 $ echo 'testing'
273 test.ng (re) (true !)
273 test.ng (re) (true !)
274 foo.ar (?)
274 foo.ar (?)
275 - awol
275 - awol
276 missing (?)
276 missing (?)
277
277
278 ERROR: test-failure.t output changed
278 ERROR: test-failure.t output changed
279 !
279 !
280 Failed test-failure.t: output changed
280 Failed test-failure.t: output changed
281 # Ran 1 tests, 0 skipped, 1 failed.
281 # Ran 1 tests, 0 skipped, 1 failed.
282 python hash seed: * (glob)
282 python hash seed: * (glob)
283 [1]
283 [1]
284
284
285 basic failing test
285 basic failing test
286 $ cat > test-failure.t << EOF
286 $ cat > test-failure.t << EOF
287 > $ echo babar
287 > $ echo babar
288 > rataxes
288 > rataxes
289 > This is a noop statement so that
289 > This is a noop statement so that
290 > this test is still more bytes than success.
290 > this test is still more bytes than success.
291 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
297 > EOF
297 > EOF
298
298
299 >>> fh = open('test-failure-unicode.t', 'wb')
299 >>> fh = open('test-failure-unicode.t', 'wb')
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302
302
303 $ rt
303 $ rt
304 running 3 tests using 1 parallel processes
304 running 3 tests using 1 parallel processes
305
305
306 --- $TESTTMP/test-failure.t
306 --- $TESTTMP/test-failure.t
307 +++ $TESTTMP/test-failure.t.err
307 +++ $TESTTMP/test-failure.t.err
308 @@ -1,5 +1,5 @@
308 @@ -1,5 +1,5 @@
309 $ echo babar
309 $ echo babar
310 - rataxes
310 - rataxes
311 + babar
311 + babar
312 This is a noop statement so that
312 This is a noop statement so that
313 this test is still more bytes than success.
313 this test is still more bytes than success.
314 pad pad pad pad............................................................
314 pad pad pad pad............................................................
315
315
316 ERROR: test-failure.t output changed
316 ERROR: test-failure.t output changed
317 !.
317 !.
318 --- $TESTTMP/test-failure-unicode.t
318 --- $TESTTMP/test-failure-unicode.t
319 +++ $TESTTMP/test-failure-unicode.t.err
319 +++ $TESTTMP/test-failure-unicode.t.err
320 @@ -1,2 +1,2 @@
320 @@ -1,2 +1,2 @@
321 $ echo babar\xce\xb1 (esc)
321 $ echo babar\xce\xb1 (esc)
322 - l\xce\xb5\xce\xb5t (esc)
322 - l\xce\xb5\xce\xb5t (esc)
323 + babar\xce\xb1 (esc)
323 + babar\xce\xb1 (esc)
324
324
325 ERROR: test-failure-unicode.t output changed
325 ERROR: test-failure-unicode.t output changed
326 !
326 !
327 Failed test-failure-unicode.t: output changed
327 Failed test-failure.t: output changed
328 Failed test-failure.t: output changed
328 Failed test-failure-unicode.t: output changed
329 # Ran 3 tests, 0 skipped, 2 failed.
329 # Ran 3 tests, 0 skipped, 2 failed.
330 python hash seed: * (glob)
330 python hash seed: * (glob)
331 [1]
331 [1]
332
332
333 test --outputdir
333 test --outputdir
334 $ mkdir output
334 $ mkdir output
335 $ rt --outputdir output
335 $ rt --outputdir output
336 running 3 tests using 1 parallel processes
336 running 3 tests using 1 parallel processes
337
337
338 --- $TESTTMP/test-failure.t
338 --- $TESTTMP/test-failure.t
339 +++ $TESTTMP/output/test-failure.t.err
339 +++ $TESTTMP/output/test-failure.t.err
340 @@ -1,5 +1,5 @@
340 @@ -1,5 +1,5 @@
341 $ echo babar
341 $ echo babar
342 - rataxes
342 - rataxes
343 + babar
343 + babar
344 This is a noop statement so that
344 This is a noop statement so that
345 this test is still more bytes than success.
345 this test is still more bytes than success.
346 pad pad pad pad............................................................
346 pad pad pad pad............................................................
347
347
348 ERROR: test-failure.t output changed
348 ERROR: test-failure.t output changed
349 !.
349 !.
350 --- $TESTTMP/test-failure-unicode.t
350 --- $TESTTMP/test-failure-unicode.t
351 +++ $TESTTMP/output/test-failure-unicode.t.err
351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 @@ -1,2 +1,2 @@
352 @@ -1,2 +1,2 @@
353 $ echo babar\xce\xb1 (esc)
353 $ echo babar\xce\xb1 (esc)
354 - l\xce\xb5\xce\xb5t (esc)
354 - l\xce\xb5\xce\xb5t (esc)
355 + babar\xce\xb1 (esc)
355 + babar\xce\xb1 (esc)
356
356
357 ERROR: test-failure-unicode.t output changed
357 ERROR: test-failure-unicode.t output changed
358 !
358 !
359 Failed test-failure-unicode.t: output changed
359 Failed test-failure.t: output changed
360 Failed test-failure.t: output changed
360 Failed test-failure-unicode.t: output changed
361 # Ran 3 tests, 0 skipped, 2 failed.
361 # Ran 3 tests, 0 skipped, 2 failed.
362 python hash seed: * (glob)
362 python hash seed: * (glob)
363 [1]
363 [1]
364 $ ls -a output
364 $ ls -a output
365 .
365 .
366 ..
366 ..
367 .testtimes
367 .testtimes
368 test-failure-unicode.t.err
368 test-failure-unicode.t.err
369 test-failure.t.err
369 test-failure.t.err
370
370
371 test --xunit support
371 test --xunit support
372 $ rt --xunit=xunit.xml
372 $ rt --xunit=xunit.xml
373 running 3 tests using 1 parallel processes
373 running 3 tests using 1 parallel processes
374
374
375 --- $TESTTMP/test-failure.t
375 --- $TESTTMP/test-failure.t
376 +++ $TESTTMP/test-failure.t.err
376 +++ $TESTTMP/test-failure.t.err
377 @@ -1,5 +1,5 @@
377 @@ -1,5 +1,5 @@
378 $ echo babar
378 $ echo babar
379 - rataxes
379 - rataxes
380 + babar
380 + babar
381 This is a noop statement so that
381 This is a noop statement so that
382 this test is still more bytes than success.
382 this test is still more bytes than success.
383 pad pad pad pad............................................................
383 pad pad pad pad............................................................
384
384
385 ERROR: test-failure.t output changed
385 ERROR: test-failure.t output changed
386 !.
386 !.
387 --- $TESTTMP/test-failure-unicode.t
387 --- $TESTTMP/test-failure-unicode.t
388 +++ $TESTTMP/test-failure-unicode.t.err
388 +++ $TESTTMP/test-failure-unicode.t.err
389 @@ -1,2 +1,2 @@
389 @@ -1,2 +1,2 @@
390 $ echo babar\xce\xb1 (esc)
390 $ echo babar\xce\xb1 (esc)
391 - l\xce\xb5\xce\xb5t (esc)
391 - l\xce\xb5\xce\xb5t (esc)
392 + babar\xce\xb1 (esc)
392 + babar\xce\xb1 (esc)
393
393
394 ERROR: test-failure-unicode.t output changed
394 ERROR: test-failure-unicode.t output changed
395 !
395 !
396 Failed test-failure-unicode.t: output changed
396 Failed test-failure.t: output changed
397 Failed test-failure.t: output changed
397 Failed test-failure-unicode.t: output changed
398 # Ran 3 tests, 0 skipped, 2 failed.
398 # Ran 3 tests, 0 skipped, 2 failed.
399 python hash seed: * (glob)
399 python hash seed: * (glob)
400 [1]
400 [1]
401 $ cat xunit.xml
401 $ cat xunit.xml
402 <?xml version="1.0" encoding="utf-8"?>
402 <?xml version="1.0" encoding="utf-8"?>
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 <testcase name="test-success.t" time="*"/> (glob)
404 <testcase name="test-success.t" time="*"/> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 <failure message="output changed" type="output-mismatch">
406 <failure message="output changed" type="output-mismatch">
407 <![CDATA[--- $TESTTMP/test-failure-unicode.t
407 <![CDATA[--- $TESTTMP/test-failure-unicode.t
408 +++ $TESTTMP/test-failure-unicode.t.err
408 +++ $TESTTMP/test-failure-unicode.t.err
409 @@ -1,2 +1,2 @@
409 @@ -1,2 +1,2 @@
410 $ echo babar\xce\xb1 (esc)
410 $ echo babar\xce\xb1 (esc)
411 - l\xce\xb5\xce\xb5t (esc)
411 - l\xce\xb5\xce\xb5t (esc)
412 + babar\xce\xb1 (esc)
412 + babar\xce\xb1 (esc)
413 ]]> </failure>
413 ]]> </failure>
414 </testcase>
414 </testcase>
415 <testcase name="test-failure.t" time="*"> (glob)
415 <testcase name="test-failure.t" time="*"> (glob)
416 <failure message="output changed" type="output-mismatch">
416 <failure message="output changed" type="output-mismatch">
417 <![CDATA[--- $TESTTMP/test-failure.t
417 <![CDATA[--- $TESTTMP/test-failure.t
418 +++ $TESTTMP/test-failure.t.err
418 +++ $TESTTMP/test-failure.t.err
419 @@ -1,5 +1,5 @@
419 @@ -1,5 +1,5 @@
420 $ echo babar
420 $ echo babar
421 - rataxes
421 - rataxes
422 + babar
422 + babar
423 This is a noop statement so that
423 This is a noop statement so that
424 this test is still more bytes than success.
424 this test is still more bytes than success.
425 pad pad pad pad............................................................
425 pad pad pad pad............................................................
426 ]]> </failure>
426 ]]> </failure>
427 </testcase>
427 </testcase>
428 </testsuite>
428 </testsuite>
429
429
430 $ cat .testtimes
430 $ cat .testtimes
431 test-empty.t * (glob)
431 test-empty.t * (glob)
432 test-failure-globs.t * (glob)
432 test-failure-globs.t * (glob)
433 test-failure-unicode.t * (glob)
433 test-failure-unicode.t * (glob)
434 test-failure.t * (glob)
434 test-failure.t * (glob)
435 test-success.t * (glob)
435 test-success.t * (glob)
436
436
437 $ rt --list-tests
437 $ rt --list-tests
438 test-failure-unicode.t
438 test-failure-unicode.t
439 test-failure.t
439 test-failure.t
440 test-success.t
440 test-success.t
441
441
442 $ rt --list-tests --json
442 $ rt --list-tests --json
443 test-failure-unicode.t
443 test-failure-unicode.t
444 test-failure.t
444 test-failure.t
445 test-success.t
445 test-success.t
446 $ cat report.json
446 $ cat report.json
447 testreport ={
447 testreport ={
448 "test-failure-unicode.t": {
448 "test-failure-unicode.t": {
449 "result": "success"
449 "result": "success"
450 },
450 },
451 "test-failure.t": {
451 "test-failure.t": {
452 "result": "success"
452 "result": "success"
453 },
453 },
454 "test-success.t": {
454 "test-success.t": {
455 "result": "success"
455 "result": "success"
456 }
456 }
457 } (no-eol)
457 } (no-eol)
458
458
459 $ rt --list-tests --xunit=xunit.xml
459 $ rt --list-tests --xunit=xunit.xml
460 test-failure-unicode.t
460 test-failure-unicode.t
461 test-failure.t
461 test-failure.t
462 test-success.t
462 test-success.t
463 $ cat xunit.xml
463 $ cat xunit.xml
464 <?xml version="1.0" encoding="utf-8"?>
464 <?xml version="1.0" encoding="utf-8"?>
465 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
465 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
466 <testcase name="test-failure-unicode.t"/>
466 <testcase name="test-failure-unicode.t"/>
467 <testcase name="test-failure.t"/>
467 <testcase name="test-failure.t"/>
468 <testcase name="test-success.t"/>
468 <testcase name="test-success.t"/>
469 </testsuite>
469 </testsuite>
470
470
471 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
471 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
472 test-failure-unicode.t
472 test-failure-unicode.t
473 test-failure.t
473 test-failure.t
474 $ cat output/report.json
474 $ cat output/report.json
475 testreport ={
475 testreport ={
476 "test-failure-unicode.t": {
476 "test-failure-unicode.t": {
477 "result": "success"
477 "result": "success"
478 },
478 },
479 "test-failure.t": {
479 "test-failure.t": {
480 "result": "success"
480 "result": "success"
481 }
481 }
482 } (no-eol)
482 } (no-eol)
483 $ cat xunit.xml
483 $ cat xunit.xml
484 <?xml version="1.0" encoding="utf-8"?>
484 <?xml version="1.0" encoding="utf-8"?>
485 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
485 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
486 <testcase name="test-failure-unicode.t"/>
486 <testcase name="test-failure-unicode.t"/>
487 <testcase name="test-failure.t"/>
487 <testcase name="test-failure.t"/>
488 </testsuite>
488 </testsuite>
489
489
490 $ rm test-failure-unicode.t
490 $ rm test-failure-unicode.t
491
491
492 test for --retest
492 test for --retest
493 ====================
493 ====================
494
494
495 $ rt --retest
495 $ rt --retest
496 running 2 tests using 1 parallel processes
496 running 2 tests using 1 parallel processes
497
497
498 --- $TESTTMP/test-failure.t
498 --- $TESTTMP/test-failure.t
499 +++ $TESTTMP/test-failure.t.err
499 +++ $TESTTMP/test-failure.t.err
500 @@ -1,5 +1,5 @@
500 @@ -1,5 +1,5 @@
501 $ echo babar
501 $ echo babar
502 - rataxes
502 - rataxes
503 + babar
503 + babar
504 This is a noop statement so that
504 This is a noop statement so that
505 this test is still more bytes than success.
505 this test is still more bytes than success.
506 pad pad pad pad............................................................
506 pad pad pad pad............................................................
507
507
508 ERROR: test-failure.t output changed
508 ERROR: test-failure.t output changed
509 !
509 !
510 Failed test-failure.t: output changed
510 Failed test-failure.t: output changed
511 # Ran 2 tests, 1 skipped, 1 failed.
511 # Ran 2 tests, 1 skipped, 1 failed.
512 python hash seed: * (glob)
512 python hash seed: * (glob)
513 [1]
513 [1]
514
514
515 --retest works with --outputdir
515 --retest works with --outputdir
516 $ rm -r output
516 $ rm -r output
517 $ mkdir output
517 $ mkdir output
518 $ mv test-failure.t.err output
518 $ mv test-failure.t.err output
519 $ rt --retest --outputdir output
519 $ rt --retest --outputdir output
520 running 2 tests using 1 parallel processes
520 running 2 tests using 1 parallel processes
521
521
522 --- $TESTTMP/test-failure.t
522 --- $TESTTMP/test-failure.t
523 +++ $TESTTMP/output/test-failure.t.err
523 +++ $TESTTMP/output/test-failure.t.err
524 @@ -1,5 +1,5 @@
524 @@ -1,5 +1,5 @@
525 $ echo babar
525 $ echo babar
526 - rataxes
526 - rataxes
527 + babar
527 + babar
528 This is a noop statement so that
528 This is a noop statement so that
529 this test is still more bytes than success.
529 this test is still more bytes than success.
530 pad pad pad pad............................................................
530 pad pad pad pad............................................................
531
531
532 ERROR: test-failure.t output changed
532 ERROR: test-failure.t output changed
533 !
533 !
534 Failed test-failure.t: output changed
534 Failed test-failure.t: output changed
535 # Ran 2 tests, 1 skipped, 1 failed.
535 # Ran 2 tests, 1 skipped, 1 failed.
536 python hash seed: * (glob)
536 python hash seed: * (glob)
537 [1]
537 [1]
538
538
539 Selecting Tests To Run
539 Selecting Tests To Run
540 ======================
540 ======================
541
541
542 successful
542 successful
543
543
544 $ rt test-success.t
544 $ rt test-success.t
545 running 1 tests using 1 parallel processes
545 running 1 tests using 1 parallel processes
546 .
546 .
547 # Ran 1 tests, 0 skipped, 0 failed.
547 # Ran 1 tests, 0 skipped, 0 failed.
548
548
549 success w/ keyword
549 success w/ keyword
550 $ rt -k xyzzy
550 $ rt -k xyzzy
551 running 2 tests using 1 parallel processes
551 running 2 tests using 1 parallel processes
552 .
552 .
553 # Ran 2 tests, 1 skipped, 0 failed.
553 # Ran 2 tests, 1 skipped, 0 failed.
554
554
555 failed
555 failed
556
556
557 $ rt test-failure.t
557 $ rt test-failure.t
558 running 1 tests using 1 parallel processes
558 running 1 tests using 1 parallel processes
559
559
560 --- $TESTTMP/test-failure.t
560 --- $TESTTMP/test-failure.t
561 +++ $TESTTMP/test-failure.t.err
561 +++ $TESTTMP/test-failure.t.err
562 @@ -1,5 +1,5 @@
562 @@ -1,5 +1,5 @@
563 $ echo babar
563 $ echo babar
564 - rataxes
564 - rataxes
565 + babar
565 + babar
566 This is a noop statement so that
566 This is a noop statement so that
567 this test is still more bytes than success.
567 this test is still more bytes than success.
568 pad pad pad pad............................................................
568 pad pad pad pad............................................................
569
569
570 ERROR: test-failure.t output changed
570 ERROR: test-failure.t output changed
571 !
571 !
572 Failed test-failure.t: output changed
572 Failed test-failure.t: output changed
573 # Ran 1 tests, 0 skipped, 1 failed.
573 # Ran 1 tests, 0 skipped, 1 failed.
574 python hash seed: * (glob)
574 python hash seed: * (glob)
575 [1]
575 [1]
576
576
577 failure w/ keyword
577 failure w/ keyword
578 $ rt -k rataxes
578 $ rt -k rataxes
579 running 2 tests using 1 parallel processes
579 running 2 tests using 1 parallel processes
580
580
581 --- $TESTTMP/test-failure.t
581 --- $TESTTMP/test-failure.t
582 +++ $TESTTMP/test-failure.t.err
582 +++ $TESTTMP/test-failure.t.err
583 @@ -1,5 +1,5 @@
583 @@ -1,5 +1,5 @@
584 $ echo babar
584 $ echo babar
585 - rataxes
585 - rataxes
586 + babar
586 + babar
587 This is a noop statement so that
587 This is a noop statement so that
588 this test is still more bytes than success.
588 this test is still more bytes than success.
589 pad pad pad pad............................................................
589 pad pad pad pad............................................................
590
590
591 ERROR: test-failure.t output changed
591 ERROR: test-failure.t output changed
592 !
592 !
593 Failed test-failure.t: output changed
593 Failed test-failure.t: output changed
594 # Ran 2 tests, 1 skipped, 1 failed.
594 # Ran 2 tests, 1 skipped, 1 failed.
595 python hash seed: * (glob)
595 python hash seed: * (glob)
596 [1]
596 [1]
597
597
598 Verify that when a process fails to start we show a useful message
598 Verify that when a process fails to start we show a useful message
599 ==================================================================
599 ==================================================================
600
600
601 $ cat > test-serve-fail.t <<EOF
601 $ cat > test-serve-fail.t <<EOF
602 > $ echo 'abort: child process failed to start blah'
602 > $ echo 'abort: child process failed to start blah'
603 > EOF
603 > EOF
604 $ rt test-serve-fail.t
604 $ rt test-serve-fail.t
605 running 1 tests using 1 parallel processes
605 running 1 tests using 1 parallel processes
606
606
607 --- $TESTTMP/test-serve-fail.t
607 --- $TESTTMP/test-serve-fail.t
608 +++ $TESTTMP/test-serve-fail.t.err
608 +++ $TESTTMP/test-serve-fail.t.err
609 @@ -1* +1,2 @@ (glob)
609 @@ -1* +1,2 @@ (glob)
610 $ echo 'abort: child process failed to start blah'
610 $ echo 'abort: child process failed to start blah'
611 + abort: child process failed to start blah
611 + abort: child process failed to start blah
612
612
613 ERROR: test-serve-fail.t output changed
613 ERROR: test-serve-fail.t output changed
614 !
614 !
615 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
615 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
616 # Ran 1 tests, 0 skipped, 1 failed.
616 # Ran 1 tests, 0 skipped, 1 failed.
617 python hash seed: * (glob)
617 python hash seed: * (glob)
618 [1]
618 [1]
619 $ rm test-serve-fail.t
619 $ rm test-serve-fail.t
620
620
621 Verify that we can try other ports
621 Verify that we can try other ports
622 ===================================
622 ===================================
623
623
624 Extensions aren't inherited by the invoked run-tests.py. An extension
624 Extensions aren't inherited by the invoked run-tests.py. An extension
625 introducing a repository requirement could cause this to fail. So we force
625 introducing a repository requirement could cause this to fail. So we force
626 HGRCPATH to get a clean environment.
626 HGRCPATH to get a clean environment.
627
627
628 $ HGRCPATH= hg init inuse
628 $ HGRCPATH= hg init inuse
629 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
629 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
630 $ cat blocks.pid >> $DAEMON_PIDS
630 $ cat blocks.pid >> $DAEMON_PIDS
631 $ cat > test-serve-inuse.t <<EOF
631 $ cat > test-serve-inuse.t <<EOF
632 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
632 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
633 > $ cat hg.pid >> \$DAEMON_PIDS
633 > $ cat hg.pid >> \$DAEMON_PIDS
634 > EOF
634 > EOF
635 $ rt test-serve-inuse.t
635 $ rt test-serve-inuse.t
636 running 1 tests using 1 parallel processes
636 running 1 tests using 1 parallel processes
637 .
637 .
638 # Ran 1 tests, 0 skipped, 0 failed.
638 # Ran 1 tests, 0 skipped, 0 failed.
639 $ rm test-serve-inuse.t
639 $ rm test-serve-inuse.t
640 $ killdaemons.py $DAEMON_PIDS
640 $ killdaemons.py $DAEMON_PIDS
641
641
642 Running In Debug Mode
642 Running In Debug Mode
643 ======================
643 ======================
644
644
645 $ rt --debug 2>&1 | grep -v pwd
645 $ rt --debug 2>&1 | grep -v pwd
646 running 2 tests using 1 parallel processes
646 running 2 tests using 1 parallel processes
647 + alias hg=hg.exe (windows !)
647 + alias hg=hg.exe (windows !)
648 + echo *SALT* 0 0 (glob)
648 + echo *SALT* 0 0 (glob)
649 *SALT* 0 0 (glob)
649 *SALT* 0 0 (glob)
650 + echo babar
650 + echo babar
651 babar
651 babar
652 + echo *SALT* 10 0 (glob)
652 + echo *SALT* 10 0 (glob)
653 *SALT* 10 0 (glob)
653 *SALT* 10 0 (glob)
654 .+ alias hg=hg.exe (windows !)
654 .+ alias hg=hg.exe (windows !)
655 *+ echo *SALT* 0 0 (glob)
655 *+ echo *SALT* 0 0 (glob)
656 *SALT* 0 0 (glob)
656 *SALT* 0 0 (glob)
657 + echo babar
657 + echo babar
658 babar
658 babar
659 + echo *SALT* 2 0 (glob)
659 + echo *SALT* 2 0 (glob)
660 *SALT* 2 0 (glob)
660 *SALT* 2 0 (glob)
661 + echo xyzzy
661 + echo xyzzy
662 xyzzy
662 xyzzy
663 + echo *SALT* 9 0 (glob)
663 + echo *SALT* 9 0 (glob)
664 *SALT* 9 0 (glob)
664 *SALT* 9 0 (glob)
665 + printf *abc\ndef\nxyz\n* (glob)
665 + printf *abc\ndef\nxyz\n* (glob)
666 abc
666 abc
667 def
667 def
668 xyz
668 xyz
669 + echo *SALT* 15 0 (glob)
669 + echo *SALT* 15 0 (glob)
670 *SALT* 15 0 (glob)
670 *SALT* 15 0 (glob)
671 + printf *zyx\nwvu\ntsr\n* (glob)
671 + printf *zyx\nwvu\ntsr\n* (glob)
672 zyx
672 zyx
673 wvu
673 wvu
674 tsr
674 tsr
675 + echo *SALT* 22 0 (glob)
675 + echo *SALT* 22 0 (glob)
676 *SALT* 22 0 (glob)
676 *SALT* 22 0 (glob)
677 .
677 .
678 # Ran 2 tests, 0 skipped, 0 failed.
678 # Ran 2 tests, 0 skipped, 0 failed.
679
679
680 Parallel runs
680 Parallel runs
681 ==============
681 ==============
682
682
683 (duplicate the failing test to get predictable output)
683 (duplicate the failing test to get predictable output)
684 $ cp test-failure.t test-failure-copy.t
684 $ cp test-failure.t test-failure-copy.t
685
685
686 $ rt --jobs 2 test-failure*.t -n
686 $ rt --jobs 2 test-failure*.t -n
687 running 2 tests using 2 parallel processes
687 running 2 tests using 2 parallel processes
688 !!
688 !!
689 Failed test-failure*.t: output changed (glob)
689 Failed test-failure*.t: output changed (glob)
690 Failed test-failure*.t: output changed (glob)
690 Failed test-failure*.t: output changed (glob)
691 # Ran 2 tests, 0 skipped, 2 failed.
691 # Ran 2 tests, 0 skipped, 2 failed.
692 python hash seed: * (glob)
692 python hash seed: * (glob)
693 [1]
693 [1]
694
694
695 failures in parallel with --first should only print one failure
695 failures in parallel with --first should only print one failure
696 $ rt --jobs 2 --first test-failure*.t
696 $ rt --jobs 2 --first test-failure*.t
697 running 2 tests using 2 parallel processes
697 running 2 tests using 2 parallel processes
698
698
699 --- $TESTTMP/test-failure*.t (glob)
699 --- $TESTTMP/test-failure*.t (glob)
700 +++ $TESTTMP/test-failure*.t.err (glob)
700 +++ $TESTTMP/test-failure*.t.err (glob)
701 @@ -1,5 +1,5 @@
701 @@ -1,5 +1,5 @@
702 $ echo babar
702 $ echo babar
703 - rataxes
703 - rataxes
704 + babar
704 + babar
705 This is a noop statement so that
705 This is a noop statement so that
706 this test is still more bytes than success.
706 this test is still more bytes than success.
707 pad pad pad pad............................................................
707 pad pad pad pad............................................................
708
708
709 Failed test-failure*.t: output changed (glob)
709 Failed test-failure*.t: output changed (glob)
710 Failed test-failure*.t: output changed (glob)
710 Failed test-failure*.t: output changed (glob)
711 # Ran 2 tests, 0 skipped, 2 failed.
711 # Ran 2 tests, 0 skipped, 2 failed.
712 python hash seed: * (glob)
712 python hash seed: * (glob)
713 [1]
713 [1]
714
714
715
715
716 (delete the duplicated test file)
716 (delete the duplicated test file)
717 $ rm test-failure-copy.t
717 $ rm test-failure-copy.t
718
718
719 multiple runs per test should be parallelized
719 multiple runs per test should be parallelized
720
720
721 $ rt --jobs 2 --runs-per-test 2 test-success.t
721 $ rt --jobs 2 --runs-per-test 2 test-success.t
722 running 2 tests using 2 parallel processes
722 running 2 tests using 2 parallel processes
723 ..
723 ..
724 # Ran 2 tests, 0 skipped, 0 failed.
724 # Ran 2 tests, 0 skipped, 0 failed.
725
725
726 Interactive run
726 Interactive run
727 ===============
727 ===============
728
728
729 (backup the failing test)
729 (backup the failing test)
730 $ cp test-failure.t backup
730 $ cp test-failure.t backup
731
731
732 Refuse the fix
732 Refuse the fix
733
733
734 $ echo 'n' | rt -i
734 $ echo 'n' | rt -i
735 running 2 tests using 1 parallel processes
735 running 2 tests using 1 parallel processes
736
736
737 --- $TESTTMP/test-failure.t
737 --- $TESTTMP/test-failure.t
738 +++ $TESTTMP/test-failure.t.err
738 +++ $TESTTMP/test-failure.t.err
739 @@ -1,5 +1,5 @@
739 @@ -1,5 +1,5 @@
740 $ echo babar
740 $ echo babar
741 - rataxes
741 - rataxes
742 + babar
742 + babar
743 This is a noop statement so that
743 This is a noop statement so that
744 this test is still more bytes than success.
744 this test is still more bytes than success.
745 pad pad pad pad............................................................
745 pad pad pad pad............................................................
746 Accept this change? [n]
746 Accept this change? [n]
747 ERROR: test-failure.t output changed
747 ERROR: test-failure.t output changed
748 !.
748 !.
749 Failed test-failure.t: output changed
749 Failed test-failure.t: output changed
750 # Ran 2 tests, 0 skipped, 1 failed.
750 # Ran 2 tests, 0 skipped, 1 failed.
751 python hash seed: * (glob)
751 python hash seed: * (glob)
752 [1]
752 [1]
753
753
754 $ cat test-failure.t
754 $ cat test-failure.t
755 $ echo babar
755 $ echo babar
756 rataxes
756 rataxes
757 This is a noop statement so that
757 This is a noop statement so that
758 this test is still more bytes than success.
758 this test is still more bytes than success.
759 pad pad pad pad............................................................
759 pad pad pad pad............................................................
760 pad pad pad pad............................................................
760 pad pad pad pad............................................................
761 pad pad pad pad............................................................
761 pad pad pad pad............................................................
762 pad pad pad pad............................................................
762 pad pad pad pad............................................................
763 pad pad pad pad............................................................
763 pad pad pad pad............................................................
764 pad pad pad pad............................................................
764 pad pad pad pad............................................................
765
765
766 Interactive with custom view
766 Interactive with custom view
767
767
768 $ echo 'n' | rt -i --view echo
768 $ echo 'n' | rt -i --view echo
769 running 2 tests using 1 parallel processes
769 running 2 tests using 1 parallel processes
770 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
770 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
771 Accept this change? [n]* (glob)
771 Accept this change? [n]* (glob)
772 ERROR: test-failure.t output changed
772 ERROR: test-failure.t output changed
773 !.
773 !.
774 Failed test-failure.t: output changed
774 Failed test-failure.t: output changed
775 # Ran 2 tests, 0 skipped, 1 failed.
775 # Ran 2 tests, 0 skipped, 1 failed.
776 python hash seed: * (glob)
776 python hash seed: * (glob)
777 [1]
777 [1]
778
778
779 View the fix
779 View the fix
780
780
781 $ echo 'y' | rt --view echo
781 $ echo 'y' | rt --view echo
782 running 2 tests using 1 parallel processes
782 running 2 tests using 1 parallel processes
783 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
783 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
784
784
785 ERROR: test-failure.t output changed
785 ERROR: test-failure.t output changed
786 !.
786 !.
787 Failed test-failure.t: output changed
787 Failed test-failure.t: output changed
788 # Ran 2 tests, 0 skipped, 1 failed.
788 # Ran 2 tests, 0 skipped, 1 failed.
789 python hash seed: * (glob)
789 python hash seed: * (glob)
790 [1]
790 [1]
791
791
792 Accept the fix
792 Accept the fix
793
793
794 $ cat >> test-failure.t <<EOF
794 $ cat >> test-failure.t <<EOF
795 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
795 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
796 > saved backup bundle to \$TESTTMP/foo.hg
796 > saved backup bundle to \$TESTTMP/foo.hg
797 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
797 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
798 > saved backup bundle to $TESTTMP\\foo.hg
798 > saved backup bundle to $TESTTMP\\foo.hg
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 > saved backup bundle to \$TESTTMP/*.hg (glob)
800 > saved backup bundle to \$TESTTMP/*.hg (glob)
801 > EOF
801 > EOF
802 $ echo 'y' | rt -i 2>&1
802 $ echo 'y' | rt -i 2>&1
803 running 2 tests using 1 parallel processes
803 running 2 tests using 1 parallel processes
804
804
805 --- $TESTTMP/test-failure.t
805 --- $TESTTMP/test-failure.t
806 +++ $TESTTMP/test-failure.t.err
806 +++ $TESTTMP/test-failure.t.err
807 @@ -1,5 +1,5 @@
807 @@ -1,5 +1,5 @@
808 $ echo babar
808 $ echo babar
809 - rataxes
809 - rataxes
810 + babar
810 + babar
811 This is a noop statement so that
811 This is a noop statement so that
812 this test is still more bytes than success.
812 this test is still more bytes than success.
813 pad pad pad pad............................................................
813 pad pad pad pad............................................................
814 @@ -11,6 +11,6 @@
814 @@ -11,6 +11,6 @@
815 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
815 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
816 saved backup bundle to $TESTTMP/foo.hg
816 saved backup bundle to $TESTTMP/foo.hg
817 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
817 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
818 - saved backup bundle to $TESTTMP\foo.hg
818 - saved backup bundle to $TESTTMP\foo.hg
819 + saved backup bundle to $TESTTMP/foo.hg
819 + saved backup bundle to $TESTTMP/foo.hg
820 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
820 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
821 saved backup bundle to $TESTTMP/*.hg (glob)
821 saved backup bundle to $TESTTMP/*.hg (glob)
822 Accept this change? [n] ..
822 Accept this change? [n] ..
823 # Ran 2 tests, 0 skipped, 0 failed.
823 # Ran 2 tests, 0 skipped, 0 failed.
824
824
825 $ sed -e 's,(glob)$,&<,g' test-failure.t
825 $ sed -e 's,(glob)$,&<,g' test-failure.t
826 $ echo babar
826 $ echo babar
827 babar
827 babar
828 This is a noop statement so that
828 This is a noop statement so that
829 this test is still more bytes than success.
829 this test is still more bytes than success.
830 pad pad pad pad............................................................
830 pad pad pad pad............................................................
831 pad pad pad pad............................................................
831 pad pad pad pad............................................................
832 pad pad pad pad............................................................
832 pad pad pad pad............................................................
833 pad pad pad pad............................................................
833 pad pad pad pad............................................................
834 pad pad pad pad............................................................
834 pad pad pad pad............................................................
835 pad pad pad pad............................................................
835 pad pad pad pad............................................................
836 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
836 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
837 saved backup bundle to $TESTTMP/foo.hg
837 saved backup bundle to $TESTTMP/foo.hg
838 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
838 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
839 saved backup bundle to $TESTTMP/foo.hg
839 saved backup bundle to $TESTTMP/foo.hg
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 saved backup bundle to $TESTTMP/*.hg (glob)<
841 saved backup bundle to $TESTTMP/*.hg (glob)<
842
842
843 Race condition - the test file is modified while the test is running
843 Race condition - the test file is modified while the test is running
844
844
845 $ TESTRACEDIR=`pwd`
845 $ TESTRACEDIR=`pwd`
846 $ export TESTRACEDIR
846 $ export TESTRACEDIR
847 $ cat > test-race.t <<EOF
847 $ cat > test-race.t <<EOF
848 > $ echo 1
848 > $ echo 1
849 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
849 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
850 > EOF
850 > EOF
851
851
852 $ rt -i test-race.t
852 $ rt -i test-race.t
853 running 1 tests using 1 parallel processes
853 running 1 tests using 1 parallel processes
854
854
855 --- $TESTTMP/test-race.t
855 --- $TESTTMP/test-race.t
856 +++ $TESTTMP/test-race.t.err
856 +++ $TESTTMP/test-race.t.err
857 @@ -1,2 +1,3 @@
857 @@ -1,2 +1,3 @@
858 $ echo 1
858 $ echo 1
859 + 1
859 + 1
860 $ echo "# a new line" >> $TESTTMP/test-race.t
860 $ echo "# a new line" >> $TESTTMP/test-race.t
861 Reference output has changed (run again to prompt changes)
861 Reference output has changed (run again to prompt changes)
862 ERROR: test-race.t output changed
862 ERROR: test-race.t output changed
863 !
863 !
864 Failed test-race.t: output changed
864 Failed test-race.t: output changed
865 # Ran 1 tests, 0 skipped, 1 failed.
865 # Ran 1 tests, 0 skipped, 1 failed.
866 python hash seed: * (glob)
866 python hash seed: * (glob)
867 [1]
867 [1]
868
868
869 $ rm test-race.t
869 $ rm test-race.t
870
870
871 When "#testcases" is used in .t files
871 When "#testcases" is used in .t files
872
872
873 $ cat >> test-cases.t <<EOF
873 $ cat >> test-cases.t <<EOF
874 > #testcases a b
874 > #testcases a b
875 > #if a
875 > #if a
876 > $ echo 1
876 > $ echo 1
877 > #endif
877 > #endif
878 > #if b
878 > #if b
879 > $ echo 2
879 > $ echo 2
880 > #endif
880 > #endif
881 > EOF
881 > EOF
882
882
883 $ cat <<EOF | rt -i test-cases.t 2>&1
883 $ cat <<EOF | rt -i test-cases.t 2>&1
884 > y
884 > y
885 > y
885 > y
886 > EOF
886 > EOF
887 running 2 tests using 1 parallel processes
887 running 2 tests using 1 parallel processes
888
888
889 --- $TESTTMP/test-cases.t
889 --- $TESTTMP/test-cases.t
890 +++ $TESTTMP/test-cases.t#a.err
890 +++ $TESTTMP/test-cases.t#a.err
891 @@ -1,6 +1,7 @@
891 @@ -1,6 +1,7 @@
892 #testcases a b
892 #testcases a b
893 #if a
893 #if a
894 $ echo 1
894 $ echo 1
895 + 1
895 + 1
896 #endif
896 #endif
897 #if b
897 #if b
898 $ echo 2
898 $ echo 2
899 Accept this change? [n] .
899 Accept this change? [n] .
900 --- $TESTTMP/test-cases.t
900 --- $TESTTMP/test-cases.t
901 +++ $TESTTMP/test-cases.t#b.err
901 +++ $TESTTMP/test-cases.t#b.err
902 @@ -5,4 +5,5 @@
902 @@ -5,4 +5,5 @@
903 #endif
903 #endif
904 #if b
904 #if b
905 $ echo 2
905 $ echo 2
906 + 2
906 + 2
907 #endif
907 #endif
908 Accept this change? [n] .
908 Accept this change? [n] .
909 # Ran 2 tests, 0 skipped, 0 failed.
909 # Ran 2 tests, 0 skipped, 0 failed.
910
910
911 $ cat test-cases.t
911 $ cat test-cases.t
912 #testcases a b
912 #testcases a b
913 #if a
913 #if a
914 $ echo 1
914 $ echo 1
915 1
915 1
916 #endif
916 #endif
917 #if b
917 #if b
918 $ echo 2
918 $ echo 2
919 2
919 2
920 #endif
920 #endif
921
921
922 $ cat >> test-cases.t <<'EOF'
922 $ cat >> test-cases.t <<'EOF'
923 > #if a
923 > #if a
924 > $ NAME=A
924 > $ NAME=A
925 > #else
925 > #else
926 > $ NAME=B
926 > $ NAME=B
927 > #endif
927 > #endif
928 > $ echo $NAME
928 > $ echo $NAME
929 > A (a !)
929 > A (a !)
930 > B (b !)
930 > B (b !)
931 > EOF
931 > EOF
932 $ rt test-cases.t
932 $ rt test-cases.t
933 running 2 tests using 1 parallel processes
933 running 2 tests using 1 parallel processes
934 ..
934 ..
935 # Ran 2 tests, 0 skipped, 0 failed.
935 # Ran 2 tests, 0 skipped, 0 failed.
936
936
937 When using multiple dimensions of "#testcases" in .t files
937 When using multiple dimensions of "#testcases" in .t files
938
938
939 $ cat > test-cases.t <<'EOF'
939 $ cat > test-cases.t <<'EOF'
940 > #testcases a b
940 > #testcases a b
941 > #testcases c d
941 > #testcases c d
942 > #if a d
942 > #if a d
943 > $ echo $TESTCASE
943 > $ echo $TESTCASE
944 > a#d
944 > a#d
945 > #endif
945 > #endif
946 > #if b c
946 > #if b c
947 > $ echo yes
947 > $ echo yes
948 > no
948 > no
949 > #endif
949 > #endif
950 > EOF
950 > EOF
951 $ rt test-cases.t
951 $ rt test-cases.t
952 running 4 tests using 1 parallel processes
952 running 4 tests using 1 parallel processes
953 ..
953 ..
954 --- $TESTTMP/test-cases.t
954 --- $TESTTMP/test-cases.t
955 +++ $TESTTMP/test-cases.t#b#c.err
955 +++ $TESTTMP/test-cases.t#b#c.err
956 @@ -6,5 +6,5 @@
956 @@ -6,5 +6,5 @@
957 #endif
957 #endif
958 #if b c
958 #if b c
959 $ echo yes
959 $ echo yes
960 - no
960 - no
961 + yes
961 + yes
962 #endif
962 #endif
963
963
964 ERROR: test-cases.t#b#c output changed
964 ERROR: test-cases.t#b#c output changed
965 !.
965 !.
966 Failed test-cases.t#b#c: output changed
966 Failed test-cases.t#b#c: output changed
967 # Ran 4 tests, 0 skipped, 1 failed.
967 # Ran 4 tests, 0 skipped, 1 failed.
968 python hash seed: * (glob)
968 python hash seed: * (glob)
969 [1]
969 [1]
970
970
971 $ rm test-cases.t#b#c.err
971 $ rm test-cases.t#b#c.err
972 $ rm test-cases.t
972 $ rm test-cases.t
973
973
974 (reinstall)
974 (reinstall)
975 $ mv backup test-failure.t
975 $ mv backup test-failure.t
976
976
977 No Diff
977 No Diff
978 ===============
978 ===============
979
979
980 $ rt --nodiff
980 $ rt --nodiff
981 running 2 tests using 1 parallel processes
981 running 2 tests using 1 parallel processes
982 !.
982 !.
983 Failed test-failure.t: output changed
983 Failed test-failure.t: output changed
984 # Ran 2 tests, 0 skipped, 1 failed.
984 # Ran 2 tests, 0 skipped, 1 failed.
985 python hash seed: * (glob)
985 python hash seed: * (glob)
986 [1]
986 [1]
987
987
988 test --tmpdir support
988 test --tmpdir support
989 $ rt --tmpdir=$TESTTMP/keep test-success.t
989 $ rt --tmpdir=$TESTTMP/keep test-success.t
990 running 1 tests using 1 parallel processes
990 running 1 tests using 1 parallel processes
991
991
992 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
992 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
993 Keeping threadtmp dir: $TESTTMP/keep/child1
993 Keeping threadtmp dir: $TESTTMP/keep/child1
994 .
994 .
995 # Ran 1 tests, 0 skipped, 0 failed.
995 # Ran 1 tests, 0 skipped, 0 failed.
996
996
997 timeouts
997 timeouts
998 ========
998 ========
999 $ cat > test-timeout.t <<EOF
999 $ cat > test-timeout.t <<EOF
1000 > $ sleep 2
1000 > $ sleep 2
1001 > $ echo pass
1001 > $ echo pass
1002 > pass
1002 > pass
1003 > EOF
1003 > EOF
1004 > echo '#require slow' > test-slow-timeout.t
1004 > echo '#require slow' > test-slow-timeout.t
1005 > cat test-timeout.t >> test-slow-timeout.t
1005 > cat test-timeout.t >> test-slow-timeout.t
1006 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1006 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1007 running 2 tests using 1 parallel processes
1007 running 2 tests using 1 parallel processes
1008 st
1008 st
1009 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1009 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1010 Failed test-timeout.t: timed out
1010 Failed test-timeout.t: timed out
1011 # Ran 1 tests, 1 skipped, 1 failed.
1011 # Ran 1 tests, 1 skipped, 1 failed.
1012 python hash seed: * (glob)
1012 python hash seed: * (glob)
1013 [1]
1013 [1]
1014 $ rt --timeout=1 --slowtimeout=3 \
1014 $ rt --timeout=1 --slowtimeout=3 \
1015 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1015 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1016 running 2 tests using 1 parallel processes
1016 running 2 tests using 1 parallel processes
1017 .t
1017 .t
1018 Failed test-timeout.t: timed out
1018 Failed test-timeout.t: timed out
1019 # Ran 2 tests, 0 skipped, 1 failed.
1019 # Ran 2 tests, 0 skipped, 1 failed.
1020 python hash seed: * (glob)
1020 python hash seed: * (glob)
1021 [1]
1021 [1]
1022 $ rm test-timeout.t test-slow-timeout.t
1022 $ rm test-timeout.t test-slow-timeout.t
1023
1023
1024 test for --time
1024 test for --time
1025 ==================
1025 ==================
1026
1026
1027 $ rt test-success.t --time
1027 $ rt test-success.t --time
1028 running 1 tests using 1 parallel processes
1028 running 1 tests using 1 parallel processes
1029 .
1029 .
1030 # Ran 1 tests, 0 skipped, 0 failed.
1030 # Ran 1 tests, 0 skipped, 0 failed.
1031 # Producing time report
1031 # Producing time report
1032 start end cuser csys real Test
1032 start end cuser csys real Test
1033 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1033 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1034
1034
1035 test for --time with --jobs enabled
1035 test for --time with --jobs enabled
1036 ====================================
1036 ====================================
1037
1037
1038 $ rt test-success.t --time --jobs 2
1038 $ rt test-success.t --time --jobs 2
1039 running 1 tests using 1 parallel processes
1039 running 1 tests using 1 parallel processes
1040 .
1040 .
1041 # Ran 1 tests, 0 skipped, 0 failed.
1041 # Ran 1 tests, 0 skipped, 0 failed.
1042 # Producing time report
1042 # Producing time report
1043 start end cuser csys real Test
1043 start end cuser csys real Test
1044 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1044 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
1045
1045
1046 Skips
1046 Skips
1047 ================
1047 ================
1048 $ cat > test-skip.t <<EOF
1048 $ cat > test-skip.t <<EOF
1049 > $ echo xyzzy
1049 > $ echo xyzzy
1050 > #if true
1050 > #if true
1051 > #require false
1051 > #require false
1052 > #end
1052 > #end
1053 > EOF
1053 > EOF
1054 $ cat > test-noskip.t <<EOF
1054 $ cat > test-noskip.t <<EOF
1055 > #if false
1055 > #if false
1056 > #require false
1056 > #require false
1057 > #endif
1057 > #endif
1058 > EOF
1058 > EOF
1059 $ rt --nodiff
1059 $ rt --nodiff
1060 running 4 tests using 1 parallel processes
1060 running 4 tests using 1 parallel processes
1061 !.s.
1061 !.s.
1062 Skipped test-skip.t: missing feature: nail clipper
1062 Skipped test-skip.t: missing feature: nail clipper
1063 Failed test-failure.t: output changed
1063 Failed test-failure.t: output changed
1064 # Ran 3 tests, 1 skipped, 1 failed.
1064 # Ran 3 tests, 1 skipped, 1 failed.
1065 python hash seed: * (glob)
1065 python hash seed: * (glob)
1066 [1]
1066 [1]
1067
1067
1068 $ rm test-noskip.t
1068 $ rm test-noskip.t
1069 $ rt --keyword xyzzy
1069 $ rt --keyword xyzzy
1070 running 3 tests using 1 parallel processes
1070 running 3 tests using 1 parallel processes
1071 .s
1071 .s
1072 Skipped test-skip.t: missing feature: nail clipper
1072 Skipped test-skip.t: missing feature: nail clipper
1073 # Ran 2 tests, 2 skipped, 0 failed.
1073 # Ran 2 tests, 2 skipped, 0 failed.
1074
1074
1075 Skips with xml
1075 Skips with xml
1076 $ rt --keyword xyzzy \
1076 $ rt --keyword xyzzy \
1077 > --xunit=xunit.xml
1077 > --xunit=xunit.xml
1078 running 3 tests using 1 parallel processes
1078 running 3 tests using 1 parallel processes
1079 .s
1079 .s
1080 Skipped test-skip.t: missing feature: nail clipper
1080 Skipped test-skip.t: missing feature: nail clipper
1081 # Ran 2 tests, 2 skipped, 0 failed.
1081 # Ran 2 tests, 2 skipped, 0 failed.
1082 $ cat xunit.xml
1082 $ cat xunit.xml
1083 <?xml version="1.0" encoding="utf-8"?>
1083 <?xml version="1.0" encoding="utf-8"?>
1084 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1084 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1085 <testcase name="test-success.t" time="*"/> (glob)
1085 <testcase name="test-success.t" time="*"/> (glob)
1086 <testcase name="test-skip.t">
1086 <testcase name="test-skip.t">
1087 <skipped>
1087 <skipped>
1088 <![CDATA[missing feature: nail clipper]]> </skipped>
1088 <![CDATA[missing feature: nail clipper]]> </skipped>
1089 </testcase>
1089 </testcase>
1090 </testsuite>
1090 </testsuite>
1091
1091
1092 Missing skips or blacklisted skips don't count as executed:
1092 Missing skips or blacklisted skips don't count as executed:
1093 $ echo test-failure.t > blacklist
1093 $ echo test-failure.t > blacklist
1094 $ rt --blacklist=blacklist --json\
1094 $ rt --blacklist=blacklist --json\
1095 > test-failure.t test-bogus.t
1095 > test-failure.t test-bogus.t
1096 running 2 tests using 1 parallel processes
1096 running 2 tests using 1 parallel processes
1097 ss
1097 ss
1098 Skipped test-bogus.t: Doesn't exist
1098 Skipped test-bogus.t: Doesn't exist
1099 Skipped test-failure.t: blacklisted
1099 Skipped test-failure.t: blacklisted
1100 # Ran 0 tests, 2 skipped, 0 failed.
1100 # Ran 0 tests, 2 skipped, 0 failed.
1101 $ cat report.json
1101 $ cat report.json
1102 testreport ={
1102 testreport ={
1103 "test-bogus.t": {
1103 "test-bogus.t": {
1104 "result": "skip"
1104 "result": "skip"
1105 },
1105 },
1106 "test-failure.t": {
1106 "test-failure.t": {
1107 "result": "skip"
1107 "result": "skip"
1108 }
1108 }
1109 } (no-eol)
1109 } (no-eol)
1110
1110
1111 Whitelist trumps blacklist
1111 Whitelist trumps blacklist
1112 $ echo test-failure.t > whitelist
1112 $ echo test-failure.t > whitelist
1113 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1113 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1114 > test-failure.t test-bogus.t
1114 > test-failure.t test-bogus.t
1115 running 2 tests using 1 parallel processes
1115 running 2 tests using 1 parallel processes
1116 s
1116 s
1117 --- $TESTTMP/test-failure.t
1117 --- $TESTTMP/test-failure.t
1118 +++ $TESTTMP/test-failure.t.err
1118 +++ $TESTTMP/test-failure.t.err
1119 @@ -1,5 +1,5 @@
1119 @@ -1,5 +1,5 @@
1120 $ echo babar
1120 $ echo babar
1121 - rataxes
1121 - rataxes
1122 + babar
1122 + babar
1123 This is a noop statement so that
1123 This is a noop statement so that
1124 this test is still more bytes than success.
1124 this test is still more bytes than success.
1125 pad pad pad pad............................................................
1125 pad pad pad pad............................................................
1126
1126
1127 ERROR: test-failure.t output changed
1127 ERROR: test-failure.t output changed
1128 !
1128 !
1129 Skipped test-bogus.t: Doesn't exist
1129 Skipped test-bogus.t: Doesn't exist
1130 Failed test-failure.t: output changed
1130 Failed test-failure.t: output changed
1131 # Ran 1 tests, 1 skipped, 1 failed.
1131 # Ran 1 tests, 1 skipped, 1 failed.
1132 python hash seed: * (glob)
1132 python hash seed: * (glob)
1133 [1]
1133 [1]
1134
1134
1135 Ensure that --test-list causes only the tests listed in that file to
1135 Ensure that --test-list causes only the tests listed in that file to
1136 be executed.
1136 be executed.
1137 $ echo test-success.t >> onlytest
1137 $ echo test-success.t >> onlytest
1138 $ rt --test-list=onlytest
1138 $ rt --test-list=onlytest
1139 running 1 tests using 1 parallel processes
1139 running 1 tests using 1 parallel processes
1140 .
1140 .
1141 # Ran 1 tests, 0 skipped, 0 failed.
1141 # Ran 1 tests, 0 skipped, 0 failed.
1142 $ echo test-bogus.t >> anothertest
1142 $ echo test-bogus.t >> anothertest
1143 $ rt --test-list=onlytest --test-list=anothertest
1143 $ rt --test-list=onlytest --test-list=anothertest
1144 running 2 tests using 1 parallel processes
1144 running 2 tests using 1 parallel processes
1145 s.
1145 s.
1146 Skipped test-bogus.t: Doesn't exist
1146 Skipped test-bogus.t: Doesn't exist
1147 # Ran 1 tests, 1 skipped, 0 failed.
1147 # Ran 1 tests, 1 skipped, 0 failed.
1148 $ rm onlytest anothertest
1148 $ rm onlytest anothertest
1149
1149
1150 test for --json
1150 test for --json
1151 ==================
1151 ==================
1152
1152
1153 $ rt --json
1153 $ rt --json
1154 running 3 tests using 1 parallel processes
1154 running 3 tests using 1 parallel processes
1155
1155
1156 --- $TESTTMP/test-failure.t
1156 --- $TESTTMP/test-failure.t
1157 +++ $TESTTMP/test-failure.t.err
1157 +++ $TESTTMP/test-failure.t.err
1158 @@ -1,5 +1,5 @@
1158 @@ -1,5 +1,5 @@
1159 $ echo babar
1159 $ echo babar
1160 - rataxes
1160 - rataxes
1161 + babar
1161 + babar
1162 This is a noop statement so that
1162 This is a noop statement so that
1163 this test is still more bytes than success.
1163 this test is still more bytes than success.
1164 pad pad pad pad............................................................
1164 pad pad pad pad............................................................
1165
1165
1166 ERROR: test-failure.t output changed
1166 ERROR: test-failure.t output changed
1167 !.s
1167 !.s
1168 Skipped test-skip.t: missing feature: nail clipper
1168 Skipped test-skip.t: missing feature: nail clipper
1169 Failed test-failure.t: output changed
1169 Failed test-failure.t: output changed
1170 # Ran 2 tests, 1 skipped, 1 failed.
1170 # Ran 2 tests, 1 skipped, 1 failed.
1171 python hash seed: * (glob)
1171 python hash seed: * (glob)
1172 [1]
1172 [1]
1173
1173
1174 $ cat report.json
1174 $ cat report.json
1175 testreport ={
1175 testreport ={
1176 "test-failure.t": [\{] (re)
1176 "test-failure.t": [\{] (re)
1177 "csys": "\s*[\d\.]{4,5}", ? (re)
1177 "csys": "\s*[\d\.]{4,5}", ? (re)
1178 "cuser": "\s*[\d\.]{4,5}", ? (re)
1178 "cuser": "\s*[\d\.]{4,5}", ? (re)
1179 "diff": "---.+\+\+\+.+", ? (re)
1179 "diff": "---.+\+\+\+.+", ? (re)
1180 "end": "\s*[\d\.]{4,5}", ? (re)
1180 "end": "\s*[\d\.]{4,5}", ? (re)
1181 "result": "failure", ? (re)
1181 "result": "failure", ? (re)
1182 "start": "\s*[\d\.]{4,5}", ? (re)
1182 "start": "\s*[\d\.]{4,5}", ? (re)
1183 "time": "\s*[\d\.]{4,5}" (re)
1183 "time": "\s*[\d\.]{4,5}" (re)
1184 }, ? (re)
1184 }, ? (re)
1185 "test-skip.t": {
1185 "test-skip.t": {
1186 "csys": "\s*[\d\.]{4,5}", ? (re)
1186 "csys": "\s*[\d\.]{4,5}", ? (re)
1187 "cuser": "\s*[\d\.]{4,5}", ? (re)
1187 "cuser": "\s*[\d\.]{4,5}", ? (re)
1188 "diff": "", ? (re)
1188 "diff": "", ? (re)
1189 "end": "\s*[\d\.]{4,5}", ? (re)
1189 "end": "\s*[\d\.]{4,5}", ? (re)
1190 "result": "skip", ? (re)
1190 "result": "skip", ? (re)
1191 "start": "\s*[\d\.]{4,5}", ? (re)
1191 "start": "\s*[\d\.]{4,5}", ? (re)
1192 "time": "\s*[\d\.]{4,5}" (re)
1192 "time": "\s*[\d\.]{4,5}" (re)
1193 }, ? (re)
1193 }, ? (re)
1194 "test-success.t": [\{] (re)
1194 "test-success.t": [\{] (re)
1195 "csys": "\s*[\d\.]{4,5}", ? (re)
1195 "csys": "\s*[\d\.]{4,5}", ? (re)
1196 "cuser": "\s*[\d\.]{4,5}", ? (re)
1196 "cuser": "\s*[\d\.]{4,5}", ? (re)
1197 "diff": "", ? (re)
1197 "diff": "", ? (re)
1198 "end": "\s*[\d\.]{4,5}", ? (re)
1198 "end": "\s*[\d\.]{4,5}", ? (re)
1199 "result": "success", ? (re)
1199 "result": "success", ? (re)
1200 "start": "\s*[\d\.]{4,5}", ? (re)
1200 "start": "\s*[\d\.]{4,5}", ? (re)
1201 "time": "\s*[\d\.]{4,5}" (re)
1201 "time": "\s*[\d\.]{4,5}" (re)
1202 }
1202 }
1203 } (no-eol)
1203 } (no-eol)
1204 --json with --outputdir
1204 --json with --outputdir
1205
1205
1206 $ rm report.json
1206 $ rm report.json
1207 $ rm -r output
1207 $ rm -r output
1208 $ mkdir output
1208 $ mkdir output
1209 $ rt --json --outputdir output
1209 $ rt --json --outputdir output
1210 running 3 tests using 1 parallel processes
1210 running 3 tests using 1 parallel processes
1211
1211
1212 --- $TESTTMP/test-failure.t
1212 --- $TESTTMP/test-failure.t
1213 +++ $TESTTMP/output/test-failure.t.err
1213 +++ $TESTTMP/output/test-failure.t.err
1214 @@ -1,5 +1,5 @@
1214 @@ -1,5 +1,5 @@
1215 $ echo babar
1215 $ echo babar
1216 - rataxes
1216 - rataxes
1217 + babar
1217 + babar
1218 This is a noop statement so that
1218 This is a noop statement so that
1219 this test is still more bytes than success.
1219 this test is still more bytes than success.
1220 pad pad pad pad............................................................
1220 pad pad pad pad............................................................
1221
1221
1222 ERROR: test-failure.t output changed
1222 ERROR: test-failure.t output changed
1223 !.s
1223 !.s
1224 Skipped test-skip.t: missing feature: nail clipper
1224 Skipped test-skip.t: missing feature: nail clipper
1225 Failed test-failure.t: output changed
1225 Failed test-failure.t: output changed
1226 # Ran 2 tests, 1 skipped, 1 failed.
1226 # Ran 2 tests, 1 skipped, 1 failed.
1227 python hash seed: * (glob)
1227 python hash seed: * (glob)
1228 [1]
1228 [1]
1229 $ f report.json
1229 $ f report.json
1230 report.json: file not found
1230 report.json: file not found
1231 $ cat output/report.json
1231 $ cat output/report.json
1232 testreport ={
1232 testreport ={
1233 "test-failure.t": [\{] (re)
1233 "test-failure.t": [\{] (re)
1234 "csys": "\s*[\d\.]{4,5}", ? (re)
1234 "csys": "\s*[\d\.]{4,5}", ? (re)
1235 "cuser": "\s*[\d\.]{4,5}", ? (re)
1235 "cuser": "\s*[\d\.]{4,5}", ? (re)
1236 "diff": "---.+\+\+\+.+", ? (re)
1236 "diff": "---.+\+\+\+.+", ? (re)
1237 "end": "\s*[\d\.]{4,5}", ? (re)
1237 "end": "\s*[\d\.]{4,5}", ? (re)
1238 "result": "failure", ? (re)
1238 "result": "failure", ? (re)
1239 "start": "\s*[\d\.]{4,5}", ? (re)
1239 "start": "\s*[\d\.]{4,5}", ? (re)
1240 "time": "\s*[\d\.]{4,5}" (re)
1240 "time": "\s*[\d\.]{4,5}" (re)
1241 }, ? (re)
1241 }, ? (re)
1242 "test-skip.t": {
1242 "test-skip.t": {
1243 "csys": "\s*[\d\.]{4,5}", ? (re)
1243 "csys": "\s*[\d\.]{4,5}", ? (re)
1244 "cuser": "\s*[\d\.]{4,5}", ? (re)
1244 "cuser": "\s*[\d\.]{4,5}", ? (re)
1245 "diff": "", ? (re)
1245 "diff": "", ? (re)
1246 "end": "\s*[\d\.]{4,5}", ? (re)
1246 "end": "\s*[\d\.]{4,5}", ? (re)
1247 "result": "skip", ? (re)
1247 "result": "skip", ? (re)
1248 "start": "\s*[\d\.]{4,5}", ? (re)
1248 "start": "\s*[\d\.]{4,5}", ? (re)
1249 "time": "\s*[\d\.]{4,5}" (re)
1249 "time": "\s*[\d\.]{4,5}" (re)
1250 }, ? (re)
1250 }, ? (re)
1251 "test-success.t": [\{] (re)
1251 "test-success.t": [\{] (re)
1252 "csys": "\s*[\d\.]{4,5}", ? (re)
1252 "csys": "\s*[\d\.]{4,5}", ? (re)
1253 "cuser": "\s*[\d\.]{4,5}", ? (re)
1253 "cuser": "\s*[\d\.]{4,5}", ? (re)
1254 "diff": "", ? (re)
1254 "diff": "", ? (re)
1255 "end": "\s*[\d\.]{4,5}", ? (re)
1255 "end": "\s*[\d\.]{4,5}", ? (re)
1256 "result": "success", ? (re)
1256 "result": "success", ? (re)
1257 "start": "\s*[\d\.]{4,5}", ? (re)
1257 "start": "\s*[\d\.]{4,5}", ? (re)
1258 "time": "\s*[\d\.]{4,5}" (re)
1258 "time": "\s*[\d\.]{4,5}" (re)
1259 }
1259 }
1260 } (no-eol)
1260 } (no-eol)
1261 $ ls -a output
1261 $ ls -a output
1262 .
1262 .
1263 ..
1263 ..
1264 .testtimes
1264 .testtimes
1265 report.json
1265 report.json
1266 test-failure.t.err
1266 test-failure.t.err
1267
1267
1268 Test that failed tests accepted through interactive mode are properly reported:
1268 Test that failed tests accepted through interactive mode are properly reported:
1269
1269
1270 $ cp test-failure.t backup
1270 $ cp test-failure.t backup
1271 $ echo y | rt --json -i
1271 $ echo y | rt --json -i
1272 running 3 tests using 1 parallel processes
1272 running 3 tests using 1 parallel processes
1273
1273
1274 --- $TESTTMP/test-failure.t
1274 --- $TESTTMP/test-failure.t
1275 +++ $TESTTMP/test-failure.t.err
1275 +++ $TESTTMP/test-failure.t.err
1276 @@ -1,5 +1,5 @@
1276 @@ -1,5 +1,5 @@
1277 $ echo babar
1277 $ echo babar
1278 - rataxes
1278 - rataxes
1279 + babar
1279 + babar
1280 This is a noop statement so that
1280 This is a noop statement so that
1281 this test is still more bytes than success.
1281 this test is still more bytes than success.
1282 pad pad pad pad............................................................
1282 pad pad pad pad............................................................
1283 Accept this change? [n] ..s
1283 Accept this change? [n] ..s
1284 Skipped test-skip.t: missing feature: nail clipper
1284 Skipped test-skip.t: missing feature: nail clipper
1285 # Ran 2 tests, 1 skipped, 0 failed.
1285 # Ran 2 tests, 1 skipped, 0 failed.
1286
1286
1287 $ cat report.json
1287 $ cat report.json
1288 testreport ={
1288 testreport ={
1289 "test-failure.t": [\{] (re)
1289 "test-failure.t": [\{] (re)
1290 "csys": "\s*[\d\.]{4,5}", ? (re)
1290 "csys": "\s*[\d\.]{4,5}", ? (re)
1291 "cuser": "\s*[\d\.]{4,5}", ? (re)
1291 "cuser": "\s*[\d\.]{4,5}", ? (re)
1292 "diff": "", ? (re)
1292 "diff": "", ? (re)
1293 "end": "\s*[\d\.]{4,5}", ? (re)
1293 "end": "\s*[\d\.]{4,5}", ? (re)
1294 "result": "success", ? (re)
1294 "result": "success", ? (re)
1295 "start": "\s*[\d\.]{4,5}", ? (re)
1295 "start": "\s*[\d\.]{4,5}", ? (re)
1296 "time": "\s*[\d\.]{4,5}" (re)
1296 "time": "\s*[\d\.]{4,5}" (re)
1297 }, ? (re)
1297 }, ? (re)
1298 "test-skip.t": {
1298 "test-skip.t": {
1299 "csys": "\s*[\d\.]{4,5}", ? (re)
1299 "csys": "\s*[\d\.]{4,5}", ? (re)
1300 "cuser": "\s*[\d\.]{4,5}", ? (re)
1300 "cuser": "\s*[\d\.]{4,5}", ? (re)
1301 "diff": "", ? (re)
1301 "diff": "", ? (re)
1302 "end": "\s*[\d\.]{4,5}", ? (re)
1302 "end": "\s*[\d\.]{4,5}", ? (re)
1303 "result": "skip", ? (re)
1303 "result": "skip", ? (re)
1304 "start": "\s*[\d\.]{4,5}", ? (re)
1304 "start": "\s*[\d\.]{4,5}", ? (re)
1305 "time": "\s*[\d\.]{4,5}" (re)
1305 "time": "\s*[\d\.]{4,5}" (re)
1306 }, ? (re)
1306 }, ? (re)
1307 "test-success.t": [\{] (re)
1307 "test-success.t": [\{] (re)
1308 "csys": "\s*[\d\.]{4,5}", ? (re)
1308 "csys": "\s*[\d\.]{4,5}", ? (re)
1309 "cuser": "\s*[\d\.]{4,5}", ? (re)
1309 "cuser": "\s*[\d\.]{4,5}", ? (re)
1310 "diff": "", ? (re)
1310 "diff": "", ? (re)
1311 "end": "\s*[\d\.]{4,5}", ? (re)
1311 "end": "\s*[\d\.]{4,5}", ? (re)
1312 "result": "success", ? (re)
1312 "result": "success", ? (re)
1313 "start": "\s*[\d\.]{4,5}", ? (re)
1313 "start": "\s*[\d\.]{4,5}", ? (re)
1314 "time": "\s*[\d\.]{4,5}" (re)
1314 "time": "\s*[\d\.]{4,5}" (re)
1315 }
1315 }
1316 } (no-eol)
1316 } (no-eol)
1317 $ mv backup test-failure.t
1317 $ mv backup test-failure.t
1318
1318
1319 backslash at the end of a line with glob matching is handled properly
1319 backslash at the end of a line with glob matching is handled properly
1320
1320
1321 $ cat > test-glob-backslash.t << EOF
1321 $ cat > test-glob-backslash.t << EOF
1322 > $ echo 'foo bar \\'
1322 > $ echo 'foo bar \\'
1323 > foo * \ (glob)
1323 > foo * \ (glob)
1324 > EOF
1324 > EOF
1325
1325
1326 $ rt test-glob-backslash.t
1326 $ rt test-glob-backslash.t
1327 running 1 tests using 1 parallel processes
1327 running 1 tests using 1 parallel processes
1328 .
1328 .
1329 # Ran 1 tests, 0 skipped, 0 failed.
1329 # Ran 1 tests, 0 skipped, 0 failed.
1330
1330
1331 $ rm -f test-glob-backslash.t
1331 $ rm -f test-glob-backslash.t
1332
1332
1333 Test globbing of local IP addresses
1333 Test globbing of local IP addresses
1334 $ echo 172.16.18.1
1334 $ echo 172.16.18.1
1335 $LOCALIP (glob)
1335 $LOCALIP (glob)
1336 $ echo dead:beef::1
1336 $ echo dead:beef::1
1337 $LOCALIP (glob)
1337 $LOCALIP (glob)
1338
1338
1339 Add support for external test formatter
1339 Add support for external test formatter
1340 =======================================
1340 =======================================
1341
1341
1342 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1342 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1343 running 2 tests using 1 parallel processes
1343 running 2 tests using 1 parallel processes
1344
1344
1345 # Ran 2 tests, 0 skipped, 0 failed.
1345 # Ran 2 tests, 0 skipped, 0 failed.
1346 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1346 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1347 FAILURE! test-failure.t output changed
1347 FAILURE! test-failure.t output changed
1348 SUCCESS! test-success.t
1348 SUCCESS! test-success.t
1349 ON_END!
1349 ON_END!
1350
1350
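A minimal sketch of what a formatter driven through CUSTOM_TEST_RESULT might look like, mirroring only the event names visible in the output above (ON_START!/FAILURE!/SUCCESS!/ON_END!). The constructor signature and the way run-tests.py locates and instantiates the class are assumptions here, not a description of the real basic_test_result helper.

    class TestResult(object):
        # Hypothetical constructor; the runner is assumed to pass its options.
        def __init__(self, options, *args, **kwargs):
            self.options = options

        def onStart(self, suite):
            print('ON_START!', suite)

        def addFailure(self, test, reason):
            print('FAILURE!', test, reason)

        def addSuccess(self, test):
            print('SUCCESS!', test)

        def onEnd(self):
            print('ON_END!')
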
1351 Test reusability for third party tools
1351 Test reusability for third party tools
1352 ======================================
1352 ======================================
1353
1353
1354 $ mkdir "$TESTTMP"/anothertests
1354 $ mkdir "$TESTTMP"/anothertests
1355 $ cd "$TESTTMP"/anothertests
1355 $ cd "$TESTTMP"/anothertests
1356
1356
1357 test that `run-tests.py` can execute hghave, even if it is not run in the
1357 test that `run-tests.py` can execute hghave, even if it is not run in the
1358 Mercurial source tree.
1358 Mercurial source tree.
1359
1359
1360 $ cat > test-hghave.t <<EOF
1360 $ cat > test-hghave.t <<EOF
1361 > #require true
1361 > #require true
1362 > $ echo foo
1362 > $ echo foo
1363 > foo
1363 > foo
1364 > EOF
1364 > EOF
1365 $ rt test-hghave.t
1365 $ rt test-hghave.t
1366 running 1 tests using 1 parallel processes
1366 running 1 tests using 1 parallel processes
1367 .
1367 .
1368 # Ran 1 tests, 0 skipped, 0 failed.
1368 # Ran 1 tests, 0 skipped, 0 failed.
1369
1369
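For context, a '#require' line simply names hghave features that must be present for the test to run (the test above requires the always-available 'true' feature; a later test uses '#require slow'). A small, purely illustrative sketch of pulling those feature names out of a .t file; run-tests.py and hghave do their own parsing.

    def required_features(path):
        # Collect feature names from '#require' lines in a .t test file.
        features = []
        with open(path) as fp:
            for line in fp:
                line = line.strip()
                if line.startswith('#require '):
                    features.extend(line.split()[1:])
        return features

    # required_features('test-hghave.t') would return ['true'] for the test above.
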
1370 test that RUNTESTDIR refers to the directory in which the currently
1370 test that RUNTESTDIR refers to the directory in which the currently
1371 running `run-tests.py` is placed.
1371 running `run-tests.py` is placed.
1372
1372
1373 $ cat > test-runtestdir.t <<EOF
1373 $ cat > test-runtestdir.t <<EOF
1374 > - $TESTDIR, in which test-run-tests.t is placed
1374 > - $TESTDIR, in which test-run-tests.t is placed
1375 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1375 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1376 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1376 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1377 >
1377 >
1378 > #if windows
1378 > #if windows
1379 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1379 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1380 > #else
1380 > #else
1381 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1381 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1382 > #endif
1382 > #endif
1383 > If this prints a path, that means RUNTESTDIR didn't equal
1383 > If this prints a path, that means RUNTESTDIR didn't equal
1384 > TESTDIR as it should have.
1384 > TESTDIR as it should have.
1385 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1385 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1386 > This should print the start of check-code. If this passes but the
1386 > This should print the start of check-code. If this passes but the
1387 > previous check failed, that means we found a copy of check-code at whatever
1387 > previous check failed, that means we found a copy of check-code at whatever
1388 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1388 > RUNTESTDIR ended up containing, even though it doesn't match TESTDIR.
1389 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1389 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1390 > #!USRBINENVPY
1390 > #!USRBINENVPY
1391 > #
1391 > #
1392 > # check-code - a style and portability checker for Mercurial
1392 > # check-code - a style and portability checker for Mercurial
1393 > EOF
1393 > EOF
1394 $ rt test-runtestdir.t
1394 $ rt test-runtestdir.t
1395 running 1 tests using 1 parallel processes
1395 running 1 tests using 1 parallel processes
1396 .
1396 .
1397 # Ran 1 tests, 0 skipped, 0 failed.
1397 # Ran 1 tests, 0 skipped, 0 failed.
1398
1398
1399 #if execbit
1399 #if execbit
1400
1400
1401 test that TESTDIR is included in PATH
1401 test that TESTDIR is included in PATH
1402
1402
1403 $ cat > custom-command.sh <<EOF
1403 $ cat > custom-command.sh <<EOF
1404 > #!/bin/sh
1404 > #!/bin/sh
1405 > echo "hello world"
1405 > echo "hello world"
1406 > EOF
1406 > EOF
1407 $ chmod +x custom-command.sh
1407 $ chmod +x custom-command.sh
1408 $ cat > test-testdir-path.t <<EOF
1408 $ cat > test-testdir-path.t <<EOF
1409 > $ custom-command.sh
1409 > $ custom-command.sh
1410 > hello world
1410 > hello world
1411 > EOF
1411 > EOF
1412 $ rt test-testdir-path.t
1412 $ rt test-testdir-path.t
1413 running 1 tests using 1 parallel processes
1413 running 1 tests using 1 parallel processes
1414 .
1414 .
1415 # Ran 1 tests, 0 skipped, 0 failed.
1415 # Ran 1 tests, 0 skipped, 0 failed.
1416
1416
1417 #endif
1417 #endif
1418
1418
1419 test support for --allow-slow-tests
1419 test support for --allow-slow-tests
1420 $ cat > test-very-slow-test.t <<EOF
1420 $ cat > test-very-slow-test.t <<EOF
1421 > #require slow
1421 > #require slow
1422 > $ echo pass
1422 > $ echo pass
1423 > pass
1423 > pass
1424 > EOF
1424 > EOF
1425 $ rt test-very-slow-test.t
1425 $ rt test-very-slow-test.t
1426 running 1 tests using 1 parallel processes
1426 running 1 tests using 1 parallel processes
1427 s
1427 s
1428 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1428 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1429 # Ran 0 tests, 1 skipped, 0 failed.
1429 # Ran 0 tests, 1 skipped, 0 failed.
1430 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1430 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1431 running 1 tests using 1 parallel processes
1431 running 1 tests using 1 parallel processes
1432 .
1432 .
1433 # Ran 1 tests, 0 skipped, 0 failed.
1433 # Ran 1 tests, 0 skipped, 0 failed.
1434
1434
1435 support for running a test outside the current directory
1435 support for running a test outside the current directory
1436 $ mkdir nonlocal
1436 $ mkdir nonlocal
1437 $ cat > nonlocal/test-is-not-here.t << EOF
1437 $ cat > nonlocal/test-is-not-here.t << EOF
1438 > $ echo pass
1438 > $ echo pass
1439 > pass
1439 > pass
1440 > EOF
1440 > EOF
1441 $ rt nonlocal/test-is-not-here.t
1441 $ rt nonlocal/test-is-not-here.t
1442 running 1 tests using 1 parallel processes
1442 running 1 tests using 1 parallel processes
1443 .
1443 .
1444 # Ran 1 tests, 0 skipped, 0 failed.
1444 # Ran 1 tests, 0 skipped, 0 failed.
1445
1445
1446 support for automatically discovering tests if the argument is a folder
1446 support for automatically discovering tests if the argument is a folder
1447 $ mkdir tmp && cd tmp
1447 $ mkdir tmp && cd tmp
1448
1448
1449 $ cat > test-uno.t << EOF
1449 $ cat > test-uno.t << EOF
1450 > $ echo line
1450 > $ echo line
1451 > line
1451 > line
1452 > EOF
1452 > EOF
1453
1453
1454 $ cp test-uno.t test-dos.t
1454 $ cp test-uno.t test-dos.t
1455 $ cd ..
1455 $ cd ..
1456 $ cp -R tmp tmpp
1456 $ cp -R tmp tmpp
1457 $ cp tmp/test-uno.t test-solo.t
1457 $ cp tmp/test-uno.t test-solo.t
1458
1458
1459 $ rt tmp/ test-solo.t tmpp
1459 $ rt tmp/ test-solo.t tmpp
1460 running 5 tests using 1 parallel processes
1460 running 5 tests using 1 parallel processes
1461 .....
1461 .....
1462 # Ran 5 tests, 0 skipped, 0 failed.
1462 # Ran 5 tests, 0 skipped, 0 failed.
1463 $ rm -rf tmp tmpp
1463 $ rm -rf tmp tmpp
1464
1464
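As demonstrated above, directory arguments are expanded to the tests they contain while plain file arguments pass through unchanged. A rough sketch of that expansion; the 'test-*.t' pattern is an assumption for illustration, since the runner also knows about other test types.

    import glob
    import os

    def expand_test_args(args):
        # Directories expand to the test files inside them; files pass through.
        tests = []
        for arg in args:
            if os.path.isdir(arg):
                tests.extend(sorted(glob.glob(os.path.join(arg, 'test-*.t'))))
            else:
                tests.append(arg)
        return tests

    # expand_test_args(['tmp/', 'test-solo.t', 'tmpp']) yields five tests,
    # matching the "running 5 tests" line above.
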
1465 support for running run-tests.py from another directory
1465 support for running run-tests.py from another directory
1466 $ mkdir tmp && cd tmp
1466 $ mkdir tmp && cd tmp
1467
1467
1468 $ cat > useful-file.sh << EOF
1468 $ cat > useful-file.sh << EOF
1469 > important command
1469 > important command
1470 > EOF
1470 > EOF
1471
1471
1472 $ cat > test-folder.t << EOF
1472 $ cat > test-folder.t << EOF
1473 > $ cat \$TESTDIR/useful-file.sh
1473 > $ cat \$TESTDIR/useful-file.sh
1474 > important command
1474 > important command
1475 > EOF
1475 > EOF
1476
1476
1477 $ cat > test-folder-fail.t << EOF
1477 $ cat > test-folder-fail.t << EOF
1478 > $ cat \$TESTDIR/useful-file.sh
1478 > $ cat \$TESTDIR/useful-file.sh
1479 > important commando
1479 > important commando
1480 > EOF
1480 > EOF
1481
1481
1482 $ cd ..
1482 $ cd ..
1483 $ rt tmp/test-*.t
1483 $ rt tmp/test-*.t
1484 running 2 tests using 1 parallel processes
1484 running 2 tests using 1 parallel processes
1485
1485
1486 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1486 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1487 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1487 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1488 @@ -1,2 +1,2 @@
1488 @@ -1,2 +1,2 @@
1489 $ cat $TESTDIR/useful-file.sh
1489 $ cat $TESTDIR/useful-file.sh
1490 - important commando
1490 - important commando
1491 + important command
1491 + important command
1492
1492
1493 ERROR: test-folder-fail.t output changed
1493 ERROR: test-folder-fail.t output changed
1494 !.
1494 !.
1495 Failed test-folder-fail.t: output changed
1495 Failed test-folder-fail.t: output changed
1496 # Ran 2 tests, 0 skipped, 1 failed.
1496 # Ran 2 tests, 0 skipped, 1 failed.
1497 python hash seed: * (glob)
1497 python hash seed: * (glob)
1498 [1]
1498 [1]
1499
1499
1500 support for bisecting failed tests automatically
1500 support for bisecting failed tests automatically
1501 $ hg init bisect
1501 $ hg init bisect
1502 $ cd bisect
1502 $ cd bisect
1503 $ cat >> test-bisect.t <<EOF
1503 $ cat >> test-bisect.t <<EOF
1504 > $ echo pass
1504 > $ echo pass
1505 > pass
1505 > pass
1506 > EOF
1506 > EOF
1507 $ hg add test-bisect.t
1507 $ hg add test-bisect.t
1508 $ hg ci -m 'good'
1508 $ hg ci -m 'good'
1509 $ cat >> test-bisect.t <<EOF
1509 $ cat >> test-bisect.t <<EOF
1510 > $ echo pass
1510 > $ echo pass
1511 > fail
1511 > fail
1512 > EOF
1512 > EOF
1513 $ hg ci -m 'bad'
1513 $ hg ci -m 'bad'
1514 $ rt --known-good-rev=0 test-bisect.t
1514 $ rt --known-good-rev=0 test-bisect.t
1515 running 1 tests using 1 parallel processes
1515 running 1 tests using 1 parallel processes
1516
1516
1517 --- $TESTTMP/anothertests/bisect/test-bisect.t
1517 --- $TESTTMP/anothertests/bisect/test-bisect.t
1518 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1518 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1519 @@ -1,4 +1,4 @@
1519 @@ -1,4 +1,4 @@
1520 $ echo pass
1520 $ echo pass
1521 pass
1521 pass
1522 $ echo pass
1522 $ echo pass
1523 - fail
1523 - fail
1524 + pass
1524 + pass
1525
1525
1526 ERROR: test-bisect.t output changed
1526 ERROR: test-bisect.t output changed
1527 !
1527 !
1528 Failed test-bisect.t: output changed
1528 Failed test-bisect.t: output changed
1529 test-bisect.t broken by 72cbf122d116 (bad)
1529 test-bisect.t broken by 72cbf122d116 (bad)
1530 # Ran 1 tests, 0 skipped, 1 failed.
1530 # Ran 1 tests, 0 skipped, 1 failed.
1531 python hash seed: * (glob)
1531 python hash seed: * (glob)
1532 [1]
1532 [1]
1533
1533
1534 $ cd ..
1534 $ cd ..
1535
1535
1536 support bisecting a separate repo
1536 support bisecting a separate repo
1537
1537
1538 $ hg init bisect-dependent
1538 $ hg init bisect-dependent
1539 $ cd bisect-dependent
1539 $ cd bisect-dependent
1540 $ cat > test-bisect-dependent.t <<EOF
1540 $ cat > test-bisect-dependent.t <<EOF
1541 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1541 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1542 > pass
1542 > pass
1543 > EOF
1543 > EOF
1544 $ hg commit -Am dependent test-bisect-dependent.t
1544 $ hg commit -Am dependent test-bisect-dependent.t
1545
1545
1546 $ rt --known-good-rev=0 test-bisect-dependent.t
1546 $ rt --known-good-rev=0 test-bisect-dependent.t
1547 running 1 tests using 1 parallel processes
1547 running 1 tests using 1 parallel processes
1548
1548
1549 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1549 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1550 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1550 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1551 @@ -1,2 +1,2 @@
1551 @@ -1,2 +1,2 @@
1552 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1552 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1553 - pass
1553 - pass
1554 + fail
1554 + fail
1555
1555
1556 ERROR: test-bisect-dependent.t output changed
1556 ERROR: test-bisect-dependent.t output changed
1557 !
1557 !
1558 Failed test-bisect-dependent.t: output changed
1558 Failed test-bisect-dependent.t: output changed
1559 Failed to identify failure point for test-bisect-dependent.t
1559 Failed to identify failure point for test-bisect-dependent.t
1560 # Ran 1 tests, 0 skipped, 1 failed.
1560 # Ran 1 tests, 0 skipped, 1 failed.
1561 python hash seed: * (glob)
1561 python hash seed: * (glob)
1562 [1]
1562 [1]
1563
1563
1564 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1564 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1565 usage: run-tests.py [options] [tests]
1565 usage: run-tests.py [options] [tests]
1566 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1566 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1567 [2]
1567 [2]
1568
1568
1569 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1569 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1570 running 1 tests using 1 parallel processes
1570 running 1 tests using 1 parallel processes
1571
1571
1572 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1572 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1573 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1573 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1574 @@ -1,2 +1,2 @@
1574 @@ -1,2 +1,2 @@
1575 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1575 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1576 - pass
1576 - pass
1577 + fail
1577 + fail
1578
1578
1579 ERROR: test-bisect-dependent.t output changed
1579 ERROR: test-bisect-dependent.t output changed
1580 !
1580 !
1581 Failed test-bisect-dependent.t: output changed
1581 Failed test-bisect-dependent.t: output changed
1582 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1582 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1583 # Ran 1 tests, 0 skipped, 1 failed.
1583 # Ran 1 tests, 0 skipped, 1 failed.
1584 python hash seed: * (glob)
1584 python hash seed: * (glob)
1585 [1]
1585 [1]
1586
1586
1587 $ cd ..
1587 $ cd ..
1588
1588
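Conceptually, --known-good-rev gives the runner a starting point for a binary search: it re-runs the failing test at revisions between the known-good changeset and the current one until it can name the first bad revision (reported above as "broken by 72cbf122d116"). A generic sketch of that search; run_test is a hypothetical callable returning True when the test passes at a given revision.

    def first_bad(revs, run_test):
        # revs is ordered oldest to newest; revs[0] is the known-good revision
        # and the last entry is assumed to be bad.
        lo, hi = 0, len(revs) - 1
        while hi - lo > 1:
            mid = (lo + hi) // 2
            if run_test(revs[mid]):
                lo = mid
            else:
                hi = mid
        return revs[hi]
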
1589 Test a broken #if statement doesn't break run-tests threading.
1589 Test a broken #if statement doesn't break run-tests threading.
1590 ==============================================================
1590 ==============================================================
1591 $ mkdir broken
1591 $ mkdir broken
1592 $ cd broken
1592 $ cd broken
1593 $ cat > test-broken.t <<EOF
1593 $ cat > test-broken.t <<EOF
1594 > true
1594 > true
1595 > #if notarealhghavefeature
1595 > #if notarealhghavefeature
1596 > $ false
1596 > $ false
1597 > #endif
1597 > #endif
1598 > EOF
1598 > EOF
1599 $ for f in 1 2 3 4 ; do
1599 $ for f in 1 2 3 4 ; do
1600 > cat > test-works-$f.t <<EOF
1600 > cat > test-works-$f.t <<EOF
1601 > This is test case $f
1601 > This is test case $f
1602 > $ sleep 1
1602 > $ sleep 1
1603 > EOF
1603 > EOF
1604 > done
1604 > done
1605 $ rt -j 2
1605 $ rt -j 2
1606 running 5 tests using 2 parallel processes
1606 running 5 tests using 2 parallel processes
1607 ....
1607 ....
1608 # Ran 5 tests, 0 skipped, 0 failed.
1608 # Ran 5 tests, 0 skipped, 0 failed.
1609 skipped: unknown feature: notarealhghavefeature
1609 skipped: unknown feature: notarealhghavefeature
1610
1610
1611 $ cd ..
1611 $ cd ..
1612 $ rm -rf broken
1612 $ rm -rf broken
1613
1613
1614 Test cases in .t files
1614 Test cases in .t files
1615 ======================
1615 ======================
1616 $ mkdir cases
1616 $ mkdir cases
1617 $ cd cases
1617 $ cd cases
1618 $ cat > test-cases-abc.t <<'EOF'
1618 $ cat > test-cases-abc.t <<'EOF'
1619 > #testcases A B C
1619 > #testcases A B C
1620 > $ V=B
1620 > $ V=B
1621 > #if A
1621 > #if A
1622 > $ V=A
1622 > $ V=A
1623 > #endif
1623 > #endif
1624 > #if C
1624 > #if C
1625 > $ V=C
1625 > $ V=C
1626 > #endif
1626 > #endif
1627 > $ echo $V | sed 's/A/C/'
1627 > $ echo $V | sed 's/A/C/'
1628 > C
1628 > C
1629 > #if C
1629 > #if C
1630 > $ [ $V = C ]
1630 > $ [ $V = C ]
1631 > #endif
1631 > #endif
1632 > #if A
1632 > #if A
1633 > $ [ $V = C ]
1633 > $ [ $V = C ]
1634 > [1]
1634 > [1]
1635 > #endif
1635 > #endif
1636 > #if no-C
1636 > #if no-C
1637 > $ [ $V = C ]
1637 > $ [ $V = C ]
1638 > [1]
1638 > [1]
1639 > #endif
1639 > #endif
1640 > $ [ $V = D ]
1640 > $ [ $V = D ]
1641 > [1]
1641 > [1]
1642 > EOF
1642 > EOF
1643 $ rt
1643 $ rt
1644 running 3 tests using 1 parallel processes
1644 running 3 tests using 1 parallel processes
1645 .
1645 .
1646 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1646 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1647 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1647 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1648 @@ -7,7 +7,7 @@
1648 @@ -7,7 +7,7 @@
1649 $ V=C
1649 $ V=C
1650 #endif
1650 #endif
1651 $ echo $V | sed 's/A/C/'
1651 $ echo $V | sed 's/A/C/'
1652 - C
1652 - C
1653 + B
1653 + B
1654 #if C
1654 #if C
1655 $ [ $V = C ]
1655 $ [ $V = C ]
1656 #endif
1656 #endif
1657
1657
1658 ERROR: test-cases-abc.t#B output changed
1658 ERROR: test-cases-abc.t#B output changed
1659 !.
1659 !.
1660 Failed test-cases-abc.t#B: output changed
1660 Failed test-cases-abc.t#B: output changed
1661 # Ran 3 tests, 0 skipped, 1 failed.
1661 # Ran 3 tests, 0 skipped, 1 failed.
1662 python hash seed: * (glob)
1662 python hash seed: * (glob)
1663 [1]
1663 [1]
1664
1664
1665 --restart works
1665 --restart works
1666
1666
1667 $ rt --restart
1667 $ rt --restart
1668 running 2 tests using 1 parallel processes
1668 running 2 tests using 1 parallel processes
1669
1669
1670 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1670 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1671 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1671 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1672 @@ -7,7 +7,7 @@
1672 @@ -7,7 +7,7 @@
1673 $ V=C
1673 $ V=C
1674 #endif
1674 #endif
1675 $ echo $V | sed 's/A/C/'
1675 $ echo $V | sed 's/A/C/'
1676 - C
1676 - C
1677 + B
1677 + B
1678 #if C
1678 #if C
1679 $ [ $V = C ]
1679 $ [ $V = C ]
1680 #endif
1680 #endif
1681
1681
1682 ERROR: test-cases-abc.t#B output changed
1682 ERROR: test-cases-abc.t#B output changed
1683 !.
1683 !.
1684 Failed test-cases-abc.t#B: output changed
1684 Failed test-cases-abc.t#B: output changed
1685 # Ran 2 tests, 0 skipped, 1 failed.
1685 # Ran 2 tests, 0 skipped, 1 failed.
1686 python hash seed: * (glob)
1686 python hash seed: * (glob)
1687 [1]
1687 [1]
1688
1688
1689 --restart works with outputdir
1689 --restart works with outputdir
1690
1690
1691 $ mkdir output
1691 $ mkdir output
1692 $ mv test-cases-abc.t#B.err output
1692 $ mv test-cases-abc.t#B.err output
1693 $ rt --restart --outputdir output
1693 $ rt --restart --outputdir output
1694 running 2 tests using 1 parallel processes
1694 running 2 tests using 1 parallel processes
1695
1695
1696 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1696 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1697 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1697 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1698 @@ -7,7 +7,7 @@
1698 @@ -7,7 +7,7 @@
1699 $ V=C
1699 $ V=C
1700 #endif
1700 #endif
1701 $ echo $V | sed 's/A/C/'
1701 $ echo $V | sed 's/A/C/'
1702 - C
1702 - C
1703 + B
1703 + B
1704 #if C
1704 #if C
1705 $ [ $V = C ]
1705 $ [ $V = C ]
1706 #endif
1706 #endif
1707
1707
1708 ERROR: test-cases-abc.t#B output changed
1708 ERROR: test-cases-abc.t#B output changed
1709 !.
1709 !.
1710 Failed test-cases-abc.t#B: output changed
1710 Failed test-cases-abc.t#B: output changed
1711 # Ran 2 tests, 0 skipped, 1 failed.
1711 # Ran 2 tests, 0 skipped, 1 failed.
1712 python hash seed: * (glob)
1712 python hash seed: * (glob)
1713 [1]
1713 [1]
1714
1714
1715 Test TESTCASE variable
1715 Test TESTCASE variable
1716
1716
1717 $ cat > test-cases-ab.t <<'EOF'
1717 $ cat > test-cases-ab.t <<'EOF'
1718 > $ dostuff() {
1718 > $ dostuff() {
1719 > > echo "In case $TESTCASE"
1719 > > echo "In case $TESTCASE"
1720 > > }
1720 > > }
1721 > #testcases A B
1721 > #testcases A B
1722 > #if A
1722 > #if A
1723 > $ dostuff
1723 > $ dostuff
1724 > In case A
1724 > In case A
1725 > #endif
1725 > #endif
1726 > #if B
1726 > #if B
1727 > $ dostuff
1727 > $ dostuff
1728 > In case B
1728 > In case B
1729 > #endif
1729 > #endif
1730 > EOF
1730 > EOF
1731 $ rt test-cases-ab.t
1731 $ rt test-cases-ab.t
1732 running 2 tests using 1 parallel processes
1732 running 2 tests using 1 parallel processes
1733 ..
1733 ..
1734 # Ran 2 tests, 0 skipped, 0 failed.
1734 # Ran 2 tests, 0 skipped, 0 failed.
1735
1735
1736 Support running a specific test case
1736 Support running a specific test case
1737
1737
1738 $ rt "test-cases-abc.t#B"
1738 $ rt "test-cases-abc.t#B"
1739 running 1 tests using 1 parallel processes
1739 running 1 tests using 1 parallel processes
1740
1740
1741 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1741 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1742 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1742 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1743 @@ -7,7 +7,7 @@
1743 @@ -7,7 +7,7 @@
1744 $ V=C
1744 $ V=C
1745 #endif
1745 #endif
1746 $ echo $V | sed 's/A/C/'
1746 $ echo $V | sed 's/A/C/'
1747 - C
1747 - C
1748 + B
1748 + B
1749 #if C
1749 #if C
1750 $ [ $V = C ]
1750 $ [ $V = C ]
1751 #endif
1751 #endif
1752
1752
1753 ERROR: test-cases-abc.t#B output changed
1753 ERROR: test-cases-abc.t#B output changed
1754 !
1754 !
1755 Failed test-cases-abc.t#B: output changed
1755 Failed test-cases-abc.t#B: output changed
1756 # Ran 1 tests, 0 skipped, 1 failed.
1756 # Ran 1 tests, 0 skipped, 1 failed.
1757 python hash seed: * (glob)
1757 python hash seed: * (glob)
1758 [1]
1758 [1]
1759
1759
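Each name declared on a '#testcases' line becomes a separate run of the file, addressable as 'file.t#case' (as in test-cases-abc.t#B above). A small, purely illustrative sketch of turning the directive into those per-case identifiers:

    def case_ids(path):
        # List 'file.t#case' identifiers for the #testcases declared in a test.
        with open(path) as fp:
            for line in fp:
                if line.startswith('#testcases '):
                    return ['%s#%s' % (path, case) for case in line.split()[1:]]
        return [path]  # no cases declared: the file runs once under its own name

    # case_ids('test-cases-abc.t') ->
    #     ['test-cases-abc.t#A', 'test-cases-abc.t#B', 'test-cases-abc.t#C']
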
1760 Support running multiple test cases in the same file
1760 Support running multiple test cases in the same file
1761
1761
1762 $ rt test-cases-abc.t#B test-cases-abc.t#C
1762 $ rt test-cases-abc.t#B test-cases-abc.t#C
1763 running 2 tests using 1 parallel processes
1763 running 2 tests using 1 parallel processes
1764
1764
1765 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1765 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1766 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1766 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1767 @@ -7,7 +7,7 @@
1767 @@ -7,7 +7,7 @@
1768 $ V=C
1768 $ V=C
1769 #endif
1769 #endif
1770 $ echo $V | sed 's/A/C/'
1770 $ echo $V | sed 's/A/C/'
1771 - C
1771 - C
1772 + B
1772 + B
1773 #if C
1773 #if C
1774 $ [ $V = C ]
1774 $ [ $V = C ]
1775 #endif
1775 #endif
1776
1776
1777 ERROR: test-cases-abc.t#B output changed
1777 ERROR: test-cases-abc.t#B output changed
1778 !.
1778 !.
1779 Failed test-cases-abc.t#B: output changed
1779 Failed test-cases-abc.t#B: output changed
1780 # Ran 2 tests, 0 skipped, 1 failed.
1780 # Ran 2 tests, 0 skipped, 1 failed.
1781 python hash seed: * (glob)
1781 python hash seed: * (glob)
1782 [1]
1782 [1]
1783
1783
1784 Support ignoring invalid test cases
1784 Support ignoring invalid test cases
1785
1785
1786 $ rt test-cases-abc.t#B test-cases-abc.t#D
1786 $ rt test-cases-abc.t#B test-cases-abc.t#D
1787 running 1 tests using 1 parallel processes
1787 running 1 tests using 1 parallel processes
1788
1788
1789 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1789 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1790 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1790 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1791 @@ -7,7 +7,7 @@
1791 @@ -7,7 +7,7 @@
1792 $ V=C
1792 $ V=C
1793 #endif
1793 #endif
1794 $ echo $V | sed 's/A/C/'
1794 $ echo $V | sed 's/A/C/'
1795 - C
1795 - C
1796 + B
1796 + B
1797 #if C
1797 #if C
1798 $ [ $V = C ]
1798 $ [ $V = C ]
1799 #endif
1799 #endif
1800
1800
1801 ERROR: test-cases-abc.t#B output changed
1801 ERROR: test-cases-abc.t#B output changed
1802 !
1802 !
1803 Failed test-cases-abc.t#B: output changed
1803 Failed test-cases-abc.t#B: output changed
1804 # Ran 1 tests, 0 skipped, 1 failed.
1804 # Ran 1 tests, 0 skipped, 1 failed.
1805 python hash seed: * (glob)
1805 python hash seed: * (glob)
1806 [1]
1806 [1]
1807
1807
1808 Support running complex test case names
1808 Support running complex test case names
1809
1809
1810 $ cat > test-cases-advanced-cases.t <<'EOF'
1810 $ cat > test-cases-advanced-cases.t <<'EOF'
1811 > #testcases simple case-with-dashes casewith_-.chars
1811 > #testcases simple case-with-dashes casewith_-.chars
1812 > $ echo $TESTCASE
1812 > $ echo $TESTCASE
1813 > simple
1813 > simple
1814 > EOF
1814 > EOF
1815
1815
1816 $ cat test-cases-advanced-cases.t
1816 $ cat test-cases-advanced-cases.t
1817 #testcases simple case-with-dashes casewith_-.chars
1817 #testcases simple case-with-dashes casewith_-.chars
1818 $ echo $TESTCASE
1818 $ echo $TESTCASE
1819 simple
1819 simple
1820
1820
1821 $ rt test-cases-advanced-cases.t
1821 $ rt test-cases-advanced-cases.t
1822 running 3 tests using 1 parallel processes
1822 running 3 tests using 1 parallel processes
1823
1823
1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1824 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1825 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1826 @@ -1,3 +1,3 @@
1826 @@ -1,3 +1,3 @@
1827 #testcases simple case-with-dashes casewith_-.chars
1827 #testcases simple case-with-dashes casewith_-.chars
1828 $ echo $TESTCASE
1828 $ echo $TESTCASE
1829 - simple
1829 - simple
1830 + case-with-dashes
1830 + case-with-dashes
1831
1831
1832 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1832 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1833 !
1833 !
1834 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1834 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1835 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1835 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1836 @@ -1,3 +1,3 @@
1836 @@ -1,3 +1,3 @@
1837 #testcases simple case-with-dashes casewith_-.chars
1837 #testcases simple case-with-dashes casewith_-.chars
1838 $ echo $TESTCASE
1838 $ echo $TESTCASE
1839 - simple
1839 - simple
1840 + casewith_-.chars
1840 + casewith_-.chars
1841
1841
1842 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1842 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1843 !.
1843 !.
1844 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1844 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1845 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1845 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1846 # Ran 3 tests, 0 skipped, 2 failed.
1846 # Ran 3 tests, 0 skipped, 2 failed.
1847 python hash seed: * (glob)
1847 python hash seed: * (glob)
1848 [1]
1848 [1]
1849
1849
1850 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1850 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1851 running 1 tests using 1 parallel processes
1851 running 1 tests using 1 parallel processes
1852
1852
1853 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1853 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1854 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1854 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1855 @@ -1,3 +1,3 @@
1855 @@ -1,3 +1,3 @@
1856 #testcases simple case-with-dashes casewith_-.chars
1856 #testcases simple case-with-dashes casewith_-.chars
1857 $ echo $TESTCASE
1857 $ echo $TESTCASE
1858 - simple
1858 - simple
1859 + case-with-dashes
1859 + case-with-dashes
1860
1860
1861 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1861 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1862 !
1862 !
1863 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1863 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1864 # Ran 1 tests, 0 skipped, 1 failed.
1864 # Ran 1 tests, 0 skipped, 1 failed.
1865 python hash seed: * (glob)
1865 python hash seed: * (glob)
1866 [1]
1866 [1]
1867
1867
1868 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1868 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1869 running 1 tests using 1 parallel processes
1869 running 1 tests using 1 parallel processes
1870
1870
1871 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1871 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1872 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1872 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1873 @@ -1,3 +1,3 @@
1873 @@ -1,3 +1,3 @@
1874 #testcases simple case-with-dashes casewith_-.chars
1874 #testcases simple case-with-dashes casewith_-.chars
1875 $ echo $TESTCASE
1875 $ echo $TESTCASE
1876 - simple
1876 - simple
1877 + casewith_-.chars
1877 + casewith_-.chars
1878
1878
1879 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1879 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1880 !
1880 !
1881 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1881 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1882 # Ran 1 tests, 0 skipped, 1 failed.
1882 # Ran 1 tests, 0 skipped, 1 failed.
1883 python hash seed: * (glob)
1883 python hash seed: * (glob)
1884 [1]
1884 [1]
1885
1885
1886 Test automatic pattern replacement
1886 Test automatic pattern replacement
1887 ==================================
1887 ==================================
1888
1888
1889 $ cat << EOF >> common-pattern.py
1889 $ cat << EOF >> common-pattern.py
1890 > substitutions = [
1890 > substitutions = [
1891 > (br'foo-(.*)\\b',
1891 > (br'foo-(.*)\\b',
1892 > br'\$XXX=\\1\$'),
1892 > br'\$XXX=\\1\$'),
1893 > (br'bar\\n',
1893 > (br'bar\\n',
1894 > br'\$YYY$\\n'),
1894 > br'\$YYY$\\n'),
1895 > ]
1895 > ]
1896 > EOF
1896 > EOF
1897
1897
1898 $ cat << EOF >> test-substitution.t
1898 $ cat << EOF >> test-substitution.t
1899 > $ echo foo-12
1899 > $ echo foo-12
1900 > \$XXX=12$
1900 > \$XXX=12$
1901 > $ echo foo-42
1901 > $ echo foo-42
1902 > \$XXX=42$
1902 > \$XXX=42$
1903 > $ echo bar prior
1903 > $ echo bar prior
1904 > bar prior
1904 > bar prior
1905 > $ echo lastbar
1905 > $ echo lastbar
1906 > last\$YYY$
1906 > last\$YYY$
1907 > $ echo foo-bar foo-baz
1907 > $ echo foo-bar foo-baz
1908 > EOF
1908 > EOF
1909
1909
1910 $ rt test-substitution.t
1910 $ rt test-substitution.t
1911 running 1 tests using 1 parallel processes
1911 running 1 tests using 1 parallel processes
1912
1912
1913 --- $TESTTMP/anothertests/cases/test-substitution.t
1913 --- $TESTTMP/anothertests/cases/test-substitution.t
1914 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1914 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1915 @@ -7,3 +7,4 @@
1915 @@ -7,3 +7,4 @@
1916 $ echo lastbar
1916 $ echo lastbar
1917 last$YYY$
1917 last$YYY$
1918 $ echo foo-bar foo-baz
1918 $ echo foo-bar foo-baz
1919 + $XXX=bar foo-baz$
1919 + $XXX=bar foo-baz$
1920
1920
1921 ERROR: test-substitution.t output changed
1921 ERROR: test-substitution.t output changed
1922 !
1922 !
1923 Failed test-substitution.t: output changed
1923 Failed test-substitution.t: output changed
1924 # Ran 1 tests, 0 skipped, 1 failed.
1924 # Ran 1 tests, 0 skipped, 1 failed.
1925 python hash seed: * (glob)
1925 python hash seed: * (glob)
1926 [1]
1926 [1]
1927
1927
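The substitutions list in common-pattern.py is a sequence of (pattern, replacement) byte regexes applied to test output before it is compared, which is why the literal 'foo-12' above matches the expected '$XXX=12$' line. A minimal sketch of applying such a list with the standard re module; the helper name is made up for the example.

    import re

    substitutions = [
        (br'foo-(.*)\b', br'$XXX=\1$'),
        (br'bar\n', br'$YYY$\n'),
    ]

    def apply_substitutions(line, subs=substitutions):
        # Apply each (pattern, replacement) pair in order to one output line.
        for pattern, replacement in subs:
            line = re.sub(pattern, replacement, line)
        return line

    # apply_substitutions(b'foo-12\n') -> b'$XXX=12$\n'
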
1928 --extra-config-opt works
1928 --extra-config-opt works
1929
1929
1930 $ cat << EOF >> test-config-opt.t
1930 $ cat << EOF >> test-config-opt.t
1931 > $ hg init test-config-opt
1931 > $ hg init test-config-opt
1932 > $ hg -R test-config-opt purge
1932 > $ hg -R test-config-opt purge
1933 > EOF
1933 > EOF
1934
1934
1935 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1935 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1936 running 1 tests using 1 parallel processes
1936 running 1 tests using 1 parallel processes
1937 .
1937 .
1938 # Ran 1 tests, 0 skipped, 0 failed.
1938 # Ran 1 tests, 0 skipped, 0 failed.