##// END OF EJS Templates
run-tests: convert to argparse...
Gregory Szorc -
r35188:b4b0aed7 default
parent child Browse files
Show More
@@ -1,2961 +1,2962 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 import argparse
48 49 import difflib
49 50 import distutils.version as version
50 51 import errno
51 52 import json
52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 try:
74 74 import shlex
75 75 shellquote = shlex.quote
76 76 except (ImportError, AttributeError):
77 77 import pipes
78 78 shellquote = pipes.quote
79 79
80 80 if os.environ.get('RTUNICODEPEDANTRY', False):
81 81 try:
82 82 reload(sys)
83 83 sys.setdefaultencoding("undefined")
84 84 except NameError:
85 85 pass
86 86
87 87 origenviron = os.environ.copy()
88 88 osenvironb = getattr(os, 'environb', os.environ)
89 89 processlock = threading.Lock()
90 90
91 91 pygmentspresent = False
92 92 # ANSI color is unsupported prior to Windows 10
93 93 if os.name != 'nt':
94 94 try: # is pygments installed
95 95 import pygments
96 96 import pygments.lexers as lexers
97 97 import pygments.lexer as lexer
98 98 import pygments.formatters as formatters
99 99 import pygments.token as token
100 100 import pygments.style as style
101 101 pygmentspresent = True
102 102 difflexer = lexers.DiffLexer()
103 103 terminal256formatter = formatters.Terminal256Formatter()
104 104 except ImportError:
105 105 pass
106 106
107 107 if pygmentspresent:
108 108 class TestRunnerStyle(style.Style):
109 109 default_style = ""
110 110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 114 styles = {
115 115 skipped: '#e5e5e5',
116 116 skippedname: '#00ffff',
117 117 failed: '#7f0000',
118 118 failedname: '#ff0000',
119 119 }
120 120
121 121 class TestRunnerLexer(lexer.RegexLexer):
122 122 tokens = {
123 123 'root': [
124 124 (r'^Skipped', token.Generic.Skipped, 'skipped'),
125 125 (r'^Failed ', token.Generic.Failed, 'failed'),
126 126 (r'^ERROR: ', token.Generic.Failed, 'failed'),
127 127 ],
128 128 'skipped': [
129 129 (r'[\w-]+\.(t|py)', token.Generic.SName),
130 130 (r':.*', token.Generic.Skipped),
131 131 ],
132 132 'failed': [
133 133 (r'[\w-]+\.(t|py)', token.Generic.FName),
134 134 (r'(:| ).*', token.Generic.Failed),
135 135 ]
136 136 }
137 137
138 138 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
139 139 runnerlexer = TestRunnerLexer()
140 140
141 141 if sys.version_info > (3, 5, 0):
142 142 PYTHON3 = True
143 143 xrange = range # we use xrange in one place, and we'd rather not use range
144 144 def _bytespath(p):
145 145 if p is None:
146 146 return p
147 147 return p.encode('utf-8')
148 148
149 149 def _strpath(p):
150 150 if p is None:
151 151 return p
152 152 return p.decode('utf-8')
153 153
154 154 elif sys.version_info >= (3, 0, 0):
155 155 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
156 156 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
 157  157     sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
158 158 else:
159 159 PYTHON3 = False
160 160
161 161 # In python 2.x, path operations are generally done using
162 162 # bytestrings by default, so we don't have to do any extra
163 163 # fiddling there. We define the wrapper functions anyway just to
164 164 # help keep code consistent between platforms.
165 165 def _bytespath(p):
166 166 return p
167 167
168 168 _strpath = _bytespath
169 169
170 170 # For Windows support
171 171 wifexited = getattr(os, "WIFEXITED", lambda x: False)
172 172
173 173 # Whether to use IPv6
174 174 def checksocketfamily(name, port=20058):
175 175 """return true if we can listen on localhost using family=name
176 176
177 177 name should be either 'AF_INET', or 'AF_INET6'.
178 178 port being used is okay - EADDRINUSE is considered as successful.
179 179 """
180 180 family = getattr(socket, name, None)
181 181 if family is None:
182 182 return False
183 183 try:
184 184 s = socket.socket(family, socket.SOCK_STREAM)
185 185 s.bind(('localhost', port))
186 186 s.close()
187 187 return True
188 188 except socket.error as exc:
189 189 if exc.errno == errno.EADDRINUSE:
190 190 return True
191 191 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
192 192 return False
193 193 else:
194 194 raise
195 195 else:
196 196 return False
197 197
198 198 # useipv6 will be set by parseargs
199 199 useipv6 = None
200 200
201 201 def checkportisavailable(port):
202 202 """return true if a port seems free to bind on localhost"""
203 203 if useipv6:
204 204 family = socket.AF_INET6
205 205 else:
206 206 family = socket.AF_INET
207 207 try:
208 208 s = socket.socket(family, socket.SOCK_STREAM)
209 209 s.bind(('localhost', port))
210 210 s.close()
211 211 return True
212 212 except socket.error as exc:
213 213 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
214 214 errno.EPROTONOSUPPORT):
215 215 raise
216 216 return False
217 217
218 218 closefds = os.name == 'posix'
219 219 def Popen4(cmd, wd, timeout, env=None):
220 220 processlock.acquire()
221 221 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
222 222 close_fds=closefds,
223 223 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
224 224 stderr=subprocess.STDOUT)
225 225 processlock.release()
226 226
227 227 p.fromchild = p.stdout
228 228 p.tochild = p.stdin
229 229 p.childerr = p.stderr
230 230
231 231 p.timeout = False
232 232 if timeout:
233 233 def t():
234 234 start = time.time()
235 235 while time.time() - start < timeout and p.returncode is None:
236 236 time.sleep(.1)
237 237 p.timeout = True
238 238 if p.returncode is None:
239 239 terminate(p)
240 240 threading.Thread(target=t).start()
241 241
242 242 return p
243 243
244 244 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
245 245 IMPL_PATH = b'PYTHONPATH'
246 246 if 'java' in sys.platform:
247 247 IMPL_PATH = b'JYTHONPATH'
248 248
249 249 defaults = {
250 250 'jobs': ('HGTEST_JOBS', 1),
251 251 'timeout': ('HGTEST_TIMEOUT', 180),
252 252 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
253 253 'port': ('HGTEST_PORT', 20059),
254 254 'shell': ('HGTEST_SHELL', 'sh'),
255 255 }
256 256
257 257 def canonpath(path):
258 258 return os.path.realpath(os.path.expanduser(path))
259 259
260 260 def parselistfiles(files, listtype, warn=True):
261 261 entries = dict()
262 262 for filename in files:
263 263 try:
264 264 path = os.path.expanduser(os.path.expandvars(filename))
265 265 f = open(path, "rb")
266 266 except IOError as err:
267 267 if err.errno != errno.ENOENT:
268 268 raise
269 269 if warn:
270 270 print("warning: no such %s file: %s" % (listtype, filename))
271 271 continue
272 272
273 273 for line in f.readlines():
274 274 line = line.split(b'#', 1)[0].strip()
275 275 if line:
276 276 entries[line] = filename
277 277
278 278 f.close()
279 279 return entries
280 280
281 281 def parsettestcases(path):
282 282 """read a .t test file, return a set of test case names
283 283
284 284 If path does not exist, return an empty set.
285 285 """
286 286 cases = set()
287 287 try:
288 288 with open(path, 'rb') as f:
289 289 for l in f:
290 290 if l.startswith(b'#testcases '):
291 291 cases.update(l[11:].split())
292 292 except IOError as ex:
293 293 if ex.errno != errno.ENOENT:
294 294 raise
295 295 return cases
296 296
297 297 def getparser():
298 298 """Obtain the OptionParser used by the CLI."""
299 parser = optparse.OptionParser("%prog [options] [tests]")
299 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
300 300
301 301 # keep these sorted
302 parser.add_option("--blacklist", action="append",
302 parser.add_argument("--blacklist", action="append",
303 303 help="skip tests listed in the specified blacklist file")
304 parser.add_option("--whitelist", action="append",
304 parser.add_argument("--whitelist", action="append",
305 305 help="always run tests listed in the specified whitelist file")
306 parser.add_option("--test-list", action="append",
307 help="read tests to run from the specified file")
308 parser.add_option("--changed", type="string",
306 parser.add_argument("--test-list", action="append",
307 help="read tests to run from the specified file")
308 parser.add_argument("--changed",
309 309 help="run tests that are changed in parent rev or working directory")
310 parser.add_option("-C", "--annotate", action="store_true",
310 parser.add_argument("-C", "--annotate", action="store_true",
311 311 help="output files annotated with coverage")
312 parser.add_option("-c", "--cover", action="store_true",
312 parser.add_argument("-c", "--cover", action="store_true",
313 313 help="print a test coverage report")
314 parser.add_option("--color", choices=["always", "auto", "never"],
315 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
316 help="colorisation: always|auto|never (default: auto)")
317 parser.add_option("-d", "--debug", action="store_true",
314 parser.add_argument("--color", choices=["always", "auto", "never"],
315 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
316 help="colorisation: always|auto|never (default: auto)")
317 parser.add_argument("-d", "--debug", action="store_true",
318 318 help="debug mode: write output of test scripts to console"
319 319 " rather than capturing and diffing it (disables timeout)")
320 parser.add_option("-f", "--first", action="store_true",
320 parser.add_argument("-f", "--first", action="store_true",
321 321 help="exit on the first test failure")
322 parser.add_option("-H", "--htmlcov", action="store_true",
322 parser.add_argument("-H", "--htmlcov", action="store_true",
323 323 help="create an HTML report of the coverage of the files")
324 parser.add_option("-i", "--interactive", action="store_true",
324 parser.add_argument("-i", "--interactive", action="store_true",
325 325 help="prompt to accept changed output")
326 parser.add_option("-j", "--jobs", type="int",
326 parser.add_argument("-j", "--jobs", type=int,
327 327 help="number of jobs to run in parallel"
328 328 " (default: $%s or %d)" % defaults['jobs'])
329 parser.add_option("--keep-tmpdir", action="store_true",
329 parser.add_argument("--keep-tmpdir", action="store_true",
330 330 help="keep temporary directory after running tests")
331 parser.add_option("-k", "--keywords",
331 parser.add_argument("-k", "--keywords",
332 332 help="run tests matching keywords")
333 parser.add_option("--list-tests", action="store_true",
333 parser.add_argument("--list-tests", action="store_true",
334 334 help="list tests instead of running them")
335 parser.add_option("-l", "--local", action="store_true",
335 parser.add_argument("-l", "--local", action="store_true",
336 336 help="shortcut for --with-hg=<testdir>/../hg, "
337 337 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
338 parser.add_option("--loop", action="store_true",
338 parser.add_argument("--loop", action="store_true",
339 339 help="loop tests repeatedly")
340 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
340 parser.add_argument("--runs-per-test", type=int, dest="runs_per_test",
341 341 help="run each test N times (default=1)", default=1)
342 parser.add_option("-n", "--nodiff", action="store_true",
342 parser.add_argument("-n", "--nodiff", action="store_true",
343 343 help="skip showing test changes")
344 parser.add_option("--outputdir", type="string",
344 parser.add_argument("--outputdir",
345 345 help="directory to write error logs to (default=test directory)")
346 parser.add_option("-p", "--port", type="int",
346 parser.add_argument("-p", "--port", type=int,
347 347 help="port on which servers should listen"
348 348 " (default: $%s or %d)" % defaults['port'])
349 parser.add_option("--compiler", type="string",
349 parser.add_argument("--compiler",
350 350 help="compiler to build with")
351 parser.add_option("--pure", action="store_true",
351 parser.add_argument("--pure", action="store_true",
352 352 help="use pure Python code instead of C extensions")
353 parser.add_option("-R", "--restart", action="store_true",
353 parser.add_argument("-R", "--restart", action="store_true",
354 354 help="restart at last error")
355 parser.add_option("-r", "--retest", action="store_true",
355 parser.add_argument("-r", "--retest", action="store_true",
356 356 help="retest failed tests")
357 parser.add_option("-S", "--noskips", action="store_true",
357 parser.add_argument("-S", "--noskips", action="store_true",
358 358 help="don't report skip tests verbosely")
359 parser.add_option("--shell", type="string",
359 parser.add_argument("--shell",
360 360 help="shell to use (default: $%s or %s)" % defaults['shell'])
361 parser.add_option("-t", "--timeout", type="int",
361 parser.add_argument("-t", "--timeout", type=int,
362 362 help="kill errant tests after TIMEOUT seconds"
363 363 " (default: $%s or %d)" % defaults['timeout'])
364 parser.add_option("--slowtimeout", type="int",
364 parser.add_argument("--slowtimeout", type=int,
365 365 help="kill errant slow tests after SLOWTIMEOUT seconds"
366 366 " (default: $%s or %d)" % defaults['slowtimeout'])
367 parser.add_option("--time", action="store_true",
367 parser.add_argument("--time", action="store_true",
368 368 help="time how long each test takes")
369 parser.add_option("--json", action="store_true",
370 help="store test result data in 'report.json' file")
371 parser.add_option("--tmpdir", type="string",
369 parser.add_argument("--json", action="store_true",
370 help="store test result data in 'report.json' file")
371 parser.add_argument("--tmpdir",
372 372 help="run tests in the given temporary directory"
373 373 " (implies --keep-tmpdir)")
374 parser.add_option("-v", "--verbose", action="store_true",
374 parser.add_argument("-v", "--verbose", action="store_true",
375 375 help="output verbose messages")
376 parser.add_option("--xunit", type="string",
377 help="record xunit results at specified path")
378 parser.add_option("--view", type="string",
376 parser.add_argument("--xunit",
377 help="record xunit results at specified path")
378 parser.add_argument("--view",
379 379 help="external diff viewer")
380 parser.add_option("--with-hg", type="string",
380 parser.add_argument("--with-hg",
381 381 metavar="HG",
382 382 help="test using specified hg script rather than a "
383 383 "temporary installation")
384 parser.add_option("--chg", action="store_true",
385 help="install and use chg wrapper in place of hg")
386 parser.add_option("--with-chg", metavar="CHG",
387 help="use specified chg wrapper in place of hg")
388 parser.add_option("--ipv6", action="store_true",
389 help="prefer IPv6 to IPv4 for network related tests")
390 parser.add_option("-3", "--py3k-warnings", action="store_true",
384 parser.add_argument("--chg", action="store_true",
385 help="install and use chg wrapper in place of hg")
386 parser.add_argument("--with-chg", metavar="CHG",
387 help="use specified chg wrapper in place of hg")
388 parser.add_argument("--ipv6", action="store_true",
389 help="prefer IPv6 to IPv4 for network related tests")
390 parser.add_argument("-3", "--py3k-warnings", action="store_true",
391 391 help="enable Py3k warnings on Python 2.7+")
392 392 # This option should be deleted once test-check-py3-compat.t and other
393 393 # Python 3 tests run with Python 3.
394 parser.add_option("--with-python3", metavar="PYTHON3",
395 help="Python 3 interpreter (if running under Python 2)"
396 " (TEMPORARY)")
397 parser.add_option('--extra-config-opt', action="append",
398 help='set the given config opt in the test hgrc')
399 parser.add_option('--random', action="store_true",
400 help='run tests in random order')
401 parser.add_option('--profile-runner', action='store_true',
402 help='run statprof on run-tests')
403 parser.add_option('--allow-slow-tests', action='store_true',
404 help='allow extremely slow tests')
405 parser.add_option('--showchannels', action='store_true',
406 help='show scheduling channels')
407 parser.add_option('--known-good-rev', type="string",
408 metavar="known_good_rev",
409 help=("Automatically bisect any failures using this "
410 "revision as a known-good revision."))
411 parser.add_option('--bisect-repo', type="string",
412 metavar='bisect_repo',
413 help=("Path of a repo to bisect. Use together with "
414 "--known-good-rev"))
394 parser.add_argument("--with-python3", metavar="PYTHON3",
395 help="Python 3 interpreter (if running under Python 2)"
396 " (TEMPORARY)")
397 parser.add_argument('--extra-config-opt', action="append",
398 help='set the given config opt in the test hgrc')
399 parser.add_argument('--random', action="store_true",
400 help='run tests in random order')
401 parser.add_argument('--profile-runner', action='store_true',
402 help='run statprof on run-tests')
403 parser.add_argument('--allow-slow-tests', action='store_true',
404 help='allow extremely slow tests')
405 parser.add_argument('--showchannels', action='store_true',
406 help='show scheduling channels')
407 parser.add_argument('--known-good-rev',
408 metavar="known_good_rev",
409 help=("Automatically bisect any failures using this "
410 "revision as a known-good revision."))
411 parser.add_argument('--bisect-repo',
412 metavar='bisect_repo',
413 help=("Path of a repo to bisect. Use together with "
414 "--known-good-rev"))
415
416 parser.add_argument('tests', metavar='TESTS', nargs='*',
417 help='Tests to run')
415 418
416 419 for option, (envvar, default) in defaults.items():
417 420 defaults[option] = type(default)(os.environ.get(envvar, default))
418 421 parser.set_defaults(**defaults)
419 422
420 423 return parser
421 424
422 425 def parseargs(args, parser):
423 426 """Parse arguments with our OptionParser and validate results."""
424 (options, args) = parser.parse_args(args)
427 options = parser.parse_args(args)
425 428
426 429 # jython is always pure
427 430 if 'java' in sys.platform or '__pypy__' in sys.modules:
428 431 options.pure = True
429 432
430 433 if options.with_hg:
431 434 options.with_hg = canonpath(_bytespath(options.with_hg))
432 435 if not (os.path.isfile(options.with_hg) and
433 436 os.access(options.with_hg, os.X_OK)):
434 437 parser.error('--with-hg must specify an executable hg script')
435 438 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
436 439 sys.stderr.write('warning: --with-hg should specify an hg script\n')
437 440 if options.local:
438 441 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
439 442 reporootdir = os.path.dirname(testdir)
440 443 pathandattrs = [(b'hg', 'with_hg')]
441 444 if options.chg:
442 445 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
443 446 for relpath, attr in pathandattrs:
444 447 binpath = os.path.join(reporootdir, relpath)
445 448 if os.name != 'nt' and not os.access(binpath, os.X_OK):
446 449 parser.error('--local specified, but %r not found or '
447 450 'not executable' % binpath)
448 451 setattr(options, attr, binpath)
449 452
450 453 if (options.chg or options.with_chg) and os.name == 'nt':
451 454 parser.error('chg does not work on %s' % os.name)
452 455 if options.with_chg:
453 456 options.chg = False # no installation to temporary location
454 457 options.with_chg = canonpath(_bytespath(options.with_chg))
455 458 if not (os.path.isfile(options.with_chg) and
456 459 os.access(options.with_chg, os.X_OK)):
457 460 parser.error('--with-chg must specify a chg executable')
458 461 if options.chg and options.with_hg:
459 462 # chg shares installation location with hg
460 463 parser.error('--chg does not work when --with-hg is specified '
461 464 '(use --with-chg instead)')
462 465
463 466 if options.color == 'always' and not pygmentspresent:
464 467 sys.stderr.write('warning: --color=always ignored because '
465 468 'pygments is not installed\n')
466 469
467 470 if options.bisect_repo and not options.known_good_rev:
468 471 parser.error("--bisect-repo cannot be used without --known-good-rev")
469 472
470 473 global useipv6
471 474 if options.ipv6:
472 475 useipv6 = checksocketfamily('AF_INET6')
473 476 else:
474 477 # only use IPv6 if IPv4 is unavailable and IPv6 is available
475 478 useipv6 = ((not checksocketfamily('AF_INET'))
476 479 and checksocketfamily('AF_INET6'))
477 480
478 481 options.anycoverage = options.cover or options.annotate or options.htmlcov
479 482 if options.anycoverage:
480 483 try:
481 484 import coverage
482 485 covver = version.StrictVersion(coverage.__version__).version
483 486 if covver < (3, 3):
484 487 parser.error('coverage options require coverage 3.3 or later')
485 488 except ImportError:
486 489 parser.error('coverage options now require the coverage package')
487 490
488 491 if options.anycoverage and options.local:
489 492 # this needs some path mangling somewhere, I guess
490 493 parser.error("sorry, coverage options do not work when --local "
491 494 "is specified")
492 495
493 496 if options.anycoverage and options.with_hg:
494 497 parser.error("sorry, coverage options do not work when --with-hg "
495 498 "is specified")
496 499
497 500 global verbose
498 501 if options.verbose:
499 502 verbose = ''
500 503
501 504 if options.tmpdir:
502 505 options.tmpdir = canonpath(options.tmpdir)
503 506
504 507 if options.jobs < 1:
505 508 parser.error('--jobs must be positive')
506 509 if options.interactive and options.debug:
507 510 parser.error("-i/--interactive and -d/--debug are incompatible")
508 511 if options.debug:
509 512 if options.timeout != defaults['timeout']:
510 513 sys.stderr.write(
511 514 'warning: --timeout option ignored with --debug\n')
512 515 if options.slowtimeout != defaults['slowtimeout']:
513 516 sys.stderr.write(
514 517 'warning: --slowtimeout option ignored with --debug\n')
515 518 options.timeout = 0
516 519 options.slowtimeout = 0
517 520 if options.py3k_warnings:
518 521 if PYTHON3:
519 522 parser.error(
520 523 '--py3k-warnings can only be used on Python 2.7')
521 524 if options.with_python3:
522 525 if PYTHON3:
523 526 parser.error('--with-python3 cannot be used when executing with '
524 527 'Python 3')
525 528
526 529 options.with_python3 = canonpath(options.with_python3)
527 530 # Verify Python3 executable is acceptable.
528 531 proc = subprocess.Popen([options.with_python3, b'--version'],
529 532 stdout=subprocess.PIPE,
530 533 stderr=subprocess.STDOUT)
531 534 out, _err = proc.communicate()
532 535 ret = proc.wait()
533 536 if ret != 0:
534 537 parser.error('could not determine version of python 3')
535 538 if not out.startswith('Python '):
536 539 parser.error('unexpected output from python3 --version: %s' %
537 540 out)
538 541 vers = version.LooseVersion(out[len('Python '):])
539 542 if vers < version.LooseVersion('3.5.0'):
540 543 parser.error('--with-python3 version must be 3.5.0 or greater; '
541 544 'got %s' % out)
542 545
543 546 if options.blacklist:
544 547 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
545 548 if options.whitelist:
546 549 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
547 550 else:
548 551 options.whitelisted = {}
549 552
550 553 if options.showchannels:
551 554 options.nodiff = True
552 555
553 return (options, args)
556 return options
554 557
555 558 def rename(src, dst):
556 559 """Like os.rename(), trade atomicity and opened files friendliness
557 560 for existing destination support.
558 561 """
559 562 shutil.copy(src, dst)
560 563 os.remove(src)
561 564
562 565 _unified_diff = difflib.unified_diff
563 566 if PYTHON3:
564 567 import functools
565 568 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
566 569
567 570 def getdiff(expected, output, ref, err):
568 571 servefail = False
569 572 lines = []
570 573 for line in _unified_diff(expected, output, ref, err):
571 574 if line.startswith(b'+++') or line.startswith(b'---'):
572 575 line = line.replace(b'\\', b'/')
573 576 if line.endswith(b' \n'):
574 577 line = line[:-2] + b'\n'
575 578 lines.append(line)
576 579 if not servefail and line.startswith(
577 580 b'+ abort: child process failed to start'):
578 581 servefail = True
579 582
580 583 return servefail, lines
581 584
582 585 verbose = False
583 586 def vlog(*msg):
584 587 """Log only when in verbose mode."""
585 588 if verbose is False:
586 589 return
587 590
588 591 return log(*msg)
589 592
590 593 # Bytes that break XML even in a CDATA block: control characters 0-31
591 594 # sans \t, \n and \r
592 595 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
593 596
594 597 # Match feature conditionalized output lines in the form, capturing the feature
 595  598 # list in group 2, and the preceding line output in group 1:
596 599 #
597 600 # output..output (feature !)\n
598 601 optline = re.compile(b'(.*) \((.+?) !\)\n$')
599 602
600 603 def cdatasafe(data):
601 604 """Make a string safe to include in a CDATA block.
602 605
603 606 Certain control characters are illegal in a CDATA block, and
604 607 there's no way to include a ]]> in a CDATA either. This function
605 608 replaces illegal bytes with ? and adds a space between the ]] so
606 609 that it won't break the CDATA block.
607 610 """
608 611 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
609 612
610 613 def log(*msg):
611 614 """Log something to stdout.
612 615
613 616 Arguments are strings to print.
614 617 """
615 618 with iolock:
616 619 if verbose:
617 620 print(verbose, end=' ')
618 621 for m in msg:
619 622 print(m, end=' ')
620 623 print()
621 624 sys.stdout.flush()
622 625
623 626 def highlightdiff(line, color):
624 627 if not color:
625 628 return line
626 629 assert pygmentspresent
627 630 return pygments.highlight(line.decode('latin1'), difflexer,
628 631 terminal256formatter).encode('latin1')
629 632
630 633 def highlightmsg(msg, color):
631 634 if not color:
632 635 return msg
633 636 assert pygmentspresent
634 637 return pygments.highlight(msg, runnerlexer, runnerformatter)
635 638
636 639 def terminate(proc):
637 640 """Terminate subprocess"""
638 641 vlog('# Terminating process %d' % proc.pid)
639 642 try:
640 643 proc.terminate()
641 644 except OSError:
642 645 pass
643 646
644 647 def killdaemons(pidfile):
645 648 import killdaemons as killmod
646 649 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
647 650 logfn=vlog)
648 651
649 652 class Test(unittest.TestCase):
650 653 """Encapsulates a single, runnable test.
651 654
652 655 While this class conforms to the unittest.TestCase API, it differs in that
653 656 instances need to be instantiated manually. (Typically, unittest.TestCase
654 657 classes are instantiated automatically by scanning modules.)
655 658 """
656 659
657 660 # Status code reserved for skipped tests (used by hghave).
658 661 SKIPPED_STATUS = 80
659 662
660 663 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
661 664 debug=False,
662 665 timeout=None,
663 666 startport=None, extraconfigopts=None,
664 667 py3kwarnings=False, shell=None, hgcommand=None,
665 668 slowtimeout=None, usechg=False,
666 669 useipv6=False):
667 670 """Create a test from parameters.
668 671
669 672 path is the full path to the file defining the test.
670 673
671 674 tmpdir is the main temporary directory to use for this test.
672 675
673 676 keeptmpdir determines whether to keep the test's temporary directory
674 677 after execution. It defaults to removal (False).
675 678
676 679 debug mode will make the test execute verbosely, with unfiltered
677 680 output.
678 681
679 682 timeout controls the maximum run time of the test. It is ignored when
680 683 debug is True. See slowtimeout for tests with #require slow.
681 684
682 685 slowtimeout overrides timeout if the test has #require slow.
683 686
684 687 startport controls the starting port number to use for this test. Each
685 688 test will reserve 3 port numbers for execution. It is the caller's
686 689 responsibility to allocate a non-overlapping port range to Test
687 690 instances.
688 691
689 692 extraconfigopts is an iterable of extra hgrc config options. Values
690 693 must have the form "key=value" (something understood by hgrc). Values
691 694 of the form "foo.key=value" will result in "[foo] key=value".
692 695
693 696 py3kwarnings enables Py3k warnings.
694 697
695 698 shell is the shell to execute tests in.
696 699 """
697 700 if timeout is None:
698 701 timeout = defaults['timeout']
699 702 if startport is None:
700 703 startport = defaults['port']
701 704 if slowtimeout is None:
702 705 slowtimeout = defaults['slowtimeout']
703 706 self.path = path
704 707 self.bname = os.path.basename(path)
705 708 self.name = _strpath(self.bname)
706 709 self._testdir = os.path.dirname(path)
707 710 self._outputdir = outputdir
708 711 self._tmpname = os.path.basename(path)
709 712 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
710 713
711 714 self._threadtmp = tmpdir
712 715 self._keeptmpdir = keeptmpdir
713 716 self._debug = debug
714 717 self._timeout = timeout
715 718 self._slowtimeout = slowtimeout
716 719 self._startport = startport
717 720 self._extraconfigopts = extraconfigopts or []
718 721 self._py3kwarnings = py3kwarnings
719 722 self._shell = _bytespath(shell)
720 723 self._hgcommand = hgcommand or b'hg'
721 724 self._usechg = usechg
722 725 self._useipv6 = useipv6
723 726
724 727 self._aborted = False
725 728 self._daemonpids = []
726 729 self._finished = None
727 730 self._ret = None
728 731 self._out = None
729 732 self._skipped = None
730 733 self._testtmp = None
731 734 self._chgsockdir = None
732 735
733 736 self._refout = self.readrefout()
734 737
735 738 def readrefout(self):
736 739 """read reference output"""
737 740 # If we're not in --debug mode and reference output file exists,
738 741 # check test output against it.
739 742 if self._debug:
740 743 return None # to match "out is None"
741 744 elif os.path.exists(self.refpath):
742 745 with open(self.refpath, 'rb') as f:
743 746 return f.read().splitlines(True)
744 747 else:
745 748 return []
746 749
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        # unittest's __repr__ looks this attribute up; alias it to the test
        # name since we have no conventional test_* method.
        return self.name
751 754
    def __str__(self):
        """Return the test name for display purposes."""
        return self.name
754 757
    def shortDescription(self):
        """Return the one-line description unittest shows: the test name."""
        return self.name
757 760
758 761 def setUp(self):
759 762 """Tasks to perform before run()."""
760 763 self._finished = False
761 764 self._ret = None
762 765 self._out = None
763 766 self._skipped = None
764 767
765 768 try:
766 769 os.mkdir(self._threadtmp)
767 770 except OSError as e:
768 771 if e.errno != errno.EEXIST:
769 772 raise
770 773
771 774 name = self._tmpname
772 775 self._testtmp = os.path.join(self._threadtmp, name)
773 776 os.mkdir(self._testtmp)
774 777
775 778 # Remove any previous output files.
776 779 if os.path.exists(self.errpath):
777 780 try:
778 781 os.remove(self.errpath)
779 782 except OSError as e:
780 783 # We might have raced another test to clean up a .err
781 784 # file, so ignore ENOENT when removing a previous .err
782 785 # file.
783 786 if e.errno != errno.ENOENT:
784 787 raise
785 788
786 789 if self._usechg:
787 790 self._chgsockdir = os.path.join(self._threadtmp,
788 791 b'%s.chgsock' % name)
789 792 os.mkdir(self._chgsockdir)
790 793
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A setUp failure is an error, not a test failure.
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A tearDown failure turns a passing test into an error.
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            # Always pair startTest with stopTest, even on abort.
            result.stopTest(self, interrupted=self._aborted)
845 848
    def runTest(self):
        """Run this test instance.

        Runs the test command and compares its output to the reference
        output. Failures are reported by raising (via self.fail) or by
        raising unittest.SkipTest; the raw results are also stored on the
        instance (_ret, _out) for tearDown() to consult.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Turn an exit status into a human-readable failure reason.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
911 914
    def tearDown(self):
        """Tasks to perform after run()."""
        # Kill any daemons the test left behind before cleaning up.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist failing output to the .err file so it can be diffed or
        # used to refresh the reference output later.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
939 942
    def _run(self, env):
        """Execute the test body (template method).

        env is the environment mapping built by _getenv(). The base
        implementation skips, since a bare Test has nothing to execute.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
943 946
    def abort(self):
        """Terminate execution of this test."""
        # _run implementations check this flag after their command finishes
        # and raise KeyboardInterrupt; nothing is torn down here directly.
        self._aborted = True
947 950
948 951 def _portmap(self, i):
949 952 offset = b'' if i == 0 else b'%d' % i
950 953 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
951 954
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # A test directory may ship a common-pattern.py providing extra
        # 'substitutions' pairs shared by all its tests.
        testdir = os.path.dirname(self.path)
        replacementfile = os.path.join(testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            r.extend(data.get('substitutions', ()))
        return r
982 985
    def _escapepath(self, p):
        """Return a bytes regex that matches path p in test output.

        On Windows the pattern matches letters case-insensitively and
        treats / and \\ as interchangeable; elsewhere the path is simply
        escaped literally.

        NOTE(review): on Python 3 iterating bytes yields ints, so the
        c.isalpha()/c.lower() calls in the nt branch look Python 2-only --
        confirm Windows/py3 behavior.
        """
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
992 995
993 996 def _localip(self):
994 997 if self._useipv6:
995 998 return b'::1'
996 999 else:
997 1000 return b'127.0.0.1'
998 1001
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment.

        testenv is the environment mapping for this test; the generated
        script's path is recorded in testenv['HGTEST_RESTOREENV'].
        """
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables the harness introduced must be unset on restore.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1027 1030
1028 1031 def _getenv(self):
1029 1032 """Obtain environment variables to use during test execution."""
1030 1033 def defineport(i):
1031 1034 offset = '' if i == 0 else '%s' % i
1032 1035 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1033 1036 env = os.environ.copy()
1034 1037 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1035 1038 env['HGEMITWARNINGS'] = '1'
1036 1039 env['TESTTMP'] = self._testtmp
1037 1040 env['HOME'] = self._testtmp
1038 1041 # This number should match portneeded in _getport
1039 1042 for port in xrange(3):
1040 1043 # This list should be parallel to _portmap in _getreplacements
1041 1044 defineport(port)
1042 1045 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1043 1046 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1044 1047 env["HGEDITOR"] = ('"' + sys.executable + '"'
1045 1048 + ' -c "import sys; sys.exit(0)"')
1046 1049 env["HGMERGE"] = "internal:merge"
1047 1050 env["HGUSER"] = "test"
1048 1051 env["HGENCODING"] = "ascii"
1049 1052 env["HGENCODINGMODE"] = "strict"
1050 1053 env['HGIPV6'] = str(int(self._useipv6))
1051 1054
1052 1055 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1053 1056 # IP addresses.
1054 1057 env['LOCALIP'] = self._localip()
1055 1058
1056 1059 # Reset some environment variables to well-known values so that
1057 1060 # the tests produce repeatable output.
1058 1061 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1059 1062 env['TZ'] = 'GMT'
1060 1063 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1061 1064 env['COLUMNS'] = '80'
1062 1065 env['TERM'] = 'xterm'
1063 1066
1064 1067 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1065 1068 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1066 1069 'NO_PROXY CHGDEBUG').split():
1067 1070 if k in env:
1068 1071 del env[k]
1069 1072
1070 1073 # unset env related to hooks
1071 1074 for k in env.keys():
1072 1075 if k.startswith('HG_'):
1073 1076 del env[k]
1074 1077
1075 1078 if self._usechg:
1076 1079 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1077 1080
1078 1081 return env
1079 1082
1080 1083 def _createhgrc(self, path):
1081 1084 """Create an hgrc file for this test."""
1082 1085 hgrc = open(path, 'wb')
1083 1086 hgrc.write(b'[ui]\n')
1084 1087 hgrc.write(b'slash = True\n')
1085 1088 hgrc.write(b'interactive = False\n')
1086 1089 hgrc.write(b'mergemarkers = detailed\n')
1087 1090 hgrc.write(b'promptecho = True\n')
1088 1091 hgrc.write(b'[defaults]\n')
1089 1092 hgrc.write(b'[devel]\n')
1090 1093 hgrc.write(b'all-warnings = true\n')
1091 1094 hgrc.write(b'default-date = 0 0\n')
1092 1095 hgrc.write(b'[largefiles]\n')
1093 1096 hgrc.write(b'usercache = %s\n' %
1094 1097 (os.path.join(self._testtmp, b'.cache/largefiles')))
1095 1098 hgrc.write(b'[web]\n')
1096 1099 hgrc.write(b'address = localhost\n')
1097 1100 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1098 1101
1099 1102 for opt in self._extraconfigopts:
1100 1103 section, key = opt.split('.', 1)
1101 1104 assert '=' in key, ('extra config opt %s must '
1102 1105 'have an = for assignment' % opt)
1103 1106 hgrc.write(b'[%s]\n%s\n' % (section, key))
1104 1107 hgrc.close()
1105 1108
1106 1109 def fail(self, msg):
1107 1110 # unittest differentiates between errored and failed.
1108 1111 # Failed is denoted by AssertionError (by default at least).
1109 1112 raise AssertionError(msg)
1110 1113
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        exitcode may also be the string 'timeout' when the command exceeded
        its time budget.
        """
        if self._debug:
            # In --debug mode the child inherits our streams, so nothing
            # is captured.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and any daemons it spawned; report the death
            # as SIGTERM (wait-status encoding) if it exited cleanly.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            # On failure, make sure daemons spawned by the test don't linger.
            killdaemons(env['DAEMON_PIDS'])

        # Normalize the captured output before it is compared to the
        # reference output.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1159 1162
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Reference output for a .py test lives in a sibling .out file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Run the Python script and return (exitcode, output lines)."""
        warnflag = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, warnflag, self.path)
        vlog("# Running", cmd)
        # Windows children emit \r\n; normalize so output compares equal.
        res = self._runcommand(cmd, env,
                               normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()

        return res
1178 1181
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
# These patterns are consulted by TTest.globmatch() to decide whether an
# exactly-matching expected line still needs its (glob) marker.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1192 1195
# bchr(i) yields the single-byte bytes object for integer i: on Python 2
# chr() already returns a str/bytes, while Python 3 needs bytes([i]).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1196 1199
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Bytes that cannot appear verbatim in reference output; lines
    # containing them are rewritten with an "(esc)" marker.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        """Initialize; also accepts an extra 'case' keyword argument
        naming the #testcases variant this instance runs."""
        # accept an extra "case" parameter
        case = None
        if 'case' in kwds:
            case = kwds.pop('case')
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Give each case its own display name, .err file and tmp dir.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case

    @property
    def refpath(self):
        # The .t file itself is both the test and its reference output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        """Translate the .t file into a shell script, run it, and merge
        the resulting output back against the expected output."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        # .t file is both reference output and the test input, keep reference
        # output updated with the the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Check feature requirements via the hghave helper script.

        Returns (available, message): available is True when every entry
        in reqs is satisfied; otherwise message carries hghave's stdout.
        Exit code 2 from hghave (bad usage) aborts the whole run.
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        if b'slow' in reqs:
            # "#require slow" upgrades this test to the slow timeout.
            self._timeout = self._slowtimeout
        return True, None

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                # "no-<case>" is true for every case except <case>.
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                # A bare case name is true only when running that case.
                if arg != self._case:
                    return False
            else:
                # Anything else is a feature name to hand to hghave.
                reqs.append(arg)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        """Translate .t source lines into (salt, script, after, expected).

        salt is a unique marker; script is the list of shell script lines
        to execute; after maps source line numbers to non-executable lines
        to merge back into the output; expected maps line numbers to the
        expected output lines for the command at that position.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    # NOTE(review): these "!!!" markers are str while other
                    # queued lines are bytes -- confirm intended on Python 3.
                    after.setdefault(pos, []).append(' !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    # Unmet requirement: replace the whole script with a
                    # skip (exit 80 is SKIPPED_STATUS).
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(' !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(' !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Abort the script when a bare cd fails, so subsequent
                    # commands don't run in the wrong directory.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(' !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge actual output with expected output.

        Returns (exitcode, postout). exitcode is replaced with False when
        the only mismatches were warn-only (e.g. (glob) annotations to
        add or drop).
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '+glob':
                            lout = el[:-1] + ' (glob)\n'
                            r = '' # Warn only this line.
                        elif r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            # Optional line that didn't match: emit it and
                            # retry the same output line against the next
                            # expected line.
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout

    @staticmethod
    def rematch(el, l):
        """Match output line l against "(re)" expected line el (bytes)."""
        try:
            el = b'(?:' + el + b')'
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        """Match output line l against "(glob)" expected line el."""
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    def linematch(self, el, l):
        """Match actual output line l against expected line el.

        Returns a truthy value on match, False on mismatch, "retry" when
        an optional/conditional expected line did not match, or b'+glob'
        when l would match el once a (glob) annotation is added.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                # Optional line.
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep and l.replace(b'\\', b'/') == el:
                return b'+glob'
        return retry

    @staticmethod
    def parsehghaveoutput(lines):
        '''Parse hghave log lines.

        Return tuple of lists (missing, failed):
        * the missing/unknown features
        * the features for which existence check failed'''
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
                failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))

        return missing, failed

    @staticmethod
    def _escapef(m):
        # re.sub callback: map a matched byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Escape unprintable bytes in s for "(esc)" output lines.
        return TTest.ESCAPESUB(TTest._escapef, s)
1632 1635
# Serializes writes to the shared output stream across test threads.
iolock = threading.RLock()
1634 1637
1635 1638 class TestResult(unittest._TextTestResult):
1636 1639 """Holds results when executing via unittest."""
1637 1640 # Don't worry too much about accessing the non-public _TextTestResult.
1638 1641 # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        """Initialize the result collector.

        options is the parsed run-tests options object; options.color is
        consulted here, and other attributes (first, nodiff) are read by
        the add* callbacks.
        """
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        # Decide up front whether output is colorized; 'auto' requires
        # both pygments and a tty.
        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1665 1668
1666 1669 def addFailure(self, test, reason):
1667 1670 self.failures.append((test, reason))
1668 1671
1669 1672 if self._options.first:
1670 1673 self.stop()
1671 1674 else:
1672 1675 with iolock:
1673 1676 if reason == "timed out":
1674 1677 self.stream.write('t')
1675 1678 else:
1676 1679 if not self._options.nodiff:
1677 1680 self.stream.write('\n')
1678 1681 # Exclude the '\n' from highlighting to lex correctly
1679 1682 formatted = 'ERROR: %s output changed\n' % test
1680 1683 self.stream.write(highlightmsg(formatted, self.color))
1681 1684 self.stream.write('!')
1682 1685
1683 1686 self.stream.flush()
1684 1687
1685 1688 def addSuccess(self, test):
1686 1689 with iolock:
1687 1690 super(TestResult, self).addSuccess(test)
1688 1691 self.successes.append(test)
1689 1692
1690 1693 def addError(self, test, err):
1691 1694 super(TestResult, self).addError(test, err)
1692 1695 if self._options.first:
1693 1696 self.stop()
1694 1697
1695 1698 # Polyfill.
1696 1699 def addSkip(self, test, reason):
1697 1700 self.skipped.append((test, reason))
1698 1701 with iolock:
1699 1702 if self.showAll:
1700 1703 self.stream.writeln('skipped %s' % reason)
1701 1704 else:
1702 1705 self.stream.write('s')
1703 1706 self.stream.flush()
1704 1707
1705 1708 def addIgnore(self, test, reason):
1706 1709 self.ignored.append((test, reason))
1707 1710 with iolock:
1708 1711 if self.showAll:
1709 1712 self.stream.writeln('ignored %s' % reason)
1710 1713 else:
1711 1714 if reason not in ('not retesting', "doesn't match keyword"):
1712 1715 self.stream.write('i')
1713 1716 else:
1714 1717 self.testsRun += 1
1715 1718 self.stream.flush()
1716 1719
1717 1720 def addOutputMismatch(self, test, ret, got, expected):
1718 1721 """Record a mismatch in test output for a particular test."""
1719 1722 if self.shouldStop:
1720 1723 # don't print, some other test case already failed and
1721 1724 # printed, we're just stale and probably failed due to our
1722 1725 # temp dir getting cleaned up.
1723 1726 return
1724 1727
1725 1728 accepted = False
1726 1729 lines = []
1727 1730
1728 1731 with iolock:
1729 1732 if self._options.nodiff:
1730 1733 pass
1731 1734 elif self._options.view:
1732 1735 v = self._options.view
1733 1736 if PYTHON3:
1734 1737 v = _bytespath(v)
1735 1738 os.system(b"%s %s %s" %
1736 1739 (v, test.refpath, test.errpath))
1737 1740 else:
1738 1741 servefail, lines = getdiff(expected, got,
1739 1742 test.refpath, test.errpath)
1740 1743 if servefail:
1741 1744 raise test.failureException(
1742 1745 'server failed to start (HGPORT=%s)' % test._startport)
1743 1746 else:
1744 1747 self.stream.write('\n')
1745 1748 for line in lines:
1746 1749 line = highlightdiff(line, self.color)
1747 1750 if PYTHON3:
1748 1751 self.stream.flush()
1749 1752 self.stream.buffer.write(line)
1750 1753 self.stream.buffer.flush()
1751 1754 else:
1752 1755 self.stream.write(line)
1753 1756 self.stream.flush()
1754 1757
1755 1758 # handle interactive prompt without releasing iolock
1756 1759 if self._options.interactive:
1757 1760 if test.readrefout() != expected:
1758 1761 self.stream.write(
1759 1762 'Reference output has changed (run again to prompt '
1760 1763 'changes)')
1761 1764 else:
1762 1765 self.stream.write('Accept this change? [n] ')
1763 1766 answer = sys.stdin.readline().strip()
1764 1767 if answer.lower() in ('y', 'yes'):
1765 1768 if test.path.endswith(b'.t'):
1766 1769 rename(test.errpath, test.path)
1767 1770 else:
1768 1771 rename(test.errpath, '%s.out' % test.path)
1769 1772 accepted = True
1770 1773 if not accepted:
1771 1774 self.faildata[test.name] = b''.join(lines)
1772 1775
1773 1776 return accepted
1774 1777
1775 1778 def startTest(self, test):
1776 1779 super(TestResult, self).startTest(test)
1777 1780
1778 1781 # os.times module computes the user time and system time spent by
1779 1782 # child's processes along with real elapsed time taken by a process.
1780 1783 # This module has one limitation. It can only work for Linux user
1781 1784 # and not for Windows.
1782 1785 test.started = os.times()
1783 1786 if self._firststarttime is None: # thread racy but irrelevant
1784 1787 self._firststarttime = test.started[4]
1785 1788
1786 1789 def stopTest(self, test, interrupted=False):
1787 1790 super(TestResult, self).stopTest(test)
1788 1791
1789 1792 test.stopped = os.times()
1790 1793
1791 1794 starttime = test.started
1792 1795 endtime = test.stopped
1793 1796 origin = self._firststarttime
1794 1797 self.times.append((test.name,
1795 1798 endtime[2] - starttime[2], # user space CPU time
1796 1799 endtime[3] - starttime[3], # sys space CPU time
1797 1800 endtime[4] - starttime[4], # real time
1798 1801 starttime[4] - origin, # start date in run context
1799 1802 endtime[4] - origin, # end date in run context
1800 1803 ))
1801 1804
1802 1805 if interrupted:
1803 1806 with iolock:
1804 1807 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1805 1808 test.name, self.times[-1][3]))
1806 1809
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]  # one-element list so the closure below can mutate it
        for test in self._tests:
            def get():
                # Hand out a fresh Test instance when the test asked to be
                # reloaded for every run.
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One slot per job; each slot holds the progress string rendered by
        # the --showchannels stat thread below.
        channels = [""] * self._jobs

        def job(test, result):
            # Worker body: claim the first free output channel, run the
            # test, and report completion (or error) through the queue.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # --showchannels progress thread: print one character per
            # channel roughly every second until channels is cleared.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Drain one completion when the worker pool is full or no
                # more tests are waiting to be scheduled.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloading if needed) so
                        # the suite never runs dry.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Clearing the list tells the stat thread to exit its loop.
        channels = []

        return result
1993 1996
1994 1997 # Save the most recent 5 wall-clock runtimes of each test to a
1995 1998 # human-readable text file named .testtimes. Tests are sorted
1996 1999 # alphabetically, while times for each test are listed from oldest to
1997 2000 # newest.
1998 2001
def loadtimes(outputdir):
    """Return saved per-test timings from outputdir's .testtimes file.

    Each entry is a (testname, [runtime, ...]) tuple with the runtimes
    listed from oldest to newest. Returns an empty list when no timing
    file exists yet; any other I/O error propagates.
    """
    times = []
    try:
        # Read the same '.testtimes' file that savetimes() writes. The
        # previous name (b'.testtimes-') never matched, so saved timings
        # were silently discarded on every run.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2010 2013
def savetimes(outputdir, result):
    """Merge the wall-clock runtimes from result into .testtimes.

    Keeps at most the five most recent runtimes per test; skipped tests
    are left untouched. The file is rewritten via a temporary file so a
    crash cannot corrupt the previous data.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = set([str(t[0]) for t in result.skipped])
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        runtimes = saved.setdefault(name, [])
        runtimes.append(real)
        # Trim in place to the most recent maxruns entries.
        runtimes[:] = runtimes[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, runtimes in sorted(saved.items()):
            formatted = ' '.join(['%.3f' % (t,) for t in runtimes])
            fp.write('%s %s\n' % (name, formatted))
    timepath = os.path.join(outputdir, b'.testtimes')
    # Best-effort replace: ignore failures on both unlink and rename
    # (e.g. concurrent runs racing on the same output directory).
    for op, opargs in ((os.unlink, (timepath,)),
                       (os.rename, (tmpname, timepath))):
        try:
            op(*opargs)
        except OSError:
            pass
2036 2039
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; provides options and output paths.
        self._runner = runner

    def listtests(self, test):
        """Print the test names instead of running them.

        Still produces xunit/JSON reports (with every test recorded as a
        success) when those options are enabled.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run the given suite, print a summary and write reports."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)
            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        # Use 'hg bisect' between the working parent and known_good_rev to
        # pinpoint the changeset that broke each failing test.
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run args with HGPLAIN set; return combined stdout+stderr.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            # Re-run this harness on just this test as the bisect command.
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))

    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit/JUnit-style XML report for result to outf."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a JSON report ('testreport = {...}') for result to outf."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2256 2259
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests. Checked for on PATH before any
    # tests execute (see _checktools).
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class. Order matters: the first
    # matching extension wins.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2278 2281
2279 2282 def __init__(self):
2280 2283 self.options = None
2281 2284 self._hgroot = None
2282 2285 self._testdir = None
2283 2286 self._outputdir = None
2284 2287 self._hgtmp = None
2285 2288 self._installdir = None
2286 2289 self._bindir = None
2287 2290 self._tmpbinddir = None
2288 2291 self._pythondir = None
2289 2292 self._coveragefile = None
2290 2293 self._createdfiles = []
2291 2294 self._hgcommand = None
2292 2295 self._hgpath = None
2293 2296 self._portoffset = 0
2294 2297 self._ports = {}
2295 2298
2296 2299 def run(self, args, parser=None):
2297 2300 """Run the test suite."""
2298 2301 oldmask = os.umask(0o22)
2299 2302 try:
2300 2303 parser = parser or getparser()
2301 options, args = parseargs(args, parser)
2302 # positional arguments are paths to test files to run, so
2303 # we make sure they're all bytestrings
2304 args = [_bytespath(a) for a in args]
2304 options = parseargs(args, parser)
2305 tests = [_bytespath(a) for a in options.tests]
2305 2306 if options.test_list is not None:
2306 2307 for listfile in options.test_list:
2307 2308 with open(listfile, 'rb') as f:
2308 args.extend(t for t in f.read().splitlines() if t)
2309 tests.extend(t for t in f.read().splitlines() if t)
2309 2310 self.options = options
2310 2311
2311 2312 self._checktools()
2312 testdescs = self.findtests(args)
2313 testdescs = self.findtests(tests)
2313 2314 if options.profile_runner:
2314 2315 import statprof
2315 2316 statprof.start()
2316 2317 result = self._run(testdescs)
2317 2318 if options.profile_runner:
2318 2319 statprof.stop()
2319 2320 statprof.display()
2320 2321 return result
2321 2322
2322 2323 finally:
2323 2324 os.umask(oldmask)
2324 2325
    def _run(self, testdescs):
        """Prepare the environment and execute the given test descriptors.

        Orders the tests, sets up HGTMP/PATH/PYTHONPATH and the hg binary
        locations, then delegates to _runtests(). Returns the exit code.
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            # Automatically removing tmpdir sounds convenient, but could
            # really annoy anyone in the habit of using "--tmpdir=/tmp"
            # or "--tmpdir=$HOME".
            #vlog("# Removing temp dir", tmpdir)
            #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give spawned children a moment to exit before cleanup.
            time.sleep(.1)
            self._cleanup()
2510 2511
2511 2512 def findtests(self, args):
2512 2513 """Finds possible test files from arguments.
2513 2514
2514 2515 If you wish to inject custom tests into the test harness, this would
2515 2516 be a good function to monkeypatch or override in a derived class.
2516 2517 """
2517 2518 if not args:
2518 2519 if self.options.changed:
2519 2520 proc = Popen4('hg st --rev "%s" -man0 .' %
2520 2521 self.options.changed, None, 0)
2521 2522 stdout, stderr = proc.communicate()
2522 2523 args = stdout.strip(b'\0').split(b'\0')
2523 2524 else:
2524 2525 args = os.listdir(b'.')
2525 2526
2526 2527 expanded_args = []
2527 2528 for arg in args:
2528 2529 if os.path.isdir(arg):
2529 2530 if not arg.endswith(b'/'):
2530 2531 arg += b'/'
2531 2532 expanded_args.extend([arg + a for a in os.listdir(arg)])
2532 2533 else:
2533 2534 expanded_args.append(arg)
2534 2535 args = expanded_args
2535 2536
2536 2537 tests = []
2537 2538 for t in args:
2538 2539 if not (os.path.basename(t).startswith(b'test-')
2539 2540 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2540 2541 continue
2541 2542 if t.endswith(b'.t'):
2542 2543 # .t file may contain multiple test cases
2543 2544 cases = sorted(parsettestcases(t))
2544 2545 if cases:
2545 2546 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2546 2547 else:
2547 2548 tests.append({'path': t})
2548 2549 else:
2549 2550 tests.append({'path': t})
2550 2551 return tests
2551 2552
2552 2553 def _runtests(self, testdescs):
2553 2554 def _reloadtest(test, i):
2554 2555 # convert a test back to its description dict
2555 2556 desc = {'path': test.path}
2556 2557 case = getattr(test, '_case', None)
2557 2558 if case:
2558 2559 desc['case'] = case
2559 2560 return self._gettest(desc, i)
2560 2561
2561 2562 try:
2562 2563 if self.options.restart:
2563 2564 orig = list(testdescs)
2564 2565 while testdescs:
2565 2566 desc = testdescs[0]
2566 2567 # desc['path'] is a relative path
2567 2568 if 'case' in desc:
2568 2569 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2569 2570 else:
2570 2571 errpath = b'%s.err' % desc['path']
2571 2572 errpath = os.path.join(self._outputdir, errpath)
2572 2573 if os.path.exists(errpath):
2573 2574 break
2574 2575 testdescs.pop(0)
2575 2576 if not testdescs:
2576 2577 print("running all tests")
2577 2578 testdescs = orig
2578 2579
2579 2580 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2580 2581
2581 2582 failed = False
2582 2583 kws = self.options.keywords
2583 2584 if kws is not None and PYTHON3:
2584 2585 kws = kws.encode('utf-8')
2585 2586
2586 2587 suite = TestSuite(self._testdir,
2587 2588 jobs=self.options.jobs,
2588 2589 whitelist=self.options.whitelisted,
2589 2590 blacklist=self.options.blacklist,
2590 2591 retest=self.options.retest,
2591 2592 keywords=kws,
2592 2593 loop=self.options.loop,
2593 2594 runs_per_test=self.options.runs_per_test,
2594 2595 showchannels=self.options.showchannels,
2595 2596 tests=tests, loadtest=_reloadtest)
2596 2597 verbosity = 1
2597 2598 if self.options.verbose:
2598 2599 verbosity = 2
2599 2600 runner = TextTestRunner(self, verbosity=verbosity)
2600 2601
2601 2602 if self.options.list_tests:
2602 2603 result = runner.listtests(suite)
2603 2604 else:
2604 2605 if self._installdir:
2605 2606 self._installhg()
2606 2607 self._checkhglib("Testing")
2607 2608 else:
2608 2609 self._usecorrectpython()
2609 2610 if self.options.chg:
2610 2611 assert self._installdir
2611 2612 self._installchg()
2612 2613
2613 2614 result = runner.run(suite)
2614 2615
2615 2616 if result.failures:
2616 2617 failed = True
2617 2618
2618 2619 if self.options.anycoverage:
2619 2620 self._outputcoverage()
2620 2621 except KeyboardInterrupt:
2621 2622 failed = True
2622 2623 print("\ninterrupted!")
2623 2624
2624 2625 if failed:
2625 2626 return 1
2626 2627
2627 2628 def _getport(self, count):
2628 2629 port = self._ports.get(count) # do we have a cached entry?
2629 2630 if port is None:
2630 2631 portneeded = 3
2631 2632 # above 100 tries we just give up and let test reports failure
2632 2633 for tries in xrange(100):
2633 2634 allfree = True
2634 2635 port = self.options.port + self._portoffset
2635 2636 for idx in xrange(portneeded):
2636 2637 if not checkportisavailable(port + idx):
2637 2638 allfree = False
2638 2639 break
2639 2640 self._portoffset += portneeded
2640 2641 if allfree:
2641 2642 break
2642 2643 self._ports[count] = port
2643 2644 return port
2644 2645
2645 2646 def _gettest(self, testdesc, count):
2646 2647 """Obtain a Test by looking at its filename.
2647 2648
2648 2649 Returns a Test instance. The Test may not be runnable if it doesn't
2649 2650 map to a known type.
2650 2651 """
2651 2652 path = testdesc['path']
2652 2653 lctest = path.lower()
2653 2654 testcls = Test
2654 2655
2655 2656 for ext, cls in self.TESTTYPES:
2656 2657 if lctest.endswith(ext):
2657 2658 testcls = cls
2658 2659 break
2659 2660
2660 2661 refpath = os.path.join(self._testdir, path)
2661 2662 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2662 2663
2663 2664 # extra keyword parameters. 'case' is used by .t tests
2664 2665 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2665 2666
2666 2667 t = testcls(refpath, self._outputdir, tmpdir,
2667 2668 keeptmpdir=self.options.keep_tmpdir,
2668 2669 debug=self.options.debug,
2669 2670 timeout=self.options.timeout,
2670 2671 startport=self._getport(count),
2671 2672 extraconfigopts=self.options.extra_config_opt,
2672 2673 py3kwarnings=self.options.py3k_warnings,
2673 2674 shell=self.options.shell,
2674 2675 hgcommand=self._hgcommand,
2675 2676 usechg=bool(self.options.with_chg or self.options.chg),
2676 2677 useipv6=useipv6, **kwds)
2677 2678 t.should_reload = True
2678 2679 return t
2679 2680
2680 2681 def _cleanup(self):
2681 2682 """Clean up state from this test invocation."""
2682 2683 if self.options.keep_tmpdir:
2683 2684 return
2684 2685
2685 2686 vlog("# Cleaning up HGTMP", self._hgtmp)
2686 2687 shutil.rmtree(self._hgtmp, True)
2687 2688 for f in self._createdfiles:
2688 2689 try:
2689 2690 os.remove(f)
2690 2691 except OSError:
2691 2692 pass
2692 2693
2693 2694 def _usecorrectpython(self):
2694 2695 """Configure the environment to use the appropriate Python in tests."""
2695 2696 # Tests must use the same interpreter as us or bad things will happen.
2696 2697 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2697 2698 if getattr(os, 'symlink', None):
2698 2699 vlog("# Making python executable in test path a symlink to '%s'" %
2699 2700 sys.executable)
2700 2701 mypython = os.path.join(self._tmpbindir, pyexename)
2701 2702 try:
2702 2703 if os.readlink(mypython) == sys.executable:
2703 2704 return
2704 2705 os.unlink(mypython)
2705 2706 except OSError as err:
2706 2707 if err.errno != errno.ENOENT:
2707 2708 raise
2708 2709 if self._findprogram(pyexename) != sys.executable:
2709 2710 try:
2710 2711 os.symlink(sys.executable, mypython)
2711 2712 self._createdfiles.append(mypython)
2712 2713 except OSError as err:
2713 2714 # child processes may race, which is harmless
2714 2715 if err.errno != errno.EEXIST:
2715 2716 raise
2716 2717 else:
2717 2718 exedir, exename = os.path.split(sys.executable)
2718 2719 vlog("# Modifying search path to find %s as %s in '%s'" %
2719 2720 (exename, pyexename, exedir))
2720 2721 path = os.environ['PATH'].split(os.pathsep)
2721 2722 while exedir in path:
2722 2723 path.remove(exedir)
2723 2724 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2724 2725 if not self._findprogram(pyexename):
2725 2726 print("WARNING: Cannot find %s in search path" % pyexename)
2726 2727
2727 2728 def _installhg(self):
2728 2729 """Install hg into the test environment.
2729 2730
2730 2731 This will also configure hg with the appropriate testing settings.
2731 2732 """
2732 2733 vlog("# Performing temporary installation of HG")
2733 2734 installerrs = os.path.join(self._hgtmp, b"install.err")
2734 2735 compiler = ''
2735 2736 if self.options.compiler:
2736 2737 compiler = '--compiler ' + self.options.compiler
2737 2738 if self.options.pure:
2738 2739 pure = b"--pure"
2739 2740 else:
2740 2741 pure = b""
2741 2742
2742 2743 # Run installer in hg root
2743 2744 script = os.path.realpath(sys.argv[0])
2744 2745 exe = sys.executable
2745 2746 if PYTHON3:
2746 2747 compiler = _bytespath(compiler)
2747 2748 script = _bytespath(script)
2748 2749 exe = _bytespath(exe)
2749 2750 hgroot = os.path.dirname(os.path.dirname(script))
2750 2751 self._hgroot = hgroot
2751 2752 os.chdir(hgroot)
2752 2753 nohome = b'--home=""'
2753 2754 if os.name == 'nt':
2754 2755 # The --home="" trick works only on OS where os.sep == '/'
2755 2756 # because of a distutils convert_path() fast-path. Avoid it at
2756 2757 # least on Windows for now, deal with .pydistutils.cfg bugs
2757 2758 # when they happen.
2758 2759 nohome = b''
2759 2760 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2760 2761 b' build %(compiler)s --build-base="%(base)s"'
2761 2762 b' install --force --prefix="%(prefix)s"'
2762 2763 b' --install-lib="%(libdir)s"'
2763 2764 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2764 2765 % {b'exe': exe, b'pure': pure,
2765 2766 b'compiler': compiler,
2766 2767 b'base': os.path.join(self._hgtmp, b"build"),
2767 2768 b'prefix': self._installdir, b'libdir': self._pythondir,
2768 2769 b'bindir': self._bindir,
2769 2770 b'nohome': nohome, b'logfile': installerrs})
2770 2771
2771 2772 # setuptools requires install directories to exist.
2772 2773 def makedirs(p):
2773 2774 try:
2774 2775 os.makedirs(p)
2775 2776 except OSError as e:
2776 2777 if e.errno != errno.EEXIST:
2777 2778 raise
2778 2779 makedirs(self._pythondir)
2779 2780 makedirs(self._bindir)
2780 2781
2781 2782 vlog("# Running", cmd)
2782 2783 if os.system(cmd) == 0:
2783 2784 if not self.options.verbose:
2784 2785 try:
2785 2786 os.remove(installerrs)
2786 2787 except OSError as e:
2787 2788 if e.errno != errno.ENOENT:
2788 2789 raise
2789 2790 else:
2790 2791 f = open(installerrs, 'rb')
2791 2792 for line in f:
2792 2793 if PYTHON3:
2793 2794 sys.stdout.buffer.write(line)
2794 2795 else:
2795 2796 sys.stdout.write(line)
2796 2797 f.close()
2797 2798 sys.exit(1)
2798 2799 os.chdir(self._testdir)
2799 2800
2800 2801 self._usecorrectpython()
2801 2802
2802 2803 if self.options.py3k_warnings and not self.options.anycoverage:
2803 2804 vlog("# Updating hg command to enable Py3k Warnings switch")
2804 2805 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2805 2806 lines = [line.rstrip() for line in f]
2806 2807 lines[0] += ' -3'
2807 2808 f.close()
2808 2809 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2809 2810 for line in lines:
2810 2811 f.write(line + '\n')
2811 2812 f.close()
2812 2813
2813 2814 hgbat = os.path.join(self._bindir, b'hg.bat')
2814 2815 if os.path.isfile(hgbat):
2815 2816 # hg.bat expects to be put in bin/scripts while run-tests.py
2816 2817 # installation layout put it in bin/ directly. Fix it
2817 2818 f = open(hgbat, 'rb')
2818 2819 data = f.read()
2819 2820 f.close()
2820 2821 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2821 2822 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2822 2823 b'"%~dp0python" "%~dp0hg" %*')
2823 2824 f = open(hgbat, 'wb')
2824 2825 f.write(data)
2825 2826 f.close()
2826 2827 else:
2827 2828 print('WARNING: cannot fix hg.bat reference to python.exe')
2828 2829
2829 2830 if self.options.anycoverage:
2830 2831 custom = os.path.join(self._testdir, 'sitecustomize.py')
2831 2832 target = os.path.join(self._pythondir, 'sitecustomize.py')
2832 2833 vlog('# Installing coverage trigger to %s' % target)
2833 2834 shutil.copyfile(custom, target)
2834 2835 rc = os.path.join(self._testdir, '.coveragerc')
2835 2836 vlog('# Installing coverage rc to %s' % rc)
2836 2837 os.environ['COVERAGE_PROCESS_START'] = rc
2837 2838 covdir = os.path.join(self._installdir, '..', 'coverage')
2838 2839 try:
2839 2840 os.mkdir(covdir)
2840 2841 except OSError as e:
2841 2842 if e.errno != errno.EEXIST:
2842 2843 raise
2843 2844
2844 2845 os.environ['COVERAGE_DIR'] = covdir
2845 2846
2846 2847 def _checkhglib(self, verb):
2847 2848 """Ensure that the 'mercurial' package imported by python is
2848 2849 the one we expect it to be. If not, print a warning to stderr."""
2849 2850 if ((self._bindir == self._pythondir) and
2850 2851 (self._bindir != self._tmpbindir)):
2851 2852 # The pythondir has been inferred from --with-hg flag.
2852 2853 # We cannot expect anything sensible here.
2853 2854 return
2854 2855 expecthg = os.path.join(self._pythondir, b'mercurial')
2855 2856 actualhg = self._gethgpath()
2856 2857 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2857 2858 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2858 2859 ' (expected %s)\n'
2859 2860 % (verb, actualhg, expecthg))
2860 2861 def _gethgpath(self):
2861 2862 """Return the path to the mercurial package that is actually found by
2862 2863 the current Python interpreter."""
2863 2864 if self._hgpath is not None:
2864 2865 return self._hgpath
2865 2866
2866 2867 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2867 2868 cmd = cmd % PYTHON
2868 2869 if PYTHON3:
2869 2870 cmd = _strpath(cmd)
2870 2871 pipe = os.popen(cmd)
2871 2872 try:
2872 2873 self._hgpath = _bytespath(pipe.read().strip())
2873 2874 finally:
2874 2875 pipe.close()
2875 2876
2876 2877 return self._hgpath
2877 2878
2878 2879 def _installchg(self):
2879 2880 """Install chg into the test environment"""
2880 2881 vlog('# Performing temporary installation of CHG')
2881 2882 assert os.path.dirname(self._bindir) == self._installdir
2882 2883 assert self._hgroot, 'must be called after _installhg()'
2883 2884 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2884 2885 % {b'make': 'make', # TODO: switch by option or environment?
2885 2886 b'prefix': self._installdir})
2886 2887 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2887 2888 vlog("# Running", cmd)
2888 2889 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2889 2890 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2890 2891 stderr=subprocess.STDOUT)
2891 2892 out, _err = proc.communicate()
2892 2893 if proc.returncode != 0:
2893 2894 if PYTHON3:
2894 2895 sys.stdout.buffer.write(out)
2895 2896 else:
2896 2897 sys.stdout.write(out)
2897 2898 sys.exit(1)
2898 2899
2899 2900 def _outputcoverage(self):
2900 2901 """Produce code coverage output."""
2901 2902 import coverage
2902 2903 coverage = coverage.coverage
2903 2904
2904 2905 vlog('# Producing coverage report')
2905 2906 # chdir is the easiest way to get short, relative paths in the
2906 2907 # output.
2907 2908 os.chdir(self._hgroot)
2908 2909 covdir = os.path.join(self._installdir, '..', 'coverage')
2909 2910 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2910 2911
2911 2912 # Map install directory paths back to source directory.
2912 2913 cov.config.paths['srcdir'] = ['.', self._pythondir]
2913 2914
2914 2915 cov.combine()
2915 2916
2916 2917 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2917 2918 cov.report(ignore_errors=True, omit=omit)
2918 2919
2919 2920 if self.options.htmlcov:
2920 2921 htmldir = os.path.join(self._outputdir, 'htmlcov')
2921 2922 cov.html_report(directory=htmldir, omit=omit)
2922 2923 if self.options.annotate:
2923 2924 adir = os.path.join(self._outputdir, 'annotated')
2924 2925 if not os.path.isdir(adir):
2925 2926 os.mkdir(adir)
2926 2927 cov.annotate(directory=adir, omit=omit)
2927 2928
2928 2929 def _findprogram(self, program):
2929 2930 """Search PATH for a executable program"""
2930 2931 dpb = _bytespath(os.defpath)
2931 2932 sepb = _bytespath(os.pathsep)
2932 2933 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2933 2934 name = os.path.join(p, program)
2934 2935 if os.name == 'nt' or os.access(name, os.X_OK):
2935 2936 return name
2936 2937 return None
2937 2938
2938 2939 def _checktools(self):
2939 2940 """Ensure tools required to run tests are present."""
2940 2941 for p in self.REQUIREDTOOLS:
2941 2942 if os.name == 'nt' and not p.endswith('.exe'):
2942 2943 p += '.exe'
2943 2944 found = self._findprogram(p)
2944 2945 if found:
2945 2946 vlog("# Found prerequisite", p, "at", found)
2946 2947 else:
2947 2948 print("WARNING: Did not find prerequisite tool: %s " %
2948 2949 p.decode("utf-8"))
2949 2950
2950 2951 if __name__ == '__main__':
2951 2952 runner = TestRunner()
2952 2953
2953 2954 try:
2954 2955 import msvcrt
2955 2956 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2956 2957 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2957 2958 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2958 2959 except ImportError:
2959 2960 pass
2960 2961
2961 2962 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1570 +1,1568 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 Usage: run-tests.py [options] [tests]
36
35 usage: run-tests.py [options] [tests]
37 36 run-tests.py: error: --with-hg must specify an executable hg script
38 37 [2]
39 38 $ rm hg
40 39 #endif
41 40
42 41 Features for testing optional lines
43 42 ===================================
44 43
45 44 $ cat > hghaveaddon.py <<EOF
46 45 > import hghave
47 46 > @hghave.check("custom", "custom hghave feature")
48 47 > def has_custom():
49 48 > return True
50 49 > @hghave.check("missing", "missing hghave feature")
51 50 > def has_missing():
52 51 > return False
53 52 > EOF
54 53
55 54 an empty test
56 55 =======================
57 56
58 57 $ touch test-empty.t
59 58 $ rt
60 59 .
61 60 # Ran 1 tests, 0 skipped, 0 failed.
62 61 $ rm test-empty.t
63 62
64 63 a succesful test
65 64 =======================
66 65
67 66 $ cat > test-success.t << EOF
68 67 > $ echo babar
69 68 > babar
70 69 > $ echo xyzzy
71 70 > dont_print (?)
72 71 > nothing[42]line (re) (?)
73 72 > never*happens (glob) (?)
74 73 > more_nothing (?)
75 74 > xyzzy
76 75 > nor this (?)
77 76 > $ printf 'abc\ndef\nxyz\n'
78 77 > 123 (?)
79 78 > abc
80 79 > def (?)
81 80 > 456 (?)
82 81 > xyz
83 82 > $ printf 'zyx\nwvu\ntsr\n'
84 83 > abc (?)
85 84 > zyx (custom !)
86 85 > wvu
87 86 > no_print (no-custom !)
88 87 > tsr (no-missing !)
89 88 > missing (missing !)
90 89 > EOF
91 90
92 91 $ rt
93 92 .
94 93 # Ran 1 tests, 0 skipped, 0 failed.
95 94
96 95 failing test
97 96 ==================
98 97
99 98 test churn with globs
100 99 $ cat > test-failure.t <<EOF
101 100 > $ echo "bar-baz"; echo "bar-bad"; echo foo
102 101 > bar*bad (glob)
103 102 > bar*baz (glob)
104 103 > | fo (re)
105 104 > EOF
106 105 $ rt test-failure.t
107 106
108 107 --- $TESTTMP/test-failure.t
109 108 +++ $TESTTMP/test-failure.t.err
110 109 @@ -1,4 +1,4 @@
111 110 $ echo "bar-baz"; echo "bar-bad"; echo foo
112 111 + bar*baz (glob)
113 112 bar*bad (glob)
114 113 - bar*baz (glob)
115 114 - | fo (re)
116 115 + foo
117 116
118 117 ERROR: test-failure.t output changed
119 118 !
120 119 Failed test-failure.t: output changed
121 120 # Ran 1 tests, 0 skipped, 1 failed.
122 121 python hash seed: * (glob)
123 122 [1]
124 123
125 124 test diff colorisation
126 125
127 126 #if no-windows pygments
128 127 $ rt test-failure.t --color always
129 128
130 129 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
131 130 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
132 131 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
133 132 $ echo "bar-baz"; echo "bar-bad"; echo foo
134 133 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
135 134 bar*bad (glob)
136 135 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
137 136 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
138 137 \x1b[38;5;34m+ foo\x1b[39m (esc)
139 138
140 139 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
141 140 !
142 141 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
143 142 # Ran 1 tests, 0 skipped, 1 failed.
144 143 python hash seed: * (glob)
145 144 [1]
146 145
147 146 $ rt test-failure.t 2> tmp.log
148 147 [1]
149 148 $ cat tmp.log
150 149
151 150 --- $TESTTMP/test-failure.t
152 151 +++ $TESTTMP/test-failure.t.err
153 152 @@ -1,4 +1,4 @@
154 153 $ echo "bar-baz"; echo "bar-bad"; echo foo
155 154 + bar*baz (glob)
156 155 bar*bad (glob)
157 156 - bar*baz (glob)
158 157 - | fo (re)
159 158 + foo
160 159
161 160 ERROR: test-failure.t output changed
162 161 !
163 162 Failed test-failure.t: output changed
164 163 # Ran 1 tests, 0 skipped, 1 failed.
165 164 python hash seed: * (glob)
166 165 #endif
167 166
168 167 $ cat > test-failure.t << EOF
169 168 > $ true
170 169 > should go away (true !)
171 170 > $ true
172 171 > should stay (false !)
173 172 >
174 173 > Should remove first line, not second or third
175 174 > $ echo 'testing'
176 175 > baz*foo (glob) (true !)
177 176 > foobar*foo (glob) (false !)
178 177 > te*ting (glob) (true !)
179 178 >
180 179 > Should keep first two lines, remove third and last
181 180 > $ echo 'testing'
182 181 > test.ng (re) (true !)
183 182 > foo.ar (re) (false !)
184 183 > b.r (re) (true !)
185 184 > missing (?)
186 185 > awol (true !)
187 186 >
188 187 > The "missing" line should stay, even though awol is dropped
189 188 > $ echo 'testing'
190 189 > test.ng (re) (true !)
191 190 > foo.ar (?)
192 191 > awol
193 192 > missing (?)
194 193 > EOF
195 194 $ rt test-failure.t
196 195
197 196 --- $TESTTMP/test-failure.t
198 197 +++ $TESTTMP/test-failure.t.err
199 198 @@ -1,11 +1,9 @@
200 199 $ true
201 200 - should go away (true !)
202 201 $ true
203 202 should stay (false !)
204 203
205 204 Should remove first line, not second or third
206 205 $ echo 'testing'
207 206 - baz*foo (glob) (true !)
208 207 foobar*foo (glob) (false !)
209 208 te*ting (glob) (true !)
210 209
211 210 foo.ar (re) (false !)
212 211 missing (?)
213 212 @@ -13,13 +11,10 @@
214 213 $ echo 'testing'
215 214 test.ng (re) (true !)
216 215 foo.ar (re) (false !)
217 216 - b.r (re) (true !)
218 217 missing (?)
219 218 - awol (true !)
220 219
221 220 The "missing" line should stay, even though awol is dropped
222 221 $ echo 'testing'
223 222 test.ng (re) (true !)
224 223 foo.ar (?)
225 224 - awol
226 225 missing (?)
227 226
228 227 ERROR: test-failure.t output changed
229 228 !
230 229 Failed test-failure.t: output changed
231 230 # Ran 1 tests, 0 skipped, 1 failed.
232 231 python hash seed: * (glob)
233 232 [1]
234 233
235 234 basic failing test
236 235 $ cat > test-failure.t << EOF
237 236 > $ echo babar
238 237 > rataxes
239 238 > This is a noop statement so that
240 239 > this test is still more bytes than success.
241 240 > pad pad pad pad............................................................
242 241 > pad pad pad pad............................................................
243 242 > pad pad pad pad............................................................
244 243 > pad pad pad pad............................................................
245 244 > pad pad pad pad............................................................
246 245 > pad pad pad pad............................................................
247 246 > EOF
248 247
249 248 >>> fh = open('test-failure-unicode.t', 'wb')
250 249 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
251 250 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
252 251
253 252 $ rt
254 253
255 254 --- $TESTTMP/test-failure.t
256 255 +++ $TESTTMP/test-failure.t.err
257 256 @@ -1,5 +1,5 @@
258 257 $ echo babar
259 258 - rataxes
260 259 + babar
261 260 This is a noop statement so that
262 261 this test is still more bytes than success.
263 262 pad pad pad pad............................................................
264 263
265 264 ERROR: test-failure.t output changed
266 265 !.
267 266 --- $TESTTMP/test-failure-unicode.t
268 267 +++ $TESTTMP/test-failure-unicode.t.err
269 268 @@ -1,2 +1,2 @@
270 269 $ echo babar\xce\xb1 (esc)
271 270 - l\xce\xb5\xce\xb5t (esc)
272 271 + babar\xce\xb1 (esc)
273 272
274 273 ERROR: test-failure-unicode.t output changed
275 274 !
276 275 Failed test-failure.t: output changed
277 276 Failed test-failure-unicode.t: output changed
278 277 # Ran 3 tests, 0 skipped, 2 failed.
279 278 python hash seed: * (glob)
280 279 [1]
281 280
282 281 test --outputdir
283 282 $ mkdir output
284 283 $ rt --outputdir output
285 284
286 285 --- $TESTTMP/test-failure.t
287 286 +++ $TESTTMP/output/test-failure.t.err
288 287 @@ -1,5 +1,5 @@
289 288 $ echo babar
290 289 - rataxes
291 290 + babar
292 291 This is a noop statement so that
293 292 this test is still more bytes than success.
294 293 pad pad pad pad............................................................
295 294
296 295 ERROR: test-failure.t output changed
297 296 !.
298 297 --- $TESTTMP/test-failure-unicode.t
299 298 +++ $TESTTMP/output/test-failure-unicode.t.err
300 299 @@ -1,2 +1,2 @@
301 300 $ echo babar\xce\xb1 (esc)
302 301 - l\xce\xb5\xce\xb5t (esc)
303 302 + babar\xce\xb1 (esc)
304 303
305 304 ERROR: test-failure-unicode.t output changed
306 305 !
307 306 Failed test-failure.t: output changed
308 307 Failed test-failure-unicode.t: output changed
309 308 # Ran 3 tests, 0 skipped, 2 failed.
310 309 python hash seed: * (glob)
311 310 [1]
312 311 $ ls -a output
313 312 .
314 313 ..
315 314 .testtimes
316 315 test-failure-unicode.t.err
317 316 test-failure.t.err
318 317
319 318 test --xunit support
320 319 $ rt --xunit=xunit.xml
321 320
322 321 --- $TESTTMP/test-failure.t
323 322 +++ $TESTTMP/test-failure.t.err
324 323 @@ -1,5 +1,5 @@
325 324 $ echo babar
326 325 - rataxes
327 326 + babar
328 327 This is a noop statement so that
329 328 this test is still more bytes than success.
330 329 pad pad pad pad............................................................
331 330
332 331 ERROR: test-failure.t output changed
333 332 !.
334 333 --- $TESTTMP/test-failure-unicode.t
335 334 +++ $TESTTMP/test-failure-unicode.t.err
336 335 @@ -1,2 +1,2 @@
337 336 $ echo babar\xce\xb1 (esc)
338 337 - l\xce\xb5\xce\xb5t (esc)
339 338 + babar\xce\xb1 (esc)
340 339
341 340 ERROR: test-failure-unicode.t output changed
342 341 !
343 342 Failed test-failure.t: output changed
344 343 Failed test-failure-unicode.t: output changed
345 344 # Ran 3 tests, 0 skipped, 2 failed.
346 345 python hash seed: * (glob)
347 346 [1]
348 347 $ cat xunit.xml
349 348 <?xml version="1.0" encoding="utf-8"?>
350 349 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
351 350 <testcase name="test-success.t" time="*"/> (glob)
352 351 <testcase name="test-failure-unicode.t" time="*"> (glob)
353 352 <failure message="output changed" type="output-mismatch">
354 353 <![CDATA[--- $TESTTMP/test-failure-unicode.t
355 354 +++ $TESTTMP/test-failure-unicode.t.err
356 355 @@ -1,2 +1,2 @@
357 356 $ echo babar\xce\xb1 (esc)
358 357 - l\xce\xb5\xce\xb5t (esc)
359 358 + babar\xce\xb1 (esc)
360 359 ]]> </failure>
361 360 </testcase>
362 361 <testcase name="test-failure.t" time="*"> (glob)
363 362 <failure message="output changed" type="output-mismatch">
364 363 <![CDATA[--- $TESTTMP/test-failure.t
365 364 +++ $TESTTMP/test-failure.t.err
366 365 @@ -1,5 +1,5 @@
367 366 $ echo babar
368 367 - rataxes
369 368 + babar
370 369 This is a noop statement so that
371 370 this test is still more bytes than success.
372 371 pad pad pad pad............................................................
373 372 ]]> </failure>
374 373 </testcase>
375 374 </testsuite>
376 375
377 376 $ cat .testtimes
378 377 test-failure-unicode.t * (glob)
379 378 test-failure.t * (glob)
380 379 test-success.t * (glob)
381 380
382 381 $ rt --list-tests
383 382 test-failure-unicode.t
384 383 test-failure.t
385 384 test-success.t
386 385
387 386 $ rt --list-tests --json
388 387 test-failure-unicode.t
389 388 test-failure.t
390 389 test-success.t
391 390 $ cat report.json
392 391 testreport ={
393 392 "test-failure-unicode.t": {
394 393 "result": "success"
395 394 },
396 395 "test-failure.t": {
397 396 "result": "success"
398 397 },
399 398 "test-success.t": {
400 399 "result": "success"
401 400 }
402 401 } (no-eol)
403 402
404 403 $ rt --list-tests --xunit=xunit.xml
405 404 test-failure-unicode.t
406 405 test-failure.t
407 406 test-success.t
408 407 $ cat xunit.xml
409 408 <?xml version="1.0" encoding="utf-8"?>
410 409 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
411 410 <testcase name="test-failure-unicode.t"/>
412 411 <testcase name="test-failure.t"/>
413 412 <testcase name="test-success.t"/>
414 413 </testsuite>
415 414
416 415 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
417 416 test-failure-unicode.t
418 417 test-failure.t
419 418 $ cat output/report.json
420 419 testreport ={
421 420 "test-failure-unicode.t": {
422 421 "result": "success"
423 422 },
424 423 "test-failure.t": {
425 424 "result": "success"
426 425 }
427 426 } (no-eol)
428 427 $ cat xunit.xml
429 428 <?xml version="1.0" encoding="utf-8"?>
430 429 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
431 430 <testcase name="test-failure-unicode.t"/>
432 431 <testcase name="test-failure.t"/>
433 432 </testsuite>
434 433
435 434 $ rm test-failure-unicode.t
436 435
437 436 test for --retest
438 437 ====================
439 438
440 439 $ rt --retest
441 440
442 441 --- $TESTTMP/test-failure.t
443 442 +++ $TESTTMP/test-failure.t.err
444 443 @@ -1,5 +1,5 @@
445 444 $ echo babar
446 445 - rataxes
447 446 + babar
448 447 This is a noop statement so that
449 448 this test is still more bytes than success.
450 449 pad pad pad pad............................................................
451 450
452 451 ERROR: test-failure.t output changed
453 452 !
454 453 Failed test-failure.t: output changed
455 454 # Ran 2 tests, 1 skipped, 1 failed.
456 455 python hash seed: * (glob)
457 456 [1]
458 457
459 458 --retest works with --outputdir
460 459 $ rm -r output
461 460 $ mkdir output
462 461 $ mv test-failure.t.err output
463 462 $ rt --retest --outputdir output
464 463
465 464 --- $TESTTMP/test-failure.t
466 465 +++ $TESTTMP/output/test-failure.t.err
467 466 @@ -1,5 +1,5 @@
468 467 $ echo babar
469 468 - rataxes
470 469 + babar
471 470 This is a noop statement so that
472 471 this test is still more bytes than success.
473 472 pad pad pad pad............................................................
474 473
475 474 ERROR: test-failure.t output changed
476 475 !
477 476 Failed test-failure.t: output changed
478 477 # Ran 2 tests, 1 skipped, 1 failed.
479 478 python hash seed: * (glob)
480 479 [1]
481 480
482 481 Selecting Tests To Run
483 482 ======================
484 483
485 484 successful
486 485
487 486 $ rt test-success.t
488 487 .
489 488 # Ran 1 tests, 0 skipped, 0 failed.
490 489
491 490 success w/ keyword
492 491 $ rt -k xyzzy
493 492 .
494 493 # Ran 2 tests, 1 skipped, 0 failed.
495 494
496 495 failed
497 496
498 497 $ rt test-failure.t
499 498
500 499 --- $TESTTMP/test-failure.t
501 500 +++ $TESTTMP/test-failure.t.err
502 501 @@ -1,5 +1,5 @@
503 502 $ echo babar
504 503 - rataxes
505 504 + babar
506 505 This is a noop statement so that
507 506 this test is still more bytes than success.
508 507 pad pad pad pad............................................................
509 508
510 509 ERROR: test-failure.t output changed
511 510 !
512 511 Failed test-failure.t: output changed
513 512 # Ran 1 tests, 0 skipped, 1 failed.
514 513 python hash seed: * (glob)
515 514 [1]
516 515
517 516 failure w/ keyword
518 517 $ rt -k rataxes
519 518
520 519 --- $TESTTMP/test-failure.t
521 520 +++ $TESTTMP/test-failure.t.err
522 521 @@ -1,5 +1,5 @@
523 522 $ echo babar
524 523 - rataxes
525 524 + babar
526 525 This is a noop statement so that
527 526 this test is still more bytes than success.
528 527 pad pad pad pad............................................................
529 528
530 529 ERROR: test-failure.t output changed
531 530 !
532 531 Failed test-failure.t: output changed
533 532 # Ran 2 tests, 1 skipped, 1 failed.
534 533 python hash seed: * (glob)
535 534 [1]
536 535
537 536 Verify that when a process fails to start we show a useful message
538 537 ==================================================================
539 538
540 539 $ cat > test-serve-fail.t <<EOF
541 540 > $ echo 'abort: child process failed to start blah'
542 541 > EOF
543 542 $ rt test-serve-fail.t
544 543
545 544 ERROR: test-serve-fail.t output changed
546 545 !
547 546 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
548 547 # Ran 1 tests, 0 skipped, 1 failed.
549 548 python hash seed: * (glob)
550 549 [1]
551 550 $ rm test-serve-fail.t
552 551
553 552 Verify that we can try other ports
554 553 ===================================
555 554 $ hg init inuse
556 555 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
557 556 $ cat blocks.pid >> $DAEMON_PIDS
558 557 $ cat > test-serve-inuse.t <<EOF
559 558 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
560 559 > $ cat hg.pid >> \$DAEMON_PIDS
561 560 > EOF
562 561 $ rt test-serve-inuse.t
563 562 .
564 563 # Ran 1 tests, 0 skipped, 0 failed.
565 564 $ rm test-serve-inuse.t
566 565 $ killdaemons.py $DAEMON_PIDS
567 566 $ rm $DAEMON_PIDS
568 567
569 568 Running In Debug Mode
570 569 ======================
571 570
572 571 $ rt --debug 2>&1 | grep -v pwd
573 572 + echo *SALT* 0 0 (glob)
574 573 *SALT* 0 0 (glob)
575 574 + echo babar
576 575 babar
577 576 + echo *SALT* 10 0 (glob)
578 577 *SALT* 10 0 (glob)
579 578 *+ echo *SALT* 0 0 (glob)
580 579 *SALT* 0 0 (glob)
581 580 + echo babar
582 581 babar
583 582 + echo *SALT* 2 0 (glob)
584 583 *SALT* 2 0 (glob)
585 584 + echo xyzzy
586 585 xyzzy
587 586 + echo *SALT* 9 0 (glob)
588 587 *SALT* 9 0 (glob)
589 588 + printf *abc\ndef\nxyz\n* (glob)
590 589 abc
591 590 def
592 591 xyz
593 592 + echo *SALT* 15 0 (glob)
594 593 *SALT* 15 0 (glob)
595 594 + printf *zyx\nwvu\ntsr\n* (glob)
596 595 zyx
597 596 wvu
598 597 tsr
599 598 + echo *SALT* 22 0 (glob)
600 599 *SALT* 22 0 (glob)
601 600 .
602 601 # Ran 2 tests, 0 skipped, 0 failed.
603 602
604 603 Parallel runs
605 604 ==============
606 605
607 606 (duplicate the failing test to get predictable output)
608 607 $ cp test-failure.t test-failure-copy.t
609 608
610 609 $ rt --jobs 2 test-failure*.t -n
611 610 !!
612 611 Failed test-failure*.t: output changed (glob)
613 612 Failed test-failure*.t: output changed (glob)
614 613 # Ran 2 tests, 0 skipped, 2 failed.
615 614 python hash seed: * (glob)
616 615 [1]
617 616
618 617 failures in parallel with --first should only print one failure
619 618 $ rt --jobs 2 --first test-failure*.t
620 619
621 620 --- $TESTTMP/test-failure*.t (glob)
622 621 +++ $TESTTMP/test-failure*.t.err (glob)
623 622 @@ -1,5 +1,5 @@
624 623 $ echo babar
625 624 - rataxes
626 625 + babar
627 626 This is a noop statement so that
628 627 this test is still more bytes than success.
629 628 pad pad pad pad............................................................
630 629
631 630 Failed test-failure*.t: output changed (glob)
632 631 Failed test-failure*.t: output changed (glob)
633 632 # Ran 2 tests, 0 skipped, 2 failed.
634 633 python hash seed: * (glob)
635 634 [1]
636 635
637 636
638 637 (delete the duplicated test file)
639 638 $ rm test-failure-copy.t
640 639
641 640
642 641 Interactive run
643 642 ===============
644 643
645 644 (backup the failing test)
646 645 $ cp test-failure.t backup
647 646
648 647 Refuse the fix
649 648
650 649 $ echo 'n' | rt -i
651 650
652 651 --- $TESTTMP/test-failure.t
653 652 +++ $TESTTMP/test-failure.t.err
654 653 @@ -1,5 +1,5 @@
655 654 $ echo babar
656 655 - rataxes
657 656 + babar
658 657 This is a noop statement so that
659 658 this test is still more bytes than success.
660 659 pad pad pad pad............................................................
661 660 Accept this change? [n]
662 661 ERROR: test-failure.t output changed
663 662 !.
664 663 Failed test-failure.t: output changed
665 664 # Ran 2 tests, 0 skipped, 1 failed.
666 665 python hash seed: * (glob)
667 666 [1]
668 667
669 668 $ cat test-failure.t
670 669 $ echo babar
671 670 rataxes
672 671 This is a noop statement so that
673 672 this test is still more bytes than success.
674 673 pad pad pad pad............................................................
675 674 pad pad pad pad............................................................
676 675 pad pad pad pad............................................................
677 676 pad pad pad pad............................................................
678 677 pad pad pad pad............................................................
679 678 pad pad pad pad............................................................
680 679
681 680 Interactive with custom view
682 681
683 682 $ echo 'n' | rt -i --view echo
684 683 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
685 684 Accept this change? [n]* (glob)
686 685 ERROR: test-failure.t output changed
687 686 !.
688 687 Failed test-failure.t: output changed
689 688 # Ran 2 tests, 0 skipped, 1 failed.
690 689 python hash seed: * (glob)
691 690 [1]
692 691
693 692 View the fix
694 693
695 694 $ echo 'y' | rt --view echo
696 695 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
697 696
698 697 ERROR: test-failure.t output changed
699 698 !.
700 699 Failed test-failure.t: output changed
701 700 # Ran 2 tests, 0 skipped, 1 failed.
702 701 python hash seed: * (glob)
703 702 [1]
704 703
705 704 Accept the fix
706 705
707 706 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
708 707 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
709 708 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
710 709 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
711 710 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
712 711 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
713 712 $ echo 'y' | rt -i 2>&1
714 713
715 714 --- $TESTTMP/test-failure.t
716 715 +++ $TESTTMP/test-failure.t.err
717 716 @@ -1,5 +1,5 @@
718 717 $ echo babar
719 718 - rataxes
720 719 + babar
721 720 This is a noop statement so that
722 721 this test is still more bytes than success.
723 722 pad pad pad pad............................................................
724 723 @@ -9,7 +9,7 @@
725 724 pad pad pad pad............................................................
726 725 pad pad pad pad............................................................
727 726 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
728 727 - saved backup bundle to $TESTTMP/foo.hg
729 728 + saved backup bundle to $TESTTMP/foo.hg* (glob)
730 729 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
731 730 saved backup bundle to $TESTTMP/foo.hg* (glob)
732 731 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
733 732 Accept this change? [n] ..
734 733 # Ran 2 tests, 0 skipped, 0 failed.
735 734
736 735 $ sed -e 's,(glob)$,&<,g' test-failure.t
737 736 $ echo babar
738 737 babar
739 738 This is a noop statement so that
740 739 this test is still more bytes than success.
741 740 pad pad pad pad............................................................
742 741 pad pad pad pad............................................................
743 742 pad pad pad pad............................................................
744 743 pad pad pad pad............................................................
745 744 pad pad pad pad............................................................
746 745 pad pad pad pad............................................................
747 746 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
748 747 saved backup bundle to $TESTTMP/foo.hg (glob)<
749 748 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
750 749 saved backup bundle to $TESTTMP/foo.hg (glob)<
751 750 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
752 751 saved backup bundle to $TESTTMP/*.hg (glob)<
753 752
754 753 Race condition - test file was modified when test is running
755 754
756 755 $ TESTRACEDIR=`pwd`
757 756 $ export TESTRACEDIR
758 757 $ cat > test-race.t <<EOF
759 758 > $ echo 1
760 759 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
761 760 > EOF
762 761
763 762 $ rt -i test-race.t
764 763
765 764 --- $TESTTMP/test-race.t
766 765 +++ $TESTTMP/test-race.t.err
767 766 @@ -1,2 +1,3 @@
768 767 $ echo 1
769 768 + 1
770 769 $ echo "# a new line" >> $TESTTMP/test-race.t
771 770 Reference output has changed (run again to prompt changes)
772 771 ERROR: test-race.t output changed
773 772 !
774 773 Failed test-race.t: output changed
775 774 # Ran 1 tests, 0 skipped, 1 failed.
776 775 python hash seed: * (glob)
777 776 [1]
778 777
779 778 $ rm test-race.t
780 779
781 780 When "#testcases" is used in .t files
782 781
783 782 $ cat >> test-cases.t <<EOF
784 783 > #testcases a b
785 784 > #if a
786 785 > $ echo 1
787 786 > #endif
788 787 > #if b
789 788 > $ echo 2
790 789 > #endif
791 790 > EOF
792 791
793 792 $ cat <<EOF | rt -i test-cases.t 2>&1
794 793 > y
795 794 > y
796 795 > EOF
797 796
798 797 --- $TESTTMP/test-cases.t
799 798 +++ $TESTTMP/test-cases.t.a.err
800 799 @@ -1,6 +1,7 @@
801 800 #testcases a b
802 801 #if a
803 802 $ echo 1
804 803 + 1
805 804 #endif
806 805 #if b
807 806 $ echo 2
808 807 Accept this change? [n] .
809 808 --- $TESTTMP/test-cases.t
810 809 +++ $TESTTMP/test-cases.t.b.err
811 810 @@ -5,4 +5,5 @@
812 811 #endif
813 812 #if b
814 813 $ echo 2
815 814 + 2
816 815 #endif
817 816 Accept this change? [n] .
818 817 # Ran 2 tests, 0 skipped, 0 failed.
819 818
820 819 $ cat test-cases.t
821 820 #testcases a b
822 821 #if a
823 822 $ echo 1
824 823 1
825 824 #endif
826 825 #if b
827 826 $ echo 2
828 827 2
829 828 #endif
830 829
831 830 $ cat >> test-cases.t <<'EOF'
832 831 > #if a
833 832 > $ NAME=A
834 833 > #else
835 834 > $ NAME=B
836 835 > #endif
837 836 > $ echo $NAME
838 837 > A (a !)
839 838 > B (b !)
840 839 > EOF
841 840 $ rt test-cases.t
842 841 ..
843 842 # Ran 2 tests, 0 skipped, 0 failed.
844 843
845 844 $ rm test-cases.t
846 845
847 846 (reinstall)
848 847 $ mv backup test-failure.t
849 848
850 849 No Diff
851 850 ===============
852 851
853 852 $ rt --nodiff
854 853 !.
855 854 Failed test-failure.t: output changed
856 855 # Ran 2 tests, 0 skipped, 1 failed.
857 856 python hash seed: * (glob)
858 857 [1]
859 858
860 859 test --tmpdir support
861 860 $ rt --tmpdir=$TESTTMP/keep test-success.t
862 861
863 862 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
864 863 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
865 864 .
866 865 # Ran 1 tests, 0 skipped, 0 failed.
867 866
868 867 timeouts
869 868 ========
870 869 $ cat > test-timeout.t <<EOF
871 870 > $ sleep 2
872 871 > $ echo pass
873 872 > pass
874 873 > EOF
875 874 > echo '#require slow' > test-slow-timeout.t
876 875 > cat test-timeout.t >> test-slow-timeout.t
877 876 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
878 877 st
879 878 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
880 879 Failed test-timeout.t: timed out
881 880 # Ran 1 tests, 1 skipped, 1 failed.
882 881 python hash seed: * (glob)
883 882 [1]
884 883 $ rt --timeout=1 --slowtimeout=3 \
885 884 > test-timeout.t test-slow-timeout.t --allow-slow-tests
886 885 .t
887 886 Failed test-timeout.t: timed out
888 887 # Ran 2 tests, 0 skipped, 1 failed.
889 888 python hash seed: * (glob)
890 889 [1]
891 890 $ rm test-timeout.t test-slow-timeout.t
892 891
893 892 test for --time
894 893 ==================
895 894
896 895 $ rt test-success.t --time
897 896 .
898 897 # Ran 1 tests, 0 skipped, 0 failed.
899 898 # Producing time report
900 899 start end cuser csys real Test
901 900 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
902 901
903 902 test for --time with --job enabled
904 903 ====================================
905 904
906 905 $ rt test-success.t --time --jobs 2
907 906 .
908 907 # Ran 1 tests, 0 skipped, 0 failed.
909 908 # Producing time report
910 909 start end cuser csys real Test
911 910 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
912 911
913 912 Skips
914 913 ================
915 914 $ cat > test-skip.t <<EOF
916 915 > $ echo xyzzy
917 916 > #require false
918 917 > EOF
919 918 $ rt --nodiff
920 919 !.s
921 920 Skipped test-skip.t: missing feature: nail clipper
922 921 Failed test-failure.t: output changed
923 922 # Ran 2 tests, 1 skipped, 1 failed.
924 923 python hash seed: * (glob)
925 924 [1]
926 925
927 926 $ rt --keyword xyzzy
928 927 .s
929 928 Skipped test-skip.t: missing feature: nail clipper
930 929 # Ran 2 tests, 2 skipped, 0 failed.
931 930
932 931 Skips with xml
933 932 $ rt --keyword xyzzy \
934 933 > --xunit=xunit.xml
935 934 .s
936 935 Skipped test-skip.t: missing feature: nail clipper
937 936 # Ran 2 tests, 2 skipped, 0 failed.
938 937 $ cat xunit.xml
939 938 <?xml version="1.0" encoding="utf-8"?>
940 939 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
941 940 <testcase name="test-success.t" time="*"/> (glob)
942 941 <testcase name="test-skip.t">
943 942 <skipped>
944 943 <![CDATA[missing feature: nail clipper]]> </skipped>
945 944 </testcase>
946 945 </testsuite>
947 946
948 947 Missing skips or blacklisted skips don't count as executed:
949 948 $ echo test-failure.t > blacklist
950 949 $ rt --blacklist=blacklist --json\
951 950 > test-failure.t test-bogus.t
952 951 ss
953 952 Skipped test-bogus.t: Doesn't exist
954 953 Skipped test-failure.t: blacklisted
955 954 # Ran 0 tests, 2 skipped, 0 failed.
956 955 $ cat report.json
957 956 testreport ={
958 957 "test-bogus.t": {
959 958 "result": "skip"
960 959 },
961 960 "test-failure.t": {
962 961 "result": "skip"
963 962 }
964 963 } (no-eol)
965 964
966 965 Whitelist trumps blacklist
967 966 $ echo test-failure.t > whitelist
968 967 $ rt --blacklist=blacklist --whitelist=whitelist --json\
969 968 > test-failure.t test-bogus.t
970 969 s
971 970 --- $TESTTMP/test-failure.t
972 971 +++ $TESTTMP/test-failure.t.err
973 972 @@ -1,5 +1,5 @@
974 973 $ echo babar
975 974 - rataxes
976 975 + babar
977 976 This is a noop statement so that
978 977 this test is still more bytes than success.
979 978 pad pad pad pad............................................................
980 979
981 980 ERROR: test-failure.t output changed
982 981 !
983 982 Skipped test-bogus.t: Doesn't exist
984 983 Failed test-failure.t: output changed
985 984 # Ran 1 tests, 1 skipped, 1 failed.
986 985 python hash seed: * (glob)
987 986 [1]
988 987
989 988 Ensure that --test-list causes only the tests listed in that file to
990 989 be executed.
991 990 $ echo test-success.t >> onlytest
992 991 $ rt --test-list=onlytest
993 992 .
994 993 # Ran 1 tests, 0 skipped, 0 failed.
995 994 $ echo test-bogus.t >> anothertest
996 995 $ rt --test-list=onlytest --test-list=anothertest
997 996 s.
998 997 Skipped test-bogus.t: Doesn't exist
999 998 # Ran 1 tests, 1 skipped, 0 failed.
1000 999 $ rm onlytest anothertest
1001 1000
1002 1001 test for --json
1003 1002 ==================
1004 1003
1005 1004 $ rt --json
1006 1005
1007 1006 --- $TESTTMP/test-failure.t
1008 1007 +++ $TESTTMP/test-failure.t.err
1009 1008 @@ -1,5 +1,5 @@
1010 1009 $ echo babar
1011 1010 - rataxes
1012 1011 + babar
1013 1012 This is a noop statement so that
1014 1013 this test is still more bytes than success.
1015 1014 pad pad pad pad............................................................
1016 1015
1017 1016 ERROR: test-failure.t output changed
1018 1017 !.s
1019 1018 Skipped test-skip.t: missing feature: nail clipper
1020 1019 Failed test-failure.t: output changed
1021 1020 # Ran 2 tests, 1 skipped, 1 failed.
1022 1021 python hash seed: * (glob)
1023 1022 [1]
1024 1023
1025 1024 $ cat report.json
1026 1025 testreport ={
1027 1026 "test-failure.t": [\{] (re)
1028 1027 "csys": "\s*[\d\.]{4,5}", ? (re)
1029 1028 "cuser": "\s*[\d\.]{4,5}", ? (re)
1030 1029 "diff": "---.+\+\+\+.+", ? (re)
1031 1030 "end": "\s*[\d\.]{4,5}", ? (re)
1032 1031 "result": "failure", ? (re)
1033 1032 "start": "\s*[\d\.]{4,5}", ? (re)
1034 1033 "time": "\s*[\d\.]{4,5}" (re)
1035 1034 }, ? (re)
1036 1035 "test-skip.t": {
1037 1036 "csys": "\s*[\d\.]{4,5}", ? (re)
1038 1037 "cuser": "\s*[\d\.]{4,5}", ? (re)
1039 1038 "diff": "", ? (re)
1040 1039 "end": "\s*[\d\.]{4,5}", ? (re)
1041 1040 "result": "skip", ? (re)
1042 1041 "start": "\s*[\d\.]{4,5}", ? (re)
1043 1042 "time": "\s*[\d\.]{4,5}" (re)
1044 1043 }, ? (re)
1045 1044 "test-success.t": [\{] (re)
1046 1045 "csys": "\s*[\d\.]{4,5}", ? (re)
1047 1046 "cuser": "\s*[\d\.]{4,5}", ? (re)
1048 1047 "diff": "", ? (re)
1049 1048 "end": "\s*[\d\.]{4,5}", ? (re)
1050 1049 "result": "success", ? (re)
1051 1050 "start": "\s*[\d\.]{4,5}", ? (re)
1052 1051 "time": "\s*[\d\.]{4,5}" (re)
1053 1052 }
1054 1053 } (no-eol)
1055 1054 --json with --outputdir
1056 1055
1057 1056 $ rm report.json
1058 1057 $ rm -r output
1059 1058 $ mkdir output
1060 1059 $ rt --json --outputdir output
1061 1060
1062 1061 --- $TESTTMP/test-failure.t
1063 1062 +++ $TESTTMP/output/test-failure.t.err
1064 1063 @@ -1,5 +1,5 @@
1065 1064 $ echo babar
1066 1065 - rataxes
1067 1066 + babar
1068 1067 This is a noop statement so that
1069 1068 this test is still more bytes than success.
1070 1069 pad pad pad pad............................................................
1071 1070
1072 1071 ERROR: test-failure.t output changed
1073 1072 !.s
1074 1073 Skipped test-skip.t: missing feature: nail clipper
1075 1074 Failed test-failure.t: output changed
1076 1075 # Ran 2 tests, 1 skipped, 1 failed.
1077 1076 python hash seed: * (glob)
1078 1077 [1]
1079 1078 $ f report.json
1080 1079 report.json: file not found
1081 1080 $ cat output/report.json
1082 1081 testreport ={
1083 1082 "test-failure.t": [\{] (re)
1084 1083 "csys": "\s*[\d\.]{4,5}", ? (re)
1085 1084 "cuser": "\s*[\d\.]{4,5}", ? (re)
1086 1085 "diff": "---.+\+\+\+.+", ? (re)
1087 1086 "end": "\s*[\d\.]{4,5}", ? (re)
1088 1087 "result": "failure", ? (re)
1089 1088 "start": "\s*[\d\.]{4,5}", ? (re)
1090 1089 "time": "\s*[\d\.]{4,5}" (re)
1091 1090 }, ? (re)
1092 1091 "test-skip.t": {
1093 1092 "csys": "\s*[\d\.]{4,5}", ? (re)
1094 1093 "cuser": "\s*[\d\.]{4,5}", ? (re)
1095 1094 "diff": "", ? (re)
1096 1095 "end": "\s*[\d\.]{4,5}", ? (re)
1097 1096 "result": "skip", ? (re)
1098 1097 "start": "\s*[\d\.]{4,5}", ? (re)
1099 1098 "time": "\s*[\d\.]{4,5}" (re)
1100 1099 }, ? (re)
1101 1100 "test-success.t": [\{] (re)
1102 1101 "csys": "\s*[\d\.]{4,5}", ? (re)
1103 1102 "cuser": "\s*[\d\.]{4,5}", ? (re)
1104 1103 "diff": "", ? (re)
1105 1104 "end": "\s*[\d\.]{4,5}", ? (re)
1106 1105 "result": "success", ? (re)
1107 1106 "start": "\s*[\d\.]{4,5}", ? (re)
1108 1107 "time": "\s*[\d\.]{4,5}" (re)
1109 1108 }
1110 1109 } (no-eol)
1111 1110 $ ls -a output
1112 1111 .
1113 1112 ..
1114 1113 .testtimes
1115 1114 report.json
1116 1115 test-failure.t.err
1117 1116
1118 1117 Test that failed test accepted through interactive are properly reported:
1119 1118
1120 1119 $ cp test-failure.t backup
1121 1120 $ echo y | rt --json -i
1122 1121
1123 1122 --- $TESTTMP/test-failure.t
1124 1123 +++ $TESTTMP/test-failure.t.err
1125 1124 @@ -1,5 +1,5 @@
1126 1125 $ echo babar
1127 1126 - rataxes
1128 1127 + babar
1129 1128 This is a noop statement so that
1130 1129 this test is still more bytes than success.
1131 1130 pad pad pad pad............................................................
1132 1131 Accept this change? [n] ..s
1133 1132 Skipped test-skip.t: missing feature: nail clipper
1134 1133 # Ran 2 tests, 1 skipped, 0 failed.
1135 1134
1136 1135 $ cat report.json
1137 1136 testreport ={
1138 1137 "test-failure.t": [\{] (re)
1139 1138 "csys": "\s*[\d\.]{4,5}", ? (re)
1140 1139 "cuser": "\s*[\d\.]{4,5}", ? (re)
1141 1140 "diff": "", ? (re)
1142 1141 "end": "\s*[\d\.]{4,5}", ? (re)
1143 1142 "result": "success", ? (re)
1144 1143 "start": "\s*[\d\.]{4,5}", ? (re)
1145 1144 "time": "\s*[\d\.]{4,5}" (re)
1146 1145 }, ? (re)
1147 1146 "test-skip.t": {
1148 1147 "csys": "\s*[\d\.]{4,5}", ? (re)
1149 1148 "cuser": "\s*[\d\.]{4,5}", ? (re)
1150 1149 "diff": "", ? (re)
1151 1150 "end": "\s*[\d\.]{4,5}", ? (re)
1152 1151 "result": "skip", ? (re)
1153 1152 "start": "\s*[\d\.]{4,5}", ? (re)
1154 1153 "time": "\s*[\d\.]{4,5}" (re)
1155 1154 }, ? (re)
1156 1155 "test-success.t": [\{] (re)
1157 1156 "csys": "\s*[\d\.]{4,5}", ? (re)
1158 1157 "cuser": "\s*[\d\.]{4,5}", ? (re)
1159 1158 "diff": "", ? (re)
1160 1159 "end": "\s*[\d\.]{4,5}", ? (re)
1161 1160 "result": "success", ? (re)
1162 1161 "start": "\s*[\d\.]{4,5}", ? (re)
1163 1162 "time": "\s*[\d\.]{4,5}" (re)
1164 1163 }
1165 1164 } (no-eol)
1166 1165 $ mv backup test-failure.t
1167 1166
1168 1167 backslash on end of line with glob matching is handled properly
1169 1168
1170 1169 $ cat > test-glob-backslash.t << EOF
1171 1170 > $ echo 'foo bar \\'
1172 1171 > foo * \ (glob)
1173 1172 > EOF
1174 1173
1175 1174 $ rt test-glob-backslash.t
1176 1175 .
1177 1176 # Ran 1 tests, 0 skipped, 0 failed.
1178 1177
1179 1178 $ rm -f test-glob-backslash.t
1180 1179
1181 1180 Test globbing of local IP addresses
1182 1181 $ echo 172.16.18.1
1183 1182 $LOCALIP (glob)
1184 1183 $ echo dead:beef::1
1185 1184 $LOCALIP (glob)
1186 1185
1187 1186 Test reusability for third party tools
1188 1187 ======================================
1189 1188
1190 1189 $ mkdir "$TESTTMP"/anothertests
1191 1190 $ cd "$TESTTMP"/anothertests
1192 1191
1193 1192 test that `run-tests.py` can execute hghave, even if it runs not in
1194 1193 Mercurial source tree.
1195 1194
1196 1195 $ cat > test-hghave.t <<EOF
1197 1196 > #require true
1198 1197 > $ echo foo
1199 1198 > foo
1200 1199 > EOF
1201 1200 $ rt test-hghave.t
1202 1201 .
1203 1202 # Ran 1 tests, 0 skipped, 0 failed.
1204 1203
1205 1204 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1206 1205 running is placed.
1207 1206
1208 1207 $ cat > test-runtestdir.t <<EOF
1209 1208 > - $TESTDIR, in which test-run-tests.t is placed
1210 1209 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1211 1210 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1212 1211 >
1213 1212 > #if windows
1214 1213 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1215 1214 > #else
1216 1215 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1217 1216 > #endif
1218 1217 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1219 1218 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1220 1219 > #!USRBINENVPY
1221 1220 > #
1222 1221 > # check-code - a style and portability checker for Mercurial
1223 1222 > EOF
1224 1223 $ rt test-runtestdir.t
1225 1224 .
1226 1225 # Ran 1 tests, 0 skipped, 0 failed.
1227 1226
1228 1227 #if execbit
1229 1228
1230 1229 test that TESTDIR is referred in PATH
1231 1230
1232 1231 $ cat > custom-command.sh <<EOF
1233 1232 > #!/bin/sh
1234 1233 > echo "hello world"
1235 1234 > EOF
1236 1235 $ chmod +x custom-command.sh
1237 1236 $ cat > test-testdir-path.t <<EOF
1238 1237 > $ custom-command.sh
1239 1238 > hello world
1240 1239 > EOF
1241 1240 $ rt test-testdir-path.t
1242 1241 .
1243 1242 # Ran 1 tests, 0 skipped, 0 failed.
1244 1243
1245 1244 #endif
1246 1245
1247 1246 test support for --allow-slow-tests
1248 1247 $ cat > test-very-slow-test.t <<EOF
1249 1248 > #require slow
1250 1249 > $ echo pass
1251 1250 > pass
1252 1251 > EOF
1253 1252 $ rt test-very-slow-test.t
1254 1253 s
1255 1254 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1256 1255 # Ran 0 tests, 1 skipped, 0 failed.
1257 1256 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1258 1257 .
1259 1258 # Ran 1 tests, 0 skipped, 0 failed.
1260 1259
1261 1260 support for running a test outside the current directory
1262 1261 $ mkdir nonlocal
1263 1262 $ cat > nonlocal/test-is-not-here.t << EOF
1264 1263 > $ echo pass
1265 1264 > pass
1266 1265 > EOF
1267 1266 $ rt nonlocal/test-is-not-here.t
1268 1267 .
1269 1268 # Ran 1 tests, 0 skipped, 0 failed.
1270 1269
1271 1270 support for automatically discovering test if arg is a folder
1272 1271 $ mkdir tmp && cd tmp
1273 1272
1274 1273 $ cat > test-uno.t << EOF
1275 1274 > $ echo line
1276 1275 > line
1277 1276 > EOF
1278 1277
1279 1278 $ cp test-uno.t test-dos.t
1280 1279 $ cd ..
1281 1280 $ cp -R tmp tmpp
1282 1281 $ cp tmp/test-uno.t test-solo.t
1283 1282
1284 1283 $ $PYTHON $TESTDIR/run-tests.py tmp/ test-solo.t tmpp
1285 1284 .....
1286 1285 # Ran 5 tests, 0 skipped, 0 failed.
1287 1286 $ rm -rf tmp tmpp
1288 1287
1289 1288 support for running run-tests.py from another directory
1290 1289 $ mkdir tmp && cd tmp
1291 1290
1292 1291 $ cat > useful-file.sh << EOF
1293 1292 > important command
1294 1293 > EOF
1295 1294
1296 1295 $ cat > test-folder.t << EOF
1297 1296 > $ cat \$TESTDIR/useful-file.sh
1298 1297 > important command
1299 1298 > EOF
1300 1299
1301 1300 $ cat > test-folder-fail.t << EOF
1302 1301 > $ cat \$TESTDIR/useful-file.sh
1303 1302 > important commando
1304 1303 > EOF
1305 1304
1306 1305 $ cd ..
1307 1306 $ $PYTHON $TESTDIR/run-tests.py tmp/test-*.t
1308 1307
1309 1308 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1310 1309 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1311 1310 @@ -1,2 +1,2 @@
1312 1311 $ cat $TESTDIR/useful-file.sh
1313 1312 - important commando
1314 1313 + important command
1315 1314
1316 1315 ERROR: test-folder-fail.t output changed
1317 1316 !.
1318 1317 Failed test-folder-fail.t: output changed
1319 1318 # Ran 2 tests, 0 skipped, 1 failed.
1320 1319 python hash seed: * (glob)
1321 1320 [1]
1322 1321
1323 1322 support for bisecting failed tests automatically
1324 1323 $ hg init bisect
1325 1324 $ cd bisect
1326 1325 $ cat >> test-bisect.t <<EOF
1327 1326 > $ echo pass
1328 1327 > pass
1329 1328 > EOF
1330 1329 $ hg add test-bisect.t
1331 1330 $ hg ci -m 'good'
1332 1331 $ cat >> test-bisect.t <<EOF
1333 1332 > $ echo pass
1334 1333 > fail
1335 1334 > EOF
1336 1335 $ hg ci -m 'bad'
1337 1336 $ rt --known-good-rev=0 test-bisect.t
1338 1337
1339 1338 --- $TESTTMP/anothertests/bisect/test-bisect.t
1340 1339 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1341 1340 @@ -1,4 +1,4 @@
1342 1341 $ echo pass
1343 1342 pass
1344 1343 $ echo pass
1345 1344 - fail
1346 1345 + pass
1347 1346
1348 1347 ERROR: test-bisect.t output changed
1349 1348 !
1350 1349 Failed test-bisect.t: output changed
1351 1350 test-bisect.t broken by 72cbf122d116 (bad)
1352 1351 # Ran 1 tests, 0 skipped, 1 failed.
1353 1352 python hash seed: * (glob)
1354 1353 [1]
1355 1354
1356 1355 $ cd ..
1357 1356
1358 1357 support bisecting a separate repo
1359 1358
1360 1359 $ hg init bisect-dependent
1361 1360 $ cd bisect-dependent
1362 1361 $ cat > test-bisect-dependent.t <<EOF
1363 1362 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1364 1363 > pass
1365 1364 > EOF
1366 1365 $ hg commit -Am dependent test-bisect-dependent.t
1367 1366
1368 1367 $ rt --known-good-rev=0 test-bisect-dependent.t
1369 1368
1370 1369 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1371 1370 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1372 1371 @@ -1,2 +1,2 @@
1373 1372 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1374 1373 - pass
1375 1374 + fail
1376 1375
1377 1376 ERROR: test-bisect-dependent.t output changed
1378 1377 !
1379 1378 Failed test-bisect-dependent.t: output changed
1380 1379 Failed to identify failure point for test-bisect-dependent.t
1381 1380 # Ran 1 tests, 0 skipped, 1 failed.
1382 1381 python hash seed: * (glob)
1383 1382 [1]
1384 1383
1385 1384 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1386 Usage: run-tests.py [options] [tests]
1387
1385 usage: run-tests.py [options] [tests]
1388 1386 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1389 1387 [2]
1390 1388
1391 1389 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1392 1390
1393 1391 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1394 1392 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1395 1393 @@ -1,2 +1,2 @@
1396 1394 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1397 1395 - pass
1398 1396 + fail
1399 1397
1400 1398 ERROR: test-bisect-dependent.t output changed
1401 1399 !
1402 1400 Failed test-bisect-dependent.t: output changed
1403 1401 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1404 1402 # Ran 1 tests, 0 skipped, 1 failed.
1405 1403 python hash seed: * (glob)
1406 1404 [1]
1407 1405
1408 1406 $ cd ..
1409 1407
1410 1408 Test a broken #if statement doesn't break run-tests threading.
1411 1409 ==============================================================
1412 1410 $ mkdir broken
1413 1411 $ cd broken
1414 1412 $ cat > test-broken.t <<EOF
1415 1413 > true
1416 1414 > #if notarealhghavefeature
1417 1415 > $ false
1418 1416 > #endif
1419 1417 > EOF
1420 1418 $ for f in 1 2 3 4 ; do
1421 1419 > cat > test-works-$f.t <<EOF
1422 1420 > This is test case $f
1423 1421 > $ sleep 1
1424 1422 > EOF
1425 1423 > done
1426 1424 $ rt -j 2
1427 1425 ....
1428 1426 # Ran 5 tests, 0 skipped, 0 failed.
1429 1427 skipped: unknown feature: notarealhghavefeature
1430 1428
1431 1429 $ cd ..
1432 1430 $ rm -rf broken
1433 1431
1434 1432 Test cases in .t files
1435 1433 ======================
1436 1434 $ mkdir cases
1437 1435 $ cd cases
1438 1436 $ cat > test-cases-abc.t <<'EOF'
1439 1437 > #testcases A B C
1440 1438 > $ V=B
1441 1439 > #if A
1442 1440 > $ V=A
1443 1441 > #endif
1444 1442 > #if C
1445 1443 > $ V=C
1446 1444 > #endif
1447 1445 > $ echo $V | sed 's/A/C/'
1448 1446 > C
1449 1447 > #if C
1450 1448 > $ [ $V = C ]
1451 1449 > #endif
1452 1450 > #if A
1453 1451 > $ [ $V = C ]
1454 1452 > [1]
1455 1453 > #endif
1456 1454 > #if no-C
1457 1455 > $ [ $V = C ]
1458 1456 > [1]
1459 1457 > #endif
1460 1458 > $ [ $V = D ]
1461 1459 > [1]
1462 1460 > EOF
1463 1461 $ rt
1464 1462 .
1465 1463 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1466 1464 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1467 1465 @@ -7,7 +7,7 @@
1468 1466 $ V=C
1469 1467 #endif
1470 1468 $ echo $V | sed 's/A/C/'
1471 1469 - C
1472 1470 + B
1473 1471 #if C
1474 1472 $ [ $V = C ]
1475 1473 #endif
1476 1474
1477 1475 ERROR: test-cases-abc.t (case B) output changed
1478 1476 !.
1479 1477 Failed test-cases-abc.t (case B): output changed
1480 1478 # Ran 3 tests, 0 skipped, 1 failed.
1481 1479 python hash seed: * (glob)
1482 1480 [1]
1483 1481
1484 1482 --restart works
1485 1483
1486 1484 $ rt --restart
1487 1485
1488 1486 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1489 1487 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1490 1488 @@ -7,7 +7,7 @@
1491 1489 $ V=C
1492 1490 #endif
1493 1491 $ echo $V | sed 's/A/C/'
1494 1492 - C
1495 1493 + B
1496 1494 #if C
1497 1495 $ [ $V = C ]
1498 1496 #endif
1499 1497
1500 1498 ERROR: test-cases-abc.t (case B) output changed
1501 1499 !.
1502 1500 Failed test-cases-abc.t (case B): output changed
1503 1501 # Ran 2 tests, 0 skipped, 1 failed.
1504 1502 python hash seed: * (glob)
1505 1503 [1]
1506 1504
1507 1505 --restart works with outputdir
1508 1506
1509 1507 $ mkdir output
1510 1508 $ mv test-cases-abc.t.B.err output
1511 1509 $ rt --restart --outputdir output
1512 1510
1513 1511 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1514 1512 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1515 1513 @@ -7,7 +7,7 @@
1516 1514 $ V=C
1517 1515 #endif
1518 1516 $ echo $V | sed 's/A/C/'
1519 1517 - C
1520 1518 + B
1521 1519 #if C
1522 1520 $ [ $V = C ]
1523 1521 #endif
1524 1522
1525 1523 ERROR: test-cases-abc.t (case B) output changed
1526 1524 !.
1527 1525 Failed test-cases-abc.t (case B): output changed
1528 1526 # Ran 2 tests, 0 skipped, 1 failed.
1529 1527 python hash seed: * (glob)
1530 1528 [1]
1531 1529
1532 1530 Test automatic pattern replacement
1533 1531
1534 1532 $ cat << EOF >> common-pattern.py
1535 1533 > substitutions = [
1536 1534 > (br'foo-(.*)\\b',
1537 1535 > br'\$XXX=\\1\$'),
1538 1536 > (br'bar\\n',
1539 1537 > br'\$YYY$\\n'),
1540 1538 > ]
1541 1539 > EOF
1542 1540
1543 1541 $ cat << EOF >> test-substitution.t
1544 1542 > $ echo foo-12
1545 1543 > \$XXX=12$
1546 1544 > $ echo foo-42
1547 1545 > \$XXX=42$
1548 1546 > $ echo bar prior
1549 1547 > bar prior
1550 1548 > $ echo lastbar
1551 1549 > last\$YYY$
1552 1550 > $ echo foo-bar foo-baz
1553 1551 > EOF
1554 1552
1555 1553 $ rt test-substitution.t
1556 1554
1557 1555 --- $TESTTMP/anothertests/cases/test-substitution.t
1558 1556 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1559 1557 @@ -7,3 +7,4 @@
1560 1558 $ echo lastbar
1561 1559 last$YYY$
1562 1560 $ echo foo-bar foo-baz
1563 1561 + $XXX=bar foo-baz$
1564 1562
1565 1563 ERROR: test-substitution.t output changed
1566 1564 !
1567 1565 Failed test-substitution.t: output changed
1568 1566 # Ran 1 tests, 0 skipped, 1 failed.
1569 1567 python hash seed: * (glob)
1570 1568 [1]
General Comments 0
You need to be logged in to leave comments. Login now