##// END OF EJS Templates
run-tests: add support for external test result...
Boris Feld -
r38635:c44ae599 default
parent child Browse files
Show More
@@ -0,0 +1,46
1 from __future__ import print_function
2
3 import unittest
4
5 class TestResult(unittest._TextTestResult):
6
7 def __init__(self, options, *args, **kwargs):
8 super(TestResult, self).__init__(*args, **kwargs)
9 self._options = options
10
11 # unittest.TestResult didn't have skipped until 2.7. We need to
12 # polyfill it.
13 self.skipped = []
14
15 # We have a custom "ignored" result that isn't present in any Python
16 # unittest implementation. It is very similar to skipped. It may make
17 # sense to map it into skip some day.
18 self.ignored = []
19
20 self.times = []
21 self._firststarttime = None
22 # Data stored for the benefit of generating xunit reports.
23 self.successes = []
24 self.faildata = {}
25
26 def addFailure(self, test, reason):
27 print("FAILURE!", test, reason)
28
29 def addSuccess(self, test):
30 print("SUCCESS!", test)
31
32 def addError(self, test, err):
33 print("ERR!", test, err)
34
35 # Polyfill.
36 def addSkip(self, test, reason):
37 print("SKIP!", test, reason)
38
39 def addIgnore(self, test, reason):
40 print("IGNORE!", test, reason)
41
42 def addOutputMismatch(self, test, ret, got, expected):
43 return False
44
45 def stopTest(self, test, interrupted=False):
46 super(TestResult, self).stopTest(test)
@@ -1,3137 +1,3146
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
# Python 2 named the module Queue; Python 3 renamed it to queue.
try:
    import Queue as queue
except ImportError:
    import queue

# shlex.quote is Python 3; Python 2 keeps the same function in pipes.quote.
try:
    import shlex
    shellquote = shlex.quote
except (ImportError, AttributeError):
    import pipes
    shellquote = pipes.quote

# Opt-in strictness: make implicit unicode/bytes coercion an error on
# Python 2 (reload/setdefaultencoding do not exist on Python 3 -> NameError).
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment before tests mutate it.
origenviron = os.environ.copy()
# Bytes view of the environment where the platform provides one (Python 3
# POSIX); falls back to os.environ elsewhere.
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess creation (see Popen4).
processlock = threading.Lock()
91 91
# Optional colored output support via pygments.
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try:  # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        # Shared lexer/formatter instances for colorising diff output.
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    class TestRunnerStyle(style.Style):
        """Pygments style mapping runner-specific token types to colors."""
        default_style = ""
        # Custom token types for the runner's own status lines.
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        """Lexer for the runner's "Skipped/Failed/ERROR" summary lines."""
        # A test file name, optionally with a #case suffix.
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
# Interpreter gate: this script supports Python 2.7 and Python 3.5+.
# _bytespath/_strpath paper over the str/bytes path split between the two.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range  # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Convert a text path to bytes; None passes through unchanged.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # Convert a bytes path to text; None passes through unchanged.
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4: refuse to run rather than fail obscurely later.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70)  # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support: os.WIFEXITED does not exist there.
wifexited = getattr(os, "WIFEXITED", lambda x: False)
175 175 # Whether to use IPv6
# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        # This platform's socket module does not define the family at all.
        return False
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone else is listening there: binding works in principle.
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    finally:
        # The original leaked the socket when bind() raised and carried an
        # unreachable try/else branch; always close the fd here instead.
        if s is not None:
            s.close()
199 199
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """Return True if *port* seems free to bind on localhost.

    Honors the module-level useipv6 flag when choosing the address family.
    """
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False
    finally:
        # The original leaked the file descriptor when bind() raised;
        # close it on every path.
        if s is not None:
            s.close()
219 219
# close_fds is only safe/cheap on POSIX.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell in directory *wd* and return the Popen.

    stdout and stderr are merged into a single pipe.  If *timeout* is
    truthy, a watchdog thread kills the process after that many seconds and
    sets p.timeout = True so callers can distinguish a timeout from a
    normal exit.
    """
    # Serialize process creation; presumably guards against fd-inheritance
    # races between concurrent spawns -- confirm before removing.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen2.Popen4-compatible aliases for legacy call sites.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline passes or the process exits.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
# Path of the interpreter running this script, normalized to forward slashes
# (bytes, for use in test environments).
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable that controls the module search path; Jython uses
# its own.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment override variable, default).
# getparser() collapses each entry to its effective value.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
258 258
def canonpath(path):
    """Expand '~' in *path* and resolve symlinks to a canonical path."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
261 261
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist-style files into a dict.

    Each non-empty line (after stripping b'#' comments) becomes a key
    mapped to the name of the file it came from.  Files that do not exist
    are skipped, with an optional warning; other I/O errors propagate.
    """
    entries = {}
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # 'with' guarantees the handle is closed even if parsing raises
        # (the original leaked it on any exception before f.close()).
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
282 282
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                # '#testcases a b c' declares the cases for this file.
                if line.startswith(b'#testcases '):
                    cases.update(line[11:].split())
    except IOError as err:
        # A missing file simply yields no cases; anything else is real.
        if err.errno != errno.ENOENT:
            raise
    return cases
298 298
def getparser():
    """Obtain the OptionParser used by the CLI.

    Returns an argparse.ArgumentParser with options split into four
    groups (selection, harness behavior, Mercurial configuration,
    reporting), with defaults taken from the module-level ``defaults``
    table (environment variables override the hard-coded values).
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # --- which tests to run ---
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help="retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    # --- how the harness behaves while running ---
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    # --- which hg gets tested, and how it is configured ---
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")

    # --- how results get reported ---
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    # Collapse (envvar, default) pairs to effective values, coercing the
    # environment string back to the default's type, then register them.
    # NOTE: this mutates the module-level ``defaults`` dict in place.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
435 435
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Normalizes paths to bytes, resolves --local into --with-hg/--with-chg,
    validates mutually exclusive combinations via parser.error() (which
    exits), and sets the module globals ``useipv6`` and ``verbose``.
    Returns the validated options namespace.
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # Derive repo root from this script's location and point with_hg
        # (and with_chg, if requested) at the in-tree binaries.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False  # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # vlog()/log() use the truthiness of ``verbose``; '' enables them
        # without adding a prefix to every line.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug disables timeouts; warn if the user tried to set them.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
568 568
def rename(src, dst):
    """Move *src* to *dst*, overwriting an existing destination.

    Unlike os.rename() this is a copy-then-delete: it is not atomic, but
    it tolerates an already-existing *dst* and open file handles.
    """
    shutil.copy(src, dst)
    os.remove(src)
575 575
# difflib.unified_diff only accepts str on Python 3; wrap it with
# diff_bytes so both interpreter lines can diff bytes test output.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)

def getdiff(expected, output, ref, err):
    """Return (servefail, lines): a unified diff of expected vs. output.

    expected/output are lists of bytes lines; ref/err are the file labels.
    servefail is True when the diff shows a 'child process failed to
    start' abort, which callers treat as an infrastructure failure.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            # Normalize Windows path separators in the diff header...
            line = line.replace(b'\\', b'/')
            # ...and drop a trailing space before the newline.
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
                             b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
595 595
# Verbosity flag; parseargs() flips it to '' when --verbose is given.
verbose = False

def vlog(*msg):
    """Forward *msg* to log(), but only when verbose mode is active."""
    if verbose is not False:
        return log(*msg)
603 603
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
623 623
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    with iolock:
        items = list(msg)
        if verbose:
            # Prepend the verbosity prefix when one is configured.
            items = [verbose] + items
        for item in items:
            sys.stdout.write('%s ' % (item,))
        sys.stdout.write('\n')
        sys.stdout.flush()
636 636
def highlightdiff(line, color):
    """Colorise a diff line (bytes) via pygments; pass through if color
    is off."""
    if not color:
        return line
    assert pygmentspresent
    text = line.decode('latin1')
    colored = pygments.highlight(text, difflexer, terminal256formatter)
    return colored.encode('latin1')
643 643
def highlightmsg(msg, color):
    """Colorise a runner status message via pygments; pass through if
    color is off."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
649 649
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # The process may have exited already; losing that race is fine.
        pass
657 657
def killdaemons(pidfile):
    """Kill any daemon processes whose pids are listed in *pidfile*.

    Delegates to the killdaemons helper module that ships alongside this
    script; the pidfile is removed afterwards (remove=True).
    """
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
662 662
663 663 class Test(unittest.TestCase):
664 664 """Encapsulates a single, runnable test.
665 665
666 666 While this class conforms to the unittest.TestCase API, it differs in that
667 667 instances need to be instantiated manually. (Typically, unittest.TestCase
668 668 classes are instantiated automatically by scanning modules.)
669 669 """
670 670
671 671 # Status code reserved for skipped tests (used by hghave).
672 672 SKIPPED_STATUS = 80
673 673
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # Fall back to the harness-wide defaults for unspecified limits.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        # Identity: bytes path/name plus a str name for unittest display.
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # Where the .err file (actual output on failure) gets written.
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        # Configuration captured from the constructor arguments.
        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state; (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        # Expected output lines (None in --debug mode, [] if no .out file).
        self._refout = self.readrefout()
750 750
    def readrefout(self):
        """read reference output"""
        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if self._debug:
            return None  # to match "out is None"
        elif os.path.exists(self.refpath):
            # self.refpath is provided by subclasses -- not defined in this
            # base class as shown here; confirm against the subclass.
            with open(self.refpath, 'rb') as f:
                # Keep line endings so the diff against actual output is exact.
                return f.read().splitlines(True)
        else:
            return []
762 762
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        # unittest reporting prints tests via str(); use the file name.
        return self.name

    def shortDescription(self):
        # Shown by verbose unittest runners in place of the docstring.
        return self.name
773 773
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so an instance can be run repeatedly (--loop).
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The thread-level temp dir may already exist when several tests
        # share it; only a real failure should propagate.
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # Dedicated directory for the chg server socket of this test.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
806 806
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                # NOTE(review): TestResult.addFailure in this file returns
                # None, so this branch never fires with that class; other
                # result implementations may return a true value to mean
                # "failure was accepted" -- confirm.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown errors turn an otherwise passing test into an error.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
861 861
    def runTest(self):
        """Run this test instance.

        Stores the exit code and captured output on the instance; signals
        skips by raising unittest.SkipTest and failures via self.fail()
        (AssertionError). Returns nothing on success.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable summary of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
930 930
    def tearDown(self):
        """Tasks to perform after run()."""
        # Kill any daemons the test left running before touching the dirs.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Preserve failing (non-skipped, non-debug) output in the .err file
        # so it can be diffed after the run.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
957 957
    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _portmap(self, i):
        # Return a (regex, replacement) pair normalizing the i-th concrete
        # port number in output to the symbolic $HGPORT / $HGPORT<i> form.
        offset = b'' if i == 0 else b'%d' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
969 969
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Additional substitutions may be supplied by the (trusted,
        # in-repository) common-pattern.py file in the test directory.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1002 1002
    def _escapepath(self, p):
        # Build a regex matching path 'p': on Windows, letters match
        # case-insensitively and either path separator is accepted;
        # elsewhere a plain re.escape suffices.
        # NOTE(review): iterating bytes on Python 3 yields ints, so the
        # per-character .isalpha()/.lower() calls in the NT branch look
        # Python 2 only -- confirm whether this path runs under py3.
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)

    def _localip(self):
        # The loopback address as it will appear in test output.
        if self._useipv6:
            return b'::1'
        else:
            return b'127.0.0.1'
1018 1018
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables the harness added (absent from the original
            # environment) get unset on restore.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1047 1047
1048 1048 def _getenv(self):
1049 1049 """Obtain environment variables to use during test execution."""
1050 1050 def defineport(i):
1051 1051 offset = '' if i == 0 else '%s' % i
1052 1052 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1053 1053 env = os.environ.copy()
1054 1054 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1055 1055 env['HGEMITWARNINGS'] = '1'
1056 1056 env['TESTTMP'] = self._testtmp
1057 1057 env['TESTNAME'] = self.name
1058 1058 env['HOME'] = self._testtmp
1059 1059 # This number should match portneeded in _getport
1060 1060 for port in xrange(3):
1061 1061 # This list should be parallel to _portmap in _getreplacements
1062 1062 defineport(port)
1063 1063 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1064 1064 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1065 1065 env["HGEDITOR"] = ('"' + sys.executable + '"'
1066 1066 + ' -c "import sys; sys.exit(0)"')
1067 1067 env["HGMERGE"] = "internal:merge"
1068 1068 env["HGUSER"] = "test"
1069 1069 env["HGENCODING"] = "ascii"
1070 1070 env["HGENCODINGMODE"] = "strict"
1071 1071 env['HGIPV6'] = str(int(self._useipv6))
1072 1072
1073 1073 extraextensions = []
1074 1074 for opt in self._extraconfigopts:
1075 1075 section, key = opt.encode('utf-8').split(b'.', 1)
1076 1076 if section != 'extensions':
1077 1077 continue
1078 1078 name = key.split(b'=', 1)[0]
1079 1079 extraextensions.append(name)
1080 1080
1081 1081 if extraextensions:
1082 1082 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1083 1083
1084 1084 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1085 1085 # IP addresses.
1086 1086 env['LOCALIP'] = self._localip()
1087 1087
1088 1088 # Reset some environment variables to well-known values so that
1089 1089 # the tests produce repeatable output.
1090 1090 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1091 1091 env['TZ'] = 'GMT'
1092 1092 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1093 1093 env['COLUMNS'] = '80'
1094 1094 env['TERM'] = 'xterm'
1095 1095
1096 1096 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1097 1097 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1098 1098 'NO_PROXY CHGDEBUG').split():
1099 1099 if k in env:
1100 1100 del env[k]
1101 1101
1102 1102 # unset env related to hooks
1103 1103 for k in list(env):
1104 1104 if k.startswith('HG_'):
1105 1105 del env[k]
1106 1106
1107 1107 if self._usechg:
1108 1108 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1109 1109
1110 1110 return env
1111 1111
    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/largefiles')))
            hgrc.write(b'[lfs]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/lfs')))
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
            hgrc.write(b'server-header = testing stub value\n')

            # Append any --extra-config-opt values as 'key=value' lines
            # under their '[section]'.
            for opt in self._extraconfigopts:
                section, key = opt.encode('utf-8').split(b'.', 1)
                assert b'=' in key, ('extra config opt %s must '
                                     'have an = for assignment' % opt)
                hgrc.write(b'[%s]\n%s\n' % (section, key))
1140 1140
    def fail(self, msg):
        """Raise AssertionError so unittest records this test as failed."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1145 1145
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In debug mode the child inherits our stdio; nothing to capture.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; synthesize a wait()-style
            # status if the child exited cleanly after termination.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize the raw output before it is compared with the reference.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1194 1194
class PythonTest(Test):
    """A test implemented as a standalone Python script."""

    @property
    def refpath(self):
        """Path of the .out file holding this test's expected output."""
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the test script, returning (exitcode, output lines)."""
        py3kswitch = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Python scripts on Windows emit \r\n line endings; normalize so
        # the output can be compared against the reference file.
        result = self._runcommand(cmd, env,
                                  normalizenewlines=os.name == 'nt')
        if self._aborted:
            raise KeyboardInterrupt()
        return result
1213 1213
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    re.compile(pattern)
    for pattern in (
        # On Windows it looks like \ doesn't require a (glob), but we know
        # better.
        br'^pushing to \$TESTTMP/.*[^)]$',
        br'^moving \S+/.*[^)]$',
        br'^pulling from \$TESTTMP/.*[^)]$',
        # Not all platforms have 127.0.0.1 as loopback (though most do),
        # so we always glob that too.
        br'.*\$LOCALIP.*$',
    )
]
1227 1227
# bchr(i) turns a small integer into a single-byte byte string: on Python 2
# chr() already returns bytes, while on Python 3 chr() would return text, so
# build a one-element bytes object instead.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1231 1231
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Markers the hghave helper writes into test output.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that require (esc)-style escaping in recorded output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Substitution machinery for _stringescape(): map every escapable byte
    # to its \xNN form, special-casing backslash and carriage return.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1242 1242
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', None)
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Give per-case runs distinct names, .err paths and temp dirs.
            self.name = '%s#%s' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
        # Cache of hghave results, keyed by the joined requirement string.
        self._have = {}
1254 1254
    @property
    def refpath(self):
        # For .t tests, the test file itself holds the expected output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1291 1291
    def _hghave(self, reqs):
        """Check feature requirements via the hghave helper script.

        Returns (available, message): message carries hghave's output when
        a requirement is missing. Results are cached per requirement set.
        """
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        # Exit code 2 means hghave itself failed; abort the whole run.
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        # Tests requiring the 'slow' feature get the more generous timeout.
        if b'slow' in reqs:
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None
1320 1320
    def _iftest(self, args):
        # implements "#if"
        # Case names (optionally 'no-'-prefixed) are compared against the
        # active test case; anything else is treated as an hghave feature.
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]
1334 1334
1335 1335 def _parsetest(self, lines):
1336 1336 # We generate a shell script which outputs unique markers to line
1337 1337 # up script results with our source. These markers include input
1338 1338 # line number and the last return code.
1339 1339 salt = b"SALT%d" % time.time()
1340 1340 def addsalt(line, inpython):
1341 1341 if inpython:
1342 1342 script.append(b'%s %d 0\n' % (salt, line))
1343 1343 else:
1344 1344 script.append(b'echo %s %d $?\n' % (salt, line))
1345 1345
1346 1346 script = []
1347 1347
1348 1348 # After we run the shell script, we re-unify the script output
1349 1349 # with non-active parts of the source, with synchronization by our
1350 1350 # SALT line number markers. The after table contains the non-active
1351 1351 # components, ordered by line number.
1352 1352 after = {}
1353 1353
1354 1354 # Expected shell script output.
1355 1355 expected = {}
1356 1356
1357 1357 pos = prepos = -1
1358 1358
1359 1359 # True or False when in a true or false conditional section
1360 1360 skipping = None
1361 1361
1362 1362 # We keep track of whether or not we're in a Python block so we
1363 1363 # can generate the surrounding doctest magic.
1364 1364 inpython = False
1365 1365
1366 1366 if self._debug:
1367 1367 script.append(b'set -x\n')
1368 1368 if self._hgcommand != b'hg':
1369 1369 script.append(b'alias hg="%s"\n' % self._hgcommand)
1370 1370 if os.getenv('MSYSTEM'):
1371 1371 script.append(b'alias pwd="pwd -W"\n')
1372 1372 if self._case:
1373 1373 if isinstance(self._case, str):
1374 1374 quoted = shellquote(self._case)
1375 1375 else:
1376 1376 quoted = shellquote(self._case.decode('utf8')).encode('utf8')
1377 1377 script.append(b'TESTCASE=%s\n' % quoted)
1378 1378 script.append(b'export TESTCASE\n')
1379 1379
1380 1380 n = 0
1381 1381 for n, l in enumerate(lines):
1382 1382 if not l.endswith(b'\n'):
1383 1383 l += b'\n'
1384 1384 if l.startswith(b'#require'):
1385 1385 lsplit = l.split()
1386 1386 if len(lsplit) < 2 or lsplit[0] != b'#require':
1387 1387 after.setdefault(pos, []).append(' !!! invalid #require\n')
1388 1388 if not skipping:
1389 1389 haveresult, message = self._hghave(lsplit[1:])
1390 1390 if not haveresult:
1391 1391 script = [b'echo "%s"\nexit 80\n' % message]
1392 1392 break
1393 1393 after.setdefault(pos, []).append(l)
1394 1394 elif l.startswith(b'#if'):
1395 1395 lsplit = l.split()
1396 1396 if len(lsplit) < 2 or lsplit[0] != b'#if':
1397 1397 after.setdefault(pos, []).append(' !!! invalid #if\n')
1398 1398 if skipping is not None:
1399 1399 after.setdefault(pos, []).append(' !!! nested #if\n')
1400 1400 skipping = not self._iftest(lsplit[1:])
1401 1401 after.setdefault(pos, []).append(l)
1402 1402 elif l.startswith(b'#else'):
1403 1403 if skipping is None:
1404 1404 after.setdefault(pos, []).append(' !!! missing #if\n')
1405 1405 skipping = not skipping
1406 1406 after.setdefault(pos, []).append(l)
1407 1407 elif l.startswith(b'#endif'):
1408 1408 if skipping is None:
1409 1409 after.setdefault(pos, []).append(' !!! missing #if\n')
1410 1410 skipping = None
1411 1411 after.setdefault(pos, []).append(l)
1412 1412 elif skipping:
1413 1413 after.setdefault(pos, []).append(l)
1414 1414 elif l.startswith(b' >>> '): # python inlines
1415 1415 after.setdefault(pos, []).append(l)
1416 1416 prepos = pos
1417 1417 pos = n
1418 1418 if not inpython:
1419 1419 # We've just entered a Python block. Add the header.
1420 1420 inpython = True
1421 1421 addsalt(prepos, False) # Make sure we report the exit code.
1422 1422 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1423 1423 addsalt(n, True)
1424 1424 script.append(l[2:])
1425 1425 elif l.startswith(b' ... '): # python inlines
1426 1426 after.setdefault(prepos, []).append(l)
1427 1427 script.append(l[2:])
1428 1428 elif l.startswith(b' $ '): # commands
1429 1429 if inpython:
1430 1430 script.append(b'EOF\n')
1431 1431 inpython = False
1432 1432 after.setdefault(pos, []).append(l)
1433 1433 prepos = pos
1434 1434 pos = n
1435 1435 addsalt(n, False)
1436 1436 cmd = l[4:].split()
1437 1437 if len(cmd) == 2 and cmd[0] == b'cd':
1438 1438 l = b' $ cd %s || exit 1\n' % cmd[1]
1439 1439 script.append(l[4:])
1440 1440 elif l.startswith(b' > '): # continuations
1441 1441 after.setdefault(prepos, []).append(l)
1442 1442 script.append(l[4:])
1443 1443 elif l.startswith(b' '): # results
1444 1444 # Queue up a list of expected results.
1445 1445 expected.setdefault(pos, []).append(l[2:])
1446 1446 else:
1447 1447 if inpython:
1448 1448 script.append(b'EOF\n')
1449 1449 inpython = False
1450 1450 # Non-command/result. Queue up for merged output.
1451 1451 after.setdefault(pos, []).append(l)
1452 1452
1453 1453 if inpython:
1454 1454 script.append(b'EOF\n')
1455 1455 if skipping is not None:
1456 1456 after.setdefault(pos, []).append(' !!! missing #endif\n')
1457 1457 addsalt(n + 1, False)
1458 1458
1459 1459 return salt, script, after, expected
1460 1460
    def _processoutput(self, exitcode, output, salt, after, expected):
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            # Split an output line from any trailing salt marker; lcmd, if
            # present, holds "<source line> <return code>".
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                optional = []
                for i, el in enumerate(els):
                    r = False
                    if el:
                        r, exact = self.linematch(el, lout)
                        # NOTE(review): globmatch returns bytes b'-glob'
                        # while the comparisons below use native strings;
                        # equal on Python 2 (str is bytes) but never equal
                        # on Python 3, and el.rsplit(' (glob)', 1) mixes
                        # bytes with a str separator -- confirm py3 intent.
                        if isinstance(r, str):
                            if r == '-glob':
                                lout = ''.join(el.rsplit(' (glob)', 1))
                                r = '' # Warn only this line.
                            elif r == "retry":
                                postout.append(b'  ' + el)
                            else:
                                log('\ninfo, unknown linematch result: %r\n' % r)
                                r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)
                        if exact:
                            # Don't allow line to be matches against a later
                            # line in the output
                            els.pop(i)
                            break

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # The while-else path: no output remained for this marker;
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1570 1570
    @staticmethod
    def rematch(el, l):
        """Match line l against the regular expression el, anchored at both
        ends (allowing an optional \\r before the newline on Windows)."""
        try:
            el = b'(?:' + el + b')'
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False
1582 1582
    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                # NOTE(review): returns bytes b'-glob' while _processoutput
                # compares against the native string '-glob'; equal on
                # Python 2, never equal on Python 3 -- confirm py3 intent.
                return b'-glob'
            return True
        # $LOCALIP can expand to anything the platform uses as loopback.
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        # Translate the glob pattern into a regex, one byte at a time.
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1613 1613
    def linematch(self, el, l):
        """Match output line l against expected line el.

        Returns (matched, exact): matched is True, False or "retry"
        (meaning the line is optional and may match later); exact signals
        that el may only match at this position.
        """
        if el == l: # perfect match (fast)
            return True, True
        retry = False
        if el.endswith(b" (?)\n"):
            retry = "retry"
            el = el[:-5] + b"\n"
        else:
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    retry = "retry" # Not required by listed features

        if el.endswith(b" (esc)\n"):
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            # Retry the comparison with forward slashes on Windows.
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True
1650 1650
1651 1651 @staticmethod
1652 1652 def parsehghaveoutput(lines):
1653 1653 '''Parse hghave log lines.
1654 1654
1655 1655 Return tuple of lists (missing, failed):
1656 1656 * the missing/unknown features
1657 1657 * the features for which existence check failed'''
1658 1658 missing = []
1659 1659 failed = []
1660 1660 for line in lines:
1661 1661 if line.startswith(TTest.SKIPPED_PREFIX):
1662 1662 line = line.splitlines()[0]
1663 1663 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1664 1664 elif line.startswith(TTest.FAILED_PREFIX):
1665 1665 line = line.splitlines()[0]
1666 1666 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1667 1667
1668 1668 return missing, failed
1669 1669
    @staticmethod
    def _escapef(m):
        # re.sub callback: replace a single matched byte with its escape
        # sequence from ESCAPEMAP.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        """Escape non-printable bytes in s per the (esc) output convention."""
        return TTest.ESCAPESUB(TTest._escapef, s)
1677 1677
# Serializes writes to the shared output stream across worker threads
# (used by the TestResult add* methods below).
iolock = threading.RLock()
# Guards first-failure bookkeeping: runTest() takes it around
# addOutputMismatch() and when setting firsterror.
firstlock = threading.RLock()
# Set (under firstlock) once the first error has been reported.
firsterror = False
1681 1681
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing records; populated elsewhere (start/stop hooks,
        # not shown here) -- presumably one entry per test; confirm.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        # Colorize output only when pygments is available and, for 'auto',
        # when writing to a terminal.
        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1712 1712
1713 1713 def addFailure(self, test, reason):
1714 1714 self.failures.append((test, reason))
1715 1715
1716 1716 if self._options.first:
1717 1717 self.stop()
1718 1718 else:
1719 1719 with iolock:
1720 1720 if reason == "timed out":
1721 1721 self.stream.write('t')
1722 1722 else:
1723 1723 if not self._options.nodiff:
1724 1724 self.stream.write('\n')
1725 1725 # Exclude the '\n' from highlighting to lex correctly
1726 1726 formatted = 'ERROR: %s output changed\n' % test
1727 1727 self.stream.write(highlightmsg(formatted, self.color))
1728 1728 self.stream.write('!')
1729 1729
1730 1730 self.stream.flush()
1731 1731
1732 1732 def addSuccess(self, test):
1733 1733 with iolock:
1734 1734 super(TestResult, self).addSuccess(test)
1735 1735 self.successes.append(test)
1736 1736
1737 1737 def addError(self, test, err):
1738 1738 super(TestResult, self).addError(test, err)
1739 1739 if self._options.first:
1740 1740 self.stop()
1741 1741
1742 1742 # Polyfill.
1743 1743 def addSkip(self, test, reason):
1744 1744 self.skipped.append((test, reason))
1745 1745 with iolock:
1746 1746 if self.showAll:
1747 1747 self.stream.writeln('skipped %s' % reason)
1748 1748 else:
1749 1749 self.stream.write('s')
1750 1750 self.stream.flush()
1751 1751
1752 1752 def addIgnore(self, test, reason):
1753 1753 self.ignored.append((test, reason))
1754 1754 with iolock:
1755 1755 if self.showAll:
1756 1756 self.stream.writeln('ignored %s' % reason)
1757 1757 else:
1758 1758 if reason not in ('not retesting', "doesn't match keyword"):
1759 1759 self.stream.write('i')
1760 1760 else:
1761 1761 self.testsRun += 1
1762 1762 self.stream.flush()
1763 1763
1764 1764 def addOutputMismatch(self, test, ret, got, expected):
1765 1765 """Record a mismatch in test output for a particular test."""
1766 1766 if self.shouldStop or firsterror:
1767 1767 # don't print, some other test case already failed and
1768 1768 # printed, we're just stale and probably failed due to our
1769 1769 # temp dir getting cleaned up.
1770 1770 return
1771 1771
1772 1772 accepted = False
1773 1773 lines = []
1774 1774
1775 1775 with iolock:
1776 1776 if self._options.nodiff:
1777 1777 pass
1778 1778 elif self._options.view:
1779 1779 v = self._options.view
1780 1780 if PYTHON3:
1781 1781 v = _bytespath(v)
1782 1782 os.system(b"%s %s %s" %
1783 1783 (v, test.refpath, test.errpath))
1784 1784 else:
1785 1785 servefail, lines = getdiff(expected, got,
1786 1786 test.refpath, test.errpath)
1787 1787 self.stream.write('\n')
1788 1788 for line in lines:
1789 1789 line = highlightdiff(line, self.color)
1790 1790 if PYTHON3:
1791 1791 self.stream.flush()
1792 1792 self.stream.buffer.write(line)
1793 1793 self.stream.buffer.flush()
1794 1794 else:
1795 1795 self.stream.write(line)
1796 1796 self.stream.flush()
1797 1797
1798 1798 if servefail:
1799 1799 raise test.failureException(
1800 1800 'server failed to start (HGPORT=%s)' % test._startport)
1801 1801
1802 1802 # handle interactive prompt without releasing iolock
1803 1803 if self._options.interactive:
1804 1804 if test.readrefout() != expected:
1805 1805 self.stream.write(
1806 1806 'Reference output has changed (run again to prompt '
1807 1807 'changes)')
1808 1808 else:
1809 1809 self.stream.write('Accept this change? [n] ')
1810 1810 answer = sys.stdin.readline().strip()
1811 1811 if answer.lower() in ('y', 'yes'):
1812 1812 if test.path.endswith(b'.t'):
1813 1813 rename(test.errpath, test.path)
1814 1814 else:
1815 1815 rename(test.errpath, '%s.out' % test.path)
1816 1816 accepted = True
1817 1817 if not accepted:
1818 1818 self.faildata[test.name] = b''.join(lines)
1819 1819
1820 1820 return accepted
1821 1821
1822 1822 def startTest(self, test):
1823 1823 super(TestResult, self).startTest(test)
1824 1824
1825 1825 # os.times module computes the user time and system time spent by
1826 1826 # child's processes along with real elapsed time taken by a process.
1827 1827 # This module has one limitation. It can only work for Linux user
1828 1828 # and not for Windows.
1829 1829 test.started = os.times()
1830 1830 if self._firststarttime is None: # thread racy but irrelevant
1831 1831 self._firststarttime = test.started[4]
1832 1832
1833 1833 def stopTest(self, test, interrupted=False):
1834 1834 super(TestResult, self).stopTest(test)
1835 1835
1836 1836 test.stopped = os.times()
1837 1837
1838 1838 starttime = test.started
1839 1839 endtime = test.stopped
1840 1840 origin = self._firststarttime
1841 1841 self.times.append((test.name,
1842 1842 endtime[2] - starttime[2], # user space CPU time
1843 1843 endtime[3] - starttime[3], # sys space CPU time
1844 1844 endtime[4] - starttime[4], # real time
1845 1845 starttime[4] - origin, # start date in run context
1846 1846 endtime[4] - origin, # end date in run context
1847 1847 ))
1848 1848
1849 1849 if interrupted:
1850 1850 with iolock:
1851 1851 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1852 1852 test.name, self.times[-1][3]))
1853 1853
def getTestResult():
    """Return the TestResult class to use for this run.

    If the CUSTOM_TEST_RESULT environment variable is set (and non-empty),
    it names a module whose ``TestResult`` attribute is used instead of the
    built-in implementation, so external tooling can collect results.
    Dotted module paths are supported.
    """
    modname = os.environ.get("CUSTOM_TEST_RESULT")
    if modname:
        # importlib.import_module resolves dotted paths to the named
        # submodule, whereas a bare __import__('pkg.mod') returns the
        # top-level package and the attribute lookup would then fail.
        import importlib
        testresultmodule = importlib.import_module(modname)
        return testresultmodule.TestResult
    return TestResult
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        """Filter, schedule and execute the suite's tests into ``result``."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        # Single-element list so the nested get() closure can mutate the
        # counter (no ``nonlocal`` on Python 2).
        num_tests = [0]
        for test in self._tests:
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Match keywords against the test body plus its name.
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        # Worker threads post to this queue when a test completes.
        done = queue.Queue()
        running = 0

        # One display slot per job for the --showchannels status line.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim the first free channel for this test's status display.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: periodically redraw the channel status line
            # until the channels list is emptied at the end of the run.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Drain completions when the worker pool is full (or the
                # queue is non-empty, or nothing is left to start).
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: re-queue the test so the suite never ends.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying the list tells the stat() thread to exit.
        channels = []

        return result
2039 2049
2040 2050 # Save the most recent 5 wall-clock runtimes of each test to a
2041 2051 # human-readable text file named .testtimes. Tests are sorted
2042 2052 # alphabetically, while times for each test are listed from oldest to
2043 2053 # newest.
2044 2054
def loadtimes(outputdir):
    """Load saved per-test runtimes from outputdir's .testtimes file.

    Returns a list of (testname, [runtimes]) tuples, oldest runtime first.
    A missing file simply yields an empty list.
    """
    entries = []
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        with open(timepath) as fp:
            for line in fp:
                parsed = re.match('(.*?) ([0-9. ]+)', line)
                values = [float(t) for t in parsed.group(2).split()]
                entries.append((parsed.group(1), values))
    except IOError as err:
        # No history yet is fine; anything else is a real error.
        if err.errno != errno.ENOENT:
            raise
    return entries
2057 2067
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into outputdir's .testtimes file.

    At most the five most recent runtimes are kept per test; skipped tests
    contribute no new samples. The file is rewritten via a temporary file
    in the same directory.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = {str(entry[0]) for entry in result.skipped}
    for tdata in result.times:
        testname, real = tdata[0], tdata[3]
        if testname in skipped:
            continue
        history = saved.setdefault(testname, [])
        history.append(real)
        # Drop everything but the most recent maxruns samples.
        del history[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            formatted = ' '.join('%.3f' % (t,) for t in history)
            fp.write('%s %s\n' % (name, formatted))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2083 2093
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

    def listtests(self, test):
        """Print the sorted test names without running them.

        Still produces xunit/json reports (as all-success) when requested.
        """
        # getTestResult() honors CUSTOM_TEST_RESULT; construct only that
        # class (the stale extra TestResult(...) construction was dead code
        # left over from the change introducing getTestResult()).
        result = getTestResult()(self._runner.options, self.stream,
                                 self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run the suite and print/report the summary; return the result."""
        result = getTestResult()(self._runner.options, self.stream,
                                 self.descriptions, self.verbosity)
        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Use ``hg bisect`` to locate the changeset breaking each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run a command with HGPLAIN set and return its combined output.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write ``result`` to ``outf`` as an xunit/JUnit XML report."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write ``result`` to ``outf`` as a ``testreport =``-prefixed JSON
        blob (consumed by the web-based report viewer)."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                               for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2324 2333
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests.

    With shuffle the order is randomized. Given previoustimes, the most
    recent recorded runtime estimates cost; otherwise file size scaled by
    a table of known-slow keywords is the heuristic. Costlier tests sort
    first so they start earliest.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(desc):
            path = desc['path']
            if path in previoustimes:
                # Use most recent time as estimate
                return -previoustimes[path][-1]
            # Default to a rather arbitrary value of 1 second for new tests
            return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        # Memoized cost per path so each file is stat()ed only once.
        perf = {}

        def sortkey(desc):
            # run largest tests first, as they tend to take the longest
            path = desc['path']
            if path in perf:
                return perf[path]
            try:
                cost = -os.stat(path).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                perf[path] = -1e9 # file does not exist, tell early
                return -1e9
            for keyword, factor in slow.items():
                if keyword in path:
                    cost *= factor
            if path.endswith(b'.py'):
                cost /= 10.0
            perf[path] = cost / 1000.0
            return perf[path]

    testdescs.sort(key=sortkey)
2378 2387
2379 2388 class TestRunner(object):
2380 2389 """Holds context for executing tests.
2381 2390
2382 2391 Tests rely on a lot of state. This object holds it for them.
2383 2392 """
2384 2393
2385 2394 # Programs required to run tests.
2386 2395 REQUIREDTOOLS = [
2387 2396 b'diff',
2388 2397 b'grep',
2389 2398 b'unzip',
2390 2399 b'gunzip',
2391 2400 b'bunzip2',
2392 2401 b'sed',
2393 2402 ]
2394 2403
2395 2404 # Maps file extensions to test class.
2396 2405 TESTTYPES = [
2397 2406 (b'.py', PythonTest),
2398 2407 (b'.t', TTest),
2399 2408 ]
2400 2409
2401 2410 def __init__(self):
2402 2411 self.options = None
2403 2412 self._hgroot = None
2404 2413 self._testdir = None
2405 2414 self._outputdir = None
2406 2415 self._hgtmp = None
2407 2416 self._installdir = None
2408 2417 self._bindir = None
2409 2418 self._tmpbinddir = None
2410 2419 self._pythondir = None
2411 2420 self._coveragefile = None
2412 2421 self._createdfiles = []
2413 2422 self._hgcommand = None
2414 2423 self._hgpath = None
2415 2424 self._portoffset = 0
2416 2425 self._ports = {}
2417 2426
2418 2427 def run(self, args, parser=None):
2419 2428 """Run the test suite."""
2420 2429 oldmask = os.umask(0o22)
2421 2430 try:
2422 2431 parser = parser or getparser()
2423 2432 options = parseargs(args, parser)
2424 2433 tests = [_bytespath(a) for a in options.tests]
2425 2434 if options.test_list is not None:
2426 2435 for listfile in options.test_list:
2427 2436 with open(listfile, 'rb') as f:
2428 2437 tests.extend(t for t in f.read().splitlines() if t)
2429 2438 self.options = options
2430 2439
2431 2440 self._checktools()
2432 2441 testdescs = self.findtests(tests)
2433 2442 if options.profile_runner:
2434 2443 import statprof
2435 2444 statprof.start()
2436 2445 result = self._run(testdescs)
2437 2446 if options.profile_runner:
2438 2447 statprof.stop()
2439 2448 statprof.display()
2440 2449 return result
2441 2450
2442 2451 finally:
2443 2452 os.umask(oldmask)
2444 2453
2445 2454 def _run(self, testdescs):
2446 2455 self._testdir = osenvironb[b'TESTDIR'] = getattr(
2447 2456 os, 'getcwdb', os.getcwd)()
2448 2457 # assume all tests in same folder for now
2449 2458 if testdescs:
2450 2459 pathname = os.path.dirname(testdescs[0]['path'])
2451 2460 if pathname:
2452 2461 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2453 2462 pathname)
2454 2463 if self.options.outputdir:
2455 2464 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2456 2465 else:
2457 2466 self._outputdir = self._testdir
2458 2467 if testdescs and pathname:
2459 2468 self._outputdir = os.path.join(self._outputdir, pathname)
2460 2469 previoustimes = {}
2461 2470 if self.options.order_by_runtime:
2462 2471 previoustimes = dict(loadtimes(self._outputdir))
2463 2472 sorttests(testdescs, previoustimes, shuffle=self.options.random)
2464 2473
2465 2474 if 'PYTHONHASHSEED' not in os.environ:
2466 2475 # use a random python hash seed all the time
2467 2476 # we do the randomness ourself to know what seed is used
2468 2477 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2469 2478
2470 2479 if self.options.tmpdir:
2471 2480 self.options.keep_tmpdir = True
2472 2481 tmpdir = _bytespath(self.options.tmpdir)
2473 2482 if os.path.exists(tmpdir):
2474 2483 # Meaning of tmpdir has changed since 1.3: we used to create
2475 2484 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2476 2485 # tmpdir already exists.
2477 2486 print("error: temp dir %r already exists" % tmpdir)
2478 2487 return 1
2479 2488
2480 2489 os.makedirs(tmpdir)
2481 2490 else:
2482 2491 d = None
2483 2492 if os.name == 'nt':
2484 2493 # without this, we get the default temp dir location, but
2485 2494 # in all lowercase, which causes troubles with paths (issue3490)
2486 2495 d = osenvironb.get(b'TMP', None)
2487 2496 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2488 2497
2489 2498 self._hgtmp = osenvironb[b'HGTMP'] = (
2490 2499 os.path.realpath(tmpdir))
2491 2500
2492 2501 if self.options.with_hg:
2493 2502 self._installdir = None
2494 2503 whg = self.options.with_hg
2495 2504 self._bindir = os.path.dirname(os.path.realpath(whg))
2496 2505 assert isinstance(self._bindir, bytes)
2497 2506 self._hgcommand = os.path.basename(whg)
2498 2507 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2499 2508 os.makedirs(self._tmpbindir)
2500 2509
2501 2510 normbin = os.path.normpath(os.path.abspath(whg))
2502 2511 normbin = normbin.replace(os.sep.encode('ascii'), b'/')
2503 2512
2504 2513 # Other Python scripts in the test harness need to
2505 2514 # `import mercurial`. If `hg` is a Python script, we assume
2506 2515 # the Mercurial modules are relative to its path and tell the tests
2507 2516 # to load Python modules from its directory.
2508 2517 with open(whg, 'rb') as fh:
2509 2518 initial = fh.read(1024)
2510 2519
2511 2520 if re.match(b'#!.*python', initial):
2512 2521 self._pythondir = self._bindir
2513 2522 # If it looks like our in-repo Rust binary, use the source root.
2514 2523 # This is a bit hacky. But rhg is still not supported outside the
2515 2524 # source directory. So until it is, do the simple thing.
2516 2525 elif re.search(b'/rust/target/[^/]+/hg', normbin):
2517 2526 self._pythondir = os.path.dirname(self._testdir)
2518 2527 # Fall back to the legacy behavior.
2519 2528 else:
2520 2529 self._pythondir = self._bindir
2521 2530
2522 2531 else:
2523 2532 self._installdir = os.path.join(self._hgtmp, b"install")
2524 2533 self._bindir = os.path.join(self._installdir, b"bin")
2525 2534 self._hgcommand = b'hg'
2526 2535 self._tmpbindir = self._bindir
2527 2536 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2528 2537
2529 2538 # set CHGHG, then replace "hg" command by "chg"
2530 2539 chgbindir = self._bindir
2531 2540 if self.options.chg or self.options.with_chg:
2532 2541 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2533 2542 else:
2534 2543 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2535 2544 if self.options.chg:
2536 2545 self._hgcommand = b'chg'
2537 2546 elif self.options.with_chg:
2538 2547 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2539 2548 self._hgcommand = os.path.basename(self.options.with_chg)
2540 2549
2541 2550 osenvironb[b"BINDIR"] = self._bindir
2542 2551 osenvironb[b"PYTHON"] = PYTHON
2543 2552
2544 2553 if self.options.with_python3:
2545 2554 osenvironb[b'PYTHON3'] = self.options.with_python3
2546 2555
2547 2556 fileb = _bytespath(__file__)
2548 2557 runtestdir = os.path.abspath(os.path.dirname(fileb))
2549 2558 osenvironb[b'RUNTESTDIR'] = runtestdir
2550 2559 if PYTHON3:
2551 2560 sepb = _bytespath(os.pathsep)
2552 2561 else:
2553 2562 sepb = os.pathsep
2554 2563 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2555 2564 if os.path.islink(__file__):
2556 2565 # test helper will likely be at the end of the symlink
2557 2566 realfile = os.path.realpath(fileb)
2558 2567 realdir = os.path.abspath(os.path.dirname(realfile))
2559 2568 path.insert(2, realdir)
2560 2569 if chgbindir != self._bindir:
2561 2570 path.insert(1, chgbindir)
2562 2571 if self._testdir != runtestdir:
2563 2572 path = [self._testdir] + path
2564 2573 if self._tmpbindir != self._bindir:
2565 2574 path = [self._tmpbindir] + path
2566 2575 osenvironb[b"PATH"] = sepb.join(path)
2567 2576
2568 2577 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2569 2578 # can run .../tests/run-tests.py test-foo where test-foo
2570 2579 # adds an extension to HGRC. Also include run-test.py directory to
2571 2580 # import modules like heredoctest.
2572 2581 pypath = [self._pythondir, self._testdir, runtestdir]
2573 2582 # We have to augment PYTHONPATH, rather than simply replacing
2574 2583 # it, in case external libraries are only available via current
2575 2584 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2576 2585 # are in /opt/subversion.)
2577 2586 oldpypath = osenvironb.get(IMPL_PATH)
2578 2587 if oldpypath:
2579 2588 pypath.append(oldpypath)
2580 2589 osenvironb[IMPL_PATH] = sepb.join(pypath)
2581 2590
2582 2591 if self.options.pure:
2583 2592 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2584 2593 os.environ["HGMODULEPOLICY"] = "py"
2585 2594
2586 2595 if self.options.allow_slow_tests:
2587 2596 os.environ["HGTEST_SLOW"] = "slow"
2588 2597 elif 'HGTEST_SLOW' in os.environ:
2589 2598 del os.environ['HGTEST_SLOW']
2590 2599
2591 2600 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2592 2601
2593 2602 if self.options.exceptions:
2594 2603 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
2595 2604 try:
2596 2605 os.makedirs(exceptionsdir)
2597 2606 except OSError as e:
2598 2607 if e.errno != errno.EEXIST:
2599 2608 raise
2600 2609
2601 2610 # Remove all existing exception reports.
2602 2611 for f in os.listdir(exceptionsdir):
2603 2612 os.unlink(os.path.join(exceptionsdir, f))
2604 2613
2605 2614 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
2606 2615 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
2607 2616 self.options.extra_config_opt.append(
2608 2617 'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
2609 2618
2610 2619 vlog("# Using TESTDIR", self._testdir)
2611 2620 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2612 2621 vlog("# Using HGTMP", self._hgtmp)
2613 2622 vlog("# Using PATH", os.environ["PATH"])
2614 2623 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2615 2624 vlog("# Writing to directory", self._outputdir)
2616 2625
2617 2626 try:
2618 2627 return self._runtests(testdescs) or 0
2619 2628 finally:
2620 2629 time.sleep(.1)
2621 2630 self._cleanup()
2622 2631
2623 2632 def findtests(self, args):
2624 2633 """Finds possible test files from arguments.
2625 2634
2626 2635 If you wish to inject custom tests into the test harness, this would
2627 2636 be a good function to monkeypatch or override in a derived class.
2628 2637 """
2629 2638 if not args:
2630 2639 if self.options.changed:
2631 2640 proc = Popen4('hg st --rev "%s" -man0 .' %
2632 2641 self.options.changed, None, 0)
2633 2642 stdout, stderr = proc.communicate()
2634 2643 args = stdout.strip(b'\0').split(b'\0')
2635 2644 else:
2636 2645 args = os.listdir(b'.')
2637 2646
2638 2647 expanded_args = []
2639 2648 for arg in args:
2640 2649 if os.path.isdir(arg):
2641 2650 if not arg.endswith(b'/'):
2642 2651 arg += b'/'
2643 2652 expanded_args.extend([arg + a for a in os.listdir(arg)])
2644 2653 else:
2645 2654 expanded_args.append(arg)
2646 2655 args = expanded_args
2647 2656
2648 2657 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))')
2649 2658 tests = []
2650 2659 for t in args:
2651 2660 case = None
2652 2661
2653 2662 if not (os.path.basename(t).startswith(b'test-')
2654 2663 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2655 2664
2656 2665 m = testcasepattern.match(t)
2657 2666 if m is not None:
2658 2667 t, _, case = m.groups()
2659 2668 else:
2660 2669 continue
2661 2670
2662 2671 if t.endswith(b'.t'):
2663 2672 # .t file may contain multiple test cases
2664 2673 cases = sorted(parsettestcases(t))
2665 2674 if cases:
2666 2675 if case is not None and case in cases:
2667 2676 tests += [{'path': t, 'case': case}]
2668 2677 elif case is not None and case not in cases:
2669 2678 # Ignore invalid cases
2670 2679 pass
2671 2680 else:
2672 2681 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2673 2682 else:
2674 2683 tests.append({'path': t})
2675 2684 else:
2676 2685 tests.append({'path': t})
2677 2686 return tests
2678 2687
2679 2688 def _runtests(self, testdescs):
2680 2689 def _reloadtest(test, i):
2681 2690 # convert a test back to its description dict
2682 2691 desc = {'path': test.path}
2683 2692 case = getattr(test, '_case', None)
2684 2693 if case:
2685 2694 desc['case'] = case
2686 2695 return self._gettest(desc, i)
2687 2696
2688 2697 try:
2689 2698 if self.options.restart:
2690 2699 orig = list(testdescs)
2691 2700 while testdescs:
2692 2701 desc = testdescs[0]
2693 2702 # desc['path'] is a relative path
2694 2703 if 'case' in desc:
2695 2704 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2696 2705 else:
2697 2706 errpath = b'%s.err' % desc['path']
2698 2707 errpath = os.path.join(self._outputdir, errpath)
2699 2708 if os.path.exists(errpath):
2700 2709 break
2701 2710 testdescs.pop(0)
2702 2711 if not testdescs:
2703 2712 print("running all tests")
2704 2713 testdescs = orig
2705 2714
2706 2715 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2707 2716
2708 2717 failed = False
2709 2718 kws = self.options.keywords
2710 2719 if kws is not None and PYTHON3:
2711 2720 kws = kws.encode('utf-8')
2712 2721
2713 2722 suite = TestSuite(self._testdir,
2714 2723 jobs=self.options.jobs,
2715 2724 whitelist=self.options.whitelisted,
2716 2725 blacklist=self.options.blacklist,
2717 2726 retest=self.options.retest,
2718 2727 keywords=kws,
2719 2728 loop=self.options.loop,
2720 2729 runs_per_test=self.options.runs_per_test,
2721 2730 showchannels=self.options.showchannels,
2722 2731 tests=tests, loadtest=_reloadtest)
2723 2732 verbosity = 1
2724 2733 if self.options.verbose:
2725 2734 verbosity = 2
2726 2735 runner = TextTestRunner(self, verbosity=verbosity)
2727 2736
2728 2737 if self.options.list_tests:
2729 2738 result = runner.listtests(suite)
2730 2739 else:
2731 2740 if self._installdir:
2732 2741 self._installhg()
2733 2742 self._checkhglib("Testing")
2734 2743 else:
2735 2744 self._usecorrectpython()
2736 2745 if self.options.chg:
2737 2746 assert self._installdir
2738 2747 self._installchg()
2739 2748
2740 2749 result = runner.run(suite)
2741 2750
2742 2751 if result.failures:
2743 2752 failed = True
2744 2753
2745 2754 if self.options.anycoverage:
2746 2755 self._outputcoverage()
2747 2756 except KeyboardInterrupt:
2748 2757 failed = True
2749 2758 print("\ninterrupted!")
2750 2759
2751 2760 if failed:
2752 2761 return 1
2753 2762
2754 2763 def _getport(self, count):
2755 2764 port = self._ports.get(count) # do we have a cached entry?
2756 2765 if port is None:
2757 2766 portneeded = 3
2758 2767 # above 100 tries we just give up and let test reports failure
2759 2768 for tries in xrange(100):
2760 2769 allfree = True
2761 2770 port = self.options.port + self._portoffset
2762 2771 for idx in xrange(portneeded):
2763 2772 if not checkportisavailable(port + idx):
2764 2773 allfree = False
2765 2774 break
2766 2775 self._portoffset += portneeded
2767 2776 if allfree:
2768 2777 break
2769 2778 self._ports[count] = port
2770 2779 return port
2771 2780
2772 2781 def _gettest(self, testdesc, count):
2773 2782 """Obtain a Test by looking at its filename.
2774 2783
2775 2784 Returns a Test instance. The Test may not be runnable if it doesn't
2776 2785 map to a known type.
2777 2786 """
2778 2787 path = testdesc['path']
2779 2788 lctest = path.lower()
2780 2789 testcls = Test
2781 2790
2782 2791 for ext, cls in self.TESTTYPES:
2783 2792 if lctest.endswith(ext):
2784 2793 testcls = cls
2785 2794 break
2786 2795
2787 2796 refpath = os.path.join(self._testdir, path)
2788 2797 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2789 2798
2790 2799 # extra keyword parameters. 'case' is used by .t tests
2791 2800 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2792 2801
2793 2802 t = testcls(refpath, self._outputdir, tmpdir,
2794 2803 keeptmpdir=self.options.keep_tmpdir,
2795 2804 debug=self.options.debug,
2796 2805 first=self.options.first,
2797 2806 timeout=self.options.timeout,
2798 2807 startport=self._getport(count),
2799 2808 extraconfigopts=self.options.extra_config_opt,
2800 2809 py3kwarnings=self.options.py3k_warnings,
2801 2810 shell=self.options.shell,
2802 2811 hgcommand=self._hgcommand,
2803 2812 usechg=bool(self.options.with_chg or self.options.chg),
2804 2813 useipv6=useipv6, **kwds)
2805 2814 t.should_reload = True
2806 2815 return t
2807 2816
2808 2817 def _cleanup(self):
2809 2818 """Clean up state from this test invocation."""
2810 2819 if self.options.keep_tmpdir:
2811 2820 return
2812 2821
2813 2822 vlog("# Cleaning up HGTMP", self._hgtmp)
2814 2823 shutil.rmtree(self._hgtmp, True)
2815 2824 for f in self._createdfiles:
2816 2825 try:
2817 2826 os.remove(f)
2818 2827 except OSError:
2819 2828 pass
2820 2829
2821 2830 def _usecorrectpython(self):
2822 2831 """Configure the environment to use the appropriate Python in tests."""
2823 2832 # Tests must use the same interpreter as us or bad things will happen.
2824 2833 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2825 2834 if getattr(os, 'symlink', None):
2826 2835 vlog("# Making python executable in test path a symlink to '%s'" %
2827 2836 sys.executable)
2828 2837 mypython = os.path.join(self._tmpbindir, pyexename)
2829 2838 try:
2830 2839 if os.readlink(mypython) == sys.executable:
2831 2840 return
2832 2841 os.unlink(mypython)
2833 2842 except OSError as err:
2834 2843 if err.errno != errno.ENOENT:
2835 2844 raise
2836 2845 if self._findprogram(pyexename) != sys.executable:
2837 2846 try:
2838 2847 os.symlink(sys.executable, mypython)
2839 2848 self._createdfiles.append(mypython)
2840 2849 except OSError as err:
2841 2850 # child processes may race, which is harmless
2842 2851 if err.errno != errno.EEXIST:
2843 2852 raise
2844 2853 else:
2845 2854 exedir, exename = os.path.split(sys.executable)
2846 2855 vlog("# Modifying search path to find %s as %s in '%s'" %
2847 2856 (exename, pyexename, exedir))
2848 2857 path = os.environ['PATH'].split(os.pathsep)
2849 2858 while exedir in path:
2850 2859 path.remove(exedir)
2851 2860 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2852 2861 if not self._findprogram(pyexename):
2853 2862 print("WARNING: Cannot find %s in search path" % pyexename)
2854 2863
2855 2864 def _installhg(self):
2856 2865 """Install hg into the test environment.
2857 2866
2858 2867 This will also configure hg with the appropriate testing settings.
2859 2868 """
2860 2869 vlog("# Performing temporary installation of HG")
2861 2870 installerrs = os.path.join(self._hgtmp, b"install.err")
2862 2871 compiler = ''
2863 2872 if self.options.compiler:
2864 2873 compiler = '--compiler ' + self.options.compiler
2865 2874 if self.options.pure:
2866 2875 pure = b"--pure"
2867 2876 else:
2868 2877 pure = b""
2869 2878
2870 2879 # Run installer in hg root
2871 2880 script = os.path.realpath(sys.argv[0])
2872 2881 exe = sys.executable
2873 2882 if PYTHON3:
2874 2883 compiler = _bytespath(compiler)
2875 2884 script = _bytespath(script)
2876 2885 exe = _bytespath(exe)
2877 2886 hgroot = os.path.dirname(os.path.dirname(script))
2878 2887 self._hgroot = hgroot
2879 2888 os.chdir(hgroot)
2880 2889 nohome = b'--home=""'
2881 2890 if os.name == 'nt':
2882 2891 # The --home="" trick works only on OS where os.sep == '/'
2883 2892 # because of a distutils convert_path() fast-path. Avoid it at
2884 2893 # least on Windows for now, deal with .pydistutils.cfg bugs
2885 2894 # when they happen.
2886 2895 nohome = b''
2887 2896 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2888 2897 b' build %(compiler)s --build-base="%(base)s"'
2889 2898 b' install --force --prefix="%(prefix)s"'
2890 2899 b' --install-lib="%(libdir)s"'
2891 2900 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2892 2901 % {b'exe': exe, b'pure': pure,
2893 2902 b'compiler': compiler,
2894 2903 b'base': os.path.join(self._hgtmp, b"build"),
2895 2904 b'prefix': self._installdir, b'libdir': self._pythondir,
2896 2905 b'bindir': self._bindir,
2897 2906 b'nohome': nohome, b'logfile': installerrs})
2898 2907
2899 2908 # setuptools requires install directories to exist.
2900 2909 def makedirs(p):
2901 2910 try:
2902 2911 os.makedirs(p)
2903 2912 except OSError as e:
2904 2913 if e.errno != errno.EEXIST:
2905 2914 raise
2906 2915 makedirs(self._pythondir)
2907 2916 makedirs(self._bindir)
2908 2917
2909 2918 vlog("# Running", cmd)
2910 2919 if os.system(cmd) == 0:
2911 2920 if not self.options.verbose:
2912 2921 try:
2913 2922 os.remove(installerrs)
2914 2923 except OSError as e:
2915 2924 if e.errno != errno.ENOENT:
2916 2925 raise
2917 2926 else:
2918 2927 with open(installerrs, 'rb') as f:
2919 2928 for line in f:
2920 2929 if PYTHON3:
2921 2930 sys.stdout.buffer.write(line)
2922 2931 else:
2923 2932 sys.stdout.write(line)
2924 2933 sys.exit(1)
2925 2934 os.chdir(self._testdir)
2926 2935
2927 2936 self._usecorrectpython()
2928 2937
2929 2938 if self.options.py3k_warnings and not self.options.anycoverage:
2930 2939 vlog("# Updating hg command to enable Py3k Warnings switch")
2931 2940 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
2932 2941 lines = [line.rstrip() for line in f]
2933 2942 lines[0] += ' -3'
2934 2943 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
2935 2944 for line in lines:
2936 2945 f.write(line + '\n')
2937 2946
2938 2947 hgbat = os.path.join(self._bindir, b'hg.bat')
2939 2948 if os.path.isfile(hgbat):
2940 2949 # hg.bat expects to be put in bin/scripts while run-tests.py
2941 2950 # installation layout put it in bin/ directly. Fix it
2942 2951 with open(hgbat, 'rb') as f:
2943 2952 data = f.read()
2944 2953 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2945 2954 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2946 2955 b'"%~dp0python" "%~dp0hg" %*')
2947 2956 with open(hgbat, 'wb') as f:
2948 2957 f.write(data)
2949 2958 else:
2950 2959 print('WARNING: cannot fix hg.bat reference to python.exe')
2951 2960
2952 2961 if self.options.anycoverage:
2953 2962 custom = os.path.join(self._testdir, 'sitecustomize.py')
2954 2963 target = os.path.join(self._pythondir, 'sitecustomize.py')
2955 2964 vlog('# Installing coverage trigger to %s' % target)
2956 2965 shutil.copyfile(custom, target)
2957 2966 rc = os.path.join(self._testdir, '.coveragerc')
2958 2967 vlog('# Installing coverage rc to %s' % rc)
2959 2968 os.environ['COVERAGE_PROCESS_START'] = rc
2960 2969 covdir = os.path.join(self._installdir, '..', 'coverage')
2961 2970 try:
2962 2971 os.mkdir(covdir)
2963 2972 except OSError as e:
2964 2973 if e.errno != errno.EEXIST:
2965 2974 raise
2966 2975
2967 2976 os.environ['COVERAGE_DIR'] = covdir
2968 2977
2969 2978 def _checkhglib(self, verb):
2970 2979 """Ensure that the 'mercurial' package imported by python is
2971 2980 the one we expect it to be. If not, print a warning to stderr."""
2972 2981 if ((self._bindir == self._pythondir) and
2973 2982 (self._bindir != self._tmpbindir)):
2974 2983 # The pythondir has been inferred from --with-hg flag.
2975 2984 # We cannot expect anything sensible here.
2976 2985 return
2977 2986 expecthg = os.path.join(self._pythondir, b'mercurial')
2978 2987 actualhg = self._gethgpath()
2979 2988 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2980 2989 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2981 2990 ' (expected %s)\n'
2982 2991 % (verb, actualhg, expecthg))
2983 2992 def _gethgpath(self):
2984 2993 """Return the path to the mercurial package that is actually found by
2985 2994 the current Python interpreter."""
2986 2995 if self._hgpath is not None:
2987 2996 return self._hgpath
2988 2997
2989 2998 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2990 2999 cmd = cmd % PYTHON
2991 3000 if PYTHON3:
2992 3001 cmd = _strpath(cmd)
2993 3002 pipe = os.popen(cmd)
2994 3003 try:
2995 3004 self._hgpath = _bytespath(pipe.read().strip())
2996 3005 finally:
2997 3006 pipe.close()
2998 3007
2999 3008 return self._hgpath
3000 3009
3001 3010 def _installchg(self):
3002 3011 """Install chg into the test environment"""
3003 3012 vlog('# Performing temporary installation of CHG')
3004 3013 assert os.path.dirname(self._bindir) == self._installdir
3005 3014 assert self._hgroot, 'must be called after _installhg()'
3006 3015 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3007 3016 % {b'make': 'make', # TODO: switch by option or environment?
3008 3017 b'prefix': self._installdir})
3009 3018 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3010 3019 vlog("# Running", cmd)
3011 3020 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3012 3021 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3013 3022 stderr=subprocess.STDOUT)
3014 3023 out, _err = proc.communicate()
3015 3024 if proc.returncode != 0:
3016 3025 if PYTHON3:
3017 3026 sys.stdout.buffer.write(out)
3018 3027 else:
3019 3028 sys.stdout.write(out)
3020 3029 sys.exit(1)
3021 3030
3022 3031 def _outputcoverage(self):
3023 3032 """Produce code coverage output."""
3024 3033 import coverage
3025 3034 coverage = coverage.coverage
3026 3035
3027 3036 vlog('# Producing coverage report')
3028 3037 # chdir is the easiest way to get short, relative paths in the
3029 3038 # output.
3030 3039 os.chdir(self._hgroot)
3031 3040 covdir = os.path.join(self._installdir, '..', 'coverage')
3032 3041 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3033 3042
3034 3043 # Map install directory paths back to source directory.
3035 3044 cov.config.paths['srcdir'] = ['.', self._pythondir]
3036 3045
3037 3046 cov.combine()
3038 3047
3039 3048 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3040 3049 cov.report(ignore_errors=True, omit=omit)
3041 3050
3042 3051 if self.options.htmlcov:
3043 3052 htmldir = os.path.join(self._outputdir, 'htmlcov')
3044 3053 cov.html_report(directory=htmldir, omit=omit)
3045 3054 if self.options.annotate:
3046 3055 adir = os.path.join(self._outputdir, 'annotated')
3047 3056 if not os.path.isdir(adir):
3048 3057 os.mkdir(adir)
3049 3058 cov.annotate(directory=adir, omit=omit)
3050 3059
3051 3060 def _findprogram(self, program):
3052 3061 """Search PATH for a executable program"""
3053 3062 dpb = _bytespath(os.defpath)
3054 3063 sepb = _bytespath(os.pathsep)
3055 3064 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3056 3065 name = os.path.join(p, program)
3057 3066 if os.name == 'nt' or os.access(name, os.X_OK):
3058 3067 return name
3059 3068 return None
3060 3069
3061 3070 def _checktools(self):
3062 3071 """Ensure tools required to run tests are present."""
3063 3072 for p in self.REQUIREDTOOLS:
3064 3073 if os.name == 'nt' and not p.endswith('.exe'):
3065 3074 p += '.exe'
3066 3075 found = self._findprogram(p)
3067 3076 if found:
3068 3077 vlog("# Found prerequisite", p, "at", found)
3069 3078 else:
3070 3079 print("WARNING: Did not find prerequisite tool: %s " %
3071 3080 p.decode("utf-8"))
3072 3081
def aggregateexceptions(path):
    """Summarize the NUL-delimited exception reports in directory *path*.

    Each report file holds 5 fields: exception, main frame, hg frame,
    hg line, and test name; malformed files are skipped. Returns a dict
    with keys 'exceptioncounts', 'total', 'combined', 'leastfailing',
    'byfailure', and 'bytest'.
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')
            if len(fields) != 5:
                # Malformed report (e.g. truncated write); ignore it.
                continue

            exc, mainframe, hgframe, hgline, testname = [
                field.decode('utf-8') for field in fields]

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure. min() over the
    # sorted test names returns the alphabetically-first minimum, which
    # matches the explicit strict-less-than scan it replaces.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[key] = (len(failuresbytest[best]), best)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {key: (exceptioncounts[key],
                      len(testsbyfailure[key]),
                      leastfailing[key][0],
                      leastfailing[key][1])
                for key in exceptioncounts}

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3125 3134
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, put the standard streams into binary mode so tests
        # see bytes without newline translation.
        import msvcrt
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        # Not Windows: nothing to do.
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1812 +1,1821
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ $PYTHON $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10
11 11 # Ran 0 tests, 0 skipped, 0 failed.
12 12
13 13 Define a helper to avoid the install step
14 14 =============
15 15 $ rt()
16 16 > {
17 17 > $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 18 > }
19 19
20 20 error paths
21 21
22 22 #if symlink
23 23 $ ln -s `which true` hg
24 24 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
25 25 warning: --with-hg should specify an hg script
26 26
27 27 # Ran 0 tests, 0 skipped, 0 failed.
28 28 $ rm hg
29 29 #endif
30 30
31 31 #if execbit
32 32 $ touch hg
33 33 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
34 34 usage: run-tests.py [options] [tests]
35 35 run-tests.py: error: --with-hg must specify an executable hg script
36 36 [2]
37 37 $ rm hg
38 38 #endif
39 39
40 40 Features for testing optional lines
41 41 ===================================
42 42
43 43 $ cat > hghaveaddon.py <<EOF
44 44 > import hghave
45 45 > @hghave.check("custom", "custom hghave feature")
46 46 > def has_custom():
47 47 > return True
48 48 > @hghave.check("missing", "missing hghave feature")
49 49 > def has_missing():
50 50 > return False
51 51 > EOF
52 52
53 53 an empty test
54 54 =======================
55 55
56 56 $ touch test-empty.t
57 57 $ rt
58 58 .
59 59 # Ran 1 tests, 0 skipped, 0 failed.
60 60 $ rm test-empty.t
61 61
 62 62 a successful test
63 63 =======================
64 64
65 65 $ cat > test-success.t << EOF
66 66 > $ echo babar
67 67 > babar
68 68 > $ echo xyzzy
69 69 > dont_print (?)
70 70 > nothing[42]line (re) (?)
71 71 > never*happens (glob) (?)
72 72 > more_nothing (?)
73 73 > xyzzy
74 74 > nor this (?)
75 75 > $ printf 'abc\ndef\nxyz\n'
76 76 > 123 (?)
77 77 > abc
78 78 > def (?)
79 79 > 456 (?)
80 80 > xyz
81 81 > $ printf 'zyx\nwvu\ntsr\n'
82 82 > abc (?)
83 83 > zyx (custom !)
84 84 > wvu
85 85 > no_print (no-custom !)
86 86 > tsr (no-missing !)
87 87 > missing (missing !)
88 88 > EOF
89 89
90 90 $ rt
91 91 .
92 92 # Ran 1 tests, 0 skipped, 0 failed.
93 93
94 94 failing test
95 95 ==================
96 96
97 97 test churn with globs
98 98 $ cat > test-failure.t <<EOF
99 99 > $ echo "bar-baz"; echo "bar-bad"; echo foo
100 100 > bar*bad (glob)
101 101 > bar*baz (glob)
102 102 > | fo (re)
103 103 > EOF
104 104 $ rt test-failure.t
105 105
106 106 --- $TESTTMP/test-failure.t
107 107 +++ $TESTTMP/test-failure.t.err
108 108 @@ -1,4 +1,4 @@
109 109 $ echo "bar-baz"; echo "bar-bad"; echo foo
110 110 + bar*baz (glob)
111 111 bar*bad (glob)
112 112 - bar*baz (glob)
113 113 - | fo (re)
114 114 + foo
115 115
116 116 ERROR: test-failure.t output changed
117 117 !
118 118 Failed test-failure.t: output changed
119 119 # Ran 1 tests, 0 skipped, 1 failed.
120 120 python hash seed: * (glob)
121 121 [1]
122 122
123 123 test how multiple globs gets matched with lines in output
124 124 $ cat > test-failure-globs.t <<EOF
125 125 > $ echo "context"; echo "context"; \
126 126 > echo "key: 1"; echo "value: not a"; \
127 127 > echo "key: 2"; echo "value: not b"; \
128 128 > echo "key: 3"; echo "value: c"; \
129 129 > echo "key: 4"; echo "value: d"
130 130 > context
131 131 > context
132 132 > key: 1
133 133 > value: a
134 134 > key: 2
135 135 > value: b
136 136 > key: 3
137 137 > value: * (glob)
138 138 > key: 4
139 139 > value: * (glob)
140 140 > EOF
141 141 $ rt test-failure-globs.t
142 142
143 143 --- $TESTTMP/test-failure-globs.t
144 144 +++ $TESTTMP/test-failure-globs.t.err
145 145 @@ -2,9 +2,9 @@
146 146 context
147 147 context
148 148 key: 1
149 149 - value: a
150 150 + value: not a
151 151 key: 2
152 152 - value: b
153 153 + value: not b
154 154 key: 3
155 155 value: * (glob)
156 156 key: 4
157 157
158 158 ERROR: test-failure-globs.t output changed
159 159 !
160 160 Failed test-failure-globs.t: output changed
161 161 # Ran 1 tests, 0 skipped, 1 failed.
162 162 python hash seed: * (glob)
163 163 [1]
164 164 $ rm test-failure-globs.t
165 165
166 166 test diff colorisation
167 167
168 168 #if no-windows pygments
169 169 $ rt test-failure.t --color always
170 170
171 171 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
172 172 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
173 173 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
174 174 $ echo "bar-baz"; echo "bar-bad"; echo foo
175 175 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
176 176 bar*bad (glob)
177 177 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
178 178 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
179 179 \x1b[38;5;34m+ foo\x1b[39m (esc)
180 180
181 181 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
182 182 !
183 183 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
184 184 # Ran 1 tests, 0 skipped, 1 failed.
185 185 python hash seed: * (glob)
186 186 [1]
187 187
188 188 $ rt test-failure.t 2> tmp.log
189 189 [1]
190 190 $ cat tmp.log
191 191
192 192 --- $TESTTMP/test-failure.t
193 193 +++ $TESTTMP/test-failure.t.err
194 194 @@ -1,4 +1,4 @@
195 195 $ echo "bar-baz"; echo "bar-bad"; echo foo
196 196 + bar*baz (glob)
197 197 bar*bad (glob)
198 198 - bar*baz (glob)
199 199 - | fo (re)
200 200 + foo
201 201
202 202 ERROR: test-failure.t output changed
203 203 !
204 204 Failed test-failure.t: output changed
205 205 # Ran 1 tests, 0 skipped, 1 failed.
206 206 python hash seed: * (glob)
207 207 #endif
208 208
209 209 $ cat > test-failure.t << EOF
210 210 > $ true
211 211 > should go away (true !)
212 212 > $ true
213 213 > should stay (false !)
214 214 >
215 215 > Should remove first line, not second or third
216 216 > $ echo 'testing'
217 217 > baz*foo (glob) (true !)
218 218 > foobar*foo (glob) (false !)
219 219 > te*ting (glob) (true !)
220 220 >
221 221 > Should keep first two lines, remove third and last
222 222 > $ echo 'testing'
223 223 > test.ng (re) (true !)
224 224 > foo.ar (re) (false !)
225 225 > b.r (re) (true !)
226 226 > missing (?)
227 227 > awol (true !)
228 228 >
229 229 > The "missing" line should stay, even though awol is dropped
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (?)
233 233 > awol
234 234 > missing (?)
235 235 > EOF
236 236 $ rt test-failure.t
237 237
238 238 --- $TESTTMP/test-failure.t
239 239 +++ $TESTTMP/test-failure.t.err
240 240 @@ -1,11 +1,9 @@
241 241 $ true
242 242 - should go away (true !)
243 243 $ true
244 244 should stay (false !)
245 245
246 246 Should remove first line, not second or third
247 247 $ echo 'testing'
248 248 - baz*foo (glob) (true !)
249 249 foobar*foo (glob) (false !)
250 250 te*ting (glob) (true !)
251 251
252 252 foo.ar (re) (false !)
253 253 missing (?)
254 254 @@ -13,13 +11,10 @@
255 255 $ echo 'testing'
256 256 test.ng (re) (true !)
257 257 foo.ar (re) (false !)
258 258 - b.r (re) (true !)
259 259 missing (?)
260 260 - awol (true !)
261 261
262 262 The "missing" line should stay, even though awol is dropped
263 263 $ echo 'testing'
264 264 test.ng (re) (true !)
265 265 foo.ar (?)
266 266 - awol
267 267 missing (?)
268 268
269 269 ERROR: test-failure.t output changed
270 270 !
271 271 Failed test-failure.t: output changed
272 272 # Ran 1 tests, 0 skipped, 1 failed.
273 273 python hash seed: * (glob)
274 274 [1]
275 275
276 276 basic failing test
277 277 $ cat > test-failure.t << EOF
278 278 > $ echo babar
279 279 > rataxes
280 280 > This is a noop statement so that
281 281 > this test is still more bytes than success.
282 282 > pad pad pad pad............................................................
283 283 > pad pad pad pad............................................................
284 284 > pad pad pad pad............................................................
285 285 > pad pad pad pad............................................................
286 286 > pad pad pad pad............................................................
287 287 > pad pad pad pad............................................................
288 288 > EOF
289 289
290 290 >>> fh = open('test-failure-unicode.t', 'wb')
291 291 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
292 292 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
293 293
294 294 $ rt
295 295
296 296 --- $TESTTMP/test-failure.t
297 297 +++ $TESTTMP/test-failure.t.err
298 298 @@ -1,5 +1,5 @@
299 299 $ echo babar
300 300 - rataxes
301 301 + babar
302 302 This is a noop statement so that
303 303 this test is still more bytes than success.
304 304 pad pad pad pad............................................................
305 305
306 306 ERROR: test-failure.t output changed
307 307 !.
308 308 --- $TESTTMP/test-failure-unicode.t
309 309 +++ $TESTTMP/test-failure-unicode.t.err
310 310 @@ -1,2 +1,2 @@
311 311 $ echo babar\xce\xb1 (esc)
312 312 - l\xce\xb5\xce\xb5t (esc)
313 313 + babar\xce\xb1 (esc)
314 314
315 315 ERROR: test-failure-unicode.t output changed
316 316 !
317 317 Failed test-failure.t: output changed
318 318 Failed test-failure-unicode.t: output changed
319 319 # Ran 3 tests, 0 skipped, 2 failed.
320 320 python hash seed: * (glob)
321 321 [1]
322 322
323 323 test --outputdir
324 324 $ mkdir output
325 325 $ rt --outputdir output
326 326
327 327 --- $TESTTMP/test-failure.t
328 328 +++ $TESTTMP/output/test-failure.t.err
329 329 @@ -1,5 +1,5 @@
330 330 $ echo babar
331 331 - rataxes
332 332 + babar
333 333 This is a noop statement so that
334 334 this test is still more bytes than success.
335 335 pad pad pad pad............................................................
336 336
337 337 ERROR: test-failure.t output changed
338 338 !.
339 339 --- $TESTTMP/test-failure-unicode.t
340 340 +++ $TESTTMP/output/test-failure-unicode.t.err
341 341 @@ -1,2 +1,2 @@
342 342 $ echo babar\xce\xb1 (esc)
343 343 - l\xce\xb5\xce\xb5t (esc)
344 344 + babar\xce\xb1 (esc)
345 345
346 346 ERROR: test-failure-unicode.t output changed
347 347 !
348 348 Failed test-failure.t: output changed
349 349 Failed test-failure-unicode.t: output changed
350 350 # Ran 3 tests, 0 skipped, 2 failed.
351 351 python hash seed: * (glob)
352 352 [1]
353 353 $ ls -a output
354 354 .
355 355 ..
356 356 .testtimes
357 357 test-failure-unicode.t.err
358 358 test-failure.t.err
359 359
360 360 test --xunit support
361 361 $ rt --xunit=xunit.xml
362 362
363 363 --- $TESTTMP/test-failure.t
364 364 +++ $TESTTMP/test-failure.t.err
365 365 @@ -1,5 +1,5 @@
366 366 $ echo babar
367 367 - rataxes
368 368 + babar
369 369 This is a noop statement so that
370 370 this test is still more bytes than success.
371 371 pad pad pad pad............................................................
372 372
373 373 ERROR: test-failure.t output changed
374 374 !.
375 375 --- $TESTTMP/test-failure-unicode.t
376 376 +++ $TESTTMP/test-failure-unicode.t.err
377 377 @@ -1,2 +1,2 @@
378 378 $ echo babar\xce\xb1 (esc)
379 379 - l\xce\xb5\xce\xb5t (esc)
380 380 + babar\xce\xb1 (esc)
381 381
382 382 ERROR: test-failure-unicode.t output changed
383 383 !
384 384 Failed test-failure.t: output changed
385 385 Failed test-failure-unicode.t: output changed
386 386 # Ran 3 tests, 0 skipped, 2 failed.
387 387 python hash seed: * (glob)
388 388 [1]
389 389 $ cat xunit.xml
390 390 <?xml version="1.0" encoding="utf-8"?>
391 391 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
392 392 <testcase name="test-success.t" time="*"/> (glob)
393 393 <testcase name="test-failure-unicode.t" time="*"> (glob)
394 394 <failure message="output changed" type="output-mismatch">
395 395 <![CDATA[--- $TESTTMP/test-failure-unicode.t
396 396 +++ $TESTTMP/test-failure-unicode.t.err
397 397 @@ -1,2 +1,2 @@
398 398 $ echo babar\xce\xb1 (esc)
399 399 - l\xce\xb5\xce\xb5t (esc)
400 400 + babar\xce\xb1 (esc)
401 401 ]]> </failure>
402 402 </testcase>
403 403 <testcase name="test-failure.t" time="*"> (glob)
404 404 <failure message="output changed" type="output-mismatch">
405 405 <![CDATA[--- $TESTTMP/test-failure.t
406 406 +++ $TESTTMP/test-failure.t.err
407 407 @@ -1,5 +1,5 @@
408 408 $ echo babar
409 409 - rataxes
410 410 + babar
411 411 This is a noop statement so that
412 412 this test is still more bytes than success.
413 413 pad pad pad pad............................................................
414 414 ]]> </failure>
415 415 </testcase>
416 416 </testsuite>
417 417
418 418 $ cat .testtimes
419 419 test-empty.t * (glob)
420 420 test-failure-globs.t * (glob)
421 421 test-failure-unicode.t * (glob)
422 422 test-failure.t * (glob)
423 423 test-success.t * (glob)
424 424
425 425 $ rt --list-tests
426 426 test-failure-unicode.t
427 427 test-failure.t
428 428 test-success.t
429 429
430 430 $ rt --list-tests --json
431 431 test-failure-unicode.t
432 432 test-failure.t
433 433 test-success.t
434 434 $ cat report.json
435 435 testreport ={
436 436 "test-failure-unicode.t": {
437 437 "result": "success"
438 438 },
439 439 "test-failure.t": {
440 440 "result": "success"
441 441 },
442 442 "test-success.t": {
443 443 "result": "success"
444 444 }
445 445 } (no-eol)
446 446
447 447 $ rt --list-tests --xunit=xunit.xml
448 448 test-failure-unicode.t
449 449 test-failure.t
450 450 test-success.t
451 451 $ cat xunit.xml
452 452 <?xml version="1.0" encoding="utf-8"?>
453 453 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
454 454 <testcase name="test-failure-unicode.t"/>
455 455 <testcase name="test-failure.t"/>
456 456 <testcase name="test-success.t"/>
457 457 </testsuite>
458 458
459 459 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
460 460 test-failure-unicode.t
461 461 test-failure.t
462 462 $ cat output/report.json
463 463 testreport ={
464 464 "test-failure-unicode.t": {
465 465 "result": "success"
466 466 },
467 467 "test-failure.t": {
468 468 "result": "success"
469 469 }
470 470 } (no-eol)
471 471 $ cat xunit.xml
472 472 <?xml version="1.0" encoding="utf-8"?>
473 473 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
474 474 <testcase name="test-failure-unicode.t"/>
475 475 <testcase name="test-failure.t"/>
476 476 </testsuite>
477 477
478 478 $ rm test-failure-unicode.t
479 479
480 480 test for --retest
481 481 ====================
482 482
483 483 $ rt --retest
484 484
485 485 --- $TESTTMP/test-failure.t
486 486 +++ $TESTTMP/test-failure.t.err
487 487 @@ -1,5 +1,5 @@
488 488 $ echo babar
489 489 - rataxes
490 490 + babar
491 491 This is a noop statement so that
492 492 this test is still more bytes than success.
493 493 pad pad pad pad............................................................
494 494
495 495 ERROR: test-failure.t output changed
496 496 !
497 497 Failed test-failure.t: output changed
498 498 # Ran 2 tests, 1 skipped, 1 failed.
499 499 python hash seed: * (glob)
500 500 [1]
501 501
502 502 --retest works with --outputdir
503 503 $ rm -r output
504 504 $ mkdir output
505 505 $ mv test-failure.t.err output
506 506 $ rt --retest --outputdir output
507 507
508 508 --- $TESTTMP/test-failure.t
509 509 +++ $TESTTMP/output/test-failure.t.err
510 510 @@ -1,5 +1,5 @@
511 511 $ echo babar
512 512 - rataxes
513 513 + babar
514 514 This is a noop statement so that
515 515 this test is still more bytes than success.
516 516 pad pad pad pad............................................................
517 517
518 518 ERROR: test-failure.t output changed
519 519 !
520 520 Failed test-failure.t: output changed
521 521 # Ran 2 tests, 1 skipped, 1 failed.
522 522 python hash seed: * (glob)
523 523 [1]
524 524
525 525 Selecting Tests To Run
526 526 ======================
527 527
528 528 successful
529 529
530 530 $ rt test-success.t
531 531 .
532 532 # Ran 1 tests, 0 skipped, 0 failed.
533 533
534 534 success w/ keyword
535 535 $ rt -k xyzzy
536 536 .
537 537 # Ran 2 tests, 1 skipped, 0 failed.
538 538
539 539 failed
540 540
541 541 $ rt test-failure.t
542 542
543 543 --- $TESTTMP/test-failure.t
544 544 +++ $TESTTMP/test-failure.t.err
545 545 @@ -1,5 +1,5 @@
546 546 $ echo babar
547 547 - rataxes
548 548 + babar
549 549 This is a noop statement so that
550 550 this test is still more bytes than success.
551 551 pad pad pad pad............................................................
552 552
553 553 ERROR: test-failure.t output changed
554 554 !
555 555 Failed test-failure.t: output changed
556 556 # Ran 1 tests, 0 skipped, 1 failed.
557 557 python hash seed: * (glob)
558 558 [1]
559 559
560 560 failure w/ keyword
561 561 $ rt -k rataxes
562 562
563 563 --- $TESTTMP/test-failure.t
564 564 +++ $TESTTMP/test-failure.t.err
565 565 @@ -1,5 +1,5 @@
566 566 $ echo babar
567 567 - rataxes
568 568 + babar
569 569 This is a noop statement so that
570 570 this test is still more bytes than success.
571 571 pad pad pad pad............................................................
572 572
573 573 ERROR: test-failure.t output changed
574 574 !
575 575 Failed test-failure.t: output changed
576 576 # Ran 2 tests, 1 skipped, 1 failed.
577 577 python hash seed: * (glob)
578 578 [1]
579 579
580 580 Verify that when a process fails to start we show a useful message
581 581 ==================================================================
582 582
583 583 $ cat > test-serve-fail.t <<EOF
584 584 > $ echo 'abort: child process failed to start blah'
585 585 > EOF
586 586 $ rt test-serve-fail.t
587 587
588 588 --- $TESTTMP/test-serve-fail.t
589 589 +++ $TESTTMP/test-serve-fail.t.err
590 590 @@ -1* +1,2 @@ (glob)
591 591 $ echo 'abort: child process failed to start blah'
592 592 + abort: child process failed to start blah
593 593
594 594 ERROR: test-serve-fail.t output changed
595 595 !
596 596 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
597 597 # Ran 1 tests, 0 skipped, 1 failed.
598 598 python hash seed: * (glob)
599 599 [1]
600 600 $ rm test-serve-fail.t
601 601
602 602 Verify that we can try other ports
603 603 ===================================
604 604
605 605 Extensions aren't inherited by the invoked run-tests.py. An extension
606 606 introducing a repository requirement could cause this to fail. So we force
607 607 HGRCPATH to get a clean environment.
608 608
609 609 $ HGRCPATH= hg init inuse
610 610 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
611 611 $ cat blocks.pid >> $DAEMON_PIDS
612 612 $ cat > test-serve-inuse.t <<EOF
613 613 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
614 614 > $ cat hg.pid >> \$DAEMON_PIDS
615 615 > EOF
616 616 $ rt test-serve-inuse.t
617 617 .
618 618 # Ran 1 tests, 0 skipped, 0 failed.
619 619 $ rm test-serve-inuse.t
620 620 $ killdaemons.py $DAEMON_PIDS
621 621
622 622 Running In Debug Mode
623 623 ======================
624 624
625 625 $ rt --debug 2>&1 | grep -v pwd
626 626 + echo *SALT* 0 0 (glob)
627 627 *SALT* 0 0 (glob)
628 628 + echo babar
629 629 babar
630 630 + echo *SALT* 10 0 (glob)
631 631 *SALT* 10 0 (glob)
632 632 *+ echo *SALT* 0 0 (glob)
633 633 *SALT* 0 0 (glob)
634 634 + echo babar
635 635 babar
636 636 + echo *SALT* 2 0 (glob)
637 637 *SALT* 2 0 (glob)
638 638 + echo xyzzy
639 639 xyzzy
640 640 + echo *SALT* 9 0 (glob)
641 641 *SALT* 9 0 (glob)
642 642 + printf *abc\ndef\nxyz\n* (glob)
643 643 abc
644 644 def
645 645 xyz
646 646 + echo *SALT* 15 0 (glob)
647 647 *SALT* 15 0 (glob)
648 648 + printf *zyx\nwvu\ntsr\n* (glob)
649 649 zyx
650 650 wvu
651 651 tsr
652 652 + echo *SALT* 22 0 (glob)
653 653 *SALT* 22 0 (glob)
654 654 .
655 655 # Ran 2 tests, 0 skipped, 0 failed.
656 656
657 657 Parallel runs
658 658 ==============
659 659
660 660 (duplicate the failing test to get predictable output)
661 661 $ cp test-failure.t test-failure-copy.t
662 662
663 663 $ rt --jobs 2 test-failure*.t -n
664 664 !!
665 665 Failed test-failure*.t: output changed (glob)
666 666 Failed test-failure*.t: output changed (glob)
667 667 # Ran 2 tests, 0 skipped, 2 failed.
668 668 python hash seed: * (glob)
669 669 [1]
670 670
671 671 failures in parallel with --first should only print one failure
672 672 $ rt --jobs 2 --first test-failure*.t
673 673
674 674 --- $TESTTMP/test-failure*.t (glob)
675 675 +++ $TESTTMP/test-failure*.t.err (glob)
676 676 @@ -1,5 +1,5 @@
677 677 $ echo babar
678 678 - rataxes
679 679 + babar
680 680 This is a noop statement so that
681 681 this test is still more bytes than success.
682 682 pad pad pad pad............................................................
683 683
684 684 Failed test-failure*.t: output changed (glob)
685 685 Failed test-failure*.t: output changed (glob)
686 686 # Ran 2 tests, 0 skipped, 2 failed.
687 687 python hash seed: * (glob)
688 688 [1]
689 689
690 690
691 691 (delete the duplicated test file)
692 692 $ rm test-failure-copy.t
693 693
694 694
695 695 Interactive run
696 696 ===============
697 697
698 698 (backup the failing test)
699 699 $ cp test-failure.t backup
700 700
701 701 Refuse the fix
702 702
703 703 $ echo 'n' | rt -i
704 704
705 705 --- $TESTTMP/test-failure.t
706 706 +++ $TESTTMP/test-failure.t.err
707 707 @@ -1,5 +1,5 @@
708 708 $ echo babar
709 709 - rataxes
710 710 + babar
711 711 This is a noop statement so that
712 712 this test is still more bytes than success.
713 713 pad pad pad pad............................................................
714 714 Accept this change? [n]
715 715 ERROR: test-failure.t output changed
716 716 !.
717 717 Failed test-failure.t: output changed
718 718 # Ran 2 tests, 0 skipped, 1 failed.
719 719 python hash seed: * (glob)
720 720 [1]
721 721
722 722 $ cat test-failure.t
723 723 $ echo babar
724 724 rataxes
725 725 This is a noop statement so that
726 726 this test is still more bytes than success.
727 727 pad pad pad pad............................................................
728 728 pad pad pad pad............................................................
729 729 pad pad pad pad............................................................
730 730 pad pad pad pad............................................................
731 731 pad pad pad pad............................................................
732 732 pad pad pad pad............................................................
733 733
734 734 Interactive with custom view
735 735
736 736 $ echo 'n' | rt -i --view echo
737 737 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
738 738 Accept this change? [n]* (glob)
739 739 ERROR: test-failure.t output changed
740 740 !.
741 741 Failed test-failure.t: output changed
742 742 # Ran 2 tests, 0 skipped, 1 failed.
743 743 python hash seed: * (glob)
744 744 [1]
745 745
746 746 View the fix
747 747
748 748 $ echo 'y' | rt --view echo
749 749 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
750 750
751 751 ERROR: test-failure.t output changed
752 752 !.
753 753 Failed test-failure.t: output changed
754 754 # Ran 2 tests, 0 skipped, 1 failed.
755 755 python hash seed: * (glob)
756 756 [1]
757 757
758 758 Accept the fix
759 759
760 760 $ cat >> test-failure.t <<EOF
761 761 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
762 762 > saved backup bundle to \$TESTTMP/foo.hg
763 763 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
764 764 > saved backup bundle to $TESTTMP\\foo.hg
765 765 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
766 766 > saved backup bundle to \$TESTTMP/*.hg (glob)
767 767 > EOF
768 768 $ echo 'y' | rt -i 2>&1
769 769
770 770 --- $TESTTMP/test-failure.t
771 771 +++ $TESTTMP/test-failure.t.err
772 772 @@ -1,5 +1,5 @@
773 773 $ echo babar
774 774 - rataxes
775 775 + babar
776 776 This is a noop statement so that
777 777 this test is still more bytes than success.
778 778 pad pad pad pad............................................................
779 779 @@ -11,6 +11,6 @@
780 780 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
781 781 saved backup bundle to $TESTTMP/foo.hg
782 782 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
783 783 - saved backup bundle to $TESTTMP\foo.hg
784 784 + saved backup bundle to $TESTTMP/foo.hg
785 785 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
786 786 saved backup bundle to $TESTTMP/*.hg (glob)
787 787 Accept this change? [n] ..
788 788 # Ran 2 tests, 0 skipped, 0 failed.
789 789
790 790 $ sed -e 's,(glob)$,&<,g' test-failure.t
791 791 $ echo babar
792 792 babar
793 793 This is a noop statement so that
794 794 this test is still more bytes than success.
795 795 pad pad pad pad............................................................
796 796 pad pad pad pad............................................................
797 797 pad pad pad pad............................................................
798 798 pad pad pad pad............................................................
799 799 pad pad pad pad............................................................
800 800 pad pad pad pad............................................................
801 801 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
802 802 saved backup bundle to $TESTTMP/foo.hg
803 803 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
804 804 saved backup bundle to $TESTTMP/foo.hg
805 805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 806 saved backup bundle to $TESTTMP/*.hg (glob)<
807 807
808 808 Race condition - test file was modified when test is running
809 809
810 810 $ TESTRACEDIR=`pwd`
811 811 $ export TESTRACEDIR
812 812 $ cat > test-race.t <<EOF
813 813 > $ echo 1
814 814 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
815 815 > EOF
816 816
817 817 $ rt -i test-race.t
818 818
819 819 --- $TESTTMP/test-race.t
820 820 +++ $TESTTMP/test-race.t.err
821 821 @@ -1,2 +1,3 @@
822 822 $ echo 1
823 823 + 1
824 824 $ echo "# a new line" >> $TESTTMP/test-race.t
825 825 Reference output has changed (run again to prompt changes)
826 826 ERROR: test-race.t output changed
827 827 !
828 828 Failed test-race.t: output changed
829 829 # Ran 1 tests, 0 skipped, 1 failed.
830 830 python hash seed: * (glob)
831 831 [1]
832 832
833 833 $ rm test-race.t
834 834
835 835 When "#testcases" is used in .t files
836 836
837 837 $ cat >> test-cases.t <<EOF
838 838 > #testcases a b
839 839 > #if a
840 840 > $ echo 1
841 841 > #endif
842 842 > #if b
843 843 > $ echo 2
844 844 > #endif
845 845 > EOF
846 846
847 847 $ cat <<EOF | rt -i test-cases.t 2>&1
848 848 > y
849 849 > y
850 850 > EOF
851 851
852 852 --- $TESTTMP/test-cases.t
853 853 +++ $TESTTMP/test-cases.t.a.err
854 854 @@ -1,6 +1,7 @@
855 855 #testcases a b
856 856 #if a
857 857 $ echo 1
858 858 + 1
859 859 #endif
860 860 #if b
861 861 $ echo 2
862 862 Accept this change? [n] .
863 863 --- $TESTTMP/test-cases.t
864 864 +++ $TESTTMP/test-cases.t.b.err
865 865 @@ -5,4 +5,5 @@
866 866 #endif
867 867 #if b
868 868 $ echo 2
869 869 + 2
870 870 #endif
871 871 Accept this change? [n] .
872 872 # Ran 2 tests, 0 skipped, 0 failed.
873 873
874 874 $ cat test-cases.t
875 875 #testcases a b
876 876 #if a
877 877 $ echo 1
878 878 1
879 879 #endif
880 880 #if b
881 881 $ echo 2
882 882 2
883 883 #endif
884 884
885 885 $ cat >> test-cases.t <<'EOF'
886 886 > #if a
887 887 > $ NAME=A
888 888 > #else
889 889 > $ NAME=B
890 890 > #endif
891 891 > $ echo $NAME
892 892 > A (a !)
893 893 > B (b !)
894 894 > EOF
895 895 $ rt test-cases.t
896 896 ..
897 897 # Ran 2 tests, 0 skipped, 0 failed.
898 898
899 899 $ rm test-cases.t
900 900
901 901 (reinstall)
902 902 $ mv backup test-failure.t
903 903
904 904 No Diff
905 905 ===============
906 906
907 907 $ rt --nodiff
908 908 !.
909 909 Failed test-failure.t: output changed
910 910 # Ran 2 tests, 0 skipped, 1 failed.
911 911 python hash seed: * (glob)
912 912 [1]
913 913
914 914 test --tmpdir support
915 915 $ rt --tmpdir=$TESTTMP/keep test-success.t
916 916
917 917 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
918 918 Keeping threadtmp dir: $TESTTMP/keep/child1
919 919 .
920 920 # Ran 1 tests, 0 skipped, 0 failed.
921 921
922 922 timeouts
923 923 ========
924 924 $ cat > test-timeout.t <<EOF
925 925 > $ sleep 2
926 926 > $ echo pass
927 927 > pass
928 928 > EOF
929 929 > echo '#require slow' > test-slow-timeout.t
930 930 > cat test-timeout.t >> test-slow-timeout.t
931 931 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
932 932 st
933 933 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
934 934 Failed test-timeout.t: timed out
935 935 # Ran 1 tests, 1 skipped, 1 failed.
936 936 python hash seed: * (glob)
937 937 [1]
938 938 $ rt --timeout=1 --slowtimeout=3 \
939 939 > test-timeout.t test-slow-timeout.t --allow-slow-tests
940 940 .t
941 941 Failed test-timeout.t: timed out
942 942 # Ran 2 tests, 0 skipped, 1 failed.
943 943 python hash seed: * (glob)
944 944 [1]
945 945 $ rm test-timeout.t test-slow-timeout.t
946 946
947 947 test for --time
948 948 ==================
949 949
950 950 $ rt test-success.t --time
951 951 .
952 952 # Ran 1 tests, 0 skipped, 0 failed.
953 953 # Producing time report
954 954 start end cuser csys real Test
955 955 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
956 956
957 957 test for --time with --jobs enabled
958 958 ====================================
959 959
960 960 $ rt test-success.t --time --jobs 2
961 961 .
962 962 # Ran 1 tests, 0 skipped, 0 failed.
963 963 # Producing time report
964 964 start end cuser csys real Test
965 965 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
966 966
967 967 Skips
968 968 ================
969 969 $ cat > test-skip.t <<EOF
970 970 > $ echo xyzzy
971 971 > #if true
972 972 > #require false
973 973 > #end
974 974 > EOF
975 975 $ cat > test-noskip.t <<EOF
976 976 > #if false
977 977 > #require false
978 978 > #endif
979 979 > EOF
980 980 $ rt --nodiff
981 981 !.s.
982 982 Skipped test-skip.t: missing feature: nail clipper
983 983 Failed test-failure.t: output changed
984 984 # Ran 3 tests, 1 skipped, 1 failed.
985 985 python hash seed: * (glob)
986 986 [1]
987 987
988 988 $ rm test-noskip.t
989 989 $ rt --keyword xyzzy
990 990 .s
991 991 Skipped test-skip.t: missing feature: nail clipper
992 992 # Ran 2 tests, 2 skipped, 0 failed.
993 993
994 994 Skips with xml
995 995 $ rt --keyword xyzzy \
996 996 > --xunit=xunit.xml
997 997 .s
998 998 Skipped test-skip.t: missing feature: nail clipper
999 999 # Ran 2 tests, 2 skipped, 0 failed.
1000 1000 $ cat xunit.xml
1001 1001 <?xml version="1.0" encoding="utf-8"?>
1002 1002 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1003 1003 <testcase name="test-success.t" time="*"/> (glob)
1004 1004 <testcase name="test-skip.t">
1005 1005 <skipped>
1006 1006 <![CDATA[missing feature: nail clipper]]> </skipped>
1007 1007 </testcase>
1008 1008 </testsuite>
1009 1009
1010 1010 Missing skips or blacklisted skips don't count as executed:
1011 1011 $ echo test-failure.t > blacklist
1012 1012 $ rt --blacklist=blacklist --json\
1013 1013 > test-failure.t test-bogus.t
1014 1014 ss
1015 1015 Skipped test-bogus.t: Doesn't exist
1016 1016 Skipped test-failure.t: blacklisted
1017 1017 # Ran 0 tests, 2 skipped, 0 failed.
1018 1018 $ cat report.json
1019 1019 testreport ={
1020 1020 "test-bogus.t": {
1021 1021 "result": "skip"
1022 1022 },
1023 1023 "test-failure.t": {
1024 1024 "result": "skip"
1025 1025 }
1026 1026 } (no-eol)
1027 1027
1028 1028 Whitelist trumps blacklist
1029 1029 $ echo test-failure.t > whitelist
1030 1030 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1031 1031 > test-failure.t test-bogus.t
1032 1032 s
1033 1033 --- $TESTTMP/test-failure.t
1034 1034 +++ $TESTTMP/test-failure.t.err
1035 1035 @@ -1,5 +1,5 @@
1036 1036 $ echo babar
1037 1037 - rataxes
1038 1038 + babar
1039 1039 This is a noop statement so that
1040 1040 this test is still more bytes than success.
1041 1041 pad pad pad pad............................................................
1042 1042
1043 1043 ERROR: test-failure.t output changed
1044 1044 !
1045 1045 Skipped test-bogus.t: Doesn't exist
1046 1046 Failed test-failure.t: output changed
1047 1047 # Ran 1 tests, 1 skipped, 1 failed.
1048 1048 python hash seed: * (glob)
1049 1049 [1]
1050 1050
1051 1051 Ensure that --test-list causes only the tests listed in that file to
1052 1052 be executed.
1053 1053 $ echo test-success.t >> onlytest
1054 1054 $ rt --test-list=onlytest
1055 1055 .
1056 1056 # Ran 1 tests, 0 skipped, 0 failed.
1057 1057 $ echo test-bogus.t >> anothertest
1058 1058 $ rt --test-list=onlytest --test-list=anothertest
1059 1059 s.
1060 1060 Skipped test-bogus.t: Doesn't exist
1061 1061 # Ran 1 tests, 1 skipped, 0 failed.
1062 1062 $ rm onlytest anothertest
1063 1063
1064 1064 test for --json
1065 1065 ==================
1066 1066
1067 1067 $ rt --json
1068 1068
1069 1069 --- $TESTTMP/test-failure.t
1070 1070 +++ $TESTTMP/test-failure.t.err
1071 1071 @@ -1,5 +1,5 @@
1072 1072 $ echo babar
1073 1073 - rataxes
1074 1074 + babar
1075 1075 This is a noop statement so that
1076 1076 this test is still more bytes than success.
1077 1077 pad pad pad pad............................................................
1078 1078
1079 1079 ERROR: test-failure.t output changed
1080 1080 !.s
1081 1081 Skipped test-skip.t: missing feature: nail clipper
1082 1082 Failed test-failure.t: output changed
1083 1083 # Ran 2 tests, 1 skipped, 1 failed.
1084 1084 python hash seed: * (glob)
1085 1085 [1]
1086 1086
1087 1087 $ cat report.json
1088 1088 testreport ={
1089 1089 "test-failure.t": [\{] (re)
1090 1090 "csys": "\s*[\d\.]{4,5}", ? (re)
1091 1091 "cuser": "\s*[\d\.]{4,5}", ? (re)
1092 1092 "diff": "---.+\+\+\+.+", ? (re)
1093 1093 "end": "\s*[\d\.]{4,5}", ? (re)
1094 1094 "result": "failure", ? (re)
1095 1095 "start": "\s*[\d\.]{4,5}", ? (re)
1096 1096 "time": "\s*[\d\.]{4,5}" (re)
1097 1097 }, ? (re)
1098 1098 "test-skip.t": {
1099 1099 "csys": "\s*[\d\.]{4,5}", ? (re)
1100 1100 "cuser": "\s*[\d\.]{4,5}", ? (re)
1101 1101 "diff": "", ? (re)
1102 1102 "end": "\s*[\d\.]{4,5}", ? (re)
1103 1103 "result": "skip", ? (re)
1104 1104 "start": "\s*[\d\.]{4,5}", ? (re)
1105 1105 "time": "\s*[\d\.]{4,5}" (re)
1106 1106 }, ? (re)
1107 1107 "test-success.t": [\{] (re)
1108 1108 "csys": "\s*[\d\.]{4,5}", ? (re)
1109 1109 "cuser": "\s*[\d\.]{4,5}", ? (re)
1110 1110 "diff": "", ? (re)
1111 1111 "end": "\s*[\d\.]{4,5}", ? (re)
1112 1112 "result": "success", ? (re)
1113 1113 "start": "\s*[\d\.]{4,5}", ? (re)
1114 1114 "time": "\s*[\d\.]{4,5}" (re)
1115 1115 }
1116 1116 } (no-eol)
1117 1117 --json with --outputdir
1118 1118
1119 1119 $ rm report.json
1120 1120 $ rm -r output
1121 1121 $ mkdir output
1122 1122 $ rt --json --outputdir output
1123 1123
1124 1124 --- $TESTTMP/test-failure.t
1125 1125 +++ $TESTTMP/output/test-failure.t.err
1126 1126 @@ -1,5 +1,5 @@
1127 1127 $ echo babar
1128 1128 - rataxes
1129 1129 + babar
1130 1130 This is a noop statement so that
1131 1131 this test is still more bytes than success.
1132 1132 pad pad pad pad............................................................
1133 1133
1134 1134 ERROR: test-failure.t output changed
1135 1135 !.s
1136 1136 Skipped test-skip.t: missing feature: nail clipper
1137 1137 Failed test-failure.t: output changed
1138 1138 # Ran 2 tests, 1 skipped, 1 failed.
1139 1139 python hash seed: * (glob)
1140 1140 [1]
1141 1141 $ f report.json
1142 1142 report.json: file not found
1143 1143 $ cat output/report.json
1144 1144 testreport ={
1145 1145 "test-failure.t": [\{] (re)
1146 1146 "csys": "\s*[\d\.]{4,5}", ? (re)
1147 1147 "cuser": "\s*[\d\.]{4,5}", ? (re)
1148 1148 "diff": "---.+\+\+\+.+", ? (re)
1149 1149 "end": "\s*[\d\.]{4,5}", ? (re)
1150 1150 "result": "failure", ? (re)
1151 1151 "start": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "time": "\s*[\d\.]{4,5}" (re)
1153 1153 }, ? (re)
1154 1154 "test-skip.t": {
1155 1155 "csys": "\s*[\d\.]{4,5}", ? (re)
1156 1156 "cuser": "\s*[\d\.]{4,5}", ? (re)
1157 1157 "diff": "", ? (re)
1158 1158 "end": "\s*[\d\.]{4,5}", ? (re)
1159 1159 "result": "skip", ? (re)
1160 1160 "start": "\s*[\d\.]{4,5}", ? (re)
1161 1161 "time": "\s*[\d\.]{4,5}" (re)
1162 1162 }, ? (re)
1163 1163 "test-success.t": [\{] (re)
1164 1164 "csys": "\s*[\d\.]{4,5}", ? (re)
1165 1165 "cuser": "\s*[\d\.]{4,5}", ? (re)
1166 1166 "diff": "", ? (re)
1167 1167 "end": "\s*[\d\.]{4,5}", ? (re)
1168 1168 "result": "success", ? (re)
1169 1169 "start": "\s*[\d\.]{4,5}", ? (re)
1170 1170 "time": "\s*[\d\.]{4,5}" (re)
1171 1171 }
1172 1172 } (no-eol)
1173 1173 $ ls -a output
1174 1174 .
1175 1175 ..
1176 1176 .testtimes
1177 1177 report.json
1178 1178 test-failure.t.err
1179 1179
1180 1180 Test that failed test accepted through interactive are properly reported:
1181 1181
1182 1182 $ cp test-failure.t backup
1183 1183 $ echo y | rt --json -i
1184 1184
1185 1185 --- $TESTTMP/test-failure.t
1186 1186 +++ $TESTTMP/test-failure.t.err
1187 1187 @@ -1,5 +1,5 @@
1188 1188 $ echo babar
1189 1189 - rataxes
1190 1190 + babar
1191 1191 This is a noop statement so that
1192 1192 this test is still more bytes than success.
1193 1193 pad pad pad pad............................................................
1194 1194 Accept this change? [n] ..s
1195 1195 Skipped test-skip.t: missing feature: nail clipper
1196 1196 # Ran 2 tests, 1 skipped, 0 failed.
1197 1197
1198 1198 $ cat report.json
1199 1199 testreport ={
1200 1200 "test-failure.t": [\{] (re)
1201 1201 "csys": "\s*[\d\.]{4,5}", ? (re)
1202 1202 "cuser": "\s*[\d\.]{4,5}", ? (re)
1203 1203 "diff": "", ? (re)
1204 1204 "end": "\s*[\d\.]{4,5}", ? (re)
1205 1205 "result": "success", ? (re)
1206 1206 "start": "\s*[\d\.]{4,5}", ? (re)
1207 1207 "time": "\s*[\d\.]{4,5}" (re)
1208 1208 }, ? (re)
1209 1209 "test-skip.t": {
1210 1210 "csys": "\s*[\d\.]{4,5}", ? (re)
1211 1211 "cuser": "\s*[\d\.]{4,5}", ? (re)
1212 1212 "diff": "", ? (re)
1213 1213 "end": "\s*[\d\.]{4,5}", ? (re)
1214 1214 "result": "skip", ? (re)
1215 1215 "start": "\s*[\d\.]{4,5}", ? (re)
1216 1216 "time": "\s*[\d\.]{4,5}" (re)
1217 1217 }, ? (re)
1218 1218 "test-success.t": [\{] (re)
1219 1219 "csys": "\s*[\d\.]{4,5}", ? (re)
1220 1220 "cuser": "\s*[\d\.]{4,5}", ? (re)
1221 1221 "diff": "", ? (re)
1222 1222 "end": "\s*[\d\.]{4,5}", ? (re)
1223 1223 "result": "success", ? (re)
1224 1224 "start": "\s*[\d\.]{4,5}", ? (re)
1225 1225 "time": "\s*[\d\.]{4,5}" (re)
1226 1226 }
1227 1227 } (no-eol)
1228 1228 $ mv backup test-failure.t
1229 1229
1230 1230 backslash on end of line with glob matching is handled properly
1231 1231
1232 1232 $ cat > test-glob-backslash.t << EOF
1233 1233 > $ echo 'foo bar \\'
1234 1234 > foo * \ (glob)
1235 1235 > EOF
1236 1236
1237 1237 $ rt test-glob-backslash.t
1238 1238 .
1239 1239 # Ran 1 tests, 0 skipped, 0 failed.
1240 1240
1241 1241 $ rm -f test-glob-backslash.t
1242 1242
1243 1243 Test globbing of local IP addresses
1244 1244 $ echo 172.16.18.1
1245 1245 $LOCALIP (glob)
1246 1246 $ echo dead:beef::1
1247 1247 $LOCALIP (glob)
1248 1248
1249 Add support for external test formatter
1250 =======================================
1251
1252 $ CUSTOM_TEST_RESULT=basic_test_result $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1253
1254 # Ran 2 tests, 0 skipped, 0 failed.
1255 FAILURE! test-failure.t output changed
1256 SUCCESS! test-success.t
1257
1249 1258 Test reusability for third party tools
1250 1259 ======================================
1251 1260
1252 1261 $ mkdir "$TESTTMP"/anothertests
1253 1262 $ cd "$TESTTMP"/anothertests
1254 1263
1255 1264 test that `run-tests.py` can execute hghave, even if it is not run in
1256 1265 the Mercurial source tree.
1257 1266
1258 1267 $ cat > test-hghave.t <<EOF
1259 1268 > #require true
1260 1269 > $ echo foo
1261 1270 > foo
1262 1271 > EOF
1263 1272 $ rt test-hghave.t
1264 1273 .
1265 1274 # Ran 1 tests, 0 skipped, 0 failed.
1266 1275
1267 1276 test that RUNTESTDIR refers to the directory in which the currently
1268 1277 running `run-tests.py` is placed.
1269 1278
1270 1279 $ cat > test-runtestdir.t <<EOF
1271 1280 > - $TESTDIR, in which test-run-tests.t is placed
1272 1281 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1273 1282 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1274 1283 >
1275 1284 > #if windows
1276 1285 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1277 1286 > #else
1278 1287 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1279 1288 > #endif
1280 1289 > If this prints a path, that means RUNTESTDIR didn't equal
1281 1290 > TESTDIR as it should have.
1282 1291 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1283 1292 > This should print the start of check-code. If this passes but the
1284 1293 > previous check failed, that means we found a copy of check-code at whatever
1285 1294 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1286 1295 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1287 1296 > #!USRBINENVPY
1288 1297 > #
1289 1298 > # check-code - a style and portability checker for Mercurial
1290 1299 > EOF
1291 1300 $ rt test-runtestdir.t
1292 1301 .
1293 1302 # Ran 1 tests, 0 skipped, 0 failed.
1294 1303
1295 1304 #if execbit
1296 1305
1297 1306 test that TESTDIR is referred in PATH
1298 1307
1299 1308 $ cat > custom-command.sh <<EOF
1300 1309 > #!/bin/sh
1301 1310 > echo "hello world"
1302 1311 > EOF
1303 1312 $ chmod +x custom-command.sh
1304 1313 $ cat > test-testdir-path.t <<EOF
1305 1314 > $ custom-command.sh
1306 1315 > hello world
1307 1316 > EOF
1308 1317 $ rt test-testdir-path.t
1309 1318 .
1310 1319 # Ran 1 tests, 0 skipped, 0 failed.
1311 1320
1312 1321 #endif
1313 1322
1314 1323 test support for --allow-slow-tests
1315 1324 $ cat > test-very-slow-test.t <<EOF
1316 1325 > #require slow
1317 1326 > $ echo pass
1318 1327 > pass
1319 1328 > EOF
1320 1329 $ rt test-very-slow-test.t
1321 1330 s
1322 1331 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1323 1332 # Ran 0 tests, 1 skipped, 0 failed.
1324 1333 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1325 1334 .
1326 1335 # Ran 1 tests, 0 skipped, 0 failed.
1327 1336
1328 1337 support for running a test outside the current directory
1329 1338 $ mkdir nonlocal
1330 1339 $ cat > nonlocal/test-is-not-here.t << EOF
1331 1340 > $ echo pass
1332 1341 > pass
1333 1342 > EOF
1334 1343 $ rt nonlocal/test-is-not-here.t
1335 1344 .
1336 1345 # Ran 1 tests, 0 skipped, 0 failed.
1337 1346
1338 1347 support for automatically discovering tests if arg is a folder
1339 1348 $ mkdir tmp && cd tmp
1340 1349
1341 1350 $ cat > test-uno.t << EOF
1342 1351 > $ echo line
1343 1352 > line
1344 1353 > EOF
1345 1354
1346 1355 $ cp test-uno.t test-dos.t
1347 1356 $ cd ..
1348 1357 $ cp -R tmp tmpp
1349 1358 $ cp tmp/test-uno.t test-solo.t
1350 1359
1351 1360 $ rt tmp/ test-solo.t tmpp
1352 1361 .....
1353 1362 # Ran 5 tests, 0 skipped, 0 failed.
1354 1363 $ rm -rf tmp tmpp
1355 1364
1356 1365 support for running run-tests.py from another directory
1357 1366 $ mkdir tmp && cd tmp
1358 1367
1359 1368 $ cat > useful-file.sh << EOF
1360 1369 > important command
1361 1370 > EOF
1362 1371
1363 1372 $ cat > test-folder.t << EOF
1364 1373 > $ cat \$TESTDIR/useful-file.sh
1365 1374 > important command
1366 1375 > EOF
1367 1376
1368 1377 $ cat > test-folder-fail.t << EOF
1369 1378 > $ cat \$TESTDIR/useful-file.sh
1370 1379 > important commando
1371 1380 > EOF
1372 1381
1373 1382 $ cd ..
1374 1383 $ rt tmp/test-*.t
1375 1384
1376 1385 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1377 1386 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1378 1387 @@ -1,2 +1,2 @@
1379 1388 $ cat $TESTDIR/useful-file.sh
1380 1389 - important commando
1381 1390 + important command
1382 1391
1383 1392 ERROR: test-folder-fail.t output changed
1384 1393 !.
1385 1394 Failed test-folder-fail.t: output changed
1386 1395 # Ran 2 tests, 0 skipped, 1 failed.
1387 1396 python hash seed: * (glob)
1388 1397 [1]
1389 1398
1390 1399 support for bisecting failed tests automatically
1391 1400 $ hg init bisect
1392 1401 $ cd bisect
1393 1402 $ cat >> test-bisect.t <<EOF
1394 1403 > $ echo pass
1395 1404 > pass
1396 1405 > EOF
1397 1406 $ hg add test-bisect.t
1398 1407 $ hg ci -m 'good'
1399 1408 $ cat >> test-bisect.t <<EOF
1400 1409 > $ echo pass
1401 1410 > fail
1402 1411 > EOF
1403 1412 $ hg ci -m 'bad'
1404 1413 $ rt --known-good-rev=0 test-bisect.t
1405 1414
1406 1415 --- $TESTTMP/anothertests/bisect/test-bisect.t
1407 1416 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1408 1417 @@ -1,4 +1,4 @@
1409 1418 $ echo pass
1410 1419 pass
1411 1420 $ echo pass
1412 1421 - fail
1413 1422 + pass
1414 1423
1415 1424 ERROR: test-bisect.t output changed
1416 1425 !
1417 1426 Failed test-bisect.t: output changed
1418 1427 test-bisect.t broken by 72cbf122d116 (bad)
1419 1428 # Ran 1 tests, 0 skipped, 1 failed.
1420 1429 python hash seed: * (glob)
1421 1430 [1]
1422 1431
1423 1432 $ cd ..
1424 1433
1425 1434 support bisecting a separate repo
1426 1435
1427 1436 $ hg init bisect-dependent
1428 1437 $ cd bisect-dependent
1429 1438 $ cat > test-bisect-dependent.t <<EOF
1430 1439 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1431 1440 > pass
1432 1441 > EOF
1433 1442 $ hg commit -Am dependent test-bisect-dependent.t
1434 1443
1435 1444 $ rt --known-good-rev=0 test-bisect-dependent.t
1436 1445
1437 1446 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1438 1447 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1439 1448 @@ -1,2 +1,2 @@
1440 1449 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1441 1450 - pass
1442 1451 + fail
1443 1452
1444 1453 ERROR: test-bisect-dependent.t output changed
1445 1454 !
1446 1455 Failed test-bisect-dependent.t: output changed
1447 1456 Failed to identify failure point for test-bisect-dependent.t
1448 1457 # Ran 1 tests, 0 skipped, 1 failed.
1449 1458 python hash seed: * (glob)
1450 1459 [1]
1451 1460
1452 1461 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1453 1462 usage: run-tests.py [options] [tests]
1454 1463 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1455 1464 [2]
1456 1465
1457 1466 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1458 1467
1459 1468 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1460 1469 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1461 1470 @@ -1,2 +1,2 @@
1462 1471 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1463 1472 - pass
1464 1473 + fail
1465 1474
1466 1475 ERROR: test-bisect-dependent.t output changed
1467 1476 !
1468 1477 Failed test-bisect-dependent.t: output changed
1469 1478 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1470 1479 # Ran 1 tests, 0 skipped, 1 failed.
1471 1480 python hash seed: * (glob)
1472 1481 [1]
1473 1482
1474 1483 $ cd ..
1475 1484
1476 1485 Test a broken #if statement doesn't break run-tests threading.
1477 1486 ==============================================================
1478 1487 $ mkdir broken
1479 1488 $ cd broken
1480 1489 $ cat > test-broken.t <<EOF
1481 1490 > true
1482 1491 > #if notarealhghavefeature
1483 1492 > $ false
1484 1493 > #endif
1485 1494 > EOF
1486 1495 $ for f in 1 2 3 4 ; do
1487 1496 > cat > test-works-$f.t <<EOF
1488 1497 > This is test case $f
1489 1498 > $ sleep 1
1490 1499 > EOF
1491 1500 > done
1492 1501 $ rt -j 2
1493 1502 ....
1494 1503 # Ran 5 tests, 0 skipped, 0 failed.
1495 1504 skipped: unknown feature: notarealhghavefeature
1496 1505
1497 1506 $ cd ..
1498 1507 $ rm -rf broken
1499 1508
1500 1509 Test cases in .t files
1501 1510 ======================
1502 1511 $ mkdir cases
1503 1512 $ cd cases
1504 1513 $ cat > test-cases-abc.t <<'EOF'
1505 1514 > #testcases A B C
1506 1515 > $ V=B
1507 1516 > #if A
1508 1517 > $ V=A
1509 1518 > #endif
1510 1519 > #if C
1511 1520 > $ V=C
1512 1521 > #endif
1513 1522 > $ echo $V | sed 's/A/C/'
1514 1523 > C
1515 1524 > #if C
1516 1525 > $ [ $V = C ]
1517 1526 > #endif
1518 1527 > #if A
1519 1528 > $ [ $V = C ]
1520 1529 > [1]
1521 1530 > #endif
1522 1531 > #if no-C
1523 1532 > $ [ $V = C ]
1524 1533 > [1]
1525 1534 > #endif
1526 1535 > $ [ $V = D ]
1527 1536 > [1]
1528 1537 > EOF
1529 1538 $ rt
1530 1539 .
1531 1540 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1532 1541 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1533 1542 @@ -7,7 +7,7 @@
1534 1543 $ V=C
1535 1544 #endif
1536 1545 $ echo $V | sed 's/A/C/'
1537 1546 - C
1538 1547 + B
1539 1548 #if C
1540 1549 $ [ $V = C ]
1541 1550 #endif
1542 1551
1543 1552 ERROR: test-cases-abc.t#B output changed
1544 1553 !.
1545 1554 Failed test-cases-abc.t#B: output changed
1546 1555 # Ran 3 tests, 0 skipped, 1 failed.
1547 1556 python hash seed: * (glob)
1548 1557 [1]
1549 1558
1550 1559 --restart works
1551 1560
1552 1561 $ rt --restart
1553 1562
1554 1563 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1555 1564 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1556 1565 @@ -7,7 +7,7 @@
1557 1566 $ V=C
1558 1567 #endif
1559 1568 $ echo $V | sed 's/A/C/'
1560 1569 - C
1561 1570 + B
1562 1571 #if C
1563 1572 $ [ $V = C ]
1564 1573 #endif
1565 1574
1566 1575 ERROR: test-cases-abc.t#B output changed
1567 1576 !.
1568 1577 Failed test-cases-abc.t#B: output changed
1569 1578 # Ran 2 tests, 0 skipped, 1 failed.
1570 1579 python hash seed: * (glob)
1571 1580 [1]
1572 1581
1573 1582 --restart works with outputdir
1574 1583
1575 1584 $ mkdir output
1576 1585 $ mv test-cases-abc.t.B.err output
1577 1586 $ rt --restart --outputdir output
1578 1587
1579 1588 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1580 1589 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1581 1590 @@ -7,7 +7,7 @@
1582 1591 $ V=C
1583 1592 #endif
1584 1593 $ echo $V | sed 's/A/C/'
1585 1594 - C
1586 1595 + B
1587 1596 #if C
1588 1597 $ [ $V = C ]
1589 1598 #endif
1590 1599
1591 1600 ERROR: test-cases-abc.t#B output changed
1592 1601 !.
1593 1602 Failed test-cases-abc.t#B: output changed
1594 1603 # Ran 2 tests, 0 skipped, 1 failed.
1595 1604 python hash seed: * (glob)
1596 1605 [1]
1597 1606
1598 1607 Test TESTCASE variable
1599 1608
1600 1609 $ cat > test-cases-ab.t <<'EOF'
1601 1610 > $ dostuff() {
1602 1611 > > echo "In case $TESTCASE"
1603 1612 > > }
1604 1613 > #testcases A B
1605 1614 > #if A
1606 1615 > $ dostuff
1607 1616 > In case A
1608 1617 > #endif
1609 1618 > #if B
1610 1619 > $ dostuff
1611 1620 > In case B
1612 1621 > #endif
1613 1622 > EOF
1614 1623 $ rt test-cases-ab.t
1615 1624 ..
1616 1625 # Ran 2 tests, 0 skipped, 0 failed.
1617 1626
1618 1627 Support running a specific test case
1619 1628
1620 1629 $ rt "test-cases-abc.t#B"
1621 1630
1622 1631 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1623 1632 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1624 1633 @@ -7,7 +7,7 @@
1625 1634 $ V=C
1626 1635 #endif
1627 1636 $ echo $V | sed 's/A/C/'
1628 1637 - C
1629 1638 + B
1630 1639 #if C
1631 1640 $ [ $V = C ]
1632 1641 #endif
1633 1642
1634 1643 ERROR: test-cases-abc.t#B output changed
1635 1644 !
1636 1645 Failed test-cases-abc.t#B: output changed
1637 1646 # Ran 1 tests, 0 skipped, 1 failed.
1638 1647 python hash seed: * (glob)
1639 1648 [1]
1640 1649
1641 1650 Support running multiple test cases in the same file
1642 1651
1643 1652 $ rt test-cases-abc.t#B test-cases-abc.t#C
1644 1653
1645 1654 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1646 1655 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1647 1656 @@ -7,7 +7,7 @@
1648 1657 $ V=C
1649 1658 #endif
1650 1659 $ echo $V | sed 's/A/C/'
1651 1660 - C
1652 1661 + B
1653 1662 #if C
1654 1663 $ [ $V = C ]
1655 1664 #endif
1656 1665
1657 1666 ERROR: test-cases-abc.t#B output changed
1658 1667 !.
1659 1668 Failed test-cases-abc.t#B: output changed
1660 1669 # Ran 2 tests, 0 skipped, 1 failed.
1661 1670 python hash seed: * (glob)
1662 1671 [1]
1663 1672
1664 1673 Support ignoring invalid test cases
1665 1674
1666 1675 $ rt test-cases-abc.t#B test-cases-abc.t#D
1667 1676
1668 1677 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1669 1678 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1670 1679 @@ -7,7 +7,7 @@
1671 1680 $ V=C
1672 1681 #endif
1673 1682 $ echo $V | sed 's/A/C/'
1674 1683 - C
1675 1684 + B
1676 1685 #if C
1677 1686 $ [ $V = C ]
1678 1687 #endif
1679 1688
1680 1689 ERROR: test-cases-abc.t#B output changed
1681 1690 !
1682 1691 Failed test-cases-abc.t#B: output changed
1683 1692 # Ran 1 tests, 0 skipped, 1 failed.
1684 1693 python hash seed: * (glob)
1685 1694 [1]
1686 1695
1687 1696 Support running complex test cases names
1688 1697
1689 1698 $ cat > test-cases-advanced-cases.t <<'EOF'
1690 1699 > #testcases simple case-with-dashes casewith_-.chars
1691 1700 > $ echo $TESTCASE
1692 1701 > simple
1693 1702 > EOF
1694 1703
1695 1704 $ cat test-cases-advanced-cases.t
1696 1705 #testcases simple case-with-dashes casewith_-.chars
1697 1706 $ echo $TESTCASE
1698 1707 simple
1699 1708
1700 1709 $ rt test-cases-advanced-cases.t
1701 1710
1702 1711 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1703 1712 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
1704 1713 @@ -1,3 +1,3 @@
1705 1714 #testcases simple case-with-dashes casewith_-.chars
1706 1715 $ echo $TESTCASE
1707 1716 - simple
1708 1717 + case-with-dashes
1709 1718
1710 1719 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1711 1720 !
1712 1721 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1713 1722 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
1714 1723 @@ -1,3 +1,3 @@
1715 1724 #testcases simple case-with-dashes casewith_-.chars
1716 1725 $ echo $TESTCASE
1717 1726 - simple
1718 1727 + casewith_-.chars
1719 1728
1720 1729 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1721 1730 !.
1722 1731 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1723 1732 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1724 1733 # Ran 3 tests, 0 skipped, 2 failed.
1725 1734 python hash seed: * (glob)
1726 1735 [1]
1727 1736
1728 1737 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1729 1738
1730 1739 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1731 1740 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
1732 1741 @@ -1,3 +1,3 @@
1733 1742 #testcases simple case-with-dashes casewith_-.chars
1734 1743 $ echo $TESTCASE
1735 1744 - simple
1736 1745 + case-with-dashes
1737 1746
1738 1747 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1739 1748 !
1740 1749 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1741 1750 # Ran 1 tests, 0 skipped, 1 failed.
1742 1751 python hash seed: * (glob)
1743 1752 [1]
1744 1753
1745 1754 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1746 1755
1747 1756 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1748 1757 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
1749 1758 @@ -1,3 +1,3 @@
1750 1759 #testcases simple case-with-dashes casewith_-.chars
1751 1760 $ echo $TESTCASE
1752 1761 - simple
1753 1762 + casewith_-.chars
1754 1763
1755 1764 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1756 1765 !
1757 1766 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1758 1767 # Ran 1 tests, 0 skipped, 1 failed.
1759 1768 python hash seed: * (glob)
1760 1769 [1]
1761 1770
1762 1771 Test automatic pattern replacement
1763 1772 ==================================
1764 1773
1765 1774 $ cat << EOF >> common-pattern.py
1766 1775 > substitutions = [
1767 1776 > (br'foo-(.*)\\b',
1768 1777 > br'\$XXX=\\1\$'),
1769 1778 > (br'bar\\n',
1770 1779 > br'\$YYY$\\n'),
1771 1780 > ]
1772 1781 > EOF
1773 1782
1774 1783 $ cat << EOF >> test-substitution.t
1775 1784 > $ echo foo-12
1776 1785 > \$XXX=12$
1777 1786 > $ echo foo-42
1778 1787 > \$XXX=42$
1779 1788 > $ echo bar prior
1780 1789 > bar prior
1781 1790 > $ echo lastbar
1782 1791 > last\$YYY$
1783 1792 > $ echo foo-bar foo-baz
1784 1793 > EOF
1785 1794
1786 1795 $ rt test-substitution.t
1787 1796
1788 1797 --- $TESTTMP/anothertests/cases/test-substitution.t
1789 1798 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1790 1799 @@ -7,3 +7,4 @@
1791 1800 $ echo lastbar
1792 1801 last$YYY$
1793 1802 $ echo foo-bar foo-baz
1794 1803 + $XXX=bar foo-baz$
1795 1804
1796 1805 ERROR: test-substitution.t output changed
1797 1806 !
1798 1807 Failed test-substitution.t: output changed
1799 1808 # Ran 1 tests, 0 skipped, 1 failed.
1800 1809 python hash seed: * (glob)
1801 1810 [1]
1802 1811
1803 1812 --extra-config-opt works
1804 1813
1805 1814 $ cat << EOF >> test-config-opt.t
1806 1815 > $ hg init test-config-opt
1807 1816 > $ hg -R test-config-opt purge
1808 1817 > EOF
1809 1818
1810 1819 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1811 1820 .
1812 1821 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now