##// END OF EJS Templates
testrunner: use "#" for "test cases" suffix in .err filename too...
Martin von Zweigbergk -
r38859:a9a4b0fc default
parent child Browse files
Show More
@@ -1,3162 +1,3162 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
# With RTUNICODEPEDANTRY set, make implicit str<->unicode conversions fail
# loudly. reload()/sys.setdefaultencoding() only exist on Python 2, hence
# the NameError guard on Python 3.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment before any test mutates it.
origenviron = os.environ.copy()
# Bytes view of the environment where available (os.environb on Python 3).
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess creation (acquired around Popen calls).
processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
if pygmentspresent:
    # Style and lexer used to colorize the harness's own summary output
    # (Skipped/Failed/ERROR lines), as opposed to difflexer above which
    # colorizes test output diffs.
    class TestRunnerStyle(style.Style):
        default_style = ""
        # Custom token types for the four pieces we colorize.
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # A test name: foo.t or foo.py, optionally with a "#case" suffix.
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
142 142
# Python version compatibility: define _bytespath/_strpath so the rest of
# the script can convert between text and byte paths uniformly. Only
# Python 3.5+ and 2.x are supported; 3.0-3.4 exit immediately.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Encode a text path to bytes; None passes through.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # Decode a bytes path back to text; None passes through.
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
# For Windows support
# os.WIFEXITED is POSIX-only; on platforms without it, report "not exited".
wifexited = getattr(os, "WIFEXITED", lambda x: False)
174 174
175 175 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        # The platform does not even define this address family.
        return False
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind(('localhost', port))
        probe.close()
    except socket.error as exc:
        # Another listener on the port still proves the family works.
        if exc.errno == errno.EADDRINUSE:
            return True
        # These errnos mean the family is genuinely unusable here.
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
199 199
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind(('localhost', port))
        probe.close()
    except socket.error as exc:
        # These errnos mean "not available"; anything else is a real error.
        expected = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                    errno.EPROTONOSUPPORT)
        if exc.errno not in expected:
            raise
        return False
    return True
219 219
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn a shell command, popen2.Popen4-style.

    Returns the subprocess.Popen object, augmented with the legacy
    popen2 attribute names (fromchild/tochild/childerr) and a
    ``timeout`` flag. If timeout is nonzero, a watchdog thread sets
    ``p.timeout`` and terminates the process once roughly ``timeout``
    seconds have elapsed without the caller recording an exit.
    """
    # Hold the lock only around process creation, and release it even if
    # Popen raises (the previous acquire/release pair leaked the lock on
    # failure, deadlocking every subsequent launch).
    with processlock:
        p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)

    # popen2 compatibility aliases.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # p.returncode becomes non-None when the caller's wait()/poll()
            # reaps the child; until then, poll the deadline.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
245 245
# Path of the running interpreter, normalized to forward slashes.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults, each overridable via an environment variable:
# option name -> (envvar, fallback value). getparser() folds the
# environment values in and installs them as parser defaults.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
258 258
def canonpath(path):
    """Expand a leading ~ and resolve symlinks to canonicalize *path*."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
261 261
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into an {entry: filename} dict.

    Each non-empty line (after stripping "#" comments and whitespace)
    becomes a bytes key mapped to the file it came from. Missing files
    are skipped, with an optional warning; other I/O errors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Close the file even if reading raises part-way (the previous
        # readlines()/close() pair leaked the handle on error).
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
282 282
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = set()
    marker = b'#testcases '
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(marker):
                    cases.update(line[len(marker):].split())
    except IOError as ex:
        # A missing file simply means no declared cases.
        if ex.errno != errno.ENOENT:
            raise
    return cases
298 298
def getparser():
    """Obtain the argparse.ArgumentParser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # Options choosing which tests run.
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
                           help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
                           help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
                           help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
                           help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
                           help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
                           help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
                           help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
                           help='Tests to run')

    # Options controlling how the harness itself behaves.
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
                         metavar='bisect_repo',
                         help=("Path of a repo to bisect. Use together with "
                               "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
                         help="debug mode: write output of test scripts to console"
                              " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
                         help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
                         help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
                         help="number of jobs to run in parallel"
                              " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
                         help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
                         metavar="known_good_rev",
                         help=("Automatically bisect any failures using this "
                               "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
                         help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
                         help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
                         help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
                         help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
                         help="port on which servers should listen"
                              " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
                         help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
                         help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
                         help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
                         help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
                         help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
                         help="kill errant slow tests after SLOWTIMEOUT seconds"
                              " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
                         help="kill errant tests after TIMEOUT seconds"
                              " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
                         help="run tests in the given temporary directory"
                              " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
                         help="output verbose messages")

    # Options selecting which Mercurial is tested and how it is configured.
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
                        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
                        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
                        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
                        help="shortcut for --with-hg=<testdir>/../hg, "
                             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
                        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
                        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
                        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
                        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
                        metavar="HG",
                        help="test using specified hg script rather than a "
                             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
                        help="Python 3 interpreter (if running under Python 2)"
                             " (TEMPORARY)")

    # Options controlling how results are reported.
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
                           help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
                           default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
                           help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
                           help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
                           help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
                           help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
                           help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
                           help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
                           help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
                           help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
                           help="time how long each test takes")
    reporting.add_argument("--view",
                           help="external diff viewer")
    reporting.add_argument("--xunit",
                           help="record xunit results at specified path")

    # Fold $HGTEST_* environment overrides into the module-level defaults
    # table (coercing to the fallback's type), then install them as the
    # parser's defaults.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
435 435
def parseargs(args, parser):
    """Parse arguments with our ArgumentParser and validate results.

    Also sets the module globals `useipv6` and `verbose` as side effects,
    and normalizes several option values (paths canonicalized, list files
    parsed). Invalid combinations abort via parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local resolves to the hg (and optionally chg) binaries sitting
        # next to this tests directory in the repository.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    # Any of the coverage options requires the `coverage` package.
    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Empty-string prefix: truthy for `if verbose` in log(), while
        # vlog() tests `verbose is False` to decide whether to log at all.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug streams output and disables both timeouts.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
568 568
def rename(src, dst):
    """Move src to dst, overwriting an existing dst.

    Unlike os.rename(), this also works across filesystems and when the
    destination is open elsewhere, at the cost of atomicity: it is a
    copy followed by a delete of the source.
    """
    shutil.copy(src, dst)
    os.remove(src)
575 575
# On Python 3, difflib.unified_diff only accepts str; wrap it with
# difflib.diff_bytes so callers can diff test output as bytes.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
580 580
def getdiff(expected, output, ref, err):
    """Return (servefail, lines): a unified diff of expected vs output.

    servefail is True when the diff contains a server failing to start,
    which callers treat as a hard error rather than a plain mismatch.
    Header lines have backslashes normalized and trailing blanks trimmed.
    """
    servefail = False
    lines = []
    for raw in _unified_diff(expected, output, ref, err):
        if raw.startswith((b'+++', b'---')):
            raw = raw.replace(b'\\', b'/')
            if raw.endswith(b' \n'):
                raw = raw[:-2] + b'\n'
        lines.append(raw)
        if not servefail and raw.startswith(
                b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
595 595
# Verbosity marker; parseargs() swaps False for a (possibly empty) string
# prefix when -v is given, so vlog must test identity, not truthiness.
verbose = False
def vlog(*msg):
    """Forward *msg to log(), but only when verbose mode is enabled."""
    if verbose is False:
        return None
    return log(*msg)
603 603
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
623 623
def log(*msg):
    """Print each argument to stdout, space separated, under iolock.

    When verbose mode is active, the verbose prefix string is printed
    first; the stream is flushed so parallel runners interleave cleanly.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for part in msg:
            print(part, end=' ')
        print()
        sys.stdout.flush()
636 636
def highlightdiff(line, color):
    """Return *line* (bytes) colorized as diff output when color is truthy."""
    if not color:
        return line
    assert pygmentspresent
    text = line.decode('latin1')
    colorized = pygments.highlight(text, difflexer, terminal256formatter)
    return colorized.encode('latin1')
643 643
def highlightmsg(msg, color):
    """Return *msg* (str) colorized via the test-runner lexer when color
    is truthy; otherwise return it unchanged."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
649 649
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # The process may already have exited; nothing to do.
        pass
657 657
def killdaemons(pidfile):
    """Kill any daemon processes recorded in pidfile (best effort)."""
    # Imported lazily; killdaemons.py lives alongside this script.
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
662 662
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    # A test exiting with this code is reported as skipped, not failed.
    SKIPPED_STATUS = 80
673 673
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        outputdir is where the .err file for this test is written.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # Fall back to the environment-derived defaults table.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)  # bytes basename
        self.name = _strpath(self.bname)     # str basename, for reporting
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # Where this test's actual output is saved on mismatch.
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state, (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
750 750
751 751 def readrefout(self):
752 752 """read reference output"""
753 753 # If we're not in --debug mode and reference output file exists,
754 754 # check test output against it.
755 755 if self._debug:
756 756 return None # to match "out is None"
757 757 elif os.path.exists(self.refpath):
758 758 with open(self.refpath, 'rb') as f:
759 759 return f.read().splitlines(True)
760 760 else:
761 761 return []
762 762
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # Shown by unittest result reporters in place of a docstring.
        return self.name
773 773
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so the same Test instance can run repeatedly.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread directory may already exist from an earlier
            # test run on the same worker thread.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg gets its own socket directory under the thread tmpdir.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
806 806
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                # A tearDown failure overrides an earlier success.
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
861 861
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.

        Raises unittest.SkipTest for skipped tests and AssertionError (via
        self.fail) for failures; stores exit code and output on self.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    # Remember that a failure was already reported so later
                    # racing tests don't also print diffs (see firsterror).
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
930 930
931 931 def tearDown(self):
932 932 """Tasks to perform after run()."""
933 933 for entry in self._daemonpids:
934 934 killdaemons(entry)
935 935 self._daemonpids = []
936 936
937 937 if self._keeptmpdir:
938 938 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
939 939 (self._testtmp.decode('utf-8'),
940 940 self._threadtmp.decode('utf-8')))
941 941 else:
942 942 shutil.rmtree(self._testtmp, True)
943 943 shutil.rmtree(self._threadtmp, True)
944 944
945 945 if self._usechg:
946 946 # chgservers will stop automatically after they find the socket
947 947 # files are deleted
948 948 shutil.rmtree(self._chgsockdir, True)
949 949
950 950 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
951 951 and not self._debug and self._out:
952 952 with open(self.errpath, 'wb') as f:
953 953 for line in self._out:
954 954 f.write(line)
955 955
956 956 vlog("# Ret was:", self._ret, '(%s)' % self.name)
957 957
958 958 def _run(self, env):
959 959 # This should be implemented in child classes to run tests.
960 960 raise unittest.SkipTest('unknown test type')
961 961
962 962 def abort(self):
963 963 """Terminate execution of this test."""
964 964 self._aborted = True
965 965
966 966 def _portmap(self, i):
967 967 offset = b'' if i == 0 else b'%d' % i
968 968 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
969 969
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.

        Returns a list of (bytes regex, bytes replacement) pairs.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Replace the literal loopback address with $LOCALIP (but not
            # when preceded by a digit, to avoid mangling other numbers).
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Optional per-testdir substitutions, loaded by executing
        # common-pattern.py and reading its 'substitutions' list.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1002 1002
    def _escapepath(self, p):
        """Turn path bytes ``p`` into a regex matching it in test output.

        On Windows, letters match case-insensitively and '/' matches both
        separators; elsewhere the path is simply regex-escaped.

        NOTE(review): on Python 3 iterating ``bytes`` yields ints, so the
        per-character .isalpha()/.lower() calls in the 'nt' branch look like
        they would fail — confirm whether this branch is exercised under
        Python 3 on Windows.
        """
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
1012 1012
1013 1013 def _localip(self):
1014 1014 if self._useipv6:
1015 1015 return b'::1'
1016 1016 else:
1017 1017 return b'127.0.0.1'
1018 1018
1019 1019 def _genrestoreenv(self, testenv):
1020 1020 """Generate a script that can be used by tests to restore the original
1021 1021 environment."""
1022 1022 # Put the restoreenv script inside self._threadtmp
1023 1023 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1024 1024 testenv['HGTEST_RESTOREENV'] = scriptpath
1025 1025
1026 1026 # Only restore environment variable names that the shell allows
1027 1027 # us to export.
1028 1028 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1029 1029
1030 1030 # Do not restore these variables; otherwise tests would fail.
1031 1031 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1032 1032
1033 1033 with open(scriptpath, 'w') as envf:
1034 1034 for name, value in origenviron.items():
1035 1035 if not name_regex.match(name):
1036 1036 # Skip environment variables with unusual names not
1037 1037 # allowed by most shells.
1038 1038 continue
1039 1039 if name in reqnames:
1040 1040 continue
1041 1041 envf.write('%s=%s\n' % (name, shellquote(value)))
1042 1042
1043 1043 for name in testenv:
1044 1044 if name in origenviron or name in reqnames:
1045 1045 continue
1046 1046 envf.write('unset %s\n' % (name,))
1047 1047
1048 1048 def _getenv(self):
1049 1049 """Obtain environment variables to use during test execution."""
1050 1050 def defineport(i):
1051 1051 offset = '' if i == 0 else '%s' % i
1052 1052 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1053 1053 env = os.environ.copy()
1054 1054 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1055 1055 env['HGEMITWARNINGS'] = '1'
1056 1056 env['TESTTMP'] = self._testtmp
1057 1057 env['TESTNAME'] = self.name
1058 1058 env['HOME'] = self._testtmp
1059 1059 # This number should match portneeded in _getport
1060 1060 for port in xrange(3):
1061 1061 # This list should be parallel to _portmap in _getreplacements
1062 1062 defineport(port)
1063 1063 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1064 1064 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1065 1065 env["HGEDITOR"] = ('"' + sys.executable + '"'
1066 1066 + ' -c "import sys; sys.exit(0)"')
1067 1067 env["HGMERGE"] = "internal:merge"
1068 1068 env["HGUSER"] = "test"
1069 1069 env["HGENCODING"] = "ascii"
1070 1070 env["HGENCODINGMODE"] = "strict"
1071 1071 env['HGIPV6'] = str(int(self._useipv6))
1072 1072
1073 1073 extraextensions = []
1074 1074 for opt in self._extraconfigopts:
1075 1075 section, key = opt.encode('utf-8').split(b'.', 1)
1076 1076 if section != 'extensions':
1077 1077 continue
1078 1078 name = key.split(b'=', 1)[0]
1079 1079 extraextensions.append(name)
1080 1080
1081 1081 if extraextensions:
1082 1082 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1083 1083
1084 1084 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1085 1085 # IP addresses.
1086 1086 env['LOCALIP'] = self._localip()
1087 1087
1088 1088 # Reset some environment variables to well-known values so that
1089 1089 # the tests produce repeatable output.
1090 1090 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1091 1091 env['TZ'] = 'GMT'
1092 1092 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1093 1093 env['COLUMNS'] = '80'
1094 1094 env['TERM'] = 'xterm'
1095 1095
1096 1096 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1097 1097 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1098 1098 'NO_PROXY CHGDEBUG').split():
1099 1099 if k in env:
1100 1100 del env[k]
1101 1101
1102 1102 # unset env related to hooks
1103 1103 for k in list(env):
1104 1104 if k.startswith('HG_'):
1105 1105 del env[k]
1106 1106
1107 1107 if self._usechg:
1108 1108 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1109 1109
1110 1110 return env
1111 1111
1112 1112 def _createhgrc(self, path):
1113 1113 """Create an hgrc file for this test."""
1114 1114 with open(path, 'wb') as hgrc:
1115 1115 hgrc.write(b'[ui]\n')
1116 1116 hgrc.write(b'slash = True\n')
1117 1117 hgrc.write(b'interactive = False\n')
1118 1118 hgrc.write(b'mergemarkers = detailed\n')
1119 1119 hgrc.write(b'promptecho = True\n')
1120 1120 hgrc.write(b'[defaults]\n')
1121 1121 hgrc.write(b'[devel]\n')
1122 1122 hgrc.write(b'all-warnings = true\n')
1123 1123 hgrc.write(b'default-date = 0 0\n')
1124 1124 hgrc.write(b'[largefiles]\n')
1125 1125 hgrc.write(b'usercache = %s\n' %
1126 1126 (os.path.join(self._testtmp, b'.cache/largefiles')))
1127 1127 hgrc.write(b'[lfs]\n')
1128 1128 hgrc.write(b'usercache = %s\n' %
1129 1129 (os.path.join(self._testtmp, b'.cache/lfs')))
1130 1130 hgrc.write(b'[web]\n')
1131 1131 hgrc.write(b'address = localhost\n')
1132 1132 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1133 1133 hgrc.write(b'server-header = testing stub value\n')
1134 1134
1135 1135 for opt in self._extraconfigopts:
1136 1136 section, key = opt.encode('utf-8').split(b'.', 1)
1137 1137 assert b'=' in key, ('extra config opt %s must '
1138 1138 'have an = for assignment' % opt)
1139 1139 hgrc.write(b'[%s]\n%s\n' % (section, key))
1140 1140
1141 1141 def fail(self, msg):
1142 1142 # unittest differentiates between errored and failed.
1143 1143 # Failed is denoted by AssertionError (by default at least).
1144 1144 raise AssertionError(msg)
1145 1145
1146 1146 def _runcommand(self, cmd, env, normalizenewlines=False):
1147 1147 """Run command in a sub-process, capturing the output (stdout and
1148 1148 stderr).
1149 1149
1150 1150 Return a tuple (exitcode, output). output is None in debug mode.
1151 1151 """
1152 1152 if self._debug:
1153 1153 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
1154 1154 env=env)
1155 1155 ret = proc.wait()
1156 1156 return (ret, None)
1157 1157
1158 1158 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1159 1159 def cleanup():
1160 1160 terminate(proc)
1161 1161 ret = proc.wait()
1162 1162 if ret == 0:
1163 1163 ret = signal.SIGTERM << 8
1164 1164 killdaemons(env['DAEMON_PIDS'])
1165 1165 return ret
1166 1166
1167 1167 output = ''
1168 1168 proc.tochild.close()
1169 1169
1170 1170 try:
1171 1171 output = proc.fromchild.read()
1172 1172 except KeyboardInterrupt:
1173 1173 vlog('# Handling keyboard interrupt')
1174 1174 cleanup()
1175 1175 raise
1176 1176
1177 1177 ret = proc.wait()
1178 1178 if wifexited(ret):
1179 1179 ret = os.WEXITSTATUS(ret)
1180 1180
1181 1181 if proc.timeout:
1182 1182 ret = 'timeout'
1183 1183
1184 1184 if ret:
1185 1185 killdaemons(env['DAEMON_PIDS'])
1186 1186
1187 1187 for s, r in self._getreplacements():
1188 1188 output = re.sub(s, r, output)
1189 1189
1190 1190 if normalizenewlines:
1191 1191 output = output.replace('\r\n', '\n')
1192 1192
1193 1193 return ret, output.splitlines(True)
1194 1194
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        """Path of the .out file holding this test's reference output."""
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Pass -3 to surface py3k warnings when requested.
        py3kswitch = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Python on Windows emits \r\n line endings; normalize them so the
        # captured output can be compared against the reference output.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1213 1213
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained. These patterns are tried in TTest.globmatch before reporting
# an unnecessary (glob).
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1227 1227
# bchr(x): length-1 byte string for ordinal x, on both Python 2 (chr is
# sufficient) and Python 3 (where a bytes([x]) construction is required).
if PYTHON3:
    bchr = lambda x: bytes([x])
else:
    bchr = chr
1231 1231
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Markers hghave prints for skipped/failed feature checks; parsed by
    # parsehghaveoutput().
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes in output that must be rendered with an (esc) suffix.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map every byte to its \xNN escape; backslash and \r get readable forms.
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1242 1242
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', None)
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Suffix both the display name and the .err path with "#<case>"
            # so per-case runs of the same .t file don't collide.
            self.name = '%s#%s' % (self.name, _strpath(case))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
        # Memoized hghave results, keyed by the joined requirement list.
        self._have = {}
1254 1254
    @property
    def refpath(self):
        # The .t file itself doubles as the reference output.
        return os.path.join(self._testdir, self.bname)
1258 1258
    def _run(self, env):
        """Translate the .t file into a shell script, run it, and return the
        (exitcode, merged output) pair."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1291 1291
    def _hghave(self, reqs):
        """Check feature requirements via the external hghave script.

        Returns (available, message): available is True when every feature
        in reqs is present; message holds hghave's output otherwise.
        Results are memoized in self._have keyed by the joined req list.
        """
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself errored out (e.g. unknown feature): abort the run.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        if b'slow' in reqs:
            # Tests declaring themselves slow get the longer timeout.
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None
1320 1320
1321 1321 def _iftest(self, args):
1322 1322 # implements "#if"
1323 1323 reqs = []
1324 1324 for arg in args:
1325 1325 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1326 1326 if arg[3:] == self._case:
1327 1327 return False
1328 1328 elif arg in self._allcases:
1329 1329 if arg != self._case:
1330 1330 return False
1331 1331 else:
1332 1332 reqs.append(arg)
1333 1333 return self._hghave(reqs)[0]
1334 1334
1335 1335 def _parsetest(self, lines):
1336 1336 # We generate a shell script which outputs unique markers to line
1337 1337 # up script results with our source. These markers include input
1338 1338 # line number and the last return code.
1339 1339 salt = b"SALT%d" % time.time()
1340 1340 def addsalt(line, inpython):
1341 1341 if inpython:
1342 1342 script.append(b'%s %d 0\n' % (salt, line))
1343 1343 else:
1344 1344 script.append(b'echo %s %d $?\n' % (salt, line))
1345 1345
1346 1346 script = []
1347 1347
1348 1348 # After we run the shell script, we re-unify the script output
1349 1349 # with non-active parts of the source, with synchronization by our
1350 1350 # SALT line number markers. The after table contains the non-active
1351 1351 # components, ordered by line number.
1352 1352 after = {}
1353 1353
1354 1354 # Expected shell script output.
1355 1355 expected = {}
1356 1356
1357 1357 pos = prepos = -1
1358 1358
1359 1359 # True or False when in a true or false conditional section
1360 1360 skipping = None
1361 1361
1362 1362 # We keep track of whether or not we're in a Python block so we
1363 1363 # can generate the surrounding doctest magic.
1364 1364 inpython = False
1365 1365
1366 1366 if self._debug:
1367 1367 script.append(b'set -x\n')
1368 1368 if self._hgcommand != b'hg':
1369 1369 script.append(b'alias hg="%s"\n' % self._hgcommand)
1370 1370 if os.getenv('MSYSTEM'):
1371 1371 script.append(b'alias pwd="pwd -W"\n')
1372 1372 if self._case:
1373 1373 if isinstance(self._case, str):
1374 1374 quoted = shellquote(self._case)
1375 1375 else:
1376 1376 quoted = shellquote(self._case.decode('utf8')).encode('utf8')
1377 1377 script.append(b'TESTCASE=%s\n' % quoted)
1378 1378 script.append(b'export TESTCASE\n')
1379 1379
1380 1380 n = 0
1381 1381 for n, l in enumerate(lines):
1382 1382 if not l.endswith(b'\n'):
1383 1383 l += b'\n'
1384 1384 if l.startswith(b'#require'):
1385 1385 lsplit = l.split()
1386 1386 if len(lsplit) < 2 or lsplit[0] != b'#require':
1387 1387 after.setdefault(pos, []).append(' !!! invalid #require\n')
1388 1388 if not skipping:
1389 1389 haveresult, message = self._hghave(lsplit[1:])
1390 1390 if not haveresult:
1391 1391 script = [b'echo "%s"\nexit 80\n' % message]
1392 1392 break
1393 1393 after.setdefault(pos, []).append(l)
1394 1394 elif l.startswith(b'#if'):
1395 1395 lsplit = l.split()
1396 1396 if len(lsplit) < 2 or lsplit[0] != b'#if':
1397 1397 after.setdefault(pos, []).append(' !!! invalid #if\n')
1398 1398 if skipping is not None:
1399 1399 after.setdefault(pos, []).append(' !!! nested #if\n')
1400 1400 skipping = not self._iftest(lsplit[1:])
1401 1401 after.setdefault(pos, []).append(l)
1402 1402 elif l.startswith(b'#else'):
1403 1403 if skipping is None:
1404 1404 after.setdefault(pos, []).append(' !!! missing #if\n')
1405 1405 skipping = not skipping
1406 1406 after.setdefault(pos, []).append(l)
1407 1407 elif l.startswith(b'#endif'):
1408 1408 if skipping is None:
1409 1409 after.setdefault(pos, []).append(' !!! missing #if\n')
1410 1410 skipping = None
1411 1411 after.setdefault(pos, []).append(l)
1412 1412 elif skipping:
1413 1413 after.setdefault(pos, []).append(l)
1414 1414 elif l.startswith(b' >>> '): # python inlines
1415 1415 after.setdefault(pos, []).append(l)
1416 1416 prepos = pos
1417 1417 pos = n
1418 1418 if not inpython:
1419 1419 # We've just entered a Python block. Add the header.
1420 1420 inpython = True
1421 1421 addsalt(prepos, False) # Make sure we report the exit code.
1422 1422 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1423 1423 addsalt(n, True)
1424 1424 script.append(l[2:])
1425 1425 elif l.startswith(b' ... '): # python inlines
1426 1426 after.setdefault(prepos, []).append(l)
1427 1427 script.append(l[2:])
1428 1428 elif l.startswith(b' $ '): # commands
1429 1429 if inpython:
1430 1430 script.append(b'EOF\n')
1431 1431 inpython = False
1432 1432 after.setdefault(pos, []).append(l)
1433 1433 prepos = pos
1434 1434 pos = n
1435 1435 addsalt(n, False)
1436 1436 cmd = l[4:].split()
1437 1437 if len(cmd) == 2 and cmd[0] == b'cd':
1438 1438 l = b' $ cd %s || exit 1\n' % cmd[1]
1439 1439 script.append(l[4:])
1440 1440 elif l.startswith(b' > '): # continuations
1441 1441 after.setdefault(prepos, []).append(l)
1442 1442 script.append(l[4:])
1443 1443 elif l.startswith(b' '): # results
1444 1444 # Queue up a list of expected results.
1445 1445 expected.setdefault(pos, []).append(l[2:])
1446 1446 else:
1447 1447 if inpython:
1448 1448 script.append(b'EOF\n')
1449 1449 inpython = False
1450 1450 # Non-command/result. Queue up for merged output.
1451 1451 after.setdefault(pos, []).append(l)
1452 1452
1453 1453 if inpython:
1454 1454 script.append(b'EOF\n')
1455 1455 if skipping is not None:
1456 1456 after.setdefault(pos, []).append(' !!! missing #endif\n')
1457 1457 addsalt(n + 1, False)
1458 1458
1459 1459 return salt, script, after, expected
1460 1460
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Walks the salted output, matching each produced line against the
        expected lines for the current position, and rebuilds the merged
        .t content in ``postout``. Returns (exitcode, postout) where
        exitcode may be downgraded to False ("warned") for glob-only
        mismatches.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Everything after the salt is the "<line> <ret>" marker.
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                optional = []
                for i, el in enumerate(els):
                    r = False
                    if el:
                        r, exact = self.linematch(el, lout)
                    # NOTE(review): globmatch can return the bytes b'-glob';
                    # these native-str comparisons would then miss it on
                    # Python 3 and fall into the "unknown" branch — confirm.
                    if isinstance(r, str):
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    # Feature-conditional line whose
                                    # condition doesn't hold: optional.
                                    optional.append(i)
                        if exact:
                            # Don't allow line to be matches against a later
                            # line in the output
                            els.pop(i)
                            break

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        # Render unprintable bytes with an (esc) suffix.
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1570 1570
1571 1571 @staticmethod
1572 1572 def rematch(el, l):
1573 1573 try:
1574 1574 el = b'(?:' + el + b')'
1575 1575 # use \Z to ensure that the regex matches to the end of the string
1576 1576 if os.name == 'nt':
1577 1577 return re.match(el + br'\r?\n\Z', l)
1578 1578 return re.match(el + br'\n\Z', l)
1579 1579 except re.error:
1580 1580 # el is an invalid regex
1581 1581 return False
1582 1582
    @staticmethod
    def globmatch(el, l):
        """Match output line ``l`` against the glob expectation ``el``.

        Returns truthy on match, False on mismatch, or b'-glob' when the
        line matched literally and the (glob) annotation looks unnecessary.
        """
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                # NOTE(review): _processoutput compares this against the
                # native str '-glob'; on Python 3 bytes never equal str —
                # confirm the intended warn-only path still triggers.
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        # Translate the glob into a regex byte-by-byte, then delegate to
        # rematch for the anchored comparison.
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped glob metacharacter: keep it literal.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                # '/' also matches '\' on platforms with an alternate sep.
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1613 1613
    def linematch(self, el, l):
        """Compare expected line ``el`` with output line ``l``.

        Returns (match, exact): match is True/False or the string "retry"
        for optional lines that may match later output; exact tells the
        caller the line may only match at this position.
        """
        if el == l: # perfect match (fast)
            return True, True
        retry = False
        if el.endswith(b" (?)\n"):
            # Optional line: a failed match is not fatal.
            retry = "retry"
            el = el[:-5] + b"\n"
        else:
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    retry = "retry" # Not required by listed features

        if el.endswith(b" (esc)\n"):
            # Undo the (esc) byte-escaping before comparing.
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            # Retry with backslashes normalized to forward slashes.
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True
1650 1650
1651 1651 @staticmethod
1652 1652 def parsehghaveoutput(lines):
1653 1653 '''Parse hghave log lines.
1654 1654
1655 1655 Return tuple of lists (missing, failed):
1656 1656 * the missing/unknown features
1657 1657 * the features for which existence check failed'''
1658 1658 missing = []
1659 1659 failed = []
1660 1660 for line in lines:
1661 1661 if line.startswith(TTest.SKIPPED_PREFIX):
1662 1662 line = line.splitlines()[0]
1663 1663 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1664 1664 elif line.startswith(TTest.FAILED_PREFIX):
1665 1665 line = line.splitlines()[0]
1666 1666 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1667 1667
1668 1668 return missing, failed
1669 1669
    @staticmethod
    def _escapef(m):
        # re.sub callback: map one matched byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]
1673 1673
    @staticmethod
    def _stringescape(s):
        # Escape all unprintable/special bytes in s via ESCAPEMAP.
        return TTest.ESCAPESUB(TTest._escapef, s)
1677 1677
# Serializes writes to the shared output stream across worker threads.
iolock = threading.RLock()
# Guards the first-failure bookkeeping done in Test.runTest.
firstlock = threading.RLock()
# Set once a failure has been reported under --first (see Test.runTest),
# so racing tests suppress their own diff output.
firsterror = False
1681 1681
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        # Parsed command-line options controlling reporting behavior.
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Timing bookkeeping, filled in as tests run.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        # Colorize only when pygments is available, and honor --color.
        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1712 1712
    def onStart(self, test):
        """ Called before any test runs. Can be overridden by a custom
        TestResult.
        """
1716 1716
    def onEnd(self):
        """ Called after all tests have run. Can be overridden by a custom
        TestResult.
        """
1720 1720
1721 1721 def addFailure(self, test, reason):
1722 1722 self.failures.append((test, reason))
1723 1723
1724 1724 if self._options.first:
1725 1725 self.stop()
1726 1726 else:
1727 1727 with iolock:
1728 1728 if reason == "timed out":
1729 1729 self.stream.write('t')
1730 1730 else:
1731 1731 if not self._options.nodiff:
1732 1732 self.stream.write('\n')
1733 1733 # Exclude the '\n' from highlighting to lex correctly
1734 1734 formatted = 'ERROR: %s output changed\n' % test
1735 1735 self.stream.write(highlightmsg(formatted, self.color))
1736 1736 self.stream.write('!')
1737 1737
1738 1738 self.stream.flush()
1739 1739
    def addSuccess(self, test):
        # Record the pass for xunit reporting; iolock keeps the base
        # class's progress output thread-safe.
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)
1744 1744
1745 1745 def addError(self, test, err):
1746 1746 super(TestResult, self).addError(test, err)
1747 1747 if self._options.first:
1748 1748 self.stop()
1749 1749
1750 1750 # Polyfill.
1751 1751 def addSkip(self, test, reason):
1752 1752 self.skipped.append((test, reason))
1753 1753 with iolock:
1754 1754 if self.showAll:
1755 1755 self.stream.writeln('skipped %s' % reason)
1756 1756 else:
1757 1757 self.stream.write('s')
1758 1758 self.stream.flush()
1759 1759
1760 1760 def addIgnore(self, test, reason):
1761 1761 self.ignored.append((test, reason))
1762 1762 with iolock:
1763 1763 if self.showAll:
1764 1764 self.stream.writeln('ignored %s' % reason)
1765 1765 else:
1766 1766 if reason not in ('not retesting', "doesn't match keyword"):
1767 1767 self.stream.write('i')
1768 1768 else:
1769 1769 self.testsRun += 1
1770 1770 self.stream.flush()
1771 1771
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        ``ret`` is the test's exit status; ``got`` and ``expected`` are the
        actual and reference output lines. Returns True if the new output
        was accepted via the interactive prompt, a falsy value otherwise.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Show the diff through an external viewer program
                # instead of printing it inline.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Diff lines are bytes; write them through the
                        # underlying binary buffer, flushing the text
                        # layer first so output stays ordered.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    # The reference output changed while we were running
                    # (e.g. another process updated it); don't prompt
                    # against stale data.
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            # .t tests carry expected output inline, so the
                            # .err output becomes the new test file itself.
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1829 1829
1830 1830 def startTest(self, test):
1831 1831 super(TestResult, self).startTest(test)
1832 1832
1833 1833 # os.times module computes the user time and system time spent by
1834 1834 # child's processes along with real elapsed time taken by a process.
1835 1835 # This module has one limitation. It can only work for Linux user
1836 1836 # and not for Windows.
1837 1837 test.started = os.times()
1838 1838 if self._firststarttime is None: # thread racy but irrelevant
1839 1839 self._firststarttime = test.started[4]
1840 1840
1841 1841 def stopTest(self, test, interrupted=False):
1842 1842 super(TestResult, self).stopTest(test)
1843 1843
1844 1844 test.stopped = os.times()
1845 1845
1846 1846 starttime = test.started
1847 1847 endtime = test.stopped
1848 1848 origin = self._firststarttime
1849 1849 self.times.append((test.name,
1850 1850 endtime[2] - starttime[2], # user space CPU time
1851 1851 endtime[3] - starttime[3], # sys space CPU time
1852 1852 endtime[4] - starttime[4], # real time
1853 1853 starttime[4] - origin, # start date in run context
1854 1854 endtime[4] - origin, # end date in run context
1855 1855 ))
1856 1856
1857 1857 if interrupted:
1858 1858 with iolock:
1859 1859 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1860 1860 test.name, self.times[-1][3]))
1861 1861
def getTestResult():
    """Return the TestResult class to use for this run.

    When the CUSTOM_TEST_RESULT environment variable is set, it names a
    module whose ``TestResult`` attribute is used instead of the default
    implementation defined in this file.
    """
    custom = os.environ.get("CUSTOM_TEST_RESULT")
    if custom is not None:
        return __import__(custom).TestResult
    return TestResult
1871 1871
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        """Filter, schedule and execute the tests, reporting into result."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            # get() is defined inside the loop but always called within
            # the same iteration, so the closure over ``test`` is safe.
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                # --retest: only rerun tests that left a .err file behind.
                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                # -k/--keywords: every keyword must appear in the test
                # body or name for the test to run.
                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One display slot per job for the --showchannels progress view.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim the first free channel; mark it busy for the duration
            # of the test and always release it on the way out.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: repaint the channel status line roughly
            # once per second until ``channels`` is cleared by run().
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Drain a completion slot when the pool is full, a result
                # is already pending, or no tests remain to start.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test so the run never ends.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test being reported incorrectly.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Clearing channels also tells the stat() thread to exit.
        channels = []

        return result
2057 2057
2058 2058 # Save the most recent 5 wall-clock runtimes of each test to a
2059 2059 # human-readable text file named .testtimes. Tests are sorted
2060 2060 # alphabetically, while times for each test are listed from oldest to
2061 2061 # newest.
2062 2062
def loadtimes(outputdir):
    """Load recorded test runtimes from .testtimes in outputdir.

    Returns a list of ``(testname, [runtimes])`` tuples, runtimes ordered
    oldest to newest. A missing file yields an empty list; malformed
    lines are skipped instead of aborting the whole run.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if m is None:
                    # A corrupt/blank line used to crash with
                    # AttributeError on m.group(); ignore it instead.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        # A missing times file simply means no history yet.
        if err.errno != errno.ENOENT:
            raise
    return times
2075 2075
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into .testtimes in outputdir.

    Keeps at most the five most recent runtimes per test (skipped tests
    are excluded) and replaces the file via a temporary so readers never
    see a partially written file.
    """
    saved = dict(loadtimes(outputdir))
    maxruns = 5
    skipped = {str(t[0]) for t in result.skipped}
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test in skipped:
            continue
        history = saved.setdefault(test, [])
        history.append(real)
        # Trim to the most recent maxruns entries.
        del history[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            fp.write('%s %s\n' %
                     (name, ' '.join('%.3f' % (t,) for t in history)))
    timepath = os.path.join(outputdir, b'.testtimes')
    # Unlink first for the benefit of platforms where rename does not
    # overwrite; ignore errors either way (timing data is best-effort).
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2101 2101
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        # runner is the TestRunner carrying global options and output
        # paths; remaining args go to unittest.TextTestRunner.
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

        self._result = getTestResult()(self._runner.options, self.stream,
                                       self.descriptions, self.verbosity)

    def listtests(self, test):
        """List the given tests by name instead of executing them,
        still emitting xunit/json reports when requested."""
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        """Execute the suite, then print the summary and emit any
        requested reports (xunit, json, timing, exceptions, bisection)."""
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        # Hold the output lock for the whole summary so concurrent test
        # threads cannot interleave output with it.
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in self._result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in self._result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in self._result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed))
            if failed:
                # The hash seed is needed to reproduce ordering-dependent
                # failures.
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        """Run `hg bisect` to find the changeset that broke each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run args with plain hg output, returning combined
            # stdout+stderr as bytes.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            # (The trailing comma on the next line is harmless - it just
            # makes the expression a 1-tuple whose value is discarded.)
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write result to outf as a JUnit-style XML report."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write result to outf as a ``testreport = {...}`` JSON blob."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2345 2345
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests into the order they should run.

    With ``shuffle`` the order is randomized. Otherwise, when recorded
    ``previoustimes`` are available, the slowest tests (by most recent
    runtime) come first; failing that, file size combined with a table
    of known-slow keywords serves as a heuristic.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(f):
            f = f['path']
            # Most recent recorded runtime is the estimate; brand new
            # tests default to a rather arbitrary one second.
            return -previoustimes[f][-1] if f in previoustimes else -1.0
    else:
        # Multipliers for test names known to correlate with slowness.
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        # Memoized sort keys, keyed by test path.
        perf = {}

        def sortkey(f):
            # run largest tests first, as they tend to take the longest
            f = f['path']
            if f in perf:
                return perf[f]
            try:
                val = -os.stat(f).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                perf[f] = -1e9  # file does not exist, tell early
                return -1e9
            for kw, mul in slow.items():
                if kw in f:
                    val *= mul
            if f.endswith(b'.py'):
                # Python tests tend to be quicker than .t tests of the
                # same size.
                val /= 10.0
            perf[f] = val / 1000.0
            return perf[f]

    testdescs.sort(key=sortkey)
2399 2399
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests. Presumably verified at startup
    # by _checktools() (called from run()); confirm in its definition.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2421 2421
    def __init__(self):
        """Create a runner; all state is populated later by run()/_run()."""
        # Parsed command line options.
        self.options = None
        # Directories and files used by the run (set up in _run()).
        self._hgroot = None
        self._testdir = None
        self._outputdir = None
        self._hgtmp = None
        self._installdir = None
        self._bindir = None
        # NOTE(review): '_tmpbinddir' looks like a typo of '_tmpbindir'
        # (which is what _run() assigns); verify no external reader
        # depends on this name before renaming.
        self._tmpbinddir = None
        self._pythondir = None
        self._coveragefile = None
        self._createdfiles = []
        # Name and resolved path of the hg executable under test.
        self._hgcommand = None
        self._hgpath = None
        # State for handing out network ports to tests.
        self._portoffset = 0
        self._ports = {}
2438 2438
2439 2439 def run(self, args, parser=None):
2440 2440 """Run the test suite."""
2441 2441 oldmask = os.umask(0o22)
2442 2442 try:
2443 2443 parser = parser or getparser()
2444 2444 options = parseargs(args, parser)
2445 2445 tests = [_bytespath(a) for a in options.tests]
2446 2446 if options.test_list is not None:
2447 2447 for listfile in options.test_list:
2448 2448 with open(listfile, 'rb') as f:
2449 2449 tests.extend(t for t in f.read().splitlines() if t)
2450 2450 self.options = options
2451 2451
2452 2452 self._checktools()
2453 2453 testdescs = self.findtests(tests)
2454 2454 if options.profile_runner:
2455 2455 import statprof
2456 2456 statprof.start()
2457 2457 result = self._run(testdescs)
2458 2458 if options.profile_runner:
2459 2459 statprof.stop()
2460 2460 statprof.display()
2461 2461 return result
2462 2462
2463 2463 finally:
2464 2464 os.umask(oldmask)
2465 2465
    def _run(self, testdescs):
        """Prepare the environment and execute testdescs.

        Sets up temp dirs, locates or installs the hg under test, builds
        PATH/PYTHONPATH and the HG* environment variables, then hands off
        to _runtests(). Returns the run's exit code.
        """
        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # Use an externally provided hg instead of installing one.
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            # Default: install hg into a temp directory for the run.
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give spawned processes a moment to release files before
            # cleanup removes the temp directories.
            time.sleep(.1)
            self._cleanup()
2643 2643
2644 2644 def findtests(self, args):
2645 2645 """Finds possible test files from arguments.
2646 2646
2647 2647 If you wish to inject custom tests into the test harness, this would
2648 2648 be a good function to monkeypatch or override in a derived class.
2649 2649 """
2650 2650 if not args:
2651 2651 if self.options.changed:
2652 2652 proc = Popen4('hg st --rev "%s" -man0 .' %
2653 2653 self.options.changed, None, 0)
2654 2654 stdout, stderr = proc.communicate()
2655 2655 args = stdout.strip(b'\0').split(b'\0')
2656 2656 else:
2657 2657 args = os.listdir(b'.')
2658 2658
2659 2659 expanded_args = []
2660 2660 for arg in args:
2661 2661 if os.path.isdir(arg):
2662 2662 if not arg.endswith(b'/'):
2663 2663 arg += b'/'
2664 2664 expanded_args.extend([arg + a for a in os.listdir(arg)])
2665 2665 else:
2666 2666 expanded_args.append(arg)
2667 2667 args = expanded_args
2668 2668
2669 2669 testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))')
2670 2670 tests = []
2671 2671 for t in args:
2672 2672 case = None
2673 2673
2674 2674 if not (os.path.basename(t).startswith(b'test-')
2675 2675 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2676 2676
2677 2677 m = testcasepattern.match(t)
2678 2678 if m is not None:
2679 2679 t, _, case = m.groups()
2680 2680 else:
2681 2681 continue
2682 2682
2683 2683 if t.endswith(b'.t'):
2684 2684 # .t file may contain multiple test cases
2685 2685 cases = sorted(parsettestcases(t))
2686 2686 if cases:
2687 2687 if case is not None and case in cases:
2688 2688 tests += [{'path': t, 'case': case}]
2689 2689 elif case is not None and case not in cases:
2690 2690 # Ignore invalid cases
2691 2691 pass
2692 2692 else:
2693 2693 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2694 2694 else:
2695 2695 tests.append({'path': t})
2696 2696 else:
2697 2697 tests.append({'path': t})
2698 2698 return tests
2699 2699
2700 2700 def _runtests(self, testdescs):
2701 2701 def _reloadtest(test, i):
2702 2702 # convert a test back to its description dict
2703 2703 desc = {'path': test.path}
2704 2704 case = getattr(test, '_case', None)
2705 2705 if case:
2706 2706 desc['case'] = case
2707 2707 return self._gettest(desc, i)
2708 2708
2709 2709 try:
2710 2710 if self.options.restart:
2711 2711 orig = list(testdescs)
2712 2712 while testdescs:
2713 2713 desc = testdescs[0]
2714 2714 # desc['path'] is a relative path
2715 2715 if 'case' in desc:
2716 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2716 errpath = b'%s#%s.err' % (desc['path'], desc['case'])
2717 2717 else:
2718 2718 errpath = b'%s.err' % desc['path']
2719 2719 errpath = os.path.join(self._outputdir, errpath)
2720 2720 if os.path.exists(errpath):
2721 2721 break
2722 2722 testdescs.pop(0)
2723 2723 if not testdescs:
2724 2724 print("running all tests")
2725 2725 testdescs = orig
2726 2726
2727 2727 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2728 2728
2729 2729 failed = False
2730 2730 kws = self.options.keywords
2731 2731 if kws is not None and PYTHON3:
2732 2732 kws = kws.encode('utf-8')
2733 2733
2734 2734 suite = TestSuite(self._testdir,
2735 2735 jobs=self.options.jobs,
2736 2736 whitelist=self.options.whitelisted,
2737 2737 blacklist=self.options.blacklist,
2738 2738 retest=self.options.retest,
2739 2739 keywords=kws,
2740 2740 loop=self.options.loop,
2741 2741 runs_per_test=self.options.runs_per_test,
2742 2742 showchannels=self.options.showchannels,
2743 2743 tests=tests, loadtest=_reloadtest)
2744 2744 verbosity = 1
2745 2745 if self.options.list_tests:
2746 2746 verbosity = 0
2747 2747 elif self.options.verbose:
2748 2748 verbosity = 2
2749 2749 runner = TextTestRunner(self, verbosity=verbosity)
2750 2750
2751 2751 if self.options.list_tests:
2752 2752 result = runner.listtests(suite)
2753 2753 else:
2754 2754 if self._installdir:
2755 2755 self._installhg()
2756 2756 self._checkhglib("Testing")
2757 2757 else:
2758 2758 self._usecorrectpython()
2759 2759 if self.options.chg:
2760 2760 assert self._installdir
2761 2761 self._installchg()
2762 2762
2763 2763 result = runner.run(suite)
2764 2764
2765 2765 if result.failures:
2766 2766 failed = True
2767 2767
2768 2768 result.onEnd()
2769 2769
2770 2770 if self.options.anycoverage:
2771 2771 self._outputcoverage()
2772 2772 except KeyboardInterrupt:
2773 2773 failed = True
2774 2774 print("\ninterrupted!")
2775 2775
2776 2776 if failed:
2777 2777 return 1
2778 2778
2779 2779 def _getport(self, count):
2780 2780 port = self._ports.get(count) # do we have a cached entry?
2781 2781 if port is None:
2782 2782 portneeded = 3
2783 2783 # above 100 tries we just give up and let test reports failure
2784 2784 for tries in xrange(100):
2785 2785 allfree = True
2786 2786 port = self.options.port + self._portoffset
2787 2787 for idx in xrange(portneeded):
2788 2788 if not checkportisavailable(port + idx):
2789 2789 allfree = False
2790 2790 break
2791 2791 self._portoffset += portneeded
2792 2792 if allfree:
2793 2793 break
2794 2794 self._ports[count] = port
2795 2795 return port
2796 2796
2797 2797 def _gettest(self, testdesc, count):
2798 2798 """Obtain a Test by looking at its filename.
2799 2799
2800 2800 Returns a Test instance. The Test may not be runnable if it doesn't
2801 2801 map to a known type.
2802 2802 """
2803 2803 path = testdesc['path']
2804 2804 lctest = path.lower()
2805 2805 testcls = Test
2806 2806
2807 2807 for ext, cls in self.TESTTYPES:
2808 2808 if lctest.endswith(ext):
2809 2809 testcls = cls
2810 2810 break
2811 2811
2812 2812 refpath = os.path.join(self._testdir, path)
2813 2813 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2814 2814
2815 2815 # extra keyword parameters. 'case' is used by .t tests
2816 2816 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2817 2817
2818 2818 t = testcls(refpath, self._outputdir, tmpdir,
2819 2819 keeptmpdir=self.options.keep_tmpdir,
2820 2820 debug=self.options.debug,
2821 2821 first=self.options.first,
2822 2822 timeout=self.options.timeout,
2823 2823 startport=self._getport(count),
2824 2824 extraconfigopts=self.options.extra_config_opt,
2825 2825 py3kwarnings=self.options.py3k_warnings,
2826 2826 shell=self.options.shell,
2827 2827 hgcommand=self._hgcommand,
2828 2828 usechg=bool(self.options.with_chg or self.options.chg),
2829 2829 useipv6=useipv6, **kwds)
2830 2830 t.should_reload = True
2831 2831 return t
2832 2832
2833 2833 def _cleanup(self):
2834 2834 """Clean up state from this test invocation."""
2835 2835 if self.options.keep_tmpdir:
2836 2836 return
2837 2837
2838 2838 vlog("# Cleaning up HGTMP", self._hgtmp)
2839 2839 shutil.rmtree(self._hgtmp, True)
2840 2840 for f in self._createdfiles:
2841 2841 try:
2842 2842 os.remove(f)
2843 2843 except OSError:
2844 2844 pass
2845 2845
2846 2846 def _usecorrectpython(self):
2847 2847 """Configure the environment to use the appropriate Python in tests."""
2848 2848 # Tests must use the same interpreter as us or bad things will happen.
2849 2849 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2850 2850 if getattr(os, 'symlink', None):
2851 2851 vlog("# Making python executable in test path a symlink to '%s'" %
2852 2852 sys.executable)
2853 2853 mypython = os.path.join(self._tmpbindir, pyexename)
2854 2854 try:
2855 2855 if os.readlink(mypython) == sys.executable:
2856 2856 return
2857 2857 os.unlink(mypython)
2858 2858 except OSError as err:
2859 2859 if err.errno != errno.ENOENT:
2860 2860 raise
2861 2861 if self._findprogram(pyexename) != sys.executable:
2862 2862 try:
2863 2863 os.symlink(sys.executable, mypython)
2864 2864 self._createdfiles.append(mypython)
2865 2865 except OSError as err:
2866 2866 # child processes may race, which is harmless
2867 2867 if err.errno != errno.EEXIST:
2868 2868 raise
2869 2869 else:
2870 2870 exedir, exename = os.path.split(sys.executable)
2871 2871 vlog("# Modifying search path to find %s as %s in '%s'" %
2872 2872 (exename, pyexename, exedir))
2873 2873 path = os.environ['PATH'].split(os.pathsep)
2874 2874 while exedir in path:
2875 2875 path.remove(exedir)
2876 2876 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2877 2877 if not self._findprogram(pyexename):
2878 2878 print("WARNING: Cannot find %s in search path" % pyexename)
2879 2879
2880 2880 def _installhg(self):
2881 2881 """Install hg into the test environment.
2882 2882
2883 2883 This will also configure hg with the appropriate testing settings.
2884 2884 """
2885 2885 vlog("# Performing temporary installation of HG")
2886 2886 installerrs = os.path.join(self._hgtmp, b"install.err")
2887 2887 compiler = ''
2888 2888 if self.options.compiler:
2889 2889 compiler = '--compiler ' + self.options.compiler
2890 2890 if self.options.pure:
2891 2891 pure = b"--pure"
2892 2892 else:
2893 2893 pure = b""
2894 2894
2895 2895 # Run installer in hg root
2896 2896 script = os.path.realpath(sys.argv[0])
2897 2897 exe = sys.executable
2898 2898 if PYTHON3:
2899 2899 compiler = _bytespath(compiler)
2900 2900 script = _bytespath(script)
2901 2901 exe = _bytespath(exe)
2902 2902 hgroot = os.path.dirname(os.path.dirname(script))
2903 2903 self._hgroot = hgroot
2904 2904 os.chdir(hgroot)
2905 2905 nohome = b'--home=""'
2906 2906 if os.name == 'nt':
2907 2907 # The --home="" trick works only on OS where os.sep == '/'
2908 2908 # because of a distutils convert_path() fast-path. Avoid it at
2909 2909 # least on Windows for now, deal with .pydistutils.cfg bugs
2910 2910 # when they happen.
2911 2911 nohome = b''
2912 2912 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2913 2913 b' build %(compiler)s --build-base="%(base)s"'
2914 2914 b' install --force --prefix="%(prefix)s"'
2915 2915 b' --install-lib="%(libdir)s"'
2916 2916 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2917 2917 % {b'exe': exe, b'pure': pure,
2918 2918 b'compiler': compiler,
2919 2919 b'base': os.path.join(self._hgtmp, b"build"),
2920 2920 b'prefix': self._installdir, b'libdir': self._pythondir,
2921 2921 b'bindir': self._bindir,
2922 2922 b'nohome': nohome, b'logfile': installerrs})
2923 2923
2924 2924 # setuptools requires install directories to exist.
2925 2925 def makedirs(p):
2926 2926 try:
2927 2927 os.makedirs(p)
2928 2928 except OSError as e:
2929 2929 if e.errno != errno.EEXIST:
2930 2930 raise
2931 2931 makedirs(self._pythondir)
2932 2932 makedirs(self._bindir)
2933 2933
2934 2934 vlog("# Running", cmd)
2935 2935 if os.system(cmd) == 0:
2936 2936 if not self.options.verbose:
2937 2937 try:
2938 2938 os.remove(installerrs)
2939 2939 except OSError as e:
2940 2940 if e.errno != errno.ENOENT:
2941 2941 raise
2942 2942 else:
2943 2943 with open(installerrs, 'rb') as f:
2944 2944 for line in f:
2945 2945 if PYTHON3:
2946 2946 sys.stdout.buffer.write(line)
2947 2947 else:
2948 2948 sys.stdout.write(line)
2949 2949 sys.exit(1)
2950 2950 os.chdir(self._testdir)
2951 2951
2952 2952 self._usecorrectpython()
2953 2953
2954 2954 if self.options.py3k_warnings and not self.options.anycoverage:
2955 2955 vlog("# Updating hg command to enable Py3k Warnings switch")
2956 2956 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
2957 2957 lines = [line.rstrip() for line in f]
2958 2958 lines[0] += ' -3'
2959 2959 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
2960 2960 for line in lines:
2961 2961 f.write(line + '\n')
2962 2962
2963 2963 hgbat = os.path.join(self._bindir, b'hg.bat')
2964 2964 if os.path.isfile(hgbat):
2965 2965 # hg.bat expects to be put in bin/scripts while run-tests.py
2966 2966 # installation layout put it in bin/ directly. Fix it
2967 2967 with open(hgbat, 'rb') as f:
2968 2968 data = f.read()
2969 2969 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2970 2970 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2971 2971 b'"%~dp0python" "%~dp0hg" %*')
2972 2972 with open(hgbat, 'wb') as f:
2973 2973 f.write(data)
2974 2974 else:
2975 2975 print('WARNING: cannot fix hg.bat reference to python.exe')
2976 2976
2977 2977 if self.options.anycoverage:
2978 2978 custom = os.path.join(self._testdir, 'sitecustomize.py')
2979 2979 target = os.path.join(self._pythondir, 'sitecustomize.py')
2980 2980 vlog('# Installing coverage trigger to %s' % target)
2981 2981 shutil.copyfile(custom, target)
2982 2982 rc = os.path.join(self._testdir, '.coveragerc')
2983 2983 vlog('# Installing coverage rc to %s' % rc)
2984 2984 os.environ['COVERAGE_PROCESS_START'] = rc
2985 2985 covdir = os.path.join(self._installdir, '..', 'coverage')
2986 2986 try:
2987 2987 os.mkdir(covdir)
2988 2988 except OSError as e:
2989 2989 if e.errno != errno.EEXIST:
2990 2990 raise
2991 2991
2992 2992 os.environ['COVERAGE_DIR'] = covdir
2993 2993
2994 2994 def _checkhglib(self, verb):
2995 2995 """Ensure that the 'mercurial' package imported by python is
2996 2996 the one we expect it to be. If not, print a warning to stderr."""
2997 2997 if ((self._bindir == self._pythondir) and
2998 2998 (self._bindir != self._tmpbindir)):
2999 2999 # The pythondir has been inferred from --with-hg flag.
3000 3000 # We cannot expect anything sensible here.
3001 3001 return
3002 3002 expecthg = os.path.join(self._pythondir, b'mercurial')
3003 3003 actualhg = self._gethgpath()
3004 3004 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3005 3005 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3006 3006 ' (expected %s)\n'
3007 3007 % (verb, actualhg, expecthg))
3008 3008 def _gethgpath(self):
3009 3009 """Return the path to the mercurial package that is actually found by
3010 3010 the current Python interpreter."""
3011 3011 if self._hgpath is not None:
3012 3012 return self._hgpath
3013 3013
3014 3014 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
3015 3015 cmd = cmd % PYTHON
3016 3016 if PYTHON3:
3017 3017 cmd = _strpath(cmd)
3018 3018 pipe = os.popen(cmd)
3019 3019 try:
3020 3020 self._hgpath = _bytespath(pipe.read().strip())
3021 3021 finally:
3022 3022 pipe.close()
3023 3023
3024 3024 return self._hgpath
3025 3025
3026 3026 def _installchg(self):
3027 3027 """Install chg into the test environment"""
3028 3028 vlog('# Performing temporary installation of CHG')
3029 3029 assert os.path.dirname(self._bindir) == self._installdir
3030 3030 assert self._hgroot, 'must be called after _installhg()'
3031 3031 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3032 3032 % {b'make': 'make', # TODO: switch by option or environment?
3033 3033 b'prefix': self._installdir})
3034 3034 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3035 3035 vlog("# Running", cmd)
3036 3036 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3037 3037 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3038 3038 stderr=subprocess.STDOUT)
3039 3039 out, _err = proc.communicate()
3040 3040 if proc.returncode != 0:
3041 3041 if PYTHON3:
3042 3042 sys.stdout.buffer.write(out)
3043 3043 else:
3044 3044 sys.stdout.write(out)
3045 3045 sys.exit(1)
3046 3046
3047 3047 def _outputcoverage(self):
3048 3048 """Produce code coverage output."""
3049 3049 import coverage
3050 3050 coverage = coverage.coverage
3051 3051
3052 3052 vlog('# Producing coverage report')
3053 3053 # chdir is the easiest way to get short, relative paths in the
3054 3054 # output.
3055 3055 os.chdir(self._hgroot)
3056 3056 covdir = os.path.join(self._installdir, '..', 'coverage')
3057 3057 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3058 3058
3059 3059 # Map install directory paths back to source directory.
3060 3060 cov.config.paths['srcdir'] = ['.', self._pythondir]
3061 3061
3062 3062 cov.combine()
3063 3063
3064 3064 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3065 3065 cov.report(ignore_errors=True, omit=omit)
3066 3066
3067 3067 if self.options.htmlcov:
3068 3068 htmldir = os.path.join(self._outputdir, 'htmlcov')
3069 3069 cov.html_report(directory=htmldir, omit=omit)
3070 3070 if self.options.annotate:
3071 3071 adir = os.path.join(self._outputdir, 'annotated')
3072 3072 if not os.path.isdir(adir):
3073 3073 os.mkdir(adir)
3074 3074 cov.annotate(directory=adir, omit=omit)
3075 3075
3076 3076 def _findprogram(self, program):
3077 3077 """Search PATH for a executable program"""
3078 3078 dpb = _bytespath(os.defpath)
3079 3079 sepb = _bytespath(os.pathsep)
3080 3080 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3081 3081 name = os.path.join(p, program)
3082 3082 if os.name == 'nt' or os.access(name, os.X_OK):
3083 3083 return name
3084 3084 return None
3085 3085
3086 3086 def _checktools(self):
3087 3087 """Ensure tools required to run tests are present."""
3088 3088 for p in self.REQUIREDTOOLS:
3089 3089 if os.name == 'nt' and not p.endswith('.exe'):
3090 3090 p += '.exe'
3091 3091 found = self._findprogram(p)
3092 3092 if found:
3093 3093 vlog("# Found prerequisite", p, "at", found)
3094 3094 else:
3095 3095 print("WARNING: Did not find prerequisite tool: %s " %
3096 3096 p.decode("utf-8"))
3097 3097
3098 3098 def aggregateexceptions(path):
3099 3099 exceptioncounts = collections.Counter()
3100 3100 testsbyfailure = collections.defaultdict(set)
3101 3101 failuresbytest = collections.defaultdict(set)
3102 3102
3103 3103 for f in os.listdir(path):
3104 3104 with open(os.path.join(path, f), 'rb') as fh:
3105 3105 data = fh.read().split(b'\0')
3106 3106 if len(data) != 5:
3107 3107 continue
3108 3108
3109 3109 exc, mainframe, hgframe, hgline, testname = data
3110 3110 exc = exc.decode('utf-8')
3111 3111 mainframe = mainframe.decode('utf-8')
3112 3112 hgframe = hgframe.decode('utf-8')
3113 3113 hgline = hgline.decode('utf-8')
3114 3114 testname = testname.decode('utf-8')
3115 3115
3116 3116 key = (hgframe, hgline, exc)
3117 3117 exceptioncounts[key] += 1
3118 3118 testsbyfailure[key].add(testname)
3119 3119 failuresbytest[testname].add(key)
3120 3120
3121 3121 # Find test having fewest failures for each failure.
3122 3122 leastfailing = {}
3123 3123 for key, tests in testsbyfailure.items():
3124 3124 fewesttest = None
3125 3125 fewestcount = 99999999
3126 3126 for test in sorted(tests):
3127 3127 if len(failuresbytest[test]) < fewestcount:
3128 3128 fewesttest = test
3129 3129 fewestcount = len(failuresbytest[test])
3130 3130
3131 3131 leastfailing[key] = (fewestcount, fewesttest)
3132 3132
3133 3133 # Create a combined counter so we can sort by total occurrences and
3134 3134 # impacted tests.
3135 3135 combined = {}
3136 3136 for key in exceptioncounts:
3137 3137 combined[key] = (exceptioncounts[key],
3138 3138 len(testsbyfailure[key]),
3139 3139 leastfailing[key][0],
3140 3140 leastfailing[key][1])
3141 3141
3142 3142 return {
3143 3143 'exceptioncounts': exceptioncounts,
3144 3144 'total': sum(exceptioncounts.values()),
3145 3145 'combined': combined,
3146 3146 'leastfailing': leastfailing,
3147 3147 'byfailure': testsbyfailure,
3148 3148 'bytest': failuresbytest,
3149 3149 }
3150 3150
3151 3151 if __name__ == '__main__':
3152 3152 runner = TestRunner()
3153 3153
3154 3154 try:
3155 3155 import msvcrt
3156 3156 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3157 3157 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3158 3158 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3159 3159 except ImportError:
3160 3160 pass
3161 3161
3162 3162 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1823 +1,1823
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ $PYTHON $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10
11 11 # Ran 0 tests, 0 skipped, 0 failed.
12 12
13 13 Define a helper to avoid the install step
14 14 =============
15 15 $ rt()
16 16 > {
17 17 > $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 18 > }
19 19
20 20 error paths
21 21
22 22 #if symlink
23 23 $ ln -s `which true` hg
24 24 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
25 25 warning: --with-hg should specify an hg script
26 26
27 27 # Ran 0 tests, 0 skipped, 0 failed.
28 28 $ rm hg
29 29 #endif
30 30
31 31 #if execbit
32 32 $ touch hg
33 33 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
34 34 usage: run-tests.py [options] [tests]
35 35 run-tests.py: error: --with-hg must specify an executable hg script
36 36 [2]
37 37 $ rm hg
38 38 #endif
39 39
40 40 Features for testing optional lines
41 41 ===================================
42 42
43 43 $ cat > hghaveaddon.py <<EOF
44 44 > import hghave
45 45 > @hghave.check("custom", "custom hghave feature")
46 46 > def has_custom():
47 47 > return True
48 48 > @hghave.check("missing", "missing hghave feature")
49 49 > def has_missing():
50 50 > return False
51 51 > EOF
52 52
53 53 an empty test
54 54 =======================
55 55
56 56 $ touch test-empty.t
57 57 $ rt
58 58 .
59 59 # Ran 1 tests, 0 skipped, 0 failed.
60 60 $ rm test-empty.t
61 61
62 62 a successful test
63 63 =======================
64 64
65 65 $ cat > test-success.t << EOF
66 66 > $ echo babar
67 67 > babar
68 68 > $ echo xyzzy
69 69 > dont_print (?)
70 70 > nothing[42]line (re) (?)
71 71 > never*happens (glob) (?)
72 72 > more_nothing (?)
73 73 > xyzzy
74 74 > nor this (?)
75 75 > $ printf 'abc\ndef\nxyz\n'
76 76 > 123 (?)
77 77 > abc
78 78 > def (?)
79 79 > 456 (?)
80 80 > xyz
81 81 > $ printf 'zyx\nwvu\ntsr\n'
82 82 > abc (?)
83 83 > zyx (custom !)
84 84 > wvu
85 85 > no_print (no-custom !)
86 86 > tsr (no-missing !)
87 87 > missing (missing !)
88 88 > EOF
89 89
90 90 $ rt
91 91 .
92 92 # Ran 1 tests, 0 skipped, 0 failed.
93 93
94 94 failing test
95 95 ==================
96 96
97 97 test churn with globs
98 98 $ cat > test-failure.t <<EOF
99 99 > $ echo "bar-baz"; echo "bar-bad"; echo foo
100 100 > bar*bad (glob)
101 101 > bar*baz (glob)
102 102 > | fo (re)
103 103 > EOF
104 104 $ rt test-failure.t
105 105
106 106 --- $TESTTMP/test-failure.t
107 107 +++ $TESTTMP/test-failure.t.err
108 108 @@ -1,4 +1,4 @@
109 109 $ echo "bar-baz"; echo "bar-bad"; echo foo
110 110 + bar*baz (glob)
111 111 bar*bad (glob)
112 112 - bar*baz (glob)
113 113 - | fo (re)
114 114 + foo
115 115
116 116 ERROR: test-failure.t output changed
117 117 !
118 118 Failed test-failure.t: output changed
119 119 # Ran 1 tests, 0 skipped, 1 failed.
120 120 python hash seed: * (glob)
121 121 [1]
122 122
123 123 test how multiple globs gets matched with lines in output
124 124 $ cat > test-failure-globs.t <<EOF
125 125 > $ echo "context"; echo "context"; \
126 126 > echo "key: 1"; echo "value: not a"; \
127 127 > echo "key: 2"; echo "value: not b"; \
128 128 > echo "key: 3"; echo "value: c"; \
129 129 > echo "key: 4"; echo "value: d"
130 130 > context
131 131 > context
132 132 > key: 1
133 133 > value: a
134 134 > key: 2
135 135 > value: b
136 136 > key: 3
137 137 > value: * (glob)
138 138 > key: 4
139 139 > value: * (glob)
140 140 > EOF
141 141 $ rt test-failure-globs.t
142 142
143 143 --- $TESTTMP/test-failure-globs.t
144 144 +++ $TESTTMP/test-failure-globs.t.err
145 145 @@ -2,9 +2,9 @@
146 146 context
147 147 context
148 148 key: 1
149 149 - value: a
150 150 + value: not a
151 151 key: 2
152 152 - value: b
153 153 + value: not b
154 154 key: 3
155 155 value: * (glob)
156 156 key: 4
157 157
158 158 ERROR: test-failure-globs.t output changed
159 159 !
160 160 Failed test-failure-globs.t: output changed
161 161 # Ran 1 tests, 0 skipped, 1 failed.
162 162 python hash seed: * (glob)
163 163 [1]
164 164 $ rm test-failure-globs.t
165 165
166 166 test diff colorisation
167 167
168 168 #if no-windows pygments
169 169 $ rt test-failure.t --color always
170 170
171 171 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
172 172 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
173 173 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
174 174 $ echo "bar-baz"; echo "bar-bad"; echo foo
175 175 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
176 176 bar*bad (glob)
177 177 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
178 178 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
179 179 \x1b[38;5;34m+ foo\x1b[39m (esc)
180 180
181 181 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
182 182 !
183 183 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
184 184 # Ran 1 tests, 0 skipped, 1 failed.
185 185 python hash seed: * (glob)
186 186 [1]
187 187
188 188 $ rt test-failure.t 2> tmp.log
189 189 [1]
190 190 $ cat tmp.log
191 191
192 192 --- $TESTTMP/test-failure.t
193 193 +++ $TESTTMP/test-failure.t.err
194 194 @@ -1,4 +1,4 @@
195 195 $ echo "bar-baz"; echo "bar-bad"; echo foo
196 196 + bar*baz (glob)
197 197 bar*bad (glob)
198 198 - bar*baz (glob)
199 199 - | fo (re)
200 200 + foo
201 201
202 202 ERROR: test-failure.t output changed
203 203 !
204 204 Failed test-failure.t: output changed
205 205 # Ran 1 tests, 0 skipped, 1 failed.
206 206 python hash seed: * (glob)
207 207 #endif
208 208
209 209 $ cat > test-failure.t << EOF
210 210 > $ true
211 211 > should go away (true !)
212 212 > $ true
213 213 > should stay (false !)
214 214 >
215 215 > Should remove first line, not second or third
216 216 > $ echo 'testing'
217 217 > baz*foo (glob) (true !)
218 218 > foobar*foo (glob) (false !)
219 219 > te*ting (glob) (true !)
220 220 >
221 221 > Should keep first two lines, remove third and last
222 222 > $ echo 'testing'
223 223 > test.ng (re) (true !)
224 224 > foo.ar (re) (false !)
225 225 > b.r (re) (true !)
226 226 > missing (?)
227 227 > awol (true !)
228 228 >
229 229 > The "missing" line should stay, even though awol is dropped
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (?)
233 233 > awol
234 234 > missing (?)
235 235 > EOF
236 236 $ rt test-failure.t
237 237
238 238 --- $TESTTMP/test-failure.t
239 239 +++ $TESTTMP/test-failure.t.err
240 240 @@ -1,11 +1,9 @@
241 241 $ true
242 242 - should go away (true !)
243 243 $ true
244 244 should stay (false !)
245 245
246 246 Should remove first line, not second or third
247 247 $ echo 'testing'
248 248 - baz*foo (glob) (true !)
249 249 foobar*foo (glob) (false !)
250 250 te*ting (glob) (true !)
251 251
252 252 foo.ar (re) (false !)
253 253 missing (?)
254 254 @@ -13,13 +11,10 @@
255 255 $ echo 'testing'
256 256 test.ng (re) (true !)
257 257 foo.ar (re) (false !)
258 258 - b.r (re) (true !)
259 259 missing (?)
260 260 - awol (true !)
261 261
262 262 The "missing" line should stay, even though awol is dropped
263 263 $ echo 'testing'
264 264 test.ng (re) (true !)
265 265 foo.ar (?)
266 266 - awol
267 267 missing (?)
268 268
269 269 ERROR: test-failure.t output changed
270 270 !
271 271 Failed test-failure.t: output changed
272 272 # Ran 1 tests, 0 skipped, 1 failed.
273 273 python hash seed: * (glob)
274 274 [1]
275 275
276 276 basic failing test
277 277 $ cat > test-failure.t << EOF
278 278 > $ echo babar
279 279 > rataxes
280 280 > This is a noop statement so that
281 281 > this test is still more bytes than success.
282 282 > pad pad pad pad............................................................
283 283 > pad pad pad pad............................................................
284 284 > pad pad pad pad............................................................
285 285 > pad pad pad pad............................................................
286 286 > pad pad pad pad............................................................
287 287 > pad pad pad pad............................................................
288 288 > EOF
289 289
290 290 >>> fh = open('test-failure-unicode.t', 'wb')
291 291 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
292 292 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
293 293
294 294 $ rt
295 295
296 296 --- $TESTTMP/test-failure.t
297 297 +++ $TESTTMP/test-failure.t.err
298 298 @@ -1,5 +1,5 @@
299 299 $ echo babar
300 300 - rataxes
301 301 + babar
302 302 This is a noop statement so that
303 303 this test is still more bytes than success.
304 304 pad pad pad pad............................................................
305 305
306 306 ERROR: test-failure.t output changed
307 307 !.
308 308 --- $TESTTMP/test-failure-unicode.t
309 309 +++ $TESTTMP/test-failure-unicode.t.err
310 310 @@ -1,2 +1,2 @@
311 311 $ echo babar\xce\xb1 (esc)
312 312 - l\xce\xb5\xce\xb5t (esc)
313 313 + babar\xce\xb1 (esc)
314 314
315 315 ERROR: test-failure-unicode.t output changed
316 316 !
317 317 Failed test-failure.t: output changed
318 318 Failed test-failure-unicode.t: output changed
319 319 # Ran 3 tests, 0 skipped, 2 failed.
320 320 python hash seed: * (glob)
321 321 [1]
322 322
323 323 test --outputdir
324 324 $ mkdir output
325 325 $ rt --outputdir output
326 326
327 327 --- $TESTTMP/test-failure.t
328 328 +++ $TESTTMP/output/test-failure.t.err
329 329 @@ -1,5 +1,5 @@
330 330 $ echo babar
331 331 - rataxes
332 332 + babar
333 333 This is a noop statement so that
334 334 this test is still more bytes than success.
335 335 pad pad pad pad............................................................
336 336
337 337 ERROR: test-failure.t output changed
338 338 !.
339 339 --- $TESTTMP/test-failure-unicode.t
340 340 +++ $TESTTMP/output/test-failure-unicode.t.err
341 341 @@ -1,2 +1,2 @@
342 342 $ echo babar\xce\xb1 (esc)
343 343 - l\xce\xb5\xce\xb5t (esc)
344 344 + babar\xce\xb1 (esc)
345 345
346 346 ERROR: test-failure-unicode.t output changed
347 347 !
348 348 Failed test-failure.t: output changed
349 349 Failed test-failure-unicode.t: output changed
350 350 # Ran 3 tests, 0 skipped, 2 failed.
351 351 python hash seed: * (glob)
352 352 [1]
353 353 $ ls -a output
354 354 .
355 355 ..
356 356 .testtimes
357 357 test-failure-unicode.t.err
358 358 test-failure.t.err
359 359
360 360 test --xunit support
361 361 $ rt --xunit=xunit.xml
362 362
363 363 --- $TESTTMP/test-failure.t
364 364 +++ $TESTTMP/test-failure.t.err
365 365 @@ -1,5 +1,5 @@
366 366 $ echo babar
367 367 - rataxes
368 368 + babar
369 369 This is a noop statement so that
370 370 this test is still more bytes than success.
371 371 pad pad pad pad............................................................
372 372
373 373 ERROR: test-failure.t output changed
374 374 !.
375 375 --- $TESTTMP/test-failure-unicode.t
376 376 +++ $TESTTMP/test-failure-unicode.t.err
377 377 @@ -1,2 +1,2 @@
378 378 $ echo babar\xce\xb1 (esc)
379 379 - l\xce\xb5\xce\xb5t (esc)
380 380 + babar\xce\xb1 (esc)
381 381
382 382 ERROR: test-failure-unicode.t output changed
383 383 !
384 384 Failed test-failure.t: output changed
385 385 Failed test-failure-unicode.t: output changed
386 386 # Ran 3 tests, 0 skipped, 2 failed.
387 387 python hash seed: * (glob)
388 388 [1]
389 389 $ cat xunit.xml
390 390 <?xml version="1.0" encoding="utf-8"?>
391 391 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
392 392 <testcase name="test-success.t" time="*"/> (glob)
393 393 <testcase name="test-failure-unicode.t" time="*"> (glob)
394 394 <failure message="output changed" type="output-mismatch">
395 395 <![CDATA[--- $TESTTMP/test-failure-unicode.t
396 396 +++ $TESTTMP/test-failure-unicode.t.err
397 397 @@ -1,2 +1,2 @@
398 398 $ echo babar\xce\xb1 (esc)
399 399 - l\xce\xb5\xce\xb5t (esc)
400 400 + babar\xce\xb1 (esc)
401 401 ]]> </failure>
402 402 </testcase>
403 403 <testcase name="test-failure.t" time="*"> (glob)
404 404 <failure message="output changed" type="output-mismatch">
405 405 <![CDATA[--- $TESTTMP/test-failure.t
406 406 +++ $TESTTMP/test-failure.t.err
407 407 @@ -1,5 +1,5 @@
408 408 $ echo babar
409 409 - rataxes
410 410 + babar
411 411 This is a noop statement so that
412 412 this test is still more bytes than success.
413 413 pad pad pad pad............................................................
414 414 ]]> </failure>
415 415 </testcase>
416 416 </testsuite>
417 417
418 418 $ cat .testtimes
419 419 test-empty.t * (glob)
420 420 test-failure-globs.t * (glob)
421 421 test-failure-unicode.t * (glob)
422 422 test-failure.t * (glob)
423 423 test-success.t * (glob)
424 424
425 425 $ rt --list-tests
426 426 test-failure-unicode.t
427 427 test-failure.t
428 428 test-success.t
429 429
430 430 $ rt --list-tests --json
431 431 test-failure-unicode.t
432 432 test-failure.t
433 433 test-success.t
434 434 $ cat report.json
435 435 testreport ={
436 436 "test-failure-unicode.t": {
437 437 "result": "success"
438 438 },
439 439 "test-failure.t": {
440 440 "result": "success"
441 441 },
442 442 "test-success.t": {
443 443 "result": "success"
444 444 }
445 445 } (no-eol)
446 446
447 447 $ rt --list-tests --xunit=xunit.xml
448 448 test-failure-unicode.t
449 449 test-failure.t
450 450 test-success.t
451 451 $ cat xunit.xml
452 452 <?xml version="1.0" encoding="utf-8"?>
453 453 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
454 454 <testcase name="test-failure-unicode.t"/>
455 455 <testcase name="test-failure.t"/>
456 456 <testcase name="test-success.t"/>
457 457 </testsuite>
458 458
459 459 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
460 460 test-failure-unicode.t
461 461 test-failure.t
462 462 $ cat output/report.json
463 463 testreport ={
464 464 "test-failure-unicode.t": {
465 465 "result": "success"
466 466 },
467 467 "test-failure.t": {
468 468 "result": "success"
469 469 }
470 470 } (no-eol)
471 471 $ cat xunit.xml
472 472 <?xml version="1.0" encoding="utf-8"?>
473 473 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
474 474 <testcase name="test-failure-unicode.t"/>
475 475 <testcase name="test-failure.t"/>
476 476 </testsuite>
477 477
478 478 $ rm test-failure-unicode.t
479 479
480 480 test for --retest
481 481 ====================
482 482
483 483 $ rt --retest
484 484
485 485 --- $TESTTMP/test-failure.t
486 486 +++ $TESTTMP/test-failure.t.err
487 487 @@ -1,5 +1,5 @@
488 488 $ echo babar
489 489 - rataxes
490 490 + babar
491 491 This is a noop statement so that
492 492 this test is still more bytes than success.
493 493 pad pad pad pad............................................................
494 494
495 495 ERROR: test-failure.t output changed
496 496 !
497 497 Failed test-failure.t: output changed
498 498 # Ran 2 tests, 1 skipped, 1 failed.
499 499 python hash seed: * (glob)
500 500 [1]
501 501
502 502 --retest works with --outputdir
503 503 $ rm -r output
504 504 $ mkdir output
505 505 $ mv test-failure.t.err output
506 506 $ rt --retest --outputdir output
507 507
508 508 --- $TESTTMP/test-failure.t
509 509 +++ $TESTTMP/output/test-failure.t.err
510 510 @@ -1,5 +1,5 @@
511 511 $ echo babar
512 512 - rataxes
513 513 + babar
514 514 This is a noop statement so that
515 515 this test is still more bytes than success.
516 516 pad pad pad pad............................................................
517 517
518 518 ERROR: test-failure.t output changed
519 519 !
520 520 Failed test-failure.t: output changed
521 521 # Ran 2 tests, 1 skipped, 1 failed.
522 522 python hash seed: * (glob)
523 523 [1]
524 524
525 525 Selecting Tests To Run
526 526 ======================
527 527
528 528 successful
529 529
530 530 $ rt test-success.t
531 531 .
532 532 # Ran 1 tests, 0 skipped, 0 failed.
533 533
534 534 success w/ keyword
535 535 $ rt -k xyzzy
536 536 .
537 537 # Ran 2 tests, 1 skipped, 0 failed.
538 538
539 539 failed
540 540
541 541 $ rt test-failure.t
542 542
543 543 --- $TESTTMP/test-failure.t
544 544 +++ $TESTTMP/test-failure.t.err
545 545 @@ -1,5 +1,5 @@
546 546 $ echo babar
547 547 - rataxes
548 548 + babar
549 549 This is a noop statement so that
550 550 this test is still more bytes than success.
551 551 pad pad pad pad............................................................
552 552
553 553 ERROR: test-failure.t output changed
554 554 !
555 555 Failed test-failure.t: output changed
556 556 # Ran 1 tests, 0 skipped, 1 failed.
557 557 python hash seed: * (glob)
558 558 [1]
559 559
560 560 failure w/ keyword
561 561 $ rt -k rataxes
562 562
563 563 --- $TESTTMP/test-failure.t
564 564 +++ $TESTTMP/test-failure.t.err
565 565 @@ -1,5 +1,5 @@
566 566 $ echo babar
567 567 - rataxes
568 568 + babar
569 569 This is a noop statement so that
570 570 this test is still more bytes than success.
571 571 pad pad pad pad............................................................
572 572
573 573 ERROR: test-failure.t output changed
574 574 !
575 575 Failed test-failure.t: output changed
576 576 # Ran 2 tests, 1 skipped, 1 failed.
577 577 python hash seed: * (glob)
578 578 [1]
579 579
580 580 Verify that when a process fails to start we show a useful message
581 581 ==================================================================
582 582
583 583 $ cat > test-serve-fail.t <<EOF
584 584 > $ echo 'abort: child process failed to start blah'
585 585 > EOF
586 586 $ rt test-serve-fail.t
587 587
588 588 --- $TESTTMP/test-serve-fail.t
589 589 +++ $TESTTMP/test-serve-fail.t.err
590 590 @@ -1* +1,2 @@ (glob)
591 591 $ echo 'abort: child process failed to start blah'
592 592 + abort: child process failed to start blah
593 593
594 594 ERROR: test-serve-fail.t output changed
595 595 !
596 596 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
597 597 # Ran 1 tests, 0 skipped, 1 failed.
598 598 python hash seed: * (glob)
599 599 [1]
600 600 $ rm test-serve-fail.t
601 601
602 602 Verify that we can try other ports
603 603 ===================================
604 604
605 605 Extensions aren't inherited by the invoked run-tests.py. An extension
606 606 introducing a repository requirement could cause this to fail. So we force
607 607 HGRCPATH to get a clean environment.
608 608
609 609 $ HGRCPATH= hg init inuse
610 610 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
611 611 $ cat blocks.pid >> $DAEMON_PIDS
612 612 $ cat > test-serve-inuse.t <<EOF
613 613 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
614 614 > $ cat hg.pid >> \$DAEMON_PIDS
615 615 > EOF
616 616 $ rt test-serve-inuse.t
617 617 .
618 618 # Ran 1 tests, 0 skipped, 0 failed.
619 619 $ rm test-serve-inuse.t
620 620 $ killdaemons.py $DAEMON_PIDS
621 621
622 622 Running In Debug Mode
623 623 ======================
624 624
625 625 $ rt --debug 2>&1 | grep -v pwd
626 626 + echo *SALT* 0 0 (glob)
627 627 *SALT* 0 0 (glob)
628 628 + echo babar
629 629 babar
630 630 + echo *SALT* 10 0 (glob)
631 631 *SALT* 10 0 (glob)
632 632 *+ echo *SALT* 0 0 (glob)
633 633 *SALT* 0 0 (glob)
634 634 + echo babar
635 635 babar
636 636 + echo *SALT* 2 0 (glob)
637 637 *SALT* 2 0 (glob)
638 638 + echo xyzzy
639 639 xyzzy
640 640 + echo *SALT* 9 0 (glob)
641 641 *SALT* 9 0 (glob)
642 642 + printf *abc\ndef\nxyz\n* (glob)
643 643 abc
644 644 def
645 645 xyz
646 646 + echo *SALT* 15 0 (glob)
647 647 *SALT* 15 0 (glob)
648 648 + printf *zyx\nwvu\ntsr\n* (glob)
649 649 zyx
650 650 wvu
651 651 tsr
652 652 + echo *SALT* 22 0 (glob)
653 653 *SALT* 22 0 (glob)
654 654 .
655 655 # Ran 2 tests, 0 skipped, 0 failed.
656 656
657 657 Parallel runs
658 658 ==============
659 659
660 660 (duplicate the failing test to get predictable output)
661 661 $ cp test-failure.t test-failure-copy.t
662 662
663 663 $ rt --jobs 2 test-failure*.t -n
664 664 !!
665 665 Failed test-failure*.t: output changed (glob)
666 666 Failed test-failure*.t: output changed (glob)
667 667 # Ran 2 tests, 0 skipped, 2 failed.
668 668 python hash seed: * (glob)
669 669 [1]
670 670
671 671 failures in parallel with --first should only print one failure
672 672 $ rt --jobs 2 --first test-failure*.t
673 673
674 674 --- $TESTTMP/test-failure*.t (glob)
675 675 +++ $TESTTMP/test-failure*.t.err (glob)
676 676 @@ -1,5 +1,5 @@
677 677 $ echo babar
678 678 - rataxes
679 679 + babar
680 680 This is a noop statement so that
681 681 this test is still more bytes than success.
682 682 pad pad pad pad............................................................
683 683
684 684 Failed test-failure*.t: output changed (glob)
685 685 Failed test-failure*.t: output changed (glob)
686 686 # Ran 2 tests, 0 skipped, 2 failed.
687 687 python hash seed: * (glob)
688 688 [1]
689 689
690 690
691 691 (delete the duplicated test file)
692 692 $ rm test-failure-copy.t
693 693
694 694
695 695 Interactive run
696 696 ===============
697 697
698 698 (backup the failing test)
699 699 $ cp test-failure.t backup
700 700
701 701 Refuse the fix
702 702
703 703 $ echo 'n' | rt -i
704 704
705 705 --- $TESTTMP/test-failure.t
706 706 +++ $TESTTMP/test-failure.t.err
707 707 @@ -1,5 +1,5 @@
708 708 $ echo babar
709 709 - rataxes
710 710 + babar
711 711 This is a noop statement so that
712 712 this test is still more bytes than success.
713 713 pad pad pad pad............................................................
714 714 Accept this change? [n]
715 715 ERROR: test-failure.t output changed
716 716 !.
717 717 Failed test-failure.t: output changed
718 718 # Ran 2 tests, 0 skipped, 1 failed.
719 719 python hash seed: * (glob)
720 720 [1]
721 721
722 722 $ cat test-failure.t
723 723 $ echo babar
724 724 rataxes
725 725 This is a noop statement so that
726 726 this test is still more bytes than success.
727 727 pad pad pad pad............................................................
728 728 pad pad pad pad............................................................
729 729 pad pad pad pad............................................................
730 730 pad pad pad pad............................................................
731 731 pad pad pad pad............................................................
732 732 pad pad pad pad............................................................
733 733
734 734 Interactive with custom view
735 735
736 736 $ echo 'n' | rt -i --view echo
737 737 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
738 738 Accept this change? [n]* (glob)
739 739 ERROR: test-failure.t output changed
740 740 !.
741 741 Failed test-failure.t: output changed
742 742 # Ran 2 tests, 0 skipped, 1 failed.
743 743 python hash seed: * (glob)
744 744 [1]
745 745
746 746 View the fix
747 747
748 748 $ echo 'y' | rt --view echo
749 749 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
750 750
751 751 ERROR: test-failure.t output changed
752 752 !.
753 753 Failed test-failure.t: output changed
754 754 # Ran 2 tests, 0 skipped, 1 failed.
755 755 python hash seed: * (glob)
756 756 [1]
757 757
758 758 Accept the fix
759 759
760 760 $ cat >> test-failure.t <<EOF
761 761 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
762 762 > saved backup bundle to \$TESTTMP/foo.hg
763 763 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
764 764 > saved backup bundle to $TESTTMP\\foo.hg
765 765 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
766 766 > saved backup bundle to \$TESTTMP/*.hg (glob)
767 767 > EOF
768 768 $ echo 'y' | rt -i 2>&1
769 769
770 770 --- $TESTTMP/test-failure.t
771 771 +++ $TESTTMP/test-failure.t.err
772 772 @@ -1,5 +1,5 @@
773 773 $ echo babar
774 774 - rataxes
775 775 + babar
776 776 This is a noop statement so that
777 777 this test is still more bytes than success.
778 778 pad pad pad pad............................................................
779 779 @@ -11,6 +11,6 @@
780 780 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
781 781 saved backup bundle to $TESTTMP/foo.hg
782 782 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
783 783 - saved backup bundle to $TESTTMP\foo.hg
784 784 + saved backup bundle to $TESTTMP/foo.hg
785 785 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
786 786 saved backup bundle to $TESTTMP/*.hg (glob)
787 787 Accept this change? [n] ..
788 788 # Ran 2 tests, 0 skipped, 0 failed.
789 789
790 790 $ sed -e 's,(glob)$,&<,g' test-failure.t
791 791 $ echo babar
792 792 babar
793 793 This is a noop statement so that
794 794 this test is still more bytes than success.
795 795 pad pad pad pad............................................................
796 796 pad pad pad pad............................................................
797 797 pad pad pad pad............................................................
798 798 pad pad pad pad............................................................
799 799 pad pad pad pad............................................................
800 800 pad pad pad pad............................................................
801 801 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
802 802 saved backup bundle to $TESTTMP/foo.hg
803 803 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
804 804 saved backup bundle to $TESTTMP/foo.hg
805 805 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
806 806 saved backup bundle to $TESTTMP/*.hg (glob)<
807 807
808 808 Race condition - test file was modified when test is running
809 809
810 810 $ TESTRACEDIR=`pwd`
811 811 $ export TESTRACEDIR
812 812 $ cat > test-race.t <<EOF
813 813 > $ echo 1
814 814 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
815 815 > EOF
816 816
817 817 $ rt -i test-race.t
818 818
819 819 --- $TESTTMP/test-race.t
820 820 +++ $TESTTMP/test-race.t.err
821 821 @@ -1,2 +1,3 @@
822 822 $ echo 1
823 823 + 1
824 824 $ echo "# a new line" >> $TESTTMP/test-race.t
825 825 Reference output has changed (run again to prompt changes)
826 826 ERROR: test-race.t output changed
827 827 !
828 828 Failed test-race.t: output changed
829 829 # Ran 1 tests, 0 skipped, 1 failed.
830 830 python hash seed: * (glob)
831 831 [1]
832 832
833 833 $ rm test-race.t
834 834
835 835 When "#testcases" is used in .t files
836 836
837 837 $ cat >> test-cases.t <<EOF
838 838 > #testcases a b
839 839 > #if a
840 840 > $ echo 1
841 841 > #endif
842 842 > #if b
843 843 > $ echo 2
844 844 > #endif
845 845 > EOF
846 846
847 847 $ cat <<EOF | rt -i test-cases.t 2>&1
848 848 > y
849 849 > y
850 850 > EOF
851 851
852 852 --- $TESTTMP/test-cases.t
853 +++ $TESTTMP/test-cases.t.a.err
853 +++ $TESTTMP/test-cases.t#a.err
854 854 @@ -1,6 +1,7 @@
855 855 #testcases a b
856 856 #if a
857 857 $ echo 1
858 858 + 1
859 859 #endif
860 860 #if b
861 861 $ echo 2
862 862 Accept this change? [n] .
863 863 --- $TESTTMP/test-cases.t
864 +++ $TESTTMP/test-cases.t.b.err
864 +++ $TESTTMP/test-cases.t#b.err
865 865 @@ -5,4 +5,5 @@
866 866 #endif
867 867 #if b
868 868 $ echo 2
869 869 + 2
870 870 #endif
871 871 Accept this change? [n] .
872 872 # Ran 2 tests, 0 skipped, 0 failed.
873 873
874 874 $ cat test-cases.t
875 875 #testcases a b
876 876 #if a
877 877 $ echo 1
878 878 1
879 879 #endif
880 880 #if b
881 881 $ echo 2
882 882 2
883 883 #endif
884 884
885 885 $ cat >> test-cases.t <<'EOF'
886 886 > #if a
887 887 > $ NAME=A
888 888 > #else
889 889 > $ NAME=B
890 890 > #endif
891 891 > $ echo $NAME
892 892 > A (a !)
893 893 > B (b !)
894 894 > EOF
895 895 $ rt test-cases.t
896 896 ..
897 897 # Ran 2 tests, 0 skipped, 0 failed.
898 898
899 899 $ rm test-cases.t
900 900
901 901 (reinstall)
902 902 $ mv backup test-failure.t
903 903
904 904 No Diff
905 905 ===============
906 906
907 907 $ rt --nodiff
908 908 !.
909 909 Failed test-failure.t: output changed
910 910 # Ran 2 tests, 0 skipped, 1 failed.
911 911 python hash seed: * (glob)
912 912 [1]
913 913
914 914 test --tmpdir support
915 915 $ rt --tmpdir=$TESTTMP/keep test-success.t
916 916
917 917 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
918 918 Keeping threadtmp dir: $TESTTMP/keep/child1
919 919 .
920 920 # Ran 1 tests, 0 skipped, 0 failed.
921 921
922 922 timeouts
923 923 ========
924 924 $ cat > test-timeout.t <<EOF
925 925 > $ sleep 2
926 926 > $ echo pass
927 927 > pass
928 928 > EOF
929 929 > echo '#require slow' > test-slow-timeout.t
930 930 > cat test-timeout.t >> test-slow-timeout.t
931 931 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
932 932 st
933 933 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
934 934 Failed test-timeout.t: timed out
935 935 # Ran 1 tests, 1 skipped, 1 failed.
936 936 python hash seed: * (glob)
937 937 [1]
938 938 $ rt --timeout=1 --slowtimeout=3 \
939 939 > test-timeout.t test-slow-timeout.t --allow-slow-tests
940 940 .t
941 941 Failed test-timeout.t: timed out
942 942 # Ran 2 tests, 0 skipped, 1 failed.
943 943 python hash seed: * (glob)
944 944 [1]
945 945 $ rm test-timeout.t test-slow-timeout.t
946 946
947 947 test for --time
948 948 ==================
949 949
950 950 $ rt test-success.t --time
951 951 .
952 952 # Ran 1 tests, 0 skipped, 0 failed.
953 953 # Producing time report
954 954 start end cuser csys real Test
955 955 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
956 956
957 957 test for --time with --job enabled
958 958 ====================================
959 959
960 960 $ rt test-success.t --time --jobs 2
961 961 .
962 962 # Ran 1 tests, 0 skipped, 0 failed.
963 963 # Producing time report
964 964 start end cuser csys real Test
965 965 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
966 966
967 967 Skips
968 968 ================
969 969 $ cat > test-skip.t <<EOF
970 970 > $ echo xyzzy
971 971 > #if true
972 972 > #require false
973 973 > #end
974 974 > EOF
975 975 $ cat > test-noskip.t <<EOF
976 976 > #if false
977 977 > #require false
978 978 > #endif
979 979 > EOF
980 980 $ rt --nodiff
981 981 !.s.
982 982 Skipped test-skip.t: missing feature: nail clipper
983 983 Failed test-failure.t: output changed
984 984 # Ran 3 tests, 1 skipped, 1 failed.
985 985 python hash seed: * (glob)
986 986 [1]
987 987
988 988 $ rm test-noskip.t
989 989 $ rt --keyword xyzzy
990 990 .s
991 991 Skipped test-skip.t: missing feature: nail clipper
992 992 # Ran 2 tests, 2 skipped, 0 failed.
993 993
994 994 Skips with xml
995 995 $ rt --keyword xyzzy \
996 996 > --xunit=xunit.xml
997 997 .s
998 998 Skipped test-skip.t: missing feature: nail clipper
999 999 # Ran 2 tests, 2 skipped, 0 failed.
1000 1000 $ cat xunit.xml
1001 1001 <?xml version="1.0" encoding="utf-8"?>
1002 1002 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1003 1003 <testcase name="test-success.t" time="*"/> (glob)
1004 1004 <testcase name="test-skip.t">
1005 1005 <skipped>
1006 1006 <![CDATA[missing feature: nail clipper]]> </skipped>
1007 1007 </testcase>
1008 1008 </testsuite>
1009 1009
1010 1010 Missing skips or blacklisted skips don't count as executed:
1011 1011 $ echo test-failure.t > blacklist
1012 1012 $ rt --blacklist=blacklist --json\
1013 1013 > test-failure.t test-bogus.t
1014 1014 ss
1015 1015 Skipped test-bogus.t: Doesn't exist
1016 1016 Skipped test-failure.t: blacklisted
1017 1017 # Ran 0 tests, 2 skipped, 0 failed.
1018 1018 $ cat report.json
1019 1019 testreport ={
1020 1020 "test-bogus.t": {
1021 1021 "result": "skip"
1022 1022 },
1023 1023 "test-failure.t": {
1024 1024 "result": "skip"
1025 1025 }
1026 1026 } (no-eol)
1027 1027
1028 1028 Whitelist trumps blacklist
1029 1029 $ echo test-failure.t > whitelist
1030 1030 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1031 1031 > test-failure.t test-bogus.t
1032 1032 s
1033 1033 --- $TESTTMP/test-failure.t
1034 1034 +++ $TESTTMP/test-failure.t.err
1035 1035 @@ -1,5 +1,5 @@
1036 1036 $ echo babar
1037 1037 - rataxes
1038 1038 + babar
1039 1039 This is a noop statement so that
1040 1040 this test is still more bytes than success.
1041 1041 pad pad pad pad............................................................
1042 1042
1043 1043 ERROR: test-failure.t output changed
1044 1044 !
1045 1045 Skipped test-bogus.t: Doesn't exist
1046 1046 Failed test-failure.t: output changed
1047 1047 # Ran 1 tests, 1 skipped, 1 failed.
1048 1048 python hash seed: * (glob)
1049 1049 [1]
1050 1050
1051 1051 Ensure that --test-list causes only the tests listed in that file to
1052 1052 be executed.
1053 1053 $ echo test-success.t >> onlytest
1054 1054 $ rt --test-list=onlytest
1055 1055 .
1056 1056 # Ran 1 tests, 0 skipped, 0 failed.
1057 1057 $ echo test-bogus.t >> anothertest
1058 1058 $ rt --test-list=onlytest --test-list=anothertest
1059 1059 s.
1060 1060 Skipped test-bogus.t: Doesn't exist
1061 1061 # Ran 1 tests, 1 skipped, 0 failed.
1062 1062 $ rm onlytest anothertest
1063 1063
1064 1064 test for --json
1065 1065 ==================
1066 1066
1067 1067 $ rt --json
1068 1068
1069 1069 --- $TESTTMP/test-failure.t
1070 1070 +++ $TESTTMP/test-failure.t.err
1071 1071 @@ -1,5 +1,5 @@
1072 1072 $ echo babar
1073 1073 - rataxes
1074 1074 + babar
1075 1075 This is a noop statement so that
1076 1076 this test is still more bytes than success.
1077 1077 pad pad pad pad............................................................
1078 1078
1079 1079 ERROR: test-failure.t output changed
1080 1080 !.s
1081 1081 Skipped test-skip.t: missing feature: nail clipper
1082 1082 Failed test-failure.t: output changed
1083 1083 # Ran 2 tests, 1 skipped, 1 failed.
1084 1084 python hash seed: * (glob)
1085 1085 [1]
1086 1086
1087 1087 $ cat report.json
1088 1088 testreport ={
1089 1089 "test-failure.t": [\{] (re)
1090 1090 "csys": "\s*[\d\.]{4,5}", ? (re)
1091 1091 "cuser": "\s*[\d\.]{4,5}", ? (re)
1092 1092 "diff": "---.+\+\+\+.+", ? (re)
1093 1093 "end": "\s*[\d\.]{4,5}", ? (re)
1094 1094 "result": "failure", ? (re)
1095 1095 "start": "\s*[\d\.]{4,5}", ? (re)
1096 1096 "time": "\s*[\d\.]{4,5}" (re)
1097 1097 }, ? (re)
1098 1098 "test-skip.t": {
1099 1099 "csys": "\s*[\d\.]{4,5}", ? (re)
1100 1100 "cuser": "\s*[\d\.]{4,5}", ? (re)
1101 1101 "diff": "", ? (re)
1102 1102 "end": "\s*[\d\.]{4,5}", ? (re)
1103 1103 "result": "skip", ? (re)
1104 1104 "start": "\s*[\d\.]{4,5}", ? (re)
1105 1105 "time": "\s*[\d\.]{4,5}" (re)
1106 1106 }, ? (re)
1107 1107 "test-success.t": [\{] (re)
1108 1108 "csys": "\s*[\d\.]{4,5}", ? (re)
1109 1109 "cuser": "\s*[\d\.]{4,5}", ? (re)
1110 1110 "diff": "", ? (re)
1111 1111 "end": "\s*[\d\.]{4,5}", ? (re)
1112 1112 "result": "success", ? (re)
1113 1113 "start": "\s*[\d\.]{4,5}", ? (re)
1114 1114 "time": "\s*[\d\.]{4,5}" (re)
1115 1115 }
1116 1116 } (no-eol)
1117 1117 --json with --outputdir
1118 1118
1119 1119 $ rm report.json
1120 1120 $ rm -r output
1121 1121 $ mkdir output
1122 1122 $ rt --json --outputdir output
1123 1123
1124 1124 --- $TESTTMP/test-failure.t
1125 1125 +++ $TESTTMP/output/test-failure.t.err
1126 1126 @@ -1,5 +1,5 @@
1127 1127 $ echo babar
1128 1128 - rataxes
1129 1129 + babar
1130 1130 This is a noop statement so that
1131 1131 this test is still more bytes than success.
1132 1132 pad pad pad pad............................................................
1133 1133
1134 1134 ERROR: test-failure.t output changed
1135 1135 !.s
1136 1136 Skipped test-skip.t: missing feature: nail clipper
1137 1137 Failed test-failure.t: output changed
1138 1138 # Ran 2 tests, 1 skipped, 1 failed.
1139 1139 python hash seed: * (glob)
1140 1140 [1]
1141 1141 $ f report.json
1142 1142 report.json: file not found
1143 1143 $ cat output/report.json
1144 1144 testreport ={
1145 1145 "test-failure.t": [\{] (re)
1146 1146 "csys": "\s*[\d\.]{4,5}", ? (re)
1147 1147 "cuser": "\s*[\d\.]{4,5}", ? (re)
1148 1148 "diff": "---.+\+\+\+.+", ? (re)
1149 1149 "end": "\s*[\d\.]{4,5}", ? (re)
1150 1150 "result": "failure", ? (re)
1151 1151 "start": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "time": "\s*[\d\.]{4,5}" (re)
1153 1153 }, ? (re)
1154 1154 "test-skip.t": {
1155 1155 "csys": "\s*[\d\.]{4,5}", ? (re)
1156 1156 "cuser": "\s*[\d\.]{4,5}", ? (re)
1157 1157 "diff": "", ? (re)
1158 1158 "end": "\s*[\d\.]{4,5}", ? (re)
1159 1159 "result": "skip", ? (re)
1160 1160 "start": "\s*[\d\.]{4,5}", ? (re)
1161 1161 "time": "\s*[\d\.]{4,5}" (re)
1162 1162 }, ? (re)
1163 1163 "test-success.t": [\{] (re)
1164 1164 "csys": "\s*[\d\.]{4,5}", ? (re)
1165 1165 "cuser": "\s*[\d\.]{4,5}", ? (re)
1166 1166 "diff": "", ? (re)
1167 1167 "end": "\s*[\d\.]{4,5}", ? (re)
1168 1168 "result": "success", ? (re)
1169 1169 "start": "\s*[\d\.]{4,5}", ? (re)
1170 1170 "time": "\s*[\d\.]{4,5}" (re)
1171 1171 }
1172 1172 } (no-eol)
1173 1173 $ ls -a output
1174 1174 .
1175 1175 ..
1176 1176 .testtimes
1177 1177 report.json
1178 1178 test-failure.t.err
1179 1179
1180 1180 Test that failed test accepted through interactive are properly reported:
1181 1181
1182 1182 $ cp test-failure.t backup
1183 1183 $ echo y | rt --json -i
1184 1184
1185 1185 --- $TESTTMP/test-failure.t
1186 1186 +++ $TESTTMP/test-failure.t.err
1187 1187 @@ -1,5 +1,5 @@
1188 1188 $ echo babar
1189 1189 - rataxes
1190 1190 + babar
1191 1191 This is a noop statement so that
1192 1192 this test is still more bytes than success.
1193 1193 pad pad pad pad............................................................
1194 1194 Accept this change? [n] ..s
1195 1195 Skipped test-skip.t: missing feature: nail clipper
1196 1196 # Ran 2 tests, 1 skipped, 0 failed.
1197 1197
1198 1198 $ cat report.json
1199 1199 testreport ={
1200 1200 "test-failure.t": [\{] (re)
1201 1201 "csys": "\s*[\d\.]{4,5}", ? (re)
1202 1202 "cuser": "\s*[\d\.]{4,5}", ? (re)
1203 1203 "diff": "", ? (re)
1204 1204 "end": "\s*[\d\.]{4,5}", ? (re)
1205 1205 "result": "success", ? (re)
1206 1206 "start": "\s*[\d\.]{4,5}", ? (re)
1207 1207 "time": "\s*[\d\.]{4,5}" (re)
1208 1208 }, ? (re)
1209 1209 "test-skip.t": {
1210 1210 "csys": "\s*[\d\.]{4,5}", ? (re)
1211 1211 "cuser": "\s*[\d\.]{4,5}", ? (re)
1212 1212 "diff": "", ? (re)
1213 1213 "end": "\s*[\d\.]{4,5}", ? (re)
1214 1214 "result": "skip", ? (re)
1215 1215 "start": "\s*[\d\.]{4,5}", ? (re)
1216 1216 "time": "\s*[\d\.]{4,5}" (re)
1217 1217 }, ? (re)
1218 1218 "test-success.t": [\{] (re)
1219 1219 "csys": "\s*[\d\.]{4,5}", ? (re)
1220 1220 "cuser": "\s*[\d\.]{4,5}", ? (re)
1221 1221 "diff": "", ? (re)
1222 1222 "end": "\s*[\d\.]{4,5}", ? (re)
1223 1223 "result": "success", ? (re)
1224 1224 "start": "\s*[\d\.]{4,5}", ? (re)
1225 1225 "time": "\s*[\d\.]{4,5}" (re)
1226 1226 }
1227 1227 } (no-eol)
1228 1228 $ mv backup test-failure.t
1229 1229
1230 1230 backslash on end of line with glob matching is handled properly
1231 1231
1232 1232 $ cat > test-glob-backslash.t << EOF
1233 1233 > $ echo 'foo bar \\'
1234 1234 > foo * \ (glob)
1235 1235 > EOF
1236 1236
1237 1237 $ rt test-glob-backslash.t
1238 1238 .
1239 1239 # Ran 1 tests, 0 skipped, 0 failed.
1240 1240
1241 1241 $ rm -f test-glob-backslash.t
1242 1242
1243 1243 Test globbing of local IP addresses
1244 1244 $ echo 172.16.18.1
1245 1245 $LOCALIP (glob)
1246 1246 $ echo dead:beef::1
1247 1247 $LOCALIP (glob)
1248 1248
1249 1249 Add support for external test formatter
1250 1250 =======================================
1251 1251
1252 1252 $ CUSTOM_TEST_RESULT=basic_test_result $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
1253 1253
1254 1254 # Ran 2 tests, 0 skipped, 0 failed.
1255 1255 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1256 1256 FAILURE! test-failure.t output changed
1257 1257 SUCCESS! test-success.t
1258 1258 ON_END!
1259 1259
1260 1260 Test reusability for third party tools
1261 1261 ======================================
1262 1262
1263 1263 $ mkdir "$TESTTMP"/anothertests
1264 1264 $ cd "$TESTTMP"/anothertests
1265 1265
1266 1266 test that `run-tests.py` can execute hghave, even if it runs not in
1267 1267 Mercurial source tree.
1268 1268
1269 1269 $ cat > test-hghave.t <<EOF
1270 1270 > #require true
1271 1271 > $ echo foo
1272 1272 > foo
1273 1273 > EOF
1274 1274 $ rt test-hghave.t
1275 1275 .
1276 1276 # Ran 1 tests, 0 skipped, 0 failed.
1277 1277
1278 1278 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1279 1279 running is placed.
1280 1280
1281 1281 $ cat > test-runtestdir.t <<EOF
1282 1282 > - $TESTDIR, in which test-run-tests.t is placed
1283 1283 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1284 1284 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1285 1285 >
1286 1286 > #if windows
1287 1287 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1288 1288 > #else
1289 1289 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1290 1290 > #endif
1291 1291 > If this prints a path, that means RUNTESTDIR didn't equal
1292 1292 > TESTDIR as it should have.
1293 1293 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1294 1294 > This should print the start of check-code. If this passes but the
1295 1295 > previous check failed, that means we found a copy of check-code at whatever
1296 1296 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1297 1297 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1298 1298 > #!USRBINENVPY
1299 1299 > #
1300 1300 > # check-code - a style and portability checker for Mercurial
1301 1301 > EOF
1302 1302 $ rt test-runtestdir.t
1303 1303 .
1304 1304 # Ran 1 tests, 0 skipped, 0 failed.
1305 1305
1306 1306 #if execbit
1307 1307
1308 1308 test that TESTDIR is referred in PATH
1309 1309
1310 1310 $ cat > custom-command.sh <<EOF
1311 1311 > #!/bin/sh
1312 1312 > echo "hello world"
1313 1313 > EOF
1314 1314 $ chmod +x custom-command.sh
1315 1315 $ cat > test-testdir-path.t <<EOF
1316 1316 > $ custom-command.sh
1317 1317 > hello world
1318 1318 > EOF
1319 1319 $ rt test-testdir-path.t
1320 1320 .
1321 1321 # Ran 1 tests, 0 skipped, 0 failed.
1322 1322
1323 1323 #endif
1324 1324
1325 1325 test support for --allow-slow-tests
1326 1326 $ cat > test-very-slow-test.t <<EOF
1327 1327 > #require slow
1328 1328 > $ echo pass
1329 1329 > pass
1330 1330 > EOF
1331 1331 $ rt test-very-slow-test.t
1332 1332 s
1333 1333 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1334 1334 # Ran 0 tests, 1 skipped, 0 failed.
1335 1335 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1336 1336 .
1337 1337 # Ran 1 tests, 0 skipped, 0 failed.
1338 1338
1339 1339 support for running a test outside the current directory
1340 1340 $ mkdir nonlocal
1341 1341 $ cat > nonlocal/test-is-not-here.t << EOF
1342 1342 > $ echo pass
1343 1343 > pass
1344 1344 > EOF
1345 1345 $ rt nonlocal/test-is-not-here.t
1346 1346 .
1347 1347 # Ran 1 tests, 0 skipped, 0 failed.
1348 1348
1349 1349 support for automatically discovering test if arg is a folder
1350 1350 $ mkdir tmp && cd tmp
1351 1351
1352 1352 $ cat > test-uno.t << EOF
1353 1353 > $ echo line
1354 1354 > line
1355 1355 > EOF
1356 1356
1357 1357 $ cp test-uno.t test-dos.t
1358 1358 $ cd ..
1359 1359 $ cp -R tmp tmpp
1360 1360 $ cp tmp/test-uno.t test-solo.t
1361 1361
1362 1362 $ rt tmp/ test-solo.t tmpp
1363 1363 .....
1364 1364 # Ran 5 tests, 0 skipped, 0 failed.
1365 1365 $ rm -rf tmp tmpp
1366 1366
1367 1367 support for running run-tests.py from another directory
1368 1368 $ mkdir tmp && cd tmp
1369 1369
1370 1370 $ cat > useful-file.sh << EOF
1371 1371 > important command
1372 1372 > EOF
1373 1373
1374 1374 $ cat > test-folder.t << EOF
1375 1375 > $ cat \$TESTDIR/useful-file.sh
1376 1376 > important command
1377 1377 > EOF
1378 1378
1379 1379 $ cat > test-folder-fail.t << EOF
1380 1380 > $ cat \$TESTDIR/useful-file.sh
1381 1381 > important commando
1382 1382 > EOF
1383 1383
1384 1384 $ cd ..
1385 1385 $ rt tmp/test-*.t
1386 1386
1387 1387 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1388 1388 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1389 1389 @@ -1,2 +1,2 @@
1390 1390 $ cat $TESTDIR/useful-file.sh
1391 1391 - important commando
1392 1392 + important command
1393 1393
1394 1394 ERROR: test-folder-fail.t output changed
1395 1395 !.
1396 1396 Failed test-folder-fail.t: output changed
1397 1397 # Ran 2 tests, 0 skipped, 1 failed.
1398 1398 python hash seed: * (glob)
1399 1399 [1]
1400 1400
1401 1401 support for bisecting failed tests automatically
1402 1402 $ hg init bisect
1403 1403 $ cd bisect
1404 1404 $ cat >> test-bisect.t <<EOF
1405 1405 > $ echo pass
1406 1406 > pass
1407 1407 > EOF
1408 1408 $ hg add test-bisect.t
1409 1409 $ hg ci -m 'good'
1410 1410 $ cat >> test-bisect.t <<EOF
1411 1411 > $ echo pass
1412 1412 > fail
1413 1413 > EOF
1414 1414 $ hg ci -m 'bad'
1415 1415 $ rt --known-good-rev=0 test-bisect.t
1416 1416
1417 1417 --- $TESTTMP/anothertests/bisect/test-bisect.t
1418 1418 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1419 1419 @@ -1,4 +1,4 @@
1420 1420 $ echo pass
1421 1421 pass
1422 1422 $ echo pass
1423 1423 - fail
1424 1424 + pass
1425 1425
1426 1426 ERROR: test-bisect.t output changed
1427 1427 !
1428 1428 Failed test-bisect.t: output changed
1429 1429 test-bisect.t broken by 72cbf122d116 (bad)
1430 1430 # Ran 1 tests, 0 skipped, 1 failed.
1431 1431 python hash seed: * (glob)
1432 1432 [1]
1433 1433
1434 1434 $ cd ..
1435 1435
1436 1436 support bisecting a separate repo
1437 1437
1438 1438 $ hg init bisect-dependent
1439 1439 $ cd bisect-dependent
1440 1440 $ cat > test-bisect-dependent.t <<EOF
1441 1441 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1442 1442 > pass
1443 1443 > EOF
1444 1444 $ hg commit -Am dependent test-bisect-dependent.t
1445 1445
1446 1446 $ rt --known-good-rev=0 test-bisect-dependent.t
1447 1447
1448 1448 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1449 1449 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1450 1450 @@ -1,2 +1,2 @@
1451 1451 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1452 1452 - pass
1453 1453 + fail
1454 1454
1455 1455 ERROR: test-bisect-dependent.t output changed
1456 1456 !
1457 1457 Failed test-bisect-dependent.t: output changed
1458 1458 Failed to identify failure point for test-bisect-dependent.t
1459 1459 # Ran 1 tests, 0 skipped, 1 failed.
1460 1460 python hash seed: * (glob)
1461 1461 [1]
1462 1462
1463 1463 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1464 1464 usage: run-tests.py [options] [tests]
1465 1465 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1466 1466 [2]
1467 1467
1468 1468 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1469 1469
1470 1470 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1471 1471 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1472 1472 @@ -1,2 +1,2 @@
1473 1473 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1474 1474 - pass
1475 1475 + fail
1476 1476
1477 1477 ERROR: test-bisect-dependent.t output changed
1478 1478 !
1479 1479 Failed test-bisect-dependent.t: output changed
1480 1480 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1481 1481 # Ran 1 tests, 0 skipped, 1 failed.
1482 1482 python hash seed: * (glob)
1483 1483 [1]
1484 1484
1485 1485 $ cd ..
1486 1486
1487 1487 Test a broken #if statement doesn't break run-tests threading.
1488 1488 ==============================================================
1489 1489 $ mkdir broken
1490 1490 $ cd broken
1491 1491 $ cat > test-broken.t <<EOF
1492 1492 > true
1493 1493 > #if notarealhghavefeature
1494 1494 > $ false
1495 1495 > #endif
1496 1496 > EOF
1497 1497 $ for f in 1 2 3 4 ; do
1498 1498 > cat > test-works-$f.t <<EOF
1499 1499 > This is test case $f
1500 1500 > $ sleep 1
1501 1501 > EOF
1502 1502 > done
1503 1503 $ rt -j 2
1504 1504 ....
1505 1505 # Ran 5 tests, 0 skipped, 0 failed.
1506 1506 skipped: unknown feature: notarealhghavefeature
1507 1507
1508 1508 $ cd ..
1509 1509 $ rm -rf broken
1510 1510
1511 1511 Test cases in .t files
1512 1512 ======================
1513 1513 $ mkdir cases
1514 1514 $ cd cases
1515 1515 $ cat > test-cases-abc.t <<'EOF'
1516 1516 > #testcases A B C
1517 1517 > $ V=B
1518 1518 > #if A
1519 1519 > $ V=A
1520 1520 > #endif
1521 1521 > #if C
1522 1522 > $ V=C
1523 1523 > #endif
1524 1524 > $ echo $V | sed 's/A/C/'
1525 1525 > C
1526 1526 > #if C
1527 1527 > $ [ $V = C ]
1528 1528 > #endif
1529 1529 > #if A
1530 1530 > $ [ $V = C ]
1531 1531 > [1]
1532 1532 > #endif
1533 1533 > #if no-C
1534 1534 > $ [ $V = C ]
1535 1535 > [1]
1536 1536 > #endif
1537 1537 > $ [ $V = D ]
1538 1538 > [1]
1539 1539 > EOF
1540 1540 $ rt
1541 1541 .
1542 1542 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1543 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1543 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1544 1544 @@ -7,7 +7,7 @@
1545 1545 $ V=C
1546 1546 #endif
1547 1547 $ echo $V | sed 's/A/C/'
1548 1548 - C
1549 1549 + B
1550 1550 #if C
1551 1551 $ [ $V = C ]
1552 1552 #endif
1553 1553
1554 1554 ERROR: test-cases-abc.t#B output changed
1555 1555 !.
1556 1556 Failed test-cases-abc.t#B: output changed
1557 1557 # Ran 3 tests, 0 skipped, 1 failed.
1558 1558 python hash seed: * (glob)
1559 1559 [1]
1560 1560
1561 1561 --restart works
1562 1562
1563 1563 $ rt --restart
1564 1564
1565 1565 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1566 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1566 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1567 1567 @@ -7,7 +7,7 @@
1568 1568 $ V=C
1569 1569 #endif
1570 1570 $ echo $V | sed 's/A/C/'
1571 1571 - C
1572 1572 + B
1573 1573 #if C
1574 1574 $ [ $V = C ]
1575 1575 #endif
1576 1576
1577 1577 ERROR: test-cases-abc.t#B output changed
1578 1578 !.
1579 1579 Failed test-cases-abc.t#B: output changed
1580 1580 # Ran 2 tests, 0 skipped, 1 failed.
1581 1581 python hash seed: * (glob)
1582 1582 [1]
1583 1583
1584 1584 --restart works with outputdir
1585 1585
1586 1586 $ mkdir output
1587 $ mv test-cases-abc.t.B.err output
1587 $ mv test-cases-abc.t#B.err output
1588 1588 $ rt --restart --outputdir output
1589 1589
1590 1590 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1591 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1591 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1592 1592 @@ -7,7 +7,7 @@
1593 1593 $ V=C
1594 1594 #endif
1595 1595 $ echo $V | sed 's/A/C/'
1596 1596 - C
1597 1597 + B
1598 1598 #if C
1599 1599 $ [ $V = C ]
1600 1600 #endif
1601 1601
1602 1602 ERROR: test-cases-abc.t#B output changed
1603 1603 !.
1604 1604 Failed test-cases-abc.t#B: output changed
1605 1605 # Ran 2 tests, 0 skipped, 1 failed.
1606 1606 python hash seed: * (glob)
1607 1607 [1]
1608 1608
1609 1609 Test TESTCASE variable
1610 1610
1611 1611 $ cat > test-cases-ab.t <<'EOF'
1612 1612 > $ dostuff() {
1613 1613 > > echo "In case $TESTCASE"
1614 1614 > > }
1615 1615 > #testcases A B
1616 1616 > #if A
1617 1617 > $ dostuff
1618 1618 > In case A
1619 1619 > #endif
1620 1620 > #if B
1621 1621 > $ dostuff
1622 1622 > In case B
1623 1623 > #endif
1624 1624 > EOF
1625 1625 $ rt test-cases-ab.t
1626 1626 ..
1627 1627 # Ran 2 tests, 0 skipped, 0 failed.
1628 1628
1629 1629 Support running a specific test case
1630 1630
1631 1631 $ rt "test-cases-abc.t#B"
1632 1632
1633 1633 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1634 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1634 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1635 1635 @@ -7,7 +7,7 @@
1636 1636 $ V=C
1637 1637 #endif
1638 1638 $ echo $V | sed 's/A/C/'
1639 1639 - C
1640 1640 + B
1641 1641 #if C
1642 1642 $ [ $V = C ]
1643 1643 #endif
1644 1644
1645 1645 ERROR: test-cases-abc.t#B output changed
1646 1646 !
1647 1647 Failed test-cases-abc.t#B: output changed
1648 1648 # Ran 1 tests, 0 skipped, 1 failed.
1649 1649 python hash seed: * (glob)
1650 1650 [1]
1651 1651
1652 1652 Support running multiple test cases in the same file
1653 1653
1654 1654 $ rt test-cases-abc.t#B test-cases-abc.t#C
1655 1655
1656 1656 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1657 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1657 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1658 1658 @@ -7,7 +7,7 @@
1659 1659 $ V=C
1660 1660 #endif
1661 1661 $ echo $V | sed 's/A/C/'
1662 1662 - C
1663 1663 + B
1664 1664 #if C
1665 1665 $ [ $V = C ]
1666 1666 #endif
1667 1667
1668 1668 ERROR: test-cases-abc.t#B output changed
1669 1669 !.
1670 1670 Failed test-cases-abc.t#B: output changed
1671 1671 # Ran 2 tests, 0 skipped, 1 failed.
1672 1672 python hash seed: * (glob)
1673 1673 [1]
1674 1674
1675 1675 Support ignoring invalid test cases
1676 1676
1677 1677 $ rt test-cases-abc.t#B test-cases-abc.t#D
1678 1678
1679 1679 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1680 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1680 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1681 1681 @@ -7,7 +7,7 @@
1682 1682 $ V=C
1683 1683 #endif
1684 1684 $ echo $V | sed 's/A/C/'
1685 1685 - C
1686 1686 + B
1687 1687 #if C
1688 1688 $ [ $V = C ]
1689 1689 #endif
1690 1690
1691 1691 ERROR: test-cases-abc.t#B output changed
1692 1692 !
1693 1693 Failed test-cases-abc.t#B: output changed
1694 1694 # Ran 1 tests, 0 skipped, 1 failed.
1695 1695 python hash seed: * (glob)
1696 1696 [1]
1697 1697
1698 1698 Support running complex test cases names
1699 1699
1700 1700 $ cat > test-cases-advanced-cases.t <<'EOF'
1701 1701 > #testcases simple case-with-dashes casewith_-.chars
1702 1702 > $ echo $TESTCASE
1703 1703 > simple
1704 1704 > EOF
1705 1705
1706 1706 $ cat test-cases-advanced-cases.t
1707 1707 #testcases simple case-with-dashes casewith_-.chars
1708 1708 $ echo $TESTCASE
1709 1709 simple
1710 1710
1711 1711 $ rt test-cases-advanced-cases.t
1712 1712
1713 1713 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1714 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
1714 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1715 1715 @@ -1,3 +1,3 @@
1716 1716 #testcases simple case-with-dashes casewith_-.chars
1717 1717 $ echo $TESTCASE
1718 1718 - simple
1719 1719 + case-with-dashes
1720 1720
1721 1721 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1722 1722 !
1723 1723 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1724 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
1724 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1725 1725 @@ -1,3 +1,3 @@
1726 1726 #testcases simple case-with-dashes casewith_-.chars
1727 1727 $ echo $TESTCASE
1728 1728 - simple
1729 1729 + casewith_-.chars
1730 1730
1731 1731 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1732 1732 !.
1733 1733 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1734 1734 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1735 1735 # Ran 3 tests, 0 skipped, 2 failed.
1736 1736 python hash seed: * (glob)
1737 1737 [1]
1738 1738
1739 1739 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1740 1740
1741 1741 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1742 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
1742 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1743 1743 @@ -1,3 +1,3 @@
1744 1744 #testcases simple case-with-dashes casewith_-.chars
1745 1745 $ echo $TESTCASE
1746 1746 - simple
1747 1747 + case-with-dashes
1748 1748
1749 1749 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1750 1750 !
1751 1751 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1752 1752 # Ran 1 tests, 0 skipped, 1 failed.
1753 1753 python hash seed: * (glob)
1754 1754 [1]
1755 1755
1756 1756 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1757 1757
1758 1758 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1759 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
1759 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1760 1760 @@ -1,3 +1,3 @@
1761 1761 #testcases simple case-with-dashes casewith_-.chars
1762 1762 $ echo $TESTCASE
1763 1763 - simple
1764 1764 + casewith_-.chars
1765 1765
1766 1766 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1767 1767 !
1768 1768 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1769 1769 # Ran 1 tests, 0 skipped, 1 failed.
1770 1770 python hash seed: * (glob)
1771 1771 [1]
1772 1772
1773 1773 Test automatic pattern replacement
1774 1774 ==================================
1775 1775
1776 1776 $ cat << EOF >> common-pattern.py
1777 1777 > substitutions = [
1778 1778 > (br'foo-(.*)\\b',
1779 1779 > br'\$XXX=\\1\$'),
1780 1780 > (br'bar\\n',
1781 1781 > br'\$YYY$\\n'),
1782 1782 > ]
1783 1783 > EOF
1784 1784
1785 1785 $ cat << EOF >> test-substitution.t
1786 1786 > $ echo foo-12
1787 1787 > \$XXX=12$
1788 1788 > $ echo foo-42
1789 1789 > \$XXX=42$
1790 1790 > $ echo bar prior
1791 1791 > bar prior
1792 1792 > $ echo lastbar
1793 1793 > last\$YYY$
1794 1794 > $ echo foo-bar foo-baz
1795 1795 > EOF
1796 1796
1797 1797 $ rt test-substitution.t
1798 1798
1799 1799 --- $TESTTMP/anothertests/cases/test-substitution.t
1800 1800 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1801 1801 @@ -7,3 +7,4 @@
1802 1802 $ echo lastbar
1803 1803 last$YYY$
1804 1804 $ echo foo-bar foo-baz
1805 1805 + $XXX=bar foo-baz$
1806 1806
1807 1807 ERROR: test-substitution.t output changed
1808 1808 !
1809 1809 Failed test-substitution.t: output changed
1810 1810 # Ran 1 tests, 0 skipped, 1 failed.
1811 1811 python hash seed: * (glob)
1812 1812 [1]
1813 1813
1814 1814 --extra-config-opt works
1815 1815
1816 1816 $ cat << EOF >> test-config-opt.t
1817 1817 > $ hg init test-config-opt
1818 1818 > $ hg -R test-config-opt purge
1819 1819 > EOF
1820 1820
1821 1821 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1822 1822 .
1823 1823 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now