##// END OF EJS Templates
run-tests: update the test case name format...
Boris Feld -
r38462:b865bba5 default
parent child Browse files
Show More
@@ -1,3138 +1,3138
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
81 81 if os.environ.get('RTUNICODEPEDANTRY', False):
82 82 try:
83 83 reload(sys)
84 84 sys.setdefaultencoding("undefined")
85 85 except NameError:
86 86 pass
87 87
88 88 origenviron = os.environ.copy()
89 89 osenvironb = getattr(os, 'environb', os.environ)
90 90 processlock = threading.Lock()
91 91
92 92 pygmentspresent = False
93 93 # ANSI color is unsupported prior to Windows 10
94 94 if os.name != 'nt':
95 95 try: # is pygments installed
96 96 import pygments
97 97 import pygments.lexers as lexers
98 98 import pygments.lexer as lexer
99 99 import pygments.formatters as formatters
100 100 import pygments.token as token
101 101 import pygments.style as style
102 102 pygmentspresent = True
103 103 difflexer = lexers.DiffLexer()
104 104 terminal256formatter = formatters.Terminal256Formatter()
105 105 except ImportError:
106 106 pass
107 107
if pygmentspresent:
    class TestRunnerStyle(style.Style):
        """Pygments style mapping test-runner token types to colors."""
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # Test names look like "test-foo.t" or "test-foo.py", optionally
        # followed by "#casename" for parametrized test cases.
        # (Removed a stale duplicate assignment of the pre-r38462
        # "( \(case ...\))?" pattern that shadowed nothing but was dead.)
        testpattern = r'[\w-]+\.(t|py)(#[\w-]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
142 142
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Encode a text path to bytes (UTF-8); None passes through.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # Decode a bytes path back to text (UTF-8); None passes through.
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 is explicitly unsupported: bail out early.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
171 171
172 172 # For Windows support
173 173 wifexited = getattr(os, "WIFEXITED", lambda x: False)
174 174
175 175 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        # This platform's socket module does not know the family at all.
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Somebody else already listens there, so the family works.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    else:
        return True
199 199
200 200 # useipv6 will be set by parseargs
201 201 useipv6 = None
202 202
def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    # useipv6 is a module-level flag set by parseargs().
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        ignorable = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                     errno.EPROTONOSUPPORT)
        if exc.errno not in ignorable:
            raise
        return False
219 219
# Close inherited file descriptors in children on POSIX so fds held by
# the runner do not leak into test processes.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn cmd through the shell in directory wd, popen4-style.

    Returns a subprocess.Popen with popen4-compatible aliases
    (fromchild/tochild/childerr) attached; stderr is merged into stdout.
    If timeout is truthy, a watchdog thread terminates the process and
    sets p.timeout = True once the limit elapses.
    """
    # NOTE(review): presumably serializes process creation against other
    # threads doing the same - confirm why the lock is needed here.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen4-style aliases expected by callers.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll in 100ms steps until the deadline or until returncode
            # is set (by whoever wait()s on the process elsewhere).
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
245 245
246 246 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
247 247 IMPL_PATH = b'PYTHONPATH'
248 248 if 'java' in sys.platform:
249 249 IMPL_PATH = b'JYTHONPATH'
250 250
251 251 defaults = {
252 252 'jobs': ('HGTEST_JOBS', 1),
253 253 'timeout': ('HGTEST_TIMEOUT', 180),
254 254 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
255 255 'port': ('HGTEST_PORT', 20059),
256 256 'shell': ('HGTEST_SHELL', 'sh'),
257 257 }
258 258
def canonpath(path):
    """Return path with "~" expanded and symlinks resolved to a real path."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
261 261
def parselistfiles(files, listtype, warn=True):
    """Parse list files (e.g. blacklists) into an {entry: filename} dict.

    Each file is read as bytes; '#' starts a comment and blank lines are
    skipped.  Missing files are tolerated (optionally with a warning so
    the caller knows the listtype file named on the command line was not
    found).  Fix: the file is now closed via a context manager even if
    reading raises, where the original leaked the handle on error.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
282 282
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    marker = b'#testcases '
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(marker):
                    cases.update(line[len(marker):].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases
298 298
def getparser():
    """Obtain the OptionParser used by the CLI.

    Options are grouped into Test Selection, Test Harness Behavior,
    Mercurial Configuration and Results Reporting.  Defaults for the
    options listed in the module-level `defaults` dict may be overridden
    via $HGTEST_* environment variables.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # Which tests to run.
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    # How the harness itself behaves while running tests.
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument('--order-by-runtime', action="store_true",
        help='run slowest tests first, according to .testtimes')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    # Which hg to test and how it is configured.
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")

    # How results are presented and recorded.
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    # Fold $HGTEST_* environment overrides into the option defaults,
    # coercing each to the type of its hard-coded default.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
435 435
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns the validated options namespace.  Invalid option
    combinations abort via parser.error().  As side effects this sets
    the module-level `useipv6` and `verbose` globals and normalizes
    several path options.
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        # Normalize to an absolute bytes path and sanity-check it.
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local means "use the hg (and optionally chg) built in the
        # repository containing this script".
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Non-False marker; log() also prints this prefix when truthy.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # Debug mode disables timeouts; warn if the user set them.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        # NOTE: this branch only runs under Python 2, where comparing
        # the bytes output against a str prefix works.
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # Channel display and diff output share the screen poorly.
        options.nodiff = True

    return options
568 568
def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.

    Implemented as copy-then-delete so it works where dst already
    exists (e.g. on Windows, where os.rename() would fail).
    """
    shutil.copy(src, dst)
    os.remove(src)
575 575
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    # On Python 3 difflib.unified_diff wants str; diff_bytes adapts it
    # so the harness can diff test output as bytes directly.
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
580 580
def getdiff(expected, output, ref, err):
    """Return (servefail, lines): a unified diff of expected vs output.

    File-header lines get backslashes normalized to forward slashes and
    a trailing space before the newline stripped.  servefail is True if
    the diff shows the "child process failed to start" abort.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith((b'+++', b'---')):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
595 595
# verbose is False when quiet; parseargs() rebinds it to a (possibly
# empty) string prefix when -v is given.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is not False:
        return log(*msg)
603 603
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
623 623
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock is presumably a module-level lock (defined elsewhere in
    # this file) serializing output from concurrent test threads.
    with iolock:
        if verbose:
            # When verbose is a non-empty string it acts as a prefix.
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
636 636
def highlightdiff(line, color):
    """Return line (bytes) colorized as a diff when color is truthy."""
    if not color:
        return line
    assert pygmentspresent
    text = line.decode('latin1')
    colorized = pygments.highlight(text, difflexer, terminal256formatter)
    return colorized.encode('latin1')
643 643
def highlightmsg(msg, color):
    """Return msg colorized with the test-runner lexer when color is truthy."""
    if color:
        assert pygmentspresent
        return pygments.highlight(msg, runnerlexer, runnerformatter)
    return msg
649 649
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # The process may already have exited; that's fine.
        pass
657 657
def killdaemons(pidfile):
    """Kill daemon processes listed in pidfile, removing it afterwards."""
    # Imported lazily; killdaemons.py presumably sits alongside this
    # script in the tests directory.
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
662 662
663 663 class Test(unittest.TestCase):
664 664 """Encapsulates a single, runnable test.
665 665
666 666 While this class conforms to the unittest.TestCase API, it differs in that
667 667 instances need to be instantiated manually. (Typically, unittest.TestCase
668 668 classes are instantiated automatically by scanning modules.)
669 669 """
670 670
671 671 # Status code reserved for skipped tests (used by hghave).
672 672 SKIPPED_STATUS = 80
673 673
674 674 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
675 675 debug=False,
676 676 first=False,
677 677 timeout=None,
678 678 startport=None, extraconfigopts=None,
679 679 py3kwarnings=False, shell=None, hgcommand=None,
680 680 slowtimeout=None, usechg=False,
681 681 useipv6=False):
682 682 """Create a test from parameters.
683 683
684 684 path is the full path to the file defining the test.
685 685
686 686 tmpdir is the main temporary directory to use for this test.
687 687
688 688 keeptmpdir determines whether to keep the test's temporary directory
689 689 after execution. It defaults to removal (False).
690 690
691 691 debug mode will make the test execute verbosely, with unfiltered
692 692 output.
693 693
694 694 timeout controls the maximum run time of the test. It is ignored when
695 695 debug is True. See slowtimeout for tests with #require slow.
696 696
697 697 slowtimeout overrides timeout if the test has #require slow.
698 698
699 699 startport controls the starting port number to use for this test. Each
700 700 test will reserve 3 port numbers for execution. It is the caller's
701 701 responsibility to allocate a non-overlapping port range to Test
702 702 instances.
703 703
704 704 extraconfigopts is an iterable of extra hgrc config options. Values
705 705 must have the form "key=value" (something understood by hgrc). Values
706 706 of the form "foo.key=value" will result in "[foo] key=value".
707 707
708 708 py3kwarnings enables Py3k warnings.
709 709
710 710 shell is the shell to execute tests in.
711 711 """
712 712 if timeout is None:
713 713 timeout = defaults['timeout']
714 714 if startport is None:
715 715 startport = defaults['port']
716 716 if slowtimeout is None:
717 717 slowtimeout = defaults['slowtimeout']
718 718 self.path = path
719 719 self.bname = os.path.basename(path)
720 720 self.name = _strpath(self.bname)
721 721 self._testdir = os.path.dirname(path)
722 722 self._outputdir = outputdir
723 723 self._tmpname = os.path.basename(path)
724 724 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
725 725
726 726 self._threadtmp = tmpdir
727 727 self._keeptmpdir = keeptmpdir
728 728 self._debug = debug
729 729 self._first = first
730 730 self._timeout = timeout
731 731 self._slowtimeout = slowtimeout
732 732 self._startport = startport
733 733 self._extraconfigopts = extraconfigopts or []
734 734 self._py3kwarnings = py3kwarnings
735 735 self._shell = _bytespath(shell)
736 736 self._hgcommand = hgcommand or b'hg'
737 737 self._usechg = usechg
738 738 self._useipv6 = useipv6
739 739
740 740 self._aborted = False
741 741 self._daemonpids = []
742 742 self._finished = None
743 743 self._ret = None
744 744 self._out = None
745 745 self._skipped = None
746 746 self._testtmp = None
747 747 self._chgsockdir = None
748 748
749 749 self._refout = self.readrefout()
750 750
751 751 def readrefout(self):
752 752 """read reference output"""
753 753 # If we're not in --debug mode and reference output file exists,
754 754 # check test output against it.
755 755 if self._debug:
756 756 return None # to match "out is None"
757 757 elif os.path.exists(self.refpath):
758 758 with open(self.refpath, 'rb') as f:
759 759 return f.read().splitlines(True)
760 760 else:
761 761 return []
762 762
763 763 # needed to get base class __repr__ running
764 764 @property
765 765 def _testMethodName(self):
766 766 return self.name
767 767
768 768 def __str__(self):
769 769 return self.name
770 770
771 771 def shortDescription(self):
772 772 return self.name
773 773
774 774 def setUp(self):
775 775 """Tasks to perform before run()."""
776 776 self._finished = False
777 777 self._ret = None
778 778 self._out = None
779 779 self._skipped = None
780 780
781 781 try:
782 782 os.mkdir(self._threadtmp)
783 783 except OSError as e:
784 784 if e.errno != errno.EEXIST:
785 785 raise
786 786
787 787 name = self._tmpname
788 788 self._testtmp = os.path.join(self._threadtmp, name)
789 789 os.mkdir(self._testtmp)
790 790
791 791 # Remove any previous output files.
792 792 if os.path.exists(self.errpath):
793 793 try:
794 794 os.remove(self.errpath)
795 795 except OSError as e:
796 796 # We might have raced another test to clean up a .err
797 797 # file, so ignore ENOENT when removing a previous .err
798 798 # file.
799 799 if e.errno != errno.ENOENT:
800 800 raise
801 801
802 802 if self._usechg:
803 803 self._chgsockdir = os.path.join(self._threadtmp,
804 804 b'%s.chgsock' % name)
805 805 os.mkdir(self._chgsockdir)
806 806
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # setUp failure is an error (infrastructure), not a failure.
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    # addFailure returning true means the new output was
                    # accepted (e.g. --interactive); treat as success.
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs, even after a failure above.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
861 861
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human readable description of an exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        # Interpret the exit status / output combination.
        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
930 930
931 931 def tearDown(self):
932 932 """Tasks to perform after run()."""
933 933 for entry in self._daemonpids:
934 934 killdaemons(entry)
935 935 self._daemonpids = []
936 936
937 937 if self._keeptmpdir:
938 938 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
939 939 (self._testtmp.decode('utf-8'),
940 940 self._threadtmp.decode('utf-8')))
941 941 else:
942 942 shutil.rmtree(self._testtmp, True)
943 943 shutil.rmtree(self._threadtmp, True)
944 944
945 945 if self._usechg:
946 946 # chgservers will stop automatically after they find the socket
947 947 # files are deleted
948 948 shutil.rmtree(self._chgsockdir, True)
949 949
950 950 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
951 951 and not self._debug and self._out:
952 952 with open(self.errpath, 'wb') as f:
953 953 for line in self._out:
954 954 f.write(line)
955 955
956 956 vlog("# Ret was:", self._ret, '(%s)' % self.name)
957 957
958 958 def _run(self, env):
959 959 # This should be implemented in child classes to run tests.
960 960 raise unittest.SkipTest('unknown test type')
961 961
962 962 def abort(self):
963 963 """Terminate execution of this test."""
964 964 self._aborted = True
965 965
966 966 def _portmap(self, i):
967 967 offset = b'' if i == 0 else b'%d' % i
968 968 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
969 969
970 970 def _getreplacements(self):
971 971 """Obtain a mapping of text replacements to apply to test output.
972 972
973 973 Test output needs to be normalized so it can be compared to expected
974 974 output. This function defines how some of that normalization will
975 975 occur.
976 976 """
977 977 r = [
978 978 # This list should be parallel to defineport in _getenv
979 979 self._portmap(0),
980 980 self._portmap(1),
981 981 self._portmap(2),
982 982 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
983 983 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
984 984 ]
985 985 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
986 986
987 987 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
988 988
989 989 if os.path.exists(replacementfile):
990 990 data = {}
991 991 with open(replacementfile, mode='rb') as source:
992 992 # the intermediate 'compile' step help with debugging
993 993 code = compile(source.read(), replacementfile, 'exec')
994 994 exec(code, data)
995 995 for value in data.get('substitutions', ()):
996 996 if len(value) != 2:
997 997 msg = 'malformatted substitution in %s: %r'
998 998 msg %= (replacementfile, value)
999 999 raise ValueError(msg)
1000 1000 r.append(value)
1001 1001 return r
1002 1002
1003 1003 def _escapepath(self, p):
1004 1004 if os.name == 'nt':
1005 1005 return (
1006 1006 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
1007 1007 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
1008 1008 for c in p))
1009 1009 )
1010 1010 else:
1011 1011 return re.escape(p)
1012 1012
1013 1013 def _localip(self):
1014 1014 if self._useipv6:
1015 1015 return b'::1'
1016 1016 else:
1017 1017 return b'127.0.0.1'
1018 1018
1019 1019 def _genrestoreenv(self, testenv):
1020 1020 """Generate a script that can be used by tests to restore the original
1021 1021 environment."""
1022 1022 # Put the restoreenv script inside self._threadtmp
1023 1023 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1024 1024 testenv['HGTEST_RESTOREENV'] = scriptpath
1025 1025
1026 1026 # Only restore environment variable names that the shell allows
1027 1027 # us to export.
1028 1028 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1029 1029
1030 1030 # Do not restore these variables; otherwise tests would fail.
1031 1031 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1032 1032
1033 1033 with open(scriptpath, 'w') as envf:
1034 1034 for name, value in origenviron.items():
1035 1035 if not name_regex.match(name):
1036 1036 # Skip environment variables with unusual names not
1037 1037 # allowed by most shells.
1038 1038 continue
1039 1039 if name in reqnames:
1040 1040 continue
1041 1041 envf.write('%s=%s\n' % (name, shellquote(value)))
1042 1042
1043 1043 for name in testenv:
1044 1044 if name in origenviron or name in reqnames:
1045 1045 continue
1046 1046 envf.write('unset %s\n' % (name,))
1047 1047
1048 1048 def _getenv(self):
1049 1049 """Obtain environment variables to use during test execution."""
1050 1050 def defineport(i):
1051 1051 offset = '' if i == 0 else '%s' % i
1052 1052 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1053 1053 env = os.environ.copy()
1054 1054 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1055 1055 env['HGEMITWARNINGS'] = '1'
1056 1056 env['TESTTMP'] = self._testtmp
1057 1057 env['TESTNAME'] = self.name
1058 1058 env['HOME'] = self._testtmp
1059 1059 # This number should match portneeded in _getport
1060 1060 for port in xrange(3):
1061 1061 # This list should be parallel to _portmap in _getreplacements
1062 1062 defineport(port)
1063 1063 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1064 1064 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1065 1065 env["HGEDITOR"] = ('"' + sys.executable + '"'
1066 1066 + ' -c "import sys; sys.exit(0)"')
1067 1067 env["HGMERGE"] = "internal:merge"
1068 1068 env["HGUSER"] = "test"
1069 1069 env["HGENCODING"] = "ascii"
1070 1070 env["HGENCODINGMODE"] = "strict"
1071 1071 env['HGIPV6'] = str(int(self._useipv6))
1072 1072
1073 1073 extraextensions = []
1074 1074 for opt in self._extraconfigopts:
1075 1075 section, key = opt.encode('utf-8').split(b'.', 1)
1076 1076 if section != 'extensions':
1077 1077 continue
1078 1078 name = key.split(b'=', 1)[0]
1079 1079 extraextensions.append(name)
1080 1080
1081 1081 if extraextensions:
1082 1082 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1083 1083
1084 1084 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1085 1085 # IP addresses.
1086 1086 env['LOCALIP'] = self._localip()
1087 1087
1088 1088 # Reset some environment variables to well-known values so that
1089 1089 # the tests produce repeatable output.
1090 1090 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1091 1091 env['TZ'] = 'GMT'
1092 1092 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1093 1093 env['COLUMNS'] = '80'
1094 1094 env['TERM'] = 'xterm'
1095 1095
1096 1096 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1097 1097 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1098 1098 'NO_PROXY CHGDEBUG').split():
1099 1099 if k in env:
1100 1100 del env[k]
1101 1101
1102 1102 # unset env related to hooks
1103 1103 for k in list(env):
1104 1104 if k.startswith('HG_'):
1105 1105 del env[k]
1106 1106
1107 1107 if self._usechg:
1108 1108 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1109 1109
1110 1110 return env
1111 1111
1112 1112 def _createhgrc(self, path):
1113 1113 """Create an hgrc file for this test."""
1114 1114 with open(path, 'wb') as hgrc:
1115 1115 hgrc.write(b'[ui]\n')
1116 1116 hgrc.write(b'slash = True\n')
1117 1117 hgrc.write(b'interactive = False\n')
1118 1118 hgrc.write(b'mergemarkers = detailed\n')
1119 1119 hgrc.write(b'promptecho = True\n')
1120 1120 hgrc.write(b'[defaults]\n')
1121 1121 hgrc.write(b'[devel]\n')
1122 1122 hgrc.write(b'all-warnings = true\n')
1123 1123 hgrc.write(b'default-date = 0 0\n')
1124 1124 hgrc.write(b'[largefiles]\n')
1125 1125 hgrc.write(b'usercache = %s\n' %
1126 1126 (os.path.join(self._testtmp, b'.cache/largefiles')))
1127 1127 hgrc.write(b'[lfs]\n')
1128 1128 hgrc.write(b'usercache = %s\n' %
1129 1129 (os.path.join(self._testtmp, b'.cache/lfs')))
1130 1130 hgrc.write(b'[web]\n')
1131 1131 hgrc.write(b'address = localhost\n')
1132 1132 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1133 1133 hgrc.write(b'server-header = testing stub value\n')
1134 1134
1135 1135 for opt in self._extraconfigopts:
1136 1136 section, key = opt.encode('utf-8').split(b'.', 1)
1137 1137 assert b'=' in key, ('extra config opt %s must '
1138 1138 'have an = for assignment' % opt)
1139 1139 hgrc.write(b'[%s]\n%s\n' % (section, key))
1140 1140
1141 1141 def fail(self, msg):
1142 1142 # unittest differentiates between errored and failed.
1143 1143 # Failed is denoted by AssertionError (by default at least).
1144 1144 raise AssertionError(msg)
1145 1145
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: run attached to our stdio, capture nothing.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; report SIGTERM if the child
            # otherwise exited cleanly after termination.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            # Sentinel value: callers check for the string 'timeout'.
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize output (ports, paths, ...) before comparison.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1194 1194
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        """Path of the .out file holding this test's expected output."""
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the test script with the configured Python."""
        py3kswitch = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output needs \r\n -> \n normalization before diffing.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1213 1213
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained. Lines matching any of these patterns glob implicitly.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1227 1227
# bchr(i): one-byte bytestring for integer i — bytes([i]) on Python 3,
# chr(i) on Python 2.
if PYTHON3:
    def bchr(x):
        return bytes([x])
else:
    bchr = chr
1231 1231
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Marker prefixes written by hghave for skipped/failed feature checks.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that require the (esc) encoding in recorded output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # (esc) encoding machinery: each control or non-ASCII byte maps to a
    # \xNN escape; backslash and CR get dedicated escapes.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1242 1242
1243 1243 def __init__(self, path, *args, **kwds):
1244 1244 # accept an extra "case" parameter
1245 1245 case = kwds.pop('case', None)
1246 1246 self._case = case
1247 1247 self._allcases = parsettestcases(path)
1248 1248 super(TTest, self).__init__(path, *args, **kwds)
1249 1249 if case:
1250 self.name = '%s (case %s)' % (self.name, _strpath(case))
1250 self.name = '%s#%s' % (self.name, _strpath(case))
1251 1251 self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
1252 1252 self._tmpname += b'-%s' % case
1253 1253 self._have = {}
1254 1254
    @property
    def refpath(self):
        # The .t file itself doubles as the reference output.
        return os.path.join(self._testdir, self.bname)
1258 1258
    def _run(self, env):
        """Parse the .t file, run the generated shell script and merge the
        resulting output back against the expectations."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1291 1291
    def _hghave(self, reqs):
        """Check feature requirements via the hghave script, with caching.

        Returns (True, None) when every feature in reqs is available,
        otherwise (False, hghave's stdout).
        """
        allreqs = b' '.join(reqs)
        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself is unusable; abort the whole runner.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        if b'slow' in reqs:
            # Tests requiring 'slow' get the longer timeout.
            self._timeout = self._slowtimeout

        self._have[allreqs] = (True, None)
        return True, None
1320 1320
1321 1321 def _iftest(self, args):
1322 1322 # implements "#if"
1323 1323 reqs = []
1324 1324 for arg in args:
1325 1325 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1326 1326 if arg[3:] == self._case:
1327 1327 return False
1328 1328 elif arg in self._allcases:
1329 1329 if arg != self._case:
1330 1330 return False
1331 1331 else:
1332 1332 reqs.append(arg)
1333 1333 return self._hghave(reqs)[0]
1334 1334
    def _parsetest(self, lines):
        """Translate .t file lines into a shell script plus bookkeeping.

        Returns (salt, script, after, expected); see inline comments for
        the role of each structure.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')
        if self._case:
            # Export the active case name so tests can inspect it.
            if isinstance(self._case, str):
                quoted = shellquote(self._case)
            else:
                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                if not skipping:
                    # Unmet requirements short-circuit into a skip (exit 80).
                    haveresult, message = self._hghave(lsplit[1:])
                    if not haveresult:
                        script = [b'echo "%s"\nexit 80\n' % message]
                        break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                # Make a failing "cd" abort the script instead of running
                # the rest of the test in the wrong directory.
                if len(cmd) == 2 and cmd[0] == b'cd':
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected
1460 1460
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Returns (exitcode, postout) where postout is the merged .t content;
        exitcode becomes False when only warn-only lines mismatched.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            # Split off the salt marker (if any) carrying line number/status.
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            # "while" used as an "if" with an "else" for the empty case.
            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        # Track optional lines so they can be kept verbatim.
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1570 1570
1571 1571 @staticmethod
1572 1572 def rematch(el, l):
1573 1573 try:
1574 1574 el = b'(?:' + el + b')'
1575 1575 # use \Z to ensure that the regex matches to the end of the string
1576 1576 if os.name == 'nt':
1577 1577 return re.match(el + br'\r?\n\Z', l)
1578 1578 return re.match(el + br'\n\Z', l)
1579 1579 except re.error:
1580 1580 # el is an invalid regex
1581 1581 return False
1582 1582
1583 1583 @staticmethod
1584 1584 def globmatch(el, l):
1585 1585 # The only supported special characters are * and ? plus / which also
1586 1586 # matches \ on windows. Escaping of these characters is supported.
1587 1587 if el + b'\n' == l:
1588 1588 if os.altsep:
1589 1589 # matching on "/" is not needed for this line
1590 1590 for pat in checkcodeglobpats:
1591 1591 if pat.match(el):
1592 1592 return True
1593 1593 return b'-glob'
1594 1594 return True
1595 1595 el = el.replace(b'$LOCALIP', b'*')
1596 1596 i, n = 0, len(el)
1597 1597 res = b''
1598 1598 while i < n:
1599 1599 c = el[i:i + 1]
1600 1600 i += 1
1601 1601 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1602 1602 res += el[i - 1:i + 1]
1603 1603 i += 1
1604 1604 elif c == b'*':
1605 1605 res += b'.*'
1606 1606 elif c == b'?':
1607 1607 res += b'.'
1608 1608 elif c == b'/' and os.altsep:
1609 1609 res += b'[/\\\\]'
1610 1610 else:
1611 1611 res += re.escape(c)
1612 1612 return TTest.rematch(res, l)
1613 1613
    def linematch(self, el, l):
        """Match one expected line el against actual output line l.

        Result is truthy on a match; the string "retry" marks an optional
        line that did not match, and globmatch may yield b'-glob'.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                # Optional line: a miss becomes "retry" rather than failure.
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                # Decode the (esc) encoding before comparing.
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep:
                # Retry with backslashes normalized to forward slashes.
                _l = l.replace(b'\\', b'/')
                if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                    return True
        return retry
1651 1651
1652 1652 @staticmethod
1653 1653 def parsehghaveoutput(lines):
1654 1654 '''Parse hghave log lines.
1655 1655
1656 1656 Return tuple of lists (missing, failed):
1657 1657 * the missing/unknown features
1658 1658 * the features for which existence check failed'''
1659 1659 missing = []
1660 1660 failed = []
1661 1661 for line in lines:
1662 1662 if line.startswith(TTest.SKIPPED_PREFIX):
1663 1663 line = line.splitlines()[0]
1664 1664 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1665 1665 elif line.startswith(TTest.FAILED_PREFIX):
1666 1666 line = line.splitlines()[0]
1667 1667 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1668 1668
1669 1669 return missing, failed
1670 1670
    @staticmethod
    def _escapef(m):
        # re.sub callback: map one matched byte to its escape sequence.
        return TTest.ESCAPEMAP[m.group(0)]
1674 1674
    @staticmethod
    def _stringescape(s):
        # Apply the (esc) encoding to s using ESCAPESUB/_escapef.
        return TTest.ESCAPESUB(TTest._escapef, s)
1678 1678
# Serializes console output across worker threads.
iolock = threading.RLock()
# Guards the first-failure bookkeeping (see runTest / firsterror).
firstlock = threading.RLock()
# Set once a --first-style failure has been reported.
firsterror = False
1682 1682
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        """Wrap _TextTestResult with run-tests specific bookkeeping."""
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Timing data; populated by start/stop hooks elsewhere (exact entry
        # shape not visible here).
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        # Decide up front whether terminal output gets colorized.
        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1713 1713
1714 1714 def addFailure(self, test, reason):
1715 1715 self.failures.append((test, reason))
1716 1716
1717 1717 if self._options.first:
1718 1718 self.stop()
1719 1719 else:
1720 1720 with iolock:
1721 1721 if reason == "timed out":
1722 1722 self.stream.write('t')
1723 1723 else:
1724 1724 if not self._options.nodiff:
1725 1725 self.stream.write('\n')
1726 1726 # Exclude the '\n' from highlighting to lex correctly
1727 1727 formatted = 'ERROR: %s output changed\n' % test
1728 1728 self.stream.write(highlightmsg(formatted, self.color))
1729 1729 self.stream.write('!')
1730 1730
1731 1731 self.stream.flush()
1732 1732
    def addSuccess(self, test):
        """Record a passing test (console output serialized via iolock)."""
        with iolock:
            super(TestResult, self).addSuccess(test)
            self.successes.append(test)
1737 1737
    def addError(self, test, err):
        """Record a test that errored (as opposed to failed)."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            # --first: abort the entire run on the first problem.
            self.stop()
1742 1742
    # Polyfill for the ``skipped`` support unittest only gained in 2.7.
    def addSkip(self, test, reason):
        """Record a skipped test and print a marker or the full reason."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()
1752 1752
    def addIgnore(self, test, reason):
        """Record a test that was ignored (our custom result category)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # NOTE(review): for the two "quiet" reasons above no
                    # marker is printed but testsRun is bumped anyway —
                    # presumably to keep the summary count consistent;
                    # confirm before changing.
                    self.testsRun += 1
                self.stream.flush()
1764 1764
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Depending on options: show nothing (--nodiff), open an external
        diff viewer (--view), or print a highlighted diff. With
        --interactive the user may accept the new output, in which case
        the .err output replaces the reference file. Returns True when
        the change was accepted interactively.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Hand both files to the user-specified diff viewer.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Diff lines are bytes; bypass the text wrapper,
                        # flushing around the raw write to keep ordering.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    # The .t file changed underneath us; accepting now
                    # could clobber edits, so refuse and ask for a re-run.
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1822 1822
    def startTest(self, test):
        """Record start-of-test timing state."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            # index 4 of os.times() is elapsed real time
            self._firststarttime = test.started[4]
1833 1833
    def stopTest(self, test, interrupted=False):
        """Record end-of-test timing and report interruption if any."""
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        # Timing tuple layout is relied upon by savetimes/printtimes.
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1854 1854
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        # NOTE(review): ``testdir`` is accepted but never stored or used
        # here; kept for interface compatibility.
        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        # Callable used to (re)load a test instance when it asks for it
        # via its ``should_reload`` attribute.
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]  # one-element list so the closure below can mutate it
        for test in self._tests:
            def get():
                # Hand back the test to run, reloading it for a fresh
                # instance when the test requests that.
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # A whitelisted test bypasses the blacklist/retest/keyword
            # filters below.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Keywords are matched against the test body plus its
                    # (lowercased) name.
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One display slot per job, used by the --showchannels progress view.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim the first free channel to display this test's progress.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Body of the --showchannels status thread: periodically render
            # one character per channel until ``channels`` becomes empty.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Reap a finished test once all workers are busy or the
                # pending queue has drained.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test so the run never ends.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Rebinding the closed-over name to an empty list makes the stat()
        # thread's ``while channels`` loop terminate.
        channels = []

        return result
2040 2040
2041 2041 # Save the most recent 5 wall-clock runtimes of each test to a
2042 2042 # human-readable text file named .testtimes. Tests are sorted
2043 2043 # alphabetically, while times for each test are listed from oldest to
2044 2044 # newest.
2045 2045
def loadtimes(outputdir):
    """Load recorded per-test runtimes from ``outputdir``.

    Returns a list of ``(testname, [runtime, ...])`` pairs parsed from the
    ``.testtimes`` file, runtimes ordered oldest to newest. A missing file
    yields an empty list. Lines that do not look like a times record are
    skipped instead of raising, so a corrupt or hand-edited file cannot
    abort the whole run (previously ``m.group`` raised AttributeError on
    an unparseable line).
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if m is None:
                    # Tolerate (and ignore) malformed lines.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2058 2058
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into ``.testtimes``.

    Keeps at most the five most recent runtimes per test (oldest first),
    skipping tests that were skipped this run, and replaces the file
    atomically via a temp file + rename.
    """
    known = dict(loadtimes(outputdir))
    maxruns = 5
    skippednames = set([str(entry[0]) for entry in result.skipped])
    for timedata in result.times:
        name, wallclock = timedata[0], timedata[3]
        if name in skippednames:
            continue
        history = known.setdefault(name, [])
        history.append(wallclock)
        # Trim in place so the dict keeps the same list object.
        history[:] = history[-maxruns:]

    fd, scratch = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(known.items()):
            formatted = ' '.join(['%.3f' % (t,) for t in history])
            fp.write('%s %s\n' % (name, formatted))
    finalpath = os.path.join(outputdir, b'.testtimes')
    # Best effort replace; failures (e.g. permission issues) are ignored
    # because timing data is purely advisory.
    try:
        os.unlink(finalpath)
    except OSError:
        pass
    try:
        os.rename(scratch, finalpath)
    except OSError:
        pass
2084 2084
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; provides options and output directory.
        self._runner = runner

    def listtests(self, test):
        """Print test names instead of running them (--list-tests).

        Every listed test is recorded as a success so the optional xunit
        and JSON reports still get written.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run the suite and print the summary, reports, and timings."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            # Persist runtimes for future --order-by-runtime runs.
            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Use ``hg bisect`` to find the revision that broke each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run a command with HGPLAIN set and capture combined output.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            # (The trailing comma on the next line is a harmless no-op.)
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            # Parse bisect's human-readable verdict out of the output.
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit/JUnit-style XML report of ``result`` to ``outf``."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a JSON report (``testreport = {...}``) of ``result``."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2325 2325
def sorttests(testdescs, previoustimes, shuffle=False):
    """Sort *testdescs* in place so slow tests run first.

    With ``shuffle`` the order is randomized instead. When recorded
    runtimes are available they drive the ordering; otherwise file size
    (weighted by keywords known to indicate slow tests) is the proxy.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(desc):
            path = desc['path']
            if path in previoustimes:
                # Most recent recorded runtime, negated so that the
                # slowest tests sort to the front.
                return -previoustimes[path][-1]
            # New test: assume a rather arbitrary one second.
            return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        cache = {}

        def sortkey(desc):
            # Biggest files first: size is a rough proxy for duration.
            path = desc['path']
            if path in cache:
                return cache[path]
            try:
                score = -os.stat(path).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                cache[path] = -1e9 # file does not exist, tell early
                return -1e9
            for keyword, factor in slow.items():
                if keyword in path:
                    score *= factor
            if path.endswith(b'.py'):
                score /= 10.0
            cache[path] = score / 1000.0
            return cache[path]

    testdescs.sort(key=sortkey)
2379 2379
2380 2380 class TestRunner(object):
2381 2381 """Holds context for executing tests.
2382 2382
2383 2383 Tests rely on a lot of state. This object holds it for them.
2384 2384 """
2385 2385
2386 2386 # Programs required to run tests.
2387 2387 REQUIREDTOOLS = [
2388 2388 b'diff',
2389 2389 b'grep',
2390 2390 b'unzip',
2391 2391 b'gunzip',
2392 2392 b'bunzip2',
2393 2393 b'sed',
2394 2394 ]
2395 2395
2396 2396 # Maps file extensions to test class.
2397 2397 TESTTYPES = [
2398 2398 (b'.py', PythonTest),
2399 2399 (b'.t', TTest),
2400 2400 ]
2401 2401
2402 2402 def __init__(self):
2403 2403 self.options = None
2404 2404 self._hgroot = None
2405 2405 self._testdir = None
2406 2406 self._outputdir = None
2407 2407 self._hgtmp = None
2408 2408 self._installdir = None
2409 2409 self._bindir = None
2410 2410 self._tmpbinddir = None
2411 2411 self._pythondir = None
2412 2412 self._coveragefile = None
2413 2413 self._createdfiles = []
2414 2414 self._hgcommand = None
2415 2415 self._hgpath = None
2416 2416 self._portoffset = 0
2417 2417 self._ports = {}
2418 2418
    def run(self, args, parser=None):
        """Run the test suite.

        Parses ``args`` (plus any --test-list files), verifies required
        tools, discovers tests, and delegates to _run(), optionally under
        the statprof profiler. The process umask is forced to 0o22 for the
        duration and restored afterwards.
        """
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                # Extend with test names read from --test-list files,
                # skipping blank lines.
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2445 2445
    def _run(self, testdescs):
        """Set up the execution environment and run the tests.

        Establishes TESTDIR/HGTMP/PATH and related environment variables,
        decides between an installed hg and --with-hg, orders the tests,
        and finally hands off to _runtests(). Returns the exit code.
        """
        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # Use the caller-provided hg; nothing gets installed.
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            # Temp install of hg from the working copy.
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Brief pause so child processes can settle before cleanup.
            time.sleep(.1)
            self._cleanup()
2623 2623
2624 2624 def findtests(self, args):
2625 2625 """Finds possible test files from arguments.
2626 2626
2627 2627 If you wish to inject custom tests into the test harness, this would
2628 2628 be a good function to monkeypatch or override in a derived class.
2629 2629 """
2630 2630 if not args:
2631 2631 if self.options.changed:
2632 2632 proc = Popen4('hg st --rev "%s" -man0 .' %
2633 2633 self.options.changed, None, 0)
2634 2634 stdout, stderr = proc.communicate()
2635 2635 args = stdout.strip(b'\0').split(b'\0')
2636 2636 else:
2637 2637 args = os.listdir(b'.')
2638 2638
2639 2639 expanded_args = []
2640 2640 for arg in args:
2641 2641 if os.path.isdir(arg):
2642 2642 if not arg.endswith(b'/'):
2643 2643 arg += b'/'
2644 2644 expanded_args.extend([arg + a for a in os.listdir(arg)])
2645 2645 else:
2646 2646 expanded_args.append(arg)
2647 2647 args = expanded_args
2648 2648
2649 testcasepattern = re.compile(r'([\w-]+\.t|py)( \(case ([\w-])+\))')
2649 testcasepattern = re.compile(r'([\w-]+\.t|py)(#([\w-])+)')
2650 2650 tests = []
2651 2651 for t in args:
2652 2652 case = None
2653 2653
2654 2654 if not (os.path.basename(t).startswith(b'test-')
2655 2655 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2656 2656
2657 2657 m = testcasepattern.match(t)
2658 2658 if m is not None:
2659 2659 t, _, case = m.groups()
2660 2660 else:
2661 2661 continue
2662 2662
2663 2663 if t.endswith(b'.t'):
2664 2664 # .t file may contain multiple test cases
2665 2665 cases = sorted(parsettestcases(t))
2666 2666 if cases:
2667 2667 if case is not None and case in cases:
2668 2668 tests += [{'path': t, 'case': case}]
2669 2669 elif case is not None and case not in cases:
2670 2670 # Ignore invalid cases
2671 2671 pass
2672 2672 else:
2673 2673 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2674 2674 else:
2675 2675 tests.append({'path': t})
2676 2676 else:
2677 2677 tests.append({'path': t})
2678 2678 return tests
2679 2679
    def _runtests(self, testdescs):
        """Build and run the test suite for ``testdescs``.

        ``testdescs`` is a list of dicts with a 'path' key and an optional
        'case' key. Returns 1 if any test failed or the run was interrupted,
        otherwise None (the caller treats a falsy result as success).
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: skip ahead to the first test whose previous run
                # left an .err file behind, i.e. the first earlier failure.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    # No prior failure found; fall back to the full list.
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                # TestSuite expects bytes keywords on Python 3.
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                # --list-tests: just enumerate, do not install or run.
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2754 2754
2755 2755 def _getport(self, count):
2756 2756 port = self._ports.get(count) # do we have a cached entry?
2757 2757 if port is None:
2758 2758 portneeded = 3
2759 2759 # above 100 tries we just give up and let test reports failure
2760 2760 for tries in xrange(100):
2761 2761 allfree = True
2762 2762 port = self.options.port + self._portoffset
2763 2763 for idx in xrange(portneeded):
2764 2764 if not checkportisavailable(port + idx):
2765 2765 allfree = False
2766 2766 break
2767 2767 self._portoffset += portneeded
2768 2768 if allfree:
2769 2769 break
2770 2770 self._ports[count] = port
2771 2771 return port
2772 2772
2773 2773 def _gettest(self, testdesc, count):
2774 2774 """Obtain a Test by looking at its filename.
2775 2775
2776 2776 Returns a Test instance. The Test may not be runnable if it doesn't
2777 2777 map to a known type.
2778 2778 """
2779 2779 path = testdesc['path']
2780 2780 lctest = path.lower()
2781 2781 testcls = Test
2782 2782
2783 2783 for ext, cls in self.TESTTYPES:
2784 2784 if lctest.endswith(ext):
2785 2785 testcls = cls
2786 2786 break
2787 2787
2788 2788 refpath = os.path.join(self._testdir, path)
2789 2789 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2790 2790
2791 2791 # extra keyword parameters. 'case' is used by .t tests
2792 2792 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2793 2793
2794 2794 t = testcls(refpath, self._outputdir, tmpdir,
2795 2795 keeptmpdir=self.options.keep_tmpdir,
2796 2796 debug=self.options.debug,
2797 2797 first=self.options.first,
2798 2798 timeout=self.options.timeout,
2799 2799 startport=self._getport(count),
2800 2800 extraconfigopts=self.options.extra_config_opt,
2801 2801 py3kwarnings=self.options.py3k_warnings,
2802 2802 shell=self.options.shell,
2803 2803 hgcommand=self._hgcommand,
2804 2804 usechg=bool(self.options.with_chg or self.options.chg),
2805 2805 useipv6=useipv6, **kwds)
2806 2806 t.should_reload = True
2807 2807 return t
2808 2808
2809 2809 def _cleanup(self):
2810 2810 """Clean up state from this test invocation."""
2811 2811 if self.options.keep_tmpdir:
2812 2812 return
2813 2813
2814 2814 vlog("# Cleaning up HGTMP", self._hgtmp)
2815 2815 shutil.rmtree(self._hgtmp, True)
2816 2816 for f in self._createdfiles:
2817 2817 try:
2818 2818 os.remove(f)
2819 2819 except OSError:
2820 2820 pass
2821 2821
2822 2822 def _usecorrectpython(self):
2823 2823 """Configure the environment to use the appropriate Python in tests."""
2824 2824 # Tests must use the same interpreter as us or bad things will happen.
2825 2825 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2826 2826 if getattr(os, 'symlink', None):
2827 2827 vlog("# Making python executable in test path a symlink to '%s'" %
2828 2828 sys.executable)
2829 2829 mypython = os.path.join(self._tmpbindir, pyexename)
2830 2830 try:
2831 2831 if os.readlink(mypython) == sys.executable:
2832 2832 return
2833 2833 os.unlink(mypython)
2834 2834 except OSError as err:
2835 2835 if err.errno != errno.ENOENT:
2836 2836 raise
2837 2837 if self._findprogram(pyexename) != sys.executable:
2838 2838 try:
2839 2839 os.symlink(sys.executable, mypython)
2840 2840 self._createdfiles.append(mypython)
2841 2841 except OSError as err:
2842 2842 # child processes may race, which is harmless
2843 2843 if err.errno != errno.EEXIST:
2844 2844 raise
2845 2845 else:
2846 2846 exedir, exename = os.path.split(sys.executable)
2847 2847 vlog("# Modifying search path to find %s as %s in '%s'" %
2848 2848 (exename, pyexename, exedir))
2849 2849 path = os.environ['PATH'].split(os.pathsep)
2850 2850 while exedir in path:
2851 2851 path.remove(exedir)
2852 2852 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2853 2853 if not self._findprogram(pyexename):
2854 2854 print("WARNING: Cannot find %s in search path" % pyexename)
2855 2855
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.

        Builds from the source tree containing this script by shelling out
        to ``setup.py`` and installs under ``self._installdir``; on build
        failure the captured install log is replayed and the process exits.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            # setup.py command line is assembled as bytes below.
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            # mkdir -p semantics: ignore "already exists", raise otherwise.
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            # Success: discard the log unless the user wants verbosity.
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Failure: replay the captured install log, then bail out.
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            # Append '-3' to the shebang line of the installed hg script.
            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                lines = [line.rstrip() for line in f]
                lines[0] += ' -3'
            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
                for line in lines:
                    f.write(line + '\n')

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Hook coverage collection into every spawned Python process.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2969 2969
2970 2970 def _checkhglib(self, verb):
2971 2971 """Ensure that the 'mercurial' package imported by python is
2972 2972 the one we expect it to be. If not, print a warning to stderr."""
2973 2973 if ((self._bindir == self._pythondir) and
2974 2974 (self._bindir != self._tmpbindir)):
2975 2975 # The pythondir has been inferred from --with-hg flag.
2976 2976 # We cannot expect anything sensible here.
2977 2977 return
2978 2978 expecthg = os.path.join(self._pythondir, b'mercurial')
2979 2979 actualhg = self._gethgpath()
2980 2980 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2981 2981 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2982 2982 ' (expected %s)\n'
2983 2983 % (verb, actualhg, expecthg))
2984 2984 def _gethgpath(self):
2985 2985 """Return the path to the mercurial package that is actually found by
2986 2986 the current Python interpreter."""
2987 2987 if self._hgpath is not None:
2988 2988 return self._hgpath
2989 2989
2990 2990 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2991 2991 cmd = cmd % PYTHON
2992 2992 if PYTHON3:
2993 2993 cmd = _strpath(cmd)
2994 2994 pipe = os.popen(cmd)
2995 2995 try:
2996 2996 self._hgpath = _bytespath(pipe.read().strip())
2997 2997 finally:
2998 2998 pipe.close()
2999 2999
3000 3000 return self._hgpath
3001 3001
3002 3002 def _installchg(self):
3003 3003 """Install chg into the test environment"""
3004 3004 vlog('# Performing temporary installation of CHG')
3005 3005 assert os.path.dirname(self._bindir) == self._installdir
3006 3006 assert self._hgroot, 'must be called after _installhg()'
3007 3007 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3008 3008 % {b'make': 'make', # TODO: switch by option or environment?
3009 3009 b'prefix': self._installdir})
3010 3010 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3011 3011 vlog("# Running", cmd)
3012 3012 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3013 3013 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3014 3014 stderr=subprocess.STDOUT)
3015 3015 out, _err = proc.communicate()
3016 3016 if proc.returncode != 0:
3017 3017 if PYTHON3:
3018 3018 sys.stdout.buffer.write(out)
3019 3019 else:
3020 3020 sys.stdout.write(out)
3021 3021 sys.exit(1)
3022 3022
3023 3023 def _outputcoverage(self):
3024 3024 """Produce code coverage output."""
3025 3025 import coverage
3026 3026 coverage = coverage.coverage
3027 3027
3028 3028 vlog('# Producing coverage report')
3029 3029 # chdir is the easiest way to get short, relative paths in the
3030 3030 # output.
3031 3031 os.chdir(self._hgroot)
3032 3032 covdir = os.path.join(self._installdir, '..', 'coverage')
3033 3033 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3034 3034
3035 3035 # Map install directory paths back to source directory.
3036 3036 cov.config.paths['srcdir'] = ['.', self._pythondir]
3037 3037
3038 3038 cov.combine()
3039 3039
3040 3040 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3041 3041 cov.report(ignore_errors=True, omit=omit)
3042 3042
3043 3043 if self.options.htmlcov:
3044 3044 htmldir = os.path.join(self._outputdir, 'htmlcov')
3045 3045 cov.html_report(directory=htmldir, omit=omit)
3046 3046 if self.options.annotate:
3047 3047 adir = os.path.join(self._outputdir, 'annotated')
3048 3048 if not os.path.isdir(adir):
3049 3049 os.mkdir(adir)
3050 3050 cov.annotate(directory=adir, omit=omit)
3051 3051
3052 3052 def _findprogram(self, program):
3053 3053 """Search PATH for a executable program"""
3054 3054 dpb = _bytespath(os.defpath)
3055 3055 sepb = _bytespath(os.pathsep)
3056 3056 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3057 3057 name = os.path.join(p, program)
3058 3058 if os.name == 'nt' or os.access(name, os.X_OK):
3059 3059 return name
3060 3060 return None
3061 3061
3062 3062 def _checktools(self):
3063 3063 """Ensure tools required to run tests are present."""
3064 3064 for p in self.REQUIREDTOOLS:
3065 3065 if os.name == 'nt' and not p.endswith('.exe'):
3066 3066 p += '.exe'
3067 3067 found = self._findprogram(p)
3068 3068 if found:
3069 3069 vlog("# Found prerequisite", p, "at", found)
3070 3070 else:
3071 3071 print("WARNING: Did not find prerequisite tool: %s " %
3072 3072 p.decode("utf-8"))
3073 3073
def aggregateexceptions(path):
    """Summarize exception report files found under ``path``.

    Each report file holds five NUL-separated fields: exception, main
    frame, hg frame, hg line, and test name. Files not matching that
    layout are skipped. Returns a dict of aggregate counters keyed by
    (hgframe, hgline, exc) failure tuples.
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for reportfile in os.listdir(path):
        with open(os.path.join(path, reportfile), 'rb') as fobj:
            fields = fobj.read().split(b'\0')
            if len(fields) != 5:
                # Malformed or truncated report; ignore it.
                continue

            exc, mainframe, hgframe, hgline, testname = (
                field.decode('utf-8') for field in fields)

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure. Ties resolve to
    # the alphabetically-first test, matching min() over the sorted list.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        besttest = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[key] = (len(failuresbytest[besttest]), besttest)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {}
    for key, count in exceptioncounts.items():
        fewestcount, fewesttest = leastfailing[key]
        combined[key] = (count,
                         len(testsbyfailure[key]),
                         fewestcount,
                         fewesttest)

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3126 3126
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, switch the standard streams to binary mode so that
        # output bytes are not mangled by newline translation.
        import msvcrt
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1693 +1,1693
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ $PYTHON $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10
11 11 # Ran 0 tests, 0 skipped, 0 failed.
12 12
13 13 Define a helper to avoid the install step
14 14 =============
15 15 $ rt()
16 16 > {
17 17 > $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
18 18 > }
19 19
20 20 error paths
21 21
22 22 #if symlink
23 23 $ ln -s `which true` hg
24 24 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
25 25 warning: --with-hg should specify an hg script
26 26
27 27 # Ran 0 tests, 0 skipped, 0 failed.
28 28 $ rm hg
29 29 #endif
30 30
31 31 #if execbit
32 32 $ touch hg
33 33 $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
34 34 usage: run-tests.py [options] [tests]
35 35 run-tests.py: error: --with-hg must specify an executable hg script
36 36 [2]
37 37 $ rm hg
38 38 #endif
39 39
40 40 Features for testing optional lines
41 41 ===================================
42 42
43 43 $ cat > hghaveaddon.py <<EOF
44 44 > import hghave
45 45 > @hghave.check("custom", "custom hghave feature")
46 46 > def has_custom():
47 47 > return True
48 48 > @hghave.check("missing", "missing hghave feature")
49 49 > def has_missing():
50 50 > return False
51 51 > EOF
52 52
53 53 an empty test
54 54 =======================
55 55
56 56 $ touch test-empty.t
57 57 $ rt
58 58 .
59 59 # Ran 1 tests, 0 skipped, 0 failed.
60 60 $ rm test-empty.t
61 61
62 62 a successful test
63 63 =======================
64 64
65 65 $ cat > test-success.t << EOF
66 66 > $ echo babar
67 67 > babar
68 68 > $ echo xyzzy
69 69 > dont_print (?)
70 70 > nothing[42]line (re) (?)
71 71 > never*happens (glob) (?)
72 72 > more_nothing (?)
73 73 > xyzzy
74 74 > nor this (?)
75 75 > $ printf 'abc\ndef\nxyz\n'
76 76 > 123 (?)
77 77 > abc
78 78 > def (?)
79 79 > 456 (?)
80 80 > xyz
81 81 > $ printf 'zyx\nwvu\ntsr\n'
82 82 > abc (?)
83 83 > zyx (custom !)
84 84 > wvu
85 85 > no_print (no-custom !)
86 86 > tsr (no-missing !)
87 87 > missing (missing !)
88 88 > EOF
89 89
90 90 $ rt
91 91 .
92 92 # Ran 1 tests, 0 skipped, 0 failed.
93 93
94 94 failing test
95 95 ==================
96 96
97 97 test churn with globs
98 98 $ cat > test-failure.t <<EOF
99 99 > $ echo "bar-baz"; echo "bar-bad"; echo foo
100 100 > bar*bad (glob)
101 101 > bar*baz (glob)
102 102 > | fo (re)
103 103 > EOF
104 104 $ rt test-failure.t
105 105
106 106 --- $TESTTMP/test-failure.t
107 107 +++ $TESTTMP/test-failure.t.err
108 108 @@ -1,4 +1,4 @@
109 109 $ echo "bar-baz"; echo "bar-bad"; echo foo
110 110 + bar*baz (glob)
111 111 bar*bad (glob)
112 112 - bar*baz (glob)
113 113 - | fo (re)
114 114 + foo
115 115
116 116 ERROR: test-failure.t output changed
117 117 !
118 118 Failed test-failure.t: output changed
119 119 # Ran 1 tests, 0 skipped, 1 failed.
120 120 python hash seed: * (glob)
121 121 [1]
122 122
123 123 test diff colorisation
124 124
125 125 #if no-windows pygments
126 126 $ rt test-failure.t --color always
127 127
128 128 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
129 129 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
130 130 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
131 131 $ echo "bar-baz"; echo "bar-bad"; echo foo
132 132 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
133 133 bar*bad (glob)
134 134 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
135 135 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
136 136 \x1b[38;5;34m+ foo\x1b[39m (esc)
137 137
138 138 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
139 139 !
140 140 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
141 141 # Ran 1 tests, 0 skipped, 1 failed.
142 142 python hash seed: * (glob)
143 143 [1]
144 144
145 145 $ rt test-failure.t 2> tmp.log
146 146 [1]
147 147 $ cat tmp.log
148 148
149 149 --- $TESTTMP/test-failure.t
150 150 +++ $TESTTMP/test-failure.t.err
151 151 @@ -1,4 +1,4 @@
152 152 $ echo "bar-baz"; echo "bar-bad"; echo foo
153 153 + bar*baz (glob)
154 154 bar*bad (glob)
155 155 - bar*baz (glob)
156 156 - | fo (re)
157 157 + foo
158 158
159 159 ERROR: test-failure.t output changed
160 160 !
161 161 Failed test-failure.t: output changed
162 162 # Ran 1 tests, 0 skipped, 1 failed.
163 163 python hash seed: * (glob)
164 164 #endif
165 165
166 166 $ cat > test-failure.t << EOF
167 167 > $ true
168 168 > should go away (true !)
169 169 > $ true
170 170 > should stay (false !)
171 171 >
172 172 > Should remove first line, not second or third
173 173 > $ echo 'testing'
174 174 > baz*foo (glob) (true !)
175 175 > foobar*foo (glob) (false !)
176 176 > te*ting (glob) (true !)
177 177 >
178 178 > Should keep first two lines, remove third and last
179 179 > $ echo 'testing'
180 180 > test.ng (re) (true !)
181 181 > foo.ar (re) (false !)
182 182 > b.r (re) (true !)
183 183 > missing (?)
184 184 > awol (true !)
185 185 >
186 186 > The "missing" line should stay, even though awol is dropped
187 187 > $ echo 'testing'
188 188 > test.ng (re) (true !)
189 189 > foo.ar (?)
190 190 > awol
191 191 > missing (?)
192 192 > EOF
193 193 $ rt test-failure.t
194 194
195 195 --- $TESTTMP/test-failure.t
196 196 +++ $TESTTMP/test-failure.t.err
197 197 @@ -1,11 +1,9 @@
198 198 $ true
199 199 - should go away (true !)
200 200 $ true
201 201 should stay (false !)
202 202
203 203 Should remove first line, not second or third
204 204 $ echo 'testing'
205 205 - baz*foo (glob) (true !)
206 206 foobar*foo (glob) (false !)
207 207 te*ting (glob) (true !)
208 208
209 209 foo.ar (re) (false !)
210 210 missing (?)
211 211 @@ -13,13 +11,10 @@
212 212 $ echo 'testing'
213 213 test.ng (re) (true !)
214 214 foo.ar (re) (false !)
215 215 - b.r (re) (true !)
216 216 missing (?)
217 217 - awol (true !)
218 218
219 219 The "missing" line should stay, even though awol is dropped
220 220 $ echo 'testing'
221 221 test.ng (re) (true !)
222 222 foo.ar (?)
223 223 - awol
224 224 missing (?)
225 225
226 226 ERROR: test-failure.t output changed
227 227 !
228 228 Failed test-failure.t: output changed
229 229 # Ran 1 tests, 0 skipped, 1 failed.
230 230 python hash seed: * (glob)
231 231 [1]
232 232
233 233 basic failing test
234 234 $ cat > test-failure.t << EOF
235 235 > $ echo babar
236 236 > rataxes
237 237 > This is a noop statement so that
238 238 > this test is still more bytes than success.
239 239 > pad pad pad pad............................................................
240 240 > pad pad pad pad............................................................
241 241 > pad pad pad pad............................................................
242 242 > pad pad pad pad............................................................
243 243 > pad pad pad pad............................................................
244 244 > pad pad pad pad............................................................
245 245 > EOF
246 246
247 247 >>> fh = open('test-failure-unicode.t', 'wb')
248 248 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
249 249 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
250 250
251 251 $ rt
252 252
253 253 --- $TESTTMP/test-failure.t
254 254 +++ $TESTTMP/test-failure.t.err
255 255 @@ -1,5 +1,5 @@
256 256 $ echo babar
257 257 - rataxes
258 258 + babar
259 259 This is a noop statement so that
260 260 this test is still more bytes than success.
261 261 pad pad pad pad............................................................
262 262
263 263 ERROR: test-failure.t output changed
264 264 !.
265 265 --- $TESTTMP/test-failure-unicode.t
266 266 +++ $TESTTMP/test-failure-unicode.t.err
267 267 @@ -1,2 +1,2 @@
268 268 $ echo babar\xce\xb1 (esc)
269 269 - l\xce\xb5\xce\xb5t (esc)
270 270 + babar\xce\xb1 (esc)
271 271
272 272 ERROR: test-failure-unicode.t output changed
273 273 !
274 274 Failed test-failure.t: output changed
275 275 Failed test-failure-unicode.t: output changed
276 276 # Ran 3 tests, 0 skipped, 2 failed.
277 277 python hash seed: * (glob)
278 278 [1]
279 279
280 280 test --outputdir
281 281 $ mkdir output
282 282 $ rt --outputdir output
283 283
284 284 --- $TESTTMP/test-failure.t
285 285 +++ $TESTTMP/output/test-failure.t.err
286 286 @@ -1,5 +1,5 @@
287 287 $ echo babar
288 288 - rataxes
289 289 + babar
290 290 This is a noop statement so that
291 291 this test is still more bytes than success.
292 292 pad pad pad pad............................................................
293 293
294 294 ERROR: test-failure.t output changed
295 295 !.
296 296 --- $TESTTMP/test-failure-unicode.t
297 297 +++ $TESTTMP/output/test-failure-unicode.t.err
298 298 @@ -1,2 +1,2 @@
299 299 $ echo babar\xce\xb1 (esc)
300 300 - l\xce\xb5\xce\xb5t (esc)
301 301 + babar\xce\xb1 (esc)
302 302
303 303 ERROR: test-failure-unicode.t output changed
304 304 !
305 305 Failed test-failure.t: output changed
306 306 Failed test-failure-unicode.t: output changed
307 307 # Ran 3 tests, 0 skipped, 2 failed.
308 308 python hash seed: * (glob)
309 309 [1]
310 310 $ ls -a output
311 311 .
312 312 ..
313 313 .testtimes
314 314 test-failure-unicode.t.err
315 315 test-failure.t.err
316 316
317 317 test --xunit support
318 318 $ rt --xunit=xunit.xml
319 319
320 320 --- $TESTTMP/test-failure.t
321 321 +++ $TESTTMP/test-failure.t.err
322 322 @@ -1,5 +1,5 @@
323 323 $ echo babar
324 324 - rataxes
325 325 + babar
326 326 This is a noop statement so that
327 327 this test is still more bytes than success.
328 328 pad pad pad pad............................................................
329 329
330 330 ERROR: test-failure.t output changed
331 331 !.
332 332 --- $TESTTMP/test-failure-unicode.t
333 333 +++ $TESTTMP/test-failure-unicode.t.err
334 334 @@ -1,2 +1,2 @@
335 335 $ echo babar\xce\xb1 (esc)
336 336 - l\xce\xb5\xce\xb5t (esc)
337 337 + babar\xce\xb1 (esc)
338 338
339 339 ERROR: test-failure-unicode.t output changed
340 340 !
341 341 Failed test-failure.t: output changed
342 342 Failed test-failure-unicode.t: output changed
343 343 # Ran 3 tests, 0 skipped, 2 failed.
344 344 python hash seed: * (glob)
345 345 [1]
346 346 $ cat xunit.xml
347 347 <?xml version="1.0" encoding="utf-8"?>
348 348 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
349 349 <testcase name="test-success.t" time="*"/> (glob)
350 350 <testcase name="test-failure-unicode.t" time="*"> (glob)
351 351 <failure message="output changed" type="output-mismatch">
352 352 <![CDATA[--- $TESTTMP/test-failure-unicode.t
353 353 +++ $TESTTMP/test-failure-unicode.t.err
354 354 @@ -1,2 +1,2 @@
355 355 $ echo babar\xce\xb1 (esc)
356 356 - l\xce\xb5\xce\xb5t (esc)
357 357 + babar\xce\xb1 (esc)
358 358 ]]> </failure>
359 359 </testcase>
360 360 <testcase name="test-failure.t" time="*"> (glob)
361 361 <failure message="output changed" type="output-mismatch">
362 362 <![CDATA[--- $TESTTMP/test-failure.t
363 363 +++ $TESTTMP/test-failure.t.err
364 364 @@ -1,5 +1,5 @@
365 365 $ echo babar
366 366 - rataxes
367 367 + babar
368 368 This is a noop statement so that
369 369 this test is still more bytes than success.
370 370 pad pad pad pad............................................................
371 371 ]]> </failure>
372 372 </testcase>
373 373 </testsuite>
374 374
375 375 $ cat .testtimes
376 376 test-empty.t * (glob)
377 377 test-failure-unicode.t * (glob)
378 378 test-failure.t * (glob)
379 379 test-success.t * (glob)
380 380
381 381 $ rt --list-tests
382 382 test-failure-unicode.t
383 383 test-failure.t
384 384 test-success.t
385 385
386 386 $ rt --list-tests --json
387 387 test-failure-unicode.t
388 388 test-failure.t
389 389 test-success.t
390 390 $ cat report.json
391 391 testreport ={
392 392 "test-failure-unicode.t": {
393 393 "result": "success"
394 394 },
395 395 "test-failure.t": {
396 396 "result": "success"
397 397 },
398 398 "test-success.t": {
399 399 "result": "success"
400 400 }
401 401 } (no-eol)
402 402
403 403 $ rt --list-tests --xunit=xunit.xml
404 404 test-failure-unicode.t
405 405 test-failure.t
406 406 test-success.t
407 407 $ cat xunit.xml
408 408 <?xml version="1.0" encoding="utf-8"?>
409 409 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
410 410 <testcase name="test-failure-unicode.t"/>
411 411 <testcase name="test-failure.t"/>
412 412 <testcase name="test-success.t"/>
413 413 </testsuite>
414 414
415 415 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
416 416 test-failure-unicode.t
417 417 test-failure.t
418 418 $ cat output/report.json
419 419 testreport ={
420 420 "test-failure-unicode.t": {
421 421 "result": "success"
422 422 },
423 423 "test-failure.t": {
424 424 "result": "success"
425 425 }
426 426 } (no-eol)
427 427 $ cat xunit.xml
428 428 <?xml version="1.0" encoding="utf-8"?>
429 429 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
430 430 <testcase name="test-failure-unicode.t"/>
431 431 <testcase name="test-failure.t"/>
432 432 </testsuite>
433 433
434 434 $ rm test-failure-unicode.t
435 435
436 436 test for --retest
437 437 ====================
438 438
439 439 $ rt --retest
440 440
441 441 --- $TESTTMP/test-failure.t
442 442 +++ $TESTTMP/test-failure.t.err
443 443 @@ -1,5 +1,5 @@
444 444 $ echo babar
445 445 - rataxes
446 446 + babar
447 447 This is a noop statement so that
448 448 this test is still more bytes than success.
449 449 pad pad pad pad............................................................
450 450
451 451 ERROR: test-failure.t output changed
452 452 !
453 453 Failed test-failure.t: output changed
454 454 # Ran 2 tests, 1 skipped, 1 failed.
455 455 python hash seed: * (glob)
456 456 [1]
457 457
458 458 --retest works with --outputdir
459 459 $ rm -r output
460 460 $ mkdir output
461 461 $ mv test-failure.t.err output
462 462 $ rt --retest --outputdir output
463 463
464 464 --- $TESTTMP/test-failure.t
465 465 +++ $TESTTMP/output/test-failure.t.err
466 466 @@ -1,5 +1,5 @@
467 467 $ echo babar
468 468 - rataxes
469 469 + babar
470 470 This is a noop statement so that
471 471 this test is still more bytes than success.
472 472 pad pad pad pad............................................................
473 473
474 474 ERROR: test-failure.t output changed
475 475 !
476 476 Failed test-failure.t: output changed
477 477 # Ran 2 tests, 1 skipped, 1 failed.
478 478 python hash seed: * (glob)
479 479 [1]
480 480
481 481 Selecting Tests To Run
482 482 ======================
483 483
484 484 successful
485 485
486 486 $ rt test-success.t
487 487 .
488 488 # Ran 1 tests, 0 skipped, 0 failed.
489 489
490 490 success w/ keyword
491 491 $ rt -k xyzzy
492 492 .
493 493 # Ran 2 tests, 1 skipped, 0 failed.
494 494
495 495 failed
496 496
497 497 $ rt test-failure.t
498 498
499 499 --- $TESTTMP/test-failure.t
500 500 +++ $TESTTMP/test-failure.t.err
501 501 @@ -1,5 +1,5 @@
502 502 $ echo babar
503 503 - rataxes
504 504 + babar
505 505 This is a noop statement so that
506 506 this test is still more bytes than success.
507 507 pad pad pad pad............................................................
508 508
509 509 ERROR: test-failure.t output changed
510 510 !
511 511 Failed test-failure.t: output changed
512 512 # Ran 1 tests, 0 skipped, 1 failed.
513 513 python hash seed: * (glob)
514 514 [1]
515 515
516 516 failure w/ keyword
517 517 $ rt -k rataxes
518 518
519 519 --- $TESTTMP/test-failure.t
520 520 +++ $TESTTMP/test-failure.t.err
521 521 @@ -1,5 +1,5 @@
522 522 $ echo babar
523 523 - rataxes
524 524 + babar
525 525 This is a noop statement so that
526 526 this test is still more bytes than success.
527 527 pad pad pad pad............................................................
528 528
529 529 ERROR: test-failure.t output changed
530 530 !
531 531 Failed test-failure.t: output changed
532 532 # Ran 2 tests, 1 skipped, 1 failed.
533 533 python hash seed: * (glob)
534 534 [1]
535 535
536 536 Verify that when a process fails to start we show a useful message
537 537 ==================================================================
538 538
539 539 $ cat > test-serve-fail.t <<EOF
540 540 > $ echo 'abort: child process failed to start blah'
541 541 > EOF
542 542 $ rt test-serve-fail.t
543 543
544 544 --- $TESTTMP/test-serve-fail.t
545 545 +++ $TESTTMP/test-serve-fail.t.err
546 546 @@ -1* +1,2 @@ (glob)
547 547 $ echo 'abort: child process failed to start blah'
548 548 + abort: child process failed to start blah
549 549
550 550 ERROR: test-serve-fail.t output changed
551 551 !
552 552 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
553 553 # Ran 1 tests, 0 skipped, 1 failed.
554 554 python hash seed: * (glob)
555 555 [1]
556 556 $ rm test-serve-fail.t
557 557
558 558 Verify that we can try other ports
559 559 ===================================
560 560
561 561 Extensions aren't inherited by the invoked run-tests.py. An extension
562 562 introducing a repository requirement could cause this to fail. So we force
563 563 HGRCPATH to get a clean environment.
564 564
565 565 $ HGRCPATH= hg init inuse
566 566 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
567 567 $ cat blocks.pid >> $DAEMON_PIDS
568 568 $ cat > test-serve-inuse.t <<EOF
569 569 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
570 570 > $ cat hg.pid >> \$DAEMON_PIDS
571 571 > EOF
572 572 $ rt test-serve-inuse.t
573 573 .
574 574 # Ran 1 tests, 0 skipped, 0 failed.
575 575 $ rm test-serve-inuse.t
576 576 $ killdaemons.py $DAEMON_PIDS
577 577
578 578 Running In Debug Mode
579 579 ======================
580 580
581 581 $ rt --debug 2>&1 | grep -v pwd
582 582 + echo *SALT* 0 0 (glob)
583 583 *SALT* 0 0 (glob)
584 584 + echo babar
585 585 babar
586 586 + echo *SALT* 10 0 (glob)
587 587 *SALT* 10 0 (glob)
588 588 *+ echo *SALT* 0 0 (glob)
589 589 *SALT* 0 0 (glob)
590 590 + echo babar
591 591 babar
592 592 + echo *SALT* 2 0 (glob)
593 593 *SALT* 2 0 (glob)
594 594 + echo xyzzy
595 595 xyzzy
596 596 + echo *SALT* 9 0 (glob)
597 597 *SALT* 9 0 (glob)
598 598 + printf *abc\ndef\nxyz\n* (glob)
599 599 abc
600 600 def
601 601 xyz
602 602 + echo *SALT* 15 0 (glob)
603 603 *SALT* 15 0 (glob)
604 604 + printf *zyx\nwvu\ntsr\n* (glob)
605 605 zyx
606 606 wvu
607 607 tsr
608 608 + echo *SALT* 22 0 (glob)
609 609 *SALT* 22 0 (glob)
610 610 .
611 611 # Ran 2 tests, 0 skipped, 0 failed.
612 612
613 613 Parallel runs
614 614 ==============
615 615
616 616 (duplicate the failing test to get predictable output)
617 617 $ cp test-failure.t test-failure-copy.t
618 618
619 619 $ rt --jobs 2 test-failure*.t -n
620 620 !!
621 621 Failed test-failure*.t: output changed (glob)
622 622 Failed test-failure*.t: output changed (glob)
623 623 # Ran 2 tests, 0 skipped, 2 failed.
624 624 python hash seed: * (glob)
625 625 [1]
626 626
627 627 failures in parallel with --first should only print one failure
628 628 $ rt --jobs 2 --first test-failure*.t
629 629
630 630 --- $TESTTMP/test-failure*.t (glob)
631 631 +++ $TESTTMP/test-failure*.t.err (glob)
632 632 @@ -1,5 +1,5 @@
633 633 $ echo babar
634 634 - rataxes
635 635 + babar
636 636 This is a noop statement so that
637 637 this test is still more bytes than success.
638 638 pad pad pad pad............................................................
639 639
640 640 Failed test-failure*.t: output changed (glob)
641 641 Failed test-failure*.t: output changed (glob)
642 642 # Ran 2 tests, 0 skipped, 2 failed.
643 643 python hash seed: * (glob)
644 644 [1]
645 645
646 646
647 647 (delete the duplicated test file)
648 648 $ rm test-failure-copy.t
649 649
650 650
651 651 Interactive run
652 652 ===============
653 653
654 654 (backup the failing test)
655 655 $ cp test-failure.t backup
656 656
657 657 Refuse the fix
658 658
659 659 $ echo 'n' | rt -i
660 660
661 661 --- $TESTTMP/test-failure.t
662 662 +++ $TESTTMP/test-failure.t.err
663 663 @@ -1,5 +1,5 @@
664 664 $ echo babar
665 665 - rataxes
666 666 + babar
667 667 This is a noop statement so that
668 668 this test is still more bytes than success.
669 669 pad pad pad pad............................................................
670 670 Accept this change? [n]
671 671 ERROR: test-failure.t output changed
672 672 !.
673 673 Failed test-failure.t: output changed
674 674 # Ran 2 tests, 0 skipped, 1 failed.
675 675 python hash seed: * (glob)
676 676 [1]
677 677
678 678 $ cat test-failure.t
679 679 $ echo babar
680 680 rataxes
681 681 This is a noop statement so that
682 682 this test is still more bytes than success.
683 683 pad pad pad pad............................................................
684 684 pad pad pad pad............................................................
685 685 pad pad pad pad............................................................
686 686 pad pad pad pad............................................................
687 687 pad pad pad pad............................................................
688 688 pad pad pad pad............................................................
689 689
690 690 Interactive with custom view
691 691
692 692 $ echo 'n' | rt -i --view echo
693 693 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
694 694 Accept this change? [n]* (glob)
695 695 ERROR: test-failure.t output changed
696 696 !.
697 697 Failed test-failure.t: output changed
698 698 # Ran 2 tests, 0 skipped, 1 failed.
699 699 python hash seed: * (glob)
700 700 [1]
701 701
702 702 View the fix
703 703
704 704 $ echo 'y' | rt --view echo
705 705 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
706 706
707 707 ERROR: test-failure.t output changed
708 708 !.
709 709 Failed test-failure.t: output changed
710 710 # Ran 2 tests, 0 skipped, 1 failed.
711 711 python hash seed: * (glob)
712 712 [1]
713 713
714 714 Accept the fix
715 715
716 716 $ cat >> test-failure.t <<EOF
717 717 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
718 718 > saved backup bundle to \$TESTTMP/foo.hg
719 719 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
720 720 > saved backup bundle to $TESTTMP\\foo.hg
721 721 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
722 722 > saved backup bundle to \$TESTTMP/*.hg (glob)
723 723 > EOF
724 724 $ echo 'y' | rt -i 2>&1
725 725
726 726 --- $TESTTMP/test-failure.t
727 727 +++ $TESTTMP/test-failure.t.err
728 728 @@ -1,5 +1,5 @@
729 729 $ echo babar
730 730 - rataxes
731 731 + babar
732 732 This is a noop statement so that
733 733 this test is still more bytes than success.
734 734 pad pad pad pad............................................................
735 735 @@ -11,6 +11,6 @@
736 736 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
737 737 saved backup bundle to $TESTTMP/foo.hg
738 738 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
739 739 - saved backup bundle to $TESTTMP\foo.hg
740 740 + saved backup bundle to $TESTTMP/foo.hg
741 741 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
742 742 saved backup bundle to $TESTTMP/*.hg (glob)
743 743 Accept this change? [n] ..
744 744 # Ran 2 tests, 0 skipped, 0 failed.
745 745
746 746 $ sed -e 's,(glob)$,&<,g' test-failure.t
747 747 $ echo babar
748 748 babar
749 749 This is a noop statement so that
750 750 this test is still more bytes than success.
751 751 pad pad pad pad............................................................
752 752 pad pad pad pad............................................................
753 753 pad pad pad pad............................................................
754 754 pad pad pad pad............................................................
755 755 pad pad pad pad............................................................
756 756 pad pad pad pad............................................................
757 757 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
758 758 saved backup bundle to $TESTTMP/foo.hg
759 759 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
760 760 saved backup bundle to $TESTTMP/foo.hg
761 761 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
762 762 saved backup bundle to $TESTTMP/*.hg (glob)<
763 763
764 764 Race condition - test file was modified when test is running
765 765
766 766 $ TESTRACEDIR=`pwd`
767 767 $ export TESTRACEDIR
768 768 $ cat > test-race.t <<EOF
769 769 > $ echo 1
770 770 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
771 771 > EOF
772 772
773 773 $ rt -i test-race.t
774 774
775 775 --- $TESTTMP/test-race.t
776 776 +++ $TESTTMP/test-race.t.err
777 777 @@ -1,2 +1,3 @@
778 778 $ echo 1
779 779 + 1
780 780 $ echo "# a new line" >> $TESTTMP/test-race.t
781 781 Reference output has changed (run again to prompt changes)
782 782 ERROR: test-race.t output changed
783 783 !
784 784 Failed test-race.t: output changed
785 785 # Ran 1 tests, 0 skipped, 1 failed.
786 786 python hash seed: * (glob)
787 787 [1]
788 788
789 789 $ rm test-race.t
790 790
791 791 When "#testcases" is used in .t files
792 792
793 793 $ cat >> test-cases.t <<EOF
794 794 > #testcases a b
795 795 > #if a
796 796 > $ echo 1
797 797 > #endif
798 798 > #if b
799 799 > $ echo 2
800 800 > #endif
801 801 > EOF
802 802
803 803 $ cat <<EOF | rt -i test-cases.t 2>&1
804 804 > y
805 805 > y
806 806 > EOF
807 807
808 808 --- $TESTTMP/test-cases.t
809 809 +++ $TESTTMP/test-cases.t.a.err
810 810 @@ -1,6 +1,7 @@
811 811 #testcases a b
812 812 #if a
813 813 $ echo 1
814 814 + 1
815 815 #endif
816 816 #if b
817 817 $ echo 2
818 818 Accept this change? [n] .
819 819 --- $TESTTMP/test-cases.t
820 820 +++ $TESTTMP/test-cases.t.b.err
821 821 @@ -5,4 +5,5 @@
822 822 #endif
823 823 #if b
824 824 $ echo 2
825 825 + 2
826 826 #endif
827 827 Accept this change? [n] .
828 828 # Ran 2 tests, 0 skipped, 0 failed.
829 829
830 830 $ cat test-cases.t
831 831 #testcases a b
832 832 #if a
833 833 $ echo 1
834 834 1
835 835 #endif
836 836 #if b
837 837 $ echo 2
838 838 2
839 839 #endif
840 840
841 841 $ cat >> test-cases.t <<'EOF'
842 842 > #if a
843 843 > $ NAME=A
844 844 > #else
845 845 > $ NAME=B
846 846 > #endif
847 847 > $ echo $NAME
848 848 > A (a !)
849 849 > B (b !)
850 850 > EOF
851 851 $ rt test-cases.t
852 852 ..
853 853 # Ran 2 tests, 0 skipped, 0 failed.
854 854
855 855 $ rm test-cases.t
856 856
857 857 (reinstall)
858 858 $ mv backup test-failure.t
859 859
860 860 No Diff
861 861 ===============
862 862
863 863 $ rt --nodiff
864 864 !.
865 865 Failed test-failure.t: output changed
866 866 # Ran 2 tests, 0 skipped, 1 failed.
867 867 python hash seed: * (glob)
868 868 [1]
869 869
870 870 test --tmpdir support
871 871 $ rt --tmpdir=$TESTTMP/keep test-success.t
872 872
873 873 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
874 874 Keeping threadtmp dir: $TESTTMP/keep/child1
875 875 .
876 876 # Ran 1 tests, 0 skipped, 0 failed.
877 877
878 878 timeouts
879 879 ========
880 880 $ cat > test-timeout.t <<EOF
881 881 > $ sleep 2
882 882 > $ echo pass
883 883 > pass
884 884 > EOF
885 885 > echo '#require slow' > test-slow-timeout.t
886 886 > cat test-timeout.t >> test-slow-timeout.t
887 887 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
888 888 st
889 889 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
890 890 Failed test-timeout.t: timed out
891 891 # Ran 1 tests, 1 skipped, 1 failed.
892 892 python hash seed: * (glob)
893 893 [1]
894 894 $ rt --timeout=1 --slowtimeout=3 \
895 895 > test-timeout.t test-slow-timeout.t --allow-slow-tests
896 896 .t
897 897 Failed test-timeout.t: timed out
898 898 # Ran 2 tests, 0 skipped, 1 failed.
899 899 python hash seed: * (glob)
900 900 [1]
901 901 $ rm test-timeout.t test-slow-timeout.t
902 902
903 903 test for --time
904 904 ==================
905 905
906 906 $ rt test-success.t --time
907 907 .
908 908 # Ran 1 tests, 0 skipped, 0 failed.
909 909 # Producing time report
910 910 start end cuser csys real Test
911 911 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
912 912
913 913 test for --time with --job enabled
914 914 ====================================
915 915
916 916 $ rt test-success.t --time --jobs 2
917 917 .
918 918 # Ran 1 tests, 0 skipped, 0 failed.
919 919 # Producing time report
920 920 start end cuser csys real Test
921 921 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
922 922
923 923 Skips
924 924 ================
925 925 $ cat > test-skip.t <<EOF
926 926 > $ echo xyzzy
927 927 > #if true
928 928 > #require false
929 929 > #end
930 930 > EOF
931 931 $ cat > test-noskip.t <<EOF
932 932 > #if false
933 933 > #require false
934 934 > #endif
935 935 > EOF
936 936 $ rt --nodiff
937 937 !.s.
938 938 Skipped test-skip.t: missing feature: nail clipper
939 939 Failed test-failure.t: output changed
940 940 # Ran 3 tests, 1 skipped, 1 failed.
941 941 python hash seed: * (glob)
942 942 [1]
943 943
944 944 $ rm test-noskip.t
945 945 $ rt --keyword xyzzy
946 946 .s
947 947 Skipped test-skip.t: missing feature: nail clipper
948 948 # Ran 2 tests, 2 skipped, 0 failed.
949 949
950 950 Skips with xml
951 951 $ rt --keyword xyzzy \
952 952 > --xunit=xunit.xml
953 953 .s
954 954 Skipped test-skip.t: missing feature: nail clipper
955 955 # Ran 2 tests, 2 skipped, 0 failed.
956 956 $ cat xunit.xml
957 957 <?xml version="1.0" encoding="utf-8"?>
958 958 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
959 959 <testcase name="test-success.t" time="*"/> (glob)
960 960 <testcase name="test-skip.t">
961 961 <skipped>
962 962 <![CDATA[missing feature: nail clipper]]> </skipped>
963 963 </testcase>
964 964 </testsuite>
965 965
966 966 Missing skips or blacklisted skips don't count as executed:
967 967 $ echo test-failure.t > blacklist
968 968 $ rt --blacklist=blacklist --json\
969 969 > test-failure.t test-bogus.t
970 970 ss
971 971 Skipped test-bogus.t: Doesn't exist
972 972 Skipped test-failure.t: blacklisted
973 973 # Ran 0 tests, 2 skipped, 0 failed.
974 974 $ cat report.json
975 975 testreport ={
976 976 "test-bogus.t": {
977 977 "result": "skip"
978 978 },
979 979 "test-failure.t": {
980 980 "result": "skip"
981 981 }
982 982 } (no-eol)
983 983
984 984 Whitelist trumps blacklist
985 985 $ echo test-failure.t > whitelist
986 986 $ rt --blacklist=blacklist --whitelist=whitelist --json\
987 987 > test-failure.t test-bogus.t
988 988 s
989 989 --- $TESTTMP/test-failure.t
990 990 +++ $TESTTMP/test-failure.t.err
991 991 @@ -1,5 +1,5 @@
992 992 $ echo babar
993 993 - rataxes
994 994 + babar
995 995 This is a noop statement so that
996 996 this test is still more bytes than success.
997 997 pad pad pad pad............................................................
998 998
999 999 ERROR: test-failure.t output changed
1000 1000 !
1001 1001 Skipped test-bogus.t: Doesn't exist
1002 1002 Failed test-failure.t: output changed
1003 1003 # Ran 1 tests, 1 skipped, 1 failed.
1004 1004 python hash seed: * (glob)
1005 1005 [1]
1006 1006
1007 1007 Ensure that --test-list causes only the tests listed in that file to
1008 1008 be executed.
1009 1009 $ echo test-success.t >> onlytest
1010 1010 $ rt --test-list=onlytest
1011 1011 .
1012 1012 # Ran 1 tests, 0 skipped, 0 failed.
1013 1013 $ echo test-bogus.t >> anothertest
1014 1014 $ rt --test-list=onlytest --test-list=anothertest
1015 1015 s.
1016 1016 Skipped test-bogus.t: Doesn't exist
1017 1017 # Ran 1 tests, 1 skipped, 0 failed.
1018 1018 $ rm onlytest anothertest
1019 1019
1020 1020 test for --json
1021 1021 ==================
1022 1022
1023 1023 $ rt --json
1024 1024
1025 1025 --- $TESTTMP/test-failure.t
1026 1026 +++ $TESTTMP/test-failure.t.err
1027 1027 @@ -1,5 +1,5 @@
1028 1028 $ echo babar
1029 1029 - rataxes
1030 1030 + babar
1031 1031 This is a noop statement so that
1032 1032 this test is still more bytes than success.
1033 1033 pad pad pad pad............................................................
1034 1034
1035 1035 ERROR: test-failure.t output changed
1036 1036 !.s
1037 1037 Skipped test-skip.t: missing feature: nail clipper
1038 1038 Failed test-failure.t: output changed
1039 1039 # Ran 2 tests, 1 skipped, 1 failed.
1040 1040 python hash seed: * (glob)
1041 1041 [1]
1042 1042
1043 1043 $ cat report.json
1044 1044 testreport ={
1045 1045 "test-failure.t": [\{] (re)
1046 1046 "csys": "\s*[\d\.]{4,5}", ? (re)
1047 1047 "cuser": "\s*[\d\.]{4,5}", ? (re)
1048 1048 "diff": "---.+\+\+\+.+", ? (re)
1049 1049 "end": "\s*[\d\.]{4,5}", ? (re)
1050 1050 "result": "failure", ? (re)
1051 1051 "start": "\s*[\d\.]{4,5}", ? (re)
1052 1052 "time": "\s*[\d\.]{4,5}" (re)
1053 1053 }, ? (re)
1054 1054 "test-skip.t": {
1055 1055 "csys": "\s*[\d\.]{4,5}", ? (re)
1056 1056 "cuser": "\s*[\d\.]{4,5}", ? (re)
1057 1057 "diff": "", ? (re)
1058 1058 "end": "\s*[\d\.]{4,5}", ? (re)
1059 1059 "result": "skip", ? (re)
1060 1060 "start": "\s*[\d\.]{4,5}", ? (re)
1061 1061 "time": "\s*[\d\.]{4,5}" (re)
1062 1062 }, ? (re)
1063 1063 "test-success.t": [\{] (re)
1064 1064 "csys": "\s*[\d\.]{4,5}", ? (re)
1065 1065 "cuser": "\s*[\d\.]{4,5}", ? (re)
1066 1066 "diff": "", ? (re)
1067 1067 "end": "\s*[\d\.]{4,5}", ? (re)
1068 1068 "result": "success", ? (re)
1069 1069 "start": "\s*[\d\.]{4,5}", ? (re)
1070 1070 "time": "\s*[\d\.]{4,5}" (re)
1071 1071 }
1072 1072 } (no-eol)
1073 1073 --json with --outputdir
1074 1074
1075 1075 $ rm report.json
1076 1076 $ rm -r output
1077 1077 $ mkdir output
1078 1078 $ rt --json --outputdir output
1079 1079
1080 1080 --- $TESTTMP/test-failure.t
1081 1081 +++ $TESTTMP/output/test-failure.t.err
1082 1082 @@ -1,5 +1,5 @@
1083 1083 $ echo babar
1084 1084 - rataxes
1085 1085 + babar
1086 1086 This is a noop statement so that
1087 1087 this test is still more bytes than success.
1088 1088 pad pad pad pad............................................................
1089 1089
1090 1090 ERROR: test-failure.t output changed
1091 1091 !.s
1092 1092 Skipped test-skip.t: missing feature: nail clipper
1093 1093 Failed test-failure.t: output changed
1094 1094 # Ran 2 tests, 1 skipped, 1 failed.
1095 1095 python hash seed: * (glob)
1096 1096 [1]
1097 1097 $ f report.json
1098 1098 report.json: file not found
1099 1099 $ cat output/report.json
1100 1100 testreport ={
1101 1101 "test-failure.t": [\{] (re)
1102 1102 "csys": "\s*[\d\.]{4,5}", ? (re)
1103 1103 "cuser": "\s*[\d\.]{4,5}", ? (re)
1104 1104 "diff": "---.+\+\+\+.+", ? (re)
1105 1105 "end": "\s*[\d\.]{4,5}", ? (re)
1106 1106 "result": "failure", ? (re)
1107 1107 "start": "\s*[\d\.]{4,5}", ? (re)
1108 1108 "time": "\s*[\d\.]{4,5}" (re)
1109 1109 }, ? (re)
1110 1110 "test-skip.t": {
1111 1111 "csys": "\s*[\d\.]{4,5}", ? (re)
1112 1112 "cuser": "\s*[\d\.]{4,5}", ? (re)
1113 1113 "diff": "", ? (re)
1114 1114 "end": "\s*[\d\.]{4,5}", ? (re)
1115 1115 "result": "skip", ? (re)
1116 1116 "start": "\s*[\d\.]{4,5}", ? (re)
1117 1117 "time": "\s*[\d\.]{4,5}" (re)
1118 1118 }, ? (re)
1119 1119 "test-success.t": [\{] (re)
1120 1120 "csys": "\s*[\d\.]{4,5}", ? (re)
1121 1121 "cuser": "\s*[\d\.]{4,5}", ? (re)
1122 1122 "diff": "", ? (re)
1123 1123 "end": "\s*[\d\.]{4,5}", ? (re)
1124 1124 "result": "success", ? (re)
1125 1125 "start": "\s*[\d\.]{4,5}", ? (re)
1126 1126 "time": "\s*[\d\.]{4,5}" (re)
1127 1127 }
1128 1128 } (no-eol)
1129 1129 $ ls -a output
1130 1130 .
1131 1131 ..
1132 1132 .testtimes
1133 1133 report.json
1134 1134 test-failure.t.err
1135 1135
1136 1136 Test that failed test accepted through interactive are properly reported:
1137 1137
1138 1138 $ cp test-failure.t backup
1139 1139 $ echo y | rt --json -i
1140 1140
1141 1141 --- $TESTTMP/test-failure.t
1142 1142 +++ $TESTTMP/test-failure.t.err
1143 1143 @@ -1,5 +1,5 @@
1144 1144 $ echo babar
1145 1145 - rataxes
1146 1146 + babar
1147 1147 This is a noop statement so that
1148 1148 this test is still more bytes than success.
1149 1149 pad pad pad pad............................................................
1150 1150 Accept this change? [n] ..s
1151 1151 Skipped test-skip.t: missing feature: nail clipper
1152 1152 # Ran 2 tests, 1 skipped, 0 failed.
1153 1153
1154 1154 $ cat report.json
1155 1155 testreport ={
1156 1156 "test-failure.t": [\{] (re)
1157 1157 "csys": "\s*[\d\.]{4,5}", ? (re)
1158 1158 "cuser": "\s*[\d\.]{4,5}", ? (re)
1159 1159 "diff": "", ? (re)
1160 1160 "end": "\s*[\d\.]{4,5}", ? (re)
1161 1161 "result": "success", ? (re)
1162 1162 "start": "\s*[\d\.]{4,5}", ? (re)
1163 1163 "time": "\s*[\d\.]{4,5}" (re)
1164 1164 }, ? (re)
1165 1165 "test-skip.t": {
1166 1166 "csys": "\s*[\d\.]{4,5}", ? (re)
1167 1167 "cuser": "\s*[\d\.]{4,5}", ? (re)
1168 1168 "diff": "", ? (re)
1169 1169 "end": "\s*[\d\.]{4,5}", ? (re)
1170 1170 "result": "skip", ? (re)
1171 1171 "start": "\s*[\d\.]{4,5}", ? (re)
1172 1172 "time": "\s*[\d\.]{4,5}" (re)
1173 1173 }, ? (re)
1174 1174 "test-success.t": [\{] (re)
1175 1175 "csys": "\s*[\d\.]{4,5}", ? (re)
1176 1176 "cuser": "\s*[\d\.]{4,5}", ? (re)
1177 1177 "diff": "", ? (re)
1178 1178 "end": "\s*[\d\.]{4,5}", ? (re)
1179 1179 "result": "success", ? (re)
1180 1180 "start": "\s*[\d\.]{4,5}", ? (re)
1181 1181 "time": "\s*[\d\.]{4,5}" (re)
1182 1182 }
1183 1183 } (no-eol)
1184 1184 $ mv backup test-failure.t
1185 1185
1186 1186 backslash on end of line with glob matching is handled properly
1187 1187
1188 1188 $ cat > test-glob-backslash.t << EOF
1189 1189 > $ echo 'foo bar \\'
1190 1190 > foo * \ (glob)
1191 1191 > EOF
1192 1192
1193 1193 $ rt test-glob-backslash.t
1194 1194 .
1195 1195 # Ran 1 tests, 0 skipped, 0 failed.
1196 1196
1197 1197 $ rm -f test-glob-backslash.t
1198 1198
1199 1199 Test globbing of local IP addresses
1200 1200 $ echo 172.16.18.1
1201 1201 $LOCALIP (glob)
1202 1202 $ echo dead:beef::1
1203 1203 $LOCALIP (glob)
1204 1204
1205 1205 Test reusability for third party tools
1206 1206 ======================================
1207 1207
1208 1208 $ mkdir "$TESTTMP"/anothertests
1209 1209 $ cd "$TESTTMP"/anothertests
1210 1210
1211 1211 test that `run-tests.py` can execute hghave, even if it runs not in
1212 1212 Mercurial source tree.
1213 1213
1214 1214 $ cat > test-hghave.t <<EOF
1215 1215 > #require true
1216 1216 > $ echo foo
1217 1217 > foo
1218 1218 > EOF
1219 1219 $ rt test-hghave.t
1220 1220 .
1221 1221 # Ran 1 tests, 0 skipped, 0 failed.
1222 1222
1223 1223 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1224 1224 running is placed.
1225 1225
1226 1226 $ cat > test-runtestdir.t <<EOF
1227 1227 > - $TESTDIR, in which test-run-tests.t is placed
1228 1228 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1229 1229 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1230 1230 >
1231 1231 > #if windows
1232 1232 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1233 1233 > #else
1234 1234 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1235 1235 > #endif
1236 1236 > If this prints a path, that means RUNTESTDIR didn't equal
1237 1237 > TESTDIR as it should have.
1238 1238 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1239 1239 > This should print the start of check-code. If this passes but the
1240 1240 > previous check failed, that means we found a copy of check-code at whatever
1241 1241 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1242 1242 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1243 1243 > #!USRBINENVPY
1244 1244 > #
1245 1245 > # check-code - a style and portability checker for Mercurial
1246 1246 > EOF
1247 1247 $ rt test-runtestdir.t
1248 1248 .
1249 1249 # Ran 1 tests, 0 skipped, 0 failed.
1250 1250
1251 1251 #if execbit
1252 1252
1253 1253 test that TESTDIR is referred in PATH
1254 1254
1255 1255 $ cat > custom-command.sh <<EOF
1256 1256 > #!/bin/sh
1257 1257 > echo "hello world"
1258 1258 > EOF
1259 1259 $ chmod +x custom-command.sh
1260 1260 $ cat > test-testdir-path.t <<EOF
1261 1261 > $ custom-command.sh
1262 1262 > hello world
1263 1263 > EOF
1264 1264 $ rt test-testdir-path.t
1265 1265 .
1266 1266 # Ran 1 tests, 0 skipped, 0 failed.
1267 1267
1268 1268 #endif
1269 1269
1270 1270 test support for --allow-slow-tests
1271 1271 $ cat > test-very-slow-test.t <<EOF
1272 1272 > #require slow
1273 1273 > $ echo pass
1274 1274 > pass
1275 1275 > EOF
1276 1276 $ rt test-very-slow-test.t
1277 1277 s
1278 1278 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1279 1279 # Ran 0 tests, 1 skipped, 0 failed.
1280 1280 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1281 1281 .
1282 1282 # Ran 1 tests, 0 skipped, 0 failed.
1283 1283
1284 1284 support for running a test outside the current directory
1285 1285 $ mkdir nonlocal
1286 1286 $ cat > nonlocal/test-is-not-here.t << EOF
1287 1287 > $ echo pass
1288 1288 > pass
1289 1289 > EOF
1290 1290 $ rt nonlocal/test-is-not-here.t
1291 1291 .
1292 1292 # Ran 1 tests, 0 skipped, 0 failed.
1293 1293
1294 1294 support for automatically discovering test if arg is a folder
1295 1295 $ mkdir tmp && cd tmp
1296 1296
1297 1297 $ cat > test-uno.t << EOF
1298 1298 > $ echo line
1299 1299 > line
1300 1300 > EOF
1301 1301
1302 1302 $ cp test-uno.t test-dos.t
1303 1303 $ cd ..
1304 1304 $ cp -R tmp tmpp
1305 1305 $ cp tmp/test-uno.t test-solo.t
1306 1306
1307 1307 $ rt tmp/ test-solo.t tmpp
1308 1308 .....
1309 1309 # Ran 5 tests, 0 skipped, 0 failed.
1310 1310 $ rm -rf tmp tmpp
1311 1311
1312 1312 support for running run-tests.py from another directory
1313 1313 $ mkdir tmp && cd tmp
1314 1314
1315 1315 $ cat > useful-file.sh << EOF
1316 1316 > important command
1317 1317 > EOF
1318 1318
1319 1319 $ cat > test-folder.t << EOF
1320 1320 > $ cat \$TESTDIR/useful-file.sh
1321 1321 > important command
1322 1322 > EOF
1323 1323
1324 1324 $ cat > test-folder-fail.t << EOF
1325 1325 > $ cat \$TESTDIR/useful-file.sh
1326 1326 > important commando
1327 1327 > EOF
1328 1328
1329 1329 $ cd ..
1330 1330 $ rt tmp/test-*.t
1331 1331
1332 1332 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1333 1333 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1334 1334 @@ -1,2 +1,2 @@
1335 1335 $ cat $TESTDIR/useful-file.sh
1336 1336 - important commando
1337 1337 + important command
1338 1338
1339 1339 ERROR: test-folder-fail.t output changed
1340 1340 !.
1341 1341 Failed test-folder-fail.t: output changed
1342 1342 # Ran 2 tests, 0 skipped, 1 failed.
1343 1343 python hash seed: * (glob)
1344 1344 [1]
1345 1345
1346 1346 support for bisecting failed tests automatically
1347 1347 $ hg init bisect
1348 1348 $ cd bisect
1349 1349 $ cat >> test-bisect.t <<EOF
1350 1350 > $ echo pass
1351 1351 > pass
1352 1352 > EOF
1353 1353 $ hg add test-bisect.t
1354 1354 $ hg ci -m 'good'
1355 1355 $ cat >> test-bisect.t <<EOF
1356 1356 > $ echo pass
1357 1357 > fail
1358 1358 > EOF
1359 1359 $ hg ci -m 'bad'
1360 1360 $ rt --known-good-rev=0 test-bisect.t
1361 1361
1362 1362 --- $TESTTMP/anothertests/bisect/test-bisect.t
1363 1363 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1364 1364 @@ -1,4 +1,4 @@
1365 1365 $ echo pass
1366 1366 pass
1367 1367 $ echo pass
1368 1368 - fail
1369 1369 + pass
1370 1370
1371 1371 ERROR: test-bisect.t output changed
1372 1372 !
1373 1373 Failed test-bisect.t: output changed
1374 1374 test-bisect.t broken by 72cbf122d116 (bad)
1375 1375 # Ran 1 tests, 0 skipped, 1 failed.
1376 1376 python hash seed: * (glob)
1377 1377 [1]
1378 1378
1379 1379 $ cd ..
1380 1380
1381 1381 support bisecting a separate repo
1382 1382
1383 1383 $ hg init bisect-dependent
1384 1384 $ cd bisect-dependent
1385 1385 $ cat > test-bisect-dependent.t <<EOF
1386 1386 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1387 1387 > pass
1388 1388 > EOF
1389 1389 $ hg commit -Am dependent test-bisect-dependent.t
1390 1390
1391 1391 $ rt --known-good-rev=0 test-bisect-dependent.t
1392 1392
1393 1393 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1394 1394 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1395 1395 @@ -1,2 +1,2 @@
1396 1396 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1397 1397 - pass
1398 1398 + fail
1399 1399
1400 1400 ERROR: test-bisect-dependent.t output changed
1401 1401 !
1402 1402 Failed test-bisect-dependent.t: output changed
1403 1403 Failed to identify failure point for test-bisect-dependent.t
1404 1404 # Ran 1 tests, 0 skipped, 1 failed.
1405 1405 python hash seed: * (glob)
1406 1406 [1]
1407 1407
1408 1408 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1409 1409 usage: run-tests.py [options] [tests]
1410 1410 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1411 1411 [2]
1412 1412
1413 1413 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1414 1414
1415 1415 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1416 1416 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1417 1417 @@ -1,2 +1,2 @@
1418 1418 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1419 1419 - pass
1420 1420 + fail
1421 1421
1422 1422 ERROR: test-bisect-dependent.t output changed
1423 1423 !
1424 1424 Failed test-bisect-dependent.t: output changed
1425 1425 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1426 1426 # Ran 1 tests, 0 skipped, 1 failed.
1427 1427 python hash seed: * (glob)
1428 1428 [1]
1429 1429
1430 1430 $ cd ..
1431 1431
1432 1432 Test a broken #if statement doesn't break run-tests threading.
1433 1433 ==============================================================
1434 1434 $ mkdir broken
1435 1435 $ cd broken
1436 1436 $ cat > test-broken.t <<EOF
1437 1437 > true
1438 1438 > #if notarealhghavefeature
1439 1439 > $ false
1440 1440 > #endif
1441 1441 > EOF
1442 1442 $ for f in 1 2 3 4 ; do
1443 1443 > cat > test-works-$f.t <<EOF
1444 1444 > This is test case $f
1445 1445 > $ sleep 1
1446 1446 > EOF
1447 1447 > done
1448 1448 $ rt -j 2
1449 1449 ....
1450 1450 # Ran 5 tests, 0 skipped, 0 failed.
1451 1451 skipped: unknown feature: notarealhghavefeature
1452 1452
1453 1453 $ cd ..
1454 1454 $ rm -rf broken
1455 1455
1456 1456 Test cases in .t files
1457 1457 ======================
1458 1458 $ mkdir cases
1459 1459 $ cd cases
1460 1460 $ cat > test-cases-abc.t <<'EOF'
1461 1461 > #testcases A B C
1462 1462 > $ V=B
1463 1463 > #if A
1464 1464 > $ V=A
1465 1465 > #endif
1466 1466 > #if C
1467 1467 > $ V=C
1468 1468 > #endif
1469 1469 > $ echo $V | sed 's/A/C/'
1470 1470 > C
1471 1471 > #if C
1472 1472 > $ [ $V = C ]
1473 1473 > #endif
1474 1474 > #if A
1475 1475 > $ [ $V = C ]
1476 1476 > [1]
1477 1477 > #endif
1478 1478 > #if no-C
1479 1479 > $ [ $V = C ]
1480 1480 > [1]
1481 1481 > #endif
1482 1482 > $ [ $V = D ]
1483 1483 > [1]
1484 1484 > EOF
1485 1485 $ rt
1486 1486 .
1487 1487 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1488 1488 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1489 1489 @@ -7,7 +7,7 @@
1490 1490 $ V=C
1491 1491 #endif
1492 1492 $ echo $V | sed 's/A/C/'
1493 1493 - C
1494 1494 + B
1495 1495 #if C
1496 1496 $ [ $V = C ]
1497 1497 #endif
1498 1498
1499 ERROR: test-cases-abc.t (case B) output changed
1499 ERROR: test-cases-abc.t#B output changed
1500 1500 !.
1501 Failed test-cases-abc.t (case B): output changed
1501 Failed test-cases-abc.t#B: output changed
1502 1502 # Ran 3 tests, 0 skipped, 1 failed.
1503 1503 python hash seed: * (glob)
1504 1504 [1]
1505 1505
1506 1506 --restart works
1507 1507
1508 1508 $ rt --restart
1509 1509
1510 1510 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1511 1511 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1512 1512 @@ -7,7 +7,7 @@
1513 1513 $ V=C
1514 1514 #endif
1515 1515 $ echo $V | sed 's/A/C/'
1516 1516 - C
1517 1517 + B
1518 1518 #if C
1519 1519 $ [ $V = C ]
1520 1520 #endif
1521 1521
1522 ERROR: test-cases-abc.t (case B) output changed
1522 ERROR: test-cases-abc.t#B output changed
1523 1523 !.
1524 Failed test-cases-abc.t (case B): output changed
1524 Failed test-cases-abc.t#B: output changed
1525 1525 # Ran 2 tests, 0 skipped, 1 failed.
1526 1526 python hash seed: * (glob)
1527 1527 [1]
1528 1528
1529 1529 --restart works with outputdir
1530 1530
1531 1531 $ mkdir output
1532 1532 $ mv test-cases-abc.t.B.err output
1533 1533 $ rt --restart --outputdir output
1534 1534
1535 1535 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1536 1536 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1537 1537 @@ -7,7 +7,7 @@
1538 1538 $ V=C
1539 1539 #endif
1540 1540 $ echo $V | sed 's/A/C/'
1541 1541 - C
1542 1542 + B
1543 1543 #if C
1544 1544 $ [ $V = C ]
1545 1545 #endif
1546 1546
1547 ERROR: test-cases-abc.t (case B) output changed
1547 ERROR: test-cases-abc.t#B output changed
1548 1548 !.
1549 Failed test-cases-abc.t (case B): output changed
1549 Failed test-cases-abc.t#B: output changed
1550 1550 # Ran 2 tests, 0 skipped, 1 failed.
1551 1551 python hash seed: * (glob)
1552 1552 [1]
1553 1553
1554 1554 Test TESTCASE variable
1555 1555
1556 1556 $ cat > test-cases-ab.t <<'EOF'
1557 1557 > $ dostuff() {
1558 1558 > > echo "In case $TESTCASE"
1559 1559 > > }
1560 1560 > #testcases A B
1561 1561 > #if A
1562 1562 > $ dostuff
1563 1563 > In case A
1564 1564 > #endif
1565 1565 > #if B
1566 1566 > $ dostuff
1567 1567 > In case B
1568 1568 > #endif
1569 1569 > EOF
1570 1570 $ rt test-cases-ab.t
1571 1571 ..
1572 1572 # Ran 2 tests, 0 skipped, 0 failed.
1573 1573
1574 1574 Support running a specific test case
1575 1575
1576 $ rt "test-cases-abc.t (case B)"
1576 $ rt "test-cases-abc.t#B"
1577 1577
1578 1578 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1579 1579 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1580 1580 @@ -7,7 +7,7 @@
1581 1581 $ V=C
1582 1582 #endif
1583 1583 $ echo $V | sed 's/A/C/'
1584 1584 - C
1585 1585 + B
1586 1586 #if C
1587 1587 $ [ $V = C ]
1588 1588 #endif
1589 1589
1590 ERROR: test-cases-abc.t (case B) output changed
1590 ERROR: test-cases-abc.t#B output changed
1591 1591 !
1592 Failed test-cases-abc.t (case B): output changed
1592 Failed test-cases-abc.t#B: output changed
1593 1593 # Ran 1 tests, 0 skipped, 1 failed.
1594 1594 python hash seed: * (glob)
1595 1595 [1]
1596 1596
1597 1597 Support running multiple test cases in the same file
1598 1598
1599 $ rt "test-cases-abc.t (case B)" "test-cases-abc.t (case C)"
1599 $ rt test-cases-abc.t#B test-cases-abc.t#C
1600 1600
1601 1601 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1602 1602 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1603 1603 @@ -7,7 +7,7 @@
1604 1604 $ V=C
1605 1605 #endif
1606 1606 $ echo $V | sed 's/A/C/'
1607 1607 - C
1608 1608 + B
1609 1609 #if C
1610 1610 $ [ $V = C ]
1611 1611 #endif
1612 1612
1613 ERROR: test-cases-abc.t (case B) output changed
1613 ERROR: test-cases-abc.t#B output changed
1614 1614 !.
1615 Failed test-cases-abc.t (case B): output changed
1615 Failed test-cases-abc.t#B: output changed
1616 1616 # Ran 2 tests, 0 skipped, 1 failed.
1617 1617 python hash seed: * (glob)
1618 1618 [1]
1619 1619
1620 1620 Support running invalid test cases
1621 1621
1622 $ rt "test-cases-abc.t (case B)" "test-cases-abc.t (case D)"
1622 $ rt test-cases-abc.t#B test-cases-abc.t#D
1623 1623
1624 1624 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1625 1625 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1626 1626 @@ -7,7 +7,7 @@
1627 1627 $ V=C
1628 1628 #endif
1629 1629 $ echo $V | sed 's/A/C/'
1630 1630 - C
1631 1631 + B
1632 1632 #if C
1633 1633 $ [ $V = C ]
1634 1634 #endif
1635 1635
1636 ERROR: test-cases-abc.t (case B) output changed
1636 ERROR: test-cases-abc.t#B output changed
1637 1637 !
1638 Failed test-cases-abc.t (case B): output changed
1638 Failed test-cases-abc.t#B: output changed
1639 1639 # Ran 1 tests, 0 skipped, 1 failed.
1640 1640 python hash seed: * (glob)
1641 1641 [1]
1642 1642
1643 1643 Test automatic pattern replacement
1644 1644 ==================================
1645 1645
1646 1646 $ cat << EOF >> common-pattern.py
1647 1647 > substitutions = [
1648 1648 > (br'foo-(.*)\\b',
1649 1649 > br'\$XXX=\\1\$'),
1650 1650 > (br'bar\\n',
1651 1651 > br'\$YYY$\\n'),
1652 1652 > ]
1653 1653 > EOF
1654 1654
1655 1655 $ cat << EOF >> test-substitution.t
1656 1656 > $ echo foo-12
1657 1657 > \$XXX=12$
1658 1658 > $ echo foo-42
1659 1659 > \$XXX=42$
1660 1660 > $ echo bar prior
1661 1661 > bar prior
1662 1662 > $ echo lastbar
1663 1663 > last\$YYY$
1664 1664 > $ echo foo-bar foo-baz
1665 1665 > EOF
1666 1666
1667 1667 $ rt test-substitution.t
1668 1668
1669 1669 --- $TESTTMP/anothertests/cases/test-substitution.t
1670 1670 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1671 1671 @@ -7,3 +7,4 @@
1672 1672 $ echo lastbar
1673 1673 last$YYY$
1674 1674 $ echo foo-bar foo-baz
1675 1675 + $XXX=bar foo-baz$
1676 1676
1677 1677 ERROR: test-substitution.t output changed
1678 1678 !
1679 1679 Failed test-substitution.t: output changed
1680 1680 # Ran 1 tests, 0 skipped, 1 failed.
1681 1681 python hash seed: * (glob)
1682 1682 [1]
1683 1683
1684 1684 --extra-config-opt works
1685 1685
1686 1686 $ cat << EOF >> test-config-opt.t
1687 1687 > $ hg init test-config-opt
1688 1688 > $ hg -R test-config-opt purge
1689 1689 > EOF
1690 1690
1691 1691 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1692 1692 .
1693 1693 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now