# NOTE(review): the following header is scraper residue from a code-review
# page, commented out so it cannot be mistaken for source:
#   tests: make #testcase available as env var in test...
#   Martin von Zweigbergk - r35554:f04d16be default
#   (diff covered lines 1-3005/1-3008 of tests/run-tests.py)
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
# Optional strict-unicode mode: with RTUNICODEPEDANTRY set, make implicit
# bytes<->unicode coercion fail loudly under Python 2.  reload() is a
# builtin only on Python 2; the NameError branch makes this a no-op on
# Python 3.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Pristine copy of the environment, captured before any test mutates it.
origenviron = os.environ.copy()
# Bytes view of the environment where available (os.environb exists on
# Python 3 posix); falls back to os.environ elsewhere.
osenvironb = getattr(os, 'environb', os.environ)
# Lock held around subprocess creation (see Popen4 below).
processlock = threading.Lock()
91 91
# Optional colorized output via pygments.  pygmentspresent stays False
# when pygments is missing or when running on Windows.
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        # Lexer/formatter pair used by highlightdiff() for test diffs.
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass

if pygmentspresent:
    # Color scheme for the runner's own status lines (skips/failures).
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    # Tokenizes "Skipped ...", "Failed ..." and "ERROR: ..." status lines
    # so the test name and the reason can be colored differently.
    class TestRunnerLexer(lexer.RegexLexer):
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (r'[\w-]+\.(t|py)', token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (r'[\w-]+\.(t|py)', token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    # Lexer/formatter pair used by highlightmsg() for runner messages.
    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
141 141
# Dual Python 2/3 support: paths are handled as bytes throughout this
# script, so define str<->bytes conversion helpers per interpreter.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        """Encode a native-str path to UTF-8 bytes (None passes through)."""
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        """Decode a bytes path to native str (None passes through)."""
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported: bail out early.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support: os.WIFEXITED does not exist there, so default to
# a function that reports "not exited by wait()".
wifexited = getattr(os, "WIFEXITED", lambda x: False)
173 173
174 174 # Whether to use IPv6
# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET', or 'AF_INET6'.
    The port already being in use is fine - EADDRINUSE counts as a
    successful probe, since it proves the address family works.
    """
    family = getattr(socket, name, None)
    if family is None:
        # This platform's socket module does not even define the family.
        return False
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind(('localhost', port))
        probe.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone is already listening there; the family clearly works.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
198 198
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """Return True if a port seems free to bind on localhost."""
    # parseargs() decides the address family; None (not yet parsed)
    # falls back to IPv4 like the explicit False case.
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind(('localhost', port))
        probe.close()
        return True
    except socket.error as exc:
        expected = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                    errno.EPROTONOSUPPORT)
        if exc.errno not in expected:
            raise
        return False
218 218
# Children should not inherit our descriptors on posix.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell in working directory *wd*.

    Returns a subprocess.Popen object decorated with popen4-style
    aliases (fromchild/tochild/childerr).  stderr is merged into stdout.
    If *timeout* is non-zero, a watchdog thread terminates the process
    after that many seconds and sets p.timeout = True.
    """
    # NOTE(review): creation is serialized under processlock — presumably
    # to avoid fd/fork races between concurrent test threads; confirm.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen4-compatible attribute names expected by callers.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the caller's wait()/poll() records an exit code
            # or the deadline passes, then kill the straggler.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
244 244
# The interpreter running this script, with forward slashes so it can be
# embedded in shell command lines on any platform.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    # Jython uses its own module search path variable.
    IMPL_PATH = b'JYTHONPATH'

# Option fallbacks as {option: (environment variable, default)}.
# getparser() resolves each against the environment and installs the
# results as argparse defaults.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
257 257
def canonpath(path):
    """Return *path* with "~" expanded and symlinks/".." resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
260 260
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into {entry (bytes): filename}.

    files is an iterable of file names; listtype names the kind of list
    (e.g. "blacklist") for the warning message.  Each line has any
    trailing "#"-comment stripped; remaining non-empty lines map to the
    file they came from.  Missing files are skipped with a warning on
    stdout unless warn is False; other IOErrors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # "with" guarantees the descriptor is closed even if reading
        # raises (the original leaked it on such errors); iterating the
        # file directly also avoids materializing it via readlines().
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
281 281
def parsettestcases(path):
    """Read a .t test file and return the set of test case names.

    Returns an empty set when path does not exist.
    """
    marker = b'#testcases '
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(marker):
                    cases.update(line[len(marker):].split())
    except IOError as ex:
        # A missing file simply means "no declared cases".
        if ex.errno != errno.ENOENT:
            raise
    return cases
297 297
def getparser():
    """Obtain the OptionParser used by the CLI."""
    # Despite the docstring, this builds an argparse.ArgumentParser; the
    # name is historical.
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # --- which tests to run -------------------------------------------
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
                           help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
                           help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
                           help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
                           help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
                           help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
                           help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
                           help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
                           help='Tests to run')

    # --- how the harness behaves --------------------------------------
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
                         metavar='bisect_repo',
                         help=("Path of a repo to bisect. Use together with "
                               "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
                         help="debug mode: write output of test scripts to console"
                              " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
                         help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
                         help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
                         help="number of jobs to run in parallel"
                              " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
                         help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
                         metavar="known_good_rev",
                         help=("Automatically bisect any failures using this "
                               "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
                         help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
                         help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
                         help='run tests in random order')
    harness.add_argument("-p", "--port", type=int,
                         help="port on which servers should listen"
                              " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
                         help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
                         help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
                         help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
                         help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
                         help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
                         help="kill errant slow tests after SLOWTIMEOUT seconds"
                              " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
                         help="kill errant tests after TIMEOUT seconds"
                              " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
                         help="run tests in the given temporary directory"
                              " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
                         help="output verbose messages")

    # --- which hg gets tested, and with what config -------------------
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
                        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
                        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
                        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
                        help="shortcut for --with-hg=<testdir>/../hg, "
                             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
                        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
                        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
                        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
                        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
                        metavar="HG",
                        help="test using specified hg script rather than a "
                             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
                        help="Python 3 interpreter (if running under Python 2)"
                             " (TEMPORARY)")

    # --- result/coverage reporting ------------------------------------
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
                           help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
                           default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
                           help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
                           help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
                           help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
                           help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
                           help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
                           help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
                           help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
                           help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
                           help="time how long each test takes")
    reporting.add_argument("--view",
                           help="external diff viewer")
    reporting.add_argument("--xunit",
                           help="record xunit results at specified path")

    # Resolve each default from its environment variable, coercing to the
    # type of the hard-coded fallback.  Note this mutates the module-level
    # ``defaults`` dict in place (env-var names are replaced by values).
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
432 432
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Mutates and returns the options namespace; aborts via parser.error()
    on invalid combinations.  Also sets the module globals ``useipv6``
    and ``verbose`` as side effects.
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local derives --with-hg (and --with-chg) from this script's
        # location inside the repository checkout.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Any non-False value enables vlog(); the empty string means
        # "verbose, with no extra prefix" (see log()).
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # Debug mode streams output directly, so timeouts are meaningless.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # The channel display and per-test diffs fight over the terminal.
        options.nodiff = True

    return options
565 565
def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.

    Implemented as copy-then-delete so it works when dst already exists
    or is held open elsewhere (e.g. on Windows).
    """
    shutil.copy(src, dst)
    os.remove(src)
572 572
# This script diffs bytes throughout, but difflib.unified_diff only
# accepts str on Python 3; diff_bytes adapts it back to bytes there.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
577 577
def getdiff(expected, output, ref, err):
    """Return (servefail, lines): a unified diff of expected vs. actual
    test output, lightly normalized for cross-platform comparison.

    servefail becomes True when the diff contains the "child process
    failed to start" abort marker, i.e. a server failed to launch rather
    than the test producing genuinely different output.
    """
    marker = b'+ abort: child process failed to start'
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith((b'+++', b'---')):
            # Normalize path separators in the file-name headers and
            # drop the trailing space some difflib versions emit.
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        servefail = servefail or line.startswith(marker)
    return servefail, lines
592 592
# Global verbosity flag; parseargs() replaces False with a string prefix
# when -v/--verbose is given.
verbose = False
def vlog(*msg):
    """Forward *msg* to log() only when verbose mode is enabled."""
    if verbose is not False:
        return log(*msg)
    return None
600 600
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a byte string safe to embed inside an XML CDATA block.

    Control characters that are illegal even inside CDATA become '?',
    and any ']]>' terminator is broken up with a space so it cannot end
    the block prematurely.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
620 620
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # Hold the output lock so lines from concurrent tests don't interleave.
    with iolock:
        if verbose:
            # In verbose mode each line carries the prefix stored in
            # ``verbose`` (may be the empty string).
            print(verbose, end=' ')
        for part in msg:
            print(part, end=' ')
        print()
        sys.stdout.flush()
633 633
def highlightdiff(line, color):
    """Return *line* (bytes) colorized as diff output when *color* is set.

    With color disabled the input is returned untouched.  Callers must
    only pass color=True when pygments is available.
    """
    if color:
        assert pygmentspresent
        text = line.decode('latin1')
        colored = pygments.highlight(text, difflexer, terminal256formatter)
        return colored.encode('latin1')
    return line
640 640
def highlightmsg(msg, color):
    """Return *msg* (str) colorized with the runner's lexer when *color*
    is set; otherwise return it unchanged."""
    if color:
        assert pygmentspresent
        return pygments.highlight(msg, runnerlexer, runnerformatter)
    return msg
646 646
def terminate(proc):
    """Terminate subprocess, ignoring the case where it is already gone."""
    message = '# Terminating process %d' % proc.pid
    vlog(message)
    try:
        proc.terminate()
    except OSError:
        # The process may have exited on its own already.
        pass
654 654
def killdaemons(pidfile):
    """Kill daemon processes recorded in *pidfile* (best effort).

    Delegates to the killdaemons.py helper that ships alongside this
    script; the import is local so it is only required when called.
    """
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
659 659
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80
670 670
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        outputdir is where the .err output file for this test is written.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.

        hgcommand is the hg executable name to run (defaults to b'hg');
        usechg runs tests through a chg wrapper; useipv6 makes test
        servers listen on IPv6.
        """
        # Unset timeouts/ports fall back to the module-level defaults.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state; reset again in setUp() before each execution.
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
745 745
746 746 def readrefout(self):
747 747 """read reference output"""
748 748 # If we're not in --debug mode and reference output file exists,
749 749 # check test output against it.
750 750 if self._debug:
751 751 return None # to match "out is None"
752 752 elif os.path.exists(self.refpath):
753 753 with open(self.refpath, 'rb') as f:
754 754 return f.read().splitlines(True)
755 755 else:
756 756 return []
757 757
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        # unittest.TestCase.__repr__ looks this attribute up; report the
        # test's file name instead of a real bound-method name.
        return self.name
762 762
    def __str__(self):
        """The test's name (its file basename)."""
        return self.name
765 765
    def shortDescription(self):
        """One-line description shown by unittest result reporting."""
        return self.name
768 768
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so a Test instance can be run repeatedly
        # (e.g. with --loop or --runs-per-test).
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread directory may already exist from an earlier
            # test run on the same worker thread.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg gets a dedicated socket directory per test.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
801 801
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                # Interrupts must propagate so the whole run stops; just
                # remember we were aborted for stopTest() below.
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                # A failing tearDown makes the whole test a failure.
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
856 856
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a process exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
921 921
    def tearDown(self):
        """Tasks to perform after run()."""
        # Make sure no daemons spawned by the test outlive it.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            # Second arg True: ignore errors while removing the trees.
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the actual output as <test>.err so it can be diffed or
        # accepted later; only for real failures (not skips or --debug).
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
948 948
    def _run(self, env):
        """Execute the test body; subclasses must override.

        The base class has no test body, so running it reports a skip.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
952 952
    def abort(self):
        """Terminate execution of this test."""
        # The flag is polled by the run machinery; actual cleanup happens
        # in run()/tearDown(), not here.
        self._aborted = True
956 956
957 957 def _portmap(self, i):
958 958 offset = b'' if i == 0 else b'%d' % i
959 959 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
960 960
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Replace the local IP only when not preceded by a digit, so we
            # don't mangle unrelated numbers that merely end in it.
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # A test directory may ship additional shared substitution patterns.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            r.extend(data.get('substitutions', ()))
        return r
988 988
    def _escapepath(self, p):
        """Return a bytes regex that matches path p in test output.

        On Windows, letters match case-insensitively and '/' and '\\' are
        interchangeable; elsewhere a plain re.escape suffices.
        """
        if os.name == 'nt':
            # NOTE(review): iterating a bytes object yields ints on
            # Python 3, so c.isalpha()/c.lower() look py2-only here —
            # confirm this branch on py3/Windows.
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
998 998
999 999 def _localip(self):
1000 1000 if self._useipv6:
1001 1001 return b'::1'
1002 1002 else:
1003 1003 return b'127.0.0.1'
1004 1004
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            # Re-export every original variable the shell can express.
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables introduced for the test run must be unset to get
            # back to the pristine environment.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1033 1033
    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        def defineport(i):
            # Export the i-th reserved port as HGPORT / HGPORT1 / HGPORT2.
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)
        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = self._testtmp
        # HOME is pointed at the test dir so tests see a clean user config.
        env['HOME'] = self._testtmp
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        # An editor that always succeeds without touching anything.
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env['HGIPV6'] = str(int(self._useipv6))

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = self._localip()

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        # Drop variables that would leak user configuration into tests.
        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
                  'NO_PROXY CHGDEBUG').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')

        return env
1085 1085
    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            # Keep caches inside the per-test temp dir so runs are isolated.
            hgrc.write(b'[largefiles]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/largefiles')))
            hgrc.write(b'[lfs]\n')
            hgrc.write(b'usercache = %s\n' %
                       (os.path.join(self._testtmp, b'.cache/lfs')))
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))

            # Append any --extra-config-opt settings from the command line.
            for opt in self._extraconfigopts:
                section, key = opt.encode('utf-8').split(b'.', 1)
                assert b'=' in key, ('extra config opt %s must '
                                     'have an = for assignment' % opt)
                hgrc.write(b'[%s]\n%s\n' % (section, key))
1113 1113
    def fail(self, msg):
        """Signal a test failure carrying msg."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1118 1118
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: inherit stdio so the user sees output live.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; fabricate a SIGTERM status
            # if the child claims success after being terminated.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            # Nonzero exit: make sure any daemons the test started die.
            killdaemons(env['DAEMON_PIDS'])

        # Normalize output (ports, paths, IPs, ...) for stable comparison.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1167 1167
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as '<name>.out'.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Optionally pass -3 so the interpreter emits py3k warnings.
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows produces \r\n line endings; normalize for comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1186 1186
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1200 1200
# bchr(i) returns a length-1 byte string for integer i on both Python 2
# (where chr already does this) and Python 3.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1204 1204
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Bytes that must be \x-escaped before being written back into a .t file.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map each byte to its escaped form; backslash and \r get readable forms.
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', None)
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the display name, .err file and temp dir unique per case.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
1226 1226
    @property
    def refpath(self):
        # A .t file is its own reference output.
        return os.path.join(self._testdir, self.bname)
1230 1230
    def _run(self, env):
        """Parse the .t file, execute it as a shell script and merge output."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1263 1263
    def _hghave(self, reqs):
        """Check feature requirements via the hghave script.

        Returns (True, None) when all reqs are available, otherwise
        (False, hghave-stdout). Exits the runner on hghave usage errors.
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself failed (bad feature name etc): abort the run.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        if b'slow' in reqs:
            # Tests declaring themselves slow get the extended timeout.
            self._timeout = self._slowtimeout
        return True, None
1285 1285
1286 1286 def _iftest(self, args):
1287 1287 # implements "#if"
1288 1288 reqs = []
1289 1289 for arg in args:
1290 1290 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1291 1291 if arg[3:] == self._case:
1292 1292 return False
1293 1293 elif arg in self._allcases:
1294 1294 if arg != self._case:
1295 1295 return False
1296 1296 else:
1297 1297 reqs.append(arg)
1298 1298 return self._hghave(reqs)[0]
1299 1299
1300 1300 def _parsetest(self, lines):
1301 1301 # We generate a shell script which outputs unique markers to line
1302 1302 # up script results with our source. These markers include input
1303 1303 # line number and the last return code.
1304 1304 salt = b"SALT%d" % time.time()
1305 1305 def addsalt(line, inpython):
1306 1306 if inpython:
1307 1307 script.append(b'%s %d 0\n' % (salt, line))
1308 1308 else:
1309 1309 script.append(b'echo %s %d $?\n' % (salt, line))
1310 1310
1311 1311 script = []
1312 1312
1313 1313 # After we run the shell script, we re-unify the script output
1314 1314 # with non-active parts of the source, with synchronization by our
1315 1315 # SALT line number markers. The after table contains the non-active
1316 1316 # components, ordered by line number.
1317 1317 after = {}
1318 1318
1319 1319 # Expected shell script output.
1320 1320 expected = {}
1321 1321
1322 1322 pos = prepos = -1
1323 1323
1324 1324 # True or False when in a true or false conditional section
1325 1325 skipping = None
1326 1326
1327 1327 # We keep track of whether or not we're in a Python block so we
1328 1328 # can generate the surrounding doctest magic.
1329 1329 inpython = False
1330 1330
1331 1331 if self._debug:
1332 1332 script.append(b'set -x\n')
1333 1333 if self._hgcommand != b'hg':
1334 1334 script.append(b'alias hg="%s"\n' % self._hgcommand)
1335 1335 if os.getenv('MSYSTEM'):
1336 1336 script.append(b'alias pwd="pwd -W"\n')
1337 if self._case:
1338 script.append(b'TESTCASE=%s\n' % shellquote(self._case))
1339 script.append(b'export TESTCASE\n')
1337 1340
1338 1341 n = 0
1339 1342 for n, l in enumerate(lines):
1340 1343 if not l.endswith(b'\n'):
1341 1344 l += b'\n'
1342 1345 if l.startswith(b'#require'):
1343 1346 lsplit = l.split()
1344 1347 if len(lsplit) < 2 or lsplit[0] != b'#require':
1345 1348 after.setdefault(pos, []).append(' !!! invalid #require\n')
1346 1349 haveresult, message = self._hghave(lsplit[1:])
1347 1350 if not haveresult:
1348 1351 script = [b'echo "%s"\nexit 80\n' % message]
1349 1352 break
1350 1353 after.setdefault(pos, []).append(l)
1351 1354 elif l.startswith(b'#if'):
1352 1355 lsplit = l.split()
1353 1356 if len(lsplit) < 2 or lsplit[0] != b'#if':
1354 1357 after.setdefault(pos, []).append(' !!! invalid #if\n')
1355 1358 if skipping is not None:
1356 1359 after.setdefault(pos, []).append(' !!! nested #if\n')
1357 1360 skipping = not self._iftest(lsplit[1:])
1358 1361 after.setdefault(pos, []).append(l)
1359 1362 elif l.startswith(b'#else'):
1360 1363 if skipping is None:
1361 1364 after.setdefault(pos, []).append(' !!! missing #if\n')
1362 1365 skipping = not skipping
1363 1366 after.setdefault(pos, []).append(l)
1364 1367 elif l.startswith(b'#endif'):
1365 1368 if skipping is None:
1366 1369 after.setdefault(pos, []).append(' !!! missing #if\n')
1367 1370 skipping = None
1368 1371 after.setdefault(pos, []).append(l)
1369 1372 elif skipping:
1370 1373 after.setdefault(pos, []).append(l)
1371 1374 elif l.startswith(b' >>> '): # python inlines
1372 1375 after.setdefault(pos, []).append(l)
1373 1376 prepos = pos
1374 1377 pos = n
1375 1378 if not inpython:
1376 1379 # We've just entered a Python block. Add the header.
1377 1380 inpython = True
1378 1381 addsalt(prepos, False) # Make sure we report the exit code.
1379 1382 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1380 1383 addsalt(n, True)
1381 1384 script.append(l[2:])
1382 1385 elif l.startswith(b' ... '): # python inlines
1383 1386 after.setdefault(prepos, []).append(l)
1384 1387 script.append(l[2:])
1385 1388 elif l.startswith(b' $ '): # commands
1386 1389 if inpython:
1387 1390 script.append(b'EOF\n')
1388 1391 inpython = False
1389 1392 after.setdefault(pos, []).append(l)
1390 1393 prepos = pos
1391 1394 pos = n
1392 1395 addsalt(n, False)
1393 1396 cmd = l[4:].split()
1394 1397 if len(cmd) == 2 and cmd[0] == b'cd':
1395 1398 l = b' $ cd %s || exit 1\n' % cmd[1]
1396 1399 script.append(l[4:])
1397 1400 elif l.startswith(b' > '): # continuations
1398 1401 after.setdefault(prepos, []).append(l)
1399 1402 script.append(l[4:])
1400 1403 elif l.startswith(b' '): # results
1401 1404 # Queue up a list of expected results.
1402 1405 expected.setdefault(pos, []).append(l[2:])
1403 1406 else:
1404 1407 if inpython:
1405 1408 script.append(b'EOF\n')
1406 1409 inpython = False
1407 1410 # Non-command/result. Queue up for merged output.
1408 1411 after.setdefault(pos, []).append(l)
1409 1412
1410 1413 if inpython:
1411 1414 script.append(b'EOF\n')
1412 1415 if skipping is not None:
1413 1416 after.setdefault(pos, []).append(' !!! missing #endif\n')
1414 1417 addsalt(n + 1, False)
1415 1418
1416 1419 return salt, script, after, expected
1417 1420
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Walks the salted output, matching each produced line against the
        expected lines for the current position, and rebuilds the full
        merged .t content in postout. Returns (exitcode, postout); exitcode
        becomes False when only warn-level mismatches occurred.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Salt marker: split actual output from the position/retcode.
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        # NOTE(review): on Python 3, linematch/globmatch may
                        # hand back bytes markers that won't satisfy this
                        # isinstance(str) check — confirm the '-glob' path.
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        # Track lines that are allowed to be missing:
                        # explicit " (?)" ones and feature-conditional ones.
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1527 1530
1528 1531 @staticmethod
1529 1532 def rematch(el, l):
1530 1533 try:
1531 1534 el = b'(?:' + el + b')'
1532 1535 # use \Z to ensure that the regex matches to the end of the string
1533 1536 if os.name == 'nt':
1534 1537 return re.match(el + br'\r?\n\Z', l)
1535 1538 return re.match(el + br'\n\Z', l)
1536 1539 except re.error:
1537 1540 # el is an invalid regex
1538 1541 return False
1539 1542
    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            # Exact match: on platforms with an alternate separator the
            # (glob) annotation was unnecessary, so flag it for check-code.
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        # Translate the glob pattern into a regex, one character at a time.
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped special character: keep it literal.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                # '/' matches either separator on Windows.
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1570 1573
    def linematch(self, el, l):
        """Match expected line el against actual output line l.

        Returns True on a match, "retry" when el is optional and may be
        matched by a later output line, or False on a hard mismatch.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                # Optional line: a miss is not fatal.
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                # Undo the \x-escaping applied when the line was recorded.
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep:
                # Retry with forward slashes for Windows path output.
                _l = l.replace(b'\\', b'/')
                if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                    return True
        return retry
1608 1611
1609 1612 @staticmethod
1610 1613 def parsehghaveoutput(lines):
1611 1614 '''Parse hghave log lines.
1612 1615
1613 1616 Return tuple of lists (missing, failed):
1614 1617 * the missing/unknown features
1615 1618 * the features for which existence check failed'''
1616 1619 missing = []
1617 1620 failed = []
1618 1621 for line in lines:
1619 1622 if line.startswith(TTest.SKIPPED_PREFIX):
1620 1623 line = line.splitlines()[0]
1621 1624 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1622 1625 elif line.startswith(TTest.FAILED_PREFIX):
1623 1626 line = line.splitlines()[0]
1624 1627 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1625 1628
1626 1629 return missing, failed
1627 1630
    @staticmethod
    def _escapef(m):
        # re.sub callback: replace a matched byte with its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]
1631 1634
    @staticmethod
    def _stringescape(s):
        # Escape all non-printable/special bytes in s via ESCAPEMAP.
        return TTest.ESCAPESUB(TTest._escapef, s)
1635 1638
1636 1639 iolock = threading.RLock()
1637 1640
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples, populated by stopTest().
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            # Colorize only when pygments is importable and we're on a tty.
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1668 1671
    def addFailure(self, test, reason):
        """Record a test failure and emit progress output."""
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run at the first failure.
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()
1687 1690
    def addSuccess(self, test):
        """Record a passing test (also tracked for xunit reporting)."""
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)
1692 1695
    def addError(self, test, err):
        """Record an errored (as opposed to failed) test."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            # --first: abort the run on the first problem.
            self.stop()
1697 1700
    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test (polyfill for pre-2.7 unittest)."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()
1707 1710
    def addIgnore(self, test, reason):
        """Record an ignored test (custom result kind, similar to skip)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Deliberately-excluded tests still count as "run".
                    self.testsRun += 1
                self.stream.flush()
1719 1722
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # External diff viewer requested via --view.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        line = highlightdiff(line, self.color)
                        if PYTHON3:
                            # Write bytes through the underlying buffer.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    # The reference changed under us; don't offer to accept.
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1777 1780
    def startTest(self, test):
        """Begin running *test*, stamping it with its start times."""
        super(TestResult, self).startTest(test)

        # os.times() returns a 5-tuple: (user, system, children's user,
        # children's system, elapsed real time).  Index 4 (elapsed) is used
        # as the wall-clock reference below.  NOTE: on Windows some of
        # these fields are not meaningful.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]
1788 1791
1789 1792 def stopTest(self, test, interrupted=False):
1790 1793 super(TestResult, self).stopTest(test)
1791 1794
1792 1795 test.stopped = os.times()
1793 1796
1794 1797 starttime = test.started
1795 1798 endtime = test.stopped
1796 1799 origin = self._firststarttime
1797 1800 self.times.append((test.name,
1798 1801 endtime[2] - starttime[2], # user space CPU time
1799 1802 endtime[3] - starttime[3], # sys space CPU time
1800 1803 endtime[4] - starttime[4], # real time
1801 1804 starttime[4] - origin, # start date in run context
1802 1805 endtime[4] - origin, # end date in run context
1803 1806 ))
1804 1807
1805 1808 if interrupted:
1806 1809 with iolock:
1807 1810 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1808 1811 test.name, self.times[-1][3]))
1809 1812
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test is how many times each test is queued for execution.

        loadtest is a callable used to (re)load a test instance; it is
        invoked as ``loadtest(test, count)``.

        showchannels enables a live per-job progress display on stdout.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            # 'get' closes over the loop variable 'test'; it is only called
            # within the same iteration, so the late binding is safe here.
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # whitelisted tests bypass the blacklist/retest/keyword filters
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # match keywords against the test body plus its name
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # one output-channel slot per job for the --showchannels display
        channels = [""] * self._jobs

        def job(test, result):
            # claim the first free channel slot for this job's progress
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # progress ticker: prints one character per channel roughly
            # every second until 'channels' is cleared at the end of run()
            count = 0
            while channels:
                d = '\n%03s  ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + '  ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # reap a finished job when we're saturated or out of tests
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloaded if supported)
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # emptying 'channels' tells the stat() thread to exit
        channels = []

        return result
1995 1998
1996 1999 # Save the most recent 5 wall-clock runtimes of each test to a
1997 2000 # human-readable text file named .testtimes. Tests are sorted
1998 2001 # alphabetically, while times for each test are listed from oldest to
1999 2002 # newest.
2000 2003
def loadtimes(outputdir):
    """Return historical per-test runtimes saved by savetimes().

    Each entry is a ``(testname, [runtime, ...])`` pair, runtimes ordered
    from oldest to newest.  A missing file yields an empty list; any other
    I/O error propagates.
    """
    times = []
    try:
        # savetimes() renames its temp file to '.testtimes'; the previous
        # '.testtimes-' spelling here pointed at a nonexistent file, so
        # saved times were never actually reloaded.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2012 2015
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into ``.testtimes``.

    Keeps at most the five most recent runtimes per test, skipping tests
    that were skipped this run.  The file is replaced atomically via a
    temp file; failures to swap it in are silently ignored (best effort).
    """
    maxruns = 5
    history = dict(loadtimes(outputdir))
    skipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        runs = history.setdefault(name, [])
        runs.append(real)
        del runs[:-maxruns]   # retain only the newest maxruns entries

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, runs in sorted(history.items()):
            formatted = ' '.join('%.3f' % (r,) for r in runs)
            fp.write('%s %s\n' % (name, formatted))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2038 2041
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        # 'runner' is the TestRunner that owns options and output paths
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

    def listtests(self, test):
        """Print the names of the tests in *test* (a suite) without running
        them, still producing xunit/json reports when requested."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Execute the suite *test* and emit the summary, reports, and
        optional bisection/time/exception output."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # all summary output below is serialized under iolock
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                # reprinted so failures can be reproduced with the same seed
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))
                total = sum(exceptions.values())

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (total, len(exceptions)))
                for (frame, line, exc), count in exceptions.most_common():
                    self.stream.writeln('%d\t%s: %s' % (count, frame, exc))

            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Run 'hg bisect' for each failed test to find the changeset that
        broke (or fixed) it, printing one line per test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # run 'args' with HGPLAIN set and return its combined output
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            # parse the changeset out of bisect's final report
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write *result* as a JUnit-style XML report to *outf*."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write *result* as a 'testreport = {...}' JSON blob to *outf*."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                               for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2270 2273
def sorttests(testdescs, shuffle=False):
    """Do an in-place sort of tests."""
    if shuffle:
        random.shuffle(testdescs)
        return

    # substring -> weight multipliers for tests known to be slow
    slow = {b'svn': 10,
            b'cvs': 10,
            b'hghave': 10,
            b'largefiles-update': 10,
            b'run-tests': 10,
            b'corruption': 10,
            b'race': 10,
            b'i18n': 10,
            b'check': 100,
            b'gendoc': 100,
            b'contrib-perf': 200,
            }
    cache = {}

    def sortkey(desc):
        # run largest tests first, as they tend to take the longest
        path = desc['path']
        if path in cache:
            return cache[path]
        try:
            weight = -os.stat(path).st_size
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            cache[path] = -1e9 # file does not exist, tell early
            return -1e9
        for kw, mul in slow.items():
            if kw in path:
                weight *= mul
        if path.endswith(b'.py'):
            weight /= 10.0
        cache[path] = weight / 1000.0
        return cache[path]

    testdescs.sort(key=sortkey)
2314 2317
2315 2318 class TestRunner(object):
2316 2319 """Holds context for executing tests.
2317 2320
2318 2321 Tests rely on a lot of state. This object holds it for them.
2319 2322 """
2320 2323
2321 2324 # Programs required to run tests.
2322 2325 REQUIREDTOOLS = [
2323 2326 b'diff',
2324 2327 b'grep',
2325 2328 b'unzip',
2326 2329 b'gunzip',
2327 2330 b'bunzip2',
2328 2331 b'sed',
2329 2332 ]
2330 2333
2331 2334 # Maps file extensions to test class.
2332 2335 TESTTYPES = [
2333 2336 (b'.py', PythonTest),
2334 2337 (b'.t', TTest),
2335 2338 ]
2336 2339
    def __init__(self):
        """Initialize all runner state to 'not yet configured' defaults;
        real values are filled in by run()/_run()."""
        self.options = None          # parsed command-line options
        self._hgroot = None          # root of the hg source checkout
        self._testdir = None         # directory the tests run from (bytes)
        self._outputdir = None       # where .err/report files are written
        self._hgtmp = None           # per-run scratch directory (HGTMP)
        self._installdir = None      # temp install root, or None w/ --with-hg
        self._bindir = None          # directory holding the 'hg' under test
        self._tmpbinddir = None      # temp bin dir for helper scripts
        self._pythondir = None       # directory to import 'mercurial' from
        self._coveragefile = None    # path to the .coverage data file
        self._createdfiles = []      # files created, removed during cleanup
        self._hgcommand = None       # name of the hg executable (hg/chg/...)
        self._hgpath = None          # cached path of the hg module
        self._portoffset = 0         # next offset for port allocation
        self._ports = {}             # test index -> allocated base port
2353 2356
2354 2357 def run(self, args, parser=None):
2355 2358 """Run the test suite."""
2356 2359 oldmask = os.umask(0o22)
2357 2360 try:
2358 2361 parser = parser or getparser()
2359 2362 options = parseargs(args, parser)
2360 2363 tests = [_bytespath(a) for a in options.tests]
2361 2364 if options.test_list is not None:
2362 2365 for listfile in options.test_list:
2363 2366 with open(listfile, 'rb') as f:
2364 2367 tests.extend(t for t in f.read().splitlines() if t)
2365 2368 self.options = options
2366 2369
2367 2370 self._checktools()
2368 2371 testdescs = self.findtests(tests)
2369 2372 if options.profile_runner:
2370 2373 import statprof
2371 2374 statprof.start()
2372 2375 result = self._run(testdescs)
2373 2376 if options.profile_runner:
2374 2377 statprof.stop()
2375 2378 statprof.display()
2376 2379 return result
2377 2380
2378 2381 finally:
2379 2382 os.umask(oldmask)
2380 2383
    def _run(self, testdescs):
        """Prepare the environment (directories, PATH, PYTHONPATH, env
        vars) and execute the tests, returning the exit status."""
        sorttests(testdescs, shuffle=self.options.random)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir.  So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # use the caller-provided hg; no temp install is done
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # brief pause lets child processes release files before cleanup
            time.sleep(.1)
            self._cleanup()
2541 2544
2542 2545 def findtests(self, args):
2543 2546 """Finds possible test files from arguments.
2544 2547
2545 2548 If you wish to inject custom tests into the test harness, this would
2546 2549 be a good function to monkeypatch or override in a derived class.
2547 2550 """
2548 2551 if not args:
2549 2552 if self.options.changed:
2550 2553 proc = Popen4('hg st --rev "%s" -man0 .' %
2551 2554 self.options.changed, None, 0)
2552 2555 stdout, stderr = proc.communicate()
2553 2556 args = stdout.strip(b'\0').split(b'\0')
2554 2557 else:
2555 2558 args = os.listdir(b'.')
2556 2559
2557 2560 expanded_args = []
2558 2561 for arg in args:
2559 2562 if os.path.isdir(arg):
2560 2563 if not arg.endswith(b'/'):
2561 2564 arg += b'/'
2562 2565 expanded_args.extend([arg + a for a in os.listdir(arg)])
2563 2566 else:
2564 2567 expanded_args.append(arg)
2565 2568 args = expanded_args
2566 2569
2567 2570 tests = []
2568 2571 for t in args:
2569 2572 if not (os.path.basename(t).startswith(b'test-')
2570 2573 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2571 2574 continue
2572 2575 if t.endswith(b'.t'):
2573 2576 # .t file may contain multiple test cases
2574 2577 cases = sorted(parsettestcases(t))
2575 2578 if cases:
2576 2579 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2577 2580 else:
2578 2581 tests.append({'path': t})
2579 2582 else:
2580 2583 tests.append({'path': t})
2581 2584 return tests
2582 2585
    def _runtests(self, testdescs):
        """Build Test objects from *testdescs*, run (or list) them, and
        return 1 on failure/interrupt, None otherwise."""
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: skip ahead to the first test with a stale .err
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    # temp-install mode: build and install hg first
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2657 2660
2658 2661 def _getport(self, count):
2659 2662 port = self._ports.get(count) # do we have a cached entry?
2660 2663 if port is None:
2661 2664 portneeded = 3
2662 2665 # above 100 tries we just give up and let test reports failure
2663 2666 for tries in xrange(100):
2664 2667 allfree = True
2665 2668 port = self.options.port + self._portoffset
2666 2669 for idx in xrange(portneeded):
2667 2670 if not checkportisavailable(port + idx):
2668 2671 allfree = False
2669 2672 break
2670 2673 self._portoffset += portneeded
2671 2674 if allfree:
2672 2675 break
2673 2676 self._ports[count] = port
2674 2677 return port
2675 2678
2676 2679 def _gettest(self, testdesc, count):
2677 2680 """Obtain a Test by looking at its filename.
2678 2681
2679 2682 Returns a Test instance. The Test may not be runnable if it doesn't
2680 2683 map to a known type.
2681 2684 """
2682 2685 path = testdesc['path']
2683 2686 lctest = path.lower()
2684 2687 testcls = Test
2685 2688
2686 2689 for ext, cls in self.TESTTYPES:
2687 2690 if lctest.endswith(ext):
2688 2691 testcls = cls
2689 2692 break
2690 2693
2691 2694 refpath = os.path.join(self._testdir, path)
2692 2695 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2693 2696
2694 2697 # extra keyword parameters. 'case' is used by .t tests
2695 2698 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2696 2699
2697 2700 t = testcls(refpath, self._outputdir, tmpdir,
2698 2701 keeptmpdir=self.options.keep_tmpdir,
2699 2702 debug=self.options.debug,
2700 2703 timeout=self.options.timeout,
2701 2704 startport=self._getport(count),
2702 2705 extraconfigopts=self.options.extra_config_opt,
2703 2706 py3kwarnings=self.options.py3k_warnings,
2704 2707 shell=self.options.shell,
2705 2708 hgcommand=self._hgcommand,
2706 2709 usechg=bool(self.options.with_chg or self.options.chg),
2707 2710 useipv6=useipv6, **kwds)
2708 2711 t.should_reload = True
2709 2712 return t
2710 2713
2711 2714 def _cleanup(self):
2712 2715 """Clean up state from this test invocation."""
2713 2716 if self.options.keep_tmpdir:
2714 2717 return
2715 2718
2716 2719 vlog("# Cleaning up HGTMP", self._hgtmp)
2717 2720 shutil.rmtree(self._hgtmp, True)
2718 2721 for f in self._createdfiles:
2719 2722 try:
2720 2723 os.remove(f)
2721 2724 except OSError:
2722 2725 pass
2723 2726
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            # Preferred approach: drop a "python" symlink into the test bin
            # directory that points at the interpreter running this script.
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                if os.readlink(mypython) == sys.executable:
                    # symlink already correct; nothing to do
                    return
                os.unlink(mypython)
            except OSError as err:
                # ENOENT just means the symlink did not exist yet
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # Fallback when symlinks are unavailable (e.g. some Windows
            # setups): reorder PATH so our interpreter's directory wins.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2757 2760
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            # the command line below is assembled from bytes pieces
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            # install succeeded; drop the log unless the user wants it
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # install failed: dump the captured log and abort
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            # rewrite the installed hg script's shebang line to add -3
            vlog("# Updating hg command to enable Py3k Warnings switch")
            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                lines = [line.rstrip() for line in f]
                lines[0] += ' -3'
            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
                for line in lines:
                    f.write(line + '\n')

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # arm subprocess coverage collection via sitecustomize.py and
            # the COVERAGE_PROCESS_START / COVERAGE_DIR environment hooks
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2871 2874
2872 2875 def _checkhglib(self, verb):
2873 2876 """Ensure that the 'mercurial' package imported by python is
2874 2877 the one we expect it to be. If not, print a warning to stderr."""
2875 2878 if ((self._bindir == self._pythondir) and
2876 2879 (self._bindir != self._tmpbindir)):
2877 2880 # The pythondir has been inferred from --with-hg flag.
2878 2881 # We cannot expect anything sensible here.
2879 2882 return
2880 2883 expecthg = os.path.join(self._pythondir, b'mercurial')
2881 2884 actualhg = self._gethgpath()
2882 2885 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2883 2886 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2884 2887 ' (expected %s)\n'
2885 2888 % (verb, actualhg, expecthg))
    def _gethgpath(self):
        """Return the path to the mercurial package that is actually found by
        the current Python interpreter."""
        if self._hgpath is not None:
            # cached from a previous call
            return self._hgpath

        # Ask a fresh interpreter where *it* imports mercurial from; our
        # own import state may not match the installed test environment.
        cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
        cmd = cmd % PYTHON
        if PYTHON3:
            # os.popen wants a native str on Python 3
            cmd = _strpath(cmd)
        pipe = os.popen(cmd)
        try:
            self._hgpath = _bytespath(pipe.read().strip())
        finally:
            pipe.close()

        return self._hgpath
2903 2906
2904 2907 def _installchg(self):
2905 2908 """Install chg into the test environment"""
2906 2909 vlog('# Performing temporary installation of CHG')
2907 2910 assert os.path.dirname(self._bindir) == self._installdir
2908 2911 assert self._hgroot, 'must be called after _installhg()'
2909 2912 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2910 2913 % {b'make': 'make', # TODO: switch by option or environment?
2911 2914 b'prefix': self._installdir})
2912 2915 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2913 2916 vlog("# Running", cmd)
2914 2917 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2915 2918 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2916 2919 stderr=subprocess.STDOUT)
2917 2920 out, _err = proc.communicate()
2918 2921 if proc.returncode != 0:
2919 2922 if PYTHON3:
2920 2923 sys.stdout.buffer.write(out)
2921 2924 else:
2922 2925 sys.stdout.write(out)
2923 2926 sys.exit(1)
2924 2927
    def _outputcoverage(self):
        """Produce code coverage output."""
        # imported lazily: coverage is only needed with -c/-C/-s
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        # merge the per-process data files written during the test run
        cov.combine()

        # never report on the test scripts or installed binaries themselves
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
2953 2956
2954 2957 def _findprogram(self, program):
2955 2958 """Search PATH for a executable program"""
2956 2959 dpb = _bytespath(os.defpath)
2957 2960 sepb = _bytespath(os.pathsep)
2958 2961 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2959 2962 name = os.path.join(p, program)
2960 2963 if os.name == 'nt' or os.access(name, os.X_OK):
2961 2964 return name
2962 2965 return None
2963 2966
2964 2967 def _checktools(self):
2965 2968 """Ensure tools required to run tests are present."""
2966 2969 for p in self.REQUIREDTOOLS:
2967 2970 if os.name == 'nt' and not p.endswith('.exe'):
2968 2971 p += '.exe'
2969 2972 found = self._findprogram(p)
2970 2973 if found:
2971 2974 vlog("# Found prerequisite", p, "at", found)
2972 2975 else:
2973 2976 print("WARNING: Did not find prerequisite tool: %s " %
2974 2977 p.decode("utf-8"))
2975 2978
def aggregateexceptions(path):
    """Tally exception reports found in directory ``path``.

    Each file is expected to hold exactly four NUL-separated fields
    (exception name, main frame, hg frame, hg line); files with any other
    field count are silently skipped. Returns a Counter keyed by
    ``(hgframe, hgline, exception)``.
    """
    tally = collections.Counter()

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')
        if len(fields) != 4:
            continue

        excname, _mainframe, hgframe, hgline = [
            field.decode('utf-8') for field in fields]
        tally[(hgframe, hgline, excname)] += 1

    return tally
2993 2996
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, force the standard streams into binary mode so test
        # output is not mangled by \r\n translation.
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # msvcrt only exists on Windows; elsewhere streams are fine as-is.
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1585 +1,1605 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 usage: run-tests.py [options] [tests]
36 36 run-tests.py: error: --with-hg must specify an executable hg script
37 37 [2]
38 38 $ rm hg
39 39 #endif
40 40
41 41 Features for testing optional lines
42 42 ===================================
43 43
44 44 $ cat > hghaveaddon.py <<EOF
45 45 > import hghave
46 46 > @hghave.check("custom", "custom hghave feature")
47 47 > def has_custom():
48 48 > return True
49 49 > @hghave.check("missing", "missing hghave feature")
50 50 > def has_missing():
51 51 > return False
52 52 > EOF
53 53
54 54 an empty test
55 55 =======================
56 56
57 57 $ touch test-empty.t
58 58 $ rt
59 59 .
60 60 # Ran 1 tests, 0 skipped, 0 failed.
61 61 $ rm test-empty.t
62 62
63 63 a successful test
64 64 =======================
65 65
66 66 $ cat > test-success.t << EOF
67 67 > $ echo babar
68 68 > babar
69 69 > $ echo xyzzy
70 70 > dont_print (?)
71 71 > nothing[42]line (re) (?)
72 72 > never*happens (glob) (?)
73 73 > more_nothing (?)
74 74 > xyzzy
75 75 > nor this (?)
76 76 > $ printf 'abc\ndef\nxyz\n'
77 77 > 123 (?)
78 78 > abc
79 79 > def (?)
80 80 > 456 (?)
81 81 > xyz
82 82 > $ printf 'zyx\nwvu\ntsr\n'
83 83 > abc (?)
84 84 > zyx (custom !)
85 85 > wvu
86 86 > no_print (no-custom !)
87 87 > tsr (no-missing !)
88 88 > missing (missing !)
89 89 > EOF
90 90
91 91 $ rt
92 92 .
93 93 # Ran 1 tests, 0 skipped, 0 failed.
94 94
95 95 failing test
96 96 ==================
97 97
98 98 test churn with globs
99 99 $ cat > test-failure.t <<EOF
100 100 > $ echo "bar-baz"; echo "bar-bad"; echo foo
101 101 > bar*bad (glob)
102 102 > bar*baz (glob)
103 103 > | fo (re)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,4 +1,4 @@
110 110 $ echo "bar-baz"; echo "bar-bad"; echo foo
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114 - | fo (re)
115 115 + foo
116 116
117 117 ERROR: test-failure.t output changed
118 118 !
119 119 Failed test-failure.t: output changed
120 120 # Ran 1 tests, 0 skipped, 1 failed.
121 121 python hash seed: * (glob)
122 122 [1]
123 123
124 124 test diff colorisation
125 125
126 126 #if no-windows pygments
127 127 $ rt test-failure.t --color always
128 128
129 129 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
130 130 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
131 131 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
132 132 $ echo "bar-baz"; echo "bar-bad"; echo foo
133 133 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
134 134 bar*bad (glob)
135 135 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
136 136 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
137 137 \x1b[38;5;34m+ foo\x1b[39m (esc)
138 138
139 139 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
140 140 !
141 141 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
142 142 # Ran 1 tests, 0 skipped, 1 failed.
143 143 python hash seed: * (glob)
144 144 [1]
145 145
146 146 $ rt test-failure.t 2> tmp.log
147 147 [1]
148 148 $ cat tmp.log
149 149
150 150 --- $TESTTMP/test-failure.t
151 151 +++ $TESTTMP/test-failure.t.err
152 152 @@ -1,4 +1,4 @@
153 153 $ echo "bar-baz"; echo "bar-bad"; echo foo
154 154 + bar*baz (glob)
155 155 bar*bad (glob)
156 156 - bar*baz (glob)
157 157 - | fo (re)
158 158 + foo
159 159
160 160 ERROR: test-failure.t output changed
161 161 !
162 162 Failed test-failure.t: output changed
163 163 # Ran 1 tests, 0 skipped, 1 failed.
164 164 python hash seed: * (glob)
165 165 #endif
166 166
167 167 $ cat > test-failure.t << EOF
168 168 > $ true
169 169 > should go away (true !)
170 170 > $ true
171 171 > should stay (false !)
172 172 >
173 173 > Should remove first line, not second or third
174 174 > $ echo 'testing'
175 175 > baz*foo (glob) (true !)
176 176 > foobar*foo (glob) (false !)
177 177 > te*ting (glob) (true !)
178 178 >
179 179 > Should keep first two lines, remove third and last
180 180 > $ echo 'testing'
181 181 > test.ng (re) (true !)
182 182 > foo.ar (re) (false !)
183 183 > b.r (re) (true !)
184 184 > missing (?)
185 185 > awol (true !)
186 186 >
187 187 > The "missing" line should stay, even though awol is dropped
188 188 > $ echo 'testing'
189 189 > test.ng (re) (true !)
190 190 > foo.ar (?)
191 191 > awol
192 192 > missing (?)
193 193 > EOF
194 194 $ rt test-failure.t
195 195
196 196 --- $TESTTMP/test-failure.t
197 197 +++ $TESTTMP/test-failure.t.err
198 198 @@ -1,11 +1,9 @@
199 199 $ true
200 200 - should go away (true !)
201 201 $ true
202 202 should stay (false !)
203 203
204 204 Should remove first line, not second or third
205 205 $ echo 'testing'
206 206 - baz*foo (glob) (true !)
207 207 foobar*foo (glob) (false !)
208 208 te*ting (glob) (true !)
209 209
210 210 foo.ar (re) (false !)
211 211 missing (?)
212 212 @@ -13,13 +11,10 @@
213 213 $ echo 'testing'
214 214 test.ng (re) (true !)
215 215 foo.ar (re) (false !)
216 216 - b.r (re) (true !)
217 217 missing (?)
218 218 - awol (true !)
219 219
220 220 The "missing" line should stay, even though awol is dropped
221 221 $ echo 'testing'
222 222 test.ng (re) (true !)
223 223 foo.ar (?)
224 224 - awol
225 225 missing (?)
226 226
227 227 ERROR: test-failure.t output changed
228 228 !
229 229 Failed test-failure.t: output changed
230 230 # Ran 1 tests, 0 skipped, 1 failed.
231 231 python hash seed: * (glob)
232 232 [1]
233 233
234 234 basic failing test
235 235 $ cat > test-failure.t << EOF
236 236 > $ echo babar
237 237 > rataxes
238 238 > This is a noop statement so that
239 239 > this test is still more bytes than success.
240 240 > pad pad pad pad............................................................
241 241 > pad pad pad pad............................................................
242 242 > pad pad pad pad............................................................
243 243 > pad pad pad pad............................................................
244 244 > pad pad pad pad............................................................
245 245 > pad pad pad pad............................................................
246 246 > EOF
247 247
248 248 >>> fh = open('test-failure-unicode.t', 'wb')
249 249 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
250 250 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
251 251
252 252 $ rt
253 253
254 254 --- $TESTTMP/test-failure.t
255 255 +++ $TESTTMP/test-failure.t.err
256 256 @@ -1,5 +1,5 @@
257 257 $ echo babar
258 258 - rataxes
259 259 + babar
260 260 This is a noop statement so that
261 261 this test is still more bytes than success.
262 262 pad pad pad pad............................................................
263 263
264 264 ERROR: test-failure.t output changed
265 265 !.
266 266 --- $TESTTMP/test-failure-unicode.t
267 267 +++ $TESTTMP/test-failure-unicode.t.err
268 268 @@ -1,2 +1,2 @@
269 269 $ echo babar\xce\xb1 (esc)
270 270 - l\xce\xb5\xce\xb5t (esc)
271 271 + babar\xce\xb1 (esc)
272 272
273 273 ERROR: test-failure-unicode.t output changed
274 274 !
275 275 Failed test-failure.t: output changed
276 276 Failed test-failure-unicode.t: output changed
277 277 # Ran 3 tests, 0 skipped, 2 failed.
278 278 python hash seed: * (glob)
279 279 [1]
280 280
281 281 test --outputdir
282 282 $ mkdir output
283 283 $ rt --outputdir output
284 284
285 285 --- $TESTTMP/test-failure.t
286 286 +++ $TESTTMP/output/test-failure.t.err
287 287 @@ -1,5 +1,5 @@
288 288 $ echo babar
289 289 - rataxes
290 290 + babar
291 291 This is a noop statement so that
292 292 this test is still more bytes than success.
293 293 pad pad pad pad............................................................
294 294
295 295 ERROR: test-failure.t output changed
296 296 !.
297 297 --- $TESTTMP/test-failure-unicode.t
298 298 +++ $TESTTMP/output/test-failure-unicode.t.err
299 299 @@ -1,2 +1,2 @@
300 300 $ echo babar\xce\xb1 (esc)
301 301 - l\xce\xb5\xce\xb5t (esc)
302 302 + babar\xce\xb1 (esc)
303 303
304 304 ERROR: test-failure-unicode.t output changed
305 305 !
306 306 Failed test-failure.t: output changed
307 307 Failed test-failure-unicode.t: output changed
308 308 # Ran 3 tests, 0 skipped, 2 failed.
309 309 python hash seed: * (glob)
310 310 [1]
311 311 $ ls -a output
312 312 .
313 313 ..
314 314 .testtimes
315 315 test-failure-unicode.t.err
316 316 test-failure.t.err
317 317
318 318 test --xunit support
319 319 $ rt --xunit=xunit.xml
320 320
321 321 --- $TESTTMP/test-failure.t
322 322 +++ $TESTTMP/test-failure.t.err
323 323 @@ -1,5 +1,5 @@
324 324 $ echo babar
325 325 - rataxes
326 326 + babar
327 327 This is a noop statement so that
328 328 this test is still more bytes than success.
329 329 pad pad pad pad............................................................
330 330
331 331 ERROR: test-failure.t output changed
332 332 !.
333 333 --- $TESTTMP/test-failure-unicode.t
334 334 +++ $TESTTMP/test-failure-unicode.t.err
335 335 @@ -1,2 +1,2 @@
336 336 $ echo babar\xce\xb1 (esc)
337 337 - l\xce\xb5\xce\xb5t (esc)
338 338 + babar\xce\xb1 (esc)
339 339
340 340 ERROR: test-failure-unicode.t output changed
341 341 !
342 342 Failed test-failure.t: output changed
343 343 Failed test-failure-unicode.t: output changed
344 344 # Ran 3 tests, 0 skipped, 2 failed.
345 345 python hash seed: * (glob)
346 346 [1]
347 347 $ cat xunit.xml
348 348 <?xml version="1.0" encoding="utf-8"?>
349 349 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
350 350 <testcase name="test-success.t" time="*"/> (glob)
351 351 <testcase name="test-failure-unicode.t" time="*"> (glob)
352 352 <failure message="output changed" type="output-mismatch">
353 353 <![CDATA[--- $TESTTMP/test-failure-unicode.t
354 354 +++ $TESTTMP/test-failure-unicode.t.err
355 355 @@ -1,2 +1,2 @@
356 356 $ echo babar\xce\xb1 (esc)
357 357 - l\xce\xb5\xce\xb5t (esc)
358 358 + babar\xce\xb1 (esc)
359 359 ]]> </failure>
360 360 </testcase>
361 361 <testcase name="test-failure.t" time="*"> (glob)
362 362 <failure message="output changed" type="output-mismatch">
363 363 <![CDATA[--- $TESTTMP/test-failure.t
364 364 +++ $TESTTMP/test-failure.t.err
365 365 @@ -1,5 +1,5 @@
366 366 $ echo babar
367 367 - rataxes
368 368 + babar
369 369 This is a noop statement so that
370 370 this test is still more bytes than success.
371 371 pad pad pad pad............................................................
372 372 ]]> </failure>
373 373 </testcase>
374 374 </testsuite>
375 375
376 376 $ cat .testtimes
377 377 test-failure-unicode.t * (glob)
378 378 test-failure.t * (glob)
379 379 test-success.t * (glob)
380 380
381 381 $ rt --list-tests
382 382 test-failure-unicode.t
383 383 test-failure.t
384 384 test-success.t
385 385
386 386 $ rt --list-tests --json
387 387 test-failure-unicode.t
388 388 test-failure.t
389 389 test-success.t
390 390 $ cat report.json
391 391 testreport ={
392 392 "test-failure-unicode.t": {
393 393 "result": "success"
394 394 },
395 395 "test-failure.t": {
396 396 "result": "success"
397 397 },
398 398 "test-success.t": {
399 399 "result": "success"
400 400 }
401 401 } (no-eol)
402 402
403 403 $ rt --list-tests --xunit=xunit.xml
404 404 test-failure-unicode.t
405 405 test-failure.t
406 406 test-success.t
407 407 $ cat xunit.xml
408 408 <?xml version="1.0" encoding="utf-8"?>
409 409 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
410 410 <testcase name="test-failure-unicode.t"/>
411 411 <testcase name="test-failure.t"/>
412 412 <testcase name="test-success.t"/>
413 413 </testsuite>
414 414
415 415 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
416 416 test-failure-unicode.t
417 417 test-failure.t
418 418 $ cat output/report.json
419 419 testreport ={
420 420 "test-failure-unicode.t": {
421 421 "result": "success"
422 422 },
423 423 "test-failure.t": {
424 424 "result": "success"
425 425 }
426 426 } (no-eol)
427 427 $ cat xunit.xml
428 428 <?xml version="1.0" encoding="utf-8"?>
429 429 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
430 430 <testcase name="test-failure-unicode.t"/>
431 431 <testcase name="test-failure.t"/>
432 432 </testsuite>
433 433
434 434 $ rm test-failure-unicode.t
435 435
436 436 test for --retest
437 437 ====================
438 438
439 439 $ rt --retest
440 440
441 441 --- $TESTTMP/test-failure.t
442 442 +++ $TESTTMP/test-failure.t.err
443 443 @@ -1,5 +1,5 @@
444 444 $ echo babar
445 445 - rataxes
446 446 + babar
447 447 This is a noop statement so that
448 448 this test is still more bytes than success.
449 449 pad pad pad pad............................................................
450 450
451 451 ERROR: test-failure.t output changed
452 452 !
453 453 Failed test-failure.t: output changed
454 454 # Ran 2 tests, 1 skipped, 1 failed.
455 455 python hash seed: * (glob)
456 456 [1]
457 457
458 458 --retest works with --outputdir
459 459 $ rm -r output
460 460 $ mkdir output
461 461 $ mv test-failure.t.err output
462 462 $ rt --retest --outputdir output
463 463
464 464 --- $TESTTMP/test-failure.t
465 465 +++ $TESTTMP/output/test-failure.t.err
466 466 @@ -1,5 +1,5 @@
467 467 $ echo babar
468 468 - rataxes
469 469 + babar
470 470 This is a noop statement so that
471 471 this test is still more bytes than success.
472 472 pad pad pad pad............................................................
473 473
474 474 ERROR: test-failure.t output changed
475 475 !
476 476 Failed test-failure.t: output changed
477 477 # Ran 2 tests, 1 skipped, 1 failed.
478 478 python hash seed: * (glob)
479 479 [1]
480 480
481 481 Selecting Tests To Run
482 482 ======================
483 483
484 484 successful
485 485
486 486 $ rt test-success.t
487 487 .
488 488 # Ran 1 tests, 0 skipped, 0 failed.
489 489
490 490 success w/ keyword
491 491 $ rt -k xyzzy
492 492 .
493 493 # Ran 2 tests, 1 skipped, 0 failed.
494 494
495 495 failed
496 496
497 497 $ rt test-failure.t
498 498
499 499 --- $TESTTMP/test-failure.t
500 500 +++ $TESTTMP/test-failure.t.err
501 501 @@ -1,5 +1,5 @@
502 502 $ echo babar
503 503 - rataxes
504 504 + babar
505 505 This is a noop statement so that
506 506 this test is still more bytes than success.
507 507 pad pad pad pad............................................................
508 508
509 509 ERROR: test-failure.t output changed
510 510 !
511 511 Failed test-failure.t: output changed
512 512 # Ran 1 tests, 0 skipped, 1 failed.
513 513 python hash seed: * (glob)
514 514 [1]
515 515
516 516 failure w/ keyword
517 517 $ rt -k rataxes
518 518
519 519 --- $TESTTMP/test-failure.t
520 520 +++ $TESTTMP/test-failure.t.err
521 521 @@ -1,5 +1,5 @@
522 522 $ echo babar
523 523 - rataxes
524 524 + babar
525 525 This is a noop statement so that
526 526 this test is still more bytes than success.
527 527 pad pad pad pad............................................................
528 528
529 529 ERROR: test-failure.t output changed
530 530 !
531 531 Failed test-failure.t: output changed
532 532 # Ran 2 tests, 1 skipped, 1 failed.
533 533 python hash seed: * (glob)
534 534 [1]
535 535
536 536 Verify that when a process fails to start we show a useful message
537 537 ==================================================================
538 538
539 539 $ cat > test-serve-fail.t <<EOF
540 540 > $ echo 'abort: child process failed to start blah'
541 541 > EOF
542 542 $ rt test-serve-fail.t
543 543
544 544 ERROR: test-serve-fail.t output changed
545 545 !
546 546 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
547 547 # Ran 1 tests, 0 skipped, 1 failed.
548 548 python hash seed: * (glob)
549 549 [1]
550 550 $ rm test-serve-fail.t
551 551
552 552 Verify that we can try other ports
553 553 ===================================
554 554 $ hg init inuse
555 555 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
556 556 $ cat blocks.pid >> $DAEMON_PIDS
557 557 $ cat > test-serve-inuse.t <<EOF
558 558 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
559 559 > $ cat hg.pid >> \$DAEMON_PIDS
560 560 > EOF
561 561 $ rt test-serve-inuse.t
562 562 .
563 563 # Ran 1 tests, 0 skipped, 0 failed.
564 564 $ rm test-serve-inuse.t
565 565 $ killdaemons.py $DAEMON_PIDS
566 566 $ rm $DAEMON_PIDS
567 567
568 568 Running In Debug Mode
569 569 ======================
570 570
571 571 $ rt --debug 2>&1 | grep -v pwd
572 572 + echo *SALT* 0 0 (glob)
573 573 *SALT* 0 0 (glob)
574 574 + echo babar
575 575 babar
576 576 + echo *SALT* 10 0 (glob)
577 577 *SALT* 10 0 (glob)
578 578 *+ echo *SALT* 0 0 (glob)
579 579 *SALT* 0 0 (glob)
580 580 + echo babar
581 581 babar
582 582 + echo *SALT* 2 0 (glob)
583 583 *SALT* 2 0 (glob)
584 584 + echo xyzzy
585 585 xyzzy
586 586 + echo *SALT* 9 0 (glob)
587 587 *SALT* 9 0 (glob)
588 588 + printf *abc\ndef\nxyz\n* (glob)
589 589 abc
590 590 def
591 591 xyz
592 592 + echo *SALT* 15 0 (glob)
593 593 *SALT* 15 0 (glob)
594 594 + printf *zyx\nwvu\ntsr\n* (glob)
595 595 zyx
596 596 wvu
597 597 tsr
598 598 + echo *SALT* 22 0 (glob)
599 599 *SALT* 22 0 (glob)
600 600 .
601 601 # Ran 2 tests, 0 skipped, 0 failed.
602 602
603 603 Parallel runs
604 604 ==============
605 605
606 606 (duplicate the failing test to get predictable output)
607 607 $ cp test-failure.t test-failure-copy.t
608 608
609 609 $ rt --jobs 2 test-failure*.t -n
610 610 !!
611 611 Failed test-failure*.t: output changed (glob)
612 612 Failed test-failure*.t: output changed (glob)
613 613 # Ran 2 tests, 0 skipped, 2 failed.
614 614 python hash seed: * (glob)
615 615 [1]
616 616
617 617 failures in parallel with --first should only print one failure
618 618 $ rt --jobs 2 --first test-failure*.t
619 619
620 620 --- $TESTTMP/test-failure*.t (glob)
621 621 +++ $TESTTMP/test-failure*.t.err (glob)
622 622 @@ -1,5 +1,5 @@
623 623 $ echo babar
624 624 - rataxes
625 625 + babar
626 626 This is a noop statement so that
627 627 this test is still more bytes than success.
628 628 pad pad pad pad............................................................
629 629
630 630 Failed test-failure*.t: output changed (glob)
631 631 Failed test-failure*.t: output changed (glob)
632 632 # Ran 2 tests, 0 skipped, 2 failed.
633 633 python hash seed: * (glob)
634 634 [1]
635 635
636 636
637 637 (delete the duplicated test file)
638 638 $ rm test-failure-copy.t
639 639
640 640
641 641 Interactive run
642 642 ===============
643 643
644 644 (backup the failing test)
645 645 $ cp test-failure.t backup
646 646
647 647 Refuse the fix
648 648
649 649 $ echo 'n' | rt -i
650 650
651 651 --- $TESTTMP/test-failure.t
652 652 +++ $TESTTMP/test-failure.t.err
653 653 @@ -1,5 +1,5 @@
654 654 $ echo babar
655 655 - rataxes
656 656 + babar
657 657 This is a noop statement so that
658 658 this test is still more bytes than success.
659 659 pad pad pad pad............................................................
660 660 Accept this change? [n]
661 661 ERROR: test-failure.t output changed
662 662 !.
663 663 Failed test-failure.t: output changed
664 664 # Ran 2 tests, 0 skipped, 1 failed.
665 665 python hash seed: * (glob)
666 666 [1]
667 667
668 668 $ cat test-failure.t
669 669 $ echo babar
670 670 rataxes
671 671 This is a noop statement so that
672 672 this test is still more bytes than success.
673 673 pad pad pad pad............................................................
674 674 pad pad pad pad............................................................
675 675 pad pad pad pad............................................................
676 676 pad pad pad pad............................................................
677 677 pad pad pad pad............................................................
678 678 pad pad pad pad............................................................
679 679
680 680 Interactive with custom view
681 681
682 682 $ echo 'n' | rt -i --view echo
683 683 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
684 684 Accept this change? [n]* (glob)
685 685 ERROR: test-failure.t output changed
686 686 !.
687 687 Failed test-failure.t: output changed
688 688 # Ran 2 tests, 0 skipped, 1 failed.
689 689 python hash seed: * (glob)
690 690 [1]
691 691
692 692 View the fix
693 693
694 694 $ echo 'y' | rt --view echo
695 695 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
696 696
697 697 ERROR: test-failure.t output changed
698 698 !.
699 699 Failed test-failure.t: output changed
700 700 # Ran 2 tests, 0 skipped, 1 failed.
701 701 python hash seed: * (glob)
702 702 [1]
703 703
704 704 Accept the fix
705 705
706 706 $ cat >> test-failure.t <<EOF
707 707 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
708 708 > saved backup bundle to \$TESTTMP/foo.hg
709 709 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
710 710 > saved backup bundle to $TESTTMP\\foo.hg
711 711 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
712 712 > saved backup bundle to \$TESTTMP/*.hg (glob)
713 713 > EOF
714 714 $ echo 'y' | rt -i 2>&1
715 715
716 716 --- $TESTTMP/test-failure.t
717 717 +++ $TESTTMP/test-failure.t.err
718 718 @@ -1,5 +1,5 @@
719 719 $ echo babar
720 720 - rataxes
721 721 + babar
722 722 This is a noop statement so that
723 723 this test is still more bytes than success.
724 724 pad pad pad pad............................................................
725 725 @@ -11,6 +11,6 @@
726 726 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
727 727 saved backup bundle to $TESTTMP/foo.hg
728 728 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
729 729 - saved backup bundle to $TESTTMP\foo.hg
730 730 + saved backup bundle to $TESTTMP/foo.hg
731 731 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
732 732 saved backup bundle to $TESTTMP/*.hg (glob)
733 733 Accept this change? [n] ..
734 734 # Ran 2 tests, 0 skipped, 0 failed.
735 735
736 736 $ sed -e 's,(glob)$,&<,g' test-failure.t
737 737 $ echo babar
738 738 babar
739 739 This is a noop statement so that
740 740 this test is still more bytes than success.
741 741 pad pad pad pad............................................................
742 742 pad pad pad pad............................................................
743 743 pad pad pad pad............................................................
744 744 pad pad pad pad............................................................
745 745 pad pad pad pad............................................................
746 746 pad pad pad pad............................................................
747 747 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
748 748 saved backup bundle to $TESTTMP/foo.hg
749 749 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
750 750 saved backup bundle to $TESTTMP/foo.hg
751 751 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
752 752 saved backup bundle to $TESTTMP/*.hg (glob)<
753 753
754 754 Race condition - test file was modified when test is running
755 755
756 756 $ TESTRACEDIR=`pwd`
757 757 $ export TESTRACEDIR
758 758 $ cat > test-race.t <<EOF
759 759 > $ echo 1
760 760 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
761 761 > EOF
762 762
763 763 $ rt -i test-race.t
764 764
765 765 --- $TESTTMP/test-race.t
766 766 +++ $TESTTMP/test-race.t.err
767 767 @@ -1,2 +1,3 @@
768 768 $ echo 1
769 769 + 1
770 770 $ echo "# a new line" >> $TESTTMP/test-race.t
771 771 Reference output has changed (run again to prompt changes)
772 772 ERROR: test-race.t output changed
773 773 !
774 774 Failed test-race.t: output changed
775 775 # Ran 1 tests, 0 skipped, 1 failed.
776 776 python hash seed: * (glob)
777 777 [1]
778 778
779 779 $ rm test-race.t
780 780
781 781 When "#testcases" is used in .t files
782 782
783 783 $ cat >> test-cases.t <<EOF
784 784 > #testcases a b
785 785 > #if a
786 786 > $ echo 1
787 787 > #endif
788 788 > #if b
789 789 > $ echo 2
790 790 > #endif
791 791 > EOF
792 792
793 793 $ cat <<EOF | rt -i test-cases.t 2>&1
794 794 > y
795 795 > y
796 796 > EOF
797 797
798 798 --- $TESTTMP/test-cases.t
799 799 +++ $TESTTMP/test-cases.t.a.err
800 800 @@ -1,6 +1,7 @@
801 801 #testcases a b
802 802 #if a
803 803 $ echo 1
804 804 + 1
805 805 #endif
806 806 #if b
807 807 $ echo 2
808 808 Accept this change? [n] .
809 809 --- $TESTTMP/test-cases.t
810 810 +++ $TESTTMP/test-cases.t.b.err
811 811 @@ -5,4 +5,5 @@
812 812 #endif
813 813 #if b
814 814 $ echo 2
815 815 + 2
816 816 #endif
817 817 Accept this change? [n] .
818 818 # Ran 2 tests, 0 skipped, 0 failed.
819 819
820 820 $ cat test-cases.t
821 821 #testcases a b
822 822 #if a
823 823 $ echo 1
824 824 1
825 825 #endif
826 826 #if b
827 827 $ echo 2
828 828 2
829 829 #endif
830 830
831 831 $ cat >> test-cases.t <<'EOF'
832 832 > #if a
833 833 > $ NAME=A
834 834 > #else
835 835 > $ NAME=B
836 836 > #endif
837 837 > $ echo $NAME
838 838 > A (a !)
839 839 > B (b !)
840 840 > EOF
841 841 $ rt test-cases.t
842 842 ..
843 843 # Ran 2 tests, 0 skipped, 0 failed.
844 844
845 845 $ rm test-cases.t
846 846
847 847 (reinstall)
848 848 $ mv backup test-failure.t
849 849
850 850 No Diff
851 851 ===============
852 852
853 853 $ rt --nodiff
854 854 !.
855 855 Failed test-failure.t: output changed
856 856 # Ran 2 tests, 0 skipped, 1 failed.
857 857 python hash seed: * (glob)
858 858 [1]
859 859
860 860 test --tmpdir support
861 861 $ rt --tmpdir=$TESTTMP/keep test-success.t
862 862
863 863 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
864 864 Keeping threadtmp dir: $TESTTMP/keep/child1
865 865 .
866 866 # Ran 1 tests, 0 skipped, 0 failed.
867 867
868 868 timeouts
869 869 ========
870 870 $ cat > test-timeout.t <<EOF
871 871 > $ sleep 2
872 872 > $ echo pass
873 873 > pass
874 874 > EOF
875 875 > echo '#require slow' > test-slow-timeout.t
876 876 > cat test-timeout.t >> test-slow-timeout.t
877 877 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
878 878 st
879 879 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
880 880 Failed test-timeout.t: timed out
881 881 # Ran 1 tests, 1 skipped, 1 failed.
882 882 python hash seed: * (glob)
883 883 [1]
884 884 $ rt --timeout=1 --slowtimeout=3 \
885 885 > test-timeout.t test-slow-timeout.t --allow-slow-tests
886 886 .t
887 887 Failed test-timeout.t: timed out
888 888 # Ran 2 tests, 0 skipped, 1 failed.
889 889 python hash seed: * (glob)
890 890 [1]
891 891 $ rm test-timeout.t test-slow-timeout.t
892 892
893 893 test for --time
894 894 ==================
895 895
896 896 $ rt test-success.t --time
897 897 .
898 898 # Ran 1 tests, 0 skipped, 0 failed.
899 899 # Producing time report
900 900 start end cuser csys real Test
901 901 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
902 902
903 903 test for --time with --job enabled
904 904 ====================================
905 905
906 906 $ rt test-success.t --time --jobs 2
907 907 .
908 908 # Ran 1 tests, 0 skipped, 0 failed.
909 909 # Producing time report
910 910 start end cuser csys real Test
911 911 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
912 912
913 913 Skips
914 914 ================
915 915 $ cat > test-skip.t <<EOF
916 916 > $ echo xyzzy
917 917 > #require false
918 918 > EOF
919 919 $ rt --nodiff
920 920 !.s
921 921 Skipped test-skip.t: missing feature: nail clipper
922 922 Failed test-failure.t: output changed
923 923 # Ran 2 tests, 1 skipped, 1 failed.
924 924 python hash seed: * (glob)
925 925 [1]
926 926
927 927 $ rt --keyword xyzzy
928 928 .s
929 929 Skipped test-skip.t: missing feature: nail clipper
930 930 # Ran 2 tests, 2 skipped, 0 failed.
931 931
932 932 Skips with xml
933 933 $ rt --keyword xyzzy \
934 934 > --xunit=xunit.xml
935 935 .s
936 936 Skipped test-skip.t: missing feature: nail clipper
937 937 # Ran 2 tests, 2 skipped, 0 failed.
938 938 $ cat xunit.xml
939 939 <?xml version="1.0" encoding="utf-8"?>
940 940 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
941 941 <testcase name="test-success.t" time="*"/> (glob)
942 942 <testcase name="test-skip.t">
943 943 <skipped>
944 944 <![CDATA[missing feature: nail clipper]]> </skipped>
945 945 </testcase>
946 946 </testsuite>
947 947
948 948 Missing skips or blacklisted skips don't count as executed:
949 949 $ echo test-failure.t > blacklist
950 950 $ rt --blacklist=blacklist --json\
951 951 > test-failure.t test-bogus.t
952 952 ss
953 953 Skipped test-bogus.t: Doesn't exist
954 954 Skipped test-failure.t: blacklisted
955 955 # Ran 0 tests, 2 skipped, 0 failed.
956 956 $ cat report.json
957 957 testreport ={
958 958 "test-bogus.t": {
959 959 "result": "skip"
960 960 },
961 961 "test-failure.t": {
962 962 "result": "skip"
963 963 }
964 964 } (no-eol)
965 965
966 966 Whitelist trumps blacklist
967 967 $ echo test-failure.t > whitelist
968 968 $ rt --blacklist=blacklist --whitelist=whitelist --json\
969 969 > test-failure.t test-bogus.t
970 970 s
971 971 --- $TESTTMP/test-failure.t
972 972 +++ $TESTTMP/test-failure.t.err
973 973 @@ -1,5 +1,5 @@
974 974 $ echo babar
975 975 - rataxes
976 976 + babar
977 977 This is a noop statement so that
978 978 this test is still more bytes than success.
979 979 pad pad pad pad............................................................
980 980
981 981 ERROR: test-failure.t output changed
982 982 !
983 983 Skipped test-bogus.t: Doesn't exist
984 984 Failed test-failure.t: output changed
985 985 # Ran 1 tests, 1 skipped, 1 failed.
986 986 python hash seed: * (glob)
987 987 [1]
988 988
989 989 Ensure that --test-list causes only the tests listed in that file to
990 990 be executed.
991 991 $ echo test-success.t >> onlytest
992 992 $ rt --test-list=onlytest
993 993 .
994 994 # Ran 1 tests, 0 skipped, 0 failed.
995 995 $ echo test-bogus.t >> anothertest
996 996 $ rt --test-list=onlytest --test-list=anothertest
997 997 s.
998 998 Skipped test-bogus.t: Doesn't exist
999 999 # Ran 1 tests, 1 skipped, 0 failed.
1000 1000 $ rm onlytest anothertest
1001 1001
1002 1002 test for --json
1003 1003 ==================
1004 1004
1005 1005 $ rt --json
1006 1006
1007 1007 --- $TESTTMP/test-failure.t
1008 1008 +++ $TESTTMP/test-failure.t.err
1009 1009 @@ -1,5 +1,5 @@
1010 1010 $ echo babar
1011 1011 - rataxes
1012 1012 + babar
1013 1013 This is a noop statement so that
1014 1014 this test is still more bytes than success.
1015 1015 pad pad pad pad............................................................
1016 1016
1017 1017 ERROR: test-failure.t output changed
1018 1018 !.s
1019 1019 Skipped test-skip.t: missing feature: nail clipper
1020 1020 Failed test-failure.t: output changed
1021 1021 # Ran 2 tests, 1 skipped, 1 failed.
1022 1022 python hash seed: * (glob)
1023 1023 [1]
1024 1024
1025 1025 $ cat report.json
1026 1026 testreport ={
1027 1027 "test-failure.t": [\{] (re)
1028 1028 "csys": "\s*[\d\.]{4,5}", ? (re)
1029 1029 "cuser": "\s*[\d\.]{4,5}", ? (re)
1030 1030 "diff": "---.+\+\+\+.+", ? (re)
1031 1031 "end": "\s*[\d\.]{4,5}", ? (re)
1032 1032 "result": "failure", ? (re)
1033 1033 "start": "\s*[\d\.]{4,5}", ? (re)
1034 1034 "time": "\s*[\d\.]{4,5}" (re)
1035 1035 }, ? (re)
1036 1036 "test-skip.t": {
1037 1037 "csys": "\s*[\d\.]{4,5}", ? (re)
1038 1038 "cuser": "\s*[\d\.]{4,5}", ? (re)
1039 1039 "diff": "", ? (re)
1040 1040 "end": "\s*[\d\.]{4,5}", ? (re)
1041 1041 "result": "skip", ? (re)
1042 1042 "start": "\s*[\d\.]{4,5}", ? (re)
1043 1043 "time": "\s*[\d\.]{4,5}" (re)
1044 1044 }, ? (re)
1045 1045 "test-success.t": [\{] (re)
1046 1046 "csys": "\s*[\d\.]{4,5}", ? (re)
1047 1047 "cuser": "\s*[\d\.]{4,5}", ? (re)
1048 1048 "diff": "", ? (re)
1049 1049 "end": "\s*[\d\.]{4,5}", ? (re)
1050 1050 "result": "success", ? (re)
1051 1051 "start": "\s*[\d\.]{4,5}", ? (re)
1052 1052 "time": "\s*[\d\.]{4,5}" (re)
1053 1053 }
1054 1054 } (no-eol)
1055 1055 --json with --outputdir
1056 1056
1057 1057 $ rm report.json
1058 1058 $ rm -r output
1059 1059 $ mkdir output
1060 1060 $ rt --json --outputdir output
1061 1061
1062 1062 --- $TESTTMP/test-failure.t
1063 1063 +++ $TESTTMP/output/test-failure.t.err
1064 1064 @@ -1,5 +1,5 @@
1065 1065 $ echo babar
1066 1066 - rataxes
1067 1067 + babar
1068 1068 This is a noop statement so that
1069 1069 this test is still more bytes than success.
1070 1070 pad pad pad pad............................................................
1071 1071
1072 1072 ERROR: test-failure.t output changed
1073 1073 !.s
1074 1074 Skipped test-skip.t: missing feature: nail clipper
1075 1075 Failed test-failure.t: output changed
1076 1076 # Ran 2 tests, 1 skipped, 1 failed.
1077 1077 python hash seed: * (glob)
1078 1078 [1]
1079 1079 $ f report.json
1080 1080 report.json: file not found
1081 1081 $ cat output/report.json
1082 1082 testreport ={
1083 1083 "test-failure.t": [\{] (re)
1084 1084 "csys": "\s*[\d\.]{4,5}", ? (re)
1085 1085 "cuser": "\s*[\d\.]{4,5}", ? (re)
1086 1086 "diff": "---.+\+\+\+.+", ? (re)
1087 1087 "end": "\s*[\d\.]{4,5}", ? (re)
1088 1088 "result": "failure", ? (re)
1089 1089 "start": "\s*[\d\.]{4,5}", ? (re)
1090 1090 "time": "\s*[\d\.]{4,5}" (re)
1091 1091 }, ? (re)
1092 1092 "test-skip.t": {
1093 1093 "csys": "\s*[\d\.]{4,5}", ? (re)
1094 1094 "cuser": "\s*[\d\.]{4,5}", ? (re)
1095 1095 "diff": "", ? (re)
1096 1096 "end": "\s*[\d\.]{4,5}", ? (re)
1097 1097 "result": "skip", ? (re)
1098 1098 "start": "\s*[\d\.]{4,5}", ? (re)
1099 1099 "time": "\s*[\d\.]{4,5}" (re)
1100 1100 }, ? (re)
1101 1101 "test-success.t": [\{] (re)
1102 1102 "csys": "\s*[\d\.]{4,5}", ? (re)
1103 1103 "cuser": "\s*[\d\.]{4,5}", ? (re)
1104 1104 "diff": "", ? (re)
1105 1105 "end": "\s*[\d\.]{4,5}", ? (re)
1106 1106 "result": "success", ? (re)
1107 1107 "start": "\s*[\d\.]{4,5}", ? (re)
1108 1108 "time": "\s*[\d\.]{4,5}" (re)
1109 1109 }
1110 1110 } (no-eol)
1111 1111 $ ls -a output
1112 1112 .
1113 1113 ..
1114 1114 .testtimes
1115 1115 report.json
1116 1116 test-failure.t.err
1117 1117
1118 1118 Test that failed test accepted through interactive are properly reported:
1119 1119
1120 1120 $ cp test-failure.t backup
1121 1121 $ echo y | rt --json -i
1122 1122
1123 1123 --- $TESTTMP/test-failure.t
1124 1124 +++ $TESTTMP/test-failure.t.err
1125 1125 @@ -1,5 +1,5 @@
1126 1126 $ echo babar
1127 1127 - rataxes
1128 1128 + babar
1129 1129 This is a noop statement so that
1130 1130 this test is still more bytes than success.
1131 1131 pad pad pad pad............................................................
1132 1132 Accept this change? [n] ..s
1133 1133 Skipped test-skip.t: missing feature: nail clipper
1134 1134 # Ran 2 tests, 1 skipped, 0 failed.
1135 1135
1136 1136 $ cat report.json
1137 1137 testreport ={
1138 1138 "test-failure.t": [\{] (re)
1139 1139 "csys": "\s*[\d\.]{4,5}", ? (re)
1140 1140 "cuser": "\s*[\d\.]{4,5}", ? (re)
1141 1141 "diff": "", ? (re)
1142 1142 "end": "\s*[\d\.]{4,5}", ? (re)
1143 1143 "result": "success", ? (re)
1144 1144 "start": "\s*[\d\.]{4,5}", ? (re)
1145 1145 "time": "\s*[\d\.]{4,5}" (re)
1146 1146 }, ? (re)
1147 1147 "test-skip.t": {
1148 1148 "csys": "\s*[\d\.]{4,5}", ? (re)
1149 1149 "cuser": "\s*[\d\.]{4,5}", ? (re)
1150 1150 "diff": "", ? (re)
1151 1151 "end": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "result": "skip", ? (re)
1153 1153 "start": "\s*[\d\.]{4,5}", ? (re)
1154 1154 "time": "\s*[\d\.]{4,5}" (re)
1155 1155 }, ? (re)
1156 1156 "test-success.t": [\{] (re)
1157 1157 "csys": "\s*[\d\.]{4,5}", ? (re)
1158 1158 "cuser": "\s*[\d\.]{4,5}", ? (re)
1159 1159 "diff": "", ? (re)
1160 1160 "end": "\s*[\d\.]{4,5}", ? (re)
1161 1161 "result": "success", ? (re)
1162 1162 "start": "\s*[\d\.]{4,5}", ? (re)
1163 1163 "time": "\s*[\d\.]{4,5}" (re)
1164 1164 }
1165 1165 } (no-eol)
1166 1166 $ mv backup test-failure.t
1167 1167
1168 1168 backslash on end of line with glob matching is handled properly
1169 1169
1170 1170 $ cat > test-glob-backslash.t << EOF
1171 1171 > $ echo 'foo bar \\'
1172 1172 > foo * \ (glob)
1173 1173 > EOF
1174 1174
1175 1175 $ rt test-glob-backslash.t
1176 1176 .
1177 1177 # Ran 1 tests, 0 skipped, 0 failed.
1178 1178
1179 1179 $ rm -f test-glob-backslash.t
1180 1180
1181 1181 Test globbing of local IP addresses
1182 1182 $ echo 172.16.18.1
1183 1183 $LOCALIP (glob)
1184 1184 $ echo dead:beef::1
1185 1185 $LOCALIP (glob)
1186 1186
1187 1187 Test reusability for third party tools
1188 1188 ======================================
1189 1189
1190 1190 $ mkdir "$TESTTMP"/anothertests
1191 1191 $ cd "$TESTTMP"/anothertests
1192 1192
1193 1193 test that `run-tests.py` can execute hghave, even if it runs not in
1194 1194 Mercurial source tree.
1195 1195
1196 1196 $ cat > test-hghave.t <<EOF
1197 1197 > #require true
1198 1198 > $ echo foo
1199 1199 > foo
1200 1200 > EOF
1201 1201 $ rt test-hghave.t
1202 1202 .
1203 1203 # Ran 1 tests, 0 skipped, 0 failed.
1204 1204
1205 1205 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1206 1206 running is placed.
1207 1207
1208 1208 $ cat > test-runtestdir.t <<EOF
1209 1209 > - $TESTDIR, in which test-run-tests.t is placed
1210 1210 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1211 1211 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1212 1212 >
1213 1213 > #if windows
1214 1214 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1215 1215 > #else
1216 1216 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1217 1217 > #endif
1218 1218 > If this prints a path, that means RUNTESTDIR didn't equal
1219 1219 > TESTDIR as it should have.
1220 1220 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1221 1221 > This should print the start of check-code. If this passes but the
1222 1222 > previous check failed, that means we found a copy of check-code at whatever
1223 1223 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1224 1224 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1225 1225 > #!USRBINENVPY
1226 1226 > #
1227 1227 > # check-code - a style and portability checker for Mercurial
1228 1228 > EOF
1229 1229 $ rt test-runtestdir.t
1230 1230 .
1231 1231 # Ran 1 tests, 0 skipped, 0 failed.
1232 1232
1233 1233 #if execbit
1234 1234
1235 1235 test that TESTDIR is referred in PATH
1236 1236
1237 1237 $ cat > custom-command.sh <<EOF
1238 1238 > #!/bin/sh
1239 1239 > echo "hello world"
1240 1240 > EOF
1241 1241 $ chmod +x custom-command.sh
1242 1242 $ cat > test-testdir-path.t <<EOF
1243 1243 > $ custom-command.sh
1244 1244 > hello world
1245 1245 > EOF
1246 1246 $ rt test-testdir-path.t
1247 1247 .
1248 1248 # Ran 1 tests, 0 skipped, 0 failed.
1249 1249
1250 1250 #endif
1251 1251
1252 1252 test support for --allow-slow-tests
1253 1253 $ cat > test-very-slow-test.t <<EOF
1254 1254 > #require slow
1255 1255 > $ echo pass
1256 1256 > pass
1257 1257 > EOF
1258 1258 $ rt test-very-slow-test.t
1259 1259 s
1260 1260 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1261 1261 # Ran 0 tests, 1 skipped, 0 failed.
1262 1262 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1263 1263 .
1264 1264 # Ran 1 tests, 0 skipped, 0 failed.
1265 1265
1266 1266 support for running a test outside the current directory
1267 1267 $ mkdir nonlocal
1268 1268 $ cat > nonlocal/test-is-not-here.t << EOF
1269 1269 > $ echo pass
1270 1270 > pass
1271 1271 > EOF
1272 1272 $ rt nonlocal/test-is-not-here.t
1273 1273 .
1274 1274 # Ran 1 tests, 0 skipped, 0 failed.
1275 1275
1276 1276 support for automatically discovering test if arg is a folder
1277 1277 $ mkdir tmp && cd tmp
1278 1278
1279 1279 $ cat > test-uno.t << EOF
1280 1280 > $ echo line
1281 1281 > line
1282 1282 > EOF
1283 1283
1284 1284 $ cp test-uno.t test-dos.t
1285 1285 $ cd ..
1286 1286 $ cp -R tmp tmpp
1287 1287 $ cp tmp/test-uno.t test-solo.t
1288 1288
1289 1289 $ rt tmp/ test-solo.t tmpp
1290 1290 .....
1291 1291 # Ran 5 tests, 0 skipped, 0 failed.
1292 1292 $ rm -rf tmp tmpp
1293 1293
1294 1294 support for running run-tests.py from another directory
1295 1295 $ mkdir tmp && cd tmp
1296 1296
1297 1297 $ cat > useful-file.sh << EOF
1298 1298 > important command
1299 1299 > EOF
1300 1300
1301 1301 $ cat > test-folder.t << EOF
1302 1302 > $ cat \$TESTDIR/useful-file.sh
1303 1303 > important command
1304 1304 > EOF
1305 1305
1306 1306 $ cat > test-folder-fail.t << EOF
1307 1307 > $ cat \$TESTDIR/useful-file.sh
1308 1308 > important commando
1309 1309 > EOF
1310 1310
1311 1311 $ cd ..
1312 1312 $ rt tmp/test-*.t
1313 1313
1314 1314 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1315 1315 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1316 1316 @@ -1,2 +1,2 @@
1317 1317 $ cat $TESTDIR/useful-file.sh
1318 1318 - important commando
1319 1319 + important command
1320 1320
1321 1321 ERROR: test-folder-fail.t output changed
1322 1322 !.
1323 1323 Failed test-folder-fail.t: output changed
1324 1324 # Ran 2 tests, 0 skipped, 1 failed.
1325 1325 python hash seed: * (glob)
1326 1326 [1]
1327 1327
1328 1328 support for bisecting failed tests automatically
1329 1329 $ hg init bisect
1330 1330 $ cd bisect
1331 1331 $ cat >> test-bisect.t <<EOF
1332 1332 > $ echo pass
1333 1333 > pass
1334 1334 > EOF
1335 1335 $ hg add test-bisect.t
1336 1336 $ hg ci -m 'good'
1337 1337 $ cat >> test-bisect.t <<EOF
1338 1338 > $ echo pass
1339 1339 > fail
1340 1340 > EOF
1341 1341 $ hg ci -m 'bad'
1342 1342 $ rt --known-good-rev=0 test-bisect.t
1343 1343
1344 1344 --- $TESTTMP/anothertests/bisect/test-bisect.t
1345 1345 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1346 1346 @@ -1,4 +1,4 @@
1347 1347 $ echo pass
1348 1348 pass
1349 1349 $ echo pass
1350 1350 - fail
1351 1351 + pass
1352 1352
1353 1353 ERROR: test-bisect.t output changed
1354 1354 !
1355 1355 Failed test-bisect.t: output changed
1356 1356 test-bisect.t broken by 72cbf122d116 (bad)
1357 1357 # Ran 1 tests, 0 skipped, 1 failed.
1358 1358 python hash seed: * (glob)
1359 1359 [1]
1360 1360
1361 1361 $ cd ..
1362 1362
1363 1363 support bisecting a separate repo
1364 1364
1365 1365 $ hg init bisect-dependent
1366 1366 $ cd bisect-dependent
1367 1367 $ cat > test-bisect-dependent.t <<EOF
1368 1368 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1369 1369 > pass
1370 1370 > EOF
1371 1371 $ hg commit -Am dependent test-bisect-dependent.t
1372 1372
1373 1373 $ rt --known-good-rev=0 test-bisect-dependent.t
1374 1374
1375 1375 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1376 1376 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1377 1377 @@ -1,2 +1,2 @@
1378 1378 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1379 1379 - pass
1380 1380 + fail
1381 1381
1382 1382 ERROR: test-bisect-dependent.t output changed
1383 1383 !
1384 1384 Failed test-bisect-dependent.t: output changed
1385 1385 Failed to identify failure point for test-bisect-dependent.t
1386 1386 # Ran 1 tests, 0 skipped, 1 failed.
1387 1387 python hash seed: * (glob)
1388 1388 [1]
1389 1389
1390 1390 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1391 1391 usage: run-tests.py [options] [tests]
1392 1392 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1393 1393 [2]
1394 1394
1395 1395 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1396 1396
1397 1397 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1398 1398 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1399 1399 @@ -1,2 +1,2 @@
1400 1400 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1401 1401 - pass
1402 1402 + fail
1403 1403
1404 1404 ERROR: test-bisect-dependent.t output changed
1405 1405 !
1406 1406 Failed test-bisect-dependent.t: output changed
1407 1407 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1408 1408 # Ran 1 tests, 0 skipped, 1 failed.
1409 1409 python hash seed: * (glob)
1410 1410 [1]
1411 1411
1412 1412 $ cd ..
1413 1413
1414 1414 Test a broken #if statement doesn't break run-tests threading.
1415 1415 ==============================================================
1416 1416 $ mkdir broken
1417 1417 $ cd broken
1418 1418 $ cat > test-broken.t <<EOF
1419 1419 > true
1420 1420 > #if notarealhghavefeature
1421 1421 > $ false
1422 1422 > #endif
1423 1423 > EOF
1424 1424 $ for f in 1 2 3 4 ; do
1425 1425 > cat > test-works-$f.t <<EOF
1426 1426 > This is test case $f
1427 1427 > $ sleep 1
1428 1428 > EOF
1429 1429 > done
1430 1430 $ rt -j 2
1431 1431 ....
1432 1432 # Ran 5 tests, 0 skipped, 0 failed.
1433 1433 skipped: unknown feature: notarealhghavefeature
1434 1434
1435 1435 $ cd ..
1436 1436 $ rm -rf broken
1437 1437
1438 1438 Test cases in .t files
1439 1439 ======================
1440 1440 $ mkdir cases
1441 1441 $ cd cases
1442 1442 $ cat > test-cases-abc.t <<'EOF'
1443 1443 > #testcases A B C
1444 1444 > $ V=B
1445 1445 > #if A
1446 1446 > $ V=A
1447 1447 > #endif
1448 1448 > #if C
1449 1449 > $ V=C
1450 1450 > #endif
1451 1451 > $ echo $V | sed 's/A/C/'
1452 1452 > C
1453 1453 > #if C
1454 1454 > $ [ $V = C ]
1455 1455 > #endif
1456 1456 > #if A
1457 1457 > $ [ $V = C ]
1458 1458 > [1]
1459 1459 > #endif
1460 1460 > #if no-C
1461 1461 > $ [ $V = C ]
1462 1462 > [1]
1463 1463 > #endif
1464 1464 > $ [ $V = D ]
1465 1465 > [1]
1466 1466 > EOF
1467 1467 $ rt
1468 1468 .
1469 1469 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1470 1470 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1471 1471 @@ -7,7 +7,7 @@
1472 1472 $ V=C
1473 1473 #endif
1474 1474 $ echo $V | sed 's/A/C/'
1475 1475 - C
1476 1476 + B
1477 1477 #if C
1478 1478 $ [ $V = C ]
1479 1479 #endif
1480 1480
1481 1481 ERROR: test-cases-abc.t (case B) output changed
1482 1482 !.
1483 1483 Failed test-cases-abc.t (case B): output changed
1484 1484 # Ran 3 tests, 0 skipped, 1 failed.
1485 1485 python hash seed: * (glob)
1486 1486 [1]
1487 1487
1488 1488 --restart works
1489 1489
1490 1490 $ rt --restart
1491 1491
1492 1492 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1493 1493 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1494 1494 @@ -7,7 +7,7 @@
1495 1495 $ V=C
1496 1496 #endif
1497 1497 $ echo $V | sed 's/A/C/'
1498 1498 - C
1499 1499 + B
1500 1500 #if C
1501 1501 $ [ $V = C ]
1502 1502 #endif
1503 1503
1504 1504 ERROR: test-cases-abc.t (case B) output changed
1505 1505 !.
1506 1506 Failed test-cases-abc.t (case B): output changed
1507 1507 # Ran 2 tests, 0 skipped, 1 failed.
1508 1508 python hash seed: * (glob)
1509 1509 [1]
1510 1510
1511 1511 --restart works with outputdir
1512 1512
1513 1513 $ mkdir output
1514 1514 $ mv test-cases-abc.t.B.err output
1515 1515 $ rt --restart --outputdir output
1516 1516
1517 1517 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1518 1518 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1519 1519 @@ -7,7 +7,7 @@
1520 1520 $ V=C
1521 1521 #endif
1522 1522 $ echo $V | sed 's/A/C/'
1523 1523 - C
1524 1524 + B
1525 1525 #if C
1526 1526 $ [ $V = C ]
1527 1527 #endif
1528 1528
1529 1529 ERROR: test-cases-abc.t (case B) output changed
1530 1530 !.
1531 1531 Failed test-cases-abc.t (case B): output changed
1532 1532 # Ran 2 tests, 0 skipped, 1 failed.
1533 1533 python hash seed: * (glob)
1534 1534 [1]
1535 1535
1536 Test TESTCASE variable
1537
1538 $ cat > test-cases-ab.t <<'EOF'
1539 > $ dostuff() {
1540 > > echo "In case $TESTCASE"
1541 > > }
1542 > #testcases A B
1543 > #if A
1544 > $ dostuff
1545 > In case A
1546 > #endif
1547 > #if B
1548 > $ dostuff
1549 > In case B
1550 > #endif
1551 > EOF
1552 $ rt test-cases-ab.t
1553 ..
1554 # Ran 2 tests, 0 skipped, 0 failed.
1555
1536 1556 Test automatic pattern replacement
1537 1557
1538 1558 $ cat << EOF >> common-pattern.py
1539 1559 > substitutions = [
1540 1560 > (br'foo-(.*)\\b',
1541 1561 > br'\$XXX=\\1\$'),
1542 1562 > (br'bar\\n',
1543 1563 > br'\$YYY$\\n'),
1544 1564 > ]
1545 1565 > EOF
1546 1566
1547 1567 $ cat << EOF >> test-substitution.t
1548 1568 > $ echo foo-12
1549 1569 > \$XXX=12$
1550 1570 > $ echo foo-42
1551 1571 > \$XXX=42$
1552 1572 > $ echo bar prior
1553 1573 > bar prior
1554 1574 > $ echo lastbar
1555 1575 > last\$YYY$
1556 1576 > $ echo foo-bar foo-baz
1557 1577 > EOF
1558 1578
1559 1579 $ rt test-substitution.t
1560 1580
1561 1581 --- $TESTTMP/anothertests/cases/test-substitution.t
1562 1582 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1563 1583 @@ -7,3 +7,4 @@
1564 1584 $ echo lastbar
1565 1585 last$YYY$
1566 1586 $ echo foo-bar foo-baz
1567 1587 + $XXX=bar foo-baz$
1568 1588
1569 1589 ERROR: test-substitution.t output changed
1570 1590 !
1571 1591 Failed test-substitution.t: output changed
1572 1592 # Ran 1 tests, 0 skipped, 1 failed.
1573 1593 python hash seed: * (glob)
1574 1594 [1]
1575 1595
1576 1596 --extra-config-opt works
1577 1597
1578 1598 $ cat << EOF >> test-config-opt.t
1579 1599 > $ hg init test-config-opt
1580 1600 > $ hg -R test-config-opt purge
1581 1601 > EOF
1582 1602
1583 1603 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1584 1604 .
1585 1605 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now