##// END OF EJS Templates
run-tests: resume raising an exception when a server fails to start...
Matt Harbison -
r36479:51a9f024 default
parent child Browse files
Show More
@@ -1,3087 +1,3087 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
# With RTUNICODEPEDANTRY set, switch Python 2's default encoding to
# "undefined" so any implicit bytes<->unicode coercion fails loudly.
# reload() does not exist on Python 3, hence the NameError guard.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment before any test mutates it, so it can be
# restored between runs.
origenviron = os.environ.copy()
# os.environb is unavailable on some platforms (e.g. Windows Python 3);
# fall back to the str-keyed environment there.
osenvironb = getattr(os, 'environb', os.environ)
processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
if pygmentspresent:
    # Style and lexer for colorizing the harness' own skip/fail summary
    # output (distinct from difflexer, which colors test diffs).
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # Test names look like "test-foo.t" or "test-foo.py", optionally
        # followed by a "(case xyz)" suffix for #testcases variants.
        testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
142 142
# Python 2/3 compatibility shims: the harness works internally with bytes
# paths; _bytespath/_strpath convert at the str boundary.  Only 3.5+ and
# 2.7 are supported; other Python 3 versions abort immediately.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Encode a (possibly None) str path to bytes.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # Decode a (possibly None) bytes path to str.
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
172 172 # For Windows support
173 173 wifexited = getattr(os, "WIFEXITED", lambda x: False)
174 174
175 175 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        # A busy port still proves the address family works.
        if exc.errno == errno.EADDRINUSE:
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    else:
        return True
199 199
200 200 # useipv6 will be set by parseargs
201 201 useipv6 = None
202 202
def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        # These errnos mean "not available here", anything else is a
        # genuine failure worth propagating.
        expected = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                    errno.EPROTONOSUPPORT)
        if exc.errno not in expected:
            raise
        return False
219 219
# Closing inherited fds in the child is only done on POSIX; on Windows
# close_fds conflicts with redirected standard handles.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell in working directory *wd*.

    Returns the subprocess.Popen object with popen2-style aliases
    (fromchild/tochild/childerr) attached for legacy callers.  stderr is
    folded into stdout.  If *timeout* is truthy, a watchdog thread marks
    p.timeout and terminates the process after that many seconds.
    """
    # NOTE(review): the lock serializes process creation across test
    # threads — presumably to avoid fd-inheritance races; confirm.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen2-compatible aliases.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            # Poll until the deadline passes or returncode is populated
            # (by a concurrent wait()/poll() elsewhere).
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            # NOTE(review): p.timeout is set even when the loop exits
            # because the process already finished — callers appear to
            # treat it as best-effort; confirm before relying on it.
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
245 245
# Path to the running interpreter, with backslashes normalized so it can
# be interpolated into shell-based test scripts.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Built-in option defaults, each overridable via the named environment
# variable (applied in getparser()).
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
258 258
def canonpath(path):
    """Return *path* with '~' expanded and symlinks resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
261 261
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a dict.

    Each file lists one test name per line; everything after a '#' is a
    comment.  Returns a dict mapping each stripped (bytes) test name to
    the filename it came from.  Missing files are skipped, with an
    optional warning naming *listtype*; other I/O errors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # 'with' guarantees the descriptor is released even if reading
        # raises; the previous explicit close() leaked it on error.
        # Iterating the file directly avoids materializing readlines().
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
282 282
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = set()
    try:
        fp = open(path, 'rb')
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        return cases
    with fp:
        for line in fp:
            # b'#testcases ' is 11 bytes; the remainder is a
            # whitespace-separated list of case names.
            if line.startswith(b'#testcases '):
                cases.update(line[11:].split())
    return cases
298 298
def getparser():
    """Obtain the OptionParser used by the CLI.

    Options are grouped into Test Selection, Test Harness Behavior,
    Mercurial Configuration and Results Reporting.  Defaults from the
    module-level ``defaults`` dict (environment-overridable) are applied
    at the end.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    selection = parser.add_argument_group('Test Selection')
    selection.add_argument('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    selection.add_argument("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    selection.add_argument("--changed",
        help="run tests that are changed in parent rev or working directory")
    selection.add_argument("-k", "--keywords",
        help="run tests matching keywords")
    selection.add_argument("-r", "--retest", action="store_true",
        help = "retest failed tests")
    selection.add_argument("--test-list", action="append",
        help="read tests to run from the specified file")
    selection.add_argument("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    selection.add_argument('tests', metavar='TESTS', nargs='*',
        help='Tests to run')

    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument('--bisect-repo',
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))
    harness.add_argument("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    harness.add_argument("-f", "--first", action="store_true",
        help="exit on the first test failure")
    harness.add_argument("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    harness.add_argument("-j", "--jobs", type=int,
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    harness.add_argument("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    harness.add_argument('--known-good-rev',
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    harness.add_argument("--list-tests", action="store_true",
        help="list tests instead of running them")
    harness.add_argument("--loop", action="store_true",
        help="loop tests repeatedly")
    harness.add_argument('--random', action="store_true",
        help='run tests in random order')
    harness.add_argument("-p", "--port", type=int,
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    harness.add_argument('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    harness.add_argument("-R", "--restart", action="store_true",
        help="restart at last error")
    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    harness.add_argument("--shell",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    harness.add_argument('--showchannels', action='store_true',
        help='show scheduling channels')
    harness.add_argument("--slowtimeout", type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    harness.add_argument("-t", "--timeout", type=int,
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    harness.add_argument("--tmpdir",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    harness.add_argument("-v", "--verbose", action="store_true",
        help="output verbose messages")

    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    hgconf.add_argument("--compiler",
        help="compiler to build with")
    hgconf.add_argument('--extra-config-opt', action="append", default=[],
        help='set the given config opt in the test hgrc')
    hgconf.add_argument("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    hgconf.add_argument("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    hgconf.add_argument("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    hgconf.add_argument("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    hgconf.add_argument("--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    hgconf.add_argument("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")

    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    reporting.add_argument("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    reporting.add_argument("-c", "--cover", action="store_true",
        help="print a test coverage report")
    reporting.add_argument('--exceptions', action='store_true',
        help='log all exceptions and generate an exception report')
    reporting.add_argument("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    reporting.add_argument("--json", action="store_true",
        help="store test result data in 'report.json' file")
    reporting.add_argument("--outputdir",
        help="directory to write error logs to (default=test directory)")
    reporting.add_argument("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    reporting.add_argument("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    reporting.add_argument("--time", action="store_true",
        help="time how long each test takes")
    reporting.add_argument("--view",
        help="external diff viewer")
    reporting.add_argument("--xunit",
        help="record xunit results at specified path")

    # Resolve each default against its environment variable, coercing to
    # the type of the built-in default, then install them on the parser.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
433 433
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Mutates *options* in place (resolving --local/--with-hg/--with-chg
    paths, coverage flags, blacklists/whitelists) and sets the module
    globals ``useipv6`` and ``verbose``.  Invalid combinations abort via
    parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # Derive the repo root from this script's location and point
        # with_hg (and optionally with_chg) at the in-tree binaries.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # '' is not False (so vlog() fires) but is still falsy, so log()
        # does not print an extra prefix.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
566 566
def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    # Copy-then-delete tolerates an existing destination, unlike a true
    # rename on some platforms.
    shutil.copy(src, dst)
    os.unlink(src)
573 573
# difflib.unified_diff only accepts str on Python 3; diff_bytes wraps it
# so the harness can diff bytes test output directly.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
578 578
def getdiff(expected, output, ref, err):
    """Return (servefail, lines) for the diff of expected vs. output.

    *lines* is the cleaned-up unified diff (bytes lines); *servefail* is
    True when the diff contains the "child process failed to start"
    abort marker.
    """
    marker = b'+ abort: child process failed to start'
    servefail = False
    lines = []
    for rawline in _unified_diff(expected, output, ref, err):
        line = rawline
        # Normalize path separators in the +++/--- file headers.
        if line[:3] in (b'+++', b'---'):
            line = line.replace(b'\\', b'/')
        # Strip a single trailing space before the newline.
        if line.endswith(b' \n'):
            line = line[:-2] + b'\n'
        lines.append(line)
        if line.startswith(marker):
            servefail = True
    return servefail, lines
593 593
# Verbosity flag; parseargs() rebinds it to '' when -v is given.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is not False:
        return log(*msg)
601 601
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
# Raw bytes literal: the previous non-raw b'...' relied on the invalid
# escape sequence \( (a DeprecationWarning on Python 3.6+); the compiled
# pattern is byte-identical.
optline = re.compile(br'(.*) \((.+?) !\)\n$')
611 611
def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
621 621
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock (defined elsewhere in this file) serializes output from
    # concurrent test threads.
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
634 634
def highlightdiff(line, color):
    """Colorize a diff line (bytes) with pygments when *color* is set."""
    if not color:
        return line
    assert pygmentspresent
    # latin1 round-trips every byte value, so arbitrary test output
    # survives the str detour pygments requires.
    return pygments.highlight(line.decode('latin1'), difflexer,
                              terminal256formatter).encode('latin1')
641 641
def highlightmsg(msg, color):
    """Colorize a harness status message (str) when *color* is set."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
647 647
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # Best effort: the process may already be gone.
        pass
655 655
def killdaemons(pidfile):
    """Kill the daemon processes listed in *pidfile* and remove it."""
    # Imported lazily: killdaemons.py lives alongside this script and is
    # only needed when tests leave daemons behind.
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
660 660
661 661 class Test(unittest.TestCase):
662 662 """Encapsulates a single, runnable test.
663 663
664 664 While this class conforms to the unittest.TestCase API, it differs in that
665 665 instances need to be instantiated manually. (Typically, unittest.TestCase
666 666 classes are instantiated automatically by scanning modules.)
667 667 """
668 668
669 669 # Status code reserved for skipped tests (used by hghave).
670 670 SKIPPED_STATUS = 80
671 671
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 first=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # Fall back to environment-derived defaults for timing/port knobs.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # Where diffs against the reference output get written.
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state; reset again in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
748 748
749 749 def readrefout(self):
750 750 """read reference output"""
751 751 # If we're not in --debug mode and reference output file exists,
752 752 # check test output against it.
753 753 if self._debug:
754 754 return None # to match "out is None"
755 755 elif os.path.exists(self.refpath):
756 756 with open(self.refpath, 'rb') as f:
757 757 return f.read().splitlines(True)
758 758 else:
759 759 return []
760 760
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # unittest result reporters call this for the one-line summary.
        return self.name
771 771
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so a Test instance can be reused (--loop).
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread directory may already exist from an earlier
            # test on this thread; anything else is fatal.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg needs its own per-test socket directory.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
804 804
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A setUp failure is an error, not a test failure.
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs; its failure downgrades a success.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
859 859
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        # _run() is supplied by the subclass (TTest/PythonTest); it returns
        # (exit status, list of output lines or None in debug mode).
        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a process exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                # A broken feature check is a failure, not a skip.
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            # firstlock guards the module-level firsterror flag so only
            # the first mismatch prints its diff.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
928 928
    def tearDown(self):
        """Tasks to perform after run()."""
        # Kill any daemons the test spawned (tracked via DAEMON_PIDS files).
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            # ignore_errors=True: best-effort cleanup.
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the failing output so diffs/--interactive can use it.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
955 955
    def _run(self, env):
        """Execute the test body; subclasses must override.

        The base implementation raises SkipTest, so an unrecognized test
        type is reported as skipped rather than as a failure.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
959 959
    def abort(self):
        """Terminate execution of this test."""
        # Checked by _run() implementations after the command finishes;
        # when set they raise KeyboardInterrupt to stop the run.
        self._aborted = True
963 963
964 964 def _portmap(self, i):
965 965 offset = b'' if i == 0 else b'%d' % i
966 966 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
967 967
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.

        Returns a list of (regex, replacement) byte-string pairs.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Replace the loopback IP only when preceded by a non-digit,
            # so longer addresses are not partially rewritten.
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Optional per-testdir substitutions, supplied by a Python file
        # that defines a 'substitutions' list of (regex, replacement).
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                # NOTE: exec of a repository-local file; trusted input only.
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1000 1000
    def _escapepath(self, p):
        """Turn byte-string path ``p`` into a regex matching that path.

        On Windows the pattern is case-insensitive per character and
        treats / and \\ as interchangeable; elsewhere it is a plain
        re.escape.
        """
        if os.name == 'nt':
            # NOTE(review): iterating a bytes object yields ints on
            # Python 3, where c.isalpha() would fail — presumably this
            # branch is only exercised where that combination is safe;
            # confirm before relying on it under Python 3 on Windows.
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
1010 1010
1011 1011 def _localip(self):
1012 1012 if self._useipv6:
1013 1013 return b'::1'
1014 1014 else:
1015 1015 return b'127.0.0.1'
1016 1016
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            # Re-export every original variable (shell-quoted) ...
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # ... and unset anything the harness added on top.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1045 1045
1046 1046 def _getenv(self):
1047 1047 """Obtain environment variables to use during test execution."""
1048 1048 def defineport(i):
1049 1049 offset = '' if i == 0 else '%s' % i
1050 1050 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1051 1051 env = os.environ.copy()
1052 1052 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1053 1053 env['HGEMITWARNINGS'] = '1'
1054 1054 env['TESTTMP'] = self._testtmp
1055 1055 env['TESTNAME'] = self.name
1056 1056 env['HOME'] = self._testtmp
1057 1057 # This number should match portneeded in _getport
1058 1058 for port in xrange(3):
1059 1059 # This list should be parallel to _portmap in _getreplacements
1060 1060 defineport(port)
1061 1061 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1062 1062 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1063 1063 env["HGEDITOR"] = ('"' + sys.executable + '"'
1064 1064 + ' -c "import sys; sys.exit(0)"')
1065 1065 env["HGMERGE"] = "internal:merge"
1066 1066 env["HGUSER"] = "test"
1067 1067 env["HGENCODING"] = "ascii"
1068 1068 env["HGENCODINGMODE"] = "strict"
1069 1069 env['HGIPV6'] = str(int(self._useipv6))
1070 1070
1071 1071 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1072 1072 # IP addresses.
1073 1073 env['LOCALIP'] = self._localip()
1074 1074
1075 1075 # Reset some environment variables to well-known values so that
1076 1076 # the tests produce repeatable output.
1077 1077 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1078 1078 env['TZ'] = 'GMT'
1079 1079 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1080 1080 env['COLUMNS'] = '80'
1081 1081 env['TERM'] = 'xterm'
1082 1082
1083 1083 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1084 1084 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1085 1085 'NO_PROXY CHGDEBUG').split():
1086 1086 if k in env:
1087 1087 del env[k]
1088 1088
1089 1089 # unset env related to hooks
1090 1090 for k in env.keys():
1091 1091 if k.startswith('HG_'):
1092 1092 del env[k]
1093 1093
1094 1094 if self._usechg:
1095 1095 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1096 1096
1097 1097 return env
1098 1098
1099 1099 def _createhgrc(self, path):
1100 1100 """Create an hgrc file for this test."""
1101 1101 with open(path, 'wb') as hgrc:
1102 1102 hgrc.write(b'[ui]\n')
1103 1103 hgrc.write(b'slash = True\n')
1104 1104 hgrc.write(b'interactive = False\n')
1105 1105 hgrc.write(b'mergemarkers = detailed\n')
1106 1106 hgrc.write(b'promptecho = True\n')
1107 1107 hgrc.write(b'[defaults]\n')
1108 1108 hgrc.write(b'[devel]\n')
1109 1109 hgrc.write(b'all-warnings = true\n')
1110 1110 hgrc.write(b'default-date = 0 0\n')
1111 1111 hgrc.write(b'[largefiles]\n')
1112 1112 hgrc.write(b'usercache = %s\n' %
1113 1113 (os.path.join(self._testtmp, b'.cache/largefiles')))
1114 1114 hgrc.write(b'[lfs]\n')
1115 1115 hgrc.write(b'usercache = %s\n' %
1116 1116 (os.path.join(self._testtmp, b'.cache/lfs')))
1117 1117 hgrc.write(b'[web]\n')
1118 1118 hgrc.write(b'address = localhost\n')
1119 1119 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1120 1120
1121 1121 for opt in self._extraconfigopts:
1122 1122 section, key = opt.encode('utf-8').split(b'.', 1)
1123 1123 assert b'=' in key, ('extra config opt %s must '
1124 1124 'have an = for assignment' % opt)
1125 1125 hgrc.write(b'[%s]\n%s\n' % (section, key))
1126 1126
1127 1127 def fail(self, msg):
1128 1128 # unittest differentiates between errored and failed.
1129 1129 # Failed is denoted by AssertionError (by default at least).
1130 1130 raise AssertionError(msg)
1131 1131
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: inherit our stdio so the user sees live output;
            # nothing is captured.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons after an interrupt; report
            # SIGTERM-style status if the child claimed success.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            # Popen4 marks the child as timed out; report the sentinel
            # value runTest() checks for.
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, paths, IPs, etc. so output is comparable.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1180 1180
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as <name>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # ' -3' enables Python 3 compatibility warnings under Python 2.
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output needs \r\n -> \n normalization for comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1199 1199
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i): a length-1 byte string for integer i, on both Python 2
# (chr suffices) and Python 3 (bytes([i]) is required).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1217 1217
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefix hghave prints for a feature that is missing (skip).
    SKIPPED_PREFIX = b'skipped: '
    # Prefix hghave prints when a feature check itself broke (failure).
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that need (esc) escaping when recording output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map each byte to its \xNN escape; backslash and \r get readable forms.
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1228 1228
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', None)
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Give each case its own display name, .err file and temp
            # directory name so parallel cases don't collide.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
1239 1239
    @property
    def refpath(self):
        # The .t file itself doubles as the reference output.
        return os.path.join(self._testdir, self.bname)
1243 1243
    def _run(self, env):
        """Translate the .t file to a shell script, run it, and merge the
        actual output back into expected output form."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1276 1276
    def _hghave(self, reqs):
        """Check feature requirements via the hghave script.

        Returns (True, None) if all features in ``reqs`` are available,
        or (False, stdout) with hghave's explanation. Exits the runner
        if hghave itself errors (status 2).
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave reported a usage/internal error; abort the runner.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        if b'slow' in reqs:
            # Slow tests get the extended timeout.
            self._timeout = self._slowtimeout
        return True, None
1298 1298
    def _iftest(self, args):
        # implements "#if"
        # Case names (and their "no-" negations) are matched against the
        # currently running case; everything else is a feature checked
        # through hghave.
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]
1312 1312
    def _parsetest(self, lines):
        """Translate a .t file into (salt, script, after, expected).

        salt: unique marker emitted between commands so output can be
        matched back to source lines; script: the shell script lines to
        run; after: non-active source lines keyed by the position they
        follow; expected: expected output lines keyed by command position.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            # MSYS/MinGW: make pwd print Windows-style paths.
            script.append(b'alias pwd="pwd -W"\n')
        if self._case:
            # Export the active case name so the test itself can see it.
            if isinstance(self._case, str):
                quoted = shellquote(self._case)
            else:
                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    # Requirement missing: replace the whole script with a
                    # skip (exit 80 is SKIPPED_STATUS).
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Abort the test early if a cd fails.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        # Final salt reports the exit code of the last command.
        addsalt(n + 1, False)

        return salt, script, after, expected
1437 1437
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge raw script output back into unified .t form.

        Walks the salted output, matches each actual line against the
        expected lines for the current command, re-interleaves the
        non-active source (``after``), and returns (exitcode, postout)
        where exitcode may become False when only warn-only (glob)
        mismatches occurred.
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Salt marker: everything before it is output, the part
                # after carries "<source line> <return code>".
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            # Optional/conditional line that didn't match:
                            # keep it verbatim and try the next candidate.
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # while/else: only reached when lout was empty — flush any
                # remaining optional expected lines for this position.
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1547 1547
    @staticmethod
    def rematch(el, l):
        """Match regex ``el`` against full output line ``l`` (both bytes)."""
        try:
            # Group the pattern so trailing anchors bind to all of it.
            el = b'(?:' + el + b')'
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False
1559 1559
    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                # Literal match but (glob) was unnecessary: warn-only.
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        # Translate the glob into a regex, character by character.
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped special character: keep it literal.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1590 1590
    def linematch(self, el, l):
        """Compare expected line ``el`` against actual line ``l``.

        Returns True on match, "retry" when the (optional/conditional)
        expected line should be kept and the next candidate tried, or a
        falsy value on mismatch.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
                if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                    return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep:
                # Retry with backslashes normalized to forward slashes.
                _l = l.replace(b'\\', b'/')
                if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                    return True
        return retry
1628 1628
1629 1629 @staticmethod
1630 1630 def parsehghaveoutput(lines):
1631 1631 '''Parse hghave log lines.
1632 1632
1633 1633 Return tuple of lists (missing, failed):
1634 1634 * the missing/unknown features
1635 1635 * the features for which existence check failed'''
1636 1636 missing = []
1637 1637 failed = []
1638 1638 for line in lines:
1639 1639 if line.startswith(TTest.SKIPPED_PREFIX):
1640 1640 line = line.splitlines()[0]
1641 1641 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1642 1642 elif line.startswith(TTest.FAILED_PREFIX):
1643 1643 line = line.splitlines()[0]
1644 1644 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1645 1645
1646 1646 return missing, failed
1647 1647
    @staticmethod
    def _escapef(m):
        # re.sub callback: map the single matched byte to its escape form.
        return TTest.ESCAPEMAP[m.group(0)]
1651 1651
    @staticmethod
    def _stringescape(s):
        # Escape control/non-ASCII bytes so recorded output stays readable.
        return TTest.ESCAPESUB(TTest._escapef, s)
1655 1655
# iolock serializes console output from worker threads; firstlock guards
# the firsterror flag, which marks that the first failing test already
# printed its diff so later (possibly stale) failures stay quiet.
iolock = threading.RLock()
firstlock = threading.RLock()
firsterror = False
1659 1659
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        # Parsed command-line options for the whole run.
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            # Only colorize when pygments is available and we're on a tty.
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1690 1690
    def addFailure(self, test, reason):
        """Record a test failure; print a progress marker ('t' for timeout,
        '!' otherwise) unless --first stops the run."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()
1709 1709
    def addSuccess(self, test):
        # Serialize the base class's '.' progress output with iolock and
        # remember the test for xunit reporting.
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)
1714 1714
    def addError(self, test, err):
        # Delegate bookkeeping to unittest; honor --first by stopping.
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()
1719 1719
    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test and print 's' (or the reason in -v mode)."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()
1729 1729
    def addIgnore(self, test, reason):
        """Record an ignored test (custom result similar to skip)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Silently ignored tests still count as "run".
                    self.testsRun += 1
                self.stream.flush()
1741 1741
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Prints the diff (or spawns the --view tool), optionally prompts the
        user interactively to accept the new output, and returns whether the
        change was accepted. ``ret`` is the test's exit status (unused here
        beyond the signature); ``got``/``expected`` are the output line lists.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        # Everything below writes to the shared stream, so hold iolock for
        # the whole diff + interactive-prompt sequence.
        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Hand the reference and error files to an external diff tool.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Write raw bytes through the underlying buffer,
                        # flushing around it so text/bytes output interleave
                        # correctly.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    # Raise only after the diff has been printed, so the
                    # failure is still attributed with full context.
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            # .t tests carry their expected output inline.
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1799 1799
    def startTest(self, test):
        """Stamp the test with its start times for later duration reporting."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]
1810 1810
1811 1811 def stopTest(self, test, interrupted=False):
1812 1812 super(TestResult, self).stopTest(test)
1813 1813
1814 1814 test.stopped = os.times()
1815 1815
1816 1816 starttime = test.started
1817 1817 endtime = test.stopped
1818 1818 origin = self._firststarttime
1819 1819 self.times.append((test.name,
1820 1820 endtime[2] - starttime[2], # user space CPU time
1821 1821 endtime[3] - starttime[3], # sys space CPU time
1822 1822 endtime[4] - starttime[4], # real time
1823 1823 starttime[4] - origin, # start date in run context
1824 1824 endtime[4] - origin, # end date in run context
1825 1825 ))
1826 1826
1827 1827 if interrupted:
1828 1828 with iolock:
1829 1829 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1830 1830 test.name, self.times[-1][3]))
1831 1831
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        """Filter, schedule and execute the tests, populating ``result``."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # Hand out a fresh instance when the test asks to be
                # reloaded (e.g. repeated runs); otherwise reuse it.
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Match keywords against both the test body and its name.
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One display slot per job for the --showchannels status line.
        channels = [""] * self._jobs

        def job(test, result):
            # Claim the first free channel; the for/else raises if the
            # bookkeeping ever gets out of sync with ``running``.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: repaint the channel activity line until
            # ``channels`` is cleared at the end of run().
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Block for a completion when all job slots are busy (or
                # nothing is left to start); otherwise fall through and
                # start another test.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloading if needed).
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying ``channels`` tells the stat() thread to exit.
        channels = []

        return result
2017 2017
2018 2018 # Save the most recent 5 wall-clock runtimes of each test to a
2019 2019 # human-readable text file named .testtimes. Tests are sorted
2020 2020 # alphabetically, while times for each test are listed from oldest to
2021 2021 # newest.
2022 2022
def loadtimes(outputdir):
    """Return ``[(testname, [runtimes])]`` parsed from ``.testtimes``.

    ``outputdir`` is a bytes path. A missing file yields an empty list;
    other I/O errors propagate. Lines that do not match the expected
    "<name> <t1> <t2> ..." layout (e.g. a blank or truncated line in a
    corrupted cache) are skipped instead of raising AttributeError on a
    ``None`` match object, as the previous code did.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if not m:
                    # Tolerate a malformed line rather than aborting the
                    # whole run over a damaged timing cache.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2035 2035
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into ``.testtimes``.

    Keeps at most the 5 most recent runtimes per test, skips tests that
    were skipped this run, and replaces the file via a temp file so a
    crash cannot leave it half-written.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        history = saved.setdefault(name, [])
        history.append(real)
        # Trim in place so the dict keeps pointing at the same list.
        del history[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name in sorted(saved):
            cols = ' '.join('%.3f' % (t,) for t in saved[name])
            fp.write('%s %s\n' % (name, cols))
    timepath = os.path.join(outputdir, b'.testtimes')
    # Best-effort swap: unlink first for platforms where rename cannot
    # replace an existing file.
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2061 2061
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The TestRunner that owns this run; supplies options and paths.
        self._runner = runner

    def listtests(self, test):
        """Print sorted test names instead of running them.

        Every listed test is recorded as a success, and the xunit/json
        reports are still written when those options are enabled.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Execute ``test`` and print the summary, reports and statistics."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # All reporting below shares the output stream, so hold iolock.
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            # Persist per-test runtimes for future scheduling decisions.
            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Use ``hg bisect`` to locate the revision that broke each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run a command under HGPLAIN=1; return combined stdout/stderr.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write the run results to ``outf`` as xunit-style XML."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write the run results to ``outf`` as a ``testreport`` JSON blob."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2301 2301
def sorttests(testdescs, shuffle=False):
    """Do an in-place sort of tests so expensive ones are scheduled first.

    With ``shuffle=True`` the list is randomized instead.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    # keywords for slow tests
    slow = {b'svn': 10,
            b'cvs': 10,
            b'hghave': 10,
            b'largefiles-update': 10,
            b'run-tests': 10,
            b'corruption': 10,
            b'race': 10,
            b'i18n': 10,
            b'check': 100,
            b'gendoc': 100,
            b'contrib-perf': 200,
            }
    cache = {}

    def sortkey(desc):
        # run largest tests first, as they tend to take the longest
        path = desc['path']
        if path in cache:
            return cache[path]
        try:
            val = -os.stat(path).st_size
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            # file does not exist, tell early
            cache[path] = -1e9
            return -1e9
        for kw, mul in slow.items():
            if kw in path:
                val *= mul
        if path.endswith(b'.py'):
            val /= 10.0
        cache[path] = val / 1000.0
        return cache[path]

    testdescs.sort(key=sortkey)
2345 2345
2346 2346 class TestRunner(object):
2347 2347 """Holds context for executing tests.
2348 2348
2349 2349 Tests rely on a lot of state. This object holds it for them.
2350 2350 """
2351 2351
2352 2352 # Programs required to run tests.
2353 2353 REQUIREDTOOLS = [
2354 2354 b'diff',
2355 2355 b'grep',
2356 2356 b'unzip',
2357 2357 b'gunzip',
2358 2358 b'bunzip2',
2359 2359 b'sed',
2360 2360 ]
2361 2361
2362 2362 # Maps file extensions to test class.
2363 2363 TESTTYPES = [
2364 2364 (b'.py', PythonTest),
2365 2365 (b'.t', TTest),
2366 2366 ]
2367 2367
2368 2368 def __init__(self):
2369 2369 self.options = None
2370 2370 self._hgroot = None
2371 2371 self._testdir = None
2372 2372 self._outputdir = None
2373 2373 self._hgtmp = None
2374 2374 self._installdir = None
2375 2375 self._bindir = None
2376 2376 self._tmpbinddir = None
2377 2377 self._pythondir = None
2378 2378 self._coveragefile = None
2379 2379 self._createdfiles = []
2380 2380 self._hgcommand = None
2381 2381 self._hgpath = None
2382 2382 self._portoffset = 0
2383 2383 self._ports = {}
2384 2384
2385 2385 def run(self, args, parser=None):
2386 2386 """Run the test suite."""
2387 2387 oldmask = os.umask(0o22)
2388 2388 try:
2389 2389 parser = parser or getparser()
2390 2390 options = parseargs(args, parser)
2391 2391 tests = [_bytespath(a) for a in options.tests]
2392 2392 if options.test_list is not None:
2393 2393 for listfile in options.test_list:
2394 2394 with open(listfile, 'rb') as f:
2395 2395 tests.extend(t for t in f.read().splitlines() if t)
2396 2396 self.options = options
2397 2397
2398 2398 self._checktools()
2399 2399 testdescs = self.findtests(tests)
2400 2400 if options.profile_runner:
2401 2401 import statprof
2402 2402 statprof.start()
2403 2403 result = self._run(testdescs)
2404 2404 if options.profile_runner:
2405 2405 statprof.stop()
2406 2406 statprof.display()
2407 2407 return result
2408 2408
2409 2409 finally:
2410 2410 os.umask(oldmask)
2411 2411
    def _run(self, testdescs):
        """Set up directories, binaries and environment, then run the tests.

        Returns the harness exit status (0 on success). This mutates the
        process environment extensively (TESTDIR, HGTMP, PATH, PYTHONPATH,
        etc.) before delegating to _runtests().
        """
        sorttests(testdescs, shuffle=self.options.random)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give spawned processes a moment to release files before the
            # temp directories are cleaned up.
            time.sleep(.1)
            self._cleanup()
2587 2587
def findtests(self, args):
    """Finds possible test files from arguments.

    If you wish to inject custom tests into the test harness, this would
    be a good function to monkeypatch or override in a derived class.
    """
    if not args:
        if self.options.changed:
            # Ask hg which files changed since the given revision;
            # `-man0` lists modified/added file names NUL-separated.
            proc = Popen4('hg st --rev "%s" -man0 .' %
                          self.options.changed, None, 0)
            stdout, stderr = proc.communicate()
            args = stdout.strip(b'\0').split(b'\0')
        else:
            args = os.listdir(b'.')

    # Expand directories one level deep into their entries.
    expanded_args = []
    for arg in args:
        if os.path.isdir(arg):
            if not arg.endswith(b'/'):
                arg += b'/'
            expanded_args.extend([arg + a for a in os.listdir(arg)])
        else:
            expanded_args.append(arg)
    args = expanded_args

    tests = []
    for t in args:
        if not (os.path.basename(t).startswith(b'test-')
                and (t.endswith(b'.py') or t.endswith(b'.t'))):
            continue
        if t.endswith(b'.t'):
            # .t file may contain multiple test cases
            cases = sorted(parsettestcases(t))
            if cases:
                # `cases` is already sorted; the original code sorted it
                # a second time for no effect.
                tests += [{'path': t, 'case': c} for c in cases]
            else:
                tests.append({'path': t})
        else:
            tests.append({'path': t})
    return tests
2628 2628
def _runtests(self, testdescs):
    """Run the tests described by ``testdescs``; return 1 on failure,
    None otherwise (the caller treats a falsy return as success)."""
    def _reloadtest(test, i):
        # convert a test back to its description dict
        desc = {'path': test.path}
        case = getattr(test, '_case', None)
        if case:
            desc['case'] = case
        return self._gettest(desc, i)

    try:
        if self.options.restart:
            # --restart: skip ahead to the first test whose .err file
            # from a previous run is still present in the output dir.
            orig = list(testdescs)
            while testdescs:
                desc = testdescs[0]
                # desc['path'] is a relative path
                if 'case' in desc:
                    errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                else:
                    errpath = b'%s.err' % desc['path']
                errpath = os.path.join(self._outputdir, errpath)
                if os.path.exists(errpath):
                    break
                testdescs.pop(0)
            if not testdescs:
                # No leftover failure found: run the full list after all.
                print("running all tests")
                testdescs = orig

        tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

        failed = False
        kws = self.options.keywords
        if kws is not None and PYTHON3:
            # TestSuite matches keywords against bytes test content.
            kws = kws.encode('utf-8')

        suite = TestSuite(self._testdir,
                          jobs=self.options.jobs,
                          whitelist=self.options.whitelisted,
                          blacklist=self.options.blacklist,
                          retest=self.options.retest,
                          keywords=kws,
                          loop=self.options.loop,
                          runs_per_test=self.options.runs_per_test,
                          showchannels=self.options.showchannels,
                          tests=tests, loadtest=_reloadtest)
        verbosity = 1
        if self.options.verbose:
            verbosity = 2
        runner = TextTestRunner(self, verbosity=verbosity)

        if self.options.list_tests:
            result = runner.listtests(suite)
        else:
            # Install a temporary hg (and optionally chg) unless the
            # caller pointed us at an existing installation.
            if self._installdir:
                self._installhg()
                self._checkhglib("Testing")
            else:
                self._usecorrectpython()
            if self.options.chg:
                assert self._installdir
                self._installchg()

            result = runner.run(suite)

            if result.failures:
                failed = True

        if self.options.anycoverage:
            self._outputcoverage()
    except KeyboardInterrupt:
        # Ctrl-C counts as a failed run but still falls through to the
        # cleanup in run()'s finally clause.
        failed = True
        print("\ninterrupted!")

    if failed:
        return 1
2703 2703
2704 2704 def _getport(self, count):
2705 2705 port = self._ports.get(count) # do we have a cached entry?
2706 2706 if port is None:
2707 2707 portneeded = 3
2708 2708 # above 100 tries we just give up and let test reports failure
2709 2709 for tries in xrange(100):
2710 2710 allfree = True
2711 2711 port = self.options.port + self._portoffset
2712 2712 for idx in xrange(portneeded):
2713 2713 if not checkportisavailable(port + idx):
2714 2714 allfree = False
2715 2715 break
2716 2716 self._portoffset += portneeded
2717 2717 if allfree:
2718 2718 break
2719 2719 self._ports[count] = port
2720 2720 return port
2721 2721
def _gettest(self, testdesc, count):
    """Obtain a Test by looking at its filename.

    Returns a Test instance. The Test may not be runnable if it doesn't
    map to a known type.
    """
    path = testdesc['path']
    lowered = path.lower()

    # Pick the most specific test class registered for this extension;
    # fall back to the generic Test.
    testcls = Test
    for ext, cls in self.TESTTYPES:
        if lowered.endswith(ext):
            testcls = cls
            break

    refpath = os.path.join(self._testdir, path)
    tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

    # extra keyword parameters. 'case' is used by .t tests
    kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}

    t = testcls(refpath, self._outputdir, tmpdir,
                keeptmpdir=self.options.keep_tmpdir,
                debug=self.options.debug,
                first=self.options.first,
                timeout=self.options.timeout,
                startport=self._getport(count),
                extraconfigopts=self.options.extra_config_opt,
                py3kwarnings=self.options.py3k_warnings,
                shell=self.options.shell,
                hgcommand=self._hgcommand,
                usechg=bool(self.options.with_chg or self.options.chg),
                useipv6=useipv6, **kwds)
    t.should_reload = True
    return t
2757 2757
2758 2758 def _cleanup(self):
2759 2759 """Clean up state from this test invocation."""
2760 2760 if self.options.keep_tmpdir:
2761 2761 return
2762 2762
2763 2763 vlog("# Cleaning up HGTMP", self._hgtmp)
2764 2764 shutil.rmtree(self._hgtmp, True)
2765 2765 for f in self._createdfiles:
2766 2766 try:
2767 2767 os.remove(f)
2768 2768 except OSError:
2769 2769 pass
2770 2770
def _usecorrectpython(self):
    """Configure the environment to use the appropriate Python in tests."""
    # Tests must use the same interpreter as us or bad things will happen.
    pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
    if getattr(os, 'symlink', None):
        vlog("# Making python executable in test path a symlink to '%s'" %
             sys.executable)
        mypython = os.path.join(self._tmpbindir, pyexename)
        try:
            # An existing, correct symlink can simply be reused.
            if os.readlink(mypython) == sys.executable:
                return
            os.unlink(mypython)
        except OSError as err:
            # ENOENT just means no symlink yet; anything else is real.
            if err.errno != errno.ENOENT:
                raise
        if self._findprogram(pyexename) != sys.executable:
            try:
                os.symlink(sys.executable, mypython)
                self._createdfiles.append(mypython)
            except OSError as err:
                # child processes may race, which is harmless
                if err.errno != errno.EEXIST:
                    raise
    else:
        # No symlink support (e.g. Windows): rearrange PATH so that our
        # interpreter's directory is searched first under the expected
        # executable name.
        exedir, exename = os.path.split(sys.executable)
        vlog("# Modifying search path to find %s as %s in '%s'" %
             (exename, pyexename, exedir))
        path = os.environ['PATH'].split(os.pathsep)
        while exedir in path:
            path.remove(exedir)
        os.environ['PATH'] = os.pathsep.join([exedir] + path)
        if not self._findprogram(pyexename):
            print("WARNING: Cannot find %s in search path" % pyexename)
2804 2804
def _installhg(self):
    """Install hg into the test environment.

    This will also configure hg with the appropriate testing settings.
    """
    vlog("# Performing temporary installation of HG")
    installerrs = os.path.join(self._hgtmp, b"install.err")
    compiler = ''
    if self.options.compiler:
        compiler = '--compiler ' + self.options.compiler
    if self.options.pure:
        pure = b"--pure"
    else:
        pure = b""

    # Run installer in hg root
    script = os.path.realpath(sys.argv[0])
    exe = sys.executable
    if PYTHON3:
        # setup.py command line is assembled as bytes below.
        compiler = _bytespath(compiler)
        script = _bytespath(script)
        exe = _bytespath(exe)
    hgroot = os.path.dirname(os.path.dirname(script))
    self._hgroot = hgroot
    os.chdir(hgroot)
    nohome = b'--home=""'
    if os.name == 'nt':
        # The --home="" trick works only on OS where os.sep == '/'
        # because of a distutils convert_path() fast-path. Avoid it at
        # least on Windows for now, deal with .pydistutils.cfg bugs
        # when they happen.
        nohome = b''
    # Build + install into the per-run temp prefix; stdout/stderr of the
    # whole pipeline is captured into install.err for later display.
    cmd = (b'%(exe)s setup.py %(pure)s clean --all'
           b' build %(compiler)s --build-base="%(base)s"'
           b' install --force --prefix="%(prefix)s"'
           b' --install-lib="%(libdir)s"'
           b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
           % {b'exe': exe, b'pure': pure,
              b'compiler': compiler,
              b'base': os.path.join(self._hgtmp, b"build"),
              b'prefix': self._installdir, b'libdir': self._pythondir,
              b'bindir': self._bindir,
              b'nohome': nohome, b'logfile': installerrs})

    # setuptools requires install directories to exist.
    def makedirs(p):
        # mkdir -p: tolerate an already-existing directory.
        try:
            os.makedirs(p)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    makedirs(self._pythondir)
    makedirs(self._bindir)

    vlog("# Running", cmd)
    if os.system(cmd) == 0:
        if not self.options.verbose:
            # Quiet success: drop the (empty/uninteresting) log file.
            try:
                os.remove(installerrs)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
    else:
        # Install failed: replay the captured log and abort the run.
        with open(installerrs, 'rb') as f:
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
        sys.exit(1)
    os.chdir(self._testdir)

    self._usecorrectpython()

    if self.options.py3k_warnings and not self.options.anycoverage:
        # Append the -3 flag to the shebang of the installed hg script.
        vlog("# Updating hg command to enable Py3k Warnings switch")
        with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
        with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
            for line in lines:
                f.write(line + '\n')

    hgbat = os.path.join(self._bindir, b'hg.bat')
    if os.path.isfile(hgbat):
        # hg.bat expects to be put in bin/scripts while run-tests.py
        # installation layout put it in bin/ directly. Fix it
        with open(hgbat, 'rb') as f:
            data = f.read()
        if b'"%~dp0..\python" "%~dp0hg" %*' in data:
            data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                b'"%~dp0python" "%~dp0hg" %*')
            with open(hgbat, 'wb') as f:
                f.write(data)
        else:
            print('WARNING: cannot fix hg.bat reference to python.exe')

    if self.options.anycoverage:
        # Arrange for child interpreters to start coverage collection:
        # sitecustomize.py + COVERAGE_PROCESS_START trigger it on startup.
        custom = os.path.join(self._testdir, 'sitecustomize.py')
        target = os.path.join(self._pythondir, 'sitecustomize.py')
        vlog('# Installing coverage trigger to %s' % target)
        shutil.copyfile(custom, target)
        rc = os.path.join(self._testdir, '.coveragerc')
        vlog('# Installing coverage rc to %s' % rc)
        os.environ['COVERAGE_PROCESS_START'] = rc
        covdir = os.path.join(self._installdir, '..', 'coverage')
        try:
            os.mkdir(covdir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        os.environ['COVERAGE_DIR'] = covdir
2918 2918
2919 2919 def _checkhglib(self, verb):
2920 2920 """Ensure that the 'mercurial' package imported by python is
2921 2921 the one we expect it to be. If not, print a warning to stderr."""
2922 2922 if ((self._bindir == self._pythondir) and
2923 2923 (self._bindir != self._tmpbindir)):
2924 2924 # The pythondir has been inferred from --with-hg flag.
2925 2925 # We cannot expect anything sensible here.
2926 2926 return
2927 2927 expecthg = os.path.join(self._pythondir, b'mercurial')
2928 2928 actualhg = self._gethgpath()
2929 2929 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2930 2930 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2931 2931 ' (expected %s)\n'
2932 2932 % (verb, actualhg, expecthg))
2933 2933 def _gethgpath(self):
2934 2934 """Return the path to the mercurial package that is actually found by
2935 2935 the current Python interpreter."""
2936 2936 if self._hgpath is not None:
2937 2937 return self._hgpath
2938 2938
2939 2939 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2940 2940 cmd = cmd % PYTHON
2941 2941 if PYTHON3:
2942 2942 cmd = _strpath(cmd)
2943 2943 pipe = os.popen(cmd)
2944 2944 try:
2945 2945 self._hgpath = _bytespath(pipe.read().strip())
2946 2946 finally:
2947 2947 pipe.close()
2948 2948
2949 2949 return self._hgpath
2950 2950
def _installchg(self):
    """Install chg into the test environment"""
    vlog('# Performing temporary installation of CHG')
    assert os.path.dirname(self._bindir) == self._installdir
    assert self._hgroot, 'must be called after _installhg()'

    # Build chg against the same prefix the temporary hg install used.
    makecmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
               % {b'make': 'make', # TODO: switch by option or environment?
                  b'prefix': self._installdir})
    chgdir = os.path.join(self._hgroot, b'contrib', b'chg')
    vlog("# Running", makecmd)
    build = subprocess.Popen(makecmd, shell=True, cwd=chgdir,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    out, _err = build.communicate()
    if build.returncode != 0:
        # Relay the captured build log, then abort the run.
        if PYTHON3:
            sys.stdout.buffer.write(out)
        else:
            sys.stdout.write(out)
        sys.exit(1)
2971 2971
def _outputcoverage(self):
    """Produce code coverage output."""
    # Imported lazily: the coverage package is only needed when one of
    # the coverage options (-c/-C/-s) was requested.
    import coverage
    coverage = coverage.coverage

    vlog('# Producing coverage report')
    # chdir is the easiest way to get short, relative paths in the
    # output.
    os.chdir(self._hgroot)
    covdir = os.path.join(self._installdir, '..', 'coverage')
    cov = coverage(data_file=os.path.join(covdir, 'cov'))

    # Map install directory paths back to source directory.
    cov.config.paths['srcdir'] = ['.', self._pythondir]

    # Merge the per-process data files written by child interpreters.
    cov.combine()

    # Exclude the scratch bin dir and the tests themselves from reports.
    omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
    cov.report(ignore_errors=True, omit=omit)

    if self.options.htmlcov:
        htmldir = os.path.join(self._outputdir, 'htmlcov')
        cov.html_report(directory=htmldir, omit=omit)
    if self.options.annotate:
        adir = os.path.join(self._outputdir, 'annotated')
        if not os.path.isdir(adir):
            os.mkdir(adir)
        cov.annotate(directory=adir, omit=omit)
3000 3000
def _findprogram(self, program):
    """Return the full path of the first executable named ``program``
    found on PATH, or None."""
    defaultpath = _bytespath(os.defpath)
    sep = _bytespath(os.pathsep)
    for directory in osenvironb.get(b'PATH', defaultpath).split(sep):
        candidate = os.path.join(directory, program)
        # Windows has no meaningful X_OK check; presence on PATH wins.
        if os.name == 'nt' or os.access(candidate, os.X_OK):
            return candidate
    return None
3010 3010
def _checktools(self):
    """Ensure tools required to run tests are present."""
    for tool in self.REQUIREDTOOLS:
        if os.name == 'nt' and not tool.endswith('.exe'):
            tool += '.exe'
        found = self._findprogram(tool)
        if not found:
            print("WARNING: Did not find prerequisite tool: %s " %
                  tool.decode("utf-8"))
        else:
            vlog("# Found prerequisite", tool, "at", found)
3022 3022
def aggregateexceptions(path):
    """Summarize the exception report files found in ``path``.

    Each report file is a NUL-separated record of
    (exception, main frame, hg frame, hg line, test name).  Returns a
    dict with per-failure occurrence counts, the tests affected by each
    failure, and, for every failure, the test exhibiting the fewest
    distinct failures (useful for picking a minimal reproducer).
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for name in os.listdir(path):
        with open(os.path.join(path, name), 'rb') as fh:
            fields = fh.read().split(b'\0')
        # Skip malformed reports.
        if len(fields) != 5:
            continue

        exc, mainframe, hgframe, hgline, testname = (
            f.decode('utf-8') for f in fields)

        failure = (hgframe, hgline, exc)
        exceptioncounts[failure] += 1
        testsbyfailure[failure].add(testname)
        failuresbytest[testname].add(failure)

    # Find test having fewest failures for each failure; ties go to the
    # lexicographically first test name.
    leastfailing = {}
    for failure, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[failure] = (len(failuresbytest[best]), best)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {}
    for failure, count in exceptioncounts.items():
        fewestcount, fewesttest = leastfailing[failure]
        combined[failure] = (count,
                            len(testsbyfailure[failure]),
                            fewestcount,
                            fewesttest)

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3075 3075
if __name__ == '__main__':
    runner = TestRunner()

    # On Windows, put the standard streams into binary mode so test
    # output is not mangled by newline translation; elsewhere msvcrt
    # does not exist and this is a no-op.
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1612 +1,1612 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 usage: run-tests.py [options] [tests]
36 36 run-tests.py: error: --with-hg must specify an executable hg script
37 37 [2]
38 38 $ rm hg
39 39 #endif
40 40
41 41 Features for testing optional lines
42 42 ===================================
43 43
44 44 $ cat > hghaveaddon.py <<EOF
45 45 > import hghave
46 46 > @hghave.check("custom", "custom hghave feature")
47 47 > def has_custom():
48 48 > return True
49 49 > @hghave.check("missing", "missing hghave feature")
50 50 > def has_missing():
51 51 > return False
52 52 > EOF
53 53
54 54 an empty test
55 55 =======================
56 56
57 57 $ touch test-empty.t
58 58 $ rt
59 59 .
60 60 # Ran 1 tests, 0 skipped, 0 failed.
61 61 $ rm test-empty.t
62 62
63 63 a successful test
64 64 =======================
65 65
66 66 $ cat > test-success.t << EOF
67 67 > $ echo babar
68 68 > babar
69 69 > $ echo xyzzy
70 70 > dont_print (?)
71 71 > nothing[42]line (re) (?)
72 72 > never*happens (glob) (?)
73 73 > more_nothing (?)
74 74 > xyzzy
75 75 > nor this (?)
76 76 > $ printf 'abc\ndef\nxyz\n'
77 77 > 123 (?)
78 78 > abc
79 79 > def (?)
80 80 > 456 (?)
81 81 > xyz
82 82 > $ printf 'zyx\nwvu\ntsr\n'
83 83 > abc (?)
84 84 > zyx (custom !)
85 85 > wvu
86 86 > no_print (no-custom !)
87 87 > tsr (no-missing !)
88 88 > missing (missing !)
89 89 > EOF
90 90
91 91 $ rt
92 92 .
93 93 # Ran 1 tests, 0 skipped, 0 failed.
94 94
95 95 failing test
96 96 ==================
97 97
98 98 test churn with globs
99 99 $ cat > test-failure.t <<EOF
100 100 > $ echo "bar-baz"; echo "bar-bad"; echo foo
101 101 > bar*bad (glob)
102 102 > bar*baz (glob)
103 103 > | fo (re)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,4 +1,4 @@
110 110 $ echo "bar-baz"; echo "bar-bad"; echo foo
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114 - | fo (re)
115 115 + foo
116 116
117 117 ERROR: test-failure.t output changed
118 118 !
119 119 Failed test-failure.t: output changed
120 120 # Ran 1 tests, 0 skipped, 1 failed.
121 121 python hash seed: * (glob)
122 122 [1]
123 123
124 124 test diff colorisation
125 125
126 126 #if no-windows pygments
127 127 $ rt test-failure.t --color always
128 128
129 129 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
130 130 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
131 131 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
132 132 $ echo "bar-baz"; echo "bar-bad"; echo foo
133 133 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
134 134 bar*bad (glob)
135 135 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
136 136 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
137 137 \x1b[38;5;34m+ foo\x1b[39m (esc)
138 138
139 139 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
140 140 !
141 141 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
142 142 # Ran 1 tests, 0 skipped, 1 failed.
143 143 python hash seed: * (glob)
144 144 [1]
145 145
146 146 $ rt test-failure.t 2> tmp.log
147 147 [1]
148 148 $ cat tmp.log
149 149
150 150 --- $TESTTMP/test-failure.t
151 151 +++ $TESTTMP/test-failure.t.err
152 152 @@ -1,4 +1,4 @@
153 153 $ echo "bar-baz"; echo "bar-bad"; echo foo
154 154 + bar*baz (glob)
155 155 bar*bad (glob)
156 156 - bar*baz (glob)
157 157 - | fo (re)
158 158 + foo
159 159
160 160 ERROR: test-failure.t output changed
161 161 !
162 162 Failed test-failure.t: output changed
163 163 # Ran 1 tests, 0 skipped, 1 failed.
164 164 python hash seed: * (glob)
165 165 #endif
166 166
167 167 $ cat > test-failure.t << EOF
168 168 > $ true
169 169 > should go away (true !)
170 170 > $ true
171 171 > should stay (false !)
172 172 >
173 173 > Should remove first line, not second or third
174 174 > $ echo 'testing'
175 175 > baz*foo (glob) (true !)
176 176 > foobar*foo (glob) (false !)
177 177 > te*ting (glob) (true !)
178 178 >
179 179 > Should keep first two lines, remove third and last
180 180 > $ echo 'testing'
181 181 > test.ng (re) (true !)
182 182 > foo.ar (re) (false !)
183 183 > b.r (re) (true !)
184 184 > missing (?)
185 185 > awol (true !)
186 186 >
187 187 > The "missing" line should stay, even though awol is dropped
188 188 > $ echo 'testing'
189 189 > test.ng (re) (true !)
190 190 > foo.ar (?)
191 191 > awol
192 192 > missing (?)
193 193 > EOF
194 194 $ rt test-failure.t
195 195
196 196 --- $TESTTMP/test-failure.t
197 197 +++ $TESTTMP/test-failure.t.err
198 198 @@ -1,11 +1,9 @@
199 199 $ true
200 200 - should go away (true !)
201 201 $ true
202 202 should stay (false !)
203 203
204 204 Should remove first line, not second or third
205 205 $ echo 'testing'
206 206 - baz*foo (glob) (true !)
207 207 foobar*foo (glob) (false !)
208 208 te*ting (glob) (true !)
209 209
210 210 foo.ar (re) (false !)
211 211 missing (?)
212 212 @@ -13,13 +11,10 @@
213 213 $ echo 'testing'
214 214 test.ng (re) (true !)
215 215 foo.ar (re) (false !)
216 216 - b.r (re) (true !)
217 217 missing (?)
218 218 - awol (true !)
219 219
220 220 The "missing" line should stay, even though awol is dropped
221 221 $ echo 'testing'
222 222 test.ng (re) (true !)
223 223 foo.ar (?)
224 224 - awol
225 225 missing (?)
226 226
227 227 ERROR: test-failure.t output changed
228 228 !
229 229 Failed test-failure.t: output changed
230 230 # Ran 1 tests, 0 skipped, 1 failed.
231 231 python hash seed: * (glob)
232 232 [1]
233 233
234 234 basic failing test
235 235 $ cat > test-failure.t << EOF
236 236 > $ echo babar
237 237 > rataxes
238 238 > This is a noop statement so that
239 239 > this test is still more bytes than success.
240 240 > pad pad pad pad............................................................
241 241 > pad pad pad pad............................................................
242 242 > pad pad pad pad............................................................
243 243 > pad pad pad pad............................................................
244 244 > pad pad pad pad............................................................
245 245 > pad pad pad pad............................................................
246 246 > EOF
247 247
248 248 >>> fh = open('test-failure-unicode.t', 'wb')
249 249 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
250 250 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
251 251
252 252 $ rt
253 253
254 254 --- $TESTTMP/test-failure.t
255 255 +++ $TESTTMP/test-failure.t.err
256 256 @@ -1,5 +1,5 @@
257 257 $ echo babar
258 258 - rataxes
259 259 + babar
260 260 This is a noop statement so that
261 261 this test is still more bytes than success.
262 262 pad pad pad pad............................................................
263 263
264 264 ERROR: test-failure.t output changed
265 265 !.
266 266 --- $TESTTMP/test-failure-unicode.t
267 267 +++ $TESTTMP/test-failure-unicode.t.err
268 268 @@ -1,2 +1,2 @@
269 269 $ echo babar\xce\xb1 (esc)
270 270 - l\xce\xb5\xce\xb5t (esc)
271 271 + babar\xce\xb1 (esc)
272 272
273 273 ERROR: test-failure-unicode.t output changed
274 274 !
275 275 Failed test-failure.t: output changed
276 276 Failed test-failure-unicode.t: output changed
277 277 # Ran 3 tests, 0 skipped, 2 failed.
278 278 python hash seed: * (glob)
279 279 [1]
280 280
281 281 test --outputdir
282 282 $ mkdir output
283 283 $ rt --outputdir output
284 284
285 285 --- $TESTTMP/test-failure.t
286 286 +++ $TESTTMP/output/test-failure.t.err
287 287 @@ -1,5 +1,5 @@
288 288 $ echo babar
289 289 - rataxes
290 290 + babar
291 291 This is a noop statement so that
292 292 this test is still more bytes than success.
293 293 pad pad pad pad............................................................
294 294
295 295 ERROR: test-failure.t output changed
296 296 !.
297 297 --- $TESTTMP/test-failure-unicode.t
298 298 +++ $TESTTMP/output/test-failure-unicode.t.err
299 299 @@ -1,2 +1,2 @@
300 300 $ echo babar\xce\xb1 (esc)
301 301 - l\xce\xb5\xce\xb5t (esc)
302 302 + babar\xce\xb1 (esc)
303 303
304 304 ERROR: test-failure-unicode.t output changed
305 305 !
306 306 Failed test-failure.t: output changed
307 307 Failed test-failure-unicode.t: output changed
308 308 # Ran 3 tests, 0 skipped, 2 failed.
309 309 python hash seed: * (glob)
310 310 [1]
311 311 $ ls -a output
312 312 .
313 313 ..
314 314 .testtimes
315 315 test-failure-unicode.t.err
316 316 test-failure.t.err
317 317
318 318 test --xunit support
319 319 $ rt --xunit=xunit.xml
320 320
321 321 --- $TESTTMP/test-failure.t
322 322 +++ $TESTTMP/test-failure.t.err
323 323 @@ -1,5 +1,5 @@
324 324 $ echo babar
325 325 - rataxes
326 326 + babar
327 327 This is a noop statement so that
328 328 this test is still more bytes than success.
329 329 pad pad pad pad............................................................
330 330
331 331 ERROR: test-failure.t output changed
332 332 !.
333 333 --- $TESTTMP/test-failure-unicode.t
334 334 +++ $TESTTMP/test-failure-unicode.t.err
335 335 @@ -1,2 +1,2 @@
336 336 $ echo babar\xce\xb1 (esc)
337 337 - l\xce\xb5\xce\xb5t (esc)
338 338 + babar\xce\xb1 (esc)
339 339
340 340 ERROR: test-failure-unicode.t output changed
341 341 !
342 342 Failed test-failure.t: output changed
343 343 Failed test-failure-unicode.t: output changed
344 344 # Ran 3 tests, 0 skipped, 2 failed.
345 345 python hash seed: * (glob)
346 346 [1]
347 347 $ cat xunit.xml
348 348 <?xml version="1.0" encoding="utf-8"?>
349 349 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
350 350 <testcase name="test-success.t" time="*"/> (glob)
351 351 <testcase name="test-failure-unicode.t" time="*"> (glob)
352 352 <failure message="output changed" type="output-mismatch">
353 353 <![CDATA[--- $TESTTMP/test-failure-unicode.t
354 354 +++ $TESTTMP/test-failure-unicode.t.err
355 355 @@ -1,2 +1,2 @@
356 356 $ echo babar\xce\xb1 (esc)
357 357 - l\xce\xb5\xce\xb5t (esc)
358 358 + babar\xce\xb1 (esc)
359 359 ]]> </failure>
360 360 </testcase>
361 361 <testcase name="test-failure.t" time="*"> (glob)
362 362 <failure message="output changed" type="output-mismatch">
363 363 <![CDATA[--- $TESTTMP/test-failure.t
364 364 +++ $TESTTMP/test-failure.t.err
365 365 @@ -1,5 +1,5 @@
366 366 $ echo babar
367 367 - rataxes
368 368 + babar
369 369 This is a noop statement so that
370 370 this test is still more bytes than success.
371 371 pad pad pad pad............................................................
372 372 ]]> </failure>
373 373 </testcase>
374 374 </testsuite>
375 375
376 376 $ cat .testtimes
377 377 test-empty.t * (glob)
378 378 test-failure-unicode.t * (glob)
379 379 test-failure.t * (glob)
380 380 test-success.t * (glob)
381 381
382 382 $ rt --list-tests
383 383 test-failure-unicode.t
384 384 test-failure.t
385 385 test-success.t
386 386
387 387 $ rt --list-tests --json
388 388 test-failure-unicode.t
389 389 test-failure.t
390 390 test-success.t
391 391 $ cat report.json
392 392 testreport ={
393 393 "test-failure-unicode.t": {
394 394 "result": "success"
395 395 },
396 396 "test-failure.t": {
397 397 "result": "success"
398 398 },
399 399 "test-success.t": {
400 400 "result": "success"
401 401 }
402 402 } (no-eol)
403 403
404 404 $ rt --list-tests --xunit=xunit.xml
405 405 test-failure-unicode.t
406 406 test-failure.t
407 407 test-success.t
408 408 $ cat xunit.xml
409 409 <?xml version="1.0" encoding="utf-8"?>
410 410 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
411 411 <testcase name="test-failure-unicode.t"/>
412 412 <testcase name="test-failure.t"/>
413 413 <testcase name="test-success.t"/>
414 414 </testsuite>
415 415
416 416 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
417 417 test-failure-unicode.t
418 418 test-failure.t
419 419 $ cat output/report.json
420 420 testreport ={
421 421 "test-failure-unicode.t": {
422 422 "result": "success"
423 423 },
424 424 "test-failure.t": {
425 425 "result": "success"
426 426 }
427 427 } (no-eol)
428 428 $ cat xunit.xml
429 429 <?xml version="1.0" encoding="utf-8"?>
430 430 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
431 431 <testcase name="test-failure-unicode.t"/>
432 432 <testcase name="test-failure.t"/>
433 433 </testsuite>
434 434
435 435 $ rm test-failure-unicode.t
436 436
437 437 test for --retest
438 438 ====================
439 439
440 440 $ rt --retest
441 441
442 442 --- $TESTTMP/test-failure.t
443 443 +++ $TESTTMP/test-failure.t.err
444 444 @@ -1,5 +1,5 @@
445 445 $ echo babar
446 446 - rataxes
447 447 + babar
448 448 This is a noop statement so that
449 449 this test is still more bytes than success.
450 450 pad pad pad pad............................................................
451 451
452 452 ERROR: test-failure.t output changed
453 453 !
454 454 Failed test-failure.t: output changed
455 455 # Ran 2 tests, 1 skipped, 1 failed.
456 456 python hash seed: * (glob)
457 457 [1]
458 458
459 459 --retest works with --outputdir
460 460 $ rm -r output
461 461 $ mkdir output
462 462 $ mv test-failure.t.err output
463 463 $ rt --retest --outputdir output
464 464
465 465 --- $TESTTMP/test-failure.t
466 466 +++ $TESTTMP/output/test-failure.t.err
467 467 @@ -1,5 +1,5 @@
468 468 $ echo babar
469 469 - rataxes
470 470 + babar
471 471 This is a noop statement so that
472 472 this test is still more bytes than success.
473 473 pad pad pad pad............................................................
474 474
475 475 ERROR: test-failure.t output changed
476 476 !
477 477 Failed test-failure.t: output changed
478 478 # Ran 2 tests, 1 skipped, 1 failed.
479 479 python hash seed: * (glob)
480 480 [1]
481 481
482 482 Selecting Tests To Run
483 483 ======================
484 484
485 485 successful
486 486
487 487 $ rt test-success.t
488 488 .
489 489 # Ran 1 tests, 0 skipped, 0 failed.
490 490
491 491 success w/ keyword
492 492 $ rt -k xyzzy
493 493 .
494 494 # Ran 2 tests, 1 skipped, 0 failed.
495 495
496 496 failed
497 497
498 498 $ rt test-failure.t
499 499
500 500 --- $TESTTMP/test-failure.t
501 501 +++ $TESTTMP/test-failure.t.err
502 502 @@ -1,5 +1,5 @@
503 503 $ echo babar
504 504 - rataxes
505 505 + babar
506 506 This is a noop statement so that
507 507 this test is still more bytes than success.
508 508 pad pad pad pad............................................................
509 509
510 510 ERROR: test-failure.t output changed
511 511 !
512 512 Failed test-failure.t: output changed
513 513 # Ran 1 tests, 0 skipped, 1 failed.
514 514 python hash seed: * (glob)
515 515 [1]
516 516
517 517 failure w/ keyword
518 518 $ rt -k rataxes
519 519
520 520 --- $TESTTMP/test-failure.t
521 521 +++ $TESTTMP/test-failure.t.err
522 522 @@ -1,5 +1,5 @@
523 523 $ echo babar
524 524 - rataxes
525 525 + babar
526 526 This is a noop statement so that
527 527 this test is still more bytes than success.
528 528 pad pad pad pad............................................................
529 529
530 530 ERROR: test-failure.t output changed
531 531 !
532 532 Failed test-failure.t: output changed
533 533 # Ran 2 tests, 1 skipped, 1 failed.
534 534 python hash seed: * (glob)
535 535 [1]
536 536
537 537 Verify that when a process fails to start we show a useful message
538 538 ==================================================================
539 539
540 540 $ cat > test-serve-fail.t <<EOF
541 541 > $ echo 'abort: child process failed to start blah'
542 542 > EOF
543 543 $ rt test-serve-fail.t
544 server failed to start (HGPORT=*) (glob)
544
545 545 --- $TESTTMP/test-serve-fail.t
546 546 +++ $TESTTMP/test-serve-fail.t.err
547 547 @@ -1* +1,2 @@ (glob)
548 548 $ echo 'abort: child process failed to start blah'
549 549 + abort: child process failed to start blah
550 550
551 551 ERROR: test-serve-fail.t output changed
552 552 !
553 Failed test-serve-fail.t: output changed
553 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
554 554 # Ran 1 tests, 0 skipped, 1 failed.
555 555 python hash seed: * (glob)
556 556 [1]
557 557 $ rm test-serve-fail.t
558 558
559 559 Verify that we can try other ports
560 560 ===================================
561 561 $ hg init inuse
562 562 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
563 563 $ cat blocks.pid >> $DAEMON_PIDS
564 564 $ cat > test-serve-inuse.t <<EOF
565 565 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
566 566 > $ cat hg.pid >> \$DAEMON_PIDS
567 567 > EOF
568 568 $ rt test-serve-inuse.t
569 569 .
570 570 # Ran 1 tests, 0 skipped, 0 failed.
571 571 $ rm test-serve-inuse.t
572 572 $ killdaemons.py $DAEMON_PIDS
573 573 $ rm $DAEMON_PIDS
574 574
575 575 Running In Debug Mode
576 576 ======================
577 577
578 578 $ rt --debug 2>&1 | grep -v pwd
579 579 + echo *SALT* 0 0 (glob)
580 580 *SALT* 0 0 (glob)
581 581 + echo babar
582 582 babar
583 583 + echo *SALT* 10 0 (glob)
584 584 *SALT* 10 0 (glob)
585 585 *+ echo *SALT* 0 0 (glob)
586 586 *SALT* 0 0 (glob)
587 587 + echo babar
588 588 babar
589 589 + echo *SALT* 2 0 (glob)
590 590 *SALT* 2 0 (glob)
591 591 + echo xyzzy
592 592 xyzzy
593 593 + echo *SALT* 9 0 (glob)
594 594 *SALT* 9 0 (glob)
595 595 + printf *abc\ndef\nxyz\n* (glob)
596 596 abc
597 597 def
598 598 xyz
599 599 + echo *SALT* 15 0 (glob)
600 600 *SALT* 15 0 (glob)
601 601 + printf *zyx\nwvu\ntsr\n* (glob)
602 602 zyx
603 603 wvu
604 604 tsr
605 605 + echo *SALT* 22 0 (glob)
606 606 *SALT* 22 0 (glob)
607 607 .
608 608 # Ran 2 tests, 0 skipped, 0 failed.
609 609
610 610 Parallel runs
611 611 ==============
612 612
613 613 (duplicate the failing test to get predictable output)
614 614 $ cp test-failure.t test-failure-copy.t
615 615
616 616 $ rt --jobs 2 test-failure*.t -n
617 617 !!
618 618 Failed test-failure*.t: output changed (glob)
619 619 Failed test-failure*.t: output changed (glob)
620 620 # Ran 2 tests, 0 skipped, 2 failed.
621 621 python hash seed: * (glob)
622 622 [1]
623 623
624 624 failures in parallel with --first should only print one failure
625 625 $ rt --jobs 2 --first test-failure*.t
626 626
627 627 --- $TESTTMP/test-failure*.t (glob)
628 628 +++ $TESTTMP/test-failure*.t.err (glob)
629 629 @@ -1,5 +1,5 @@
630 630 $ echo babar
631 631 - rataxes
632 632 + babar
633 633 This is a noop statement so that
634 634 this test is still more bytes than success.
635 635 pad pad pad pad............................................................
636 636
637 637 Failed test-failure*.t: output changed (glob)
638 638 Failed test-failure*.t: output changed (glob)
639 639 # Ran 2 tests, 0 skipped, 2 failed.
640 640 python hash seed: * (glob)
641 641 [1]
642 642
643 643
644 644 (delete the duplicated test file)
645 645 $ rm test-failure-copy.t
646 646
647 647
648 648 Interactive run
649 649 ===============
650 650
651 651 (backup the failing test)
652 652 $ cp test-failure.t backup
653 653
654 654 Refuse the fix
655 655
656 656 $ echo 'n' | rt -i
657 657
658 658 --- $TESTTMP/test-failure.t
659 659 +++ $TESTTMP/test-failure.t.err
660 660 @@ -1,5 +1,5 @@
661 661 $ echo babar
662 662 - rataxes
663 663 + babar
664 664 This is a noop statement so that
665 665 this test is still more bytes than success.
666 666 pad pad pad pad............................................................
667 667 Accept this change? [n]
668 668 ERROR: test-failure.t output changed
669 669 !.
670 670 Failed test-failure.t: output changed
671 671 # Ran 2 tests, 0 skipped, 1 failed.
672 672 python hash seed: * (glob)
673 673 [1]
674 674
675 675 $ cat test-failure.t
676 676 $ echo babar
677 677 rataxes
678 678 This is a noop statement so that
679 679 this test is still more bytes than success.
680 680 pad pad pad pad............................................................
681 681 pad pad pad pad............................................................
682 682 pad pad pad pad............................................................
683 683 pad pad pad pad............................................................
684 684 pad pad pad pad............................................................
685 685 pad pad pad pad............................................................
686 686
687 687 Interactive with custom view
688 688
689 689 $ echo 'n' | rt -i --view echo
690 690 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
691 691 Accept this change? [n]* (glob)
692 692 ERROR: test-failure.t output changed
693 693 !.
694 694 Failed test-failure.t: output changed
695 695 # Ran 2 tests, 0 skipped, 1 failed.
696 696 python hash seed: * (glob)
697 697 [1]
698 698
699 699 View the fix
700 700
701 701 $ echo 'y' | rt --view echo
702 702 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
703 703
704 704 ERROR: test-failure.t output changed
705 705 !.
706 706 Failed test-failure.t: output changed
707 707 # Ran 2 tests, 0 skipped, 1 failed.
708 708 python hash seed: * (glob)
709 709 [1]
710 710
711 711 Accept the fix
712 712
713 713 $ cat >> test-failure.t <<EOF
714 714 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
715 715 > saved backup bundle to \$TESTTMP/foo.hg
716 716 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
717 717 > saved backup bundle to $TESTTMP\\foo.hg
718 718 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
719 719 > saved backup bundle to \$TESTTMP/*.hg (glob)
720 720 > EOF
721 721 $ echo 'y' | rt -i 2>&1
722 722
723 723 --- $TESTTMP/test-failure.t
724 724 +++ $TESTTMP/test-failure.t.err
725 725 @@ -1,5 +1,5 @@
726 726 $ echo babar
727 727 - rataxes
728 728 + babar
729 729 This is a noop statement so that
730 730 this test is still more bytes than success.
731 731 pad pad pad pad............................................................
732 732 @@ -11,6 +11,6 @@
733 733 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
734 734 saved backup bundle to $TESTTMP/foo.hg
735 735 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
736 736 - saved backup bundle to $TESTTMP\foo.hg
737 737 + saved backup bundle to $TESTTMP/foo.hg
738 738 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
739 739 saved backup bundle to $TESTTMP/*.hg (glob)
740 740 Accept this change? [n] ..
741 741 # Ran 2 tests, 0 skipped, 0 failed.
742 742
743 743 $ sed -e 's,(glob)$,&<,g' test-failure.t
744 744 $ echo babar
745 745 babar
746 746 This is a noop statement so that
747 747 this test is still more bytes than success.
748 748 pad pad pad pad............................................................
749 749 pad pad pad pad............................................................
750 750 pad pad pad pad............................................................
751 751 pad pad pad pad............................................................
752 752 pad pad pad pad............................................................
753 753 pad pad pad pad............................................................
754 754 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
755 755 saved backup bundle to $TESTTMP/foo.hg
756 756 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
757 757 saved backup bundle to $TESTTMP/foo.hg
758 758 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
759 759 saved backup bundle to $TESTTMP/*.hg (glob)<
760 760
761 761 Race condition - test file was modified when test is running
762 762
763 763 $ TESTRACEDIR=`pwd`
764 764 $ export TESTRACEDIR
765 765 $ cat > test-race.t <<EOF
766 766 > $ echo 1
767 767 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
768 768 > EOF
769 769
770 770 $ rt -i test-race.t
771 771
772 772 --- $TESTTMP/test-race.t
773 773 +++ $TESTTMP/test-race.t.err
774 774 @@ -1,2 +1,3 @@
775 775 $ echo 1
776 776 + 1
777 777 $ echo "# a new line" >> $TESTTMP/test-race.t
778 778 Reference output has changed (run again to prompt changes)
779 779 ERROR: test-race.t output changed
780 780 !
781 781 Failed test-race.t: output changed
782 782 # Ran 1 tests, 0 skipped, 1 failed.
783 783 python hash seed: * (glob)
784 784 [1]
785 785
786 786 $ rm test-race.t
787 787
788 788 When "#testcases" is used in .t files
789 789
790 790 $ cat >> test-cases.t <<EOF
791 791 > #testcases a b
792 792 > #if a
793 793 > $ echo 1
794 794 > #endif
795 795 > #if b
796 796 > $ echo 2
797 797 > #endif
798 798 > EOF
799 799
800 800 $ cat <<EOF | rt -i test-cases.t 2>&1
801 801 > y
802 802 > y
803 803 > EOF
804 804
805 805 --- $TESTTMP/test-cases.t
806 806 +++ $TESTTMP/test-cases.t.a.err
807 807 @@ -1,6 +1,7 @@
808 808 #testcases a b
809 809 #if a
810 810 $ echo 1
811 811 + 1
812 812 #endif
813 813 #if b
814 814 $ echo 2
815 815 Accept this change? [n] .
816 816 --- $TESTTMP/test-cases.t
817 817 +++ $TESTTMP/test-cases.t.b.err
818 818 @@ -5,4 +5,5 @@
819 819 #endif
820 820 #if b
821 821 $ echo 2
822 822 + 2
823 823 #endif
824 824 Accept this change? [n] .
825 825 # Ran 2 tests, 0 skipped, 0 failed.
826 826
827 827 $ cat test-cases.t
828 828 #testcases a b
829 829 #if a
830 830 $ echo 1
831 831 1
832 832 #endif
833 833 #if b
834 834 $ echo 2
835 835 2
836 836 #endif
837 837
838 838 $ cat >> test-cases.t <<'EOF'
839 839 > #if a
840 840 > $ NAME=A
841 841 > #else
842 842 > $ NAME=B
843 843 > #endif
844 844 > $ echo $NAME
845 845 > A (a !)
846 846 > B (b !)
847 847 > EOF
848 848 $ rt test-cases.t
849 849 ..
850 850 # Ran 2 tests, 0 skipped, 0 failed.
851 851
852 852 $ rm test-cases.t
853 853
854 854 (reinstall)
855 855 $ mv backup test-failure.t
856 856
857 857 No Diff
858 858 ===============
859 859
860 860 $ rt --nodiff
861 861 !.
862 862 Failed test-failure.t: output changed
863 863 # Ran 2 tests, 0 skipped, 1 failed.
864 864 python hash seed: * (glob)
865 865 [1]
866 866
867 867 test --tmpdir support
868 868 $ rt --tmpdir=$TESTTMP/keep test-success.t
869 869
870 870 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
871 871 Keeping threadtmp dir: $TESTTMP/keep/child1
872 872 .
873 873 # Ran 1 tests, 0 skipped, 0 failed.
874 874
875 875 timeouts
876 876 ========
877 877 $ cat > test-timeout.t <<EOF
878 878 > $ sleep 2
879 879 > $ echo pass
880 880 > pass
881 881 > EOF
882 882 > echo '#require slow' > test-slow-timeout.t
883 883 > cat test-timeout.t >> test-slow-timeout.t
884 884 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
885 885 st
886 886 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
887 887 Failed test-timeout.t: timed out
888 888 # Ran 1 tests, 1 skipped, 1 failed.
889 889 python hash seed: * (glob)
890 890 [1]
891 891 $ rt --timeout=1 --slowtimeout=3 \
892 892 > test-timeout.t test-slow-timeout.t --allow-slow-tests
893 893 .t
894 894 Failed test-timeout.t: timed out
895 895 # Ran 2 tests, 0 skipped, 1 failed.
896 896 python hash seed: * (glob)
897 897 [1]
898 898 $ rm test-timeout.t test-slow-timeout.t
899 899
900 900 test for --time
901 901 ==================
902 902
903 903 $ rt test-success.t --time
904 904 .
905 905 # Ran 1 tests, 0 skipped, 0 failed.
906 906 # Producing time report
907 907 start end cuser csys real Test
908 908 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
909 909
910 910 test for --time with --job enabled
911 911 ====================================
912 912
913 913 $ rt test-success.t --time --jobs 2
914 914 .
915 915 # Ran 1 tests, 0 skipped, 0 failed.
916 916 # Producing time report
917 917 start end cuser csys real Test
918 918 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
919 919
920 920 Skips
921 921 ================
922 922 $ cat > test-skip.t <<EOF
923 923 > $ echo xyzzy
924 924 > #require false
925 925 > EOF
926 926 $ rt --nodiff
927 927 !.s
928 928 Skipped test-skip.t: missing feature: nail clipper
929 929 Failed test-failure.t: output changed
930 930 # Ran 2 tests, 1 skipped, 1 failed.
931 931 python hash seed: * (glob)
932 932 [1]
933 933
934 934 $ rt --keyword xyzzy
935 935 .s
936 936 Skipped test-skip.t: missing feature: nail clipper
937 937 # Ran 2 tests, 2 skipped, 0 failed.
938 938
939 939 Skips with xml
940 940 $ rt --keyword xyzzy \
941 941 > --xunit=xunit.xml
942 942 .s
943 943 Skipped test-skip.t: missing feature: nail clipper
944 944 # Ran 2 tests, 2 skipped, 0 failed.
945 945 $ cat xunit.xml
946 946 <?xml version="1.0" encoding="utf-8"?>
947 947 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
948 948 <testcase name="test-success.t" time="*"/> (glob)
949 949 <testcase name="test-skip.t">
950 950 <skipped>
951 951 <![CDATA[missing feature: nail clipper]]> </skipped>
952 952 </testcase>
953 953 </testsuite>
954 954
955 955 Missing skips or blacklisted skips don't count as executed:
956 956 $ echo test-failure.t > blacklist
957 957 $ rt --blacklist=blacklist --json\
958 958 > test-failure.t test-bogus.t
959 959 ss
960 960 Skipped test-bogus.t: Doesn't exist
961 961 Skipped test-failure.t: blacklisted
962 962 # Ran 0 tests, 2 skipped, 0 failed.
963 963 $ cat report.json
964 964 testreport ={
965 965 "test-bogus.t": {
966 966 "result": "skip"
967 967 },
968 968 "test-failure.t": {
969 969 "result": "skip"
970 970 }
971 971 } (no-eol)
972 972
973 973 Whitelist trumps blacklist
974 974 $ echo test-failure.t > whitelist
975 975 $ rt --blacklist=blacklist --whitelist=whitelist --json\
976 976 > test-failure.t test-bogus.t
977 977 s
978 978 --- $TESTTMP/test-failure.t
979 979 +++ $TESTTMP/test-failure.t.err
980 980 @@ -1,5 +1,5 @@
981 981 $ echo babar
982 982 - rataxes
983 983 + babar
984 984 This is a noop statement so that
985 985 this test is still more bytes than success.
986 986 pad pad pad pad............................................................
987 987
988 988 ERROR: test-failure.t output changed
989 989 !
990 990 Skipped test-bogus.t: Doesn't exist
991 991 Failed test-failure.t: output changed
992 992 # Ran 1 tests, 1 skipped, 1 failed.
993 993 python hash seed: * (glob)
994 994 [1]
995 995
996 996 Ensure that --test-list causes only the tests listed in that file to
997 997 be executed.
998 998 $ echo test-success.t >> onlytest
999 999 $ rt --test-list=onlytest
1000 1000 .
1001 1001 # Ran 1 tests, 0 skipped, 0 failed.
1002 1002 $ echo test-bogus.t >> anothertest
1003 1003 $ rt --test-list=onlytest --test-list=anothertest
1004 1004 s.
1005 1005 Skipped test-bogus.t: Doesn't exist
1006 1006 # Ran 1 tests, 1 skipped, 0 failed.
1007 1007 $ rm onlytest anothertest
1008 1008
1009 1009 test for --json
1010 1010 ==================
1011 1011
1012 1012 $ rt --json
1013 1013
1014 1014 --- $TESTTMP/test-failure.t
1015 1015 +++ $TESTTMP/test-failure.t.err
1016 1016 @@ -1,5 +1,5 @@
1017 1017 $ echo babar
1018 1018 - rataxes
1019 1019 + babar
1020 1020 This is a noop statement so that
1021 1021 this test is still more bytes than success.
1022 1022 pad pad pad pad............................................................
1023 1023
1024 1024 ERROR: test-failure.t output changed
1025 1025 !.s
1026 1026 Skipped test-skip.t: missing feature: nail clipper
1027 1027 Failed test-failure.t: output changed
1028 1028 # Ran 2 tests, 1 skipped, 1 failed.
1029 1029 python hash seed: * (glob)
1030 1030 [1]
1031 1031
1032 1032 $ cat report.json
1033 1033 testreport ={
1034 1034 "test-failure.t": [\{] (re)
1035 1035 "csys": "\s*[\d\.]{4,5}", ? (re)
1036 1036 "cuser": "\s*[\d\.]{4,5}", ? (re)
1037 1037 "diff": "---.+\+\+\+.+", ? (re)
1038 1038 "end": "\s*[\d\.]{4,5}", ? (re)
1039 1039 "result": "failure", ? (re)
1040 1040 "start": "\s*[\d\.]{4,5}", ? (re)
1041 1041 "time": "\s*[\d\.]{4,5}" (re)
1042 1042 }, ? (re)
1043 1043 "test-skip.t": {
1044 1044 "csys": "\s*[\d\.]{4,5}", ? (re)
1045 1045 "cuser": "\s*[\d\.]{4,5}", ? (re)
1046 1046 "diff": "", ? (re)
1047 1047 "end": "\s*[\d\.]{4,5}", ? (re)
1048 1048 "result": "skip", ? (re)
1049 1049 "start": "\s*[\d\.]{4,5}", ? (re)
1050 1050 "time": "\s*[\d\.]{4,5}" (re)
1051 1051 }, ? (re)
1052 1052 "test-success.t": [\{] (re)
1053 1053 "csys": "\s*[\d\.]{4,5}", ? (re)
1054 1054 "cuser": "\s*[\d\.]{4,5}", ? (re)
1055 1055 "diff": "", ? (re)
1056 1056 "end": "\s*[\d\.]{4,5}", ? (re)
1057 1057 "result": "success", ? (re)
1058 1058 "start": "\s*[\d\.]{4,5}", ? (re)
1059 1059 "time": "\s*[\d\.]{4,5}" (re)
1060 1060 }
1061 1061 } (no-eol)
1062 1062 --json with --outputdir
1063 1063
1064 1064 $ rm report.json
1065 1065 $ rm -r output
1066 1066 $ mkdir output
1067 1067 $ rt --json --outputdir output
1068 1068
1069 1069 --- $TESTTMP/test-failure.t
1070 1070 +++ $TESTTMP/output/test-failure.t.err
1071 1071 @@ -1,5 +1,5 @@
1072 1072 $ echo babar
1073 1073 - rataxes
1074 1074 + babar
1075 1075 This is a noop statement so that
1076 1076 this test is still more bytes than success.
1077 1077 pad pad pad pad............................................................
1078 1078
1079 1079 ERROR: test-failure.t output changed
1080 1080 !.s
1081 1081 Skipped test-skip.t: missing feature: nail clipper
1082 1082 Failed test-failure.t: output changed
1083 1083 # Ran 2 tests, 1 skipped, 1 failed.
1084 1084 python hash seed: * (glob)
1085 1085 [1]
1086 1086 $ f report.json
1087 1087 report.json: file not found
1088 1088 $ cat output/report.json
1089 1089 testreport ={
1090 1090 "test-failure.t": [\{] (re)
1091 1091 "csys": "\s*[\d\.]{4,5}", ? (re)
1092 1092 "cuser": "\s*[\d\.]{4,5}", ? (re)
1093 1093 "diff": "---.+\+\+\+.+", ? (re)
1094 1094 "end": "\s*[\d\.]{4,5}", ? (re)
1095 1095 "result": "failure", ? (re)
1096 1096 "start": "\s*[\d\.]{4,5}", ? (re)
1097 1097 "time": "\s*[\d\.]{4,5}" (re)
1098 1098 }, ? (re)
1099 1099 "test-skip.t": {
1100 1100 "csys": "\s*[\d\.]{4,5}", ? (re)
1101 1101 "cuser": "\s*[\d\.]{4,5}", ? (re)
1102 1102 "diff": "", ? (re)
1103 1103 "end": "\s*[\d\.]{4,5}", ? (re)
1104 1104 "result": "skip", ? (re)
1105 1105 "start": "\s*[\d\.]{4,5}", ? (re)
1106 1106 "time": "\s*[\d\.]{4,5}" (re)
1107 1107 }, ? (re)
1108 1108 "test-success.t": [\{] (re)
1109 1109 "csys": "\s*[\d\.]{4,5}", ? (re)
1110 1110 "cuser": "\s*[\d\.]{4,5}", ? (re)
1111 1111 "diff": "", ? (re)
1112 1112 "end": "\s*[\d\.]{4,5}", ? (re)
1113 1113 "result": "success", ? (re)
1114 1114 "start": "\s*[\d\.]{4,5}", ? (re)
1115 1115 "time": "\s*[\d\.]{4,5}" (re)
1116 1116 }
1117 1117 } (no-eol)
1118 1118 $ ls -a output
1119 1119 .
1120 1120 ..
1121 1121 .testtimes
1122 1122 report.json
1123 1123 test-failure.t.err
1124 1124
1125 1125 Test that failed test accepted through interactive are properly reported:
1126 1126
1127 1127 $ cp test-failure.t backup
1128 1128 $ echo y | rt --json -i
1129 1129
1130 1130 --- $TESTTMP/test-failure.t
1131 1131 +++ $TESTTMP/test-failure.t.err
1132 1132 @@ -1,5 +1,5 @@
1133 1133 $ echo babar
1134 1134 - rataxes
1135 1135 + babar
1136 1136 This is a noop statement so that
1137 1137 this test is still more bytes than success.
1138 1138 pad pad pad pad............................................................
1139 1139 Accept this change? [n] ..s
1140 1140 Skipped test-skip.t: missing feature: nail clipper
1141 1141 # Ran 2 tests, 1 skipped, 0 failed.
1142 1142
1143 1143 $ cat report.json
1144 1144 testreport ={
1145 1145 "test-failure.t": [\{] (re)
1146 1146 "csys": "\s*[\d\.]{4,5}", ? (re)
1147 1147 "cuser": "\s*[\d\.]{4,5}", ? (re)
1148 1148 "diff": "", ? (re)
1149 1149 "end": "\s*[\d\.]{4,5}", ? (re)
1150 1150 "result": "success", ? (re)
1151 1151 "start": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "time": "\s*[\d\.]{4,5}" (re)
1153 1153 }, ? (re)
1154 1154 "test-skip.t": {
1155 1155 "csys": "\s*[\d\.]{4,5}", ? (re)
1156 1156 "cuser": "\s*[\d\.]{4,5}", ? (re)
1157 1157 "diff": "", ? (re)
1158 1158 "end": "\s*[\d\.]{4,5}", ? (re)
1159 1159 "result": "skip", ? (re)
1160 1160 "start": "\s*[\d\.]{4,5}", ? (re)
1161 1161 "time": "\s*[\d\.]{4,5}" (re)
1162 1162 }, ? (re)
1163 1163 "test-success.t": [\{] (re)
1164 1164 "csys": "\s*[\d\.]{4,5}", ? (re)
1165 1165 "cuser": "\s*[\d\.]{4,5}", ? (re)
1166 1166 "diff": "", ? (re)
1167 1167 "end": "\s*[\d\.]{4,5}", ? (re)
1168 1168 "result": "success", ? (re)
1169 1169 "start": "\s*[\d\.]{4,5}", ? (re)
1170 1170 "time": "\s*[\d\.]{4,5}" (re)
1171 1171 }
1172 1172 } (no-eol)
1173 1173 $ mv backup test-failure.t
1174 1174
1175 1175 backslash on end of line with glob matching is handled properly
1176 1176
1177 1177 $ cat > test-glob-backslash.t << EOF
1178 1178 > $ echo 'foo bar \\'
1179 1179 > foo * \ (glob)
1180 1180 > EOF
1181 1181
1182 1182 $ rt test-glob-backslash.t
1183 1183 .
1184 1184 # Ran 1 tests, 0 skipped, 0 failed.
1185 1185
1186 1186 $ rm -f test-glob-backslash.t
1187 1187
1188 1188 Test globbing of local IP addresses
1189 1189 $ echo 172.16.18.1
1190 1190 $LOCALIP (glob)
1191 1191 $ echo dead:beef::1
1192 1192 $LOCALIP (glob)
1193 1193
1194 1194 Test reusability for third party tools
1195 1195 ======================================
1196 1196
1197 1197 $ mkdir "$TESTTMP"/anothertests
1198 1198 $ cd "$TESTTMP"/anothertests
1199 1199
1200 1200 test that `run-tests.py` can execute hghave, even if it runs not in
1201 1201 Mercurial source tree.
1202 1202
1203 1203 $ cat > test-hghave.t <<EOF
1204 1204 > #require true
1205 1205 > $ echo foo
1206 1206 > foo
1207 1207 > EOF
1208 1208 $ rt test-hghave.t
1209 1209 .
1210 1210 # Ran 1 tests, 0 skipped, 0 failed.
1211 1211
1212 1212 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1213 1213 running is placed.
1214 1214
1215 1215 $ cat > test-runtestdir.t <<EOF
1216 1216 > - $TESTDIR, in which test-run-tests.t is placed
1217 1217 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1218 1218 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1219 1219 >
1220 1220 > #if windows
1221 1221 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1222 1222 > #else
1223 1223 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1224 1224 > #endif
1225 1225 > If this prints a path, that means RUNTESTDIR didn't equal
1226 1226 > TESTDIR as it should have.
1227 1227 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1228 1228 > This should print the start of check-code. If this passes but the
1229 1229 > previous check failed, that means we found a copy of check-code at whatever
1230 1230 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1231 1231 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1232 1232 > #!USRBINENVPY
1233 1233 > #
1234 1234 > # check-code - a style and portability checker for Mercurial
1235 1235 > EOF
1236 1236 $ rt test-runtestdir.t
1237 1237 .
1238 1238 # Ran 1 tests, 0 skipped, 0 failed.
1239 1239
1240 1240 #if execbit
1241 1241
1242 1242 test that TESTDIR is referred in PATH
1243 1243
1244 1244 $ cat > custom-command.sh <<EOF
1245 1245 > #!/bin/sh
1246 1246 > echo "hello world"
1247 1247 > EOF
1248 1248 $ chmod +x custom-command.sh
1249 1249 $ cat > test-testdir-path.t <<EOF
1250 1250 > $ custom-command.sh
1251 1251 > hello world
1252 1252 > EOF
1253 1253 $ rt test-testdir-path.t
1254 1254 .
1255 1255 # Ran 1 tests, 0 skipped, 0 failed.
1256 1256
1257 1257 #endif
1258 1258
1259 1259 test support for --allow-slow-tests
1260 1260 $ cat > test-very-slow-test.t <<EOF
1261 1261 > #require slow
1262 1262 > $ echo pass
1263 1263 > pass
1264 1264 > EOF
1265 1265 $ rt test-very-slow-test.t
1266 1266 s
1267 1267 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1268 1268 # Ran 0 tests, 1 skipped, 0 failed.
1269 1269 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1270 1270 .
1271 1271 # Ran 1 tests, 0 skipped, 0 failed.
1272 1272
1273 1273 support for running a test outside the current directory
1274 1274 $ mkdir nonlocal
1275 1275 $ cat > nonlocal/test-is-not-here.t << EOF
1276 1276 > $ echo pass
1277 1277 > pass
1278 1278 > EOF
1279 1279 $ rt nonlocal/test-is-not-here.t
1280 1280 .
1281 1281 # Ran 1 tests, 0 skipped, 0 failed.
1282 1282
1283 1283 support for automatically discovering test if arg is a folder
1284 1284 $ mkdir tmp && cd tmp
1285 1285
1286 1286 $ cat > test-uno.t << EOF
1287 1287 > $ echo line
1288 1288 > line
1289 1289 > EOF
1290 1290
1291 1291 $ cp test-uno.t test-dos.t
1292 1292 $ cd ..
1293 1293 $ cp -R tmp tmpp
1294 1294 $ cp tmp/test-uno.t test-solo.t
1295 1295
1296 1296 $ rt tmp/ test-solo.t tmpp
1297 1297 .....
1298 1298 # Ran 5 tests, 0 skipped, 0 failed.
1299 1299 $ rm -rf tmp tmpp
1300 1300
1301 1301 support for running run-tests.py from another directory
1302 1302 $ mkdir tmp && cd tmp
1303 1303
1304 1304 $ cat > useful-file.sh << EOF
1305 1305 > important command
1306 1306 > EOF
1307 1307
1308 1308 $ cat > test-folder.t << EOF
1309 1309 > $ cat \$TESTDIR/useful-file.sh
1310 1310 > important command
1311 1311 > EOF
1312 1312
1313 1313 $ cat > test-folder-fail.t << EOF
1314 1314 > $ cat \$TESTDIR/useful-file.sh
1315 1315 > important commando
1316 1316 > EOF
1317 1317
1318 1318 $ cd ..
1319 1319 $ rt tmp/test-*.t
1320 1320
1321 1321 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1322 1322 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1323 1323 @@ -1,2 +1,2 @@
1324 1324 $ cat $TESTDIR/useful-file.sh
1325 1325 - important commando
1326 1326 + important command
1327 1327
1328 1328 ERROR: test-folder-fail.t output changed
1329 1329 !.
1330 1330 Failed test-folder-fail.t: output changed
1331 1331 # Ran 2 tests, 0 skipped, 1 failed.
1332 1332 python hash seed: * (glob)
1333 1333 [1]
1334 1334
1335 1335 support for bisecting failed tests automatically
1336 1336 $ hg init bisect
1337 1337 $ cd bisect
1338 1338 $ cat >> test-bisect.t <<EOF
1339 1339 > $ echo pass
1340 1340 > pass
1341 1341 > EOF
1342 1342 $ hg add test-bisect.t
1343 1343 $ hg ci -m 'good'
1344 1344 $ cat >> test-bisect.t <<EOF
1345 1345 > $ echo pass
1346 1346 > fail
1347 1347 > EOF
1348 1348 $ hg ci -m 'bad'
1349 1349 $ rt --known-good-rev=0 test-bisect.t
1350 1350
1351 1351 --- $TESTTMP/anothertests/bisect/test-bisect.t
1352 1352 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1353 1353 @@ -1,4 +1,4 @@
1354 1354 $ echo pass
1355 1355 pass
1356 1356 $ echo pass
1357 1357 - fail
1358 1358 + pass
1359 1359
1360 1360 ERROR: test-bisect.t output changed
1361 1361 !
1362 1362 Failed test-bisect.t: output changed
1363 1363 test-bisect.t broken by 72cbf122d116 (bad)
1364 1364 # Ran 1 tests, 0 skipped, 1 failed.
1365 1365 python hash seed: * (glob)
1366 1366 [1]
1367 1367
1368 1368 $ cd ..
1369 1369
1370 1370 support bisecting a separate repo
1371 1371
1372 1372 $ hg init bisect-dependent
1373 1373 $ cd bisect-dependent
1374 1374 $ cat > test-bisect-dependent.t <<EOF
1375 1375 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1376 1376 > pass
1377 1377 > EOF
1378 1378 $ hg commit -Am dependent test-bisect-dependent.t
1379 1379
1380 1380 $ rt --known-good-rev=0 test-bisect-dependent.t
1381 1381
1382 1382 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1383 1383 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1384 1384 @@ -1,2 +1,2 @@
1385 1385 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1386 1386 - pass
1387 1387 + fail
1388 1388
1389 1389 ERROR: test-bisect-dependent.t output changed
1390 1390 !
1391 1391 Failed test-bisect-dependent.t: output changed
1392 1392 Failed to identify failure point for test-bisect-dependent.t
1393 1393 # Ran 1 tests, 0 skipped, 1 failed.
1394 1394 python hash seed: * (glob)
1395 1395 [1]
1396 1396
1397 1397 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1398 1398 usage: run-tests.py [options] [tests]
1399 1399 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1400 1400 [2]
1401 1401
1402 1402 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1403 1403
1404 1404 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1405 1405 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1406 1406 @@ -1,2 +1,2 @@
1407 1407 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1408 1408 - pass
1409 1409 + fail
1410 1410
1411 1411 ERROR: test-bisect-dependent.t output changed
1412 1412 !
1413 1413 Failed test-bisect-dependent.t: output changed
1414 1414 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1415 1415 # Ran 1 tests, 0 skipped, 1 failed.
1416 1416 python hash seed: * (glob)
1417 1417 [1]
1418 1418
1419 1419 $ cd ..
1420 1420
1421 1421 Test a broken #if statement doesn't break run-tests threading.
1422 1422 ==============================================================
1423 1423 $ mkdir broken
1424 1424 $ cd broken
1425 1425 $ cat > test-broken.t <<EOF
1426 1426 > true
1427 1427 > #if notarealhghavefeature
1428 1428 > $ false
1429 1429 > #endif
1430 1430 > EOF
1431 1431 $ for f in 1 2 3 4 ; do
1432 1432 > cat > test-works-$f.t <<EOF
1433 1433 > This is test case $f
1434 1434 > $ sleep 1
1435 1435 > EOF
1436 1436 > done
1437 1437 $ rt -j 2
1438 1438 ....
1439 1439 # Ran 5 tests, 0 skipped, 0 failed.
1440 1440 skipped: unknown feature: notarealhghavefeature
1441 1441
1442 1442 $ cd ..
1443 1443 $ rm -rf broken
1444 1444
1445 1445 Test cases in .t files
1446 1446 ======================
1447 1447 $ mkdir cases
1448 1448 $ cd cases
1449 1449 $ cat > test-cases-abc.t <<'EOF'
1450 1450 > #testcases A B C
1451 1451 > $ V=B
1452 1452 > #if A
1453 1453 > $ V=A
1454 1454 > #endif
1455 1455 > #if C
1456 1456 > $ V=C
1457 1457 > #endif
1458 1458 > $ echo $V | sed 's/A/C/'
1459 1459 > C
1460 1460 > #if C
1461 1461 > $ [ $V = C ]
1462 1462 > #endif
1463 1463 > #if A
1464 1464 > $ [ $V = C ]
1465 1465 > [1]
1466 1466 > #endif
1467 1467 > #if no-C
1468 1468 > $ [ $V = C ]
1469 1469 > [1]
1470 1470 > #endif
1471 1471 > $ [ $V = D ]
1472 1472 > [1]
1473 1473 > EOF
1474 1474 $ rt
1475 1475 .
1476 1476 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1477 1477 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1478 1478 @@ -7,7 +7,7 @@
1479 1479 $ V=C
1480 1480 #endif
1481 1481 $ echo $V | sed 's/A/C/'
1482 1482 - C
1483 1483 + B
1484 1484 #if C
1485 1485 $ [ $V = C ]
1486 1486 #endif
1487 1487
1488 1488 ERROR: test-cases-abc.t (case B) output changed
1489 1489 !.
1490 1490 Failed test-cases-abc.t (case B): output changed
1491 1491 # Ran 3 tests, 0 skipped, 1 failed.
1492 1492 python hash seed: * (glob)
1493 1493 [1]
1494 1494
1495 1495 --restart works
1496 1496
1497 1497 $ rt --restart
1498 1498
1499 1499 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1500 1500 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1501 1501 @@ -7,7 +7,7 @@
1502 1502 $ V=C
1503 1503 #endif
1504 1504 $ echo $V | sed 's/A/C/'
1505 1505 - C
1506 1506 + B
1507 1507 #if C
1508 1508 $ [ $V = C ]
1509 1509 #endif
1510 1510
1511 1511 ERROR: test-cases-abc.t (case B) output changed
1512 1512 !.
1513 1513 Failed test-cases-abc.t (case B): output changed
1514 1514 # Ran 2 tests, 0 skipped, 1 failed.
1515 1515 python hash seed: * (glob)
1516 1516 [1]
1517 1517
1518 1518 --restart works with outputdir
1519 1519
1520 1520 $ mkdir output
1521 1521 $ mv test-cases-abc.t.B.err output
1522 1522 $ rt --restart --outputdir output
1523 1523
1524 1524 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1525 1525 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1526 1526 @@ -7,7 +7,7 @@
1527 1527 $ V=C
1528 1528 #endif
1529 1529 $ echo $V | sed 's/A/C/'
1530 1530 - C
1531 1531 + B
1532 1532 #if C
1533 1533 $ [ $V = C ]
1534 1534 #endif
1535 1535
1536 1536 ERROR: test-cases-abc.t (case B) output changed
1537 1537 !.
1538 1538 Failed test-cases-abc.t (case B): output changed
1539 1539 # Ran 2 tests, 0 skipped, 1 failed.
1540 1540 python hash seed: * (glob)
1541 1541 [1]
1542 1542
1543 1543 Test TESTCASE variable
1544 1544
1545 1545 $ cat > test-cases-ab.t <<'EOF'
1546 1546 > $ dostuff() {
1547 1547 > > echo "In case $TESTCASE"
1548 1548 > > }
1549 1549 > #testcases A B
1550 1550 > #if A
1551 1551 > $ dostuff
1552 1552 > In case A
1553 1553 > #endif
1554 1554 > #if B
1555 1555 > $ dostuff
1556 1556 > In case B
1557 1557 > #endif
1558 1558 > EOF
1559 1559 $ rt test-cases-ab.t
1560 1560 ..
1561 1561 # Ran 2 tests, 0 skipped, 0 failed.
1562 1562
1563 1563 Test automatic pattern replacement
1564 1564
1565 1565 $ cat << EOF >> common-pattern.py
1566 1566 > substitutions = [
1567 1567 > (br'foo-(.*)\\b',
1568 1568 > br'\$XXX=\\1\$'),
1569 1569 > (br'bar\\n',
1570 1570 > br'\$YYY$\\n'),
1571 1571 > ]
1572 1572 > EOF
1573 1573
1574 1574 $ cat << EOF >> test-substitution.t
1575 1575 > $ echo foo-12
1576 1576 > \$XXX=12$
1577 1577 > $ echo foo-42
1578 1578 > \$XXX=42$
1579 1579 > $ echo bar prior
1580 1580 > bar prior
1581 1581 > $ echo lastbar
1582 1582 > last\$YYY$
1583 1583 > $ echo foo-bar foo-baz
1584 1584 > EOF
1585 1585
1586 1586 $ rt test-substitution.t
1587 1587
1588 1588 --- $TESTTMP/anothertests/cases/test-substitution.t
1589 1589 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1590 1590 @@ -7,3 +7,4 @@
1591 1591 $ echo lastbar
1592 1592 last$YYY$
1593 1593 $ echo foo-bar foo-baz
1594 1594 + $XXX=bar foo-baz$
1595 1595
1596 1596 ERROR: test-substitution.t output changed
1597 1597 !
1598 1598 Failed test-substitution.t: output changed
1599 1599 # Ran 1 tests, 0 skipped, 1 failed.
1600 1600 python hash seed: * (glob)
1601 1601 [1]
1602 1602
1603 1603 --extra-config-opt works
1604 1604
1605 1605 $ cat << EOF >> test-config-opt.t
1606 1606 > $ hg init test-config-opt
1607 1607 > $ hg -R test-config-opt purge
1608 1608 > EOF
1609 1609
1610 1610 $ rt --extra-config-opt extensions.purge= test-config-opt.t
1611 1611 .
1612 1612 # Ran 1 tests, 0 skipped, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now