run-tests: outputdir also has to be changed if $TESTDIR is not $PWD...
Matthieu Laneuville
r35096:fc0f3ed0 default
@@ -1,2958 +1,2960 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 try:
74 74 import shlex
75 75 shellquote = shlex.quote
76 76 except (ImportError, AttributeError):
77 77 import pipes
78 78 shellquote = pipes.quote
79 79
80 80 if os.environ.get('RTUNICODEPEDANTRY', False):
81 81 try:
82 82 reload(sys)
83 83 sys.setdefaultencoding("undefined")
84 84 except NameError:
85 85 pass
86 86
87 87 origenviron = os.environ.copy()
88 88 osenvironb = getattr(os, 'environb', os.environ)
89 89 processlock = threading.Lock()
90 90
91 91 pygmentspresent = False
92 92 # ANSI color is unsupported prior to Windows 10
93 93 if os.name != 'nt':
94 94 try: # is pygments installed
95 95 import pygments
96 96 import pygments.lexers as lexers
97 97 import pygments.lexer as lexer
98 98 import pygments.formatters as formatters
99 99 import pygments.token as token
100 100 import pygments.style as style
101 101 pygmentspresent = True
102 102 difflexer = lexers.DiffLexer()
103 103 terminal256formatter = formatters.Terminal256Formatter()
104 104 except ImportError:
105 105 pass
106 106
107 107 if pygmentspresent:
108 108 class TestRunnerStyle(style.Style):
109 109 default_style = ""
110 110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 114 styles = {
115 115 skipped: '#e5e5e5',
116 116 skippedname: '#00ffff',
117 117 failed: '#7f0000',
118 118 failedname: '#ff0000',
119 119 }
120 120
121 121 class TestRunnerLexer(lexer.RegexLexer):
122 122 tokens = {
123 123 'root': [
124 124 (r'^Skipped', token.Generic.Skipped, 'skipped'),
125 125 (r'^Failed ', token.Generic.Failed, 'failed'),
126 126 (r'^ERROR: ', token.Generic.Failed, 'failed'),
127 127 ],
128 128 'skipped': [
129 129 (r'[\w-]+\.(t|py)', token.Generic.SName),
130 130 (r':.*', token.Generic.Skipped),
131 131 ],
132 132 'failed': [
133 133 (r'[\w-]+\.(t|py)', token.Generic.FName),
134 134 (r'(:| ).*', token.Generic.Failed),
135 135 ]
136 136 }
137 137
138 138 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
139 139 runnerlexer = TestRunnerLexer()
140 140
141 141 if sys.version_info > (3, 5, 0):
142 142 PYTHON3 = True
143 143 xrange = range # we use xrange in one place, and we'd rather not use range
144 144 def _bytespath(p):
145 145 if p is None:
146 146 return p
147 147 return p.encode('utf-8')
148 148
149 149 def _strpath(p):
150 150 if p is None:
151 151 return p
152 152 return p.decode('utf-8')
153 153
154 154 elif sys.version_info >= (3, 0, 0):
155 155 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
156 156 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
157 157 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
158 158 else:
159 159 PYTHON3 = False
160 160
161 161 # In python 2.x, path operations are generally done using
162 162 # bytestrings by default, so we don't have to do any extra
163 163 # fiddling there. We define the wrapper functions anyway just to
164 164 # help keep code consistent between platforms.
165 165 def _bytespath(p):
166 166 return p
167 167
168 168 _strpath = _bytespath
169 169
170 170 # For Windows support
171 171 wifexited = getattr(os, "WIFEXITED", lambda x: False)
172 172
173 173 # Whether to use IPv6
174 174 def checksocketfamily(name, port=20058):
175 175 """return true if we can listen on localhost using family=name
176 176
177 177 name should be either 'AF_INET' or 'AF_INET6'.
178 178 A port already being in use is okay - EADDRINUSE is considered successful.
179 179 """
180 180 family = getattr(socket, name, None)
181 181 if family is None:
182 182 return False
183 183 try:
184 184 s = socket.socket(family, socket.SOCK_STREAM)
185 185 s.bind(('localhost', port))
186 186 s.close()
187 187 return True
188 188 except socket.error as exc:
189 189 if exc.errno == errno.EADDRINUSE:
190 190 return True
191 191 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
192 192 return False
193 193 else:
194 194 raise
195 195 else:
196 196 return False
197 197
198 198 # useipv6 will be set by parseargs
199 199 useipv6 = None
200 200
201 201 def checkportisavailable(port):
202 202 """return true if a port seems free to bind on localhost"""
203 203 if useipv6:
204 204 family = socket.AF_INET6
205 205 else:
206 206 family = socket.AF_INET
207 207 try:
208 208 s = socket.socket(family, socket.SOCK_STREAM)
209 209 s.bind(('localhost', port))
210 210 s.close()
211 211 return True
212 212 except socket.error as exc:
213 213 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
214 214 errno.EPROTONOSUPPORT):
215 215 raise
216 216 return False
217 217
218 218 closefds = os.name == 'posix'
219 219 def Popen4(cmd, wd, timeout, env=None):
220 220 processlock.acquire()
221 221 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
222 222 close_fds=closefds,
223 223 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
224 224 stderr=subprocess.STDOUT)
225 225 processlock.release()
226 226
227 227 p.fromchild = p.stdout
228 228 p.tochild = p.stdin
229 229 p.childerr = p.stderr
230 230
231 231 p.timeout = False
232 232 if timeout:
233 233 def t():
234 234 start = time.time()
235 235 while time.time() - start < timeout and p.returncode is None:
236 236 time.sleep(.1)
237 237 p.timeout = True
238 238 if p.returncode is None:
239 239 terminate(p)
240 240 threading.Thread(target=t).start()
241 241
242 242 return p
243 243
244 244 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
245 245 IMPL_PATH = b'PYTHONPATH'
246 246 if 'java' in sys.platform:
247 247 IMPL_PATH = b'JYTHONPATH'
248 248
249 249 defaults = {
250 250 'jobs': ('HGTEST_JOBS', 1),
251 251 'timeout': ('HGTEST_TIMEOUT', 180),
252 252 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
253 253 'port': ('HGTEST_PORT', 20059),
254 254 'shell': ('HGTEST_SHELL', 'sh'),
255 255 }
256 256
257 257 def canonpath(path):
258 258 return os.path.realpath(os.path.expanduser(path))
259 259
260 260 def parselistfiles(files, listtype, warn=True):
261 261 entries = dict()
262 262 for filename in files:
263 263 try:
264 264 path = os.path.expanduser(os.path.expandvars(filename))
265 265 f = open(path, "rb")
266 266 except IOError as err:
267 267 if err.errno != errno.ENOENT:
268 268 raise
269 269 if warn:
270 270 print("warning: no such %s file: %s" % (listtype, filename))
271 271 continue
272 272
273 273 for line in f.readlines():
274 274 line = line.split(b'#', 1)[0].strip()
275 275 if line:
276 276 entries[line] = filename
277 277
278 278 f.close()
279 279 return entries
280 280
281 281 def parsettestcases(path):
282 282 """read a .t test file, return a set of test case names
283 283
284 284 If path does not exist, return an empty set.
285 285 """
286 286 cases = set()
287 287 try:
288 288 with open(path, 'rb') as f:
289 289 for l in f:
290 290 if l.startswith(b'#testcases '):
291 291 cases.update(l[11:].split())
292 292 except IOError as ex:
293 293 if ex.errno != errno.ENOENT:
294 294 raise
295 295 return cases
296 296
297 297 def getparser():
298 298 """Obtain the OptionParser used by the CLI."""
299 299 parser = optparse.OptionParser("%prog [options] [tests]")
300 300
301 301 # keep these sorted
302 302 parser.add_option("--blacklist", action="append",
303 303 help="skip tests listed in the specified blacklist file")
304 304 parser.add_option("--whitelist", action="append",
305 305 help="always run tests listed in the specified whitelist file")
306 306 parser.add_option("--test-list", action="append",
307 307 help="read tests to run from the specified file")
308 308 parser.add_option("--changed", type="string",
309 309 help="run tests that are changed in parent rev or working directory")
310 310 parser.add_option("-C", "--annotate", action="store_true",
311 311 help="output files annotated with coverage")
312 312 parser.add_option("-c", "--cover", action="store_true",
313 313 help="print a test coverage report")
314 314 parser.add_option("--color", choices=["always", "auto", "never"],
315 315 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
316 316 help="colorisation: always|auto|never (default: auto)")
317 317 parser.add_option("-d", "--debug", action="store_true",
318 318 help="debug mode: write output of test scripts to console"
319 319 " rather than capturing and diffing it (disables timeout)")
320 320 parser.add_option("-f", "--first", action="store_true",
321 321 help="exit on the first test failure")
322 322 parser.add_option("-H", "--htmlcov", action="store_true",
323 323 help="create an HTML report of the coverage of the files")
324 324 parser.add_option("-i", "--interactive", action="store_true",
325 325 help="prompt to accept changed output")
326 326 parser.add_option("-j", "--jobs", type="int",
327 327 help="number of jobs to run in parallel"
328 328 " (default: $%s or %d)" % defaults['jobs'])
329 329 parser.add_option("--keep-tmpdir", action="store_true",
330 330 help="keep temporary directory after running tests")
331 331 parser.add_option("-k", "--keywords",
332 332 help="run tests matching keywords")
333 333 parser.add_option("--list-tests", action="store_true",
334 334 help="list tests instead of running them")
335 335 parser.add_option("-l", "--local", action="store_true",
336 336 help="shortcut for --with-hg=<testdir>/../hg, "
337 337 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
338 338 parser.add_option("--loop", action="store_true",
339 339 help="loop tests repeatedly")
340 340 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
341 341 help="run each test N times (default=1)", default=1)
342 342 parser.add_option("-n", "--nodiff", action="store_true",
343 343 help="skip showing test changes")
344 344 parser.add_option("--outputdir", type="string",
345 345 help="directory to write error logs to (default=test directory)")
346 346 parser.add_option("-p", "--port", type="int",
347 347 help="port on which servers should listen"
348 348 " (default: $%s or %d)" % defaults['port'])
349 349 parser.add_option("--compiler", type="string",
350 350 help="compiler to build with")
351 351 parser.add_option("--pure", action="store_true",
352 352 help="use pure Python code instead of C extensions")
353 353 parser.add_option("-R", "--restart", action="store_true",
354 354 help="restart at last error")
355 355 parser.add_option("-r", "--retest", action="store_true",
356 356 help="retest failed tests")
357 357 parser.add_option("-S", "--noskips", action="store_true",
358 358 help="don't report skip tests verbosely")
359 359 parser.add_option("--shell", type="string",
360 360 help="shell to use (default: $%s or %s)" % defaults['shell'])
361 361 parser.add_option("-t", "--timeout", type="int",
362 362 help="kill errant tests after TIMEOUT seconds"
363 363 " (default: $%s or %d)" % defaults['timeout'])
364 364 parser.add_option("--slowtimeout", type="int",
365 365 help="kill errant slow tests after SLOWTIMEOUT seconds"
366 366 " (default: $%s or %d)" % defaults['slowtimeout'])
367 367 parser.add_option("--time", action="store_true",
368 368 help="time how long each test takes")
369 369 parser.add_option("--json", action="store_true",
370 370 help="store test result data in 'report.json' file")
371 371 parser.add_option("--tmpdir", type="string",
372 372 help="run tests in the given temporary directory"
373 373 " (implies --keep-tmpdir)")
374 374 parser.add_option("-v", "--verbose", action="store_true",
375 375 help="output verbose messages")
376 376 parser.add_option("--xunit", type="string",
377 377 help="record xunit results at specified path")
378 378 parser.add_option("--view", type="string",
379 379 help="external diff viewer")
380 380 parser.add_option("--with-hg", type="string",
381 381 metavar="HG",
382 382 help="test using specified hg script rather than a "
383 383 "temporary installation")
384 384 parser.add_option("--chg", action="store_true",
385 385 help="install and use chg wrapper in place of hg")
386 386 parser.add_option("--with-chg", metavar="CHG",
387 387 help="use specified chg wrapper in place of hg")
388 388 parser.add_option("--ipv6", action="store_true",
389 389 help="prefer IPv6 to IPv4 for network related tests")
390 390 parser.add_option("-3", "--py3k-warnings", action="store_true",
391 391 help="enable Py3k warnings on Python 2.7+")
392 392 # This option should be deleted once test-check-py3-compat.t and other
393 393 # Python 3 tests run with Python 3.
394 394 parser.add_option("--with-python3", metavar="PYTHON3",
395 395 help="Python 3 interpreter (if running under Python 2)"
396 396 " (TEMPORARY)")
397 397 parser.add_option('--extra-config-opt', action="append",
398 398 help='set the given config opt in the test hgrc')
399 399 parser.add_option('--random', action="store_true",
400 400 help='run tests in random order')
401 401 parser.add_option('--profile-runner', action='store_true',
402 402 help='run statprof on run-tests')
403 403 parser.add_option('--allow-slow-tests', action='store_true',
404 404 help='allow extremely slow tests')
405 405 parser.add_option('--showchannels', action='store_true',
406 406 help='show scheduling channels')
407 407 parser.add_option('--known-good-rev', type="string",
408 408 metavar="known_good_rev",
409 409 help=("Automatically bisect any failures using this "
410 410 "revision as a known-good revision."))
411 411 parser.add_option('--bisect-repo', type="string",
412 412 metavar='bisect_repo',
413 413 help=("Path of a repo to bisect. Use together with "
414 414 "--known-good-rev"))
415 415
416 416 for option, (envvar, default) in defaults.items():
417 417 defaults[option] = type(default)(os.environ.get(envvar, default))
418 418 parser.set_defaults(**defaults)
419 419
420 420 return parser
421 421
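A brief sketch of how the option defaults above are resolved from the environment by the loop at the end of getparser(); the HGTEST_JOBS value below is hypothetical:

import os
defaults = {'jobs': ('HGTEST_JOBS', 1), 'timeout': ('HGTEST_TIMEOUT', 180)}
os.environ['HGTEST_JOBS'] = '4'  # hypothetical override
for option, (envvar, default) in defaults.items():
    defaults[option] = type(default)(os.environ.get(envvar, default))
# defaults == {'jobs': 4, 'timeout': 180}: type(default) coerces the string
# pulled from the environment back to the default's type (here int)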
422 422 def parseargs(args, parser):
423 423 """Parse arguments with our OptionParser and validate results."""
424 424 (options, args) = parser.parse_args(args)
425 425
426 426 # jython is always pure
427 427 if 'java' in sys.platform or '__pypy__' in sys.modules:
428 428 options.pure = True
429 429
430 430 if options.with_hg:
431 431 options.with_hg = canonpath(_bytespath(options.with_hg))
432 432 if not (os.path.isfile(options.with_hg) and
433 433 os.access(options.with_hg, os.X_OK)):
434 434 parser.error('--with-hg must specify an executable hg script')
435 435 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
436 436 sys.stderr.write('warning: --with-hg should specify an hg script\n')
437 437 if options.local:
438 438 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
439 439 reporootdir = os.path.dirname(testdir)
440 440 pathandattrs = [(b'hg', 'with_hg')]
441 441 if options.chg:
442 442 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
443 443 for relpath, attr in pathandattrs:
444 444 binpath = os.path.join(reporootdir, relpath)
445 445 if os.name != 'nt' and not os.access(binpath, os.X_OK):
446 446 parser.error('--local specified, but %r not found or '
447 447 'not executable' % binpath)
448 448 setattr(options, attr, binpath)
449 449
450 450 if (options.chg or options.with_chg) and os.name == 'nt':
451 451 parser.error('chg does not work on %s' % os.name)
452 452 if options.with_chg:
453 453 options.chg = False # no installation to temporary location
454 454 options.with_chg = canonpath(_bytespath(options.with_chg))
455 455 if not (os.path.isfile(options.with_chg) and
456 456 os.access(options.with_chg, os.X_OK)):
457 457 parser.error('--with-chg must specify a chg executable')
458 458 if options.chg and options.with_hg:
459 459 # chg shares installation location with hg
460 460 parser.error('--chg does not work when --with-hg is specified '
461 461 '(use --with-chg instead)')
462 462
463 463 if options.color == 'always' and not pygmentspresent:
464 464 sys.stderr.write('warning: --color=always ignored because '
465 465 'pygments is not installed\n')
466 466
467 467 if options.bisect_repo and not options.known_good_rev:
468 468 parser.error("--bisect-repo cannot be used without --known-good-rev")
469 469
470 470 global useipv6
471 471 if options.ipv6:
472 472 useipv6 = checksocketfamily('AF_INET6')
473 473 else:
474 474 # only use IPv6 if IPv4 is unavailable and IPv6 is available
475 475 useipv6 = ((not checksocketfamily('AF_INET'))
476 476 and checksocketfamily('AF_INET6'))
477 477
478 478 options.anycoverage = options.cover or options.annotate or options.htmlcov
479 479 if options.anycoverage:
480 480 try:
481 481 import coverage
482 482 covver = version.StrictVersion(coverage.__version__).version
483 483 if covver < (3, 3):
484 484 parser.error('coverage options require coverage 3.3 or later')
485 485 except ImportError:
486 486 parser.error('coverage options now require the coverage package')
487 487
488 488 if options.anycoverage and options.local:
489 489 # this needs some path mangling somewhere, I guess
490 490 parser.error("sorry, coverage options do not work when --local "
491 491 "is specified")
492 492
493 493 if options.anycoverage and options.with_hg:
494 494 parser.error("sorry, coverage options do not work when --with-hg "
495 495 "is specified")
496 496
497 497 global verbose
498 498 if options.verbose:
499 499 verbose = ''
500 500
501 501 if options.tmpdir:
502 502 options.tmpdir = canonpath(options.tmpdir)
503 503
504 504 if options.jobs < 1:
505 505 parser.error('--jobs must be positive')
506 506 if options.interactive and options.debug:
507 507 parser.error("-i/--interactive and -d/--debug are incompatible")
508 508 if options.debug:
509 509 if options.timeout != defaults['timeout']:
510 510 sys.stderr.write(
511 511 'warning: --timeout option ignored with --debug\n')
512 512 if options.slowtimeout != defaults['slowtimeout']:
513 513 sys.stderr.write(
514 514 'warning: --slowtimeout option ignored with --debug\n')
515 515 options.timeout = 0
516 516 options.slowtimeout = 0
517 517 if options.py3k_warnings:
518 518 if PYTHON3:
519 519 parser.error(
520 520 '--py3k-warnings can only be used on Python 2.7')
521 521 if options.with_python3:
522 522 if PYTHON3:
523 523 parser.error('--with-python3 cannot be used when executing with '
524 524 'Python 3')
525 525
526 526 options.with_python3 = canonpath(options.with_python3)
527 527 # Verify Python3 executable is acceptable.
528 528 proc = subprocess.Popen([options.with_python3, b'--version'],
529 529 stdout=subprocess.PIPE,
530 530 stderr=subprocess.STDOUT)
531 531 out, _err = proc.communicate()
532 532 ret = proc.wait()
533 533 if ret != 0:
534 534 parser.error('could not determine version of python 3')
535 535 if not out.startswith('Python '):
536 536 parser.error('unexpected output from python3 --version: %s' %
537 537 out)
538 538 vers = version.LooseVersion(out[len('Python '):])
539 539 if vers < version.LooseVersion('3.5.0'):
540 540 parser.error('--with-python3 version must be 3.5.0 or greater; '
541 541 'got %s' % out)
542 542
543 543 if options.blacklist:
544 544 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
545 545 if options.whitelist:
546 546 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
547 547 else:
548 548 options.whitelisted = {}
549 549
550 550 if options.showchannels:
551 551 options.nodiff = True
552 552
553 553 return (options, args)
554 554
555 555 def rename(src, dst):
556 556 """Like os.rename(), trade atomicity and opened files friendliness
557 557 for existing destination support.
558 558 """
559 559 shutil.copy(src, dst)
560 560 os.remove(src)
561 561
562 562 _unified_diff = difflib.unified_diff
563 563 if PYTHON3:
564 564 import functools
565 565 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
566 566
567 567 def getdiff(expected, output, ref, err):
568 568 servefail = False
569 569 lines = []
570 570 for line in _unified_diff(expected, output, ref, err):
571 571 if line.startswith(b'+++') or line.startswith(b'---'):
572 572 line = line.replace(b'\\', b'/')
573 573 if line.endswith(b' \n'):
574 574 line = line[:-2] + b'\n'
575 575 lines.append(line)
576 576 if not servefail and line.startswith(
577 577 b'+ abort: child process failed to start'):
578 578 servefail = True
579 579
580 580 return servefail, lines
581 581
582 582 verbose = False
583 583 def vlog(*msg):
584 584 """Log only when in verbose mode."""
585 585 if verbose is False:
586 586 return
587 587
588 588 return log(*msg)
589 589
590 590 # Bytes that break XML even in a CDATA block: control characters 0-31
591 591 # sans \t, \n and \r
592 592 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
593 593
594 594 # Match feature conditionalized output lines of the form below, capturing the
595 595 # feature list in group 2, and the preceding line output in group 1:
596 596 #
597 597 # output..output (feature !)\n
598 598 optline = re.compile(b'(.*) \((.+?) !\)\n$')
599 599
600 600 def cdatasafe(data):
601 601 """Make a string safe to include in a CDATA block.
602 602
603 603 Certain control characters are illegal in a CDATA block, and
604 604 there's no way to include a ]]> in a CDATA either. This function
605 605 replaces illegal bytes with ? and adds a space between the ]] so
606 606 that it won't break the CDATA block.
607 607 """
608 608 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
609 609
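A minimal, self-contained sketch of the cdatasafe() transform above (the input bytes are made up):

import re
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
data = b'ok\x07bad]]>end'  # hypothetical raw test output
out = CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
# out == b'ok?bad] ]>end': the BEL byte becomes '?' and ']]>' is split so it
# cannot terminate the surrounding CDATA block early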
610 610 def log(*msg):
611 611 """Log something to stdout.
612 612
613 613 Arguments are strings to print.
614 614 """
615 615 with iolock:
616 616 if verbose:
617 617 print(verbose, end=' ')
618 618 for m in msg:
619 619 print(m, end=' ')
620 620 print()
621 621 sys.stdout.flush()
622 622
623 623 def highlightdiff(line, color):
624 624 if not color:
625 625 return line
626 626 assert pygmentspresent
627 627 return pygments.highlight(line.decode('latin1'), difflexer,
628 628 terminal256formatter).encode('latin1')
629 629
630 630 def highlightmsg(msg, color):
631 631 if not color:
632 632 return msg
633 633 assert pygmentspresent
634 634 return pygments.highlight(msg, runnerlexer, runnerformatter)
635 635
636 636 def terminate(proc):
637 637 """Terminate subprocess"""
638 638 vlog('# Terminating process %d' % proc.pid)
639 639 try:
640 640 proc.terminate()
641 641 except OSError:
642 642 pass
643 643
644 644 def killdaemons(pidfile):
645 645 import killdaemons as killmod
646 646 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
647 647 logfn=vlog)
648 648
649 649 class Test(unittest.TestCase):
650 650 """Encapsulates a single, runnable test.
651 651
652 652 While this class conforms to the unittest.TestCase API, it differs in that
653 653 instances need to be instantiated manually. (Typically, unittest.TestCase
654 654 classes are instantiated automatically by scanning modules.)
655 655 """
656 656
657 657 # Status code reserved for skipped tests (used by hghave).
658 658 SKIPPED_STATUS = 80
659 659
660 660 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
661 661 debug=False,
662 662 timeout=None,
663 663 startport=None, extraconfigopts=None,
664 664 py3kwarnings=False, shell=None, hgcommand=None,
665 665 slowtimeout=None, usechg=False,
666 666 useipv6=False):
667 667 """Create a test from parameters.
668 668
669 669 path is the full path to the file defining the test.
670 670
671 671 tmpdir is the main temporary directory to use for this test.
672 672
673 673 keeptmpdir determines whether to keep the test's temporary directory
674 674 after execution. It defaults to removal (False).
675 675
676 676 debug mode will make the test execute verbosely, with unfiltered
677 677 output.
678 678
679 679 timeout controls the maximum run time of the test. It is ignored when
680 680 debug is True. See slowtimeout for tests with #require slow.
681 681
682 682 slowtimeout overrides timeout if the test has #require slow.
683 683
684 684 startport controls the starting port number to use for this test. Each
685 685 test will reserve 3 port numbers for execution. It is the caller's
686 686 responsibility to allocate a non-overlapping port range to Test
687 687 instances.
688 688
689 689 extraconfigopts is an iterable of extra hgrc config options. Values
690 690 must have the form "key=value" (something understood by hgrc). Values
691 691 of the form "foo.key=value" will result in "[foo] key=value".
692 692
693 693 py3kwarnings enables Py3k warnings.
694 694
695 695 shell is the shell to execute tests in.
696 696 """
697 697 if timeout is None:
698 698 timeout = defaults['timeout']
699 699 if startport is None:
700 700 startport = defaults['port']
701 701 if slowtimeout is None:
702 702 slowtimeout = defaults['slowtimeout']
703 703 self.path = path
704 704 self.bname = os.path.basename(path)
705 705 self.name = _strpath(self.bname)
706 706 self._testdir = os.path.dirname(path)
707 707 self._outputdir = outputdir
708 708 self._tmpname = os.path.basename(path)
709 709 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
710 710
711 711 self._threadtmp = tmpdir
712 712 self._keeptmpdir = keeptmpdir
713 713 self._debug = debug
714 714 self._timeout = timeout
715 715 self._slowtimeout = slowtimeout
716 716 self._startport = startport
717 717 self._extraconfigopts = extraconfigopts or []
718 718 self._py3kwarnings = py3kwarnings
719 719 self._shell = _bytespath(shell)
720 720 self._hgcommand = hgcommand or b'hg'
721 721 self._usechg = usechg
722 722 self._useipv6 = useipv6
723 723
724 724 self._aborted = False
725 725 self._daemonpids = []
726 726 self._finished = None
727 727 self._ret = None
728 728 self._out = None
729 729 self._skipped = None
730 730 self._testtmp = None
731 731 self._chgsockdir = None
732 732
733 733 self._refout = self.readrefout()
734 734
735 735 def readrefout(self):
736 736 """read reference output"""
737 737 # If we're not in --debug mode and reference output file exists,
738 738 # check test output against it.
739 739 if self._debug:
740 740 return None # to match "out is None"
741 741 elif os.path.exists(self.refpath):
742 742 with open(self.refpath, 'rb') as f:
743 743 return f.read().splitlines(True)
744 744 else:
745 745 return []
746 746
747 747 # needed to get base class __repr__ running
748 748 @property
749 749 def _testMethodName(self):
750 750 return self.name
751 751
752 752 def __str__(self):
753 753 return self.name
754 754
755 755 def shortDescription(self):
756 756 return self.name
757 757
758 758 def setUp(self):
759 759 """Tasks to perform before run()."""
760 760 self._finished = False
761 761 self._ret = None
762 762 self._out = None
763 763 self._skipped = None
764 764
765 765 try:
766 766 os.mkdir(self._threadtmp)
767 767 except OSError as e:
768 768 if e.errno != errno.EEXIST:
769 769 raise
770 770
771 771 name = self._tmpname
772 772 self._testtmp = os.path.join(self._threadtmp, name)
773 773 os.mkdir(self._testtmp)
774 774
775 775 # Remove any previous output files.
776 776 if os.path.exists(self.errpath):
777 777 try:
778 778 os.remove(self.errpath)
779 779 except OSError as e:
780 780 # We might have raced another test to clean up a .err
781 781 # file, so ignore ENOENT when removing a previous .err
782 782 # file.
783 783 if e.errno != errno.ENOENT:
784 784 raise
785 785
786 786 if self._usechg:
787 787 self._chgsockdir = os.path.join(self._threadtmp,
788 788 b'%s.chgsock' % name)
789 789 os.mkdir(self._chgsockdir)
790 790
791 791 def run(self, result):
792 792 """Run this test and report results against a TestResult instance."""
793 793 # This function is extremely similar to unittest.TestCase.run(). Once
794 794 # we require Python 2.7 (or at least its version of unittest), this
795 795 # function can largely go away.
796 796 self._result = result
797 797 result.startTest(self)
798 798 try:
799 799 try:
800 800 self.setUp()
801 801 except (KeyboardInterrupt, SystemExit):
802 802 self._aborted = True
803 803 raise
804 804 except Exception:
805 805 result.addError(self, sys.exc_info())
806 806 return
807 807
808 808 success = False
809 809 try:
810 810 self.runTest()
811 811 except KeyboardInterrupt:
812 812 self._aborted = True
813 813 raise
814 814 except unittest.SkipTest as e:
815 815 result.addSkip(self, str(e))
816 816 # The base class will have already counted this as a
817 817 # test we "ran", but we want to exclude skipped tests
818 818 # from the count of tests that were run.
819 819 result.testsRun -= 1
820 820 except self.failureException as e:
821 821 # This differs from unittest in that we don't capture
822 822 # the stack trace. This is for historical reasons and
823 823 # this decision could be revisited in the future,
824 824 # especially for PythonTest instances.
825 825 if result.addFailure(self, str(e)):
826 826 success = True
827 827 except Exception:
828 828 result.addError(self, sys.exc_info())
829 829 else:
830 830 success = True
831 831
832 832 try:
833 833 self.tearDown()
834 834 except (KeyboardInterrupt, SystemExit):
835 835 self._aborted = True
836 836 raise
837 837 except Exception:
838 838 result.addError(self, sys.exc_info())
839 839 success = False
840 840
841 841 if success:
842 842 result.addSuccess(self)
843 843 finally:
844 844 result.stopTest(self, interrupted=self._aborted)
845 845
846 846 def runTest(self):
847 847 """Run this test instance.
848 848
849 849 This will return a tuple describing the result of the test.
850 850 """
851 851 env = self._getenv()
852 852 self._genrestoreenv(env)
853 853 self._daemonpids.append(env['DAEMON_PIDS'])
854 854 self._createhgrc(env['HGRCPATH'])
855 855
856 856 vlog('# Test', self.name)
857 857
858 858 ret, out = self._run(env)
859 859 self._finished = True
860 860 self._ret = ret
861 861 self._out = out
862 862
863 863 def describe(ret):
864 864 if ret < 0:
865 865 return 'killed by signal: %d' % -ret
866 866 return 'returned error code %d' % ret
867 867
868 868 self._skipped = False
869 869
870 870 if ret == self.SKIPPED_STATUS:
871 871 if out is None: # Debug mode, nothing to parse.
872 872 missing = ['unknown']
873 873 failed = None
874 874 else:
875 875 missing, failed = TTest.parsehghaveoutput(out)
876 876
877 877 if not missing:
878 878 missing = ['skipped']
879 879
880 880 if failed:
881 881 self.fail('hghave failed checking for %s' % failed[-1])
882 882 else:
883 883 self._skipped = True
884 884 raise unittest.SkipTest(missing[-1])
885 885 elif ret == 'timeout':
886 886 self.fail('timed out')
887 887 elif ret is False:
888 888 self.fail('no result code from test')
889 889 elif out != self._refout:
890 890 # Diff generation may rely on written .err file.
891 891 if (ret != 0 or out != self._refout) and not self._skipped \
892 892 and not self._debug:
893 893 f = open(self.errpath, 'wb')
894 894 for line in out:
895 895 f.write(line)
896 896 f.close()
897 897
898 898 # The result object handles diff calculation for us.
899 899 if self._result.addOutputMismatch(self, ret, out, self._refout):
900 900 # change was accepted, skip failing
901 901 return
902 902
903 903 if ret:
904 904 msg = 'output changed and ' + describe(ret)
905 905 else:
906 906 msg = 'output changed'
907 907
908 908 self.fail(msg)
909 909 elif ret:
910 910 self.fail(describe(ret))
911 911
912 912 def tearDown(self):
913 913 """Tasks to perform after run()."""
914 914 for entry in self._daemonpids:
915 915 killdaemons(entry)
916 916 self._daemonpids = []
917 917
918 918 if self._keeptmpdir:
919 919 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
920 920 (self._testtmp.decode('utf-8'),
921 921 self._threadtmp.decode('utf-8')))
922 922 else:
923 923 shutil.rmtree(self._testtmp, True)
924 924 shutil.rmtree(self._threadtmp, True)
925 925
926 926 if self._usechg:
927 927 # chg servers stop automatically once they notice that the socket
928 928 # files have been deleted
929 929 shutil.rmtree(self._chgsockdir, True)
930 930
931 931 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
932 932 and not self._debug and self._out:
933 933 f = open(self.errpath, 'wb')
934 934 for line in self._out:
935 935 f.write(line)
936 936 f.close()
937 937
938 938 vlog("# Ret was:", self._ret, '(%s)' % self.name)
939 939
940 940 def _run(self, env):
941 941 # This should be implemented in child classes to run tests.
942 942 raise unittest.SkipTest('unknown test type')
943 943
944 944 def abort(self):
945 945 """Terminate execution of this test."""
946 946 self._aborted = True
947 947
948 948 def _portmap(self, i):
949 949 offset = b'' if i == 0 else b'%d' % i
950 950 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
951 951
952 952 def _getreplacements(self):
953 953 """Obtain a mapping of text replacements to apply to test output.
954 954
955 955 Test output needs to be normalized so it can be compared to expected
956 956 output. This function defines how some of that normalization will
957 957 occur.
958 958 """
959 959 r = [
960 960 # This list should be parallel to defineport in _getenv
961 961 self._portmap(0),
962 962 self._portmap(1),
963 963 self._portmap(2),
964 964 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
965 965 br'\1 (glob)'),
966 966 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
967 967 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
968 968 ]
969 969 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
970 970
971 971 testdir = os.path.dirname(self.path)
972 972 replacementfile = os.path.join(testdir, b'common-pattern.py')
973 973
974 974 if os.path.exists(replacementfile):
975 975 data = {}
976 976 with open(replacementfile, mode='rb') as source:
977 977 # the intermediate 'compile' step helps with debugging
978 978 code = compile(source.read(), replacementfile, 'exec')
979 979 exec(code, data)
980 980 r.extend(data.get('substitutions', ()))
981 981 return r
982 982
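A small sketch of the port normalization done by _portmap() and _getreplacements() above, assuming the default start port of 20059 (HGTEST_PORT) for this test:

import re
startport = 20059
pattern, repl = br':%d\b' % (startport + 1), b':$HGPORT1'
line = b'listening at http://localhost:20060/ (bound to *:20060)'
print(re.sub(pattern, repl, line))
# b'listening at http://localhost:$HGPORT1/ (bound to *:$HGPORT1)'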
983 983 def _escapepath(self, p):
984 984 if os.name == 'nt':
985 985 return (
986 986 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
987 987 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
988 988 for c in p))
989 989 )
990 990 else:
991 991 return re.escape(p)
992 992
993 993 def _localip(self):
994 994 if self._useipv6:
995 995 return b'::1'
996 996 else:
997 997 return b'127.0.0.1'
998 998
999 999 def _genrestoreenv(self, testenv):
1000 1000 """Generate a script that can be used by tests to restore the original
1001 1001 environment."""
1002 1002 # Put the restoreenv script inside self._threadtmp
1003 1003 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1004 1004 testenv['HGTEST_RESTOREENV'] = scriptpath
1005 1005
1006 1006 # Only restore environment variable names that the shell allows
1007 1007 # us to export.
1008 1008 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1009 1009
1010 1010 # Do not restore these variables; otherwise tests would fail.
1011 1011 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1012 1012
1013 1013 with open(scriptpath, 'w') as envf:
1014 1014 for name, value in origenviron.items():
1015 1015 if not name_regex.match(name):
1016 1016 # Skip environment variables with unusual names not
1017 1017 # allowed by most shells.
1018 1018 continue
1019 1019 if name in reqnames:
1020 1020 continue
1021 1021 envf.write('%s=%s\n' % (name, shellquote(value)))
1022 1022
1023 1023 for name in testenv:
1024 1024 if name in origenviron or name in reqnames:
1025 1025 continue
1026 1026 envf.write('unset %s\n' % (name,))
1027 1027
1028 1028 def _getenv(self):
1029 1029 """Obtain environment variables to use during test execution."""
1030 1030 def defineport(i):
1031 1031 offset = '' if i == 0 else '%s' % i
1032 1032 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1033 1033 env = os.environ.copy()
1034 1034 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1035 1035 env['HGEMITWARNINGS'] = '1'
1036 1036 env['TESTTMP'] = self._testtmp
1037 1037 env['HOME'] = self._testtmp
1038 1038 # This number should match portneeded in _getport
1039 1039 for port in xrange(3):
1040 1040 # This list should be parallel to _portmap in _getreplacements
1041 1041 defineport(port)
1042 1042 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1043 1043 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1044 1044 env["HGEDITOR"] = ('"' + sys.executable + '"'
1045 1045 + ' -c "import sys; sys.exit(0)"')
1046 1046 env["HGMERGE"] = "internal:merge"
1047 1047 env["HGUSER"] = "test"
1048 1048 env["HGENCODING"] = "ascii"
1049 1049 env["HGENCODINGMODE"] = "strict"
1050 1050 env['HGIPV6'] = str(int(self._useipv6))
1051 1051
1052 1052 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1053 1053 # IP addresses.
1054 1054 env['LOCALIP'] = self._localip()
1055 1055
1056 1056 # Reset some environment variables to well-known values so that
1057 1057 # the tests produce repeatable output.
1058 1058 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1059 1059 env['TZ'] = 'GMT'
1060 1060 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1061 1061 env['COLUMNS'] = '80'
1062 1062 env['TERM'] = 'xterm'
1063 1063
1064 1064 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1065 1065 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1066 1066 'NO_PROXY CHGDEBUG').split():
1067 1067 if k in env:
1068 1068 del env[k]
1069 1069
1070 1070 # unset env related to hooks
1071 1071 for k in env.keys():
1072 1072 if k.startswith('HG_'):
1073 1073 del env[k]
1074 1074
1075 1075 if self._usechg:
1076 1076 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1077 1077
1078 1078 return env
1079 1079
1080 1080 def _createhgrc(self, path):
1081 1081 """Create an hgrc file for this test."""
1082 1082 hgrc = open(path, 'wb')
1083 1083 hgrc.write(b'[ui]\n')
1084 1084 hgrc.write(b'slash = True\n')
1085 1085 hgrc.write(b'interactive = False\n')
1086 1086 hgrc.write(b'mergemarkers = detailed\n')
1087 1087 hgrc.write(b'promptecho = True\n')
1088 1088 hgrc.write(b'[defaults]\n')
1089 1089 hgrc.write(b'[devel]\n')
1090 1090 hgrc.write(b'all-warnings = true\n')
1091 1091 hgrc.write(b'default-date = 0 0\n')
1092 1092 hgrc.write(b'[largefiles]\n')
1093 1093 hgrc.write(b'usercache = %s\n' %
1094 1094 (os.path.join(self._testtmp, b'.cache/largefiles')))
1095 1095 hgrc.write(b'[web]\n')
1096 1096 hgrc.write(b'address = localhost\n')
1097 1097 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1098 1098
1099 1099 for opt in self._extraconfigopts:
1100 1100 section, key = opt.split('.', 1)
1101 1101 assert '=' in key, ('extra config opt %s must '
1102 1102 'have an = for assignment' % opt)
1103 1103 hgrc.write(b'[%s]\n%s\n' % (section, key))
1104 1104 hgrc.close()
1105 1105
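For illustration, a hypothetical --extra-config-opt value and what the loop above writes into the per-test hgrc:

# opt = 'ui.interactive=True'   (hypothetical command-line value)
# opt.split('.', 1) -> section='ui', key='interactive=True'
# so the generated hgrc gains:
#   [ui]
#   interactive=True
# the assertion enforces that the key part carries an '=' assignment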
1106 1106 def fail(self, msg):
1107 1107 # unittest differentiates between errored and failed.
1108 1108 # Failed is denoted by AssertionError (by default at least).
1109 1109 raise AssertionError(msg)
1110 1110
1111 1111 def _runcommand(self, cmd, env, normalizenewlines=False):
1112 1112 """Run command in a sub-process, capturing the output (stdout and
1113 1113 stderr).
1114 1114
1115 1115 Return a tuple (exitcode, output). output is None in debug mode.
1116 1116 """
1117 1117 if self._debug:
1118 1118 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
1119 1119 env=env)
1120 1120 ret = proc.wait()
1121 1121 return (ret, None)
1122 1122
1123 1123 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1124 1124 def cleanup():
1125 1125 terminate(proc)
1126 1126 ret = proc.wait()
1127 1127 if ret == 0:
1128 1128 ret = signal.SIGTERM << 8
1129 1129 killdaemons(env['DAEMON_PIDS'])
1130 1130 return ret
1131 1131
1132 1132 output = ''
1133 1133 proc.tochild.close()
1134 1134
1135 1135 try:
1136 1136 output = proc.fromchild.read()
1137 1137 except KeyboardInterrupt:
1138 1138 vlog('# Handling keyboard interrupt')
1139 1139 cleanup()
1140 1140 raise
1141 1141
1142 1142 ret = proc.wait()
1143 1143 if wifexited(ret):
1144 1144 ret = os.WEXITSTATUS(ret)
1145 1145
1146 1146 if proc.timeout:
1147 1147 ret = 'timeout'
1148 1148
1149 1149 if ret:
1150 1150 killdaemons(env['DAEMON_PIDS'])
1151 1151
1152 1152 for s, r in self._getreplacements():
1153 1153 output = re.sub(s, r, output)
1154 1154
1155 1155 if normalizenewlines:
1156 1156 output = output.replace('\r\n', '\n')
1157 1157
1158 1158 return ret, output.splitlines(True)
1159 1159
1160 1160 class PythonTest(Test):
1161 1161 """A Python-based test."""
1162 1162
1163 1163 @property
1164 1164 def refpath(self):
1165 1165 return os.path.join(self._testdir, b'%s.out' % self.bname)
1166 1166
1167 1167 def _run(self, env):
1168 1168 py3kswitch = self._py3kwarnings and b' -3' or b''
1169 1169 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
1170 1170 vlog("# Running", cmd)
1171 1171 normalizenewlines = os.name == 'nt'
1172 1172 result = self._runcommand(cmd, env,
1173 1173 normalizenewlines=normalizenewlines)
1174 1174 if self._aborted:
1175 1175 raise KeyboardInterrupt()
1176 1176
1177 1177 return result
1178 1178
1179 1179 # Some glob patterns apply only in some circumstances, so the script
1180 1180 # might want to remove (glob) annotations that otherwise should be
1181 1181 # retained.
1182 1182 checkcodeglobpats = [
1183 1183 # On Windows it looks like \ doesn't require a (glob), but we know
1184 1184 # better.
1185 1185 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1186 1186 re.compile(br'^moving \S+/.*[^)]$'),
1187 1187 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1188 1188 # Not all platforms have 127.0.0.1 as loopback (though most do),
1189 1189 # so we always glob that too.
1190 1190 re.compile(br'.*\$LOCALIP.*$'),
1191 1191 ]
1192 1192
1193 1193 bchr = chr
1194 1194 if PYTHON3:
1195 1195 bchr = lambda x: bytes([x])
1196 1196
1197 1197 class TTest(Test):
1198 1198 """A "t test" is a test backed by a .t file."""
1199 1199
1200 1200 SKIPPED_PREFIX = b'skipped: '
1201 1201 FAILED_PREFIX = b'hghave check failed: '
1202 1202 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1203 1203
1204 1204 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1205 1205 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1206 1206 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1207 1207
1208 1208 def __init__(self, path, *args, **kwds):
1209 1209 # accept an extra "case" parameter
1210 1210 case = None
1211 1211 if 'case' in kwds:
1212 1212 case = kwds.pop('case')
1213 1213 self._case = case
1214 1214 self._allcases = parsettestcases(path)
1215 1215 super(TTest, self).__init__(path, *args, **kwds)
1216 1216 if case:
1217 1217 self.name = '%s (case %s)' % (self.name, _strpath(case))
1218 1218 self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
1219 1219 self._tmpname += b'-%s' % case
1220 1220
1221 1221 @property
1222 1222 def refpath(self):
1223 1223 return os.path.join(self._testdir, self.bname)
1224 1224
1225 1225 def _run(self, env):
1226 1226 f = open(self.path, 'rb')
1227 1227 lines = f.readlines()
1228 1228 f.close()
1229 1229
1230 1230 # The .t file is both the reference output and the test input; keep the
1231 1231 # reference output updated with the test input. This avoids some race
1232 1232 # conditions where the reference output does not match the actual test.
1233 1233 if self._refout is not None:
1234 1234 self._refout = lines
1235 1235
1236 1236 salt, script, after, expected = self._parsetest(lines)
1237 1237
1238 1238 # Write out the generated script.
1239 1239 fname = b'%s.sh' % self._testtmp
1240 1240 f = open(fname, 'wb')
1241 1241 for l in script:
1242 1242 f.write(l)
1243 1243 f.close()
1244 1244
1245 1245 cmd = b'%s "%s"' % (self._shell, fname)
1246 1246 vlog("# Running", cmd)
1247 1247
1248 1248 exitcode, output = self._runcommand(cmd, env)
1249 1249
1250 1250 if self._aborted:
1251 1251 raise KeyboardInterrupt()
1252 1252
1253 1253 # Do not merge output if skipped. Return hghave message instead.
1254 1254 # Similarly, with --debug, output is None.
1255 1255 if exitcode == self.SKIPPED_STATUS or output is None:
1256 1256 return exitcode, output
1257 1257
1258 1258 return self._processoutput(exitcode, output, salt, after, expected)
1259 1259
1260 1260 def _hghave(self, reqs):
1261 1261 # TODO do something smarter when all other uses of hghave are gone.
1262 1262 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1263 1263 tdir = runtestdir.replace(b'\\', b'/')
1264 1264 proc = Popen4(b'%s -c "%s/hghave %s"' %
1265 1265 (self._shell, tdir, b' '.join(reqs)),
1266 1266 self._testtmp, 0, self._getenv())
1267 1267 stdout, stderr = proc.communicate()
1268 1268 ret = proc.wait()
1269 1269 if wifexited(ret):
1270 1270 ret = os.WEXITSTATUS(ret)
1271 1271 if ret == 2:
1272 1272 print(stdout.decode('utf-8'))
1273 1273 sys.exit(1)
1274 1274
1275 1275 if ret != 0:
1276 1276 return False, stdout
1277 1277
1278 1278 if b'slow' in reqs:
1279 1279 self._timeout = self._slowtimeout
1280 1280 return True, None
1281 1281
1282 1282 def _iftest(self, args):
1283 1283 # implements "#if"
1284 1284 reqs = []
1285 1285 for arg in args:
1286 1286 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1287 1287 if arg[3:] == self._case:
1288 1288 return False
1289 1289 elif arg in self._allcases:
1290 1290 if arg != self._case:
1291 1291 return False
1292 1292 else:
1293 1293 reqs.append(arg)
1294 1294 return self._hghave(reqs)[0]
1295 1295
1296 1296 def _parsetest(self, lines):
1297 1297 # We generate a shell script which outputs unique markers to line
1298 1298 # up script results with our source. These markers include input
1299 1299 # line number and the last return code.
1300 1300 salt = b"SALT%d" % time.time()
1301 1301 def addsalt(line, inpython):
1302 1302 if inpython:
1303 1303 script.append(b'%s %d 0\n' % (salt, line))
1304 1304 else:
1305 1305 script.append(b'echo %s %d $?\n' % (salt, line))
1306 1306
1307 1307 script = []
1308 1308
1309 1309 # After we run the shell script, we re-unify the script output
1310 1310 # with non-active parts of the source, with synchronization by our
1311 1311 # SALT line number markers. The after table contains the non-active
1312 1312 # components, ordered by line number.
1313 1313 after = {}
1314 1314
1315 1315 # Expected shell script output.
1316 1316 expected = {}
1317 1317
1318 1318 pos = prepos = -1
1319 1319
1320 1320 # True or False when in a true or false conditional section
1321 1321 skipping = None
1322 1322
1323 1323 # We keep track of whether or not we're in a Python block so we
1324 1324 # can generate the surrounding doctest magic.
1325 1325 inpython = False
1326 1326
1327 1327 if self._debug:
1328 1328 script.append(b'set -x\n')
1329 1329 if self._hgcommand != b'hg':
1330 1330 script.append(b'alias hg="%s"\n' % self._hgcommand)
1331 1331 if os.getenv('MSYSTEM'):
1332 1332 script.append(b'alias pwd="pwd -W"\n')
1333 1333
1334 1334 n = 0
1335 1335 for n, l in enumerate(lines):
1336 1336 if not l.endswith(b'\n'):
1337 1337 l += b'\n'
1338 1338 if l.startswith(b'#require'):
1339 1339 lsplit = l.split()
1340 1340 if len(lsplit) < 2 or lsplit[0] != b'#require':
1341 1341 after.setdefault(pos, []).append(' !!! invalid #require\n')
1342 1342 haveresult, message = self._hghave(lsplit[1:])
1343 1343 if not haveresult:
1344 1344 script = [b'echo "%s"\nexit 80\n' % message]
1345 1345 break
1346 1346 after.setdefault(pos, []).append(l)
1347 1347 elif l.startswith(b'#if'):
1348 1348 lsplit = l.split()
1349 1349 if len(lsplit) < 2 or lsplit[0] != b'#if':
1350 1350 after.setdefault(pos, []).append(' !!! invalid #if\n')
1351 1351 if skipping is not None:
1352 1352 after.setdefault(pos, []).append(' !!! nested #if\n')
1353 1353 skipping = not self._iftest(lsplit[1:])
1354 1354 after.setdefault(pos, []).append(l)
1355 1355 elif l.startswith(b'#else'):
1356 1356 if skipping is None:
1357 1357 after.setdefault(pos, []).append(' !!! missing #if\n')
1358 1358 skipping = not skipping
1359 1359 after.setdefault(pos, []).append(l)
1360 1360 elif l.startswith(b'#endif'):
1361 1361 if skipping is None:
1362 1362 after.setdefault(pos, []).append(' !!! missing #if\n')
1363 1363 skipping = None
1364 1364 after.setdefault(pos, []).append(l)
1365 1365 elif skipping:
1366 1366 after.setdefault(pos, []).append(l)
1367 1367 elif l.startswith(b' >>> '): # python inlines
1368 1368 after.setdefault(pos, []).append(l)
1369 1369 prepos = pos
1370 1370 pos = n
1371 1371 if not inpython:
1372 1372 # We've just entered a Python block. Add the header.
1373 1373 inpython = True
1374 1374 addsalt(prepos, False) # Make sure we report the exit code.
1375 1375 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1376 1376 addsalt(n, True)
1377 1377 script.append(l[2:])
1378 1378 elif l.startswith(b' ... '): # python inlines
1379 1379 after.setdefault(prepos, []).append(l)
1380 1380 script.append(l[2:])
1381 1381 elif l.startswith(b' $ '): # commands
1382 1382 if inpython:
1383 1383 script.append(b'EOF\n')
1384 1384 inpython = False
1385 1385 after.setdefault(pos, []).append(l)
1386 1386 prepos = pos
1387 1387 pos = n
1388 1388 addsalt(n, False)
1389 1389 cmd = l[4:].split()
1390 1390 if len(cmd) == 2 and cmd[0] == b'cd':
1391 1391 l = b' $ cd %s || exit 1\n' % cmd[1]
1392 1392 script.append(l[4:])
1393 1393 elif l.startswith(b' > '): # continuations
1394 1394 after.setdefault(prepos, []).append(l)
1395 1395 script.append(l[4:])
1396 1396 elif l.startswith(b' '): # results
1397 1397 # Queue up a list of expected results.
1398 1398 expected.setdefault(pos, []).append(l[2:])
1399 1399 else:
1400 1400 if inpython:
1401 1401 script.append(b'EOF\n')
1402 1402 inpython = False
1403 1403 # Non-command/result. Queue up for merged output.
1404 1404 after.setdefault(pos, []).append(l)
1405 1405
1406 1406 if inpython:
1407 1407 script.append(b'EOF\n')
1408 1408 if skipping is not None:
1409 1409 after.setdefault(pos, []).append(' !!! missing #endif\n')
1410 1410 addsalt(n + 1, False)
1411 1411
1412 1412 return salt, script, after, expected
1413 1413
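A rough illustration of the salt markers emitted by _parsetest() above (the salt value is hypothetical):

# for a .t file containing only
#   $ echo hi
#   hi
# and salt = b'SALT1512345678', the generated shell script is roughly
#   echo SALT1512345678 0 $?
#   echo hi
#   echo SALT1512345678 2 $?
# _processoutput() later splits the captured output on the salt so each
# command's output can be paired with its source line number and exit status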
1414 1414 def _processoutput(self, exitcode, output, salt, after, expected):
1415 1415 # Merge the script output back into a unified test.
1416 1416 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1417 1417 if exitcode != 0:
1418 1418 warnonly = 3
1419 1419
1420 1420 pos = -1
1421 1421 postout = []
1422 1422 for l in output:
1423 1423 lout, lcmd = l, None
1424 1424 if salt in l:
1425 1425 lout, lcmd = l.split(salt, 1)
1426 1426
1427 1427 while lout:
1428 1428 if not lout.endswith(b'\n'):
1429 1429 lout += b' (no-eol)\n'
1430 1430
1431 1431 # Find the expected output at the current position.
1432 1432 els = [None]
1433 1433 if expected.get(pos, None):
1434 1434 els = expected[pos]
1435 1435
1436 1436 i = 0
1437 1437 optional = []
1438 1438 while i < len(els):
1439 1439 el = els[i]
1440 1440
1441 1441 r = self.linematch(el, lout)
1442 1442 if isinstance(r, str):
1443 1443 if r == '+glob':
1444 1444 lout = el[:-1] + ' (glob)\n'
1445 1445 r = '' # Warn only this line.
1446 1446 elif r == '-glob':
1447 1447 lout = ''.join(el.rsplit(' (glob)', 1))
1448 1448 r = '' # Warn only this line.
1449 1449 elif r == "retry":
1450 1450 postout.append(b' ' + el)
1451 1451 els.pop(i)
1452 1452 break
1453 1453 else:
1454 1454 log('\ninfo, unknown linematch result: %r\n' % r)
1455 1455 r = False
1456 1456 if r:
1457 1457 els.pop(i)
1458 1458 break
1459 1459 if el:
1460 1460 if el.endswith(b" (?)\n"):
1461 1461 optional.append(i)
1462 1462 else:
1463 1463 m = optline.match(el)
1464 1464 if m:
1465 1465 conditions = [
1466 1466 c for c in m.group(2).split(b' ')]
1467 1467
1468 1468 if not self._iftest(conditions):
1469 1469 optional.append(i)
1470 1470
1471 1471 i += 1
1472 1472
1473 1473 if r:
1474 1474 if r == "retry":
1475 1475 continue
1476 1476 # clean up any optional leftovers
1477 1477 for i in optional:
1478 1478 postout.append(b' ' + els[i])
1479 1479 for i in reversed(optional):
1480 1480 del els[i]
1481 1481 postout.append(b' ' + el)
1482 1482 else:
1483 1483 if self.NEEDESCAPE(lout):
1484 1484 lout = TTest._stringescape(b'%s (esc)\n' %
1485 1485 lout.rstrip(b'\n'))
1486 1486 postout.append(b' ' + lout) # Let diff deal with it.
1487 1487 if r != '': # If line failed.
1488 1488 warnonly = 3 # for sure not
1489 1489 elif warnonly == 1: # Is "not yet" and line is warn only.
1490 1490 warnonly = 2 # Yes do warn.
1491 1491 break
1492 1492 else:
1493 1493 # clean up any optional leftovers
1494 1494 while expected.get(pos, None):
1495 1495 el = expected[pos].pop(0)
1496 1496 if el:
1497 1497 if not el.endswith(b" (?)\n"):
1498 1498 m = optline.match(el)
1499 1499 if m:
1500 1500 conditions = [c for c in m.group(2).split(b' ')]
1501 1501
1502 1502 if self._iftest(conditions):
1503 1503 # Don't append as optional line
1504 1504 continue
1505 1505 else:
1506 1506 continue
1507 1507 postout.append(b' ' + el)
1508 1508
1509 1509 if lcmd:
1510 1510 # Add on last return code.
1511 1511 ret = int(lcmd.split()[1])
1512 1512 if ret != 0:
1513 1513 postout.append(b' [%d]\n' % ret)
1514 1514 if pos in after:
1515 1515 # Merge in non-active test bits.
1516 1516 postout += after.pop(pos)
1517 1517 pos = int(lcmd.split()[0])
1518 1518
1519 1519 if pos in after:
1520 1520 postout += after.pop(pos)
1521 1521
1522 1522 if warnonly == 2:
1523 1523 exitcode = False # Set exitcode to warned.
1524 1524
1525 1525 return exitcode, postout
1526 1526
1527 1527 @staticmethod
1528 1528 def rematch(el, l):
1529 1529 try:
1530 1530 # use \Z to ensure that the regex matches to the end of the string
1531 1531 if os.name == 'nt':
1532 1532 return re.match(el + br'\r?\n\Z', l)
1533 1533 return re.match(el + br'\n\Z', l)
1534 1534 except re.error:
1535 1535 # el is an invalid regex
1536 1536 return False
1537 1537
1538 1538 @staticmethod
1539 1539 def globmatch(el, l):
1540 1540 # The only supported special characters are * and ?, plus /, which also
1541 1541 # matches \ on Windows. Escaping of these characters is supported.
1542 1542 if el + b'\n' == l:
1543 1543 if os.altsep:
1544 1544 # matching on "/" is not needed for this line
1545 1545 for pat in checkcodeglobpats:
1546 1546 if pat.match(el):
1547 1547 return True
1548 1548 return b'-glob'
1549 1549 return True
1550 1550 el = el.replace(b'$LOCALIP', b'*')
1551 1551 i, n = 0, len(el)
1552 1552 res = b''
1553 1553 while i < n:
1554 1554 c = el[i:i + 1]
1555 1555 i += 1
1556 1556 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1557 1557 res += el[i - 1:i + 1]
1558 1558 i += 1
1559 1559 elif c == b'*':
1560 1560 res += b'.*'
1561 1561 elif c == b'?':
1562 1562 res += b'.'
1563 1563 elif c == b'/' and os.altsep:
1564 1564 res += b'[/\\\\]'
1565 1565 else:
1566 1566 res += re.escape(c)
1567 1567 return TTest.rematch(res, l)
1568 1568
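# A note on linematch() below: it returns True for a match, a false value
# for a plain mismatch, or one of the special markers '+glob', '-glob' and
# 'retry', which the output-matching loop above handles separately (warn
# and annotate the line, or move on to the next expected line).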
1569 1569 def linematch(self, el, l):
1570 1570 retry = False
1571 1571 if el == l: # perfect match (fast)
1572 1572 return True
1573 1573 if el:
1574 1574 if el.endswith(b" (?)\n"):
1575 1575 retry = "retry"
1576 1576 el = el[:-5] + b"\n"
1577 1577 else:
1578 1578 m = optline.match(el)
1579 1579 if m:
1580 1580 conditions = [c for c in m.group(2).split(b' ')]
1581 1581
1582 1582 el = m.group(1) + b"\n"
1583 1583 if not self._iftest(conditions):
1584 1584 retry = "retry" # Not required by listed features
1585 1585
1586 1586 if el.endswith(b" (esc)\n"):
1587 1587 if PYTHON3:
1588 1588 el = el[:-7].decode('unicode_escape') + '\n'
1589 1589 el = el.encode('utf-8')
1590 1590 else:
1591 1591 el = el[:-7].decode('string-escape') + '\n'
1592 1592 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1593 1593 return True
1594 1594 if el.endswith(b" (re)\n"):
1595 1595 return TTest.rematch(el[:-6], l) or retry
1596 1596 if el.endswith(b" (glob)\n"):
1597 1597 # ignore '(glob)' added to l by 'replacements'
1598 1598 if l.endswith(b" (glob)\n"):
1599 1599 l = l[:-8] + b"\n"
1600 1600 return TTest.globmatch(el[:-8], l) or retry
1601 1601 if os.altsep and l.replace(b'\\', b'/') == el:
1602 1602 return b'+glob'
1603 1603 return retry
1604 1604
1605 1605 @staticmethod
1606 1606 def parsehghaveoutput(lines):
1607 1607 '''Parse hghave log lines.
1608 1608
1609 1609 Return tuple of lists (missing, failed):
1610 1610 * the missing/unknown features
1611 1611 * the features for which existence check failed'''
1612 1612 missing = []
1613 1613 failed = []
1614 1614 for line in lines:
1615 1615 if line.startswith(TTest.SKIPPED_PREFIX):
1616 1616 line = line.splitlines()[0]
1617 1617 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1618 1618 elif line.startswith(TTest.FAILED_PREFIX):
1619 1619 line = line.splitlines()[0]
1620 1620 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1621 1621
1622 1622 return missing, failed
1623 1623
1624 1624 @staticmethod
1625 1625 def _escapef(m):
1626 1626 return TTest.ESCAPEMAP[m.group(0)]
1627 1627
1628 1628 @staticmethod
1629 1629 def _stringescape(s):
1630 1630 return TTest.ESCAPESUB(TTest._escapef, s)
1631 1631
1632 1632 iolock = threading.RLock()
1633 1633
1634 1634 class TestResult(unittest._TextTestResult):
1635 1635 """Holds results when executing via unittest."""
1636 1636 # Don't worry too much about accessing the non-public _TextTestResult.
1637 1637 # It is relatively common in Python testing tools.
1638 1638 def __init__(self, options, *args, **kwargs):
1639 1639 super(TestResult, self).__init__(*args, **kwargs)
1640 1640
1641 1641 self._options = options
1642 1642
1643 1643 # unittest.TestResult didn't have skipped until 2.7. We need to
1644 1644 # polyfill it.
1645 1645 self.skipped = []
1646 1646
1647 1647 # We have a custom "ignored" result that isn't present in any Python
1648 1648 # unittest implementation. It is very similar to skipped. It may make
1649 1649 # sense to map it into skip some day.
1650 1650 self.ignored = []
1651 1651
1652 1652 self.times = []
1653 1653 self._firststarttime = None
1654 1654 # Data stored for the benefit of generating xunit reports.
1655 1655 self.successes = []
1656 1656 self.faildata = {}
1657 1657
1658 1658 if options.color == 'auto':
1659 1659 self.color = pygmentspresent and self.stream.isatty()
1660 1660 elif options.color == 'never':
1661 1661 self.color = False
1662 1662 else: # 'always', for testing purposes
1663 1663 self.color = pygmentspresent
1664 1664
1665 1665 def addFailure(self, test, reason):
1666 1666 self.failures.append((test, reason))
1667 1667
1668 1668 if self._options.first:
1669 1669 self.stop()
1670 1670 else:
1671 1671 with iolock:
1672 1672 if reason == "timed out":
1673 1673 self.stream.write('t')
1674 1674 else:
1675 1675 if not self._options.nodiff:
1676 1676 self.stream.write('\n')
1677 1677 # Exclude the '\n' from highlighting to lex correctly
1678 1678 formatted = 'ERROR: %s output changed\n' % test
1679 1679 self.stream.write(highlightmsg(formatted, self.color))
1680 1680 self.stream.write('!')
1681 1681
1682 1682 self.stream.flush()
1683 1683
1684 1684 def addSuccess(self, test):
1685 1685 with iolock:
1686 1686 super(TestResult, self).addSuccess(test)
1687 1687 self.successes.append(test)
1688 1688
1689 1689 def addError(self, test, err):
1690 1690 super(TestResult, self).addError(test, err)
1691 1691 if self._options.first:
1692 1692 self.stop()
1693 1693
1694 1694 # Polyfill.
1695 1695 def addSkip(self, test, reason):
1696 1696 self.skipped.append((test, reason))
1697 1697 with iolock:
1698 1698 if self.showAll:
1699 1699 self.stream.writeln('skipped %s' % reason)
1700 1700 else:
1701 1701 self.stream.write('s')
1702 1702 self.stream.flush()
1703 1703
1704 1704 def addIgnore(self, test, reason):
1705 1705 self.ignored.append((test, reason))
1706 1706 with iolock:
1707 1707 if self.showAll:
1708 1708 self.stream.writeln('ignored %s' % reason)
1709 1709 else:
1710 1710 if reason not in ('not retesting', "doesn't match keyword"):
1711 1711 self.stream.write('i')
1712 1712 else:
1713 1713 self.testsRun += 1
1714 1714 self.stream.flush()
1715 1715
1716 1716 def addOutputMismatch(self, test, ret, got, expected):
1717 1717 """Record a mismatch in test output for a particular test."""
1718 1718 if self.shouldStop:
1719 1719 # don't print, some other test case already failed and
1720 1720 # printed, we're just stale and probably failed due to our
1721 1721 # temp dir getting cleaned up.
1722 1722 return
1723 1723
1724 1724 accepted = False
1725 1725 lines = []
1726 1726
1727 1727 with iolock:
1728 1728 if self._options.nodiff:
1729 1729 pass
1730 1730 elif self._options.view:
1731 1731 v = self._options.view
1732 1732 if PYTHON3:
1733 1733 v = _bytespath(v)
1734 1734 os.system(b"%s %s %s" %
1735 1735 (v, test.refpath, test.errpath))
1736 1736 else:
1737 1737 servefail, lines = getdiff(expected, got,
1738 1738 test.refpath, test.errpath)
1739 1739 if servefail:
1740 1740 raise test.failureException(
1741 1741 'server failed to start (HGPORT=%s)' % test._startport)
1742 1742 else:
1743 1743 self.stream.write('\n')
1744 1744 for line in lines:
1745 1745 line = highlightdiff(line, self.color)
1746 1746 if PYTHON3:
1747 1747 self.stream.flush()
1748 1748 self.stream.buffer.write(line)
1749 1749 self.stream.buffer.flush()
1750 1750 else:
1751 1751 self.stream.write(line)
1752 1752 self.stream.flush()
1753 1753
1754 1754 # handle interactive prompt without releasing iolock
1755 1755 if self._options.interactive:
1756 1756 if test.readrefout() != expected:
1757 1757 self.stream.write(
1758 1758 'Reference output has changed (run again to prompt '
1759 1759 'changes)')
1760 1760 else:
1761 1761 self.stream.write('Accept this change? [n] ')
1762 1762 answer = sys.stdin.readline().strip()
1763 1763 if answer.lower() in ('y', 'yes'):
1764 1764 if test.path.endswith(b'.t'):
1765 1765 rename(test.errpath, test.path)
1766 1766 else:
1767 1767 rename(test.errpath, '%s.out' % test.path)
1768 1768 accepted = True
1769 1769 if not accepted:
1770 1770 self.faildata[test.name] = b''.join(lines)
1771 1771
1772 1772 return accepted
1773 1773
1774 1774 def startTest(self, test):
1775 1775 super(TestResult, self).startTest(test)
1776 1776
1777 1777 # os.times() returns the user and system time spent by the process and
1778 1778 # its child processes, along with the real elapsed time taken by the
1779 1779 # process. This approach has one limitation: it only works on Unix-like
1780 1780 # systems and not on Windows.
1781 1781 test.started = os.times()
1782 1782 if self._firststarttime is None: # thread racy but irrelevant
1783 1783 self._firststarttime = test.started[4]
1784 1784
1785 1785 def stopTest(self, test, interrupted=False):
1786 1786 super(TestResult, self).stopTest(test)
1787 1787
1788 1788 test.stopped = os.times()
1789 1789
1790 1790 starttime = test.started
1791 1791 endtime = test.stopped
1792 1792 origin = self._firststarttime
1793 1793 self.times.append((test.name,
1794 1794 endtime[2] - starttime[2], # user space CPU time
1795 1795 endtime[3] - starttime[3], # sys space CPU time
1796 1796 endtime[4] - starttime[4], # real time
1797 1797 starttime[4] - origin, # start date in run context
1798 1798 endtime[4] - origin, # end date in run context
1799 1799 ))
1800 1800
1801 1801 if interrupted:
1802 1802 with iolock:
1803 1803 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1804 1804 test.name, self.times[-1][3]))
1805 1805
1806 1806 class TestSuite(unittest.TestSuite):
1807 1807 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1808 1808
1809 1809 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1810 1810 retest=False, keywords=None, loop=False, runs_per_test=1,
1811 1811 loadtest=None, showchannels=False,
1812 1812 *args, **kwargs):
1813 1813 """Create a new instance that can run tests with a configuration.
1814 1814
1815 1815 testdir specifies the directory where tests are executed from. This
1816 1816 is typically the ``tests`` directory from Mercurial's source
1817 1817 repository.
1818 1818
1819 1819 jobs specifies the number of jobs to run concurrently. Each test
1820 1820 executes on its own thread. Tests actually spawn new processes, so
1821 1821 state mutation should not be an issue.
1822 1822
1823 1823 If there is only one job, it will use the main thread.
1824 1824
1825 1825 whitelist and blacklist denote tests that have been whitelisted and
1826 1826 blacklisted, respectively. These arguments don't belong in TestSuite.
1827 1827 Instead, whitelist and blacklist should be handled by the thing that
1828 1828 populates the TestSuite with tests. They are present to preserve
1829 1829 backwards compatible behavior which reports skipped tests as part
1830 1830 of the results.
1831 1831
1832 1832 retest denotes whether to retest failed tests. This arguably belongs
1833 1833 outside of TestSuite.
1834 1834
1835 1835 keywords denotes key words that will be used to filter which tests
1836 1836 to execute. This arguably belongs outside of TestSuite.
1837 1837
1838 1838 loop denotes whether to loop over tests forever.
1839 1839 """
1840 1840 super(TestSuite, self).__init__(*args, **kwargs)
1841 1841
1842 1842 self._jobs = jobs
1843 1843 self._whitelist = whitelist
1844 1844 self._blacklist = blacklist
1845 1845 self._retest = retest
1846 1846 self._keywords = keywords
1847 1847 self._loop = loop
1848 1848 self._runs_per_test = runs_per_test
1849 1849 self._loadtest = loadtest
1850 1850 self._showchannels = showchannels
1851 1851
1852 1852 def run(self, result):
1853 1853 # We have a number of filters that need to be applied. We do this
1854 1854 # here instead of inside Test because it makes the running logic for
1855 1855 # Test simpler.
1856 1856 tests = []
1857 1857 num_tests = [0]
1858 1858 for test in self._tests:
1859 1859 def get():
1860 1860 num_tests[0] += 1
1861 1861 if getattr(test, 'should_reload', False):
1862 1862 return self._loadtest(test, num_tests[0])
1863 1863 return test
1864 1864 if not os.path.exists(test.path):
1865 1865 result.addSkip(test, "Doesn't exist")
1866 1866 continue
1867 1867
1868 1868 if not (self._whitelist and test.bname in self._whitelist):
1869 1869 if self._blacklist and test.bname in self._blacklist:
1870 1870 result.addSkip(test, 'blacklisted')
1871 1871 continue
1872 1872
1873 1873 if self._retest and not os.path.exists(test.errpath):
1874 1874 result.addIgnore(test, 'not retesting')
1875 1875 continue
1876 1876
1877 1877 if self._keywords:
1878 1878 f = open(test.path, 'rb')
1879 1879 t = f.read().lower() + test.bname.lower()
1880 1880 f.close()
1881 1881 ignored = False
1882 1882 for k in self._keywords.lower().split():
1883 1883 if k not in t:
1884 1884 result.addIgnore(test, "doesn't match keyword")
1885 1885 ignored = True
1886 1886 break
1887 1887
1888 1888 if ignored:
1889 1889 continue
1890 1890 for _ in xrange(self._runs_per_test):
1891 1891 tests.append(get())
1892 1892
1893 1893 runtests = list(tests)
1894 1894 done = queue.Queue()
1895 1895 running = 0
1896 1896
1897 1897 channels = [""] * self._jobs
1898 1898
1899 1899 def job(test, result):
1900 1900 for n, v in enumerate(channels):
1901 1901 if not v:
1902 1902 channel = n
1903 1903 break
1904 1904 else:
1905 1905 raise ValueError('Could not find output channel')
1906 1906 channels[channel] = "=" + test.name[5:].split(".")[0]
1907 1907 try:
1908 1908 test(result)
1909 1909 done.put(None)
1910 1910 except KeyboardInterrupt:
1911 1911 pass
1912 1912 except: # re-raises
1913 1913 done.put(('!', test, 'run-test raised an error, see traceback'))
1914 1914 raise
1915 1915 finally:
1916 1916 try:
1917 1917 channels[channel] = ''
1918 1918 except IndexError:
1919 1919 pass
1920 1920
1921 1921 def stat():
1922 1922 count = 0
1923 1923 while channels:
1924 1924 d = '\n%03s ' % count
1925 1925 for n, v in enumerate(channels):
1926 1926 if v:
1927 1927 d += v[0]
1928 1928 channels[n] = v[1:] or '.'
1929 1929 else:
1930 1930 d += ' '
1931 1931 d += ' '
1932 1932 with iolock:
1933 1933 sys.stdout.write(d + ' ')
1934 1934 sys.stdout.flush()
1935 1935 for x in xrange(10):
1936 1936 if channels:
1937 1937 time.sleep(.1)
1938 1938 count += 1
1939 1939
1940 1940 stoppedearly = False
1941 1941
1942 1942 if self._showchannels:
1943 1943 statthread = threading.Thread(target=stat, name="stat")
1944 1944 statthread.start()
1945 1945
1946 1946 try:
1947 1947 while tests or running:
1948 1948 if not done.empty() or running == self._jobs or not tests:
1949 1949 try:
1950 1950 done.get(True, 1)
1951 1951 running -= 1
1952 1952 if result and result.shouldStop:
1953 1953 stoppedearly = True
1954 1954 break
1955 1955 except queue.Empty:
1956 1956 continue
1957 1957 if tests and not running == self._jobs:
1958 1958 test = tests.pop(0)
1959 1959 if self._loop:
1960 1960 if getattr(test, 'should_reload', False):
1961 1961 num_tests[0] += 1
1962 1962 tests.append(
1963 1963 self._loadtest(test, num_tests[0]))
1964 1964 else:
1965 1965 tests.append(test)
1966 1966 if self._jobs == 1:
1967 1967 job(test, result)
1968 1968 else:
1969 1969 t = threading.Thread(target=job, name=test.name,
1970 1970 args=(test, result))
1971 1971 t.start()
1972 1972 running += 1
1973 1973
1974 1974 # If we stop early we still need to wait on started tests to
1975 1975 # finish. Otherwise, there is a race between the test completing
1976 1976 # and the test's cleanup code running. This could result in the
1977 1977 # test reporting incorrect results.
1978 1978 if stoppedearly:
1979 1979 while running:
1980 1980 try:
1981 1981 done.get(True, 1)
1982 1982 running -= 1
1983 1983 except queue.Empty:
1984 1984 continue
1985 1985 except KeyboardInterrupt:
1986 1986 for test in runtests:
1987 1987 test.abort()
1988 1988
1989 1989 channels = []
1990 1990
1991 1991 return result
1992 1992
1993 1993 # Save the most recent 5 wall-clock runtimes of each test to a
1994 1994 # human-readable text file named .testtimes. Tests are sorted
1995 1995 # alphabetically, while times for each test are listed from oldest to
1996 1996 # newest.
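# A saved file might look like this (hypothetical test names):
#   test-commit.t 2.341 2.297 2.410
#   test-log.t 0.812 0.799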
1997 1997
1998 1998 def loadtimes(outputdir):
1999 1999 times = []
2000 2000 try:
2001 2001 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2002 2002 for line in fp:
2003 2003 ts = line.split()
2004 2004 times.append((ts[0], [float(t) for t in ts[1:]]))
2005 2005 except IOError as err:
2006 2006 if err.errno != errno.ENOENT:
2007 2007 raise
2008 2008 return times
2009 2009
2010 2010 def savetimes(outputdir, result):
2011 2011 saved = dict(loadtimes(outputdir))
2012 2012 maxruns = 5
2013 2013 skipped = set([str(t[0]) for t in result.skipped])
2014 2014 for tdata in result.times:
2015 2015 test, real = tdata[0], tdata[3]
2016 2016 if test not in skipped:
2017 2017 ts = saved.setdefault(test, [])
2018 2018 ts.append(real)
2019 2019 ts[:] = ts[-maxruns:]
2020 2020
2021 2021 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
2022 2022 dir=outputdir, text=True)
2023 2023 with os.fdopen(fd, 'w') as fp:
2024 2024 for name, ts in sorted(saved.items()):
2025 2025 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2026 2026 timepath = os.path.join(outputdir, b'.testtimes')
2027 2027 try:
2028 2028 os.unlink(timepath)
2029 2029 except OSError:
2030 2030 pass
2031 2031 try:
2032 2032 os.rename(tmpname, timepath)
2033 2033 except OSError:
2034 2034 pass
2035 2035
2036 2036 class TextTestRunner(unittest.TextTestRunner):
2037 2037 """Custom unittest test runner that uses appropriate settings."""
2038 2038
2039 2039 def __init__(self, runner, *args, **kwargs):
2040 2040 super(TextTestRunner, self).__init__(*args, **kwargs)
2041 2041
2042 2042 self._runner = runner
2043 2043
2044 2044 def listtests(self, test):
2045 2045 result = TestResult(self._runner.options, self.stream,
2046 2046 self.descriptions, 0)
2047 2047 test = sorted(test, key=lambda t: t.name)
2048 2048 for t in test:
2049 2049 print(t.name)
2050 2050 result.addSuccess(t)
2051 2051
2052 2052 if self._runner.options.xunit:
2053 2053 with open(self._runner.options.xunit, "wb") as xuf:
2054 2054 self._writexunit(result, xuf)
2055 2055
2056 2056 if self._runner.options.json:
2057 2057 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2058 2058 with open(jsonpath, 'w') as fp:
2059 2059 self._writejson(result, fp)
2060 2060
2061 2061 return result
2062 2062
2063 2063 def run(self, test):
2064 2064 result = TestResult(self._runner.options, self.stream,
2065 2065 self.descriptions, self.verbosity)
2066 2066
2067 2067 test(result)
2068 2068
2069 2069 failed = len(result.failures)
2070 2070 skipped = len(result.skipped)
2071 2071 ignored = len(result.ignored)
2072 2072
2073 2073 with iolock:
2074 2074 self.stream.writeln('')
2075 2075
2076 2076 if not self._runner.options.noskips:
2077 2077 for test, msg in result.skipped:
2078 2078 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2079 2079 self.stream.write(highlightmsg(formatted, result.color))
2080 2080 for test, msg in result.failures:
2081 2081 formatted = 'Failed %s: %s\n' % (test.name, msg)
2082 2082 self.stream.write(highlightmsg(formatted, result.color))
2083 2083 for test, msg in result.errors:
2084 2084 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2085 2085
2086 2086 if self._runner.options.xunit:
2087 2087 with open(self._runner.options.xunit, "wb") as xuf:
2088 2088 self._writexunit(result, xuf)
2089 2089
2090 2090 if self._runner.options.json:
2091 2091 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2092 2092 with open(jsonpath, 'w') as fp:
2093 2093 self._writejson(result, fp)
2094 2094
2095 2095 self._runner._checkhglib('Tested')
2096 2096
2097 2097 savetimes(self._runner._outputdir, result)
2098 2098
2099 2099 if failed and self._runner.options.known_good_rev:
2100 2100 self._bisecttests(t for t, m in result.failures)
2101 2101 self.stream.writeln(
2102 2102 '# Ran %d tests, %d skipped, %d failed.'
2103 2103 % (result.testsRun, skipped + ignored, failed))
2104 2104 if failed:
2105 2105 self.stream.writeln('python hash seed: %s' %
2106 2106 os.environ['PYTHONHASHSEED'])
2107 2107 if self._runner.options.time:
2108 2108 self.printtimes(result.times)
2109 2109 self.stream.flush()
2110 2110
2111 2111 return result
2112 2112
2113 2113 def _bisecttests(self, tests):
2114 2114 bisectcmd = ['hg', 'bisect']
2115 2115 bisectrepo = self._runner.options.bisect_repo
2116 2116 if bisectrepo:
2117 2117 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2118 2118 def pread(args):
2119 2119 env = os.environ.copy()
2120 2120 env['HGPLAIN'] = '1'
2121 2121 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
2122 2122 stdout=subprocess.PIPE, env=env)
2123 2123 data = p.stdout.read()
2124 2124 p.wait()
2125 2125 return data
2126 2126 for test in tests:
2127 2127 pread(bisectcmd + ['--reset'])
2128 2128 pread(bisectcmd + ['--bad', '.'])
2129 2129 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2130 2130 # TODO: we probably need to forward more options
2131 2131 # that alter hg's behavior inside the tests.
2132 2132 opts = ''
2133 2133 withhg = self._runner.options.with_hg
2134 2134 if withhg:
2135 2135 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
2136 2136 rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
2137 2137 test)
2138 2138 data = pread(bisectcmd + ['--command', rtc])
2139 2139 m = re.search(
2140 2140 (br'\nThe first (?P<goodbad>bad|good) revision '
2141 2141 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2142 2142 br'summary: +(?P<summary>[^\n]+)\n'),
2143 2143 data, (re.MULTILINE | re.DOTALL))
2144 2144 if m is None:
2145 2145 self.stream.writeln(
2146 2146 'Failed to identify failure point for %s' % test)
2147 2147 continue
2148 2148 dat = m.groupdict()
2149 2149 verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
2150 2150 self.stream.writeln(
2151 2151 '%s %s by %s (%s)' % (
2152 2152 test, verb, dat['node'], dat['summary']))
2153 2153
2154 2154 def printtimes(self, times):
2155 2155 # iolock held by run
2156 2156 self.stream.writeln('# Producing time report')
2157 2157 times.sort(key=lambda t: (t[3]))
2158 2158 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2159 2159 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2160 2160 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2161 2161 for tdata in times:
2162 2162 test = tdata[0]
2163 2163 cuser, csys, real, start, end = tdata[1:6]
2164 2164 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2165 2165
2166 2166 @staticmethod
2167 2167 def _writexunit(result, outf):
2168 2168 # See http://llg.cubic.org/docs/junit/ for a reference.
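# The generated report is roughly of this shape (illustrative values):
#   <testsuite name="run-tests" tests="3" errors="0" failures="1" skipped="1">
#     <testcase name="test-good.t" time="0.123"/>
#     <testcase name="test-bad.t" time="4.567">
#       <failure message="output changed" type="output-mismatch"><![CDATA[...]]></failure>
#     </testcase>
#     <testcase name="test-skipped.t"><skipped><![CDATA[missing feature]]></skipped></testcase>
#   </testsuite>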
2169 2169 timesd = dict((t[0], t[3]) for t in result.times)
2170 2170 doc = minidom.Document()
2171 2171 s = doc.createElement('testsuite')
2172 2172 s.setAttribute('name', 'run-tests')
2173 2173 s.setAttribute('tests', str(result.testsRun))
2174 2174 s.setAttribute('errors', "0") # TODO
2175 2175 s.setAttribute('failures', str(len(result.failures)))
2176 2176 s.setAttribute('skipped', str(len(result.skipped) +
2177 2177 len(result.ignored)))
2178 2178 doc.appendChild(s)
2179 2179 for tc in result.successes:
2180 2180 t = doc.createElement('testcase')
2181 2181 t.setAttribute('name', tc.name)
2182 2182 tctime = timesd.get(tc.name)
2183 2183 if tctime is not None:
2184 2184 t.setAttribute('time', '%.3f' % tctime)
2185 2185 s.appendChild(t)
2186 2186 for tc, err in sorted(result.faildata.items()):
2187 2187 t = doc.createElement('testcase')
2188 2188 t.setAttribute('name', tc)
2189 2189 tctime = timesd.get(tc)
2190 2190 if tctime is not None:
2191 2191 t.setAttribute('time', '%.3f' % tctime)
2192 2192 # createCDATASection expects a unicode string or it will
2193 2193 # convert using the default conversion rules, which will
2194 2194 # fail if the string isn't ASCII.
2195 2195 err = cdatasafe(err).decode('utf-8', 'replace')
2196 2196 cd = doc.createCDATASection(err)
2197 2197 # Use 'failure' here instead of 'error' to match errors = 0,
2198 2198 # failures = len(result.failures) in the testsuite element.
2199 2199 failelem = doc.createElement('failure')
2200 2200 failelem.setAttribute('message', 'output changed')
2201 2201 failelem.setAttribute('type', 'output-mismatch')
2202 2202 failelem.appendChild(cd)
2203 2203 t.appendChild(failelem)
2204 2204 s.appendChild(t)
2205 2205 for tc, message in result.skipped:
2206 2206 # According to the schema, 'skipped' has no attributes. So store
2207 2207 # the skip message as a text node instead.
2208 2208 t = doc.createElement('testcase')
2209 2209 t.setAttribute('name', tc.name)
2210 2210 binmessage = message.encode('utf-8')
2211 2211 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2212 2212 cd = doc.createCDATASection(message)
2213 2213 skipelem = doc.createElement('skipped')
2214 2214 skipelem.appendChild(cd)
2215 2215 t.appendChild(skipelem)
2216 2216 s.appendChild(t)
2217 2217 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2218 2218
2219 2219 @staticmethod
2220 2220 def _writejson(result, outf):
2221 2221 timesd = {}
2222 2222 for tdata in result.times:
2223 2223 test = tdata[0]
2224 2224 timesd[test] = tdata[1:]
2225 2225
2226 2226 outcome = {}
2227 2227 groups = [('success', ((tc, None)
2228 2228 for tc in result.successes)),
2229 2229 ('failure', result.failures),
2230 2230 ('skip', result.skipped)]
2231 2231 for res, testcases in groups:
2232 2232 for tc, __ in testcases:
2233 2233 if tc.name in timesd:
2234 2234 diff = result.faildata.get(tc.name, b'')
2235 2235 try:
2236 2236 diff = diff.decode('unicode_escape')
2237 2237 except UnicodeDecodeError as e:
2238 2238 diff = '%r decoding diff, sorry' % e
2239 2239 tres = {'result': res,
2240 2240 'time': ('%0.3f' % timesd[tc.name][2]),
2241 2241 'cuser': ('%0.3f' % timesd[tc.name][0]),
2242 2242 'csys': ('%0.3f' % timesd[tc.name][1]),
2243 2243 'start': ('%0.3f' % timesd[tc.name][3]),
2244 2244 'end': ('%0.3f' % timesd[tc.name][4]),
2245 2245 'diff': diff,
2246 2246 }
2247 2247 else:
2248 2248 # blacklisted test
2249 2249 tres = {'result': res}
2250 2250
2251 2251 outcome[tc.name] = tres
2252 2252 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2253 2253 separators=(',', ': '))
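# Note that the output is not bare JSON: the "testreport =" prefix below
# turns the file into a JavaScript-style assignment, presumably so it can
# be loaded directly by a report viewer.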
2254 2254 outf.writelines(("testreport =", jsonout))
2255 2255
2256 2256 class TestRunner(object):
2257 2257 """Holds context for executing tests.
2258 2258
2259 2259 Tests rely on a lot of state. This object holds it for them.
2260 2260 """
2261 2261
2262 2262 # Programs required to run tests.
2263 2263 REQUIREDTOOLS = [
2264 2264 b'diff',
2265 2265 b'grep',
2266 2266 b'unzip',
2267 2267 b'gunzip',
2268 2268 b'bunzip2',
2269 2269 b'sed',
2270 2270 ]
2271 2271
2272 2272 # Maps file extensions to test class.
2273 2273 TESTTYPES = [
2274 2274 (b'.py', PythonTest),
2275 2275 (b'.t', TTest),
2276 2276 ]
2277 2277
2278 2278 def __init__(self):
2279 2279 self.options = None
2280 2280 self._hgroot = None
2281 2281 self._testdir = None
2282 2282 self._outputdir = None
2283 2283 self._hgtmp = None
2284 2284 self._installdir = None
2285 2285 self._bindir = None
2286 2286 self._tmpbinddir = None
2287 2287 self._pythondir = None
2288 2288 self._coveragefile = None
2289 2289 self._createdfiles = []
2290 2290 self._hgcommand = None
2291 2291 self._hgpath = None
2292 2292 self._portoffset = 0
2293 2293 self._ports = {}
2294 2294
2295 2295 def run(self, args, parser=None):
2296 2296 """Run the test suite."""
2297 2297 oldmask = os.umask(0o22)
2298 2298 try:
2299 2299 parser = parser or getparser()
2300 2300 options, args = parseargs(args, parser)
2301 2301 # positional arguments are paths to test files to run, so
2302 2302 # we make sure they're all bytestrings
2303 2303 args = [_bytespath(a) for a in args]
2304 2304 if options.test_list is not None:
2305 2305 for listfile in options.test_list:
2306 2306 with open(listfile, 'rb') as f:
2307 2307 args.extend(t for t in f.read().splitlines() if t)
2308 2308 self.options = options
2309 2309
2310 2310 self._checktools()
2311 2311 testdescs = self.findtests(args)
2312 2312 if options.profile_runner:
2313 2313 import statprof
2314 2314 statprof.start()
2315 2315 result = self._run(testdescs)
2316 2316 if options.profile_runner:
2317 2317 statprof.stop()
2318 2318 statprof.display()
2319 2319 return result
2320 2320
2321 2321 finally:
2322 2322 os.umask(oldmask)
2323 2323
2324 2324 def _run(self, testdescs):
2325 2325 if self.options.random:
2326 2326 random.shuffle(testdescs)
2327 2327 else:
2328 2328 # keywords for slow tests
2329 2329 slow = {b'svn': 10,
2330 2330 b'cvs': 10,
2331 2331 b'hghave': 10,
2332 2332 b'largefiles-update': 10,
2333 2333 b'run-tests': 10,
2334 2334 b'corruption': 10,
2335 2335 b'race': 10,
2336 2336 b'i18n': 10,
2337 2337 b'check': 100,
2338 2338 b'gendoc': 100,
2339 2339 b'contrib-perf': 200,
2340 2340 }
2341 2341 perf = {}
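# The weight computed below is the negated file size: names matching a
# "slow" keyword are scaled up and .py tests are scaled down, so the
# biggest, slowest shell tests are started first under the ascending sort.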
2342 2342 def sortkey(f):
2343 2343 # run largest tests first, as they tend to take the longest
2344 2344 f = f['path']
2345 2345 try:
2346 2346 return perf[f]
2347 2347 except KeyError:
2348 2348 try:
2349 2349 val = -os.stat(f).st_size
2350 2350 except OSError as e:
2351 2351 if e.errno != errno.ENOENT:
2352 2352 raise
2353 2353 perf[f] = -1e9 # file does not exist, tell early
2354 2354 return -1e9
2355 2355 for kw, mul in slow.items():
2356 2356 if kw in f:
2357 2357 val *= mul
2358 2358 if f.endswith(b'.py'):
2359 2359 val /= 10.0
2360 2360 perf[f] = val / 1000.0
2361 2361 return perf[f]
2362 2362 testdescs.sort(key=sortkey)
2363 2363
2364 2364 self._testdir = osenvironb[b'TESTDIR'] = getattr(
2365 2365 os, 'getcwdb', os.getcwd)()
2366 2366 # assume all tests are in the same folder for now
2367 2367 if testdescs:
2368 2368 pathname = os.path.dirname(testdescs[0]['path'])
2369 2369 if pathname:
2370 2370 osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
2371 2371 pathname)
2372 2372 if self.options.outputdir:
2373 2373 self._outputdir = canonpath(_bytespath(self.options.outputdir))
2374 2374 else:
2375 2375 self._outputdir = self._testdir
2376 if testdescs and pathname:
2377 self._outputdir = os.path.join(self._outputdir, pathname)
2376 2378
2377 2379 if 'PYTHONHASHSEED' not in os.environ:
2378 2380 # use a random python hash seed all the time
2379 2381 # we do the randomness ourself to know what seed is used
2380 2382 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2381 2383
2382 2384 if self.options.tmpdir:
2383 2385 self.options.keep_tmpdir = True
2384 2386 tmpdir = _bytespath(self.options.tmpdir)
2385 2387 if os.path.exists(tmpdir):
2386 2388 # Meaning of tmpdir has changed since 1.3: we used to create
2387 2389 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2388 2390 # tmpdir already exists.
2389 2391 print("error: temp dir %r already exists" % tmpdir)
2390 2392 return 1
2391 2393
2392 2394 # Automatically removing tmpdir sounds convenient, but could
2393 2395 # really annoy anyone in the habit of using "--tmpdir=/tmp"
2394 2396 # or "--tmpdir=$HOME".
2395 2397 #vlog("# Removing temp dir", tmpdir)
2396 2398 #shutil.rmtree(tmpdir)
2397 2399 os.makedirs(tmpdir)
2398 2400 else:
2399 2401 d = None
2400 2402 if os.name == 'nt':
2401 2403 # without this, we get the default temp dir location, but
2402 2404 # in all lowercase, which causes trouble with paths (issue3490)
2403 2405 d = osenvironb.get(b'TMP', None)
2404 2406 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2405 2407
2406 2408 self._hgtmp = osenvironb[b'HGTMP'] = (
2407 2409 os.path.realpath(tmpdir))
2408 2410
2409 2411 if self.options.with_hg:
2410 2412 self._installdir = None
2411 2413 whg = self.options.with_hg
2412 2414 self._bindir = os.path.dirname(os.path.realpath(whg))
2413 2415 assert isinstance(self._bindir, bytes)
2414 2416 self._hgcommand = os.path.basename(whg)
2415 2417 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2416 2418 os.makedirs(self._tmpbindir)
2417 2419
2418 2420 # This looks redundant with how Python initializes sys.path from
2419 2421 # the location of the script being executed. Needed because the
2420 2422 # "hg" specified by --with-hg is not the only Python script
2421 2423 # executed in the test suite that needs to import 'mercurial'
2422 2424 # ... which means it's not really redundant at all.
2423 2425 self._pythondir = self._bindir
2424 2426 else:
2425 2427 self._installdir = os.path.join(self._hgtmp, b"install")
2426 2428 self._bindir = os.path.join(self._installdir, b"bin")
2427 2429 self._hgcommand = b'hg'
2428 2430 self._tmpbindir = self._bindir
2429 2431 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2430 2432
2431 2433 # set CHGHG, then replace "hg" command by "chg"
2432 2434 chgbindir = self._bindir
2433 2435 if self.options.chg or self.options.with_chg:
2434 2436 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2435 2437 else:
2436 2438 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2437 2439 if self.options.chg:
2438 2440 self._hgcommand = b'chg'
2439 2441 elif self.options.with_chg:
2440 2442 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2441 2443 self._hgcommand = os.path.basename(self.options.with_chg)
2442 2444
2443 2445 osenvironb[b"BINDIR"] = self._bindir
2444 2446 osenvironb[b"PYTHON"] = PYTHON
2445 2447
2446 2448 if self.options.with_python3:
2447 2449 osenvironb[b'PYTHON3'] = self.options.with_python3
2448 2450
2449 2451 fileb = _bytespath(__file__)
2450 2452 runtestdir = os.path.abspath(os.path.dirname(fileb))
2451 2453 osenvironb[b'RUNTESTDIR'] = runtestdir
2452 2454 if PYTHON3:
2453 2455 sepb = _bytespath(os.pathsep)
2454 2456 else:
2455 2457 sepb = os.pathsep
2456 2458 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2457 2459 if os.path.islink(__file__):
2458 2460 # test helper will likely be at the end of the symlink
2459 2461 realfile = os.path.realpath(fileb)
2460 2462 realdir = os.path.abspath(os.path.dirname(realfile))
2461 2463 path.insert(2, realdir)
2462 2464 if chgbindir != self._bindir:
2463 2465 path.insert(1, chgbindir)
2464 2466 if self._testdir != runtestdir:
2465 2467 path = [self._testdir] + path
2466 2468 if self._tmpbindir != self._bindir:
2467 2469 path = [self._tmpbindir] + path
2468 2470 osenvironb[b"PATH"] = sepb.join(path)
2469 2471
2470 2472 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2471 2473 # can run .../tests/run-tests.py test-foo where test-foo
2472 2474 # adds an extension to HGRC. Also include the run-tests.py directory to
2473 2475 # import modules like heredoctest.
2474 2476 pypath = [self._pythondir, self._testdir, runtestdir]
2475 2477 # We have to augment PYTHONPATH, rather than simply replacing
2476 2478 # it, in case external libraries are only available via current
2477 2479 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2478 2480 # are in /opt/subversion.)
2479 2481 oldpypath = osenvironb.get(IMPL_PATH)
2480 2482 if oldpypath:
2481 2483 pypath.append(oldpypath)
2482 2484 osenvironb[IMPL_PATH] = sepb.join(pypath)
2483 2485
2484 2486 if self.options.pure:
2485 2487 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2486 2488 os.environ["HGMODULEPOLICY"] = "py"
2487 2489
2488 2490 if self.options.allow_slow_tests:
2489 2491 os.environ["HGTEST_SLOW"] = "slow"
2490 2492 elif 'HGTEST_SLOW' in os.environ:
2491 2493 del os.environ['HGTEST_SLOW']
2492 2494
2493 2495 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2494 2496
2495 2497 vlog("# Using TESTDIR", self._testdir)
2496 2498 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2497 2499 vlog("# Using HGTMP", self._hgtmp)
2498 2500 vlog("# Using PATH", os.environ["PATH"])
2499 2501 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2500 2502 vlog("# Writing to directory", self._outputdir)
2501 2503
2502 2504 try:
2503 2505 return self._runtests(testdescs) or 0
2504 2506 finally:
2505 2507 time.sleep(.1)
2506 2508 self._cleanup()
2507 2509
2508 2510 def findtests(self, args):
2509 2511 """Finds possible test files from arguments.
2510 2512
2511 2513 If you wish to inject custom tests into the test harness, this would
2512 2514 be a good function to monkeypatch or override in a derived class.
2513 2515 """
2514 2516 if not args:
2515 2517 if self.options.changed:
2516 2518 proc = Popen4('hg st --rev "%s" -man0 .' %
2517 2519 self.options.changed, None, 0)
2518 2520 stdout, stderr = proc.communicate()
2519 2521 args = stdout.strip(b'\0').split(b'\0')
2520 2522 else:
2521 2523 args = os.listdir(b'.')
2522 2524
2523 2525 expanded_args = []
2524 2526 for arg in args:
2525 2527 if os.path.isdir(arg):
2526 2528 if not arg.endswith(b'/'):
2527 2529 arg += b'/'
2528 2530 expanded_args.extend([arg + a for a in os.listdir(arg)])
2529 2531 else:
2530 2532 expanded_args.append(arg)
2531 2533 args = expanded_args
2532 2534
2533 2535 tests = []
2534 2536 for t in args:
2535 2537 if not (os.path.basename(t).startswith(b'test-')
2536 2538 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2537 2539 continue
2538 2540 if t.endswith(b'.t'):
2539 2541 # .t file may contain multiple test cases
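# (cases are declared with a "#testcases case1 case2" line inside the .t
# file itself; parsettestcases(), defined above, is expected to collect them)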
2540 2542 cases = sorted(parsettestcases(t))
2541 2543 if cases:
2542 2544 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2543 2545 else:
2544 2546 tests.append({'path': t})
2545 2547 else:
2546 2548 tests.append({'path': t})
2547 2549 return tests
2548 2550
2549 2551 def _runtests(self, testdescs):
2550 2552 def _reloadtest(test, i):
2551 2553 # convert a test back to its description dict
2552 2554 desc = {'path': test.path}
2553 2555 case = getattr(test, '_case', None)
2554 2556 if case:
2555 2557 desc['case'] = case
2556 2558 return self._gettest(desc, i)
2557 2559
2558 2560 try:
2559 2561 if self.options.restart:
2560 2562 orig = list(testdescs)
2561 2563 while testdescs:
2562 2564 desc = testdescs[0]
2563 2565 # desc['path'] is a relative path
2564 2566 if 'case' in desc:
2565 2567 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2566 2568 else:
2567 2569 errpath = b'%s.err' % desc['path']
2568 2570 errpath = os.path.join(self._outputdir, errpath)
2569 2571 if os.path.exists(errpath):
2570 2572 break
2571 2573 testdescs.pop(0)
2572 2574 if not testdescs:
2573 2575 print("running all tests")
2574 2576 testdescs = orig
2575 2577
2576 2578 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2577 2579
2578 2580 failed = False
2579 2581 kws = self.options.keywords
2580 2582 if kws is not None and PYTHON3:
2581 2583 kws = kws.encode('utf-8')
2582 2584
2583 2585 suite = TestSuite(self._testdir,
2584 2586 jobs=self.options.jobs,
2585 2587 whitelist=self.options.whitelisted,
2586 2588 blacklist=self.options.blacklist,
2587 2589 retest=self.options.retest,
2588 2590 keywords=kws,
2589 2591 loop=self.options.loop,
2590 2592 runs_per_test=self.options.runs_per_test,
2591 2593 showchannels=self.options.showchannels,
2592 2594 tests=tests, loadtest=_reloadtest)
2593 2595 verbosity = 1
2594 2596 if self.options.verbose:
2595 2597 verbosity = 2
2596 2598 runner = TextTestRunner(self, verbosity=verbosity)
2597 2599
2598 2600 if self.options.list_tests:
2599 2601 result = runner.listtests(suite)
2600 2602 else:
2601 2603 if self._installdir:
2602 2604 self._installhg()
2603 2605 self._checkhglib("Testing")
2604 2606 else:
2605 2607 self._usecorrectpython()
2606 2608 if self.options.chg:
2607 2609 assert self._installdir
2608 2610 self._installchg()
2609 2611
2610 2612 result = runner.run(suite)
2611 2613
2612 2614 if result.failures:
2613 2615 failed = True
2614 2616
2615 2617 if self.options.anycoverage:
2616 2618 self._outputcoverage()
2617 2619 except KeyboardInterrupt:
2618 2620 failed = True
2619 2621 print("\ninterrupted!")
2620 2622
2621 2623 if failed:
2622 2624 return 1
2623 2625
2624 2626 def _getport(self, count):
2625 2627 port = self._ports.get(count) # do we have a cached entry?
2626 2628 if port is None:
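# Each test is handed a block of three consecutive ports (portneeded)
# so that it can run several servers at once; the loop below only
# accepts a base port when the whole block is free.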
2627 2629 portneeded = 3
2629 2631 # after 100 tries we just give up and let the test report the failure
2629 2631 for tries in xrange(100):
2630 2632 allfree = True
2631 2633 port = self.options.port + self._portoffset
2632 2634 for idx in xrange(portneeded):
2633 2635 if not checkportisavailable(port + idx):
2634 2636 allfree = False
2635 2637 break
2636 2638 self._portoffset += portneeded
2637 2639 if allfree:
2638 2640 break
2639 2641 self._ports[count] = port
2640 2642 return port
2641 2643
2642 2644 def _gettest(self, testdesc, count):
2643 2645 """Obtain a Test by looking at its filename.
2644 2646
2645 2647 Returns a Test instance. The Test may not be runnable if it doesn't
2646 2648 map to a known type.
2647 2649 """
2648 2650 path = testdesc['path']
2649 2651 lctest = path.lower()
2650 2652 testcls = Test
2651 2653
2652 2654 for ext, cls in self.TESTTYPES:
2653 2655 if lctest.endswith(ext):
2654 2656 testcls = cls
2655 2657 break
2656 2658
2657 2659 refpath = os.path.join(self._testdir, path)
2658 2660 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2659 2661
2660 2662 # extra keyword parameters. 'case' is used by .t tests
2661 2663 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2662 2664
2663 2665 t = testcls(refpath, self._outputdir, tmpdir,
2664 2666 keeptmpdir=self.options.keep_tmpdir,
2665 2667 debug=self.options.debug,
2666 2668 timeout=self.options.timeout,
2667 2669 startport=self._getport(count),
2668 2670 extraconfigopts=self.options.extra_config_opt,
2669 2671 py3kwarnings=self.options.py3k_warnings,
2670 2672 shell=self.options.shell,
2671 2673 hgcommand=self._hgcommand,
2672 2674 usechg=bool(self.options.with_chg or self.options.chg),
2673 2675 useipv6=useipv6, **kwds)
2674 2676 t.should_reload = True
2675 2677 return t
2676 2678
2677 2679 def _cleanup(self):
2678 2680 """Clean up state from this test invocation."""
2679 2681 if self.options.keep_tmpdir:
2680 2682 return
2681 2683
2682 2684 vlog("# Cleaning up HGTMP", self._hgtmp)
2683 2685 shutil.rmtree(self._hgtmp, True)
2684 2686 for f in self._createdfiles:
2685 2687 try:
2686 2688 os.remove(f)
2687 2689 except OSError:
2688 2690 pass
2689 2691
2690 2692 def _usecorrectpython(self):
2691 2693 """Configure the environment to use the appropriate Python in tests."""
2692 2694 # Tests must use the same interpreter as us or bad things will happen.
2693 2695 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2694 2696 if getattr(os, 'symlink', None):
2695 2697 vlog("# Making python executable in test path a symlink to '%s'" %
2696 2698 sys.executable)
2697 2699 mypython = os.path.join(self._tmpbindir, pyexename)
2698 2700 try:
2699 2701 if os.readlink(mypython) == sys.executable:
2700 2702 return
2701 2703 os.unlink(mypython)
2702 2704 except OSError as err:
2703 2705 if err.errno != errno.ENOENT:
2704 2706 raise
2705 2707 if self._findprogram(pyexename) != sys.executable:
2706 2708 try:
2707 2709 os.symlink(sys.executable, mypython)
2708 2710 self._createdfiles.append(mypython)
2709 2711 except OSError as err:
2710 2712 # child processes may race, which is harmless
2711 2713 if err.errno != errno.EEXIST:
2712 2714 raise
2713 2715 else:
2714 2716 exedir, exename = os.path.split(sys.executable)
2715 2717 vlog("# Modifying search path to find %s as %s in '%s'" %
2716 2718 (exename, pyexename, exedir))
2717 2719 path = os.environ['PATH'].split(os.pathsep)
2718 2720 while exedir in path:
2719 2721 path.remove(exedir)
2720 2722 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2721 2723 if not self._findprogram(pyexename):
2722 2724 print("WARNING: Cannot find %s in search path" % pyexename)
2723 2725
2724 2726 def _installhg(self):
2725 2727 """Install hg into the test environment.
2726 2728
2727 2729 This will also configure hg with the appropriate testing settings.
2728 2730 """
2729 2731 vlog("# Performing temporary installation of HG")
2730 2732 installerrs = os.path.join(self._hgtmp, b"install.err")
2731 2733 compiler = ''
2732 2734 if self.options.compiler:
2733 2735 compiler = '--compiler ' + self.options.compiler
2734 2736 if self.options.pure:
2735 2737 pure = b"--pure"
2736 2738 else:
2737 2739 pure = b""
2738 2740
2739 2741 # Run installer in hg root
2740 2742 script = os.path.realpath(sys.argv[0])
2741 2743 exe = sys.executable
2742 2744 if PYTHON3:
2743 2745 compiler = _bytespath(compiler)
2744 2746 script = _bytespath(script)
2745 2747 exe = _bytespath(exe)
2746 2748 hgroot = os.path.dirname(os.path.dirname(script))
2747 2749 self._hgroot = hgroot
2748 2750 os.chdir(hgroot)
2749 2751 nohome = b'--home=""'
2750 2752 if os.name == 'nt':
2751 2753 # The --home="" trick works only on OSes where os.sep == '/'
2752 2754 # because of a distutils convert_path() fast-path. Avoid it at
2753 2755 # least on Windows for now, deal with .pydistutils.cfg bugs
2754 2756 # when they happen.
2755 2757 nohome = b''
2756 2758 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2757 2759 b' build %(compiler)s --build-base="%(base)s"'
2758 2760 b' install --force --prefix="%(prefix)s"'
2759 2761 b' --install-lib="%(libdir)s"'
2760 2762 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2761 2763 % {b'exe': exe, b'pure': pure,
2762 2764 b'compiler': compiler,
2763 2765 b'base': os.path.join(self._hgtmp, b"build"),
2764 2766 b'prefix': self._installdir, b'libdir': self._pythondir,
2765 2767 b'bindir': self._bindir,
2766 2768 b'nohome': nohome, b'logfile': installerrs})
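# Expanded, the command looks roughly like this (illustrative, --pure run
# on a Unix-like system):
#   python setup.py --pure clean --all build --build-base="$HGTMP/build"
#     install --force --prefix="$HGTMP/install"
#     --install-lib="$HGTMP/install/lib/python"
#     --install-scripts="$HGTMP/install/bin" --home="" >$HGTMP/install.err 2>&1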
2767 2769
2768 2770 # setuptools requires install directories to exist.
2769 2771 def makedirs(p):
2770 2772 try:
2771 2773 os.makedirs(p)
2772 2774 except OSError as e:
2773 2775 if e.errno != errno.EEXIST:
2774 2776 raise
2775 2777 makedirs(self._pythondir)
2776 2778 makedirs(self._bindir)
2777 2779
2778 2780 vlog("# Running", cmd)
2779 2781 if os.system(cmd) == 0:
2780 2782 if not self.options.verbose:
2781 2783 try:
2782 2784 os.remove(installerrs)
2783 2785 except OSError as e:
2784 2786 if e.errno != errno.ENOENT:
2785 2787 raise
2786 2788 else:
2787 2789 f = open(installerrs, 'rb')
2788 2790 for line in f:
2789 2791 if PYTHON3:
2790 2792 sys.stdout.buffer.write(line)
2791 2793 else:
2792 2794 sys.stdout.write(line)
2793 2795 f.close()
2794 2796 sys.exit(1)
2795 2797 os.chdir(self._testdir)
2796 2798
2797 2799 self._usecorrectpython()
2798 2800
2799 2801 if self.options.py3k_warnings and not self.options.anycoverage:
2800 2802 vlog("# Updating hg command to enable Py3k Warnings switch")
2801 2803 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2802 2804 lines = [line.rstrip() for line in f]
2803 2805 lines[0] += ' -3'
2804 2806 f.close()
2805 2807 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2806 2808 for line in lines:
2807 2809 f.write(line + '\n')
2808 2810 f.close()
2809 2811
2810 2812 hgbat = os.path.join(self._bindir, b'hg.bat')
2811 2813 if os.path.isfile(hgbat):
2812 2814 # hg.bat expects to be put in bin/scripts while the run-tests.py
2813 2815 # installation layout puts it in bin/ directly. Fix it.
2814 2816 f = open(hgbat, 'rb')
2815 2817 data = f.read()
2816 2818 f.close()
2817 2819 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2818 2820 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2819 2821 b'"%~dp0python" "%~dp0hg" %*')
2820 2822 f = open(hgbat, 'wb')
2821 2823 f.write(data)
2822 2824 f.close()
2823 2825 else:
2824 2826 print('WARNING: cannot fix hg.bat reference to python.exe')
2825 2827
2826 2828 if self.options.anycoverage:
2827 2829 custom = os.path.join(self._testdir, 'sitecustomize.py')
2828 2830 target = os.path.join(self._pythondir, 'sitecustomize.py')
2829 2831 vlog('# Installing coverage trigger to %s' % target)
2830 2832 shutil.copyfile(custom, target)
2831 2833 rc = os.path.join(self._testdir, '.coveragerc')
2832 2834 vlog('# Installing coverage rc to %s' % rc)
2833 2835 os.environ['COVERAGE_PROCESS_START'] = rc
2834 2836 covdir = os.path.join(self._installdir, '..', 'coverage')
2835 2837 try:
2836 2838 os.mkdir(covdir)
2837 2839 except OSError as e:
2838 2840 if e.errno != errno.EEXIST:
2839 2841 raise
2840 2842
2841 2843 os.environ['COVERAGE_DIR'] = covdir
2842 2844
2843 2845 def _checkhglib(self, verb):
2844 2846 """Ensure that the 'mercurial' package imported by python is
2845 2847 the one we expect it to be. If not, print a warning to stderr."""
2846 2848 if ((self._bindir == self._pythondir) and
2847 2849 (self._bindir != self._tmpbindir)):
2848 2850 # The pythondir has been inferred from --with-hg flag.
2849 2851 # We cannot expect anything sensible here.
2850 2852 return
2851 2853 expecthg = os.path.join(self._pythondir, b'mercurial')
2852 2854 actualhg = self._gethgpath()
2853 2855 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2854 2856 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2855 2857 ' (expected %s)\n'
2856 2858 % (verb, actualhg, expecthg))
2857 2859 def _gethgpath(self):
2858 2860 """Return the path to the mercurial package that is actually found by
2859 2861 the current Python interpreter."""
2860 2862 if self._hgpath is not None:
2861 2863 return self._hgpath
2862 2864
2863 2865 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2864 2866 cmd = cmd % PYTHON
2865 2867 if PYTHON3:
2866 2868 cmd = _strpath(cmd)
2867 2869 pipe = os.popen(cmd)
2868 2870 try:
2869 2871 self._hgpath = _bytespath(pipe.read().strip())
2870 2872 finally:
2871 2873 pipe.close()
2872 2874
2873 2875 return self._hgpath
2874 2876
2875 2877 def _installchg(self):
2876 2878 """Install chg into the test environment"""
2877 2879 vlog('# Performing temporary installation of CHG')
2878 2880 assert os.path.dirname(self._bindir) == self._installdir
2879 2881 assert self._hgroot, 'must be called after _installhg()'
2880 2882 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2881 2883 % {b'make': 'make', # TODO: switch by option or environment?
2882 2884 b'prefix': self._installdir})
2883 2885 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2884 2886 vlog("# Running", cmd)
2885 2887 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2886 2888 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2887 2889 stderr=subprocess.STDOUT)
2888 2890 out, _err = proc.communicate()
2889 2891 if proc.returncode != 0:
2890 2892 if PYTHON3:
2891 2893 sys.stdout.buffer.write(out)
2892 2894 else:
2893 2895 sys.stdout.write(out)
2894 2896 sys.exit(1)
2895 2897
2896 2898 def _outputcoverage(self):
2897 2899 """Produce code coverage output."""
2898 2900 import coverage
2899 2901 coverage = coverage.coverage
2900 2902
2901 2903 vlog('# Producing coverage report')
2902 2904 # chdir is the easiest way to get short, relative paths in the
2903 2905 # output.
2904 2906 os.chdir(self._hgroot)
2905 2907 covdir = os.path.join(self._installdir, '..', 'coverage')
2906 2908 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2907 2909
2908 2910 # Map install directory paths back to source directory.
2909 2911 cov.config.paths['srcdir'] = ['.', self._pythondir]
2910 2912
2911 2913 cov.combine()
2912 2914
2913 2915 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2914 2916 cov.report(ignore_errors=True, omit=omit)
2915 2917
2916 2918 if self.options.htmlcov:
2917 2919 htmldir = os.path.join(self._outputdir, 'htmlcov')
2918 2920 cov.html_report(directory=htmldir, omit=omit)
2919 2921 if self.options.annotate:
2920 2922 adir = os.path.join(self._outputdir, 'annotated')
2921 2923 if not os.path.isdir(adir):
2922 2924 os.mkdir(adir)
2923 2925 cov.annotate(directory=adir, omit=omit)
2924 2926
2925 2927 def _findprogram(self, program):
2926 2928 """Search PATH for a executable program"""
2927 2929 dpb = _bytespath(os.defpath)
2928 2930 sepb = _bytespath(os.pathsep)
2929 2931 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2930 2932 name = os.path.join(p, program)
2931 2933 if os.name == 'nt' or os.access(name, os.X_OK):
2932 2934 return name
2933 2935 return None
2934 2936
2935 2937 def _checktools(self):
2936 2938 """Ensure tools required to run tests are present."""
2937 2939 for p in self.REQUIREDTOOLS:
2938 2940 if os.name == 'nt' and not p.endswith('.exe'):
2939 2941 p += '.exe'
2940 2942 found = self._findprogram(p)
2941 2943 if found:
2942 2944 vlog("# Found prerequisite", p, "at", found)
2943 2945 else:
2944 2946 print("WARNING: Did not find prerequisite tool: %s " %
2945 2947 p.decode("utf-8"))
2946 2948
2947 2949 if __name__ == '__main__':
2948 2950 runner = TestRunner()
2949 2951
2950 2952 try:
2951 2953 import msvcrt
2952 2954 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2953 2955 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2954 2956 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2955 2957 except ImportError:
2956 2958 pass
2957 2959
2958 2960 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1545 +1,1563 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a successful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 test diff colorisation
123 123
124 124 #if no-windows pygments
125 125 $ rt test-failure.t --color always
126 126
127 127 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
128 128 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
129 129 \x1b[38;5;90;01m@@ -1,3 +1,3 @@\x1b[39;00m (esc)
130 130 $ echo "bar-baz"; echo "bar-bad"
131 131 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
132 132 bar*bad (glob)
133 133 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
134 134
135 135 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
136 136 !
137 137 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
138 138 # Ran 1 tests, 0 skipped, 1 failed.
139 139 python hash seed: * (glob)
140 140 [1]
141 141
142 142 $ rt test-failure.t 2> tmp.log
143 143 [1]
144 144 $ cat tmp.log
145 145
146 146 --- $TESTTMP/test-failure.t
147 147 +++ $TESTTMP/test-failure.t.err
148 148 @@ -1,3 +1,3 @@
149 149 $ echo "bar-baz"; echo "bar-bad"
150 150 + bar*baz (glob)
151 151 bar*bad (glob)
152 152 - bar*baz (glob)
153 153
154 154 ERROR: test-failure.t output changed
155 155 !
156 156 Failed test-failure.t: output changed
157 157 # Ran 1 tests, 0 skipped, 1 failed.
158 158 python hash seed: * (glob)
159 159 #endif
160 160
161 161 $ cat > test-failure.t << EOF
162 162 > $ true
163 163 > should go away (true !)
164 164 > $ true
165 165 > should stay (false !)
166 166 >
167 167 > Should remove first line, not second or third
168 168 > $ echo 'testing'
169 169 > baz*foo (glob) (true !)
170 170 > foobar*foo (glob) (false !)
171 171 > te*ting (glob) (true !)
172 172 >
173 173 > Should keep first two lines, remove third and last
174 174 > $ echo 'testing'
175 175 > test.ng (re) (true !)
176 176 > foo.ar (re) (false !)
177 177 > b.r (re) (true !)
178 178 > missing (?)
179 179 > awol (true !)
180 180 >
181 181 > The "missing" line should stay, even though awol is dropped
182 182 > $ echo 'testing'
183 183 > test.ng (re) (true !)
184 184 > foo.ar (?)
185 185 > awol
186 186 > missing (?)
187 187 > EOF
188 188 $ rt test-failure.t
189 189
190 190 --- $TESTTMP/test-failure.t
191 191 +++ $TESTTMP/test-failure.t.err
192 192 @@ -1,11 +1,9 @@
193 193 $ true
194 194 - should go away (true !)
195 195 $ true
196 196 should stay (false !)
197 197
198 198 Should remove first line, not second or third
199 199 $ echo 'testing'
200 200 - baz*foo (glob) (true !)
201 201 foobar*foo (glob) (false !)
202 202 te*ting (glob) (true !)
203 203
204 204 foo.ar (re) (false !)
205 205 missing (?)
206 206 @@ -13,13 +11,10 @@
207 207 $ echo 'testing'
208 208 test.ng (re) (true !)
209 209 foo.ar (re) (false !)
210 210 - b.r (re) (true !)
211 211 missing (?)
212 212 - awol (true !)
213 213
214 214 The "missing" line should stay, even though awol is dropped
215 215 $ echo 'testing'
216 216 test.ng (re) (true !)
217 217 foo.ar (?)
218 218 - awol
219 219 missing (?)
220 220
221 221 ERROR: test-failure.t output changed
222 222 !
223 223 Failed test-failure.t: output changed
224 224 # Ran 1 tests, 0 skipped, 1 failed.
225 225 python hash seed: * (glob)
226 226 [1]
227 227
228 228 basic failing test
229 229 $ cat > test-failure.t << EOF
230 230 > $ echo babar
231 231 > rataxes
232 232 > This is a noop statement so that
233 233 > this test is still more bytes than success.
234 234 > pad pad pad pad............................................................
235 235 > pad pad pad pad............................................................
236 236 > pad pad pad pad............................................................
237 237 > pad pad pad pad............................................................
238 238 > pad pad pad pad............................................................
239 239 > pad pad pad pad............................................................
240 240 > EOF
241 241
242 242 >>> fh = open('test-failure-unicode.t', 'wb')
243 243 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
244 244 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
245 245
246 246 $ rt
247 247
248 248 --- $TESTTMP/test-failure.t
249 249 +++ $TESTTMP/test-failure.t.err
250 250 @@ -1,5 +1,5 @@
251 251 $ echo babar
252 252 - rataxes
253 253 + babar
254 254 This is a noop statement so that
255 255 this test is still more bytes than success.
256 256 pad pad pad pad............................................................
257 257
258 258 ERROR: test-failure.t output changed
259 259 !.
260 260 --- $TESTTMP/test-failure-unicode.t
261 261 +++ $TESTTMP/test-failure-unicode.t.err
262 262 @@ -1,2 +1,2 @@
263 263 $ echo babar\xce\xb1 (esc)
264 264 - l\xce\xb5\xce\xb5t (esc)
265 265 + babar\xce\xb1 (esc)
266 266
267 267 ERROR: test-failure-unicode.t output changed
268 268 !
269 269 Failed test-failure.t: output changed
270 270 Failed test-failure-unicode.t: output changed
271 271 # Ran 3 tests, 0 skipped, 2 failed.
272 272 python hash seed: * (glob)
273 273 [1]
274 274
275 275 test --outputdir
276 276 $ mkdir output
277 277 $ rt --outputdir output
278 278
279 279 --- $TESTTMP/test-failure.t
280 280 +++ $TESTTMP/output/test-failure.t.err
281 281 @@ -1,5 +1,5 @@
282 282 $ echo babar
283 283 - rataxes
284 284 + babar
285 285 This is a noop statement so that
286 286 this test is still more bytes than success.
287 287 pad pad pad pad............................................................
288 288
289 289 ERROR: test-failure.t output changed
290 290 !.
291 291 --- $TESTTMP/test-failure-unicode.t
292 292 +++ $TESTTMP/output/test-failure-unicode.t.err
293 293 @@ -1,2 +1,2 @@
294 294 $ echo babar\xce\xb1 (esc)
295 295 - l\xce\xb5\xce\xb5t (esc)
296 296 + babar\xce\xb1 (esc)
297 297
298 298 ERROR: test-failure-unicode.t output changed
299 299 !
300 300 Failed test-failure.t: output changed
301 301 Failed test-failure-unicode.t: output changed
302 302 # Ran 3 tests, 0 skipped, 2 failed.
303 303 python hash seed: * (glob)
304 304 [1]
305 305 $ ls -a output
306 306 .
307 307 ..
308 308 .testtimes
309 309 test-failure-unicode.t.err
310 310 test-failure.t.err
311 311
312 312 test --xunit support
313 313 $ rt --xunit=xunit.xml
314 314
315 315 --- $TESTTMP/test-failure.t
316 316 +++ $TESTTMP/test-failure.t.err
317 317 @@ -1,5 +1,5 @@
318 318 $ echo babar
319 319 - rataxes
320 320 + babar
321 321 This is a noop statement so that
322 322 this test is still more bytes than success.
323 323 pad pad pad pad............................................................
324 324
325 325 ERROR: test-failure.t output changed
326 326 !.
327 327 --- $TESTTMP/test-failure-unicode.t
328 328 +++ $TESTTMP/test-failure-unicode.t.err
329 329 @@ -1,2 +1,2 @@
330 330 $ echo babar\xce\xb1 (esc)
331 331 - l\xce\xb5\xce\xb5t (esc)
332 332 + babar\xce\xb1 (esc)
333 333
334 334 ERROR: test-failure-unicode.t output changed
335 335 !
336 336 Failed test-failure.t: output changed
337 337 Failed test-failure-unicode.t: output changed
338 338 # Ran 3 tests, 0 skipped, 2 failed.
339 339 python hash seed: * (glob)
340 340 [1]
341 341 $ cat xunit.xml
342 342 <?xml version="1.0" encoding="utf-8"?>
343 343 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
344 344 <testcase name="test-success.t" time="*"/> (glob)
345 345 <testcase name="test-failure-unicode.t" time="*"> (glob)
346 346 <failure message="output changed" type="output-mismatch">
347 347 <![CDATA[--- $TESTTMP/test-failure-unicode.t
348 348 +++ $TESTTMP/test-failure-unicode.t.err
349 349 @@ -1,2 +1,2 @@
350 350 $ echo babar\xce\xb1 (esc)
351 351 - l\xce\xb5\xce\xb5t (esc)
352 352 + babar\xce\xb1 (esc)
353 353 ]]> </failure>
354 354 </testcase>
355 355 <testcase name="test-failure.t" time="*"> (glob)
356 356 <failure message="output changed" type="output-mismatch">
357 357 <![CDATA[--- $TESTTMP/test-failure.t
358 358 +++ $TESTTMP/test-failure.t.err
359 359 @@ -1,5 +1,5 @@
360 360 $ echo babar
361 361 - rataxes
362 362 + babar
363 363 This is a noop statement so that
364 364 this test is still more bytes than success.
365 365 pad pad pad pad............................................................
366 366 ]]> </failure>
367 367 </testcase>
368 368 </testsuite>
369 369
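As a sketch of how a third-party tool might consume the xunit file produced
above, the snippet below lists the failing test cases; it assumes only the
element layout visible in the cat output (a <testsuite> root containing
<testcase> elements, with a <failure> child on failures).

    import xml.etree.ElementTree as ET

    root = ET.parse('xunit.xml').getroot()          # the <testsuite> element
    for case in root.findall('testcase'):
        failure = case.find('failure')
        if failure is not None:
            # failure.text carries the diff that run-tests printed
            print(case.get('name'), '-', failure.get('message'))
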
370 370 $ cat .testtimes
371 371 test-failure-unicode.t * (glob)
372 372 test-failure.t * (glob)
373 373 test-success.t * (glob)
374 374
375 375 $ rt --list-tests
376 376 test-failure-unicode.t
377 377 test-failure.t
378 378 test-success.t
379 379
380 380 $ rt --list-tests --json
381 381 test-failure-unicode.t
382 382 test-failure.t
383 383 test-success.t
384 384 $ cat report.json
385 385 testreport ={
386 386 "test-failure-unicode.t": {
387 387 "result": "success"
388 388 },
389 389 "test-failure.t": {
390 390 "result": "success"
391 391 },
392 392 "test-success.t": {
393 393 "result": "success"
394 394 }
395 395 } (no-eol)
396 396
397 397 $ rt --list-tests --xunit=xunit.xml
398 398 test-failure-unicode.t
399 399 test-failure.t
400 400 test-success.t
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
404 404 <testcase name="test-failure-unicode.t"/>
405 405 <testcase name="test-failure.t"/>
406 406 <testcase name="test-success.t"/>
407 407 </testsuite>
408 408
409 409 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
410 410 test-failure-unicode.t
411 411 test-failure.t
412 412 $ cat output/report.json
413 413 testreport ={
414 414 "test-failure-unicode.t": {
415 415 "result": "success"
416 416 },
417 417 "test-failure.t": {
418 418 "result": "success"
419 419 }
420 420 } (no-eol)
421 421 $ cat xunit.xml
422 422 <?xml version="1.0" encoding="utf-8"?>
423 423 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
424 424 <testcase name="test-failure-unicode.t"/>
425 425 <testcase name="test-failure.t"/>
426 426 </testsuite>
427 427
428 428 $ rm test-failure-unicode.t
429 429
430 430 test for --retest
431 431 ====================
432 432
433 433 $ rt --retest
434 434
435 435 --- $TESTTMP/test-failure.t
436 436 +++ $TESTTMP/test-failure.t.err
437 437 @@ -1,5 +1,5 @@
438 438 $ echo babar
439 439 - rataxes
440 440 + babar
441 441 This is a noop statement so that
442 442 this test is still more bytes than success.
443 443 pad pad pad pad............................................................
444 444
445 445 ERROR: test-failure.t output changed
446 446 !
447 447 Failed test-failure.t: output changed
448 448 # Ran 2 tests, 1 skipped, 1 failed.
449 449 python hash seed: * (glob)
450 450 [1]
451 451
452 452 --retest works with --outputdir
453 453 $ rm -r output
454 454 $ mkdir output
455 455 $ mv test-failure.t.err output
456 456 $ rt --retest --outputdir output
457 457
458 458 --- $TESTTMP/test-failure.t
459 459 +++ $TESTTMP/output/test-failure.t.err
460 460 @@ -1,5 +1,5 @@
461 461 $ echo babar
462 462 - rataxes
463 463 + babar
464 464 This is a noop statement so that
465 465 this test is still more bytes than success.
466 466 pad pad pad pad............................................................
467 467
468 468 ERROR: test-failure.t output changed
469 469 !
470 470 Failed test-failure.t: output changed
471 471 # Ran 2 tests, 1 skipped, 1 failed.
472 472 python hash seed: * (glob)
473 473 [1]
474 474
475 475 Selecting Tests To Run
476 476 ======================
477 477
478 478 successful
479 479
480 480 $ rt test-success.t
481 481 .
482 482 # Ran 1 tests, 0 skipped, 0 failed.
483 483
484 484 success w/ keyword
485 485 $ rt -k xyzzy
486 486 .
487 487 # Ran 2 tests, 1 skipped, 0 failed.
488 488
489 489 failed
490 490
491 491 $ rt test-failure.t
492 492
493 493 --- $TESTTMP/test-failure.t
494 494 +++ $TESTTMP/test-failure.t.err
495 495 @@ -1,5 +1,5 @@
496 496 $ echo babar
497 497 - rataxes
498 498 + babar
499 499 This is a noop statement so that
500 500 this test is still more bytes than success.
501 501 pad pad pad pad............................................................
502 502
503 503 ERROR: test-failure.t output changed
504 504 !
505 505 Failed test-failure.t: output changed
506 506 # Ran 1 tests, 0 skipped, 1 failed.
507 507 python hash seed: * (glob)
508 508 [1]
509 509
510 510 failure w/ keyword
511 511 $ rt -k rataxes
512 512
513 513 --- $TESTTMP/test-failure.t
514 514 +++ $TESTTMP/test-failure.t.err
515 515 @@ -1,5 +1,5 @@
516 516 $ echo babar
517 517 - rataxes
518 518 + babar
519 519 This is a noop statement so that
520 520 this test is still more bytes than success.
521 521 pad pad pad pad............................................................
522 522
523 523 ERROR: test-failure.t output changed
524 524 !
525 525 Failed test-failure.t: output changed
526 526 # Ran 2 tests, 1 skipped, 1 failed.
527 527 python hash seed: * (glob)
528 528 [1]
529 529
530 530 Verify that when a process fails to start we show a useful message
531 531 ==================================================================
532 532
533 533 $ cat > test-serve-fail.t <<EOF
534 534 > $ echo 'abort: child process failed to start blah'
535 535 > EOF
536 536 $ rt test-serve-fail.t
537 537
538 538 ERROR: test-serve-fail.t output changed
539 539 !
540 540 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
541 541 # Ran 1 tests, 0 skipped, 1 failed.
542 542 python hash seed: * (glob)
543 543 [1]
544 544 $ rm test-serve-fail.t
545 545
546 546 Verify that we can try other ports
547 547 ===================================
548 548 $ hg init inuse
549 549 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
550 550 $ cat blocks.pid >> $DAEMON_PIDS
551 551 $ cat > test-serve-inuse.t <<EOF
552 552 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
553 553 > $ cat hg.pid >> \$DAEMON_PIDS
554 554 > EOF
555 555 $ rt test-serve-inuse.t
556 556 .
557 557 # Ran 1 tests, 0 skipped, 0 failed.
558 558 $ rm test-serve-inuse.t
559 559 $ killdaemons.py $DAEMON_PIDS
560 560 $ rm $DAEMON_PIDS
561 561
562 562 Running In Debug Mode
563 563 ======================
564 564
565 565 $ rt --debug 2>&1 | grep -v pwd
566 566 + echo *SALT* 0 0 (glob)
567 567 *SALT* 0 0 (glob)
568 568 + echo babar
569 569 babar
570 570 + echo *SALT* 10 0 (glob)
571 571 *SALT* 10 0 (glob)
572 572 *+ echo *SALT* 0 0 (glob)
573 573 *SALT* 0 0 (glob)
574 574 + echo babar
575 575 babar
576 576 + echo *SALT* 2 0 (glob)
577 577 *SALT* 2 0 (glob)
578 578 + echo xyzzy
579 579 xyzzy
580 580 + echo *SALT* 9 0 (glob)
581 581 *SALT* 9 0 (glob)
582 582 + printf *abc\ndef\nxyz\n* (glob)
583 583 abc
584 584 def
585 585 xyz
586 586 + echo *SALT* 15 0 (glob)
587 587 *SALT* 15 0 (glob)
588 588 + printf *zyx\nwvu\ntsr\n* (glob)
589 589 zyx
590 590 wvu
591 591 tsr
592 592 + echo *SALT* 22 0 (glob)
593 593 *SALT* 22 0 (glob)
594 594 .
595 595 # Ran 2 tests, 0 skipped, 0 failed.
596 596
597 597 Parallel runs
598 598 ==============
599 599
600 600 (duplicate the failing test to get predictable output)
601 601 $ cp test-failure.t test-failure-copy.t
602 602
603 603 $ rt --jobs 2 test-failure*.t -n
604 604 !!
605 605 Failed test-failure*.t: output changed (glob)
606 606 Failed test-failure*.t: output changed (glob)
607 607 # Ran 2 tests, 0 skipped, 2 failed.
608 608 python hash seed: * (glob)
609 609 [1]
610 610
611 611 failures in parallel with --first should only print one failure
612 612 $ rt --jobs 2 --first test-failure*.t
613 613
614 614 --- $TESTTMP/test-failure*.t (glob)
615 615 +++ $TESTTMP/test-failure*.t.err (glob)
616 616 @@ -1,5 +1,5 @@
617 617 $ echo babar
618 618 - rataxes
619 619 + babar
620 620 This is a noop statement so that
621 621 this test is still more bytes than success.
622 622 pad pad pad pad............................................................
623 623
624 624 Failed test-failure*.t: output changed (glob)
625 625 Failed test-failure*.t: output changed (glob)
626 626 # Ran 2 tests, 0 skipped, 2 failed.
627 627 python hash seed: * (glob)
628 628 [1]
629 629
630 630
631 631 (delete the duplicated test file)
632 632 $ rm test-failure-copy.t
633 633
634 634
635 635 Interactive run
636 636 ===============
637 637
638 638 (backup the failing test)
639 639 $ cp test-failure.t backup
640 640
641 641 Refuse the fix
642 642
643 643 $ echo 'n' | rt -i
644 644
645 645 --- $TESTTMP/test-failure.t
646 646 +++ $TESTTMP/test-failure.t.err
647 647 @@ -1,5 +1,5 @@
648 648 $ echo babar
649 649 - rataxes
650 650 + babar
651 651 This is a noop statement so that
652 652 this test is still more bytes than success.
653 653 pad pad pad pad............................................................
654 654 Accept this change? [n]
655 655 ERROR: test-failure.t output changed
656 656 !.
657 657 Failed test-failure.t: output changed
658 658 # Ran 2 tests, 0 skipped, 1 failed.
659 659 python hash seed: * (glob)
660 660 [1]
661 661
662 662 $ cat test-failure.t
663 663 $ echo babar
664 664 rataxes
665 665 This is a noop statement so that
666 666 this test is still more bytes than success.
667 667 pad pad pad pad............................................................
668 668 pad pad pad pad............................................................
669 669 pad pad pad pad............................................................
670 670 pad pad pad pad............................................................
671 671 pad pad pad pad............................................................
672 672 pad pad pad pad............................................................
673 673
674 674 Interactive with custom view
675 675
676 676 $ echo 'n' | rt -i --view echo
677 677 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
678 678 Accept this change? [n]* (glob)
679 679 ERROR: test-failure.t output changed
680 680 !.
681 681 Failed test-failure.t: output changed
682 682 # Ran 2 tests, 0 skipped, 1 failed.
683 683 python hash seed: * (glob)
684 684 [1]
685 685
686 686 View the fix
687 687
688 688 $ echo 'y' | rt --view echo
689 689 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
690 690
691 691 ERROR: test-failure.t output changed
692 692 !.
693 693 Failed test-failure.t: output changed
694 694 # Ran 2 tests, 0 skipped, 1 failed.
695 695 python hash seed: * (glob)
696 696 [1]
697 697
698 698 Accept the fix
699 699
700 700 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
701 701 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
702 702 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
703 703 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
704 704 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
705 705 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
706 706 $ echo 'y' | rt -i 2>&1
707 707
708 708 --- $TESTTMP/test-failure.t
709 709 +++ $TESTTMP/test-failure.t.err
710 710 @@ -1,5 +1,5 @@
711 711 $ echo babar
712 712 - rataxes
713 713 + babar
714 714 This is a noop statement so that
715 715 this test is still more bytes than success.
716 716 pad pad pad pad............................................................
717 717 @@ -9,7 +9,7 @@
718 718 pad pad pad pad............................................................
719 719 pad pad pad pad............................................................
720 720 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
721 721 - saved backup bundle to $TESTTMP/foo.hg
722 722 + saved backup bundle to $TESTTMP/foo.hg* (glob)
723 723 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
724 724 saved backup bundle to $TESTTMP/foo.hg* (glob)
725 725 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
726 726 Accept this change? [n] ..
727 727 # Ran 2 tests, 0 skipped, 0 failed.
728 728
729 729 $ sed -e 's,(glob)$,&<,g' test-failure.t
730 730 $ echo babar
731 731 babar
732 732 This is a noop statement so that
733 733 this test is still more bytes than success.
734 734 pad pad pad pad............................................................
735 735 pad pad pad pad............................................................
736 736 pad pad pad pad............................................................
737 737 pad pad pad pad............................................................
738 738 pad pad pad pad............................................................
739 739 pad pad pad pad............................................................
740 740 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
741 741 saved backup bundle to $TESTTMP/foo.hg (glob)<
742 742 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
743 743 saved backup bundle to $TESTTMP/foo.hg (glob)<
744 744 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
745 745 saved backup bundle to $TESTTMP/*.hg (glob)<
746 746
747 747 Race condition - test file was modified when test is running
748 748
749 749 $ TESTRACEDIR=`pwd`
750 750 $ export TESTRACEDIR
751 751 $ cat > test-race.t <<EOF
752 752 > $ echo 1
753 753 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
754 754 > EOF
755 755
756 756 $ rt -i test-race.t
757 757
758 758 --- $TESTTMP/test-race.t
759 759 +++ $TESTTMP/test-race.t.err
760 760 @@ -1,2 +1,3 @@
761 761 $ echo 1
762 762 + 1
763 763 $ echo "# a new line" >> $TESTTMP/test-race.t
764 764 Reference output has changed (run again to prompt changes)
765 765 ERROR: test-race.t output changed
766 766 !
767 767 Failed test-race.t: output changed
768 768 # Ran 1 tests, 0 skipped, 1 failed.
769 769 python hash seed: * (glob)
770 770 [1]
771 771
772 772 $ rm test-race.t
773 773
774 774 When "#testcases" is used in .t files
775 775
776 776 $ cat >> test-cases.t <<EOF
777 777 > #testcases a b
778 778 > #if a
779 779 > $ echo 1
780 780 > #endif
781 781 > #if b
782 782 > $ echo 2
783 783 > #endif
784 784 > EOF
785 785
786 786 $ cat <<EOF | rt -i test-cases.t 2>&1
787 787 > y
788 788 > y
789 789 > EOF
790 790
791 791 --- $TESTTMP/test-cases.t
792 792 +++ $TESTTMP/test-cases.t.a.err
793 793 @@ -1,6 +1,7 @@
794 794 #testcases a b
795 795 #if a
796 796 $ echo 1
797 797 + 1
798 798 #endif
799 799 #if b
800 800 $ echo 2
801 801 Accept this change? [n] .
802 802 --- $TESTTMP/test-cases.t
803 803 +++ $TESTTMP/test-cases.t.b.err
804 804 @@ -5,4 +5,5 @@
805 805 #endif
806 806 #if b
807 807 $ echo 2
808 808 + 2
809 809 #endif
810 810 Accept this change? [n] .
811 811 # Ran 2 tests, 0 skipped, 0 failed.
812 812
813 813 $ cat test-cases.t
814 814 #testcases a b
815 815 #if a
816 816 $ echo 1
817 817 1
818 818 #endif
819 819 #if b
820 820 $ echo 2
821 821 2
822 822 #endif
823 823
824 824 $ cat >> test-cases.t <<'EOF'
825 825 > #if a
826 826 > $ NAME=A
827 827 > #else
828 828 > $ NAME=B
829 829 > #endif
830 830 > $ echo $NAME
831 831 > A (a !)
832 832 > B (b !)
833 833 > EOF
834 834 $ rt test-cases.t
835 835 ..
836 836 # Ran 2 tests, 0 skipped, 0 failed.
837 837
838 838 $ rm test-cases.t
839 839
840 840 (reinstall)
841 841 $ mv backup test-failure.t
842 842
843 843 No Diff
844 844 ===============
845 845
846 846 $ rt --nodiff
847 847 !.
848 848 Failed test-failure.t: output changed
849 849 # Ran 2 tests, 0 skipped, 1 failed.
850 850 python hash seed: * (glob)
851 851 [1]
852 852
853 853 test --tmpdir support
854 854 $ rt --tmpdir=$TESTTMP/keep test-success.t
855 855
856 856 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
857 857 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
858 858 .
859 859 # Ran 1 tests, 0 skipped, 0 failed.
860 860
861 861 timeouts
862 862 ========
863 863 $ cat > test-timeout.t <<EOF
864 864 > $ sleep 2
865 865 > $ echo pass
866 866 > pass
867 867 > EOF
868 868 > echo '#require slow' > test-slow-timeout.t
869 869 > cat test-timeout.t >> test-slow-timeout.t
870 870 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
871 871 st
872 872 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
873 873 Failed test-timeout.t: timed out
874 874 # Ran 1 tests, 1 skipped, 1 failed.
875 875 python hash seed: * (glob)
876 876 [1]
877 877 $ rt --timeout=1 --slowtimeout=3 \
878 878 > test-timeout.t test-slow-timeout.t --allow-slow-tests
879 879 .t
880 880 Failed test-timeout.t: timed out
881 881 # Ran 2 tests, 0 skipped, 1 failed.
882 882 python hash seed: * (glob)
883 883 [1]
884 884 $ rm test-timeout.t test-slow-timeout.t
885 885
886 886 test for --time
887 887 ==================
888 888
889 889 $ rt test-success.t --time
890 890 .
891 891 # Ran 1 tests, 0 skipped, 0 failed.
892 892 # Producing time report
893 893 start end cuser csys real Test
894 894 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
895 895
896 896 test for --time with --jobs enabled
897 897 ====================================
898 898
899 899 $ rt test-success.t --time --jobs 2
900 900 .
901 901 # Ran 1 tests, 0 skipped, 0 failed.
902 902 # Producing time report
903 903 start end cuser csys real Test
904 904 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
905 905
906 906 Skips
907 907 ================
908 908 $ cat > test-skip.t <<EOF
909 909 > $ echo xyzzy
910 910 > #require false
911 911 > EOF
912 912 $ rt --nodiff
913 913 !.s
914 914 Skipped test-skip.t: missing feature: nail clipper
915 915 Failed test-failure.t: output changed
916 916 # Ran 2 tests, 1 skipped, 1 failed.
917 917 python hash seed: * (glob)
918 918 [1]
919 919
920 920 $ rt --keyword xyzzy
921 921 .s
922 922 Skipped test-skip.t: missing feature: nail clipper
923 923 # Ran 2 tests, 2 skipped, 0 failed.
924 924
925 925 Skips with xml
926 926 $ rt --keyword xyzzy \
927 927 > --xunit=xunit.xml
928 928 .s
929 929 Skipped test-skip.t: missing feature: nail clipper
930 930 # Ran 2 tests, 2 skipped, 0 failed.
931 931 $ cat xunit.xml
932 932 <?xml version="1.0" encoding="utf-8"?>
933 933 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
934 934 <testcase name="test-success.t" time="*"/> (glob)
935 935 <testcase name="test-skip.t">
936 936 <skipped>
937 937 <![CDATA[missing feature: nail clipper]]> </skipped>
938 938 </testcase>
939 939 </testsuite>
940 940
941 941 Tests skipped because they are missing or blacklisted don't count as executed:
942 942 $ echo test-failure.t > blacklist
943 943 $ rt --blacklist=blacklist --json\
944 944 > test-failure.t test-bogus.t
945 945 ss
946 946 Skipped test-bogus.t: Doesn't exist
947 947 Skipped test-failure.t: blacklisted
948 948 # Ran 0 tests, 2 skipped, 0 failed.
949 949 $ cat report.json
950 950 testreport ={
951 951 "test-bogus.t": {
952 952 "result": "skip"
953 953 },
954 954 "test-failure.t": {
955 955 "result": "skip"
956 956 }
957 957 } (no-eol)
958 958
959 959 Whitelist trumps blacklist
960 960 $ echo test-failure.t > whitelist
961 961 $ rt --blacklist=blacklist --whitelist=whitelist --json\
962 962 > test-failure.t test-bogus.t
963 963 s
964 964 --- $TESTTMP/test-failure.t
965 965 +++ $TESTTMP/test-failure.t.err
966 966 @@ -1,5 +1,5 @@
967 967 $ echo babar
968 968 - rataxes
969 969 + babar
970 970 This is a noop statement so that
971 971 this test is still more bytes than success.
972 972 pad pad pad pad............................................................
973 973
974 974 ERROR: test-failure.t output changed
975 975 !
976 976 Skipped test-bogus.t: Doesn't exist
977 977 Failed test-failure.t: output changed
978 978 # Ran 1 tests, 1 skipped, 1 failed.
979 979 python hash seed: * (glob)
980 980 [1]
981 981
982 982 Ensure that --test-list causes only the tests listed in that file to
983 983 be executed.
984 984 $ echo test-success.t >> onlytest
985 985 $ rt --test-list=onlytest
986 986 .
987 987 # Ran 1 tests, 0 skipped, 0 failed.
988 988 $ echo test-bogus.t >> anothertest
989 989 $ rt --test-list=onlytest --test-list=anothertest
990 990 s.
991 991 Skipped test-bogus.t: Doesn't exist
992 992 # Ran 1 tests, 1 skipped, 0 failed.
993 993 $ rm onlytest anothertest
994 994
995 995 test for --json
996 996 ==================
997 997
998 998 $ rt --json
999 999
1000 1000 --- $TESTTMP/test-failure.t
1001 1001 +++ $TESTTMP/test-failure.t.err
1002 1002 @@ -1,5 +1,5 @@
1003 1003 $ echo babar
1004 1004 - rataxes
1005 1005 + babar
1006 1006 This is a noop statement so that
1007 1007 this test is still more bytes than success.
1008 1008 pad pad pad pad............................................................
1009 1009
1010 1010 ERROR: test-failure.t output changed
1011 1011 !.s
1012 1012 Skipped test-skip.t: missing feature: nail clipper
1013 1013 Failed test-failure.t: output changed
1014 1014 # Ran 2 tests, 1 skipped, 1 failed.
1015 1015 python hash seed: * (glob)
1016 1016 [1]
1017 1017
1018 1018 $ cat report.json
1019 1019 testreport ={
1020 1020 "test-failure.t": [\{] (re)
1021 1021 "csys": "\s*[\d\.]{4,5}", ? (re)
1022 1022 "cuser": "\s*[\d\.]{4,5}", ? (re)
1023 1023 "diff": "---.+\+\+\+.+", ? (re)
1024 1024 "end": "\s*[\d\.]{4,5}", ? (re)
1025 1025 "result": "failure", ? (re)
1026 1026 "start": "\s*[\d\.]{4,5}", ? (re)
1027 1027 "time": "\s*[\d\.]{4,5}" (re)
1028 1028 }, ? (re)
1029 1029 "test-skip.t": {
1030 1030 "csys": "\s*[\d\.]{4,5}", ? (re)
1031 1031 "cuser": "\s*[\d\.]{4,5}", ? (re)
1032 1032 "diff": "", ? (re)
1033 1033 "end": "\s*[\d\.]{4,5}", ? (re)
1034 1034 "result": "skip", ? (re)
1035 1035 "start": "\s*[\d\.]{4,5}", ? (re)
1036 1036 "time": "\s*[\d\.]{4,5}" (re)
1037 1037 }, ? (re)
1038 1038 "test-success.t": [\{] (re)
1039 1039 "csys": "\s*[\d\.]{4,5}", ? (re)
1040 1040 "cuser": "\s*[\d\.]{4,5}", ? (re)
1041 1041 "diff": "", ? (re)
1042 1042 "end": "\s*[\d\.]{4,5}", ? (re)
1043 1043 "result": "success", ? (re)
1044 1044 "start": "\s*[\d\.]{4,5}", ? (re)
1045 1045 "time": "\s*[\d\.]{4,5}" (re)
1046 1046 }
1047 1047 } (no-eol)
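
A sketch of how report.json could be consumed by another tool: as the cat
output above shows, the file is written as a JavaScript-style assignment
("testreport ={...}") rather than bare JSON, so the prefix is stripped
before parsing (the key names used below are the ones visible in the
report).

    import json

    with open('report.json') as fp:
        text = fp.read()
    report = json.loads(text[text.index('{'):])     # drop the "testreport =" prefix
    for name, data in sorted(report.items()):
        print(name, data['result'], data['time'])
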
1048 1048 --json with --outputdir
1049 1049
1050 1050 $ rm report.json
1051 1051 $ rm -r output
1052 1052 $ mkdir output
1053 1053 $ rt --json --outputdir output
1054 1054
1055 1055 --- $TESTTMP/test-failure.t
1056 1056 +++ $TESTTMP/output/test-failure.t.err
1057 1057 @@ -1,5 +1,5 @@
1058 1058 $ echo babar
1059 1059 - rataxes
1060 1060 + babar
1061 1061 This is a noop statement so that
1062 1062 this test is still more bytes than success.
1063 1063 pad pad pad pad............................................................
1064 1064
1065 1065 ERROR: test-failure.t output changed
1066 1066 !.s
1067 1067 Skipped test-skip.t: missing feature: nail clipper
1068 1068 Failed test-failure.t: output changed
1069 1069 # Ran 2 tests, 1 skipped, 1 failed.
1070 1070 python hash seed: * (glob)
1071 1071 [1]
1072 1072 $ f report.json
1073 1073 report.json: file not found
1074 1074 $ cat output/report.json
1075 1075 testreport ={
1076 1076 "test-failure.t": [\{] (re)
1077 1077 "csys": "\s*[\d\.]{4,5}", ? (re)
1078 1078 "cuser": "\s*[\d\.]{4,5}", ? (re)
1079 1079 "diff": "---.+\+\+\+.+", ? (re)
1080 1080 "end": "\s*[\d\.]{4,5}", ? (re)
1081 1081 "result": "failure", ? (re)
1082 1082 "start": "\s*[\d\.]{4,5}", ? (re)
1083 1083 "time": "\s*[\d\.]{4,5}" (re)
1084 1084 }, ? (re)
1085 1085 "test-skip.t": {
1086 1086 "csys": "\s*[\d\.]{4,5}", ? (re)
1087 1087 "cuser": "\s*[\d\.]{4,5}", ? (re)
1088 1088 "diff": "", ? (re)
1089 1089 "end": "\s*[\d\.]{4,5}", ? (re)
1090 1090 "result": "skip", ? (re)
1091 1091 "start": "\s*[\d\.]{4,5}", ? (re)
1092 1092 "time": "\s*[\d\.]{4,5}" (re)
1093 1093 }, ? (re)
1094 1094 "test-success.t": [\{] (re)
1095 1095 "csys": "\s*[\d\.]{4,5}", ? (re)
1096 1096 "cuser": "\s*[\d\.]{4,5}", ? (re)
1097 1097 "diff": "", ? (re)
1098 1098 "end": "\s*[\d\.]{4,5}", ? (re)
1099 1099 "result": "success", ? (re)
1100 1100 "start": "\s*[\d\.]{4,5}", ? (re)
1101 1101 "time": "\s*[\d\.]{4,5}" (re)
1102 1102 }
1103 1103 } (no-eol)
1104 1104 $ ls -a output
1105 1105 .
1106 1106 ..
1107 1107 .testtimes
1108 1108 report.json
1109 1109 test-failure.t.err
1110 1110
1111 1111 Test that failed tests accepted through interactive mode are properly reported:
1112 1112
1113 1113 $ cp test-failure.t backup
1114 1114 $ echo y | rt --json -i
1115 1115
1116 1116 --- $TESTTMP/test-failure.t
1117 1117 +++ $TESTTMP/test-failure.t.err
1118 1118 @@ -1,5 +1,5 @@
1119 1119 $ echo babar
1120 1120 - rataxes
1121 1121 + babar
1122 1122 This is a noop statement so that
1123 1123 this test is still more bytes than success.
1124 1124 pad pad pad pad............................................................
1125 1125 Accept this change? [n] ..s
1126 1126 Skipped test-skip.t: missing feature: nail clipper
1127 1127 # Ran 2 tests, 1 skipped, 0 failed.
1128 1128
1129 1129 $ cat report.json
1130 1130 testreport ={
1131 1131 "test-failure.t": [\{] (re)
1132 1132 "csys": "\s*[\d\.]{4,5}", ? (re)
1133 1133 "cuser": "\s*[\d\.]{4,5}", ? (re)
1134 1134 "diff": "", ? (re)
1135 1135 "end": "\s*[\d\.]{4,5}", ? (re)
1136 1136 "result": "success", ? (re)
1137 1137 "start": "\s*[\d\.]{4,5}", ? (re)
1138 1138 "time": "\s*[\d\.]{4,5}" (re)
1139 1139 }, ? (re)
1140 1140 "test-skip.t": {
1141 1141 "csys": "\s*[\d\.]{4,5}", ? (re)
1142 1142 "cuser": "\s*[\d\.]{4,5}", ? (re)
1143 1143 "diff": "", ? (re)
1144 1144 "end": "\s*[\d\.]{4,5}", ? (re)
1145 1145 "result": "skip", ? (re)
1146 1146 "start": "\s*[\d\.]{4,5}", ? (re)
1147 1147 "time": "\s*[\d\.]{4,5}" (re)
1148 1148 }, ? (re)
1149 1149 "test-success.t": [\{] (re)
1150 1150 "csys": "\s*[\d\.]{4,5}", ? (re)
1151 1151 "cuser": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "diff": "", ? (re)
1153 1153 "end": "\s*[\d\.]{4,5}", ? (re)
1154 1154 "result": "success", ? (re)
1155 1155 "start": "\s*[\d\.]{4,5}", ? (re)
1156 1156 "time": "\s*[\d\.]{4,5}" (re)
1157 1157 }
1158 1158 } (no-eol)
1159 1159 $ mv backup test-failure.t
1160 1160
1161 1161 backslash at the end of a line with glob matching is handled properly
1162 1162
1163 1163 $ cat > test-glob-backslash.t << EOF
1164 1164 > $ echo 'foo bar \\'
1165 1165 > foo * \ (glob)
1166 1166 > EOF
1167 1167
1168 1168 $ rt test-glob-backslash.t
1169 1169 .
1170 1170 # Ran 1 tests, 0 skipped, 0 failed.
1171 1171
1172 1172 $ rm -f test-glob-backslash.t
1173 1173
1174 1174 Test globbing of local IP addresses
1175 1175 $ echo 172.16.18.1
1176 1176 $LOCALIP (glob)
1177 1177 $ echo dead:beef::1
1178 1178 $LOCALIP (glob)
1179 1179
1180 1180 Test reusability for third party tools
1181 1181 ======================================
1182 1182
1183 1183 $ mkdir "$TESTTMP"/anothertests
1184 1184 $ cd "$TESTTMP"/anothertests
1185 1185
1186 1186 test that `run-tests.py` can execute hghave, even when it is not run
1187 1187 from the Mercurial source tree.
1188 1188
1189 1189 $ cat > test-hghave.t <<EOF
1190 1190 > #require true
1191 1191 > $ echo foo
1192 1192 > foo
1193 1193 > EOF
1194 1194 $ rt test-hghave.t
1195 1195 .
1196 1196 # Ran 1 tests, 0 skipped, 0 failed.
1197 1197
1198 1198 test that RUNTESTDIR refers to the directory in which the currently
1199 1199 running `run-tests.py` is placed.
1200 1200
1201 1201 $ cat > test-runtestdir.t <<EOF
1202 1202 > - $TESTDIR, in which test-run-tests.t is placed
1203 1203 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1204 1204 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1205 1205 >
1206 1206 > #if windows
1207 1207 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1208 1208 > #else
1209 1209 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1210 1210 > #endif
1211 1211 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1212 1212 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1213 1213 > #!USRBINENVPY
1214 1214 > #
1215 1215 > # check-code - a style and portability checker for Mercurial
1216 1216 > EOF
1217 1217 $ rt test-runtestdir.t
1218 1218 .
1219 1219 # Ran 1 tests, 0 skipped, 0 failed.
1220 1220
1221 1221 #if execbit
1222 1222
1223 1223 test that TESTDIR is included in PATH
1224 1224
1225 1225 $ cat > custom-command.sh <<EOF
1226 1226 > #!/bin/sh
1227 1227 > echo "hello world"
1228 1228 > EOF
1229 1229 $ chmod +x custom-command.sh
1230 1230 $ cat > test-testdir-path.t <<EOF
1231 1231 > $ custom-command.sh
1232 1232 > hello world
1233 1233 > EOF
1234 1234 $ rt test-testdir-path.t
1235 1235 .
1236 1236 # Ran 1 tests, 0 skipped, 0 failed.
1237 1237
1238 1238 #endif
1239 1239
1240 1240 test support for --allow-slow-tests
1241 1241 $ cat > test-very-slow-test.t <<EOF
1242 1242 > #require slow
1243 1243 > $ echo pass
1244 1244 > pass
1245 1245 > EOF
1246 1246 $ rt test-very-slow-test.t
1247 1247 s
1248 1248 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1249 1249 # Ran 0 tests, 1 skipped, 0 failed.
1250 1250 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1251 1251 .
1252 1252 # Ran 1 tests, 0 skipped, 0 failed.
1253 1253
1254 1254 support for running a test outside the current directory
1255 1255 $ mkdir nonlocal
1256 1256 $ cat > nonlocal/test-is-not-here.t << EOF
1257 1257 > $ echo pass
1258 1258 > pass
1259 1259 > EOF
1260 1260 $ rt nonlocal/test-is-not-here.t
1261 1261 .
1262 1262 # Ran 1 tests, 0 skipped, 0 failed.
1263 1263
1264 1264 support for automatically discovering tests if the argument is a folder
1265 1265 $ mkdir tmp && cd tmp
1266 1266
1267 1267 $ cat > test-uno.t << EOF
1268 1268 > $ echo line
1269 1269 > line
1270 1270 > EOF
1271 1271
1272 1272 $ cp test-uno.t test-dos.t
1273 1273 $ cd ..
1274 1274 $ cp -R tmp tmpp
1275 1275 $ cp tmp/test-uno.t test-solo.t
1276 1276
1277 1277 $ $PYTHON $TESTDIR/run-tests.py tmp/ test-solo.t tmpp
1278 1278 .....
1279 1279 # Ran 5 tests, 0 skipped, 0 failed.
1280 1280 $ rm -rf tmp tmpp
1281 1281
1282 1282 support for running run-tests.py from another directory
1283 1283 $ mkdir tmp && cd tmp
1284
1284 1285 $ cat > useful-file.sh << EOF
1285 1286 > important command
1286 1287 > EOF
1287 1288
1288 1289 $ cat > test-folder.t << EOF
1289 1290 > $ cat \$TESTDIR/useful-file.sh
1290 1291 > important command
1291 1292 > EOF
1292 1293
1294 $ cat > test-folder-fail.t << EOF
1295 > $ cat \$TESTDIR/useful-file.sh
1296 > important commando
1297 > EOF
1298
1293 1299 $ cd ..
1294 $ $PYTHON $TESTDIR/run-tests.py tmp/test-folder.t
1295 .
1296 # Ran 1 tests, 0 skipped, 0 failed.
1300 $ $PYTHON $TESTDIR/run-tests.py tmp/test-*.t
1301
1302 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1303 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1304 @@ -1,2 +1,2 @@
1305 $ cat $TESTDIR/useful-file.sh
1306 - important commando
1307 + important command
1308
1309 ERROR: test-folder-fail.t output changed
1310 !.
1311 Failed test-folder-fail.t: output changed
1312 # Ran 2 tests, 0 skipped, 1 failed.
1313 python hash seed: * (glob)
1314 [1]
1297 1315
1298 1316 support for bisecting failed tests automatically
1299 1317 $ hg init bisect
1300 1318 $ cd bisect
1301 1319 $ cat >> test-bisect.t <<EOF
1302 1320 > $ echo pass
1303 1321 > pass
1304 1322 > EOF
1305 1323 $ hg add test-bisect.t
1306 1324 $ hg ci -m 'good'
1307 1325 $ cat >> test-bisect.t <<EOF
1308 1326 > $ echo pass
1309 1327 > fail
1310 1328 > EOF
1311 1329 $ hg ci -m 'bad'
1312 1330 $ rt --known-good-rev=0 test-bisect.t
1313 1331
1314 1332 --- $TESTTMP/anothertests/bisect/test-bisect.t
1315 1333 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1316 1334 @@ -1,4 +1,4 @@
1317 1335 $ echo pass
1318 1336 pass
1319 1337 $ echo pass
1320 1338 - fail
1321 1339 + pass
1322 1340
1323 1341 ERROR: test-bisect.t output changed
1324 1342 !
1325 1343 Failed test-bisect.t: output changed
1326 1344 test-bisect.t broken by 72cbf122d116 (bad)
1327 1345 # Ran 1 tests, 0 skipped, 1 failed.
1328 1346 python hash seed: * (glob)
1329 1347 [1]
1330 1348
1331 1349 $ cd ..
1332 1350
1333 1351 support bisecting a separate repo
1334 1352
1335 1353 $ hg init bisect-dependent
1336 1354 $ cd bisect-dependent
1337 1355 $ cat > test-bisect-dependent.t <<EOF
1338 1356 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1339 1357 > pass
1340 1358 > EOF
1341 1359 $ hg commit -Am dependent test-bisect-dependent.t
1342 1360
1343 1361 $ rt --known-good-rev=0 test-bisect-dependent.t
1344 1362
1345 1363 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1346 1364 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1347 1365 @@ -1,2 +1,2 @@
1348 1366 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1349 1367 - pass
1350 1368 + fail
1351 1369
1352 1370 ERROR: test-bisect-dependent.t output changed
1353 1371 !
1354 1372 Failed test-bisect-dependent.t: output changed
1355 1373 Failed to identify failure point for test-bisect-dependent.t
1356 1374 # Ran 1 tests, 0 skipped, 1 failed.
1357 1375 python hash seed: * (glob)
1358 1376 [1]
1359 1377
1360 1378 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1361 1379 Usage: run-tests.py [options] [tests]
1362 1380
1363 1381 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1364 1382 [2]
1365 1383
1366 1384 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1367 1385
1368 1386 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1369 1387 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1370 1388 @@ -1,2 +1,2 @@
1371 1389 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1372 1390 - pass
1373 1391 + fail
1374 1392
1375 1393 ERROR: test-bisect-dependent.t output changed
1376 1394 !
1377 1395 Failed test-bisect-dependent.t: output changed
1378 1396 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1379 1397 # Ran 1 tests, 0 skipped, 1 failed.
1380 1398 python hash seed: * (glob)
1381 1399 [1]
1382 1400
1383 1401 $ cd ..
1384 1402
1385 1403 Test a broken #if statement doesn't break run-tests threading.
1386 1404 ==============================================================
1387 1405 $ mkdir broken
1388 1406 $ cd broken
1389 1407 $ cat > test-broken.t <<EOF
1390 1408 > true
1391 1409 > #if notarealhghavefeature
1392 1410 > $ false
1393 1411 > #endif
1394 1412 > EOF
1395 1413 $ for f in 1 2 3 4 ; do
1396 1414 > cat > test-works-$f.t <<EOF
1397 1415 > This is test case $f
1398 1416 > $ sleep 1
1399 1417 > EOF
1400 1418 > done
1401 1419 $ rt -j 2
1402 1420 ....
1403 1421 # Ran 5 tests, 0 skipped, 0 failed.
1404 1422 skipped: unknown feature: notarealhghavefeature
1405 1423
1406 1424 $ cd ..
1407 1425 $ rm -rf broken
1408 1426
1409 1427 Test cases in .t files
1410 1428 ======================
1411 1429 $ mkdir cases
1412 1430 $ cd cases
1413 1431 $ cat > test-cases-abc.t <<'EOF'
1414 1432 > #testcases A B C
1415 1433 > $ V=B
1416 1434 > #if A
1417 1435 > $ V=A
1418 1436 > #endif
1419 1437 > #if C
1420 1438 > $ V=C
1421 1439 > #endif
1422 1440 > $ echo $V | sed 's/A/C/'
1423 1441 > C
1424 1442 > #if C
1425 1443 > $ [ $V = C ]
1426 1444 > #endif
1427 1445 > #if A
1428 1446 > $ [ $V = C ]
1429 1447 > [1]
1430 1448 > #endif
1431 1449 > #if no-C
1432 1450 > $ [ $V = C ]
1433 1451 > [1]
1434 1452 > #endif
1435 1453 > $ [ $V = D ]
1436 1454 > [1]
1437 1455 > EOF
1438 1456 $ rt
1439 1457 .
1440 1458 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1441 1459 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1442 1460 @@ -7,7 +7,7 @@
1443 1461 $ V=C
1444 1462 #endif
1445 1463 $ echo $V | sed 's/A/C/'
1446 1464 - C
1447 1465 + B
1448 1466 #if C
1449 1467 $ [ $V = C ]
1450 1468 #endif
1451 1469
1452 1470 ERROR: test-cases-abc.t (case B) output changed
1453 1471 !.
1454 1472 Failed test-cases-abc.t (case B): output changed
1455 1473 # Ran 3 tests, 0 skipped, 1 failed.
1456 1474 python hash seed: * (glob)
1457 1475 [1]
1458 1476
1459 1477 --restart works
1460 1478
1461 1479 $ rt --restart
1462 1480
1463 1481 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1464 1482 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1465 1483 @@ -7,7 +7,7 @@
1466 1484 $ V=C
1467 1485 #endif
1468 1486 $ echo $V | sed 's/A/C/'
1469 1487 - C
1470 1488 + B
1471 1489 #if C
1472 1490 $ [ $V = C ]
1473 1491 #endif
1474 1492
1475 1493 ERROR: test-cases-abc.t (case B) output changed
1476 1494 !.
1477 1495 Failed test-cases-abc.t (case B): output changed
1478 1496 # Ran 2 tests, 0 skipped, 1 failed.
1479 1497 python hash seed: * (glob)
1480 1498 [1]
1481 1499
1482 1500 --restart works with outputdir
1483 1501
1484 1502 $ mkdir output
1485 1503 $ mv test-cases-abc.t.B.err output
1486 1504 $ rt --restart --outputdir output
1487 1505
1488 1506 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1489 1507 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1490 1508 @@ -7,7 +7,7 @@
1491 1509 $ V=C
1492 1510 #endif
1493 1511 $ echo $V | sed 's/A/C/'
1494 1512 - C
1495 1513 + B
1496 1514 #if C
1497 1515 $ [ $V = C ]
1498 1516 #endif
1499 1517
1500 1518 ERROR: test-cases-abc.t (case B) output changed
1501 1519 !.
1502 1520 Failed test-cases-abc.t (case B): output changed
1503 1521 # Ran 2 tests, 0 skipped, 1 failed.
1504 1522 python hash seed: * (glob)
1505 1523 [1]
1506 1524
1507 1525 Test automatic pattern replacement
1508 1526
1509 1527 $ cat << EOF >> common-pattern.py
1510 1528 > substitutions = [
1511 1529 > (br'foo-(.*)\\b',
1512 1530 > br'\$XXX=\\1\$'),
1513 1531 > (br'bar\\n',
1514 1532 > br'\$YYY$\\n'),
1515 1533 > ]
1516 1534 > EOF
1517 1535
1518 1536 $ cat << EOF >> test-substitution.t
1519 1537 > $ echo foo-12
1520 1538 > \$XXX=12$
1521 1539 > $ echo foo-42
1522 1540 > \$XXX=42$
1523 1541 > $ echo bar prior
1524 1542 > bar prior
1525 1543 > $ echo lastbar
1526 1544 > last\$YYY$
1527 1545 > $ echo foo-bar foo-baz
1528 1546 > EOF
1529 1547
1530 1548 $ rt test-substitution.t
1531 1549
1532 1550 --- $TESTTMP/anothertests/cases/test-substitution.t
1533 1551 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1534 1552 @@ -7,3 +7,4 @@
1535 1553 $ echo lastbar
1536 1554 last$YYY$
1537 1555 $ echo foo-bar foo-baz
1538 1556 + $XXX=bar foo-baz$
1539 1557
1540 1558 ERROR: test-substitution.t output changed
1541 1559 !
1542 1560 Failed test-substitution.t: output changed
1543 1561 # Ran 1 tests, 0 skipped, 1 failed.
1544 1562 python hash seed: * (glob)
1545 1563 [1]
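
The substitution pairs defined in common-pattern.py behave like ordinary
re.sub() replacements applied to the produced output before it is compared,
which is what yields the "$XXX=bar foo-baz$" line in the diff above; a rough
standalone sketch of that step (the exact hook run-tests uses is not shown
here):

    import re

    subs = [(br'foo-(.*)\b', br'$XXX=\1$'),
            (br'bar\n', br'$YYY$\n')]

    line = b'foo-bar foo-baz\n'
    for pat, repl in subs:
        line = re.sub(pat, repl, line)
    print(line)   # b'$XXX=bar foo-baz$\n', matching the .err diff above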