##// END OF EJS Templates
run-tests: allow automatic test discovery when providing folder as argument...
Matthieu Laneuville -
r34970:d600bda4 default
parent child Browse files
Show More
@@ -1,2938 +1,2948 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 try:
74 74 import shlex
75 75 shellquote = shlex.quote
76 76 except (ImportError, AttributeError):
77 77 import pipes
78 78 shellquote = pipes.quote
79 79
80 80 if os.environ.get('RTUNICODEPEDANTRY', False):
81 81 try:
82 82 reload(sys)
83 83 sys.setdefaultencoding("undefined")
84 84 except NameError:
85 85 pass
86 86
87 87 origenviron = os.environ.copy()
88 88 osenvironb = getattr(os, 'environb', os.environ)
89 89 processlock = threading.Lock()
90 90
91 91 pygmentspresent = False
92 92 # ANSI color is unsupported prior to Windows 10
93 93 if os.name != 'nt':
94 94 try: # is pygments installed
95 95 import pygments
96 96 import pygments.lexers as lexers
97 97 import pygments.lexer as lexer
98 98 import pygments.formatters as formatters
99 99 import pygments.token as token
100 100 import pygments.style as style
101 101 pygmentspresent = True
102 102 difflexer = lexers.DiffLexer()
103 103 terminal256formatter = formatters.Terminal256Formatter()
104 104 except ImportError:
105 105 pass
106 106
if pygmentspresent:
    # Style/lexer pair used to colorize the runner's own status lines
    # ("Skipped ...", "Failed ...", "ERROR: ...") -- not test diffs,
    # which go through the stock DiffLexer above.
    class TestRunnerStyle(style.Style):
        default_style = ""
        # Synthetic token types matched by TestRunnerLexer below.
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # 'root' recognizes the status prefix, then hands off to a state
        # that highlights the test name and the trailing reason text.
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (r'[\w-]+\.(t|py)', token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (r'[\w-]+\.(t|py)', token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    # Shared instances used by highlightmsg() below.
    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
140 140
# Interpreter compatibility shims: this script supports Python 2.7 and
# Python 3.5+; 3.0-3.4 are rejected outright.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # type: (str) -> bytes
        """Encode a text path to bytes; None passes through unchanged."""
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        # type: (bytes) -> str
        """Decode a bytes path to text; None passes through unchanged."""
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
169 169
170 170 # For Windows support
171 171 wifexited = getattr(os, "WIFEXITED", lambda x: False)
172 172
173 173 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET' or 'AF_INET6'.  The port already
    being in use is okay - EADDRINUSE counts as success.
    """
    family = getattr(socket, name, None)
    if family is None:
        # The constant doesn't exist at all, so the family is unsupported.
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone else is already listening: the family still works.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
197 197
198 198 # useipv6 will be set by parseargs
199 199 useipv6 = None
200 200
def checkportisavailable(port):
    """Return True if a port seems free to bind on localhost."""
    # parseargs() sets the module-level ``useipv6`` flag before this runs.
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        expected = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                    errno.EPROTONOSUPPORT)
        if exc.errno not in expected:
            raise
        return False
217 217
218 218 closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn ``cmd`` through the shell in directory ``wd``.

    Returns the Popen object, with stdout and stderr merged into a single
    pipe and popen2-style ``fromchild``/``tochild``/``childerr`` aliases
    attached.  If ``timeout`` is non-zero, a watchdog thread terminates
    the process after that many seconds and sets ``p.timeout = True`` so
    callers can distinguish a timeout from a normal exit.
    """
    # Serialize process creation; concurrent fork+exec from multiple
    # threads is guarded by this module-level lock.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen2-compatible aliases used elsewhere in this script.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll every 100ms until the process exits or time runs out.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
243 243
244 244 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
245 245 IMPL_PATH = b'PYTHONPATH'
246 246 if 'java' in sys.platform:
247 247 IMPL_PATH = b'JYTHONPATH'
248 248
249 249 defaults = {
250 250 'jobs': ('HGTEST_JOBS', 1),
251 251 'timeout': ('HGTEST_TIMEOUT', 180),
252 252 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
253 253 'port': ('HGTEST_PORT', 20059),
254 254 'shell': ('HGTEST_SHELL', 'sh'),
255 255 }
256 256
def canonpath(path):
    """Expand ``~`` and resolve symlinks, returning a canonical path."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
259 259
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a dict.

    Every non-empty, non-comment line of each file in ``files`` becomes a
    key (the test name, as bytes) mapping to the name of the file it came
    from.  Missing files are skipped, with a warning unless ``warn`` is
    False; any other I/O error propagates.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Use a context manager so the descriptor is released even if
        # reading raises part way through (the original leaked it then).
        with f:
            for line in f:
                # Strip trailing '#' comments and surrounding whitespace.
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
280 280
def parsettestcases(path):
    """Read a .t test file, return a set of test case names.

    If path does not exist, return an empty set; other I/O errors
    propagate to the caller.
    """
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                # A "#testcases a b c" line declares all cases at once.
                if line.startswith(b'#testcases '):
                    cases.update(line[11:].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases
296 296
def getparser():
    """Obtain the OptionParser used by the CLI.

    Option defaults that have an HGTEST_* environment override (see the
    module-level ``defaults`` dict) are resolved at the bottom.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--test-list", action="append",
        help="read tests to run from the specified file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("--list-tests", action="store_true",
        help="list tests instead of running them")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("--outputdir", type="string",
        help="directory to write error logs to (default=test directory)")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    parser.add_option("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    parser.add_option("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    parser.add_option("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')
    parser.add_option('--known-good-rev', type="string",
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    parser.add_option('--bisect-repo', type="string",
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))

    # Resolve environment-variable overrides, coercing each to the type
    # of its hard-coded default (int for jobs/timeout/port, str for shell).
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
421 421
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns the ``(options, args)`` pair.  Invalid combinations abort via
    ``parser.error()``.  Also sets the module-level ``useipv6`` and
    ``verbose`` globals as side effects.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local resolves to the hg (and optionally chg) binaries that
        # live next to this test directory in the repository.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False  # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # The empty string enables vlog() output (None disables it).
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # Debug mode disables output capture, so timeouts are meaningless.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # Channel display and diff output share the terminal; keep one.
        options.nodiff = True

    return (options, args)
554 554
def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    # Copy first so an existing dst is simply overwritten, then drop src.
    shutil.copy(src, dst)
    os.unlink(src)
561 561
562 562 _unified_diff = difflib.unified_diff
563 563 if PYTHON3:
564 564 import functools
565 565 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
566 566
def getdiff(expected, output, ref, err):
    """Return ``(servefail, lines)``: a unified diff of expected vs.
    actual output (as bytes lines), and whether a "child process failed
    to start" abort was observed in the added lines.
    """
    servefail = False
    lines = []
    marker = b'+ abort: child process failed to start'
    for rawline in _unified_diff(expected, output, ref, err):
        line = rawline
        if line.startswith((b'+++', b'---')):
            # Normalize Windows path separators in the file headers.
            line = line.replace(b'\\', b'/')
        if line.endswith(b' \n'):
            # Drop the trailing space that marks stripped whitespace.
            line = line[:-2] + b'\n'
        lines.append(line)
        servefail = servefail or line.startswith(marker)
    return servefail, lines
581 581
582 582 verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is not False:
        return log(*msg)
589 589
590 590 # Bytes that break XML even in a CDATA block: control characters 0-31
591 591 # sans \t, \n and \r
592 592 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
593 593
594 594 # Match feature conditionalized output lines in the form, capturing the feature
595 595 # list in group 2, and the preceeding line output in group 1:
596 596 #
597 597 # output..output (feature !)\n
598 598 optline = re.compile(b'(.*) \((.+?) !\)\n$')
599 599
def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either.  Illegal bytes
    become '?' and ']]>' gains a space so it cannot close the block.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
609 609
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock serializes output (defined elsewhere in this file) so that
    # concurrent tests don't interleave their messages.
    with iolock:
        if verbose:
            # ``verbose`` holds a prefix string when truthy.
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
622 622
def highlightdiff(line, color):
    """Colorize one diff line (bytes) with pygments when ``color`` is set;
    otherwise return it unchanged."""
    if color:
        assert pygmentspresent
        text = line.decode('latin1')
        colored = pygments.highlight(text, difflexer, terminal256formatter)
        return colored.encode('latin1')
    return line
629 629
def highlightmsg(msg, color):
    """Colorize a runner status message with pygments when ``color`` is
    set; otherwise return it unchanged."""
    if color:
        assert pygmentspresent
        return pygments.highlight(msg, runnerlexer, runnerformatter)
    return msg
635 635
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # The process may already have exited; nothing more to do.
        pass
643 643
def killdaemons(pidfile):
    """Kill every daemon recorded in ``pidfile`` and remove the file."""
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, remove=True, tryhard=False,
                               logfn=vlog)
648 648
649 649 class Test(unittest.TestCase):
650 650 """Encapsulates a single, runnable test.
651 651
652 652 While this class conforms to the unittest.TestCase API, it differs in that
653 653 instances need to be instantiated manually. (Typically, unittest.TestCase
654 654 classes are instantiated automatically by scanning modules.)
655 655 """
656 656
657 657 # Status code reserved for skipped tests (used by hghave).
658 658 SKIPPED_STATUS = 80
659 659
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # Fall back to the environment-derived module defaults.
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)        # bytes test name
        self.name = _strpath(self.bname)           # str test name
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # .err file written next to the reference output on failure.
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state, (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
734 734
735 735 def readrefout(self):
736 736 """read reference output"""
737 737 # If we're not in --debug mode and reference output file exists,
738 738 # check test output against it.
739 739 if self._debug:
740 740 return None # to match "out is None"
741 741 elif os.path.exists(self.refpath):
742 742 with open(self.refpath, 'rb') as f:
743 743 return f.read().splitlines(True)
744 744 else:
745 745 return []
746 746
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        # unittest.TestCase.__repr__ reads this attribute.
        return self.name
751 751
    def __str__(self):
        # Tests display as their file name.
        return self.name
754 754
    def shortDescription(self):
        # One-line description used in unittest result reporting.
        return self.name
757 757
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so an instance can be run repeatedly.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread directory may already exist from a prior run.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg needs its own socket directory under the thread tmpdir.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
790 790
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                # Remember the abort so stopTest can report "interrupted".
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    # addFailure returning True means the new output was
                    # accepted (e.g. via --interactive); treat as success.
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs, and its failure overrides success.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
845 845
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Translate an exit status into a human-readable phrase.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                # hghave itself errored out, which is a failure, not a skip.
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
911 911
    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemons (hg serve, etc.) the test left behind.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            # Second argument True == ignore_errors; cleanup is best-effort.
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the mismatching output to <test>.err so the user (and
        # diff generation) can inspect it after the run.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
939 939
    def _run(self, env):
        """Run the test body and return (exitcode, output-lines).

        Abstract hook: the base implementation skips the test.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
943 943
    def abort(self):
        """Terminate execution of this test."""
        # Polled by _run() implementations (which raise KeyboardInterrupt
        # when set) and reported via stopTest(interrupted=...) in run().
        self._aborted = True
947 947
948 948 def _portmap(self, i):
949 949 offset = b'' if i == 0 else b'%d' % i
950 950 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
951 951
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Normalize backup-bundle paths to a single (glob)-annotated form.
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            # Replace the loopback address with $LOCALIP; the leading
            # non-digit guard avoids mangling longer addresses/numbers.
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        # The per-test temp dir becomes the symbolic $TESTTMP.
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        return r
972 972
    def _escapepath(self, p):
        """Return a regex (bytes) that matches path ``p`` in test output.

        On Windows the pattern is case-insensitive per letter and accepts
        either path separator; elsewhere the path is matched literally.
        """
        if os.name == 'nt':
            # Per character: letters -> [xX] class, slashes -> [/\\],
            # digits pass through, everything else is backslash-escaped.
            # NOTE(review): iterating a bytes object yields ints on
            # Python 3, so this branch appears Python 2-only — confirm
            # before relying on it under py3 on Windows.
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
982 982
983 983 def _localip(self):
984 984 if self._useipv6:
985 985 return b'::1'
986 986 else:
987 987 return b'127.0.0.1'
988 988
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            # Re-export every original variable the shell can express.
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables introduced only by the test harness get unset.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1017 1017
1018 1018 def _getenv(self):
1019 1019 """Obtain environment variables to use during test execution."""
1020 1020 def defineport(i):
1021 1021 offset = '' if i == 0 else '%s' % i
1022 1022 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1023 1023 env = os.environ.copy()
1024 1024 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1025 1025 env['HGEMITWARNINGS'] = '1'
1026 1026 env['TESTTMP'] = self._testtmp
1027 1027 env['HOME'] = self._testtmp
1028 1028 # This number should match portneeded in _getport
1029 1029 for port in xrange(3):
1030 1030 # This list should be parallel to _portmap in _getreplacements
1031 1031 defineport(port)
1032 1032 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1033 1033 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1034 1034 env["HGEDITOR"] = ('"' + sys.executable + '"'
1035 1035 + ' -c "import sys; sys.exit(0)"')
1036 1036 env["HGMERGE"] = "internal:merge"
1037 1037 env["HGUSER"] = "test"
1038 1038 env["HGENCODING"] = "ascii"
1039 1039 env["HGENCODINGMODE"] = "strict"
1040 1040 env['HGIPV6'] = str(int(self._useipv6))
1041 1041
1042 1042 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1043 1043 # IP addresses.
1044 1044 env['LOCALIP'] = self._localip()
1045 1045
1046 1046 # Reset some environment variables to well-known values so that
1047 1047 # the tests produce repeatable output.
1048 1048 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1049 1049 env['TZ'] = 'GMT'
1050 1050 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1051 1051 env['COLUMNS'] = '80'
1052 1052 env['TERM'] = 'xterm'
1053 1053
1054 1054 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1055 1055 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1056 1056 'NO_PROXY CHGDEBUG').split():
1057 1057 if k in env:
1058 1058 del env[k]
1059 1059
1060 1060 # unset env related to hooks
1061 1061 for k in env.keys():
1062 1062 if k.startswith('HG_'):
1063 1063 del env[k]
1064 1064
1065 1065 if self._usechg:
1066 1066 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1067 1067
1068 1068 return env
1069 1069
1070 1070 def _createhgrc(self, path):
1071 1071 """Create an hgrc file for this test."""
1072 1072 hgrc = open(path, 'wb')
1073 1073 hgrc.write(b'[ui]\n')
1074 1074 hgrc.write(b'slash = True\n')
1075 1075 hgrc.write(b'interactive = False\n')
1076 1076 hgrc.write(b'mergemarkers = detailed\n')
1077 1077 hgrc.write(b'promptecho = True\n')
1078 1078 hgrc.write(b'[defaults]\n')
1079 1079 hgrc.write(b'[devel]\n')
1080 1080 hgrc.write(b'all-warnings = true\n')
1081 1081 hgrc.write(b'default-date = 0 0\n')
1082 1082 hgrc.write(b'[largefiles]\n')
1083 1083 hgrc.write(b'usercache = %s\n' %
1084 1084 (os.path.join(self._testtmp, b'.cache/largefiles')))
1085 1085 hgrc.write(b'[web]\n')
1086 1086 hgrc.write(b'address = localhost\n')
1087 1087 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1088 1088
1089 1089 for opt in self._extraconfigopts:
1090 1090 section, key = opt.split('.', 1)
1091 1091 assert '=' in key, ('extra config opt %s must '
1092 1092 'have an = for assignment' % opt)
1093 1093 hgrc.write(b'[%s]\n%s\n' % (section, key))
1094 1094 hgrc.close()
1095 1095
    def fail(self, msg):
        """Fail this test with message ``msg``."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1100 1100
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In debug mode the child inherits our stdio so the user can
            # watch the test live; nothing is captured or compared.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; report SIGTERM if the child
            # managed to exit cleanly before the terminate landed.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        # We never feed stdin; close it so the child sees EOF immediately.
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, paths, addresses, etc. before comparison.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1149 1149
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        """Path to the expected-output file: <testdir>/<name>.out."""
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        """Execute the .py test with the configured interpreter and return
        (exitcode, output-lines)."""
        switch = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, switch, self.path)
        vlog("# Running", cmd)
        # Windows children emit \r\n; squash to \n before comparison.
        crlf = os.name == 'nt'
        result = self._runcommand(cmd, env, normalizenewlines=crlf)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1168 1168
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained. Lines matching any of these patterns never need a (glob).
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1182 1182
# bchr(i): turn a small integer into a single-byte bytes object. Under
# Python 2, bytes is str and chr() already does this; under Python 3 the
# integer must be wrapped in a bytes literal.
bchr = chr
if PYTHON3:
    def bchr(x):
        return bytes([x])
1186 1186
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = None
        if 'case' in kwds:
            case = kwds.pop('case')
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case

    @property
    def refpath(self):
        """The .t file is its own reference output."""
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        """Parse the .t file, run the generated shell script and return the
        merged (exitcode, output)."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Check feature requirements via the hghave helper script.

        Returns (True, None) when all reqs are available, otherwise
        (False, hghave's stdout). Exits the runner on hghave usage errors.
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself could not run (bad feature name, etc.).
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        if b'slow' in reqs:
            self._timeout = self._slowtimeout
        return True, None

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        """Translate the .t file into a shell script.

        Returns (salt, script, after, expected) where script is the list of
        shell lines to execute, after maps source line numbers to inactive
        (non-command) lines to merge back, and expected maps line numbers
        to the expected output of the command at that line.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    # The markers below must be bytes: they are merged into
                    # lists of bytes lines and written to a binary file.
                    after.setdefault(pos, []).append(
                        b'  !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(b'  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(b'  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Abort the script when a cd fails so later commands do
                    # not run in the wrong directory.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(b'  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge actual output with inactive source lines and expectations.

        Returns (exitcode, postout) where postout is the unified .t-style
        output; exitcode becomes False when only warn-only lines differed.
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    # NOTE(review): linematch/globmatch return b'+glob' /
                    # b'-glob' (bytes) but these comparisons use str, so on
                    # Python 3 the glob-warning branches never fire — verify
                    # against upstream before changing.
                    if isinstance(r, str):
                        if r == '+glob':
                            lout = el[:-1] + ' (glob)\n'
                            r = '' # Warn only this line.
                        elif r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout

    @staticmethod
    def rematch(el, l):
        """Match expected regex line el against actual line l."""
        try:
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    def linematch(self, el, l):
        """Match one expected line against one actual output line.

        Returns True on match, "retry" for optional lines that may match
        later output, b'+glob'/b'-glob' when a (glob) annotation should be
        added/removed, or a falsy value on mismatch.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep and l.replace(b'\\', b'/') == el:
                return b'+glob'
        return retry

    @staticmethod
    def parsehghaveoutput(lines):
        '''Parse hghave log lines.

        Return tuple of lists (missing, failed):
        * the missing/unknown features
        * the features for which existence check failed'''
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
                failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))

        return missing, failed

    @staticmethod
    def _escapef(m):
        # re.sub callback: map a single matched byte to its \xNN form.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Escape non-printable bytes so they survive in a text .t file.
        return TTest.ESCAPESUB(TTest._escapef, s)
1621 1621
# Global re-entrant lock serializing console output across test threads.
iolock = threading.RLock()
1623 1623
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # (name, user, sys, real, start, end) tuples, see stopTest().
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def addFailure(self, test, reason):
        """Record a failed test; prints a marker (or diff header) unless
        --first requested stopping at the first failure."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        """Record a test excluded by keyword/retest filtering."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Hand the ref/err pair to an external diff viewer.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        line = highlightdiff(line, self.color)
                        if PYTHON3:
                            # Diff lines are bytes; bypass the text stream.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1795 1795
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        # single-element list so the get() closure below can mutate it
        num_tests = [0]
        for test in self._tests:
            def get():
                # Return the test, reloading it from disk when the test
                # object asks to be reloaded (used by --loop and
                # --runs-per-test so each run gets a fresh instance).
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # whitelisted tests bypass all of the filters below
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                # --retest: only rerun tests that left an .err file behind
                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                # -k/--keywords: every keyword must occur in the test body
                # or its name for the test to be run
                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # one display slot per worker for --showchannels
        channels = [""] * self._jobs

        def job(test, result):
            # Run one test, claiming a free channel slot for the duration
            # and reporting completion (or a harness-level error) through
            # the 'done' queue.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread for --showchannels: periodically print one
            # character per channel showing which test occupies it.  Exits
            # once 'channels' is rebound to an empty list below.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Reap a finished job when all workers are busy or nothing
                # is left to start; the 1 second timeout keeps the loop
                # responsive to KeyboardInterrupt.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloaded if supported)
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting an incorrect result.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # rebinding to an empty list tells the stat() closure to exit
        channels = []

        return result
1982 1982
1983 1983 # Save the most recent 5 wall-clock runtimes of each test to a
1984 1984 # human-readable text file named .testtimes. Tests are sorted
1985 1985 # alphabetically, while times for each test are listed from oldest to
1986 1986 # newest.
1987 1987
def loadtimes(outputdir):
    """Return previously saved per-test wall-clock times from *outputdir*.

    Reads the ``.testtimes`` file written by savetimes() and returns a
    list of ``(testname, [duration, ...])`` pairs, oldest run first.  A
    missing file is not an error; an empty list is returned then.
    """
    times = []
    try:
        # savetimes() writes to '.testtimes'; the old code opened
        # '.testtimes-' here, so previously saved times were never found.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
1999 1999
def savetimes(outputdir, result):
    """Persist wall-clock runtimes of the tests recorded in *result*.

    Merges with previously saved data, keeps at most the five most
    recent runtimes per test (skipped tests are not recorded), and
    replaces the .testtimes file via a temporary file so a concurrent
    reader never sees a partial write.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        history = saved.setdefault(name, [])
        history.append(real)
        # only remember the most recent runs
        del history[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            fp.write('%s %s\n' % (name,
                                  ' '.join(['%.3f' % (t,) for t in history])))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2025 2025
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        # runner is the owning TestRunner; it supplies options and paths.
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

    def listtests(self, test):
        """Print the sorted test names instead of running them.

        Each listed test is recorded as a success so that the xunit and
        json reports can still be produced.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run the suite, then print the summary, reports and timings."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # hold iolock for the whole summary so it is not interleaved
        # with any straggling test output
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            # --known-good-rev: bisect each failing test down to the
            # changeset that broke it
            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)
            self.stream.flush()

        return result

    def _bisecttests(self, tests):
        """Use 'hg bisect' to locate the revision that broke each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # run a command with HGPLAIN set; return combined stdout+stderr
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            # re-invoke this very harness for the single test as the
            # bisection command
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))

    def printtimes(self, times):
        """Print the --time report, slowest (largest real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit-style XML report for *result* to *outf*."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a 'testreport = {...}' JSON report for *result* to *outf*."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2245 2245
2246 2246 class TestRunner(object):
2247 2247 """Holds context for executing tests.
2248 2248
2249 2249 Tests rely on a lot of state. This object holds it for them.
2250 2250 """
2251 2251
    # Programs required to run tests; their presence is verified by
    # _checktools() before any test executes.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class; anything else falls back to the
    # base Test class (see _gettest()).
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2267 2267
2268 2268 def __init__(self):
2269 2269 self.options = None
2270 2270 self._hgroot = None
2271 2271 self._testdir = None
2272 2272 self._outputdir = None
2273 2273 self._hgtmp = None
2274 2274 self._installdir = None
2275 2275 self._bindir = None
2276 2276 self._tmpbinddir = None
2277 2277 self._pythondir = None
2278 2278 self._coveragefile = None
2279 2279 self._createdfiles = []
2280 2280 self._hgcommand = None
2281 2281 self._hgpath = None
2282 2282 self._portoffset = 0
2283 2283 self._ports = {}
2284 2284
2285 2285 def run(self, args, parser=None):
2286 2286 """Run the test suite."""
2287 2287 oldmask = os.umask(0o22)
2288 2288 try:
2289 2289 parser = parser or getparser()
2290 2290 options, args = parseargs(args, parser)
2291 2291 # positional arguments are paths to test files to run, so
2292 2292 # we make sure they're all bytestrings
2293 2293 args = [_bytespath(a) for a in args]
2294 2294 if options.test_list is not None:
2295 2295 for listfile in options.test_list:
2296 2296 with open(listfile, 'rb') as f:
2297 2297 args.extend(t for t in f.read().splitlines() if t)
2298 2298 self.options = options
2299 2299
2300 2300 self._checktools()
2301 2301 testdescs = self.findtests(args)
2302 2302 if options.profile_runner:
2303 2303 import statprof
2304 2304 statprof.start()
2305 2305 result = self._run(testdescs)
2306 2306 if options.profile_runner:
2307 2307 statprof.stop()
2308 2308 statprof.display()
2309 2309 return result
2310 2310
2311 2311 finally:
2312 2312 os.umask(oldmask)
2313 2313
    def _run(self, testdescs):
        """Set up directories and environment, then run the tests.

        Mutates os.environ extensively (TESTDIR, HGTMP, PATH, PYTHONPATH,
        ...); the order of the steps below matters.  Returns an exit code.
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    # scale by the slow-test keyword multipliers above
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            # NOTE(review): only TESTDIR in the environment gets the
            # pathname appended; self._testdir keeps the bare cwd --
            # confirm this divergence is intended.
            if pathname and not osenvironb[b'TESTDIR'].endswith(b'/'):
                osenvironb[b'TESTDIR'] += b'/'
            osenvironb[b'TESTDIR'] += pathname
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            # Automatically removing tmpdir sounds convenient, but could
            # really annoy anyone in the habit of using "--tmpdir=/tmp"
            # or "--tmpdir=$HOME".
            #vlog("# Removing temp dir", tmpdir)
            #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # --with-hg: use an existing hg install instead of building one
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            # default: install hg into a temp dir (see _installhg())
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        # build up PATH: hg binary first, then this directory's helpers
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # give spawned processes a moment to release the temp dir
            time.sleep(.1)
            self._cleanup()
2497 2497
2498 2498 def findtests(self, args):
2499 2499 """Finds possible test files from arguments.
2500 2500
2501 2501 If you wish to inject custom tests into the test harness, this would
2502 2502 be a good function to monkeypatch or override in a derived class.
2503 2503 """
2504 2504 if not args:
2505 2505 if self.options.changed:
2506 2506 proc = Popen4('hg st --rev "%s" -man0 .' %
2507 2507 self.options.changed, None, 0)
2508 2508 stdout, stderr = proc.communicate()
2509 2509 args = stdout.strip(b'\0').split(b'\0')
2510 2510 else:
2511 2511 args = os.listdir(b'.')
2512 2512
2513 expanded_args = []
2514 for arg in args:
2515 if os.path.isdir(arg):
2516 if not arg.endswith(b'/'):
2517 arg += b'/'
2518 expanded_args.extend([arg + a for a in os.listdir(arg)])
2519 else:
2520 expanded_args.append(arg)
2521 args = expanded_args
2522
2513 2523 tests = []
2514 2524 for t in args:
2515 2525 if not (os.path.basename(t).startswith(b'test-')
2516 2526 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2517 2527 continue
2518 2528 if t.endswith(b'.t'):
2519 2529 # .t file may contain multiple test cases
2520 2530 cases = sorted(parsettestcases(t))
2521 2531 if cases:
2522 2532 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2523 2533 else:
2524 2534 tests.append({'path': t})
2525 2535 else:
2526 2536 tests.append({'path': t})
2527 2537 return tests
2528 2538
    def _runtests(self, testdescs):
        """Install hg if needed, build the TestSuite and execute it.

        Returns 1 on failure or interrupt, otherwise None (the caller in
        _run() converts that to 0).
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: drop everything before the first test that
                # left an .err file behind on the previous run
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    # temp-install mode: build hg first
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2603 2613
2604 2614 def _getport(self, count):
2605 2615 port = self._ports.get(count) # do we have a cached entry?
2606 2616 if port is None:
2607 2617 portneeded = 3
2608 2618 # above 100 tries we just give up and let test reports failure
2609 2619 for tries in xrange(100):
2610 2620 allfree = True
2611 2621 port = self.options.port + self._portoffset
2612 2622 for idx in xrange(portneeded):
2613 2623 if not checkportisavailable(port + idx):
2614 2624 allfree = False
2615 2625 break
2616 2626 self._portoffset += portneeded
2617 2627 if allfree:
2618 2628 break
2619 2629 self._ports[count] = port
2620 2630 return port
2621 2631
2622 2632 def _gettest(self, testdesc, count):
2623 2633 """Obtain a Test by looking at its filename.
2624 2634
2625 2635 Returns a Test instance. The Test may not be runnable if it doesn't
2626 2636 map to a known type.
2627 2637 """
2628 2638 path = testdesc['path']
2629 2639 lctest = path.lower()
2630 2640 testcls = Test
2631 2641
2632 2642 for ext, cls in self.TESTTYPES:
2633 2643 if lctest.endswith(ext):
2634 2644 testcls = cls
2635 2645 break
2636 2646
2637 2647 refpath = os.path.join(self._testdir, path)
2638 2648 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2639 2649
2640 2650 # extra keyword parameters. 'case' is used by .t tests
2641 2651 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2642 2652
2643 2653 t = testcls(refpath, self._outputdir, tmpdir,
2644 2654 keeptmpdir=self.options.keep_tmpdir,
2645 2655 debug=self.options.debug,
2646 2656 timeout=self.options.timeout,
2647 2657 startport=self._getport(count),
2648 2658 extraconfigopts=self.options.extra_config_opt,
2649 2659 py3kwarnings=self.options.py3k_warnings,
2650 2660 shell=self.options.shell,
2651 2661 hgcommand=self._hgcommand,
2652 2662 usechg=bool(self.options.with_chg or self.options.chg),
2653 2663 useipv6=useipv6, **kwds)
2654 2664 t.should_reload = True
2655 2665 return t
2656 2666
2657 2667 def _cleanup(self):
2658 2668 """Clean up state from this test invocation."""
2659 2669 if self.options.keep_tmpdir:
2660 2670 return
2661 2671
2662 2672 vlog("# Cleaning up HGTMP", self._hgtmp)
2663 2673 shutil.rmtree(self._hgtmp, True)
2664 2674 for f in self._createdfiles:
2665 2675 try:
2666 2676 os.remove(f)
2667 2677 except OSError:
2668 2678 pass
2669 2679
2670 2680 def _usecorrectpython(self):
2671 2681 """Configure the environment to use the appropriate Python in tests."""
2672 2682 # Tests must use the same interpreter as us or bad things will happen.
2673 2683 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2674 2684 if getattr(os, 'symlink', None):
2675 2685 vlog("# Making python executable in test path a symlink to '%s'" %
2676 2686 sys.executable)
2677 2687 mypython = os.path.join(self._tmpbindir, pyexename)
2678 2688 try:
2679 2689 if os.readlink(mypython) == sys.executable:
2680 2690 return
2681 2691 os.unlink(mypython)
2682 2692 except OSError as err:
2683 2693 if err.errno != errno.ENOENT:
2684 2694 raise
2685 2695 if self._findprogram(pyexename) != sys.executable:
2686 2696 try:
2687 2697 os.symlink(sys.executable, mypython)
2688 2698 self._createdfiles.append(mypython)
2689 2699 except OSError as err:
2690 2700 # child processes may race, which is harmless
2691 2701 if err.errno != errno.EEXIST:
2692 2702 raise
2693 2703 else:
2694 2704 exedir, exename = os.path.split(sys.executable)
2695 2705 vlog("# Modifying search path to find %s as %s in '%s'" %
2696 2706 (exename, pyexename, exedir))
2697 2707 path = os.environ['PATH'].split(os.pathsep)
2698 2708 while exedir in path:
2699 2709 path.remove(exedir)
2700 2710 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2701 2711 if not self._findprogram(pyexename):
2702 2712 print("WARNING: Cannot find %s in search path" % pyexename)
2703 2713
2704 2714 def _installhg(self):
2705 2715 """Install hg into the test environment.
2706 2716
2707 2717 This will also configure hg with the appropriate testing settings.
2708 2718 """
2709 2719 vlog("# Performing temporary installation of HG")
2710 2720 installerrs = os.path.join(self._hgtmp, b"install.err")
2711 2721 compiler = ''
2712 2722 if self.options.compiler:
2713 2723 compiler = '--compiler ' + self.options.compiler
2714 2724 if self.options.pure:
2715 2725 pure = b"--pure"
2716 2726 else:
2717 2727 pure = b""
2718 2728
2719 2729 # Run installer in hg root
2720 2730 script = os.path.realpath(sys.argv[0])
2721 2731 exe = sys.executable
2722 2732 if PYTHON3:
2723 2733 compiler = _bytespath(compiler)
2724 2734 script = _bytespath(script)
2725 2735 exe = _bytespath(exe)
2726 2736 hgroot = os.path.dirname(os.path.dirname(script))
2727 2737 self._hgroot = hgroot
2728 2738 os.chdir(hgroot)
2729 2739 nohome = b'--home=""'
2730 2740 if os.name == 'nt':
2731 2741 # The --home="" trick works only on OS where os.sep == '/'
2732 2742 # because of a distutils convert_path() fast-path. Avoid it at
2733 2743 # least on Windows for now, deal with .pydistutils.cfg bugs
2734 2744 # when they happen.
2735 2745 nohome = b''
2736 2746 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2737 2747 b' build %(compiler)s --build-base="%(base)s"'
2738 2748 b' install --force --prefix="%(prefix)s"'
2739 2749 b' --install-lib="%(libdir)s"'
2740 2750 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2741 2751 % {b'exe': exe, b'pure': pure,
2742 2752 b'compiler': compiler,
2743 2753 b'base': os.path.join(self._hgtmp, b"build"),
2744 2754 b'prefix': self._installdir, b'libdir': self._pythondir,
2745 2755 b'bindir': self._bindir,
2746 2756 b'nohome': nohome, b'logfile': installerrs})
2747 2757
2748 2758 # setuptools requires install directories to exist.
2749 2759 def makedirs(p):
2750 2760 try:
2751 2761 os.makedirs(p)
2752 2762 except OSError as e:
2753 2763 if e.errno != errno.EEXIST:
2754 2764 raise
2755 2765 makedirs(self._pythondir)
2756 2766 makedirs(self._bindir)
2757 2767
2758 2768 vlog("# Running", cmd)
2759 2769 if os.system(cmd) == 0:
2760 2770 if not self.options.verbose:
2761 2771 try:
2762 2772 os.remove(installerrs)
2763 2773 except OSError as e:
2764 2774 if e.errno != errno.ENOENT:
2765 2775 raise
2766 2776 else:
2767 2777 f = open(installerrs, 'rb')
2768 2778 for line in f:
2769 2779 if PYTHON3:
2770 2780 sys.stdout.buffer.write(line)
2771 2781 else:
2772 2782 sys.stdout.write(line)
2773 2783 f.close()
2774 2784 sys.exit(1)
2775 2785 os.chdir(self._testdir)
2776 2786
2777 2787 self._usecorrectpython()
2778 2788
2779 2789 if self.options.py3k_warnings and not self.options.anycoverage:
2780 2790 vlog("# Updating hg command to enable Py3k Warnings switch")
2781 2791 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2782 2792 lines = [line.rstrip() for line in f]
2783 2793 lines[0] += ' -3'
2784 2794 f.close()
2785 2795 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2786 2796 for line in lines:
2787 2797 f.write(line + '\n')
2788 2798 f.close()
2789 2799
2790 2800 hgbat = os.path.join(self._bindir, b'hg.bat')
2791 2801 if os.path.isfile(hgbat):
2792 2802 # hg.bat expects to be put in bin/scripts while run-tests.py
2793 2803 # installation layout put it in bin/ directly. Fix it
2794 2804 f = open(hgbat, 'rb')
2795 2805 data = f.read()
2796 2806 f.close()
2797 2807 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2798 2808 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2799 2809 b'"%~dp0python" "%~dp0hg" %*')
2800 2810 f = open(hgbat, 'wb')
2801 2811 f.write(data)
2802 2812 f.close()
2803 2813 else:
2804 2814 print('WARNING: cannot fix hg.bat reference to python.exe')
2805 2815
2806 2816 if self.options.anycoverage:
2807 2817 custom = os.path.join(self._testdir, 'sitecustomize.py')
2808 2818 target = os.path.join(self._pythondir, 'sitecustomize.py')
2809 2819 vlog('# Installing coverage trigger to %s' % target)
2810 2820 shutil.copyfile(custom, target)
2811 2821 rc = os.path.join(self._testdir, '.coveragerc')
2812 2822 vlog('# Installing coverage rc to %s' % rc)
2813 2823 os.environ['COVERAGE_PROCESS_START'] = rc
2814 2824 covdir = os.path.join(self._installdir, '..', 'coverage')
2815 2825 try:
2816 2826 os.mkdir(covdir)
2817 2827 except OSError as e:
2818 2828 if e.errno != errno.EEXIST:
2819 2829 raise
2820 2830
2821 2831 os.environ['COVERAGE_DIR'] = covdir
2822 2832
2823 2833 def _checkhglib(self, verb):
2824 2834 """Ensure that the 'mercurial' package imported by python is
2825 2835 the one we expect it to be. If not, print a warning to stderr."""
2826 2836 if ((self._bindir == self._pythondir) and
2827 2837 (self._bindir != self._tmpbindir)):
2828 2838 # The pythondir has been inferred from --with-hg flag.
2829 2839 # We cannot expect anything sensible here.
2830 2840 return
2831 2841 expecthg = os.path.join(self._pythondir, b'mercurial')
2832 2842 actualhg = self._gethgpath()
2833 2843 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2834 2844 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2835 2845 ' (expected %s)\n'
2836 2846 % (verb, actualhg, expecthg))
2837 2847 def _gethgpath(self):
2838 2848 """Return the path to the mercurial package that is actually found by
2839 2849 the current Python interpreter."""
2840 2850 if self._hgpath is not None:
2841 2851 return self._hgpath
2842 2852
2843 2853 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2844 2854 cmd = cmd % PYTHON
2845 2855 if PYTHON3:
2846 2856 cmd = _strpath(cmd)
2847 2857 pipe = os.popen(cmd)
2848 2858 try:
2849 2859 self._hgpath = _bytespath(pipe.read().strip())
2850 2860 finally:
2851 2861 pipe.close()
2852 2862
2853 2863 return self._hgpath
2854 2864
2855 2865 def _installchg(self):
2856 2866 """Install chg into the test environment"""
2857 2867 vlog('# Performing temporary installation of CHG')
2858 2868 assert os.path.dirname(self._bindir) == self._installdir
2859 2869 assert self._hgroot, 'must be called after _installhg()'
2860 2870 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2861 2871 % {b'make': 'make', # TODO: switch by option or environment?
2862 2872 b'prefix': self._installdir})
2863 2873 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2864 2874 vlog("# Running", cmd)
2865 2875 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2866 2876 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2867 2877 stderr=subprocess.STDOUT)
2868 2878 out, _err = proc.communicate()
2869 2879 if proc.returncode != 0:
2870 2880 if PYTHON3:
2871 2881 sys.stdout.buffer.write(out)
2872 2882 else:
2873 2883 sys.stdout.write(out)
2874 2884 sys.exit(1)
2875 2885
2876 2886 def _outputcoverage(self):
2877 2887 """Produce code coverage output."""
2878 2888 import coverage
2879 2889 coverage = coverage.coverage
2880 2890
2881 2891 vlog('# Producing coverage report')
2882 2892 # chdir is the easiest way to get short, relative paths in the
2883 2893 # output.
2884 2894 os.chdir(self._hgroot)
2885 2895 covdir = os.path.join(self._installdir, '..', 'coverage')
2886 2896 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2887 2897
2888 2898 # Map install directory paths back to source directory.
2889 2899 cov.config.paths['srcdir'] = ['.', self._pythondir]
2890 2900
2891 2901 cov.combine()
2892 2902
2893 2903 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2894 2904 cov.report(ignore_errors=True, omit=omit)
2895 2905
2896 2906 if self.options.htmlcov:
2897 2907 htmldir = os.path.join(self._outputdir, 'htmlcov')
2898 2908 cov.html_report(directory=htmldir, omit=omit)
2899 2909 if self.options.annotate:
2900 2910 adir = os.path.join(self._outputdir, 'annotated')
2901 2911 if not os.path.isdir(adir):
2902 2912 os.mkdir(adir)
2903 2913 cov.annotate(directory=adir, omit=omit)
2904 2914
2905 2915 def _findprogram(self, program):
2906 2916 """Search PATH for a executable program"""
2907 2917 dpb = _bytespath(os.defpath)
2908 2918 sepb = _bytespath(os.pathsep)
2909 2919 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2910 2920 name = os.path.join(p, program)
2911 2921 if os.name == 'nt' or os.access(name, os.X_OK):
2912 2922 return name
2913 2923 return None
2914 2924
2915 2925 def _checktools(self):
2916 2926 """Ensure tools required to run tests are present."""
2917 2927 for p in self.REQUIREDTOOLS:
2918 2928 if os.name == 'nt' and not p.endswith('.exe'):
2919 2929 p += '.exe'
2920 2930 found = self._findprogram(p)
2921 2931 if found:
2922 2932 vlog("# Found prerequisite", p, "at", found)
2923 2933 else:
2924 2934 print("WARNING: Did not find prerequisite tool: %s " %
2925 2935 p.decode("utf-8"))
2926 2936
2927 2937 if __name__ == '__main__':
2928 2938 runner = TestRunner()
2929 2939
2930 2940 try:
2931 2941 import msvcrt
2932 2942 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2933 2943 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2934 2944 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2935 2945 except ImportError:
2936 2946 pass
2937 2947
2938 2948 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1487 +1,1505 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a succesful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 test diff colorisation
123 123
124 124 #if no-windows pygments
125 125 $ rt test-failure.t --color always
126 126
127 127 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
128 128 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
129 129 \x1b[38;5;90;01m@@ -1,3 +1,3 @@\x1b[39;00m (esc)
130 130 $ echo "bar-baz"; echo "bar-bad"
131 131 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
132 132 bar*bad (glob)
133 133 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
134 134
135 135 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
136 136 !
137 137 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
138 138 # Ran 1 tests, 0 skipped, 1 failed.
139 139 python hash seed: * (glob)
140 140 [1]
141 141
142 142 $ rt test-failure.t 2> tmp.log
143 143 [1]
144 144 $ cat tmp.log
145 145
146 146 --- $TESTTMP/test-failure.t
147 147 +++ $TESTTMP/test-failure.t.err
148 148 @@ -1,3 +1,3 @@
149 149 $ echo "bar-baz"; echo "bar-bad"
150 150 + bar*baz (glob)
151 151 bar*bad (glob)
152 152 - bar*baz (glob)
153 153
154 154 ERROR: test-failure.t output changed
155 155 !
156 156 Failed test-failure.t: output changed
157 157 # Ran 1 tests, 0 skipped, 1 failed.
158 158 python hash seed: * (glob)
159 159 #endif
160 160
161 161 $ cat > test-failure.t << EOF
162 162 > $ true
163 163 > should go away (true !)
164 164 > $ true
165 165 > should stay (false !)
166 166 >
167 167 > Should remove first line, not second or third
168 168 > $ echo 'testing'
169 169 > baz*foo (glob) (true !)
170 170 > foobar*foo (glob) (false !)
171 171 > te*ting (glob) (true !)
172 172 >
173 173 > Should keep first two lines, remove third and last
174 174 > $ echo 'testing'
175 175 > test.ng (re) (true !)
176 176 > foo.ar (re) (false !)
177 177 > b.r (re) (true !)
178 178 > missing (?)
179 179 > awol (true !)
180 180 >
181 181 > The "missing" line should stay, even though awol is dropped
182 182 > $ echo 'testing'
183 183 > test.ng (re) (true !)
184 184 > foo.ar (?)
185 185 > awol
186 186 > missing (?)
187 187 > EOF
188 188 $ rt test-failure.t
189 189
190 190 --- $TESTTMP/test-failure.t
191 191 +++ $TESTTMP/test-failure.t.err
192 192 @@ -1,11 +1,9 @@
193 193 $ true
194 194 - should go away (true !)
195 195 $ true
196 196 should stay (false !)
197 197
198 198 Should remove first line, not second or third
199 199 $ echo 'testing'
200 200 - baz*foo (glob) (true !)
201 201 foobar*foo (glob) (false !)
202 202 te*ting (glob) (true !)
203 203
204 204 foo.ar (re) (false !)
205 205 missing (?)
206 206 @@ -13,13 +11,10 @@
207 207 $ echo 'testing'
208 208 test.ng (re) (true !)
209 209 foo.ar (re) (false !)
210 210 - b.r (re) (true !)
211 211 missing (?)
212 212 - awol (true !)
213 213
214 214 The "missing" line should stay, even though awol is dropped
215 215 $ echo 'testing'
216 216 test.ng (re) (true !)
217 217 foo.ar (?)
218 218 - awol
219 219 missing (?)
220 220
221 221 ERROR: test-failure.t output changed
222 222 !
223 223 Failed test-failure.t: output changed
224 224 # Ran 1 tests, 0 skipped, 1 failed.
225 225 python hash seed: * (glob)
226 226 [1]
227 227
228 228 basic failing test
229 229 $ cat > test-failure.t << EOF
230 230 > $ echo babar
231 231 > rataxes
232 232 > This is a noop statement so that
233 233 > this test is still more bytes than success.
234 234 > pad pad pad pad............................................................
235 235 > pad pad pad pad............................................................
236 236 > pad pad pad pad............................................................
237 237 > pad pad pad pad............................................................
238 238 > pad pad pad pad............................................................
239 239 > pad pad pad pad............................................................
240 240 > EOF
241 241
242 242 >>> fh = open('test-failure-unicode.t', 'wb')
243 243 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
244 244 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
245 245
246 246 $ rt
247 247
248 248 --- $TESTTMP/test-failure.t
249 249 +++ $TESTTMP/test-failure.t.err
250 250 @@ -1,5 +1,5 @@
251 251 $ echo babar
252 252 - rataxes
253 253 + babar
254 254 This is a noop statement so that
255 255 this test is still more bytes than success.
256 256 pad pad pad pad............................................................
257 257
258 258 ERROR: test-failure.t output changed
259 259 !.
260 260 --- $TESTTMP/test-failure-unicode.t
261 261 +++ $TESTTMP/test-failure-unicode.t.err
262 262 @@ -1,2 +1,2 @@
263 263 $ echo babar\xce\xb1 (esc)
264 264 - l\xce\xb5\xce\xb5t (esc)
265 265 + babar\xce\xb1 (esc)
266 266
267 267 ERROR: test-failure-unicode.t output changed
268 268 !
269 269 Failed test-failure.t: output changed
270 270 Failed test-failure-unicode.t: output changed
271 271 # Ran 3 tests, 0 skipped, 2 failed.
272 272 python hash seed: * (glob)
273 273 [1]
274 274
275 275 test --outputdir
276 276 $ mkdir output
277 277 $ rt --outputdir output
278 278
279 279 --- $TESTTMP/test-failure.t
280 280 +++ $TESTTMP/output/test-failure.t.err
281 281 @@ -1,5 +1,5 @@
282 282 $ echo babar
283 283 - rataxes
284 284 + babar
285 285 This is a noop statement so that
286 286 this test is still more bytes than success.
287 287 pad pad pad pad............................................................
288 288
289 289 ERROR: test-failure.t output changed
290 290 !.
291 291 --- $TESTTMP/test-failure-unicode.t
292 292 +++ $TESTTMP/output/test-failure-unicode.t.err
293 293 @@ -1,2 +1,2 @@
294 294 $ echo babar\xce\xb1 (esc)
295 295 - l\xce\xb5\xce\xb5t (esc)
296 296 + babar\xce\xb1 (esc)
297 297
298 298 ERROR: test-failure-unicode.t output changed
299 299 !
300 300 Failed test-failure.t: output changed
301 301 Failed test-failure-unicode.t: output changed
302 302 # Ran 3 tests, 0 skipped, 2 failed.
303 303 python hash seed: * (glob)
304 304 [1]
305 305 $ ls -a output
306 306 .
307 307 ..
308 308 .testtimes
309 309 test-failure-unicode.t.err
310 310 test-failure.t.err
311 311
312 312 test --xunit support
313 313 $ rt --xunit=xunit.xml
314 314
315 315 --- $TESTTMP/test-failure.t
316 316 +++ $TESTTMP/test-failure.t.err
317 317 @@ -1,5 +1,5 @@
318 318 $ echo babar
319 319 - rataxes
320 320 + babar
321 321 This is a noop statement so that
322 322 this test is still more bytes than success.
323 323 pad pad pad pad............................................................
324 324
325 325 ERROR: test-failure.t output changed
326 326 !.
327 327 --- $TESTTMP/test-failure-unicode.t
328 328 +++ $TESTTMP/test-failure-unicode.t.err
329 329 @@ -1,2 +1,2 @@
330 330 $ echo babar\xce\xb1 (esc)
331 331 - l\xce\xb5\xce\xb5t (esc)
332 332 + babar\xce\xb1 (esc)
333 333
334 334 ERROR: test-failure-unicode.t output changed
335 335 !
336 336 Failed test-failure.t: output changed
337 337 Failed test-failure-unicode.t: output changed
338 338 # Ran 3 tests, 0 skipped, 2 failed.
339 339 python hash seed: * (glob)
340 340 [1]
341 341 $ cat xunit.xml
342 342 <?xml version="1.0" encoding="utf-8"?>
343 343 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
344 344 <testcase name="test-success.t" time="*"/> (glob)
345 345 <testcase name="test-failure-unicode.t" time="*"> (glob)
346 346 <failure message="output changed" type="output-mismatch">
347 347 <![CDATA[--- $TESTTMP/test-failure-unicode.t
348 348 +++ $TESTTMP/test-failure-unicode.t.err
349 349 @@ -1,2 +1,2 @@
350 350 $ echo babar\xce\xb1 (esc)
351 351 - l\xce\xb5\xce\xb5t (esc)
352 352 + babar\xce\xb1 (esc)
353 353 ]]> </failure>
354 354 </testcase>
355 355 <testcase name="test-failure.t" time="*"> (glob)
356 356 <failure message="output changed" type="output-mismatch">
357 357 <![CDATA[--- $TESTTMP/test-failure.t
358 358 +++ $TESTTMP/test-failure.t.err
359 359 @@ -1,5 +1,5 @@
360 360 $ echo babar
361 361 - rataxes
362 362 + babar
363 363 This is a noop statement so that
364 364 this test is still more bytes than success.
365 365 pad pad pad pad............................................................
366 366 ]]> </failure>
367 367 </testcase>
368 368 </testsuite>
369 369
370 370 $ cat .testtimes
371 371 test-failure-unicode.t * (glob)
372 372 test-failure.t * (glob)
373 373 test-success.t * (glob)
374 374
375 375 $ rt --list-tests
376 376 test-failure-unicode.t
377 377 test-failure.t
378 378 test-success.t
379 379
380 380 $ rt --list-tests --json
381 381 test-failure-unicode.t
382 382 test-failure.t
383 383 test-success.t
384 384 $ cat report.json
385 385 testreport ={
386 386 "test-failure-unicode.t": {
387 387 "result": "success"
388 388 },
389 389 "test-failure.t": {
390 390 "result": "success"
391 391 },
392 392 "test-success.t": {
393 393 "result": "success"
394 394 }
395 395 } (no-eol)
396 396
397 397 $ rt --list-tests --xunit=xunit.xml
398 398 test-failure-unicode.t
399 399 test-failure.t
400 400 test-success.t
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
404 404 <testcase name="test-failure-unicode.t"/>
405 405 <testcase name="test-failure.t"/>
406 406 <testcase name="test-success.t"/>
407 407 </testsuite>
408 408
409 409 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
410 410 test-failure-unicode.t
411 411 test-failure.t
412 412 $ cat output/report.json
413 413 testreport ={
414 414 "test-failure-unicode.t": {
415 415 "result": "success"
416 416 },
417 417 "test-failure.t": {
418 418 "result": "success"
419 419 }
420 420 } (no-eol)
421 421 $ cat xunit.xml
422 422 <?xml version="1.0" encoding="utf-8"?>
423 423 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
424 424 <testcase name="test-failure-unicode.t"/>
425 425 <testcase name="test-failure.t"/>
426 426 </testsuite>
427 427
428 428 $ rm test-failure-unicode.t
429 429
430 430 test for --retest
431 431 ====================
432 432
433 433 $ rt --retest
434 434
435 435 --- $TESTTMP/test-failure.t
436 436 +++ $TESTTMP/test-failure.t.err
437 437 @@ -1,5 +1,5 @@
438 438 $ echo babar
439 439 - rataxes
440 440 + babar
441 441 This is a noop statement so that
442 442 this test is still more bytes than success.
443 443 pad pad pad pad............................................................
444 444
445 445 ERROR: test-failure.t output changed
446 446 !
447 447 Failed test-failure.t: output changed
448 448 # Ran 2 tests, 1 skipped, 1 failed.
449 449 python hash seed: * (glob)
450 450 [1]
451 451
452 452 --retest works with --outputdir
453 453 $ rm -r output
454 454 $ mkdir output
455 455 $ mv test-failure.t.err output
456 456 $ rt --retest --outputdir output
457 457
458 458 --- $TESTTMP/test-failure.t
459 459 +++ $TESTTMP/output/test-failure.t.err
460 460 @@ -1,5 +1,5 @@
461 461 $ echo babar
462 462 - rataxes
463 463 + babar
464 464 This is a noop statement so that
465 465 this test is still more bytes than success.
466 466 pad pad pad pad............................................................
467 467
468 468 ERROR: test-failure.t output changed
469 469 !
470 470 Failed test-failure.t: output changed
471 471 # Ran 2 tests, 1 skipped, 1 failed.
472 472 python hash seed: * (glob)
473 473 [1]
474 474
475 475 Selecting Tests To Run
476 476 ======================
477 477
478 478 successful
479 479
480 480 $ rt test-success.t
481 481 .
482 482 # Ran 1 tests, 0 skipped, 0 failed.
483 483
484 484 success w/ keyword
485 485 $ rt -k xyzzy
486 486 .
487 487 # Ran 2 tests, 1 skipped, 0 failed.
488 488
489 489 failed
490 490
491 491 $ rt test-failure.t
492 492
493 493 --- $TESTTMP/test-failure.t
494 494 +++ $TESTTMP/test-failure.t.err
495 495 @@ -1,5 +1,5 @@
496 496 $ echo babar
497 497 - rataxes
498 498 + babar
499 499 This is a noop statement so that
500 500 this test is still more bytes than success.
501 501 pad pad pad pad............................................................
502 502
503 503 ERROR: test-failure.t output changed
504 504 !
505 505 Failed test-failure.t: output changed
506 506 # Ran 1 tests, 0 skipped, 1 failed.
507 507 python hash seed: * (glob)
508 508 [1]
509 509
510 510 failure w/ keyword
511 511 $ rt -k rataxes
512 512
513 513 --- $TESTTMP/test-failure.t
514 514 +++ $TESTTMP/test-failure.t.err
515 515 @@ -1,5 +1,5 @@
516 516 $ echo babar
517 517 - rataxes
518 518 + babar
519 519 This is a noop statement so that
520 520 this test is still more bytes than success.
521 521 pad pad pad pad............................................................
522 522
523 523 ERROR: test-failure.t output changed
524 524 !
525 525 Failed test-failure.t: output changed
526 526 # Ran 2 tests, 1 skipped, 1 failed.
527 527 python hash seed: * (glob)
528 528 [1]
529 529
530 530 Verify that when a process fails to start we show a useful message
531 531 ==================================================================
532 532
533 533 $ cat > test-serve-fail.t <<EOF
534 534 > $ echo 'abort: child process failed to start blah'
535 535 > EOF
536 536 $ rt test-serve-fail.t
537 537
538 538 ERROR: test-serve-fail.t output changed
539 539 !
540 540 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
541 541 # Ran 1 tests, 0 skipped, 1 failed.
542 542 python hash seed: * (glob)
543 543 [1]
544 544 $ rm test-serve-fail.t
545 545
546 546 Verify that we can try other ports
547 547 ===================================
548 548 $ hg init inuse
549 549 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
550 550 $ cat blocks.pid >> $DAEMON_PIDS
551 551 $ cat > test-serve-inuse.t <<EOF
552 552 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
553 553 > $ cat hg.pid >> \$DAEMON_PIDS
554 554 > EOF
555 555 $ rt test-serve-inuse.t
556 556 .
557 557 # Ran 1 tests, 0 skipped, 0 failed.
558 558 $ rm test-serve-inuse.t
559 559 $ killdaemons.py $DAEMON_PIDS
560 560 $ rm $DAEMON_PIDS
561 561
562 562 Running In Debug Mode
563 563 ======================
564 564
565 565 $ rt --debug 2>&1 | grep -v pwd
566 566 + echo *SALT* 0 0 (glob)
567 567 *SALT* 0 0 (glob)
568 568 + echo babar
569 569 babar
570 570 + echo *SALT* 10 0 (glob)
571 571 *SALT* 10 0 (glob)
572 572 *+ echo *SALT* 0 0 (glob)
573 573 *SALT* 0 0 (glob)
574 574 + echo babar
575 575 babar
576 576 + echo *SALT* 2 0 (glob)
577 577 *SALT* 2 0 (glob)
578 578 + echo xyzzy
579 579 xyzzy
580 580 + echo *SALT* 9 0 (glob)
581 581 *SALT* 9 0 (glob)
582 582 + printf *abc\ndef\nxyz\n* (glob)
583 583 abc
584 584 def
585 585 xyz
586 586 + echo *SALT* 15 0 (glob)
587 587 *SALT* 15 0 (glob)
588 588 + printf *zyx\nwvu\ntsr\n* (glob)
589 589 zyx
590 590 wvu
591 591 tsr
592 592 + echo *SALT* 22 0 (glob)
593 593 *SALT* 22 0 (glob)
594 594 .
595 595 # Ran 2 tests, 0 skipped, 0 failed.
596 596
597 597 Parallel runs
598 598 ==============
599 599
600 600 (duplicate the failing test to get predictable output)
601 601 $ cp test-failure.t test-failure-copy.t
602 602
603 603 $ rt --jobs 2 test-failure*.t -n
604 604 !!
605 605 Failed test-failure*.t: output changed (glob)
606 606 Failed test-failure*.t: output changed (glob)
607 607 # Ran 2 tests, 0 skipped, 2 failed.
608 608 python hash seed: * (glob)
609 609 [1]
610 610
611 611 failures in parallel with --first should only print one failure
612 612 $ rt --jobs 2 --first test-failure*.t
613 613
614 614 --- $TESTTMP/test-failure*.t (glob)
615 615 +++ $TESTTMP/test-failure*.t.err (glob)
616 616 @@ -1,5 +1,5 @@
617 617 $ echo babar
618 618 - rataxes
619 619 + babar
620 620 This is a noop statement so that
621 621 this test is still more bytes than success.
622 622 pad pad pad pad............................................................
623 623
624 624 Failed test-failure*.t: output changed (glob)
625 625 Failed test-failure*.t: output changed (glob)
626 626 # Ran 2 tests, 0 skipped, 2 failed.
627 627 python hash seed: * (glob)
628 628 [1]
629 629
630 630
631 631 (delete the duplicated test file)
632 632 $ rm test-failure-copy.t
633 633
634 634
635 635 Interactive run
636 636 ===============
637 637
638 638 (backup the failing test)
639 639 $ cp test-failure.t backup
640 640
641 641 Refuse the fix
642 642
643 643 $ echo 'n' | rt -i
644 644
645 645 --- $TESTTMP/test-failure.t
646 646 +++ $TESTTMP/test-failure.t.err
647 647 @@ -1,5 +1,5 @@
648 648 $ echo babar
649 649 - rataxes
650 650 + babar
651 651 This is a noop statement so that
652 652 this test is still more bytes than success.
653 653 pad pad pad pad............................................................
654 654 Accept this change? [n]
655 655 ERROR: test-failure.t output changed
656 656 !.
657 657 Failed test-failure.t: output changed
658 658 # Ran 2 tests, 0 skipped, 1 failed.
659 659 python hash seed: * (glob)
660 660 [1]
661 661
662 662 $ cat test-failure.t
663 663 $ echo babar
664 664 rataxes
665 665 This is a noop statement so that
666 666 this test is still more bytes than success.
667 667 pad pad pad pad............................................................
668 668 pad pad pad pad............................................................
669 669 pad pad pad pad............................................................
670 670 pad pad pad pad............................................................
671 671 pad pad pad pad............................................................
672 672 pad pad pad pad............................................................
673 673
674 674 Interactive with custom view
675 675
676 676 $ echo 'n' | rt -i --view echo
677 677 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
678 678 Accept this change? [n]* (glob)
679 679 ERROR: test-failure.t output changed
680 680 !.
681 681 Failed test-failure.t: output changed
682 682 # Ran 2 tests, 0 skipped, 1 failed.
683 683 python hash seed: * (glob)
684 684 [1]
685 685
686 686 View the fix
687 687
688 688 $ echo 'y' | rt --view echo
689 689 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
690 690
691 691 ERROR: test-failure.t output changed
692 692 !.
693 693 Failed test-failure.t: output changed
694 694 # Ran 2 tests, 0 skipped, 1 failed.
695 695 python hash seed: * (glob)
696 696 [1]
697 697
698 698 Accept the fix
699 699
700 700 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
701 701 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
702 702 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
703 703 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
704 704 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
705 705 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
706 706 $ echo 'y' | rt -i 2>&1
707 707
708 708 --- $TESTTMP/test-failure.t
709 709 +++ $TESTTMP/test-failure.t.err
710 710 @@ -1,5 +1,5 @@
711 711 $ echo babar
712 712 - rataxes
713 713 + babar
714 714 This is a noop statement so that
715 715 this test is still more bytes than success.
716 716 pad pad pad pad............................................................
717 717 @@ -9,7 +9,7 @@
718 718 pad pad pad pad............................................................
719 719 pad pad pad pad............................................................
720 720 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
721 721 - saved backup bundle to $TESTTMP/foo.hg
722 722 + saved backup bundle to $TESTTMP/foo.hg* (glob)
723 723 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
724 724 saved backup bundle to $TESTTMP/foo.hg* (glob)
725 725 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
726 726 Accept this change? [n] ..
727 727 # Ran 2 tests, 0 skipped, 0 failed.
728 728
729 729 $ sed -e 's,(glob)$,&<,g' test-failure.t
730 730 $ echo babar
731 731 babar
732 732 This is a noop statement so that
733 733 this test is still more bytes than success.
734 734 pad pad pad pad............................................................
735 735 pad pad pad pad............................................................
736 736 pad pad pad pad............................................................
737 737 pad pad pad pad............................................................
738 738 pad pad pad pad............................................................
739 739 pad pad pad pad............................................................
740 740 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
741 741 saved backup bundle to $TESTTMP/foo.hg (glob)<
742 742 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
743 743 saved backup bundle to $TESTTMP/foo.hg (glob)<
744 744 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
745 745 saved backup bundle to $TESTTMP/*.hg (glob)<
746 746
747 747 Race condition - test file was modified when test is running
748 748
749 749 $ TESTRACEDIR=`pwd`
750 750 $ export TESTRACEDIR
751 751 $ cat > test-race.t <<EOF
752 752 > $ echo 1
753 753 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
754 754 > EOF
755 755
756 756 $ rt -i test-race.t
757 757
758 758 --- $TESTTMP/test-race.t
759 759 +++ $TESTTMP/test-race.t.err
760 760 @@ -1,2 +1,3 @@
761 761 $ echo 1
762 762 + 1
763 763 $ echo "# a new line" >> $TESTTMP/test-race.t
764 764 Reference output has changed (run again to prompt changes)
765 765 ERROR: test-race.t output changed
766 766 !
767 767 Failed test-race.t: output changed
768 768 # Ran 1 tests, 0 skipped, 1 failed.
769 769 python hash seed: * (glob)
770 770 [1]
771 771
772 772 $ rm test-race.t
773 773
774 774 When "#testcases" is used in .t files
775 775
776 776 $ cat >> test-cases.t <<EOF
777 777 > #testcases a b
778 778 > #if a
779 779 > $ echo 1
780 780 > #endif
781 781 > #if b
782 782 > $ echo 2
783 783 > #endif
784 784 > EOF
785 785
786 786 $ cat <<EOF | rt -i test-cases.t 2>&1
787 787 > y
788 788 > y
789 789 > EOF
790 790
791 791 --- $TESTTMP/test-cases.t
792 792 +++ $TESTTMP/test-cases.t.a.err
793 793 @@ -1,6 +1,7 @@
794 794 #testcases a b
795 795 #if a
796 796 $ echo 1
797 797 + 1
798 798 #endif
799 799 #if b
800 800 $ echo 2
801 801 Accept this change? [n] .
802 802 --- $TESTTMP/test-cases.t
803 803 +++ $TESTTMP/test-cases.t.b.err
804 804 @@ -5,4 +5,5 @@
805 805 #endif
806 806 #if b
807 807 $ echo 2
808 808 + 2
809 809 #endif
810 810 Accept this change? [n] .
811 811 # Ran 2 tests, 0 skipped, 0 failed.
812 812
813 813 $ cat test-cases.t
814 814 #testcases a b
815 815 #if a
816 816 $ echo 1
817 817 1
818 818 #endif
819 819 #if b
820 820 $ echo 2
821 821 2
822 822 #endif
823 823
824 824 $ cat >> test-cases.t <<'EOF'
825 825 > #if a
826 826 > $ NAME=A
827 827 > #else
828 828 > $ NAME=B
829 829 > #endif
830 830 > $ echo $NAME
831 831 > A (a !)
832 832 > B (b !)
833 833 > EOF
834 834 $ rt test-cases.t
835 835 ..
836 836 # Ran 2 tests, 0 skipped, 0 failed.
837 837
838 838 $ rm test-cases.t
839 839
840 840 (reinstall)
841 841 $ mv backup test-failure.t
842 842
843 843 No Diff
844 844 ===============
845 845
846 846 $ rt --nodiff
847 847 !.
848 848 Failed test-failure.t: output changed
849 849 # Ran 2 tests, 0 skipped, 1 failed.
850 850 python hash seed: * (glob)
851 851 [1]
852 852
853 853 test --tmpdir support
854 854 $ rt --tmpdir=$TESTTMP/keep test-success.t
855 855
856 856 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
857 857 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
858 858 .
859 859 # Ran 1 tests, 0 skipped, 0 failed.
860 860
861 861 timeouts
862 862 ========
863 863 $ cat > test-timeout.t <<EOF
864 864 > $ sleep 2
865 865 > $ echo pass
866 866 > pass
867 867 > EOF
868 868 > echo '#require slow' > test-slow-timeout.t
869 869 > cat test-timeout.t >> test-slow-timeout.t
870 870 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
871 871 st
872 872 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
873 873 Failed test-timeout.t: timed out
874 874 # Ran 1 tests, 1 skipped, 1 failed.
875 875 python hash seed: * (glob)
876 876 [1]
877 877 $ rt --timeout=1 --slowtimeout=3 \
878 878 > test-timeout.t test-slow-timeout.t --allow-slow-tests
879 879 .t
880 880 Failed test-timeout.t: timed out
881 881 # Ran 2 tests, 0 skipped, 1 failed.
882 882 python hash seed: * (glob)
883 883 [1]
884 884 $ rm test-timeout.t test-slow-timeout.t
885 885
886 886 test for --time
887 887 ==================
888 888
889 889 $ rt test-success.t --time
890 890 .
891 891 # Ran 1 tests, 0 skipped, 0 failed.
892 892 # Producing time report
893 893 start end cuser csys real Test
894 894 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
895 895
896 896 test for --time with --job enabled
897 897 ====================================
898 898
899 899 $ rt test-success.t --time --jobs 2
900 900 .
901 901 # Ran 1 tests, 0 skipped, 0 failed.
902 902 # Producing time report
903 903 start end cuser csys real Test
904 904 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
905 905
906 906 Skips
907 907 ================
908 908 $ cat > test-skip.t <<EOF
909 909 > $ echo xyzzy
910 910 > #require false
911 911 > EOF
912 912 $ rt --nodiff
913 913 !.s
914 914 Skipped test-skip.t: missing feature: nail clipper
915 915 Failed test-failure.t: output changed
916 916 # Ran 2 tests, 1 skipped, 1 failed.
917 917 python hash seed: * (glob)
918 918 [1]
919 919
920 920 $ rt --keyword xyzzy
921 921 .s
922 922 Skipped test-skip.t: missing feature: nail clipper
923 923 # Ran 2 tests, 2 skipped, 0 failed.
924 924
925 925 Skips with xml
926 926 $ rt --keyword xyzzy \
927 927 > --xunit=xunit.xml
928 928 .s
929 929 Skipped test-skip.t: missing feature: nail clipper
930 930 # Ran 2 tests, 2 skipped, 0 failed.
931 931 $ cat xunit.xml
932 932 <?xml version="1.0" encoding="utf-8"?>
933 933 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
934 934 <testcase name="test-success.t" time="*"/> (glob)
935 935 <testcase name="test-skip.t">
936 936 <skipped>
937 937 <![CDATA[missing feature: nail clipper]]> </skipped>
938 938 </testcase>
939 939 </testsuite>
940 940
941 941 Missing skips or blacklisted skips don't count as executed:
942 942 $ echo test-failure.t > blacklist
943 943 $ rt --blacklist=blacklist --json\
944 944 > test-failure.t test-bogus.t
945 945 ss
946 946 Skipped test-bogus.t: Doesn't exist
947 947 Skipped test-failure.t: blacklisted
948 948 # Ran 0 tests, 2 skipped, 0 failed.
949 949 $ cat report.json
950 950 testreport ={
951 951 "test-bogus.t": {
952 952 "result": "skip"
953 953 },
954 954 "test-failure.t": {
955 955 "result": "skip"
956 956 }
957 957 } (no-eol)
958 958
959 959 Whitelist trumps blacklist
960 960 $ echo test-failure.t > whitelist
961 961 $ rt --blacklist=blacklist --whitelist=whitelist --json\
962 962 > test-failure.t test-bogus.t
963 963 s
964 964 --- $TESTTMP/test-failure.t
965 965 +++ $TESTTMP/test-failure.t.err
966 966 @@ -1,5 +1,5 @@
967 967 $ echo babar
968 968 - rataxes
969 969 + babar
970 970 This is a noop statement so that
971 971 this test is still more bytes than success.
972 972 pad pad pad pad............................................................
973 973
974 974 ERROR: test-failure.t output changed
975 975 !
976 976 Skipped test-bogus.t: Doesn't exist
977 977 Failed test-failure.t: output changed
978 978 # Ran 1 tests, 1 skipped, 1 failed.
979 979 python hash seed: * (glob)
980 980 [1]
981 981
982 982 Ensure that --test-list causes only the tests listed in that file to
983 983 be executed.
984 984 $ echo test-success.t >> onlytest
985 985 $ rt --test-list=onlytest
986 986 .
987 987 # Ran 1 tests, 0 skipped, 0 failed.
988 988 $ echo test-bogus.t >> anothertest
989 989 $ rt --test-list=onlytest --test-list=anothertest
990 990 s.
991 991 Skipped test-bogus.t: Doesn't exist
992 992 # Ran 1 tests, 1 skipped, 0 failed.
993 993 $ rm onlytest anothertest
994 994
995 995 test for --json
996 996 ==================
997 997
998 998 $ rt --json
999 999
1000 1000 --- $TESTTMP/test-failure.t
1001 1001 +++ $TESTTMP/test-failure.t.err
1002 1002 @@ -1,5 +1,5 @@
1003 1003 $ echo babar
1004 1004 - rataxes
1005 1005 + babar
1006 1006 This is a noop statement so that
1007 1007 this test is still more bytes than success.
1008 1008 pad pad pad pad............................................................
1009 1009
1010 1010 ERROR: test-failure.t output changed
1011 1011 !.s
1012 1012 Skipped test-skip.t: missing feature: nail clipper
1013 1013 Failed test-failure.t: output changed
1014 1014 # Ran 2 tests, 1 skipped, 1 failed.
1015 1015 python hash seed: * (glob)
1016 1016 [1]
1017 1017
1018 1018 $ cat report.json
1019 1019 testreport ={
1020 1020 "test-failure.t": [\{] (re)
1021 1021 "csys": "\s*[\d\.]{4,5}", ? (re)
1022 1022 "cuser": "\s*[\d\.]{4,5}", ? (re)
1023 1023 "diff": "---.+\+\+\+.+", ? (re)
1024 1024 "end": "\s*[\d\.]{4,5}", ? (re)
1025 1025 "result": "failure", ? (re)
1026 1026 "start": "\s*[\d\.]{4,5}", ? (re)
1027 1027 "time": "\s*[\d\.]{4,5}" (re)
1028 1028 }, ? (re)
1029 1029 "test-skip.t": {
1030 1030 "csys": "\s*[\d\.]{4,5}", ? (re)
1031 1031 "cuser": "\s*[\d\.]{4,5}", ? (re)
1032 1032 "diff": "", ? (re)
1033 1033 "end": "\s*[\d\.]{4,5}", ? (re)
1034 1034 "result": "skip", ? (re)
1035 1035 "start": "\s*[\d\.]{4,5}", ? (re)
1036 1036 "time": "\s*[\d\.]{4,5}" (re)
1037 1037 }, ? (re)
1038 1038 "test-success.t": [\{] (re)
1039 1039 "csys": "\s*[\d\.]{4,5}", ? (re)
1040 1040 "cuser": "\s*[\d\.]{4,5}", ? (re)
1041 1041 "diff": "", ? (re)
1042 1042 "end": "\s*[\d\.]{4,5}", ? (re)
1043 1043 "result": "success", ? (re)
1044 1044 "start": "\s*[\d\.]{4,5}", ? (re)
1045 1045 "time": "\s*[\d\.]{4,5}" (re)
1046 1046 }
1047 1047 } (no-eol)
1048 1048 --json with --outputdir
1049 1049
1050 1050 $ rm report.json
1051 1051 $ rm -r output
1052 1052 $ mkdir output
1053 1053 $ rt --json --outputdir output
1054 1054
1055 1055 --- $TESTTMP/test-failure.t
1056 1056 +++ $TESTTMP/output/test-failure.t.err
1057 1057 @@ -1,5 +1,5 @@
1058 1058 $ echo babar
1059 1059 - rataxes
1060 1060 + babar
1061 1061 This is a noop statement so that
1062 1062 this test is still more bytes than success.
1063 1063 pad pad pad pad............................................................
1064 1064
1065 1065 ERROR: test-failure.t output changed
1066 1066 !.s
1067 1067 Skipped test-skip.t: missing feature: nail clipper
1068 1068 Failed test-failure.t: output changed
1069 1069 # Ran 2 tests, 1 skipped, 1 failed.
1070 1070 python hash seed: * (glob)
1071 1071 [1]
1072 1072 $ f report.json
1073 1073 report.json: file not found
1074 1074 $ cat output/report.json
1075 1075 testreport ={
1076 1076 "test-failure.t": [\{] (re)
1077 1077 "csys": "\s*[\d\.]{4,5}", ? (re)
1078 1078 "cuser": "\s*[\d\.]{4,5}", ? (re)
1079 1079 "diff": "---.+\+\+\+.+", ? (re)
1080 1080 "end": "\s*[\d\.]{4,5}", ? (re)
1081 1081 "result": "failure", ? (re)
1082 1082 "start": "\s*[\d\.]{4,5}", ? (re)
1083 1083 "time": "\s*[\d\.]{4,5}" (re)
1084 1084 }, ? (re)
1085 1085 "test-skip.t": {
1086 1086 "csys": "\s*[\d\.]{4,5}", ? (re)
1087 1087 "cuser": "\s*[\d\.]{4,5}", ? (re)
1088 1088 "diff": "", ? (re)
1089 1089 "end": "\s*[\d\.]{4,5}", ? (re)
1090 1090 "result": "skip", ? (re)
1091 1091 "start": "\s*[\d\.]{4,5}", ? (re)
1092 1092 "time": "\s*[\d\.]{4,5}" (re)
1093 1093 }, ? (re)
1094 1094 "test-success.t": [\{] (re)
1095 1095 "csys": "\s*[\d\.]{4,5}", ? (re)
1096 1096 "cuser": "\s*[\d\.]{4,5}", ? (re)
1097 1097 "diff": "", ? (re)
1098 1098 "end": "\s*[\d\.]{4,5}", ? (re)
1099 1099 "result": "success", ? (re)
1100 1100 "start": "\s*[\d\.]{4,5}", ? (re)
1101 1101 "time": "\s*[\d\.]{4,5}" (re)
1102 1102 }
1103 1103 } (no-eol)
1104 1104 $ ls -a output
1105 1105 .
1106 1106 ..
1107 1107 .testtimes
1108 1108 report.json
1109 1109 test-failure.t.err
1110 1110
1111 1111 Test that failed test accepted through interactive are properly reported:
1112 1112
1113 1113 $ cp test-failure.t backup
1114 1114 $ echo y | rt --json -i
1115 1115
1116 1116 --- $TESTTMP/test-failure.t
1117 1117 +++ $TESTTMP/test-failure.t.err
1118 1118 @@ -1,5 +1,5 @@
1119 1119 $ echo babar
1120 1120 - rataxes
1121 1121 + babar
1122 1122 This is a noop statement so that
1123 1123 this test is still more bytes than success.
1124 1124 pad pad pad pad............................................................
1125 1125 Accept this change? [n] ..s
1126 1126 Skipped test-skip.t: missing feature: nail clipper
1127 1127 # Ran 2 tests, 1 skipped, 0 failed.
1128 1128
1129 1129 $ cat report.json
1130 1130 testreport ={
1131 1131 "test-failure.t": [\{] (re)
1132 1132 "csys": "\s*[\d\.]{4,5}", ? (re)
1133 1133 "cuser": "\s*[\d\.]{4,5}", ? (re)
1134 1134 "diff": "", ? (re)
1135 1135 "end": "\s*[\d\.]{4,5}", ? (re)
1136 1136 "result": "success", ? (re)
1137 1137 "start": "\s*[\d\.]{4,5}", ? (re)
1138 1138 "time": "\s*[\d\.]{4,5}" (re)
1139 1139 }, ? (re)
1140 1140 "test-skip.t": {
1141 1141 "csys": "\s*[\d\.]{4,5}", ? (re)
1142 1142 "cuser": "\s*[\d\.]{4,5}", ? (re)
1143 1143 "diff": "", ? (re)
1144 1144 "end": "\s*[\d\.]{4,5}", ? (re)
1145 1145 "result": "skip", ? (re)
1146 1146 "start": "\s*[\d\.]{4,5}", ? (re)
1147 1147 "time": "\s*[\d\.]{4,5}" (re)
1148 1148 }, ? (re)
1149 1149 "test-success.t": [\{] (re)
1150 1150 "csys": "\s*[\d\.]{4,5}", ? (re)
1151 1151 "cuser": "\s*[\d\.]{4,5}", ? (re)
1152 1152 "diff": "", ? (re)
1153 1153 "end": "\s*[\d\.]{4,5}", ? (re)
1154 1154 "result": "success", ? (re)
1155 1155 "start": "\s*[\d\.]{4,5}", ? (re)
1156 1156 "time": "\s*[\d\.]{4,5}" (re)
1157 1157 }
1158 1158 } (no-eol)
1159 1159 $ mv backup test-failure.t
1160 1160
1161 1161 backslash on end of line with glob matching is handled properly
1162 1162
1163 1163 $ cat > test-glob-backslash.t << EOF
1164 1164 > $ echo 'foo bar \\'
1165 1165 > foo * \ (glob)
1166 1166 > EOF
1167 1167
1168 1168 $ rt test-glob-backslash.t
1169 1169 .
1170 1170 # Ran 1 tests, 0 skipped, 0 failed.
1171 1171
1172 1172 $ rm -f test-glob-backslash.t
1173 1173
1174 1174 Test globbing of local IP addresses
1175 1175 $ echo 172.16.18.1
1176 1176 $LOCALIP (glob)
1177 1177 $ echo dead:beef::1
1178 1178 $LOCALIP (glob)
1179 1179
1180 1180 Test reusability for third party tools
1181 1181 ======================================
1182 1182
1183 1183 $ mkdir "$TESTTMP"/anothertests
1184 1184 $ cd "$TESTTMP"/anothertests
1185 1185
1186 1186 test that `run-tests.py` can execute hghave, even if it runs not in
1187 1187 Mercurial source tree.
1188 1188
1189 1189 $ cat > test-hghave.t <<EOF
1190 1190 > #require true
1191 1191 > $ echo foo
1192 1192 > foo
1193 1193 > EOF
1194 1194 $ rt test-hghave.t
1195 1195 .
1196 1196 # Ran 1 tests, 0 skipped, 0 failed.
1197 1197
1198 1198 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1199 1199 running is placed.
1200 1200
1201 1201 $ cat > test-runtestdir.t <<EOF
1202 1202 > - $TESTDIR, in which test-run-tests.t is placed
1203 1203 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1204 1204 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1205 1205 >
1206 1206 > #if windows
1207 1207 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1208 1208 > #else
1209 1209 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1210 1210 > #endif
1211 1211 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1212 1212 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1213 1213 > #!USRBINENVPY
1214 1214 > #
1215 1215 > # check-code - a style and portability checker for Mercurial
1216 1216 > EOF
1217 1217 $ rt test-runtestdir.t
1218 1218 .
1219 1219 # Ran 1 tests, 0 skipped, 0 failed.
1220 1220
1221 1221 #if execbit
1222 1222
1223 1223 test that TESTDIR is referred in PATH
1224 1224
1225 1225 $ cat > custom-command.sh <<EOF
1226 1226 > #!/bin/sh
1227 1227 > echo "hello world"
1228 1228 > EOF
1229 1229 $ chmod +x custom-command.sh
1230 1230 $ cat > test-testdir-path.t <<EOF
1231 1231 > $ custom-command.sh
1232 1232 > hello world
1233 1233 > EOF
1234 1234 $ rt test-testdir-path.t
1235 1235 .
1236 1236 # Ran 1 tests, 0 skipped, 0 failed.
1237 1237
1238 1238 #endif
1239 1239
1240 1240 test support for --allow-slow-tests
1241 1241 $ cat > test-very-slow-test.t <<EOF
1242 1242 > #require slow
1243 1243 > $ echo pass
1244 1244 > pass
1245 1245 > EOF
1246 1246 $ rt test-very-slow-test.t
1247 1247 s
1248 1248 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1249 1249 # Ran 0 tests, 1 skipped, 0 failed.
1250 1250 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1251 1251 .
1252 1252 # Ran 1 tests, 0 skipped, 0 failed.
1253 1253
1254 1254 support for running a test outside the current directory
1255 1255 $ mkdir nonlocal
1256 1256 $ cat > nonlocal/test-is-not-here.t << EOF
1257 1257 > $ echo pass
1258 1258 > pass
1259 1259 > EOF
1260 1260 $ rt nonlocal/test-is-not-here.t
1261 1261 .
1262 1262 # Ran 1 tests, 0 skipped, 0 failed.
1263 1263
1264 support for automatically discovering test if arg is a folder
1265 $ mkdir tmp && cd tmp
1266
1267 $ cat > test-uno.t << EOF
1268 > $ echo line
1269 > line
1270 > EOF
1271
1272 $ cp test-uno.t test-dos.t
1273 $ cd ..
1274 $ cp -R tmp tmpp
1275 $ cp tmp/test-uno.t test-solo.t
1276
1277 $ $PYTHON $TESTDIR/run-tests.py tmp/ test-solo.t tmpp
1278 .....
1279 # Ran 5 tests, 0 skipped, 0 failed.
1280 $ rm -rf tmp tmpp
1281
1264 1282 support for running run-tests.py from another directory
1265 1283 $ mkdir tmp && cd tmp
1266 1284 $ cat > useful-file.sh << EOF
1267 1285 > important command
1268 1286 > EOF
1269 1287
1270 1288 $ cat > test-folder.t << EOF
1271 1289 > $ cat \$TESTDIR/useful-file.sh
1272 1290 > important command
1273 1291 > EOF
1274 1292
1275 1293 $ cd ..
1276 1294 $ $PYTHON $TESTDIR/run-tests.py tmp/test-folder.t
1277 1295 .
1278 1296 # Ran 1 tests, 0 skipped, 0 failed.
1279 1297
1280 1298 support for bisecting failed tests automatically
1281 1299 $ hg init bisect
1282 1300 $ cd bisect
1283 1301 $ cat >> test-bisect.t <<EOF
1284 1302 > $ echo pass
1285 1303 > pass
1286 1304 > EOF
1287 1305 $ hg add test-bisect.t
1288 1306 $ hg ci -m 'good'
1289 1307 $ cat >> test-bisect.t <<EOF
1290 1308 > $ echo pass
1291 1309 > fail
1292 1310 > EOF
1293 1311 $ hg ci -m 'bad'
1294 1312 $ rt --known-good-rev=0 test-bisect.t
1295 1313
1296 1314 --- $TESTTMP/anothertests/bisect/test-bisect.t
1297 1315 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1298 1316 @@ -1,4 +1,4 @@
1299 1317 $ echo pass
1300 1318 pass
1301 1319 $ echo pass
1302 1320 - fail
1303 1321 + pass
1304 1322
1305 1323 ERROR: test-bisect.t output changed
1306 1324 !
1307 1325 Failed test-bisect.t: output changed
1308 1326 test-bisect.t broken by 72cbf122d116 (bad)
1309 1327 # Ran 1 tests, 0 skipped, 1 failed.
1310 1328 python hash seed: * (glob)
1311 1329 [1]
1312 1330
1313 1331 $ cd ..
1314 1332
1315 1333 support bisecting a separate repo
1316 1334
1317 1335 $ hg init bisect-dependent
1318 1336 $ cd bisect-dependent
1319 1337 $ cat > test-bisect-dependent.t <<EOF
1320 1338 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1321 1339 > pass
1322 1340 > EOF
1323 1341 $ hg commit -Am dependent test-bisect-dependent.t
1324 1342
1325 1343 $ rt --known-good-rev=0 test-bisect-dependent.t
1326 1344
1327 1345 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1328 1346 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1329 1347 @@ -1,2 +1,2 @@
1330 1348 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1331 1349 - pass
1332 1350 + fail
1333 1351
1334 1352 ERROR: test-bisect-dependent.t output changed
1335 1353 !
1336 1354 Failed test-bisect-dependent.t: output changed
1337 1355 Failed to identify failure point for test-bisect-dependent.t
1338 1356 # Ran 1 tests, 0 skipped, 1 failed.
1339 1357 python hash seed: * (glob)
1340 1358 [1]
1341 1359
1342 1360 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1343 1361 Usage: run-tests.py [options] [tests]
1344 1362
1345 1363 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1346 1364 [2]
1347 1365
1348 1366 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1349 1367
1350 1368 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1351 1369 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1352 1370 @@ -1,2 +1,2 @@
1353 1371 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1354 1372 - pass
1355 1373 + fail
1356 1374
1357 1375 ERROR: test-bisect-dependent.t output changed
1358 1376 !
1359 1377 Failed test-bisect-dependent.t: output changed
1360 1378 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1361 1379 # Ran 1 tests, 0 skipped, 1 failed.
1362 1380 python hash seed: * (glob)
1363 1381 [1]
1364 1382
1365 1383 $ cd ..
1366 1384
1367 1385 Test a broken #if statement doesn't break run-tests threading.
1368 1386 ==============================================================
1369 1387 $ mkdir broken
1370 1388 $ cd broken
1371 1389 $ cat > test-broken.t <<EOF
1372 1390 > true
1373 1391 > #if notarealhghavefeature
1374 1392 > $ false
1375 1393 > #endif
1376 1394 > EOF
1377 1395 $ for f in 1 2 3 4 ; do
1378 1396 > cat > test-works-$f.t <<EOF
1379 1397 > This is test case $f
1380 1398 > $ sleep 1
1381 1399 > EOF
1382 1400 > done
1383 1401 $ rt -j 2
1384 1402 ....
1385 1403 # Ran 5 tests, 0 skipped, 0 failed.
1386 1404 skipped: unknown feature: notarealhghavefeature
1387 1405
1388 1406 $ cd ..
1389 1407 $ rm -rf broken
1390 1408
1391 1409 Test cases in .t files
1392 1410 ======================
1393 1411 $ mkdir cases
1394 1412 $ cd cases
1395 1413 $ cat > test-cases-abc.t <<'EOF'
1396 1414 > #testcases A B C
1397 1415 > $ V=B
1398 1416 > #if A
1399 1417 > $ V=A
1400 1418 > #endif
1401 1419 > #if C
1402 1420 > $ V=C
1403 1421 > #endif
1404 1422 > $ echo $V | sed 's/A/C/'
1405 1423 > C
1406 1424 > #if C
1407 1425 > $ [ $V = C ]
1408 1426 > #endif
1409 1427 > #if A
1410 1428 > $ [ $V = C ]
1411 1429 > [1]
1412 1430 > #endif
1413 1431 > #if no-C
1414 1432 > $ [ $V = C ]
1415 1433 > [1]
1416 1434 > #endif
1417 1435 > $ [ $V = D ]
1418 1436 > [1]
1419 1437 > EOF
1420 1438 $ rt
1421 1439 .
1422 1440 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1423 1441 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1424 1442 @@ -7,7 +7,7 @@
1425 1443 $ V=C
1426 1444 #endif
1427 1445 $ echo $V | sed 's/A/C/'
1428 1446 - C
1429 1447 + B
1430 1448 #if C
1431 1449 $ [ $V = C ]
1432 1450 #endif
1433 1451
1434 1452 ERROR: test-cases-abc.t (case B) output changed
1435 1453 !.
1436 1454 Failed test-cases-abc.t (case B): output changed
1437 1455 # Ran 3 tests, 0 skipped, 1 failed.
1438 1456 python hash seed: * (glob)
1439 1457 [1]
1440 1458
1441 1459 --restart works
1442 1460
1443 1461 $ rt --restart
1444 1462
1445 1463 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1446 1464 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1447 1465 @@ -7,7 +7,7 @@
1448 1466 $ V=C
1449 1467 #endif
1450 1468 $ echo $V | sed 's/A/C/'
1451 1469 - C
1452 1470 + B
1453 1471 #if C
1454 1472 $ [ $V = C ]
1455 1473 #endif
1456 1474
1457 1475 ERROR: test-cases-abc.t (case B) output changed
1458 1476 !.
1459 1477 Failed test-cases-abc.t (case B): output changed
1460 1478 # Ran 2 tests, 0 skipped, 1 failed.
1461 1479 python hash seed: * (glob)
1462 1480 [1]
1463 1481
1464 1482 --restart works with outputdir
1465 1483
1466 1484 $ mkdir output
1467 1485 $ mv test-cases-abc.t.B.err output
1468 1486 $ rt --restart --outputdir output
1469 1487
1470 1488 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1471 1489 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1472 1490 @@ -7,7 +7,7 @@
1473 1491 $ V=C
1474 1492 #endif
1475 1493 $ echo $V | sed 's/A/C/'
1476 1494 - C
1477 1495 + B
1478 1496 #if C
1479 1497 $ [ $V = C ]
1480 1498 #endif
1481 1499
1482 1500 ERROR: test-cases-abc.t (case B) output changed
1483 1501 !.
1484 1502 Failed test-cases-abc.t (case B): output changed
1485 1503 # Ran 2 tests, 0 skipped, 1 failed.
1486 1504 python hash seed: * (glob)
1487 1505 [1]
General Comments 0
You need to be logged in to leave comments. Login now