##// END OF EJS Templates
tests: add support for listing tests to run in a file...
Augie Fackler -
r34264:15333717 default
parent child Browse files
Show More
@@ -1,2916 +1,2922 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 try:
74 74 import shlex
75 75 shellquote = shlex.quote
76 76 except (ImportError, AttributeError):
77 77 import pipes
78 78 shellquote = pipes.quote
79 79
# RTUNICODEPEDANTRY switches the default encoding to "undefined" so that
# any implicit str<->unicode conversion raises. reload()/setdefaultencoding
# only exist on Python 2, hence the NameError guard on Python 3.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment as it was at startup, before any mutation.
origenviron = os.environ.copy()
# os.environb is the bytes view of the environment (Python 3 only); fall
# back to os.environ where it does not exist.
osenvironb = getattr(os, 'environb', os.environ)
processlock = threading.Lock()

pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
if pygmentspresent:
    class TestRunnerStyle(style.Style):
        """Pygments style mapping the runner's custom tokens to colors."""
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        """Lexer for the runner's own status lines.

        Recognizes 'Skipped', 'Failed ' and 'ERROR: ' prefixes, then
        highlights the test file name and the trailing reason separately.
        """
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (r'[\w-]+\.(t|py)', token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (r'[\w-]+\.(t|py)', token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
140 140
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        """Encode a str path (or None) to UTF-8 bytes."""
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        """Decode a bytes path (or None) from UTF-8 to str."""
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported: bail out early.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support: os.WIFEXITED does not exist there.
wifexited = getattr(os, "WIFEXITED", lambda x: False)
173 173 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET' or 'AF_INET6'. The port already
    being in use is fine - EADDRINUSE still counts as success.
    """
    family = getattr(socket, name, None)
    if family is None:
        # this platform's socket module does not know the family at all
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # someone else is already listening - the family clearly works
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
198 198 # useipv6 will be set by parseargs
199 199 useipv6 = None
200 200
def checkportisavailable(port):
    """Return True if a port seems free to bind on localhost."""
    # parseargs() decides the preferred address family via 'useipv6'.
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        if exc.errno in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                         errno.EPROTONOSUPPORT):
            return False
        raise
217 217
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn cmd through the shell with stdout and stderr merged.

    Returns the Popen object, augmented with popen4-style aliases
    (fromchild/tochild/childerr) and a 'timeout' attribute. If timeout
    is nonzero, a watchdog thread marks p.timeout and terminates the
    process if it is still running after that many seconds.
    """
    # serialize process creation; the lock is released as soon as the
    # child exists
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            # poll until the deadline passes or returncode gets set
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
243 243
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable naming the interpreter's module search path;
# Jython spells it differently.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment variable override,
# fallback value). getparser() folds the env values in before they are
# installed on the parser.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
256 256
def canonpath(path):
    """Return *path* with '~' expanded and symlinks fully resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
259 259
def parselistfiles(files, listtype, warn=True):
    """Parse test-list files into a dict of {entry: source filename}.

    Each non-empty, non-comment line of every file in *files* becomes a
    key (as bytes) mapping to the name of the file it came from. '#'
    starts a comment; surrounding whitespace is stripped. A missing
    file is skipped, with a warning naming *listtype* unless warn is
    False; any other I/O error propagates.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # 'with' guarantees the file is closed even if a read fails
        # (the original leaked the handle on error), and iterating the
        # file avoids materializing it all via readlines().
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
280 280
def parsettestcases(path):
    """Read a .t test file and return the set of test case names.

    Case names are the whitespace-separated words following any
    '#testcases ' line. A missing file simply yields an empty set;
    other I/O errors propagate.
    """
    marker = b'#testcases '
    names = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(marker):
                    names.update(line[len(marker):].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return names
296 296
def getparser():
    """Obtain the OptionParser used by the CLI."""
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--test-list", action="append",
        help="read tests to run from the specified file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("--color", choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("--list-tests", action="store_true",
        help="list tests instead of running them")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("--outputdir", type="string",
        help="directory to write error logs to (default=test directory)")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    parser.add_option("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    parser.add_option("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    parser.add_option("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')
    parser.add_option('--known-good-rev', type="string",
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))
    parser.add_option('--bisect-repo', type="string",
        metavar='bisect_repo',
        help=("Path of a repo to bisect. Use together with "
              "--known-good-rev"))

    # Fold the $HGTEST_* environment overrides into 'defaults' (coercing
    # to the fallback's type) and install them on the parser so options
    # not given on the command line pick them up.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
419 421
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns the (options, args) pair from optparse after normalizing
    paths, resolving --local/--chg shortcuts, choosing the IP family,
    and rejecting incompatible option combinations via parser.error().
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # derive the repo root from this script's location and point
        # with_hg (and optionally with_chg) at the in-tree binaries
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # '' (not True) so that log() can print it as a prefix
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return (options, args)
552 554
def rename(src, dst):
    """Move src to dst, replacing dst if it already exists.

    Unlike os.rename() this tolerates a pre-existing destination, at
    the cost of atomicity and of briefly having the data in two places:
    the file is copied first, then the source is removed.
    """
    shutil.copy(src, dst)
    os.remove(src)
559 561
# difflib.unified_diff only accepts str on Python 3; diff_bytes wraps a
# str-based differ so we can keep diffing test output as bytes.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
564 566
def getdiff(expected, output, ref, err):
    """Diff expected vs. actual test output.

    Returns a (servefail, lines) pair: lines is the list of unified
    diff lines (with '+++'/'---' headers normalized), and servefail is
    True when the diff shows a daemon child that failed to launch.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith((b'+++', b'---')):
            # normalize path separators and drop a trailing blank from
            # the file header lines
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if line.startswith(b'+ abort: child process failed to start'):
            servefail = True
    return servefail, lines
579 581
# Verbosity flag; parseargs() replaces it with '' when -v is given, so
# the check below must be an identity test against False rather than a
# truthiness test.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
587 589
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.+) \((.+?) !\)\n$')

def cdatasafe(data):
    """Return *data* made safe for embedding in an XML CDATA block.

    Illegal control bytes become '?', and any ']]>' terminator gets a
    space wedged into it ('] ]>') so it cannot close the CDATA section
    early.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
607 609
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock (module-level, defined elsewhere in this file) keeps
    # concurrent test threads from interleaving their output.
    with iolock:
        if verbose:
            # when -v is active, 'verbose' holds a printable prefix
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
620 622
def highlightdiff(line, color):
    """Colorize a diff line (bytes) with pygments when color is requested."""
    if not color:
        return line
    assert pygmentspresent
    # round-trip through latin1: pygments works on str, callers deal in
    # bytes, and latin1 maps every byte value losslessly
    return pygments.highlight(line.decode('latin1'), difflexer,
                              terminal256formatter).encode('latin1')
627 629
def highlightmsg(msg, color):
    """Colorize a runner status message (str) when color is requested."""
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
633 635
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # the process may already be gone; nothing to do
        pass
641 643
def killdaemons(pidfile):
    """Kill the daemon processes recorded in pidfile (and remove it)."""
    # deferred import: killdaemons.py lives next to this script
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
646 648
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave); runTest()
    # turns it into a unittest.SkipTest.
    SKIPPED_STATUS = 80
657 659
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=defaults['slowtimeout'], usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        # where this test's failure output (.err) is written
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run mutable state; (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
726 728
    def readrefout(self):
        """read reference output"""
        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if self._debug:
            return None # to match "out is None"
        elif os.path.exists(self.refpath):
            with open(self.refpath, 'rb') as f:
                # keep line endings so the diff stays faithful
                return f.read().splitlines(True)
        else:
            # no reference output recorded; compare against nothing
            return []
738 740
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # one-line description shown by unittest result reporters
        return self.name
749 751
    def setUp(self):
        """Tasks to perform before run()."""
        # reset per-run state so a Test instance can be run repeatedly
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # the per-thread directory may already exist; that's fine
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # dedicated socket directory for the chg command server
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
782 784
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # a broken setUp is an error, not a failure
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                # NOTE(review): addFailure returning truthy appears to
                # mean the failure was accepted (e.g. interactively) -
                # confirm against the TestResult implementation.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs, and its errors turn success off
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
837 839
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # POSIX convention: a negative return code is the killing signal
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
903 905
    def tearDown(self):
        """Tasks to perform after run().

        Kills any daemons the test started, removes (or keeps, with
        --keep-tmpdir) the temporary directories, and writes the .err
        file when the test failed.
        """
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            # ignore_errors=True: best-effort cleanup.
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the observed output so diffs can be produced later.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
931 933
932 934 def _run(self, env):
933 935 # This should be implemented in child classes to run tests.
934 936 raise unittest.SkipTest('unknown test type')
935 937
936 938 def abort(self):
937 939 """Terminate execution of this test."""
938 940 self._aborted = True
939 941
940 942 def _portmap(self, i):
941 943 offset = b'' if i == 0 else b'%d' % i
942 944 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
943 945
944 946 def _getreplacements(self):
945 947 """Obtain a mapping of text replacements to apply to test output.
946 948
947 949 Test output needs to be normalized so it can be compared to expected
948 950 output. This function defines how some of that normalization will
949 951 occur.
950 952 """
951 953 r = [
952 954 # This list should be parallel to defineport in _getenv
953 955 self._portmap(0),
954 956 self._portmap(1),
955 957 self._portmap(2),
956 958 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
957 959 br'\1 (glob)'),
958 960 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
959 961 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
960 962 ]
961 963 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
962 964
963 965 return r
964 966
965 967 def _escapepath(self, p):
966 968 if os.name == 'nt':
967 969 return (
968 970 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
969 971 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
970 972 for c in p))
971 973 )
972 974 else:
973 975 return re.escape(p)
974 976
975 977 def _localip(self):
976 978 if self._useipv6:
977 979 return b'::1'
978 980 else:
979 981 return b'127.0.0.1'
980 982
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment.

        Writes ``restoreenv.sh`` into the thread temp dir and records its
        path in ``testenv['HGTEST_RESTOREENV']``.  Sourcing the script
        re-exports the pre-test environment and unsets variables this
        harness added.
        """
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                # Variables only present in the test env must be removed.
                envf.write('unset %s\n' % (name,))
1009 1011
1010 1012 def _getenv(self):
1011 1013 """Obtain environment variables to use during test execution."""
1012 1014 def defineport(i):
1013 1015 offset = '' if i == 0 else '%s' % i
1014 1016 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1015 1017 env = os.environ.copy()
1016 1018 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1017 1019 env['HGEMITWARNINGS'] = '1'
1018 1020 env['TESTTMP'] = self._testtmp
1019 1021 env['HOME'] = self._testtmp
1020 1022 # This number should match portneeded in _getport
1021 1023 for port in xrange(3):
1022 1024 # This list should be parallel to _portmap in _getreplacements
1023 1025 defineport(port)
1024 1026 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1025 1027 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1026 1028 env["HGEDITOR"] = ('"' + sys.executable + '"'
1027 1029 + ' -c "import sys; sys.exit(0)"')
1028 1030 env["HGMERGE"] = "internal:merge"
1029 1031 env["HGUSER"] = "test"
1030 1032 env["HGENCODING"] = "ascii"
1031 1033 env["HGENCODINGMODE"] = "strict"
1032 1034 env['HGIPV6'] = str(int(self._useipv6))
1033 1035
1034 1036 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1035 1037 # IP addresses.
1036 1038 env['LOCALIP'] = self._localip()
1037 1039
1038 1040 # Reset some environment variables to well-known values so that
1039 1041 # the tests produce repeatable output.
1040 1042 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1041 1043 env['TZ'] = 'GMT'
1042 1044 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1043 1045 env['COLUMNS'] = '80'
1044 1046 env['TERM'] = 'xterm'
1045 1047
1046 1048 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1047 1049 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1048 1050 'NO_PROXY CHGDEBUG').split():
1049 1051 if k in env:
1050 1052 del env[k]
1051 1053
1052 1054 # unset env related to hooks
1053 1055 for k in env.keys():
1054 1056 if k.startswith('HG_'):
1055 1057 del env[k]
1056 1058
1057 1059 if self._usechg:
1058 1060 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1059 1061
1060 1062 return env
1061 1063
    def _createhgrc(self, path):
        """Create an hgrc file for this test.

        Writes the baseline configuration every test starts from, then
        appends any ``--extra-config-opt section.key=value`` options.
        """
        hgrc = open(path, 'wb')
        hgrc.write(b'[ui]\n')
        hgrc.write(b'slash = True\n')
        hgrc.write(b'interactive = False\n')
        hgrc.write(b'mergemarkers = detailed\n')
        hgrc.write(b'promptecho = True\n')
        hgrc.write(b'[defaults]\n')
        hgrc.write(b'[devel]\n')
        hgrc.write(b'all-warnings = true\n')
        hgrc.write(b'default-date = 0 0\n')
        hgrc.write(b'[largefiles]\n')
        hgrc.write(b'usercache = %s\n' %
                   (os.path.join(self._testtmp, b'.cache/largefiles')))
        hgrc.write(b'[web]\n')
        hgrc.write(b'address = localhost\n')
        hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))

        for opt in self._extraconfigopts:
            # Each option is "section.key=value"; the assert catches a
            # missing '=' early with a readable message.
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write(b'[%s]\n%s\n' % (section, key))
        hgrc.close()
1087 1089
1088 1090 def fail(self, msg):
1089 1091 # unittest differentiates between errored and failed.
1090 1092 # Failed is denoted by AssertionError (by default at least).
1091 1093 raise AssertionError(msg)
1092 1094
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        In debug mode the child inherits our stdio; otherwise output is
        captured, normalized via _getreplacements(), and split into lines.
        exitcode may also be the string 'timeout'.
        """
        if self._debug:
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons after an interrupt.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                # Report a signal exit even if the child exited cleanly.
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            # A failing test may leave daemons behind; reap them now.
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1141 1143
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test in a ".out" file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # With --py3k-warnings, pass -3 so the interpreter emits py3
        # compatibility warnings.
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output needs \r\n -> \n before comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1160 1162
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i) -> single byte. chr() already returns a one-byte str on
# Python 2; Python 3 needs bytes([i]).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1178 1180
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefixes hghave prints for skipped/failed feature checks.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that require an "(esc)" annotation in output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Substitution machinery for escaping unprintable output bytes.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1189 1191
    def __init__(self, path, *args, **kwds):
        """Create a TTest, optionally bound to one named test case.

        The extra keyword-only ``case`` selects one of the cases declared
        in the .t file; the test name, .err path and temp dir name are
        made unique per case.
        """
        # accept an extra "case" parameter
        case = None
        if 'case' in kwds:
            case = kwds.pop('case')
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            # Replace the trailing ".err" with ".<case>.err".
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case
1202 1204
    @property
    def refpath(self):
        # For .t tests, the test file itself is the reference output.
        return os.path.join(self._testdir, self.bname)
1206 1208
    def _run(self, env):
        """Translate the .t file to a shell script, run it, and merge the
        resulting output back against the expectations."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1241 1243
    def _hghave(self, reqs):
        """Check feature requirements by shelling out to the hghave script.

        Returns (True, None) when all features in ``reqs`` are available,
        or (False, stdout) with hghave's explanation otherwise.  Exits the
        whole runner if hghave reports a usage error (exit code 2).
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        # NOTE(review): reqs holds bytes; on Python 3 this str-in-bytes
        # membership test can never match — confirm intended behavior.
        if 'slow' in reqs:
            self._timeout = self._slowtimeout
        return True, None
1263 1265
1264 1266 def _iftest(self, args):
1265 1267 # implements "#if"
1266 1268 reqs = []
1267 1269 for arg in args:
1268 1270 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1269 1271 if arg[3:] == self._case:
1270 1272 return False
1271 1273 elif arg in self._allcases:
1272 1274 if arg != self._case:
1273 1275 return False
1274 1276 else:
1275 1277 reqs.append(arg)
1276 1278 return self._hghave(reqs)[0]
1277 1279
1278 1280 def _parsetest(self, lines):
1279 1281 # We generate a shell script which outputs unique markers to line
1280 1282 # up script results with our source. These markers include input
1281 1283 # line number and the last return code.
1282 1284 salt = b"SALT%d" % time.time()
1283 1285 def addsalt(line, inpython):
1284 1286 if inpython:
1285 1287 script.append(b'%s %d 0\n' % (salt, line))
1286 1288 else:
1287 1289 script.append(b'echo %s %d $?\n' % (salt, line))
1288 1290
1289 1291 script = []
1290 1292
1291 1293 # After we run the shell script, we re-unify the script output
1292 1294 # with non-active parts of the source, with synchronization by our
1293 1295 # SALT line number markers. The after table contains the non-active
1294 1296 # components, ordered by line number.
1295 1297 after = {}
1296 1298
1297 1299 # Expected shell script output.
1298 1300 expected = {}
1299 1301
1300 1302 pos = prepos = -1
1301 1303
1302 1304 # True or False when in a true or false conditional section
1303 1305 skipping = None
1304 1306
1305 1307 # We keep track of whether or not we're in a Python block so we
1306 1308 # can generate the surrounding doctest magic.
1307 1309 inpython = False
1308 1310
1309 1311 if self._debug:
1310 1312 script.append(b'set -x\n')
1311 1313 if self._hgcommand != b'hg':
1312 1314 script.append(b'alias hg="%s"\n' % self._hgcommand)
1313 1315 if os.getenv('MSYSTEM'):
1314 1316 script.append(b'alias pwd="pwd -W"\n')
1315 1317
1316 1318 n = 0
1317 1319 for n, l in enumerate(lines):
1318 1320 if not l.endswith(b'\n'):
1319 1321 l += b'\n'
1320 1322 if l.startswith(b'#require'):
1321 1323 lsplit = l.split()
1322 1324 if len(lsplit) < 2 or lsplit[0] != b'#require':
1323 1325 after.setdefault(pos, []).append(' !!! invalid #require\n')
1324 1326 haveresult, message = self._hghave(lsplit[1:])
1325 1327 if not haveresult:
1326 1328 script = [b'echo "%s"\nexit 80\n' % message]
1327 1329 break
1328 1330 after.setdefault(pos, []).append(l)
1329 1331 elif l.startswith(b'#if'):
1330 1332 lsplit = l.split()
1331 1333 if len(lsplit) < 2 or lsplit[0] != b'#if':
1332 1334 after.setdefault(pos, []).append(' !!! invalid #if\n')
1333 1335 if skipping is not None:
1334 1336 after.setdefault(pos, []).append(' !!! nested #if\n')
1335 1337 skipping = not self._iftest(lsplit[1:])
1336 1338 after.setdefault(pos, []).append(l)
1337 1339 elif l.startswith(b'#else'):
1338 1340 if skipping is None:
1339 1341 after.setdefault(pos, []).append(' !!! missing #if\n')
1340 1342 skipping = not skipping
1341 1343 after.setdefault(pos, []).append(l)
1342 1344 elif l.startswith(b'#endif'):
1343 1345 if skipping is None:
1344 1346 after.setdefault(pos, []).append(' !!! missing #if\n')
1345 1347 skipping = None
1346 1348 after.setdefault(pos, []).append(l)
1347 1349 elif skipping:
1348 1350 after.setdefault(pos, []).append(l)
1349 1351 elif l.startswith(b' >>> '): # python inlines
1350 1352 after.setdefault(pos, []).append(l)
1351 1353 prepos = pos
1352 1354 pos = n
1353 1355 if not inpython:
1354 1356 # We've just entered a Python block. Add the header.
1355 1357 inpython = True
1356 1358 addsalt(prepos, False) # Make sure we report the exit code.
1357 1359 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1358 1360 addsalt(n, True)
1359 1361 script.append(l[2:])
1360 1362 elif l.startswith(b' ... '): # python inlines
1361 1363 after.setdefault(prepos, []).append(l)
1362 1364 script.append(l[2:])
1363 1365 elif l.startswith(b' $ '): # commands
1364 1366 if inpython:
1365 1367 script.append(b'EOF\n')
1366 1368 inpython = False
1367 1369 after.setdefault(pos, []).append(l)
1368 1370 prepos = pos
1369 1371 pos = n
1370 1372 addsalt(n, False)
1371 1373 cmd = l[4:].split()
1372 1374 if len(cmd) == 2 and cmd[0] == b'cd':
1373 1375 l = b' $ cd %s || exit 1\n' % cmd[1]
1374 1376 script.append(l[4:])
1375 1377 elif l.startswith(b' > '): # continuations
1376 1378 after.setdefault(prepos, []).append(l)
1377 1379 script.append(l[4:])
1378 1380 elif l.startswith(b' '): # results
1379 1381 # Queue up a list of expected results.
1380 1382 expected.setdefault(pos, []).append(l[2:])
1381 1383 else:
1382 1384 if inpython:
1383 1385 script.append(b'EOF\n')
1384 1386 inpython = False
1385 1387 # Non-command/result. Queue up for merged output.
1386 1388 after.setdefault(pos, []).append(l)
1387 1389
1388 1390 if inpython:
1389 1391 script.append(b'EOF\n')
1390 1392 if skipping is not None:
1391 1393 after.setdefault(pos, []).append(' !!! missing #endif\n')
1392 1394 addsalt(n + 1, False)
1393 1395
1394 1396 return salt, script, after, expected
1395 1397
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Walks the raw shell output, matching each produced line against
        the expectations recorded by _parsetest() (keyed by salt markers),
        and rebuilds the "merged" .t content in ``postout``.  Returns
        (exitcode, postout); exitcode becomes False when only warn-only
        lines (e.g. glob hints) differed.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            # A failing exit code can never be downgraded to a warning.
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Salted line: the tail carries "<lineno> <returncode>".
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = self.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '+glob':
                            lout = el[:-1] + ' (glob)\n'
                            r = '' # Warn only this line.
                        elif r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        # Track lines that are optional, either explicitly
                        # via "(?)" or because their feature list fails.
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                        else:
                            continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1508 1510
1509 1511 @staticmethod
1510 1512 def rematch(el, l):
1511 1513 try:
1512 1514 # use \Z to ensure that the regex matches to the end of the string
1513 1515 if os.name == 'nt':
1514 1516 return re.match(el + br'\r?\n\Z', l)
1515 1517 return re.match(el + br'\n\Z', l)
1516 1518 except re.error:
1517 1519 # el is an invalid regex
1518 1520 return False
1519 1521
1520 1522 @staticmethod
1521 1523 def globmatch(el, l):
1522 1524 # The only supported special characters are * and ? plus / which also
1523 1525 # matches \ on windows. Escaping of these characters is supported.
1524 1526 if el + b'\n' == l:
1525 1527 if os.altsep:
1526 1528 # matching on "/" is not needed for this line
1527 1529 for pat in checkcodeglobpats:
1528 1530 if pat.match(el):
1529 1531 return True
1530 1532 return b'-glob'
1531 1533 return True
1532 1534 el = el.replace(b'$LOCALIP', b'*')
1533 1535 i, n = 0, len(el)
1534 1536 res = b''
1535 1537 while i < n:
1536 1538 c = el[i:i + 1]
1537 1539 i += 1
1538 1540 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1539 1541 res += el[i - 1:i + 1]
1540 1542 i += 1
1541 1543 elif c == b'*':
1542 1544 res += b'.*'
1543 1545 elif c == b'?':
1544 1546 res += b'.'
1545 1547 elif c == b'/' and os.altsep:
1546 1548 res += b'[/\\\\]'
1547 1549 else:
1548 1550 res += re.escape(c)
1549 1551 return TTest.rematch(res, l)
1550 1552
    def linematch(self, el, l):
        """Compare expected line ``el`` against output line ``l``.

        Returns True on a match, a falsy value on mismatch, "retry" for
        optional lines that did not match, or b'+glob' when the line
        would match if a (glob) annotation were added.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                # Explicitly optional line.
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    conditions = [c for c in m.group(2).split(b' ')]

                    el = m.group(1) + b"\n"
                    if not self._iftest(conditions):
                        retry = "retry" # Not required by listed features

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep and l.replace(b'\\', b'/') == el:
                # Would match if the expectation carried a (glob).
                return b'+glob'
            return retry
1586 1588
1587 1589 @staticmethod
1588 1590 def parsehghaveoutput(lines):
1589 1591 '''Parse hghave log lines.
1590 1592
1591 1593 Return tuple of lists (missing, failed):
1592 1594 * the missing/unknown features
1593 1595 * the features for which existence check failed'''
1594 1596 missing = []
1595 1597 failed = []
1596 1598 for line in lines:
1597 1599 if line.startswith(TTest.SKIPPED_PREFIX):
1598 1600 line = line.splitlines()[0]
1599 1601 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1600 1602 elif line.startswith(TTest.FAILED_PREFIX):
1601 1603 line = line.splitlines()[0]
1602 1604 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1603 1605
1604 1606 return missing, failed
1605 1607
    @staticmethod
    def _escapef(m):
        # re.sub callback: translate the matched byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]
1609 1611
    @staticmethod
    def _stringescape(s):
        # Escape unprintable bytes in s via ESCAPESUB/ESCAPEMAP.
        return TTest.ESCAPESUB(TTest._escapef, s)
1613 1615
# Serializes console output across the worker threads.
iolock = threading.RLock()
1615 1617
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        # Parsed command-line options for the whole run.
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples; see stopTest for the layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent
1646 1648
    def addFailure(self, test, reason):
        """Record a test failure and emit progress output ('t' or '!')."""
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run at the first failure.
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        formatted = '\nERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()
1663 1665
    def addSuccess(self, test):
        # Take iolock since the superclass writes progress output.
        with iolock:
            super(TestResult, self).addSuccess(test)
        # Remember passing tests for the xunit report.
        self.successes.append(test)
1668 1670
    def addError(self, test, err):
        """Record an errored (as opposed to failed) test."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            # --first: abort the run on the first problem.
            self.stop()
1673 1675
    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test ('s' in progress output)."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()
1683 1685
    def addIgnore(self, test, reason):
        """Record a test that was ignored (our custom result kind)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Quietly ignored tests still count as "run".
                    self.testsRun += 1
                self.stream.flush()
1695 1697
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Depending on options this prints a diff, opens an external diff
        viewer, or interactively offers to accept the new output.
        Returns True when the change was accepted (i.e. the test should
        not be counted as failed).
        """
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # External diff viewer on the reference/error files.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        line = highlightdiff(line, self.color)
                        if PYTHON3:
                            # Diff lines are bytes; write via the
                            # underlying binary buffer.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1753 1755
    def startTest(self, test):
        """Record timing information as a test begins."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]
1764 1766
1765 1767 def stopTest(self, test, interrupted=False):
1766 1768 super(TestResult, self).stopTest(test)
1767 1769
1768 1770 test.stopped = os.times()
1769 1771
1770 1772 starttime = test.started
1771 1773 endtime = test.stopped
1772 1774 origin = self._firststarttime
1773 1775 self.times.append((test.name,
1774 1776 endtime[2] - starttime[2], # user space CPU time
1775 1777 endtime[3] - starttime[3], # sys space CPU time
1776 1778 endtime[4] - starttime[4], # real time
1777 1779 starttime[4] - origin, # start date in run context
1778 1780 endtime[4] - origin, # end date in run context
1779 1781 ))
1780 1782
1781 1783 if interrupted:
1782 1784 with iolock:
1783 1785 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1784 1786 test.name, self.times[-1][3]))
1785 1787
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        """Filter, schedule and execute the collected tests into *result*."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            # get() yields the test to schedule, building a fresh instance
            # via the loadtest callback when the test asks to be reloaded.
            # It closes over the loop variable 'test' but is always called
            # within the same iteration, so late binding is harmless here.
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # whitelisted tests bypass the blacklist/retest/keyword filters
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # match keywords against the test body and its name
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # one single-string slot per job, consumed by the --showchannels
        # display thread below
        channels = [""] * self._jobs

        def job(test, result):
            # claim the first free channel for this job's progress display
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    # channels may have been cleared to [] on interrupt
                    pass

        def stat():
            # Background thread: render one character per job slot until
            # run() empties 'channels' to signal shutdown.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # drain completions when saturated or out of new tests;
                # the 1-second timeout keeps us responsive to shouldStop
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # re-queue the test so the suite never drains
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # signal the stat() thread to exit
        channels = []

        return result
1972 1974
1973 1975 # Save the most recent 5 wall-clock runtimes of each test to a
1974 1976 # human-readable text file named .testtimes. Tests are sorted
1975 1977 # alphabetically, while times for each test are listed from oldest to
1976 1978 # newest.
1977 1979
def loadtimes(outputdir):
    """Load saved per-test wall-clock times from *outputdir*.

    Returns a list of ``(testname, [runtimes])`` pairs parsed from the
    ``.testtimes`` file written by savetimes(). A missing file yields an
    empty list; any other IOError propagates.
    """
    times = []
    try:
        # savetimes() renames its temp file to '.testtimes' (no suffix);
        # reading '.testtimes-' here meant prior runs were never reused.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
1989 1991
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into outputdir's .testtimes file.

    Keeps at most the five most recent runtimes per test, oldest first.
    Skipped tests keep their previously recorded history. The file is
    replaced via a temp file + rename; failures to swap it in are ignored.
    """
    maxruns = 5
    merged = dict(loadtimes(outputdir))
    wasskipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        name, wall = tdata[0], tdata[3]
        if name in wasskipped:
            continue
        history = merged.setdefault(name, [])
        history.append(wall)
        history[:] = history[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(merged.items()):
            cols = ' '.join('%.3f' % (t,) for t in history)
            fp.write('%s %s\n' % (name, cols))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2015 2017
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # the owning TestRunner, consulted for options and output paths
        self._runner = runner

    def listtests(self, test):
        """Print the sorted test names without running them.

        Every listed test is recorded as a success so the xunit/json
        reports (when requested) contain the full list.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Execute *test*, print a summary and write the requested reports.

        When failures occurred and --known-good-rev was given, each failing
        test is bisected with ``hg bisect`` to identify the changeset that
        broke (or fixed) it.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                bisectcmd = ['hg', 'bisect']
                bisectrepo = self._runner.options.bisect_repo
                if bisectrepo:
                    bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
                # run a command, discarding its output entirely
                def nooutput(args):
                    p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                         stdout=subprocess.PIPE)
                    p.stdout.read()
                    p.wait()
                for test, msg in result.failures:
                    # NOTE(review): stray trailing comma builds a throwaway
                    # 1-tuple; harmless, but it should be removed.
                    nooutput(bisectcmd + ['--reset']),
                    nooutput(bisectcmd + ['--bad', '.'])
                    nooutput(bisectcmd + ['--good',
                                          self._runner.options.known_good_rev])
                    # TODO: we probably need to forward more options
                    # that alter hg's behavior inside the tests.
                    opts = ''
                    withhg = self._runner.options.with_hg
                    if withhg:
                        opts += ' --with-hg=%s ' % shellquote(withhg)
                    rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                           test)
                    sub = subprocess.Popen(bisectcmd + ['--command', rtc],
                                           stderr=subprocess.STDOUT,
                                           stdout=subprocess.PIPE)
                    data = sub.stdout.read()
                    sub.wait()
                    # NOTE(review): 'data' is bytes from the pipe while the
                    # pattern is a str; this assumes Python 2 semantics —
                    # verify it still matches under Python 3.
                    m = re.search(
                        (r'\nThe first (?P<goodbad>bad|good) revision '
                         r'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                         r'summary: +(?P<summary>[^\n]+)\n'),
                        data, (re.MULTILINE | re.DOTALL))
                    if m is None:
                        self.stream.writeln(
                            'Failed to identify failure point for %s' % test)
                        continue
                    dat = m.groupdict()
                    verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
                    self.stream.writeln(
                        '%s %s by %s (%s)' % (
                            test, verb, dat['node'], dat['summary']))
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)
            self.stream.flush()

        return result

    def printtimes(self, times):
        """Write a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Serialize *result* as JUnit-style XML into *outf*."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            message = cdatasafe(message).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Serialize *result* as a ``testreport`` JSON blob into *outf*."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2233 2235
2234 2236 class TestRunner(object):
2235 2237 """Holds context for executing tests.
2236 2238
2237 2239 Tests rely on a lot of state. This object holds it for them.
2238 2240 """
2239 2241
2240 2242 # Programs required to run tests.
2241 2243 REQUIREDTOOLS = [
2242 2244 b'diff',
2243 2245 b'grep',
2244 2246 b'unzip',
2245 2247 b'gunzip',
2246 2248 b'bunzip2',
2247 2249 b'sed',
2248 2250 ]
2249 2251
2250 2252 # Maps file extensions to test class.
2251 2253 TESTTYPES = [
2252 2254 (b'.py', PythonTest),
2253 2255 (b'.t', TTest),
2254 2256 ]
2255 2257
2256 2258 def __init__(self):
2257 2259 self.options = None
2258 2260 self._hgroot = None
2259 2261 self._testdir = None
2260 2262 self._outputdir = None
2261 2263 self._hgtmp = None
2262 2264 self._installdir = None
2263 2265 self._bindir = None
2264 2266 self._tmpbinddir = None
2265 2267 self._pythondir = None
2266 2268 self._coveragefile = None
2267 2269 self._createdfiles = []
2268 2270 self._hgcommand = None
2269 2271 self._hgpath = None
2270 2272 self._portoffset = 0
2271 2273 self._ports = {}
2272 2274
2273 2275 def run(self, args, parser=None):
2274 2276 """Run the test suite."""
2275 2277 oldmask = os.umask(0o22)
2276 2278 try:
2277 2279 parser = parser or getparser()
2278 2280 options, args = parseargs(args, parser)
2279 2281 # positional arguments are paths to test files to run, so
2280 2282 # we make sure they're all bytestrings
2281 2283 args = [_bytespath(a) for a in args]
2284 if options.test_list is not None:
2285 for listfile in options.test_list:
2286 with open(listfile, 'rb') as f:
2287 args.extend(t for t in f.read().splitlines() if t)
2282 2288 self.options = options
2283 2289
2284 2290 self._checktools()
2285 2291 testdescs = self.findtests(args)
2286 2292 if options.profile_runner:
2287 2293 import statprof
2288 2294 statprof.start()
2289 2295 result = self._run(testdescs)
2290 2296 if options.profile_runner:
2291 2297 statprof.stop()
2292 2298 statprof.display()
2293 2299 return result
2294 2300
2295 2301 finally:
2296 2302 os.umask(oldmask)
2297 2303
    def _run(self, testdescs):
        """Order the tests, prepare directories and environment, then run.

        Returns the exit code to report (0 on success).
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir.  So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

                # Automatically removing tmpdir sounds convenient, but could
                # really annoy anyone in the habit of using "--tmpdir=/tmp"
                # or "--tmpdir=$HOME".
                #vlog("# Removing temp dir", tmpdir)
                #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # give spawned children a moment to exit before cleanup
            time.sleep(.1)
            self._cleanup()
2475 2481
2476 2482 def findtests(self, args):
2477 2483 """Finds possible test files from arguments.
2478 2484
2479 2485 If you wish to inject custom tests into the test harness, this would
2480 2486 be a good function to monkeypatch or override in a derived class.
2481 2487 """
2482 2488 if not args:
2483 2489 if self.options.changed:
2484 2490 proc = Popen4('hg st --rev "%s" -man0 .' %
2485 2491 self.options.changed, None, 0)
2486 2492 stdout, stderr = proc.communicate()
2487 2493 args = stdout.strip(b'\0').split(b'\0')
2488 2494 else:
2489 2495 args = os.listdir(b'.')
2490 2496
2491 2497 tests = []
2492 2498 for t in args:
2493 2499 if not (os.path.basename(t).startswith(b'test-')
2494 2500 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2495 2501 continue
2496 2502 if t.endswith(b'.t'):
2497 2503 # .t file may contain multiple test cases
2498 2504 cases = sorted(parsettestcases(t))
2499 2505 if cases:
2500 2506 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2501 2507 else:
2502 2508 tests.append({'path': t})
2503 2509 else:
2504 2510 tests.append({'path': t})
2505 2511 return tests
2506 2512
    def _runtests(self, testdescs):
        """Build a TestSuite from *testdescs* and run (or just list) it.

        Returns 1 on failure or interruption, and None on success; the
        caller (_run) maps None to exit code 0.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # with --restart, skip ahead to the first test that left a
                # .err file behind in a previous (failed) run
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                # install a private hg (unless --with-hg gave us one) so the
                # tests exercise a known binary
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2581 2587
2582 2588 def _getport(self, count):
2583 2589 port = self._ports.get(count) # do we have a cached entry?
2584 2590 if port is None:
2585 2591 portneeded = 3
2586 2592 # above 100 tries we just give up and let test reports failure
2587 2593 for tries in xrange(100):
2588 2594 allfree = True
2589 2595 port = self.options.port + self._portoffset
2590 2596 for idx in xrange(portneeded):
2591 2597 if not checkportisavailable(port + idx):
2592 2598 allfree = False
2593 2599 break
2594 2600 self._portoffset += portneeded
2595 2601 if allfree:
2596 2602 break
2597 2603 self._ports[count] = port
2598 2604 return port
2599 2605
2600 2606 def _gettest(self, testdesc, count):
2601 2607 """Obtain a Test by looking at its filename.
2602 2608
2603 2609 Returns a Test instance. The Test may not be runnable if it doesn't
2604 2610 map to a known type.
2605 2611 """
2606 2612 path = testdesc['path']
2607 2613 lctest = path.lower()
2608 2614 testcls = Test
2609 2615
2610 2616 for ext, cls in self.TESTTYPES:
2611 2617 if lctest.endswith(ext):
2612 2618 testcls = cls
2613 2619 break
2614 2620
2615 2621 refpath = os.path.join(self._testdir, path)
2616 2622 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2617 2623
2618 2624 # extra keyword parameters. 'case' is used by .t tests
2619 2625 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2620 2626
2621 2627 t = testcls(refpath, self._outputdir, tmpdir,
2622 2628 keeptmpdir=self.options.keep_tmpdir,
2623 2629 debug=self.options.debug,
2624 2630 timeout=self.options.timeout,
2625 2631 startport=self._getport(count),
2626 2632 extraconfigopts=self.options.extra_config_opt,
2627 2633 py3kwarnings=self.options.py3k_warnings,
2628 2634 shell=self.options.shell,
2629 2635 hgcommand=self._hgcommand,
2630 2636 usechg=bool(self.options.with_chg or self.options.chg),
2631 2637 useipv6=useipv6, **kwds)
2632 2638 t.should_reload = True
2633 2639 return t
2634 2640
2635 2641 def _cleanup(self):
2636 2642 """Clean up state from this test invocation."""
2637 2643 if self.options.keep_tmpdir:
2638 2644 return
2639 2645
2640 2646 vlog("# Cleaning up HGTMP", self._hgtmp)
2641 2647 shutil.rmtree(self._hgtmp, True)
2642 2648 for f in self._createdfiles:
2643 2649 try:
2644 2650 os.remove(f)
2645 2651 except OSError:
2646 2652 pass
2647 2653
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            # Preferred strategy: put a 'python' symlink to our interpreter
            # into the test bin directory so subprocesses find it via PATH.
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # If the symlink already points at us there is nothing to do;
                # otherwise remove the stale one before re-creating it.
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                # ENOENT just means no symlink exists yet.
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No symlink support (e.g. Windows): rearrange PATH so the
            # directory holding our interpreter is searched first.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2681 2687
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.

        Runs ``setup.py`` from the hg source root to build and install into
        the per-run temporary directories, points the test shell at the right
        Python, optionally patches the installed ``hg`` script for -3
        warnings, repairs ``hg.bat`` on Windows, and wires up coverage
        collection when requested. Exits the process on install failure.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            # The shell command below is assembled from bytes pieces, so
            # normalize these str values to bytes first.
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            if not self.options.verbose:
                # Successful install: drop the captured log (it may not
                # exist if nothing was written).
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Install failed: replay the captured build log and abort.
            f = open(installerrs, 'rb')
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            # Rewrite the shebang line of the installed 'hg' script so
            # Python is invoked with -3 (Py3k warning mode).
            # NOTE(review): the file is opened in binary mode but ' -3' and
            # '\n' are str literals — presumably this path is only taken
            # under Python 2; confirm before relying on it under Python 3.
            vlog("# Updating hg command to enable Py3k Warnings switch")
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Coverage mode: install a sitecustomize trigger so every
            # spawned Python starts collecting, and point coverage at a
            # shared data directory via environment variables.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2800 2806
2801 2807 def _checkhglib(self, verb):
2802 2808 """Ensure that the 'mercurial' package imported by python is
2803 2809 the one we expect it to be. If not, print a warning to stderr."""
2804 2810 if ((self._bindir == self._pythondir) and
2805 2811 (self._bindir != self._tmpbindir)):
2806 2812 # The pythondir has been inferred from --with-hg flag.
2807 2813 # We cannot expect anything sensible here.
2808 2814 return
2809 2815 expecthg = os.path.join(self._pythondir, b'mercurial')
2810 2816 actualhg = self._gethgpath()
2811 2817 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2812 2818 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2813 2819 ' (expected %s)\n'
2814 2820 % (verb, actualhg, expecthg))
2815 2821 def _gethgpath(self):
2816 2822 """Return the path to the mercurial package that is actually found by
2817 2823 the current Python interpreter."""
2818 2824 if self._hgpath is not None:
2819 2825 return self._hgpath
2820 2826
2821 2827 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2822 2828 cmd = cmd % PYTHON
2823 2829 if PYTHON3:
2824 2830 cmd = _strpath(cmd)
2825 2831 pipe = os.popen(cmd)
2826 2832 try:
2827 2833 self._hgpath = _bytespath(pipe.read().strip())
2828 2834 finally:
2829 2835 pipe.close()
2830 2836
2831 2837 return self._hgpath
2832 2838
2833 2839 def _installchg(self):
2834 2840 """Install chg into the test environment"""
2835 2841 vlog('# Performing temporary installation of CHG')
2836 2842 assert os.path.dirname(self._bindir) == self._installdir
2837 2843 assert self._hgroot, 'must be called after _installhg()'
2838 2844 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2839 2845 % {b'make': 'make', # TODO: switch by option or environment?
2840 2846 b'prefix': self._installdir})
2841 2847 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2842 2848 vlog("# Running", cmd)
2843 2849 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2844 2850 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2845 2851 stderr=subprocess.STDOUT)
2846 2852 out, _err = proc.communicate()
2847 2853 if proc.returncode != 0:
2848 2854 if PYTHON3:
2849 2855 sys.stdout.buffer.write(out)
2850 2856 else:
2851 2857 sys.stdout.write(out)
2852 2858 sys.exit(1)
2853 2859
    def _outputcoverage(self):
        """Produce code coverage output.

        Combines the per-process coverage data files collected during the
        run and emits a console report, plus optional HTML and
        annotated-source output depending on command line options.
        """
        # Imported lazily: coverage is only needed when -c/-C/-s was given.
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        # Merge the data files written by each test process.
        cov.combine()

        # Never report on the harness or installed scripts themselves.
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
2882 2888
2883 2889 def _findprogram(self, program):
2884 2890 """Search PATH for a executable program"""
2885 2891 dpb = _bytespath(os.defpath)
2886 2892 sepb = _bytespath(os.pathsep)
2887 2893 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2888 2894 name = os.path.join(p, program)
2889 2895 if os.name == 'nt' or os.access(name, os.X_OK):
2890 2896 return name
2891 2897 return None
2892 2898
2893 2899 def _checktools(self):
2894 2900 """Ensure tools required to run tests are present."""
2895 2901 for p in self.REQUIREDTOOLS:
2896 2902 if os.name == 'nt' and not p.endswith('.exe'):
2897 2903 p += '.exe'
2898 2904 found = self._findprogram(p)
2899 2905 if found:
2900 2906 vlog("# Found prerequisite", p, "at", found)
2901 2907 else:
2902 2908 print("WARNING: Did not find prerequisite tool: %s " %
2903 2909 p.decode("utf-8"))
2904 2910
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, switch the standard streams to binary mode so test
        # output bytes are not mangled by CRLF translation.
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # Not on Windows: msvcrt is unavailable and no switch is needed.
        pass

    # Exit with the runner's status so shells/CI see pass/fail correctly.
    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1460 +1,1473 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a successful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 test diff colorisation
123 123
124 124 #if no-windows pygments
125 125 $ rt test-failure.t --color always
126 126
127 127 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
128 128 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
129 129 \x1b[38;5;90;01m@@ -1,3 +1,3 @@\x1b[39;00m (esc)
130 130 $ echo "bar-baz"; echo "bar-bad"
131 131 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
132 132 bar*bad (glob)
133 133 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
134 134 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
135 135 !
136 136 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
137 137 # Ran 1 tests, 0 skipped, 1 failed.
138 138 python hash seed: * (glob)
139 139 [1]
140 140
141 141 $ rt test-failure.t 2> tmp.log
142 142 [1]
143 143 $ cat tmp.log
144 144
145 145 --- $TESTTMP/test-failure.t
146 146 +++ $TESTTMP/test-failure.t.err
147 147 @@ -1,3 +1,3 @@
148 148 $ echo "bar-baz"; echo "bar-bad"
149 149 + bar*baz (glob)
150 150 bar*bad (glob)
151 151 - bar*baz (glob)
152 152
153 153 ERROR: test-failure.t output changed
154 154 !
155 155 Failed test-failure.t: output changed
156 156 # Ran 1 tests, 0 skipped, 1 failed.
157 157 python hash seed: * (glob)
158 158 #endif
159 159
160 160 $ cat > test-failure.t << EOF
161 161 > $ true
162 162 > should go away (true !)
163 163 > $ true
164 164 > should stay (false !)
165 165 >
166 166 > Should remove first line, not second or third
167 167 > $ echo 'testing'
168 168 > baz*foo (glob) (true !)
169 169 > foobar*foo (glob) (false !)
170 170 > te*ting (glob) (true !)
171 171 >
172 172 > Should keep first two lines, remove third and last
173 173 > $ echo 'testing'
174 174 > test.ng (re) (true !)
175 175 > foo.ar (re) (false !)
176 176 > b.r (re) (true !)
177 177 > missing (?)
178 178 > awol (true !)
179 179 >
180 180 > The "missing" line should stay, even though awol is dropped
181 181 > $ echo 'testing'
182 182 > test.ng (re) (true !)
183 183 > foo.ar (?)
184 184 > awol
185 185 > missing (?)
186 186 > EOF
187 187 $ rt test-failure.t
188 188
189 189 --- $TESTTMP/test-failure.t
190 190 +++ $TESTTMP/test-failure.t.err
191 191 @@ -1,11 +1,9 @@
192 192 $ true
193 193 - should go away (true !)
194 194 $ true
195 195 should stay (false !)
196 196
197 197 Should remove first line, not second or third
198 198 $ echo 'testing'
199 199 - baz*foo (glob) (true !)
200 200 foobar*foo (glob) (false !)
201 201 te*ting (glob) (true !)
202 202
203 203 foo.ar (re) (false !)
204 204 missing (?)
205 205 @@ -13,13 +11,10 @@
206 206 $ echo 'testing'
207 207 test.ng (re) (true !)
208 208 foo.ar (re) (false !)
209 209 - b.r (re) (true !)
210 210 missing (?)
211 211 - awol (true !)
212 212
213 213 The "missing" line should stay, even though awol is dropped
214 214 $ echo 'testing'
215 215 test.ng (re) (true !)
216 216 foo.ar (?)
217 217 - awol
218 218 missing (?)
219 219
220 220 ERROR: test-failure.t output changed
221 221 !
222 222 Failed test-failure.t: output changed
223 223 # Ran 1 tests, 0 skipped, 1 failed.
224 224 python hash seed: * (glob)
225 225 [1]
226 226
227 227 basic failing test
228 228 $ cat > test-failure.t << EOF
229 229 > $ echo babar
230 230 > rataxes
231 231 > This is a noop statement so that
232 232 > this test is still more bytes than success.
233 233 > pad pad pad pad............................................................
234 234 > pad pad pad pad............................................................
235 235 > pad pad pad pad............................................................
236 236 > pad pad pad pad............................................................
237 237 > pad pad pad pad............................................................
238 238 > pad pad pad pad............................................................
239 239 > EOF
240 240
241 241 >>> fh = open('test-failure-unicode.t', 'wb')
242 242 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
243 243 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
244 244
245 245 $ rt
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,5 +1,5 @@
250 250 $ echo babar
251 251 - rataxes
252 252 + babar
253 253 This is a noop statement so that
254 254 this test is still more bytes than success.
255 255 pad pad pad pad............................................................
256 256
257 257 ERROR: test-failure.t output changed
258 258 !.
259 259 --- $TESTTMP/test-failure-unicode.t
260 260 +++ $TESTTMP/test-failure-unicode.t.err
261 261 @@ -1,2 +1,2 @@
262 262 $ echo babar\xce\xb1 (esc)
263 263 - l\xce\xb5\xce\xb5t (esc)
264 264 + babar\xce\xb1 (esc)
265 265
266 266 ERROR: test-failure-unicode.t output changed
267 267 !
268 268 Failed test-failure.t: output changed
269 269 Failed test-failure-unicode.t: output changed
270 270 # Ran 3 tests, 0 skipped, 2 failed.
271 271 python hash seed: * (glob)
272 272 [1]
273 273
274 274 test --outputdir
275 275 $ mkdir output
276 276 $ rt --outputdir output
277 277
278 278 --- $TESTTMP/test-failure.t
279 279 +++ $TESTTMP/output/test-failure.t.err
280 280 @@ -1,5 +1,5 @@
281 281 $ echo babar
282 282 - rataxes
283 283 + babar
284 284 This is a noop statement so that
285 285 this test is still more bytes than success.
286 286 pad pad pad pad............................................................
287 287
288 288 ERROR: test-failure.t output changed
289 289 !.
290 290 --- $TESTTMP/test-failure-unicode.t
291 291 +++ $TESTTMP/output/test-failure-unicode.t.err
292 292 @@ -1,2 +1,2 @@
293 293 $ echo babar\xce\xb1 (esc)
294 294 - l\xce\xb5\xce\xb5t (esc)
295 295 + babar\xce\xb1 (esc)
296 296
297 297 ERROR: test-failure-unicode.t output changed
298 298 !
299 299 Failed test-failure.t: output changed
300 300 Failed test-failure-unicode.t: output changed
301 301 # Ran 3 tests, 0 skipped, 2 failed.
302 302 python hash seed: * (glob)
303 303 [1]
304 304 $ ls -a output
305 305 .
306 306 ..
307 307 .testtimes
308 308 test-failure-unicode.t.err
309 309 test-failure.t.err
310 310
311 311 test --xunit support
312 312 $ rt --xunit=xunit.xml
313 313
314 314 --- $TESTTMP/test-failure.t
315 315 +++ $TESTTMP/test-failure.t.err
316 316 @@ -1,5 +1,5 @@
317 317 $ echo babar
318 318 - rataxes
319 319 + babar
320 320 This is a noop statement so that
321 321 this test is still more bytes than success.
322 322 pad pad pad pad............................................................
323 323
324 324 ERROR: test-failure.t output changed
325 325 !.
326 326 --- $TESTTMP/test-failure-unicode.t
327 327 +++ $TESTTMP/test-failure-unicode.t.err
328 328 @@ -1,2 +1,2 @@
329 329 $ echo babar\xce\xb1 (esc)
330 330 - l\xce\xb5\xce\xb5t (esc)
331 331 + babar\xce\xb1 (esc)
332 332
333 333 ERROR: test-failure-unicode.t output changed
334 334 !
335 335 Failed test-failure.t: output changed
336 336 Failed test-failure-unicode.t: output changed
337 337 # Ran 3 tests, 0 skipped, 2 failed.
338 338 python hash seed: * (glob)
339 339 [1]
340 340 $ cat xunit.xml
341 341 <?xml version="1.0" encoding="utf-8"?>
342 342 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
343 343 <testcase name="test-success.t" time="*"/> (glob)
344 344 <testcase name="test-failure-unicode.t" time="*"> (glob)
345 345 <failure message="output changed" type="output-mismatch">
346 346 <![CDATA[--- $TESTTMP/test-failure-unicode.t
347 347 +++ $TESTTMP/test-failure-unicode.t.err
348 348 @@ -1,2 +1,2 @@
349 349 $ echo babar\xce\xb1 (esc)
350 350 - l\xce\xb5\xce\xb5t (esc)
351 351 + babar\xce\xb1 (esc)
352 352 ]]> </failure>
353 353 </testcase>
354 354 <testcase name="test-failure.t" time="*"> (glob)
355 355 <failure message="output changed" type="output-mismatch">
356 356 <![CDATA[--- $TESTTMP/test-failure.t
357 357 +++ $TESTTMP/test-failure.t.err
358 358 @@ -1,5 +1,5 @@
359 359 $ echo babar
360 360 - rataxes
361 361 + babar
362 362 This is a noop statement so that
363 363 this test is still more bytes than success.
364 364 pad pad pad pad............................................................
365 365 ]]> </failure>
366 366 </testcase>
367 367 </testsuite>
368 368
369 369 $ cat .testtimes
370 370 test-failure-unicode.t * (glob)
371 371 test-failure.t * (glob)
372 372 test-success.t * (glob)
373 373
374 374 $ rt --list-tests
375 375 test-failure-unicode.t
376 376 test-failure.t
377 377 test-success.t
378 378
379 379 $ rt --list-tests --json
380 380 test-failure-unicode.t
381 381 test-failure.t
382 382 test-success.t
383 383 $ cat report.json
384 384 testreport ={
385 385 "test-failure-unicode.t": {
386 386 "result": "success"
387 387 },
388 388 "test-failure.t": {
389 389 "result": "success"
390 390 },
391 391 "test-success.t": {
392 392 "result": "success"
393 393 }
394 394 } (no-eol)
395 395
396 396 $ rt --list-tests --xunit=xunit.xml
397 397 test-failure-unicode.t
398 398 test-failure.t
399 399 test-success.t
400 400 $ cat xunit.xml
401 401 <?xml version="1.0" encoding="utf-8"?>
402 402 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
403 403 <testcase name="test-failure-unicode.t"/>
404 404 <testcase name="test-failure.t"/>
405 405 <testcase name="test-success.t"/>
406 406 </testsuite>
407 407
408 408 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
409 409 test-failure-unicode.t
410 410 test-failure.t
411 411 $ cat output/report.json
412 412 testreport ={
413 413 "test-failure-unicode.t": {
414 414 "result": "success"
415 415 },
416 416 "test-failure.t": {
417 417 "result": "success"
418 418 }
419 419 } (no-eol)
420 420 $ cat xunit.xml
421 421 <?xml version="1.0" encoding="utf-8"?>
422 422 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
423 423 <testcase name="test-failure-unicode.t"/>
424 424 <testcase name="test-failure.t"/>
425 425 </testsuite>
426 426
427 427 $ rm test-failure-unicode.t
428 428
429 429 test for --retest
430 430 ====================
431 431
432 432 $ rt --retest
433 433
434 434 --- $TESTTMP/test-failure.t
435 435 +++ $TESTTMP/test-failure.t.err
436 436 @@ -1,5 +1,5 @@
437 437 $ echo babar
438 438 - rataxes
439 439 + babar
440 440 This is a noop statement so that
441 441 this test is still more bytes than success.
442 442 pad pad pad pad............................................................
443 443
444 444 ERROR: test-failure.t output changed
445 445 !
446 446 Failed test-failure.t: output changed
447 447 # Ran 2 tests, 1 skipped, 1 failed.
448 448 python hash seed: * (glob)
449 449 [1]
450 450
451 451 --retest works with --outputdir
452 452 $ rm -r output
453 453 $ mkdir output
454 454 $ mv test-failure.t.err output
455 455 $ rt --retest --outputdir output
456 456
457 457 --- $TESTTMP/test-failure.t
458 458 +++ $TESTTMP/output/test-failure.t.err
459 459 @@ -1,5 +1,5 @@
460 460 $ echo babar
461 461 - rataxes
462 462 + babar
463 463 This is a noop statement so that
464 464 this test is still more bytes than success.
465 465 pad pad pad pad............................................................
466 466
467 467 ERROR: test-failure.t output changed
468 468 !
469 469 Failed test-failure.t: output changed
470 470 # Ran 2 tests, 1 skipped, 1 failed.
471 471 python hash seed: * (glob)
472 472 [1]
473 473
474 474 Selecting Tests To Run
475 475 ======================
476 476
477 477 successful
478 478
479 479 $ rt test-success.t
480 480 .
481 481 # Ran 1 tests, 0 skipped, 0 failed.
482 482
483 483 success w/ keyword
484 484 $ rt -k xyzzy
485 485 .
486 486 # Ran 2 tests, 1 skipped, 0 failed.
487 487
488 488 failed
489 489
490 490 $ rt test-failure.t
491 491
492 492 --- $TESTTMP/test-failure.t
493 493 +++ $TESTTMP/test-failure.t.err
494 494 @@ -1,5 +1,5 @@
495 495 $ echo babar
496 496 - rataxes
497 497 + babar
498 498 This is a noop statement so that
499 499 this test is still more bytes than success.
500 500 pad pad pad pad............................................................
501 501
502 502 ERROR: test-failure.t output changed
503 503 !
504 504 Failed test-failure.t: output changed
505 505 # Ran 1 tests, 0 skipped, 1 failed.
506 506 python hash seed: * (glob)
507 507 [1]
508 508
509 509 failure w/ keyword
510 510 $ rt -k rataxes
511 511
512 512 --- $TESTTMP/test-failure.t
513 513 +++ $TESTTMP/test-failure.t.err
514 514 @@ -1,5 +1,5 @@
515 515 $ echo babar
516 516 - rataxes
517 517 + babar
518 518 This is a noop statement so that
519 519 this test is still more bytes than success.
520 520 pad pad pad pad............................................................
521 521
522 522 ERROR: test-failure.t output changed
523 523 !
524 524 Failed test-failure.t: output changed
525 525 # Ran 2 tests, 1 skipped, 1 failed.
526 526 python hash seed: * (glob)
527 527 [1]
528 528
529 529 Verify that when a process fails to start we show a useful message
530 530 ==================================================================
531 531
532 532 $ cat > test-serve-fail.t <<EOF
533 533 > $ echo 'abort: child process failed to start blah'
534 534 > EOF
535 535 $ rt test-serve-fail.t
536 536
537 537 ERROR: test-serve-fail.t output changed
538 538 !
539 539 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
540 540 # Ran 1 tests, 0 skipped, 1 failed.
541 541 python hash seed: * (glob)
542 542 [1]
543 543 $ rm test-serve-fail.t
544 544
545 545 Verify that we can try other ports
546 546 ===================================
547 547 $ hg init inuse
548 548 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
549 549 $ cat blocks.pid >> $DAEMON_PIDS
550 550 $ cat > test-serve-inuse.t <<EOF
551 551 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
552 552 > $ cat hg.pid >> \$DAEMON_PIDS
553 553 > EOF
554 554 $ rt test-serve-inuse.t
555 555 .
556 556 # Ran 1 tests, 0 skipped, 0 failed.
557 557 $ rm test-serve-inuse.t
558 558 $ killdaemons.py $DAEMON_PIDS
559 559 $ rm $DAEMON_PIDS
560 560
561 561 Running In Debug Mode
562 562 ======================
563 563
564 564 $ rt --debug 2>&1 | grep -v pwd
565 565 + echo *SALT* 0 0 (glob)
566 566 *SALT* 0 0 (glob)
567 567 + echo babar
568 568 babar
569 569 + echo *SALT* 10 0 (glob)
570 570 *SALT* 10 0 (glob)
571 571 *+ echo *SALT* 0 0 (glob)
572 572 *SALT* 0 0 (glob)
573 573 + echo babar
574 574 babar
575 575 + echo *SALT* 2 0 (glob)
576 576 *SALT* 2 0 (glob)
577 577 + echo xyzzy
578 578 xyzzy
579 579 + echo *SALT* 9 0 (glob)
580 580 *SALT* 9 0 (glob)
581 581 + printf *abc\ndef\nxyz\n* (glob)
582 582 abc
583 583 def
584 584 xyz
585 585 + echo *SALT* 15 0 (glob)
586 586 *SALT* 15 0 (glob)
587 587 + printf *zyx\nwvu\ntsr\n* (glob)
588 588 zyx
589 589 wvu
590 590 tsr
591 591 + echo *SALT* 22 0 (glob)
592 592 *SALT* 22 0 (glob)
593 593 .
594 594 # Ran 2 tests, 0 skipped, 0 failed.
595 595
596 596 Parallel runs
597 597 ==============
598 598
599 599 (duplicate the failing test to get predictable output)
600 600 $ cp test-failure.t test-failure-copy.t
601 601
602 602 $ rt --jobs 2 test-failure*.t -n
603 603 !!
604 604 Failed test-failure*.t: output changed (glob)
605 605 Failed test-failure*.t: output changed (glob)
606 606 # Ran 2 tests, 0 skipped, 2 failed.
607 607 python hash seed: * (glob)
608 608 [1]
609 609
610 610 failures in parallel with --first should only print one failure
611 611 >>> f = open('test-nothing.t', 'w')
612 612 >>> f.write('foo\n' * 1024) and None
613 613 >>> f.write(' $ sleep 1') and None
614 614 $ rt --jobs 2 --first
615 615
616 616 --- $TESTTMP/test-failure*.t (glob)
617 617 +++ $TESTTMP/test-failure*.t.err (glob)
618 618 @@ -1,5 +1,5 @@
619 619 $ echo babar
620 620 - rataxes
621 621 + babar
622 622 This is a noop statement so that
623 623 this test is still more bytes than success.
624 624 pad pad pad pad............................................................
625 625
626 626 Failed test-failure*.t: output changed (glob)
627 627 Failed test-nothing.t: output changed
628 628 # Ran 2 tests, 0 skipped, 2 failed.
629 629 python hash seed: * (glob)
630 630 [1]
631 631
632 632
633 633 (delete the duplicated test file)
634 634 $ rm test-failure-copy.t test-nothing.t
635 635
636 636
637 637 Interactive run
638 638 ===============
639 639
640 640 (backup the failing test)
641 641 $ cp test-failure.t backup
642 642
643 643 Refuse the fix
644 644
645 645 $ echo 'n' | rt -i
646 646
647 647 --- $TESTTMP/test-failure.t
648 648 +++ $TESTTMP/test-failure.t.err
649 649 @@ -1,5 +1,5 @@
650 650 $ echo babar
651 651 - rataxes
652 652 + babar
653 653 This is a noop statement so that
654 654 this test is still more bytes than success.
655 655 pad pad pad pad............................................................
656 656 Accept this change? [n]
657 657 ERROR: test-failure.t output changed
658 658 !.
659 659 Failed test-failure.t: output changed
660 660 # Ran 2 tests, 0 skipped, 1 failed.
661 661 python hash seed: * (glob)
662 662 [1]
663 663
664 664 $ cat test-failure.t
665 665 $ echo babar
666 666 rataxes
667 667 This is a noop statement so that
668 668 this test is still more bytes than success.
669 669 pad pad pad pad............................................................
670 670 pad pad pad pad............................................................
671 671 pad pad pad pad............................................................
672 672 pad pad pad pad............................................................
673 673 pad pad pad pad............................................................
674 674 pad pad pad pad............................................................
675 675
676 676 Interactive with custom view
677 677
678 678 $ echo 'n' | rt -i --view echo
679 679 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
680 680 Accept this change? [n]* (glob)
681 681 ERROR: test-failure.t output changed
682 682 !.
683 683 Failed test-failure.t: output changed
684 684 # Ran 2 tests, 0 skipped, 1 failed.
685 685 python hash seed: * (glob)
686 686 [1]
687 687
688 688 View the fix
689 689
690 690 $ echo 'y' | rt --view echo
691 691 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
692 692
693 693 ERROR: test-failure.t output changed
694 694 !.
695 695 Failed test-failure.t: output changed
696 696 # Ran 2 tests, 0 skipped, 1 failed.
697 697 python hash seed: * (glob)
698 698 [1]
699 699
700 700 Accept the fix
701 701
702 702 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
703 703 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
704 704 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
705 705 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
706 706 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
707 707 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
708 708 $ echo 'y' | rt -i 2>&1
709 709
710 710 --- $TESTTMP/test-failure.t
711 711 +++ $TESTTMP/test-failure.t.err
712 712 @@ -1,5 +1,5 @@
713 713 $ echo babar
714 714 - rataxes
715 715 + babar
716 716 This is a noop statement so that
717 717 this test is still more bytes than success.
718 718 pad pad pad pad............................................................
719 719 @@ -9,7 +9,7 @@
720 720 pad pad pad pad............................................................
721 721 pad pad pad pad............................................................
722 722 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
723 723 - saved backup bundle to $TESTTMP/foo.hg
724 724 + saved backup bundle to $TESTTMP/foo.hg* (glob)
725 725 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
726 726 saved backup bundle to $TESTTMP/foo.hg* (glob)
727 727 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
728 728 Accept this change? [n] ..
729 729 # Ran 2 tests, 0 skipped, 0 failed.
730 730
731 731 $ sed -e 's,(glob)$,&<,g' test-failure.t
732 732 $ echo babar
733 733 babar
734 734 This is a noop statement so that
735 735 this test is still more bytes than success.
736 736 pad pad pad pad............................................................
737 737 pad pad pad pad............................................................
738 738 pad pad pad pad............................................................
739 739 pad pad pad pad............................................................
740 740 pad pad pad pad............................................................
741 741 pad pad pad pad............................................................
742 742 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
743 743 saved backup bundle to $TESTTMP/foo.hg (glob)<
744 744 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
745 745 saved backup bundle to $TESTTMP/foo.hg (glob)<
746 746 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
747 747 saved backup bundle to $TESTTMP/*.hg (glob)<
748 748
749 749 Race condition - test file was modified when test is running
750 750
751 751 $ TESTRACEDIR=`pwd`
752 752 $ export TESTRACEDIR
753 753 $ cat > test-race.t <<EOF
754 754 > $ echo 1
755 755 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
756 756 > EOF
757 757
758 758 $ rt -i test-race.t
759 759
760 760 --- $TESTTMP/test-race.t
761 761 +++ $TESTTMP/test-race.t.err
762 762 @@ -1,2 +1,3 @@
763 763 $ echo 1
764 764 + 1
765 765 $ echo "# a new line" >> $TESTTMP/test-race.t
766 766 Reference output has changed (run again to prompt changes)
767 767 ERROR: test-race.t output changed
768 768 !
769 769 Failed test-race.t: output changed
770 770 # Ran 1 tests, 0 skipped, 1 failed.
771 771 python hash seed: * (glob)
772 772 [1]
773 773
774 774 $ rm test-race.t
775 775
776 776 When "#testcases" is used in .t files
777 777
778 778 $ cat >> test-cases.t <<EOF
779 779 > #testcases a b
780 780 > #if a
781 781 > $ echo 1
782 782 > #endif
783 783 > #if b
784 784 > $ echo 2
785 785 > #endif
786 786 > EOF
787 787
788 788 $ cat <<EOF | rt -i test-cases.t 2>&1
789 789 > y
790 790 > y
791 791 > EOF
792 792
793 793 --- $TESTTMP/test-cases.t
794 794 +++ $TESTTMP/test-cases.t.a.err
795 795 @@ -1,6 +1,7 @@
796 796 #testcases a b
797 797 #if a
798 798 $ echo 1
799 799 + 1
800 800 #endif
801 801 #if b
802 802 $ echo 2
803 803 Accept this change? [n] .
804 804 --- $TESTTMP/test-cases.t
805 805 +++ $TESTTMP/test-cases.t.b.err
806 806 @@ -5,4 +5,5 @@
807 807 #endif
808 808 #if b
809 809 $ echo 2
810 810 + 2
811 811 #endif
812 812 Accept this change? [n] .
813 813 # Ran 2 tests, 0 skipped, 0 failed.
814 814
815 815 $ cat test-cases.t
816 816 #testcases a b
817 817 #if a
818 818 $ echo 1
819 819 1
820 820 #endif
821 821 #if b
822 822 $ echo 2
823 823 2
824 824 #endif
825 825
826 826 $ cat >> test-cases.t <<'EOF'
827 827 > #if a
828 828 > $ NAME=A
829 829 > #else
830 830 > $ NAME=B
831 831 > #endif
832 832 > $ echo $NAME
833 833 > A (a !)
834 834 > B (b !)
835 835 > EOF
836 836 $ rt test-cases.t
837 837 ..
838 838 # Ran 2 tests, 0 skipped, 0 failed.
839 839
840 840 $ rm test-cases.t
841 841
842 842 (reinstall)
843 843 $ mv backup test-failure.t
844 844
845 845 No Diff
846 846 ===============
847 847
848 848 $ rt --nodiff
849 849 !.
850 850 Failed test-failure.t: output changed
851 851 # Ran 2 tests, 0 skipped, 1 failed.
852 852 python hash seed: * (glob)
853 853 [1]
854 854
855 855 test --tmpdir support
856 856 $ rt --tmpdir=$TESTTMP/keep test-success.t
857 857
858 858 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
859 859 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
860 860 .
861 861 # Ran 1 tests, 0 skipped, 0 failed.
862 862
863 863 timeouts
864 864 ========
865 865 $ cat > test-timeout.t <<EOF
866 866 > $ sleep 2
867 867 > $ echo pass
868 868 > pass
869 869 > EOF
870 870 > echo '#require slow' > test-slow-timeout.t
871 871 > cat test-timeout.t >> test-slow-timeout.t
872 872 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
873 873 st
874 874 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
875 875 Failed test-timeout.t: timed out
876 876 # Ran 1 tests, 1 skipped, 1 failed.
877 877 python hash seed: * (glob)
878 878 [1]
879 879 $ rt --timeout=1 --slowtimeout=3 \
880 880 > test-timeout.t test-slow-timeout.t --allow-slow-tests
881 881 .t
882 882 Failed test-timeout.t: timed out
883 883 # Ran 2 tests, 0 skipped, 1 failed.
884 884 python hash seed: * (glob)
885 885 [1]
886 886 $ rm test-timeout.t test-slow-timeout.t
887 887
888 888 test for --time
889 889 ==================
890 890
891 891 $ rt test-success.t --time
892 892 .
893 893 # Ran 1 tests, 0 skipped, 0 failed.
894 894 # Producing time report
895 895 start end cuser csys real Test
896 896 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
897 897
898 898 test for --time with --job enabled
899 899 ====================================
900 900
901 901 $ rt test-success.t --time --jobs 2
902 902 .
903 903 # Ran 1 tests, 0 skipped, 0 failed.
904 904 # Producing time report
905 905 start end cuser csys real Test
906 906 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
907 907
908 908 Skips
909 909 ================
910 910 $ cat > test-skip.t <<EOF
911 911 > $ echo xyzzy
912 912 > #require false
913 913 > EOF
914 914 $ rt --nodiff
915 915 !.s
916 916 Skipped test-skip.t: missing feature: nail clipper
917 917 Failed test-failure.t: output changed
918 918 # Ran 2 tests, 1 skipped, 1 failed.
919 919 python hash seed: * (glob)
920 920 [1]
921 921
922 922 $ rt --keyword xyzzy
923 923 .s
924 924 Skipped test-skip.t: missing feature: nail clipper
925 925 # Ran 2 tests, 2 skipped, 0 failed.
926 926
927 927 Skips with xml
928 928 $ rt --keyword xyzzy \
929 929 > --xunit=xunit.xml
930 930 .s
931 931 Skipped test-skip.t: missing feature: nail clipper
932 932 # Ran 2 tests, 2 skipped, 0 failed.
933 933 $ cat xunit.xml
934 934 <?xml version="1.0" encoding="utf-8"?>
935 935 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
936 936 <testcase name="test-success.t" time="*"/> (glob)
937 937 <testcase name="test-skip.t">
938 938 <skipped>
939 939 <![CDATA[missing feature: nail clipper]]> </skipped>
940 940 </testcase>
941 941 </testsuite>
942 942
943 943 Missing skips or blacklisted skips don't count as executed:
944 944 $ echo test-failure.t > blacklist
945 945 $ rt --blacklist=blacklist --json\
946 946 > test-failure.t test-bogus.t
947 947 ss
948 948 Skipped test-bogus.t: Doesn't exist
949 949 Skipped test-failure.t: blacklisted
950 950 # Ran 0 tests, 2 skipped, 0 failed.
951 951 $ cat report.json
952 952 testreport ={
953 953 "test-bogus.t": {
954 954 "result": "skip"
955 955 },
956 956 "test-failure.t": {
957 957 "result": "skip"
958 958 }
959 959 } (no-eol)
960 960
961 961 Whitelist trumps blacklist
962 962 $ echo test-failure.t > whitelist
963 963 $ rt --blacklist=blacklist --whitelist=whitelist --json\
964 964 > test-failure.t test-bogus.t
965 965 s
966 966 --- $TESTTMP/test-failure.t
967 967 +++ $TESTTMP/test-failure.t.err
968 968 @@ -1,5 +1,5 @@
969 969 $ echo babar
970 970 - rataxes
971 971 + babar
972 972 This is a noop statement so that
973 973 this test is still more bytes than success.
974 974 pad pad pad pad............................................................
975 975
976 976 ERROR: test-failure.t output changed
977 977 !
978 978 Skipped test-bogus.t: Doesn't exist
979 979 Failed test-failure.t: output changed
980 980 # Ran 1 tests, 1 skipped, 1 failed.
981 981 python hash seed: * (glob)
982 982 [1]
983 983
984 Ensure that --test-list causes only the tests listed in that file to
985 be executed.
986 $ echo test-success.t >> onlytest
987 $ rt --test-list=onlytest
988 .
989 # Ran 1 tests, 0 skipped, 0 failed.
990 $ echo test-bogus.t >> anothertest
991 $ rt --test-list=onlytest --test-list=anothertest
992 s.
993 Skipped test-bogus.t: Doesn't exist
994 # Ran 1 tests, 1 skipped, 0 failed.
995 $ rm onlytest anothertest
996
984 997 test for --json
985 998 ==================
986 999
987 1000 $ rt --json
988 1001
989 1002 --- $TESTTMP/test-failure.t
990 1003 +++ $TESTTMP/test-failure.t.err
991 1004 @@ -1,5 +1,5 @@
992 1005 $ echo babar
993 1006 - rataxes
994 1007 + babar
995 1008 This is a noop statement so that
996 1009 this test is still more bytes than success.
997 1010 pad pad pad pad............................................................
998 1011
999 1012 ERROR: test-failure.t output changed
1000 1013 !.s
1001 1014 Skipped test-skip.t: missing feature: nail clipper
1002 1015 Failed test-failure.t: output changed
1003 1016 # Ran 2 tests, 1 skipped, 1 failed.
1004 1017 python hash seed: * (glob)
1005 1018 [1]
1006 1019
1007 1020 $ cat report.json
1008 1021 testreport ={
1009 1022 "test-failure.t": [\{] (re)
1010 1023 "csys": "\s*[\d\.]{4,5}", ? (re)
1011 1024 "cuser": "\s*[\d\.]{4,5}", ? (re)
1012 1025 "diff": "---.+\+\+\+.+", ? (re)
1013 1026 "end": "\s*[\d\.]{4,5}", ? (re)
1014 1027 "result": "failure", ? (re)
1015 1028 "start": "\s*[\d\.]{4,5}", ? (re)
1016 1029 "time": "\s*[\d\.]{4,5}" (re)
1017 1030 }, ? (re)
1018 1031 "test-skip.t": {
1019 1032 "csys": "\s*[\d\.]{4,5}", ? (re)
1020 1033 "cuser": "\s*[\d\.]{4,5}", ? (re)
1021 1034 "diff": "", ? (re)
1022 1035 "end": "\s*[\d\.]{4,5}", ? (re)
1023 1036 "result": "skip", ? (re)
1024 1037 "start": "\s*[\d\.]{4,5}", ? (re)
1025 1038 "time": "\s*[\d\.]{4,5}" (re)
1026 1039 }, ? (re)
1027 1040 "test-success.t": [\{] (re)
1028 1041 "csys": "\s*[\d\.]{4,5}", ? (re)
1029 1042 "cuser": "\s*[\d\.]{4,5}", ? (re)
1030 1043 "diff": "", ? (re)
1031 1044 "end": "\s*[\d\.]{4,5}", ? (re)
1032 1045 "result": "success", ? (re)
1033 1046 "start": "\s*[\d\.]{4,5}", ? (re)
1034 1047 "time": "\s*[\d\.]{4,5}" (re)
1035 1048 }
1036 1049 } (no-eol)
1037 1050 --json with --outputdir
1038 1051
1039 1052 $ rm report.json
1040 1053 $ rm -r output
1041 1054 $ mkdir output
1042 1055 $ rt --json --outputdir output
1043 1056
1044 1057 --- $TESTTMP/test-failure.t
1045 1058 +++ $TESTTMP/output/test-failure.t.err
1046 1059 @@ -1,5 +1,5 @@
1047 1060 $ echo babar
1048 1061 - rataxes
1049 1062 + babar
1050 1063 This is a noop statement so that
1051 1064 this test is still more bytes than success.
1052 1065 pad pad pad pad............................................................
1053 1066
1054 1067 ERROR: test-failure.t output changed
1055 1068 !.s
1056 1069 Skipped test-skip.t: missing feature: nail clipper
1057 1070 Failed test-failure.t: output changed
1058 1071 # Ran 2 tests, 1 skipped, 1 failed.
1059 1072 python hash seed: * (glob)
1060 1073 [1]
1061 1074 $ f report.json
1062 1075 report.json: file not found
1063 1076 $ cat output/report.json
1064 1077 testreport ={
1065 1078 "test-failure.t": [\{] (re)
1066 1079 "csys": "\s*[\d\.]{4,5}", ? (re)
1067 1080 "cuser": "\s*[\d\.]{4,5}", ? (re)
1068 1081 "diff": "---.+\+\+\+.+", ? (re)
1069 1082 "end": "\s*[\d\.]{4,5}", ? (re)
1070 1083 "result": "failure", ? (re)
1071 1084 "start": "\s*[\d\.]{4,5}", ? (re)
1072 1085 "time": "\s*[\d\.]{4,5}" (re)
1073 1086 }, ? (re)
1074 1087 "test-skip.t": {
1075 1088 "csys": "\s*[\d\.]{4,5}", ? (re)
1076 1089 "cuser": "\s*[\d\.]{4,5}", ? (re)
1077 1090 "diff": "", ? (re)
1078 1091 "end": "\s*[\d\.]{4,5}", ? (re)
1079 1092 "result": "skip", ? (re)
1080 1093 "start": "\s*[\d\.]{4,5}", ? (re)
1081 1094 "time": "\s*[\d\.]{4,5}" (re)
1082 1095 }, ? (re)
1083 1096 "test-success.t": [\{] (re)
1084 1097 "csys": "\s*[\d\.]{4,5}", ? (re)
1085 1098 "cuser": "\s*[\d\.]{4,5}", ? (re)
1086 1099 "diff": "", ? (re)
1087 1100 "end": "\s*[\d\.]{4,5}", ? (re)
1088 1101 "result": "success", ? (re)
1089 1102 "start": "\s*[\d\.]{4,5}", ? (re)
1090 1103 "time": "\s*[\d\.]{4,5}" (re)
1091 1104 }
1092 1105 } (no-eol)
1093 1106 $ ls -a output
1094 1107 .
1095 1108 ..
1096 1109 .testtimes
1097 1110 report.json
1098 1111 test-failure.t.err
1099 1112
1100 1113 Test that failed test accepted through interactive are properly reported:
1101 1114
1102 1115 $ cp test-failure.t backup
1103 1116 $ echo y | rt --json -i
1104 1117
1105 1118 --- $TESTTMP/test-failure.t
1106 1119 +++ $TESTTMP/test-failure.t.err
1107 1120 @@ -1,5 +1,5 @@
1108 1121 $ echo babar
1109 1122 - rataxes
1110 1123 + babar
1111 1124 This is a noop statement so that
1112 1125 this test is still more bytes than success.
1113 1126 pad pad pad pad............................................................
1114 1127 Accept this change? [n] ..s
1115 1128 Skipped test-skip.t: missing feature: nail clipper
1116 1129 # Ran 2 tests, 1 skipped, 0 failed.
1117 1130
1118 1131 $ cat report.json
1119 1132 testreport ={
1120 1133 "test-failure.t": [\{] (re)
1121 1134 "csys": "\s*[\d\.]{4,5}", ? (re)
1122 1135 "cuser": "\s*[\d\.]{4,5}", ? (re)
1123 1136 "diff": "", ? (re)
1124 1137 "end": "\s*[\d\.]{4,5}", ? (re)
1125 1138 "result": "success", ? (re)
1126 1139 "start": "\s*[\d\.]{4,5}", ? (re)
1127 1140 "time": "\s*[\d\.]{4,5}" (re)
1128 1141 }, ? (re)
1129 1142 "test-skip.t": {
1130 1143 "csys": "\s*[\d\.]{4,5}", ? (re)
1131 1144 "cuser": "\s*[\d\.]{4,5}", ? (re)
1132 1145 "diff": "", ? (re)
1133 1146 "end": "\s*[\d\.]{4,5}", ? (re)
1134 1147 "result": "skip", ? (re)
1135 1148 "start": "\s*[\d\.]{4,5}", ? (re)
1136 1149 "time": "\s*[\d\.]{4,5}" (re)
1137 1150 }, ? (re)
1138 1151 "test-success.t": [\{] (re)
1139 1152 "csys": "\s*[\d\.]{4,5}", ? (re)
1140 1153 "cuser": "\s*[\d\.]{4,5}", ? (re)
1141 1154 "diff": "", ? (re)
1142 1155 "end": "\s*[\d\.]{4,5}", ? (re)
1143 1156 "result": "success", ? (re)
1144 1157 "start": "\s*[\d\.]{4,5}", ? (re)
1145 1158 "time": "\s*[\d\.]{4,5}" (re)
1146 1159 }
1147 1160 } (no-eol)
1148 1161 $ mv backup test-failure.t
1149 1162
1150 1163 backslash on end of line with glob matching is handled properly
1151 1164
1152 1165 $ cat > test-glob-backslash.t << EOF
1153 1166 > $ echo 'foo bar \\'
1154 1167 > foo * \ (glob)
1155 1168 > EOF
1156 1169
1157 1170 $ rt test-glob-backslash.t
1158 1171 .
1159 1172 # Ran 1 tests, 0 skipped, 0 failed.
1160 1173
1161 1174 $ rm -f test-glob-backslash.t
1162 1175
1163 1176 Test globbing of local IP addresses
1164 1177 $ echo 172.16.18.1
1165 1178 $LOCALIP (glob)
1166 1179 $ echo dead:beef::1
1167 1180 $LOCALIP (glob)
1168 1181
1169 1182 Test reusability for third party tools
1170 1183 ======================================
1171 1184
1172 1185 $ mkdir "$TESTTMP"/anothertests
1173 1186 $ cd "$TESTTMP"/anothertests
1174 1187
1175 1188 test that `run-tests.py` can execute hghave, even if it runs not in
1176 1189 Mercurial source tree.
1177 1190
1178 1191 $ cat > test-hghave.t <<EOF
1179 1192 > #require true
1180 1193 > $ echo foo
1181 1194 > foo
1182 1195 > EOF
1183 1196 $ rt test-hghave.t
1184 1197 .
1185 1198 # Ran 1 tests, 0 skipped, 0 failed.
1186 1199
1187 1200 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1188 1201 running is placed.
1189 1202
1190 1203 $ cat > test-runtestdir.t <<EOF
1191 1204 > - $TESTDIR, in which test-run-tests.t is placed
1192 1205 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1193 1206 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1194 1207 >
1195 1208 > #if windows
1196 1209 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1197 1210 > #else
1198 1211 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1199 1212 > #endif
1200 1213 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1201 1214 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1202 1215 > #!USRBINENVPY
1203 1216 > #
1204 1217 > # check-code - a style and portability checker for Mercurial
1205 1218 > EOF
1206 1219 $ rt test-runtestdir.t
1207 1220 .
1208 1221 # Ran 1 tests, 0 skipped, 0 failed.
1209 1222
1210 1223 #if execbit
1211 1224
1212 1225 test that TESTDIR is referred in PATH
1213 1226
1214 1227 $ cat > custom-command.sh <<EOF
1215 1228 > #!/bin/sh
1216 1229 > echo "hello world"
1217 1230 > EOF
1218 1231 $ chmod +x custom-command.sh
1219 1232 $ cat > test-testdir-path.t <<EOF
1220 1233 > $ custom-command.sh
1221 1234 > hello world
1222 1235 > EOF
1223 1236 $ rt test-testdir-path.t
1224 1237 .
1225 1238 # Ran 1 tests, 0 skipped, 0 failed.
1226 1239
1227 1240 #endif
1228 1241
1229 1242 test support for --allow-slow-tests
1230 1243 $ cat > test-very-slow-test.t <<EOF
1231 1244 > #require slow
1232 1245 > $ echo pass
1233 1246 > pass
1234 1247 > EOF
1235 1248 $ rt test-very-slow-test.t
1236 1249 s
1237 1250 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1238 1251 # Ran 0 tests, 1 skipped, 0 failed.
1239 1252 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1240 1253 .
1241 1254 # Ran 1 tests, 0 skipped, 0 failed.
1242 1255
1243 1256 support for running a test outside the current directory
1244 1257 $ mkdir nonlocal
1245 1258 $ cat > nonlocal/test-is-not-here.t << EOF
1246 1259 > $ echo pass
1247 1260 > pass
1248 1261 > EOF
1249 1262 $ rt nonlocal/test-is-not-here.t
1250 1263 .
1251 1264 # Ran 1 tests, 0 skipped, 0 failed.
1252 1265
1253 1266 support for bisecting failed tests automatically
1254 1267 $ hg init bisect
1255 1268 $ cd bisect
1256 1269 $ cat >> test-bisect.t <<EOF
1257 1270 > $ echo pass
1258 1271 > pass
1259 1272 > EOF
1260 1273 $ hg add test-bisect.t
1261 1274 $ hg ci -m 'good'
1262 1275 $ cat >> test-bisect.t <<EOF
1263 1276 > $ echo pass
1264 1277 > fail
1265 1278 > EOF
1266 1279 $ hg ci -m 'bad'
1267 1280 $ rt --known-good-rev=0 test-bisect.t
1268 1281
1269 1282 --- $TESTTMP/anothertests/bisect/test-bisect.t
1270 1283 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1271 1284 @@ -1,4 +1,4 @@
1272 1285 $ echo pass
1273 1286 pass
1274 1287 $ echo pass
1275 1288 - fail
1276 1289 + pass
1277 1290
1278 1291 ERROR: test-bisect.t output changed
1279 1292 !
1280 1293 Failed test-bisect.t: output changed
1281 1294 test-bisect.t broken by 72cbf122d116 (bad)
1282 1295 # Ran 1 tests, 0 skipped, 1 failed.
1283 1296 python hash seed: * (glob)
1284 1297 [1]
1285 1298
1286 1299 $ cd ..
1287 1300
1288 1301 support bisecting a separate repo
1289 1302
1290 1303 $ hg init bisect-dependent
1291 1304 $ cd bisect-dependent
1292 1305 $ cat > test-bisect-dependent.t <<EOF
1293 1306 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1294 1307 > pass
1295 1308 > EOF
1296 1309 $ hg commit -Am dependent test-bisect-dependent.t
1297 1310
1298 1311 $ rt --known-good-rev=0 test-bisect-dependent.t
1299 1312
1300 1313 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1301 1314 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1302 1315 @@ -1,2 +1,2 @@
1303 1316 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1304 1317 - pass
1305 1318 + fail
1306 1319
1307 1320 ERROR: test-bisect-dependent.t output changed
1308 1321 !
1309 1322 Failed test-bisect-dependent.t: output changed
1310 1323 Failed to identify failure point for test-bisect-dependent.t
1311 1324 # Ran 1 tests, 0 skipped, 1 failed.
1312 1325 python hash seed: * (glob)
1313 1326 [1]
1314 1327
1315 1328 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1316 1329 Usage: run-tests.py [options] [tests]
1317 1330
1318 1331 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1319 1332 [2]
1320 1333
1321 1334 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1322 1335
1323 1336 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1324 1337 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1325 1338 @@ -1,2 +1,2 @@
1326 1339 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1327 1340 - pass
1328 1341 + fail
1329 1342
1330 1343 ERROR: test-bisect-dependent.t output changed
1331 1344 !
1332 1345 Failed test-bisect-dependent.t: output changed
1333 1346 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1334 1347 # Ran 1 tests, 0 skipped, 1 failed.
1335 1348 python hash seed: * (glob)
1336 1349 [1]
1337 1350
1338 1351 $ cd ..
1339 1352
1340 1353 Test a broken #if statement doesn't break run-tests threading.
1341 1354 ==============================================================
1342 1355 $ mkdir broken
1343 1356 $ cd broken
1344 1357 $ cat > test-broken.t <<EOF
1345 1358 > true
1346 1359 > #if notarealhghavefeature
1347 1360 > $ false
1348 1361 > #endif
1349 1362 > EOF
1350 1363 $ for f in 1 2 3 4 ; do
1351 1364 > cat > test-works-$f.t <<EOF
1352 1365 > This is test case $f
1353 1366 > $ sleep 1
1354 1367 > EOF
1355 1368 > done
1356 1369 $ rt -j 2
1357 1370 ....
1358 1371 # Ran 5 tests, 0 skipped, 0 failed.
1359 1372 skipped: unknown feature: notarealhghavefeature
1360 1373
1361 1374 $ cd ..
1362 1375 $ rm -rf broken
1363 1376
1364 1377 Test cases in .t files
1365 1378 ======================
1366 1379 $ mkdir cases
1367 1380 $ cd cases
1368 1381 $ cat > test-cases-abc.t <<'EOF'
1369 1382 > #testcases A B C
1370 1383 > $ V=B
1371 1384 > #if A
1372 1385 > $ V=A
1373 1386 > #endif
1374 1387 > #if C
1375 1388 > $ V=C
1376 1389 > #endif
1377 1390 > $ echo $V | sed 's/A/C/'
1378 1391 > C
1379 1392 > #if C
1380 1393 > $ [ $V = C ]
1381 1394 > #endif
1382 1395 > #if A
1383 1396 > $ [ $V = C ]
1384 1397 > [1]
1385 1398 > #endif
1386 1399 > #if no-C
1387 1400 > $ [ $V = C ]
1388 1401 > [1]
1389 1402 > #endif
1390 1403 > $ [ $V = D ]
1391 1404 > [1]
1392 1405 > EOF
1393 1406 $ rt
1394 1407 .
1395 1408 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1396 1409 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1397 1410 @@ -7,7 +7,7 @@
1398 1411 $ V=C
1399 1412 #endif
1400 1413 $ echo $V | sed 's/A/C/'
1401 1414 - C
1402 1415 + B
1403 1416 #if C
1404 1417 $ [ $V = C ]
1405 1418 #endif
1406 1419
1407 1420 ERROR: test-cases-abc.t (case B) output changed
1408 1421 !.
1409 1422 Failed test-cases-abc.t (case B): output changed
1410 1423 # Ran 3 tests, 0 skipped, 1 failed.
1411 1424 python hash seed: * (glob)
1412 1425 [1]
1413 1426
1414 1427 --restart works
1415 1428
1416 1429 $ rt --restart
1417 1430
1418 1431 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1419 1432 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1420 1433 @@ -7,7 +7,7 @@
1421 1434 $ V=C
1422 1435 #endif
1423 1436 $ echo $V | sed 's/A/C/'
1424 1437 - C
1425 1438 + B
1426 1439 #if C
1427 1440 $ [ $V = C ]
1428 1441 #endif
1429 1442
1430 1443 ERROR: test-cases-abc.t (case B) output changed
1431 1444 !.
1432 1445 Failed test-cases-abc.t (case B): output changed
1433 1446 # Ran 2 tests, 0 skipped, 1 failed.
1434 1447 python hash seed: * (glob)
1435 1448 [1]
1436 1449
1437 1450 --restart works with outputdir
1438 1451
1439 1452 $ mkdir output
1440 1453 $ mv test-cases-abc.t.B.err output
1441 1454 $ rt --restart --outputdir output
1442 1455
1443 1456 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1444 1457 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1445 1458 @@ -7,7 +7,7 @@
1446 1459 $ V=C
1447 1460 #endif
1448 1461 $ echo $V | sed 's/A/C/'
1449 1462 - C
1450 1463 + B
1451 1464 #if C
1452 1465 $ [ $V = C ]
1453 1466 #endif
1454 1467
1455 1468 ERROR: test-cases-abc.t (case B) output changed
1456 1469 !.
1457 1470 Failed test-cases-abc.t (case B): output changed
1458 1471 # Ran 2 tests, 0 skipped, 1 failed.
1459 1472 python hash seed: * (glob)
1460 1473 [1]
General Comments 0
You need to be logged in to leave comments. Login now