run-tests: add a way to list tests, with JSON and XUnit support...
Siddharth Agarwal
r32704:1270b00a default
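The new --list-tests option prints the names of the selected tests instead of running them; per the commit message it can also be combined with the existing --json and --xunit options so the listing is written to report.json or to an xunit file. A usage sketch, in the style of the examples in the script header (the test pattern and xunit path are illustrative, not taken from this change):

  ./run-tests.py --list-tests test-s*
  ./run-tests.py --list-tests --json --xunit tests.xml test-s*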
@@ -1,2748 +1,2772 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
73 73 if os.environ.get('RTUNICODEPEDANTRY', False):
74 74 try:
75 75 reload(sys)
76 76 sys.setdefaultencoding("undefined")
77 77 except NameError:
78 78 pass
79 79
80 80 osenvironb = getattr(os, 'environb', os.environ)
81 81 processlock = threading.Lock()
82 82
83 83 if sys.version_info > (3, 5, 0):
84 84 PYTHON3 = True
85 85 xrange = range # we use xrange in one place, and we'd rather not use range
86 86 def _bytespath(p):
87 87 return p.encode('utf-8')
88 88
89 89 def _strpath(p):
90 90 return p.decode('utf-8')
91 91
92 92 elif sys.version_info >= (3, 0, 0):
93 93 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
94 94 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
95 95 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
96 96 else:
97 97 PYTHON3 = False
98 98
99 99 # In python 2.x, path operations are generally done using
100 100 # bytestrings by default, so we don't have to do any extra
101 101 # fiddling there. We define the wrapper functions anyway just to
102 102 # help keep code consistent between platforms.
103 103 def _bytespath(p):
104 104 return p
105 105
106 106 _strpath = _bytespath
107 107
108 108 # For Windows support
109 109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
110 110
111 111 # Whether to use IPv6
112 112 def checksocketfamily(name, port=20058):
113 113 """return true if we can listen on localhost using family=name
114 114
115 115 name should be either 'AF_INET', or 'AF_INET6'.
116 116 the port already being in use is okay - EADDRINUSE is considered successful.
117 117 """
118 118 family = getattr(socket, name, None)
119 119 if family is None:
120 120 return False
121 121 try:
122 122 s = socket.socket(family, socket.SOCK_STREAM)
123 123 s.bind(('localhost', port))
124 124 s.close()
125 125 return True
126 126 except socket.error as exc:
127 127 if exc.errno == errno.EADDRINUSE:
128 128 return True
129 129 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
130 130 return False
131 131 else:
132 132 raise
133 133 else:
134 134 return False
135 135
136 136 # useipv6 will be set by parseargs
137 137 useipv6 = None
138 138
139 139 def checkportisavailable(port):
140 140 """return true if a port seems free to bind on localhost"""
141 141 if useipv6:
142 142 family = socket.AF_INET6
143 143 else:
144 144 family = socket.AF_INET
145 145 try:
146 146 s = socket.socket(family, socket.SOCK_STREAM)
147 147 s.bind(('localhost', port))
148 148 s.close()
149 149 return True
150 150 except socket.error as exc:
151 151 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
152 152 errno.EPROTONOSUPPORT):
153 153 raise
154 154 return False
155 155
156 156 closefds = os.name == 'posix'
157 157 def Popen4(cmd, wd, timeout, env=None):
158 158 processlock.acquire()
159 159 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
160 160 close_fds=closefds,
161 161 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
162 162 stderr=subprocess.STDOUT)
163 163 processlock.release()
164 164
165 165 p.fromchild = p.stdout
166 166 p.tochild = p.stdin
167 167 p.childerr = p.stderr
168 168
169 169 p.timeout = False
170 170 if timeout:
171 171 def t():
172 172 start = time.time()
173 173 while time.time() - start < timeout and p.returncode is None:
174 174 time.sleep(.1)
175 175 p.timeout = True
176 176 if p.returncode is None:
177 177 terminate(p)
178 178 threading.Thread(target=t).start()
179 179
180 180 return p
181 181
182 182 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
183 183 IMPL_PATH = b'PYTHONPATH'
184 184 if 'java' in sys.platform:
185 185 IMPL_PATH = b'JYTHONPATH'
186 186
187 187 defaults = {
188 188 'jobs': ('HGTEST_JOBS', 1),
189 189 'timeout': ('HGTEST_TIMEOUT', 180),
190 190 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
191 191 'port': ('HGTEST_PORT', 20059),
192 192 'shell': ('HGTEST_SHELL', 'sh'),
193 193 }
194 194
195 195 def canonpath(path):
196 196 return os.path.realpath(os.path.expanduser(path))
197 197
198 198 def parselistfiles(files, listtype, warn=True):
199 199 entries = dict()
200 200 for filename in files:
201 201 try:
202 202 path = os.path.expanduser(os.path.expandvars(filename))
203 203 f = open(path, "rb")
204 204 except IOError as err:
205 205 if err.errno != errno.ENOENT:
206 206 raise
207 207 if warn:
208 208 print("warning: no such %s file: %s" % (listtype, filename))
209 209 continue
210 210
211 211 for line in f.readlines():
212 212 line = line.split(b'#', 1)[0].strip()
213 213 if line:
214 214 entries[line] = filename
215 215
216 216 f.close()
217 217 return entries
218 218
219 219 def parsettestcases(path):
220 220 """read a .t test file, return a set of test case names
221 221
222 222 If path does not exist, return an empty set.
223 223 """
224 224 cases = set()
225 225 try:
226 226 with open(path, 'rb') as f:
227 227 for l in f:
228 228 if l.startswith(b'#testcases '):
229 229 cases.update(l[11:].split())
230 230 except IOError as ex:
231 231 if ex.errno != errno.ENOENT:
232 232 raise
233 233 return cases
234 234
235 235 def getparser():
236 236 """Obtain the OptionParser used by the CLI."""
237 237 parser = optparse.OptionParser("%prog [options] [tests]")
238 238
239 239 # keep these sorted
240 240 parser.add_option("--blacklist", action="append",
241 241 help="skip tests listed in the specified blacklist file")
242 242 parser.add_option("--whitelist", action="append",
243 243 help="always run tests listed in the specified whitelist file")
244 244 parser.add_option("--changed", type="string",
245 245 help="run tests that are changed in parent rev or working directory")
246 246 parser.add_option("-C", "--annotate", action="store_true",
247 247 help="output files annotated with coverage")
248 248 parser.add_option("-c", "--cover", action="store_true",
249 249 help="print a test coverage report")
250 250 parser.add_option("-d", "--debug", action="store_true",
251 251 help="debug mode: write output of test scripts to console"
252 252 " rather than capturing and diffing it (disables timeout)")
253 253 parser.add_option("-f", "--first", action="store_true",
254 254 help="exit on the first test failure")
255 255 parser.add_option("-H", "--htmlcov", action="store_true",
256 256 help="create an HTML report of the coverage of the files")
257 257 parser.add_option("-i", "--interactive", action="store_true",
258 258 help="prompt to accept changed output")
259 259 parser.add_option("-j", "--jobs", type="int",
260 260 help="number of jobs to run in parallel"
261 261 " (default: $%s or %d)" % defaults['jobs'])
262 262 parser.add_option("--keep-tmpdir", action="store_true",
263 263 help="keep temporary directory after running tests")
264 264 parser.add_option("-k", "--keywords",
265 265 help="run tests matching keywords")
266 parser.add_option("--list-tests", action="store_true",
267 help="list tests instead of running them")
266 268 parser.add_option("-l", "--local", action="store_true",
267 269 help="shortcut for --with-hg=<testdir>/../hg, "
268 270 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
269 271 parser.add_option("--loop", action="store_true",
270 272 help="loop tests repeatedly")
271 273 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
272 274 help="run each test N times (default=1)", default=1)
273 275 parser.add_option("-n", "--nodiff", action="store_true",
274 276 help="skip showing test changes")
275 277 parser.add_option("-p", "--port", type="int",
276 278 help="port on which servers should listen"
277 279 " (default: $%s or %d)" % defaults['port'])
278 280 parser.add_option("--compiler", type="string",
279 281 help="compiler to build with")
280 282 parser.add_option("--pure", action="store_true",
281 283 help="use pure Python code instead of C extensions")
282 284 parser.add_option("-R", "--restart", action="store_true",
283 285 help="restart at last error")
284 286 parser.add_option("-r", "--retest", action="store_true",
285 287 help="retest failed tests")
286 288 parser.add_option("-S", "--noskips", action="store_true",
287 289 help="don't report skipped tests verbosely")
288 290 parser.add_option("--shell", type="string",
289 291 help="shell to use (default: $%s or %s)" % defaults['shell'])
290 292 parser.add_option("-t", "--timeout", type="int",
291 293 help="kill errant tests after TIMEOUT seconds"
292 294 " (default: $%s or %d)" % defaults['timeout'])
293 295 parser.add_option("--slowtimeout", type="int",
294 296 help="kill errant slow tests after SLOWTIMEOUT seconds"
295 297 " (default: $%s or %d)" % defaults['slowtimeout'])
296 298 parser.add_option("--time", action="store_true",
297 299 help="time how long each test takes")
298 300 parser.add_option("--json", action="store_true",
299 301 help="store test result data in 'report.json' file")
300 302 parser.add_option("--tmpdir", type="string",
301 303 help="run tests in the given temporary directory"
302 304 " (implies --keep-tmpdir)")
303 305 parser.add_option("-v", "--verbose", action="store_true",
304 306 help="output verbose messages")
305 307 parser.add_option("--xunit", type="string",
306 308 help="record xunit results at specified path")
307 309 parser.add_option("--view", type="string",
308 310 help="external diff viewer")
309 311 parser.add_option("--with-hg", type="string",
310 312 metavar="HG",
311 313 help="test using specified hg script rather than a "
312 314 "temporary installation")
313 315 parser.add_option("--chg", action="store_true",
314 316 help="install and use chg wrapper in place of hg")
315 317 parser.add_option("--with-chg", metavar="CHG",
316 318 help="use specified chg wrapper in place of hg")
317 319 parser.add_option("--ipv6", action="store_true",
318 320 help="prefer IPv6 to IPv4 for network related tests")
319 321 parser.add_option("-3", "--py3k-warnings", action="store_true",
320 322 help="enable Py3k warnings on Python 2.7+")
321 323 # This option should be deleted once test-check-py3-compat.t and other
322 324 # Python 3 tests run with Python 3.
323 325 parser.add_option("--with-python3", metavar="PYTHON3",
324 326 help="Python 3 interpreter (if running under Python 2)"
325 327 " (TEMPORARY)")
326 328 parser.add_option('--extra-config-opt', action="append",
327 329 help='set the given config opt in the test hgrc')
328 330 parser.add_option('--random', action="store_true",
329 331 help='run tests in random order')
330 332 parser.add_option('--profile-runner', action='store_true',
331 333 help='run statprof on run-tests')
332 334 parser.add_option('--allow-slow-tests', action='store_true',
333 335 help='allow extremely slow tests')
334 336 parser.add_option('--showchannels', action='store_true',
335 337 help='show scheduling channels')
336 338 parser.add_option('--known-good-rev', type="string",
337 339 metavar="known_good_rev",
338 340 help=("Automatically bisect any failures using this "
339 341 "revision as a known-good revision."))
340 342
341 343 for option, (envvar, default) in defaults.items():
342 344 defaults[option] = type(default)(os.environ.get(envvar, default))
343 345 parser.set_defaults(**defaults)
344 346
345 347 return parser
346 348
347 349 def parseargs(args, parser):
348 350 """Parse arguments with our OptionParser and validate results."""
349 351 (options, args) = parser.parse_args(args)
350 352
351 353 # jython is always pure
352 354 if 'java' in sys.platform or '__pypy__' in sys.modules:
353 355 options.pure = True
354 356
355 357 if options.with_hg:
356 358 options.with_hg = canonpath(_bytespath(options.with_hg))
357 359 if not (os.path.isfile(options.with_hg) and
358 360 os.access(options.with_hg, os.X_OK)):
359 361 parser.error('--with-hg must specify an executable hg script')
360 362 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
361 363 sys.stderr.write('warning: --with-hg should specify an hg script\n')
362 364 if options.local:
363 365 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
364 366 reporootdir = os.path.dirname(testdir)
365 367 pathandattrs = [(b'hg', 'with_hg')]
366 368 if options.chg:
367 369 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
368 370 for relpath, attr in pathandattrs:
369 371 binpath = os.path.join(reporootdir, relpath)
370 372 if os.name != 'nt' and not os.access(binpath, os.X_OK):
371 373 parser.error('--local specified, but %r not found or '
372 374 'not executable' % binpath)
373 375 setattr(options, attr, binpath)
374 376
375 377 if (options.chg or options.with_chg) and os.name == 'nt':
376 378 parser.error('chg does not work on %s' % os.name)
377 379 if options.with_chg:
378 380 options.chg = False # no installation to temporary location
379 381 options.with_chg = canonpath(_bytespath(options.with_chg))
380 382 if not (os.path.isfile(options.with_chg) and
381 383 os.access(options.with_chg, os.X_OK)):
382 384 parser.error('--with-chg must specify a chg executable')
383 385 if options.chg and options.with_hg:
384 386 # chg shares installation location with hg
385 387 parser.error('--chg does not work when --with-hg is specified '
386 388 '(use --with-chg instead)')
387 389
388 390 global useipv6
389 391 if options.ipv6:
390 392 useipv6 = checksocketfamily('AF_INET6')
391 393 else:
392 394 # only use IPv6 if IPv4 is unavailable and IPv6 is available
393 395 useipv6 = ((not checksocketfamily('AF_INET'))
394 396 and checksocketfamily('AF_INET6'))
395 397
396 398 options.anycoverage = options.cover or options.annotate or options.htmlcov
397 399 if options.anycoverage:
398 400 try:
399 401 import coverage
400 402 covver = version.StrictVersion(coverage.__version__).version
401 403 if covver < (3, 3):
402 404 parser.error('coverage options require coverage 3.3 or later')
403 405 except ImportError:
404 406 parser.error('coverage options now require the coverage package')
405 407
406 408 if options.anycoverage and options.local:
407 409 # this needs some path mangling somewhere, I guess
408 410 parser.error("sorry, coverage options do not work when --local "
409 411 "is specified")
410 412
411 413 if options.anycoverage and options.with_hg:
412 414 parser.error("sorry, coverage options do not work when --with-hg "
413 415 "is specified")
414 416
415 417 global verbose
416 418 if options.verbose:
417 419 verbose = ''
418 420
419 421 if options.tmpdir:
420 422 options.tmpdir = canonpath(options.tmpdir)
421 423
422 424 if options.jobs < 1:
423 425 parser.error('--jobs must be positive')
424 426 if options.interactive and options.debug:
425 427 parser.error("-i/--interactive and -d/--debug are incompatible")
426 428 if options.debug:
427 429 if options.timeout != defaults['timeout']:
428 430 sys.stderr.write(
429 431 'warning: --timeout option ignored with --debug\n')
430 432 if options.slowtimeout != defaults['slowtimeout']:
431 433 sys.stderr.write(
432 434 'warning: --slowtimeout option ignored with --debug\n')
433 435 options.timeout = 0
434 436 options.slowtimeout = 0
435 437 if options.py3k_warnings:
436 438 if PYTHON3:
437 439 parser.error(
438 440 '--py3k-warnings can only be used on Python 2.7')
439 441 if options.with_python3:
440 442 if PYTHON3:
441 443 parser.error('--with-python3 cannot be used when executing with '
442 444 'Python 3')
443 445
444 446 options.with_python3 = canonpath(options.with_python3)
445 447 # Verify Python3 executable is acceptable.
446 448 proc = subprocess.Popen([options.with_python3, b'--version'],
447 449 stdout=subprocess.PIPE,
448 450 stderr=subprocess.STDOUT)
449 451 out, _err = proc.communicate()
450 452 ret = proc.wait()
451 453 if ret != 0:
452 454 parser.error('could not determine version of python 3')
453 455 if not out.startswith('Python '):
454 456 parser.error('unexpected output from python3 --version: %s' %
455 457 out)
456 458 vers = version.LooseVersion(out[len('Python '):])
457 459 if vers < version.LooseVersion('3.5.0'):
458 460 parser.error('--with-python3 version must be 3.5.0 or greater; '
459 461 'got %s' % out)
460 462
461 463 if options.blacklist:
462 464 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
463 465 if options.whitelist:
464 466 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
465 467 else:
466 468 options.whitelisted = {}
467 469
468 470 if options.showchannels:
469 471 options.nodiff = True
470 472
471 473 return (options, args)
472 474
473 475 def rename(src, dst):
474 476 """Like os.rename(), trade atomicity and opened files friendliness
475 477 for existing destination support.
476 478 """
477 479 shutil.copy(src, dst)
478 480 os.remove(src)
479 481
480 482 _unified_diff = difflib.unified_diff
481 483 if PYTHON3:
482 484 import functools
483 485 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
484 486
485 487 def getdiff(expected, output, ref, err):
486 488 servefail = False
487 489 lines = []
488 490 for line in _unified_diff(expected, output, ref, err):
489 491 if line.startswith(b'+++') or line.startswith(b'---'):
490 492 line = line.replace(b'\\', b'/')
491 493 if line.endswith(b' \n'):
492 494 line = line[:-2] + b'\n'
493 495 lines.append(line)
494 496 if not servefail and line.startswith(
495 497 b'+ abort: child process failed to start'):
496 498 servefail = True
497 499
498 500 return servefail, lines
499 501
500 502 verbose = False
501 503 def vlog(*msg):
502 504 """Log only when in verbose mode."""
503 505 if verbose is False:
504 506 return
505 507
506 508 return log(*msg)
507 509
508 510 # Bytes that break XML even in a CDATA block: control characters 0-31
509 511 # sans \t, \n and \r
510 512 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
511 513
512 514 # Match feature conditionalized output lines of the form, capturing the feature
513 515 # list in group 2, and the preceding line output in group 1:
514 516 #
515 517 # output..output (feature !)\n
516 518 optline = re.compile(b'(.+) \((.+?) !\)\n$')
517 519
518 520 def cdatasafe(data):
519 521 """Make a string safe to include in a CDATA block.
520 522
521 523 Certain control characters are illegal in a CDATA block, and
522 524 there's no way to include a ]]> in a CDATA either. This function
523 525 replaces illegal bytes with ? and adds a space between the ]] so
524 526 that it won't break the CDATA block.
525 527 """
526 528 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
527 529
528 530 def log(*msg):
529 531 """Log something to stdout.
530 532
531 533 Arguments are strings to print.
532 534 """
533 535 with iolock:
534 536 if verbose:
535 537 print(verbose, end=' ')
536 538 for m in msg:
537 539 print(m, end=' ')
538 540 print()
539 541 sys.stdout.flush()
540 542
541 543 def terminate(proc):
542 544 """Terminate subprocess"""
543 545 vlog('# Terminating process %d' % proc.pid)
544 546 try:
545 547 proc.terminate()
546 548 except OSError:
547 549 pass
548 550
549 551 def killdaemons(pidfile):
550 552 import killdaemons as killmod
551 553 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
552 554 logfn=vlog)
553 555
554 556 class Test(unittest.TestCase):
555 557 """Encapsulates a single, runnable test.
556 558
557 559 While this class conforms to the unittest.TestCase API, it differs in that
558 560 instances need to be instantiated manually. (Typically, unittest.TestCase
559 561 classes are instantiated automatically by scanning modules.)
560 562 """
561 563
562 564 # Status code reserved for skipped tests (used by hghave).
563 565 SKIPPED_STATUS = 80
564 566
565 567 def __init__(self, path, tmpdir, keeptmpdir=False,
566 568 debug=False,
567 569 timeout=defaults['timeout'],
568 570 startport=defaults['port'], extraconfigopts=None,
569 571 py3kwarnings=False, shell=None, hgcommand=None,
570 572 slowtimeout=defaults['slowtimeout'], usechg=False,
571 573 useipv6=False):
572 574 """Create a test from parameters.
573 575
574 576 path is the full path to the file defining the test.
575 577
576 578 tmpdir is the main temporary directory to use for this test.
577 579
578 580 keeptmpdir determines whether to keep the test's temporary directory
579 581 after execution. It defaults to removal (False).
580 582
581 583 debug mode will make the test execute verbosely, with unfiltered
582 584 output.
583 585
584 586 timeout controls the maximum run time of the test. It is ignored when
585 587 debug is True. See slowtimeout for tests with #require slow.
586 588
587 589 slowtimeout overrides timeout if the test has #require slow.
588 590
589 591 startport controls the starting port number to use for this test. Each
590 592 test will reserve 3 port numbers for execution. It is the caller's
591 593 responsibility to allocate a non-overlapping port range to Test
592 594 instances.
593 595
594 596 extraconfigopts is an iterable of extra hgrc config options. Values
595 597 must have the form "key=value" (something understood by hgrc). Values
596 598 of the form "foo.key=value" will result in "[foo] key=value".
597 599
598 600 py3kwarnings enables Py3k warnings.
599 601
600 602 shell is the shell to execute tests in.
601 603 """
602 604 self.path = path
603 605 self.bname = os.path.basename(path)
604 606 self.name = _strpath(self.bname)
605 607 self._testdir = os.path.dirname(path)
606 608 self._tmpname = os.path.basename(path)
607 609 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
608 610
609 611 self._threadtmp = tmpdir
610 612 self._keeptmpdir = keeptmpdir
611 613 self._debug = debug
612 614 self._timeout = timeout
613 615 self._slowtimeout = slowtimeout
614 616 self._startport = startport
615 617 self._extraconfigopts = extraconfigopts or []
616 618 self._py3kwarnings = py3kwarnings
617 619 self._shell = _bytespath(shell)
618 620 self._hgcommand = hgcommand or b'hg'
619 621 self._usechg = usechg
620 622 self._useipv6 = useipv6
621 623
622 624 self._aborted = False
623 625 self._daemonpids = []
624 626 self._finished = None
625 627 self._ret = None
626 628 self._out = None
627 629 self._skipped = None
628 630 self._testtmp = None
629 631 self._chgsockdir = None
630 632
631 633 # If we're not in --debug mode and reference output file exists,
632 634 # check test output against it.
633 635 if debug:
634 636 self._refout = None # to match "out is None"
635 637 elif os.path.exists(self.refpath):
636 638 f = open(self.refpath, 'rb')
637 639 self._refout = f.read().splitlines(True)
638 640 f.close()
639 641 else:
640 642 self._refout = []
641 643
642 644 # needed to get base class __repr__ running
643 645 @property
644 646 def _testMethodName(self):
645 647 return self.name
646 648
647 649 def __str__(self):
648 650 return self.name
649 651
650 652 def shortDescription(self):
651 653 return self.name
652 654
653 655 def setUp(self):
654 656 """Tasks to perform before run()."""
655 657 self._finished = False
656 658 self._ret = None
657 659 self._out = None
658 660 self._skipped = None
659 661
660 662 try:
661 663 os.mkdir(self._threadtmp)
662 664 except OSError as e:
663 665 if e.errno != errno.EEXIST:
664 666 raise
665 667
666 668 name = self._tmpname
667 669 self._testtmp = os.path.join(self._threadtmp, name)
668 670 os.mkdir(self._testtmp)
669 671
670 672 # Remove any previous output files.
671 673 if os.path.exists(self.errpath):
672 674 try:
673 675 os.remove(self.errpath)
674 676 except OSError as e:
675 677 # We might have raced another test to clean up a .err
676 678 # file, so ignore ENOENT when removing a previous .err
677 679 # file.
678 680 if e.errno != errno.ENOENT:
679 681 raise
680 682
681 683 if self._usechg:
682 684 self._chgsockdir = os.path.join(self._threadtmp,
683 685 b'%s.chgsock' % name)
684 686 os.mkdir(self._chgsockdir)
685 687
686 688 def run(self, result):
687 689 """Run this test and report results against a TestResult instance."""
688 690 # This function is extremely similar to unittest.TestCase.run(). Once
689 691 # we require Python 2.7 (or at least its version of unittest), this
690 692 # function can largely go away.
691 693 self._result = result
692 694 result.startTest(self)
693 695 try:
694 696 try:
695 697 self.setUp()
696 698 except (KeyboardInterrupt, SystemExit):
697 699 self._aborted = True
698 700 raise
699 701 except Exception:
700 702 result.addError(self, sys.exc_info())
701 703 return
702 704
703 705 success = False
704 706 try:
705 707 self.runTest()
706 708 except KeyboardInterrupt:
707 709 self._aborted = True
708 710 raise
709 711 except SkipTest as e:
710 712 result.addSkip(self, str(e))
711 713 # The base class will have already counted this as a
712 714 # test we "ran", but we want to exclude skipped tests
713 715 # from those we count towards those run.
714 716 result.testsRun -= 1
715 717 except IgnoreTest as e:
716 718 result.addIgnore(self, str(e))
717 719 # As with skips, ignores also should be excluded from
718 720 # the number of tests executed.
719 721 result.testsRun -= 1
720 722 except WarnTest as e:
721 723 result.addWarn(self, str(e))
722 724 except ReportedTest as e:
723 725 pass
724 726 except self.failureException as e:
725 727 # This differs from unittest in that we don't capture
726 728 # the stack trace. This is for historical reasons and
727 729 # this decision could be revisited in the future,
728 730 # especially for PythonTest instances.
729 731 if result.addFailure(self, str(e)):
730 732 success = True
731 733 except Exception:
732 734 result.addError(self, sys.exc_info())
733 735 else:
734 736 success = True
735 737
736 738 try:
737 739 self.tearDown()
738 740 except (KeyboardInterrupt, SystemExit):
739 741 self._aborted = True
740 742 raise
741 743 except Exception:
742 744 result.addError(self, sys.exc_info())
743 745 success = False
744 746
745 747 if success:
746 748 result.addSuccess(self)
747 749 finally:
748 750 result.stopTest(self, interrupted=self._aborted)
749 751
750 752 def runTest(self):
751 753 """Run this test instance.
752 754
753 755 This will return a tuple describing the result of the test.
754 756 """
755 757 env = self._getenv()
756 758 self._daemonpids.append(env['DAEMON_PIDS'])
757 759 self._createhgrc(env['HGRCPATH'])
758 760
759 761 vlog('# Test', self.name)
760 762
761 763 ret, out = self._run(env)
762 764 self._finished = True
763 765 self._ret = ret
764 766 self._out = out
765 767
766 768 def describe(ret):
767 769 if ret < 0:
768 770 return 'killed by signal: %d' % -ret
769 771 return 'returned error code %d' % ret
770 772
771 773 self._skipped = False
772 774
773 775 if ret == self.SKIPPED_STATUS:
774 776 if out is None: # Debug mode, nothing to parse.
775 777 missing = ['unknown']
776 778 failed = None
777 779 else:
778 780 missing, failed = TTest.parsehghaveoutput(out)
779 781
780 782 if not missing:
781 783 missing = ['skipped']
782 784
783 785 if failed:
784 786 self.fail('hg have failed checking for %s' % failed[-1])
785 787 else:
786 788 self._skipped = True
787 789 raise SkipTest(missing[-1])
788 790 elif ret == 'timeout':
789 791 self.fail('timed out')
790 792 elif ret is False:
791 793 raise WarnTest('no result code from test')
792 794 elif out != self._refout:
793 795 # Diff generation may rely on written .err file.
794 796 if (ret != 0 or out != self._refout) and not self._skipped \
795 797 and not self._debug:
796 798 f = open(self.errpath, 'wb')
797 799 for line in out:
798 800 f.write(line)
799 801 f.close()
800 802
801 803 # The result object handles diff calculation for us.
802 804 if self._result.addOutputMismatch(self, ret, out, self._refout):
803 805 # change was accepted, skip failing
804 806 return
805 807
806 808 if ret:
807 809 msg = 'output changed and ' + describe(ret)
808 810 else:
809 811 msg = 'output changed'
810 812
811 813 self.fail(msg)
812 814 elif ret:
813 815 self.fail(describe(ret))
814 816
815 817 def tearDown(self):
816 818 """Tasks to perform after run()."""
817 819 for entry in self._daemonpids:
818 820 killdaemons(entry)
819 821 self._daemonpids = []
820 822
821 823 if self._keeptmpdir:
822 824 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
823 825 (self._testtmp.decode('utf-8'),
824 826 self._threadtmp.decode('utf-8')))
825 827 else:
826 828 shutil.rmtree(self._testtmp, True)
827 829 shutil.rmtree(self._threadtmp, True)
828 830
829 831 if self._usechg:
830 832 # chgservers will stop automatically after they find the socket
831 833 # files are deleted
832 834 shutil.rmtree(self._chgsockdir, True)
833 835
834 836 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
835 837 and not self._debug and self._out:
836 838 f = open(self.errpath, 'wb')
837 839 for line in self._out:
838 840 f.write(line)
839 841 f.close()
840 842
841 843 vlog("# Ret was:", self._ret, '(%s)' % self.name)
842 844
843 845 def _run(self, env):
844 846 # This should be implemented in child classes to run tests.
845 847 raise SkipTest('unknown test type')
846 848
847 849 def abort(self):
848 850 """Terminate execution of this test."""
849 851 self._aborted = True
850 852
851 853 def _portmap(self, i):
852 854 offset = b'' if i == 0 else b'%d' % i
853 855 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
854 856
855 857 def _getreplacements(self):
856 858 """Obtain a mapping of text replacements to apply to test output.
857 859
858 860 Test output needs to be normalized so it can be compared to expected
859 861 output. This function defines how some of that normalization will
860 862 occur.
861 863 """
862 864 r = [
863 865 # This list should be parallel to defineport in _getenv
864 866 self._portmap(0),
865 867 self._portmap(1),
866 868 self._portmap(2),
867 869 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
868 870 br'\1 (glob)'),
869 871 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
870 872 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
871 873 ]
872 874 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
873 875
874 876 return r
875 877
876 878 def _escapepath(self, p):
877 879 if os.name == 'nt':
878 880 return (
879 881 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
880 882 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
881 883 for c in p))
882 884 )
883 885 else:
884 886 return re.escape(p)
885 887
886 888 def _localip(self):
887 889 if self._useipv6:
888 890 return b'::1'
889 891 else:
890 892 return b'127.0.0.1'
891 893
892 894 def _getenv(self):
893 895 """Obtain environment variables to use during test execution."""
894 896 def defineport(i):
895 897 offset = '' if i == 0 else '%s' % i
896 898 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
897 899 env = os.environ.copy()
898 900 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
899 901 env['HGEMITWARNINGS'] = '1'
900 902 env['TESTTMP'] = self._testtmp
901 903 env['HOME'] = self._testtmp
902 904 # This number should match portneeded in _getport
903 905 for port in xrange(3):
904 906 # This list should be parallel to _portmap in _getreplacements
905 907 defineport(port)
906 908 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
907 909 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
908 910 env["HGEDITOR"] = ('"' + sys.executable + '"'
909 911 + ' -c "import sys; sys.exit(0)"')
910 912 env["HGMERGE"] = "internal:merge"
911 913 env["HGUSER"] = "test"
912 914 env["HGENCODING"] = "ascii"
913 915 env["HGENCODINGMODE"] = "strict"
914 916 env['HGIPV6'] = str(int(self._useipv6))
915 917
916 918 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
917 919 # IP addresses.
918 920 env['LOCALIP'] = self._localip()
919 921
920 922 # Reset some environment variables to well-known values so that
921 923 # the tests produce repeatable output.
922 924 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
923 925 env['TZ'] = 'GMT'
924 926 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
925 927 env['COLUMNS'] = '80'
926 928 env['TERM'] = 'xterm'
927 929
928 930 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
929 931 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
930 932 'NO_PROXY CHGDEBUG').split():
931 933 if k in env:
932 934 del env[k]
933 935
934 936 # unset env related to hooks
935 937 for k in env.keys():
936 938 if k.startswith('HG_'):
937 939 del env[k]
938 940
939 941 if self._usechg:
940 942 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
941 943
942 944 return env
943 945
944 946 def _createhgrc(self, path):
945 947 """Create an hgrc file for this test."""
946 948 hgrc = open(path, 'wb')
947 949 hgrc.write(b'[ui]\n')
948 950 hgrc.write(b'slash = True\n')
949 951 hgrc.write(b'interactive = False\n')
950 952 hgrc.write(b'mergemarkers = detailed\n')
951 953 hgrc.write(b'promptecho = True\n')
952 954 hgrc.write(b'[defaults]\n')
953 955 hgrc.write(b'[devel]\n')
954 956 hgrc.write(b'all-warnings = true\n')
955 957 hgrc.write(b'default-date = 0 0\n')
956 958 hgrc.write(b'[largefiles]\n')
957 959 hgrc.write(b'usercache = %s\n' %
958 960 (os.path.join(self._testtmp, b'.cache/largefiles')))
959 961 hgrc.write(b'[web]\n')
960 962 hgrc.write(b'address = localhost\n')
961 963 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
962 964
963 965 for opt in self._extraconfigopts:
964 966 section, key = opt.split('.', 1)
965 967 assert '=' in key, ('extra config opt %s must '
966 968 'have an = for assignment' % opt)
967 969 hgrc.write(b'[%s]\n%s\n' % (section, key))
968 970 hgrc.close()
969 971
970 972 def fail(self, msg):
971 973 # unittest differentiates between errored and failed.
972 974 # Failed is denoted by AssertionError (by default at least).
973 975 raise AssertionError(msg)
974 976
975 977 def _runcommand(self, cmd, env, normalizenewlines=False):
976 978 """Run command in a sub-process, capturing the output (stdout and
977 979 stderr).
978 980
979 981 Return a tuple (exitcode, output). output is None in debug mode.
980 982 """
981 983 if self._debug:
982 984 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
983 985 env=env)
984 986 ret = proc.wait()
985 987 return (ret, None)
986 988
987 989 proc = Popen4(cmd, self._testtmp, self._timeout, env)
988 990 def cleanup():
989 991 terminate(proc)
990 992 ret = proc.wait()
991 993 if ret == 0:
992 994 ret = signal.SIGTERM << 8
993 995 killdaemons(env['DAEMON_PIDS'])
994 996 return ret
995 997
996 998 output = ''
997 999 proc.tochild.close()
998 1000
999 1001 try:
1000 1002 output = proc.fromchild.read()
1001 1003 except KeyboardInterrupt:
1002 1004 vlog('# Handling keyboard interrupt')
1003 1005 cleanup()
1004 1006 raise
1005 1007
1006 1008 ret = proc.wait()
1007 1009 if wifexited(ret):
1008 1010 ret = os.WEXITSTATUS(ret)
1009 1011
1010 1012 if proc.timeout:
1011 1013 ret = 'timeout'
1012 1014
1013 1015 if ret:
1014 1016 killdaemons(env['DAEMON_PIDS'])
1015 1017
1016 1018 for s, r in self._getreplacements():
1017 1019 output = re.sub(s, r, output)
1018 1020
1019 1021 if normalizenewlines:
1020 1022 output = output.replace('\r\n', '\n')
1021 1023
1022 1024 return ret, output.splitlines(True)
1023 1025
1024 1026 class PythonTest(Test):
1025 1027 """A Python-based test."""
1026 1028
1027 1029 @property
1028 1030 def refpath(self):
1029 1031 return os.path.join(self._testdir, b'%s.out' % self.bname)
1030 1032
1031 1033 def _run(self, env):
1032 1034 py3kswitch = self._py3kwarnings and b' -3' or b''
1033 1035 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
1034 1036 vlog("# Running", cmd)
1035 1037 normalizenewlines = os.name == 'nt'
1036 1038 result = self._runcommand(cmd, env,
1037 1039 normalizenewlines=normalizenewlines)
1038 1040 if self._aborted:
1039 1041 raise KeyboardInterrupt()
1040 1042
1041 1043 return result
1042 1044
1043 1045 # Some glob patterns apply only in some circumstances, so the script
1044 1046 # might want to remove (glob) annotations that otherwise should be
1045 1047 # retained.
1046 1048 checkcodeglobpats = [
1047 1049 # On Windows it looks like \ doesn't require a (glob), but we know
1048 1050 # better.
1049 1051 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1050 1052 re.compile(br'^moving \S+/.*[^)]$'),
1051 1053 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1052 1054 # Not all platforms have 127.0.0.1 as loopback (though most do),
1053 1055 # so we always glob that too.
1054 1056 re.compile(br'.*\$LOCALIP.*$'),
1055 1057 ]
1056 1058
1057 1059 bchr = chr
1058 1060 if PYTHON3:
1059 1061 bchr = lambda x: bytes([x])
1060 1062
1061 1063 class TTest(Test):
1062 1064 """A "t test" is a test backed by a .t file."""
1063 1065
1064 1066 SKIPPED_PREFIX = b'skipped: '
1065 1067 FAILED_PREFIX = b'hghave check failed: '
1066 1068 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1067 1069
1068 1070 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1069 1071 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
1070 1072 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1071 1073
1072 1074 def __init__(self, path, *args, **kwds):
1073 1075 # accept an extra "case" parameter
1074 1076 case = None
1075 1077 if 'case' in kwds:
1076 1078 case = kwds.pop('case')
1077 1079 self._case = case
1078 1080 self._allcases = parsettestcases(path)
1079 1081 super(TTest, self).__init__(path, *args, **kwds)
1080 1082 if case:
1081 1083 self.name = '%s (case %s)' % (self.name, _strpath(case))
1082 1084 self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
1083 1085 self._tmpname += b'-%s' % case
1084 1086
1085 1087 @property
1086 1088 def refpath(self):
1087 1089 return os.path.join(self._testdir, self.bname)
1088 1090
1089 1091 def _run(self, env):
1090 1092 f = open(self.path, 'rb')
1091 1093 lines = f.readlines()
1092 1094 f.close()
1093 1095
1094 1096 salt, script, after, expected = self._parsetest(lines)
1095 1097
1096 1098 # Write out the generated script.
1097 1099 fname = b'%s.sh' % self._testtmp
1098 1100 f = open(fname, 'wb')
1099 1101 for l in script:
1100 1102 f.write(l)
1101 1103 f.close()
1102 1104
1103 1105 cmd = b'%s "%s"' % (self._shell, fname)
1104 1106 vlog("# Running", cmd)
1105 1107
1106 1108 exitcode, output = self._runcommand(cmd, env)
1107 1109
1108 1110 if self._aborted:
1109 1111 raise KeyboardInterrupt()
1110 1112
1111 1113 # Do not merge output if skipped. Return hghave message instead.
1112 1114 # Similarly, with --debug, output is None.
1113 1115 if exitcode == self.SKIPPED_STATUS or output is None:
1114 1116 return exitcode, output
1115 1117
1116 1118 return self._processoutput(exitcode, output, salt, after, expected)
1117 1119
1118 1120 def _hghave(self, reqs):
1119 1121 # TODO do something smarter when all other uses of hghave are gone.
1120 1122 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1121 1123 tdir = runtestdir.replace(b'\\', b'/')
1122 1124 proc = Popen4(b'%s -c "%s/hghave %s"' %
1123 1125 (self._shell, tdir, b' '.join(reqs)),
1124 1126 self._testtmp, 0, self._getenv())
1125 1127 stdout, stderr = proc.communicate()
1126 1128 ret = proc.wait()
1127 1129 if wifexited(ret):
1128 1130 ret = os.WEXITSTATUS(ret)
1129 1131 if ret == 2:
1130 1132 print(stdout.decode('utf-8'))
1131 1133 sys.exit(1)
1132 1134
1133 1135 if ret != 0:
1134 1136 return False, stdout
1135 1137
1136 1138 if 'slow' in reqs:
1137 1139 self._timeout = self._slowtimeout
1138 1140 return True, None
1139 1141
1140 1142 def _iftest(self, args):
1141 1143 # implements "#if"
1142 1144 reqs = []
1143 1145 for arg in args:
1144 1146 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1145 1147 if arg[3:] == self._case:
1146 1148 return False
1147 1149 elif arg in self._allcases:
1148 1150 if arg != self._case:
1149 1151 return False
1150 1152 else:
1151 1153 reqs.append(arg)
1152 1154 return self._hghave(reqs)[0]
1153 1155
1154 1156 def _parsetest(self, lines):
1155 1157 # We generate a shell script which outputs unique markers to line
1156 1158 # up script results with our source. These markers include input
1157 1159 # line number and the last return code.
1158 1160 salt = b"SALT%d" % time.time()
1159 1161 def addsalt(line, inpython):
1160 1162 if inpython:
1161 1163 script.append(b'%s %d 0\n' % (salt, line))
1162 1164 else:
1163 1165 script.append(b'echo %s %d $?\n' % (salt, line))
1164 1166
1165 1167 script = []
1166 1168
1167 1169 # After we run the shell script, we re-unify the script output
1168 1170 # with non-active parts of the source, with synchronization by our
1169 1171 # SALT line number markers. The after table contains the non-active
1170 1172 # components, ordered by line number.
1171 1173 after = {}
1172 1174
1173 1175 # Expected shell script output.
1174 1176 expected = {}
1175 1177
1176 1178 pos = prepos = -1
1177 1179
1178 1180 # True or False when in a true or false conditional section
1179 1181 skipping = None
1180 1182
1181 1183 # We keep track of whether or not we're in a Python block so we
1182 1184 # can generate the surrounding doctest magic.
1183 1185 inpython = False
1184 1186
1185 1187 if self._debug:
1186 1188 script.append(b'set -x\n')
1187 1189 if self._hgcommand != b'hg':
1188 1190 script.append(b'alias hg="%s"\n' % self._hgcommand)
1189 1191 if os.getenv('MSYSTEM'):
1190 1192 script.append(b'alias pwd="pwd -W"\n')
1191 1193
1192 1194 n = 0
1193 1195 for n, l in enumerate(lines):
1194 1196 if not l.endswith(b'\n'):
1195 1197 l += b'\n'
1196 1198 if l.startswith(b'#require'):
1197 1199 lsplit = l.split()
1198 1200 if len(lsplit) < 2 or lsplit[0] != b'#require':
1199 1201 after.setdefault(pos, []).append(' !!! invalid #require\n')
1200 1202 haveresult, message = self._hghave(lsplit[1:])
1201 1203 if not haveresult:
1202 1204 script = [b'echo "%s"\nexit 80\n' % message]
1203 1205 break
1204 1206 after.setdefault(pos, []).append(l)
1205 1207 elif l.startswith(b'#if'):
1206 1208 lsplit = l.split()
1207 1209 if len(lsplit) < 2 or lsplit[0] != b'#if':
1208 1210 after.setdefault(pos, []).append(' !!! invalid #if\n')
1209 1211 if skipping is not None:
1210 1212 after.setdefault(pos, []).append(' !!! nested #if\n')
1211 1213 skipping = not self._iftest(lsplit[1:])
1212 1214 after.setdefault(pos, []).append(l)
1213 1215 elif l.startswith(b'#else'):
1214 1216 if skipping is None:
1215 1217 after.setdefault(pos, []).append(' !!! missing #if\n')
1216 1218 skipping = not skipping
1217 1219 after.setdefault(pos, []).append(l)
1218 1220 elif l.startswith(b'#endif'):
1219 1221 if skipping is None:
1220 1222 after.setdefault(pos, []).append(' !!! missing #if\n')
1221 1223 skipping = None
1222 1224 after.setdefault(pos, []).append(l)
1223 1225 elif skipping:
1224 1226 after.setdefault(pos, []).append(l)
1225 1227 elif l.startswith(b' >>> '): # python inlines
1226 1228 after.setdefault(pos, []).append(l)
1227 1229 prepos = pos
1228 1230 pos = n
1229 1231 if not inpython:
1230 1232 # We've just entered a Python block. Add the header.
1231 1233 inpython = True
1232 1234 addsalt(prepos, False) # Make sure we report the exit code.
1233 1235 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1234 1236 addsalt(n, True)
1235 1237 script.append(l[2:])
1236 1238 elif l.startswith(b' ... '): # python inlines
1237 1239 after.setdefault(prepos, []).append(l)
1238 1240 script.append(l[2:])
1239 1241 elif l.startswith(b' $ '): # commands
1240 1242 if inpython:
1241 1243 script.append(b'EOF\n')
1242 1244 inpython = False
1243 1245 after.setdefault(pos, []).append(l)
1244 1246 prepos = pos
1245 1247 pos = n
1246 1248 addsalt(n, False)
1247 1249 cmd = l[4:].split()
1248 1250 if len(cmd) == 2 and cmd[0] == b'cd':
1249 1251 l = b' $ cd %s || exit 1\n' % cmd[1]
1250 1252 script.append(l[4:])
1251 1253 elif l.startswith(b' > '): # continuations
1252 1254 after.setdefault(prepos, []).append(l)
1253 1255 script.append(l[4:])
1254 1256 elif l.startswith(b' '): # results
1255 1257 # Queue up a list of expected results.
1256 1258 expected.setdefault(pos, []).append(l[2:])
1257 1259 else:
1258 1260 if inpython:
1259 1261 script.append(b'EOF\n')
1260 1262 inpython = False
1261 1263 # Non-command/result. Queue up for merged output.
1262 1264 after.setdefault(pos, []).append(l)
1263 1265
1264 1266 if inpython:
1265 1267 script.append(b'EOF\n')
1266 1268 if skipping is not None:
1267 1269 after.setdefault(pos, []).append(' !!! missing #endif\n')
1268 1270 addsalt(n + 1, False)
1269 1271
1270 1272 return salt, script, after, expected
1271 1273
1272 1274 def _processoutput(self, exitcode, output, salt, after, expected):
1273 1275 # Merge the script output back into a unified test.
1274 1276 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1275 1277 if exitcode != 0:
1276 1278 warnonly = 3
1277 1279
1278 1280 pos = -1
1279 1281 postout = []
1280 1282 for l in output:
1281 1283 lout, lcmd = l, None
1282 1284 if salt in l:
1283 1285 lout, lcmd = l.split(salt, 1)
1284 1286
1285 1287 while lout:
1286 1288 if not lout.endswith(b'\n'):
1287 1289 lout += b' (no-eol)\n'
1288 1290
1289 1291 # Find the expected output at the current position.
1290 1292 els = [None]
1291 1293 if expected.get(pos, None):
1292 1294 els = expected[pos]
1293 1295
1294 1296 i = 0
1295 1297 optional = []
1296 1298 while i < len(els):
1297 1299 el = els[i]
1298 1300
1299 1301 r = TTest.linematch(el, lout)
1300 1302 if isinstance(r, str):
1301 1303 if r == '+glob':
1302 1304 lout = el[:-1] + ' (glob)\n'
1303 1305 r = '' # Warn only this line.
1304 1306 elif r == '-glob':
1305 1307 lout = ''.join(el.rsplit(' (glob)', 1))
1306 1308 r = '' # Warn only this line.
1307 1309 elif r == "retry":
1308 1310 postout.append(b' ' + el)
1309 1311 els.pop(i)
1310 1312 break
1311 1313 else:
1312 1314 log('\ninfo, unknown linematch result: %r\n' % r)
1313 1315 r = False
1314 1316 if r:
1315 1317 els.pop(i)
1316 1318 break
1317 1319 if el:
1318 1320 if el.endswith(b" (?)\n"):
1319 1321 optional.append(i)
1320 1322 else:
1321 1323 m = optline.match(el)
1322 1324 if m:
1323 1325 conditions = [c for c in m.group(2).split(' ')]
1324 1326
1325 1327 if self._hghave(conditions)[0]:
1326 1328 lout = el
1327 1329 else:
1328 1330 optional.append(i)
1329 1331
1330 1332 i += 1
1331 1333
1332 1334 if r:
1333 1335 if r == "retry":
1334 1336 continue
1335 1337 # clean up any optional leftovers
1336 1338 for i in optional:
1337 1339 postout.append(b' ' + els[i])
1338 1340 for i in reversed(optional):
1339 1341 del els[i]
1340 1342 postout.append(b' ' + el)
1341 1343 else:
1342 1344 if self.NEEDESCAPE(lout):
1343 1345 lout = TTest._stringescape(b'%s (esc)\n' %
1344 1346 lout.rstrip(b'\n'))
1345 1347 postout.append(b' ' + lout) # Let diff deal with it.
1346 1348 if r != '': # If line failed.
1347 1349 warnonly = 3 # for sure not
1348 1350 elif warnonly == 1: # Is "not yet" and line is warn only.
1349 1351 warnonly = 2 # Yes do warn.
1350 1352 break
1351 1353 else:
1352 1354 # clean up any optional leftovers
1353 1355 while expected.get(pos, None):
1354 1356 el = expected[pos].pop(0)
1355 1357 if el:
1356 1358 if (not optline.match(el)
1357 1359 and not el.endswith(b" (?)\n")):
1358 1360 break
1359 1361 postout.append(b' ' + el)
1360 1362
1361 1363 if lcmd:
1362 1364 # Add on last return code.
1363 1365 ret = int(lcmd.split()[1])
1364 1366 if ret != 0:
1365 1367 postout.append(b' [%d]\n' % ret)
1366 1368 if pos in after:
1367 1369 # Merge in non-active test bits.
1368 1370 postout += after.pop(pos)
1369 1371 pos = int(lcmd.split()[0])
1370 1372
1371 1373 if pos in after:
1372 1374 postout += after.pop(pos)
1373 1375
1374 1376 if warnonly == 2:
1375 1377 exitcode = False # Set exitcode to warned.
1376 1378
1377 1379 return exitcode, postout
1378 1380
1379 1381 @staticmethod
1380 1382 def rematch(el, l):
1381 1383 try:
1382 1384 # use \Z to ensure that the regex matches to the end of the string
1383 1385 if os.name == 'nt':
1384 1386 return re.match(el + br'\r?\n\Z', l)
1385 1387 return re.match(el + br'\n\Z', l)
1386 1388 except re.error:
1387 1389 # el is an invalid regex
1388 1390 return False
1389 1391
1390 1392 @staticmethod
1391 1393 def globmatch(el, l):
1392 1394 # The only supported special characters are * and ? plus /, which also
1393 1395 # matches \ on Windows. Escaping of these characters is supported.
1394 1396 if el + b'\n' == l:
1395 1397 if os.altsep:
1396 1398 # matching on "/" is not needed for this line
1397 1399 for pat in checkcodeglobpats:
1398 1400 if pat.match(el):
1399 1401 return True
1400 1402 return b'-glob'
1401 1403 return True
1402 1404 el = el.replace(b'$LOCALIP', b'*')
1403 1405 i, n = 0, len(el)
1404 1406 res = b''
1405 1407 while i < n:
1406 1408 c = el[i:i + 1]
1407 1409 i += 1
1408 1410 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1409 1411 res += el[i - 1:i + 1]
1410 1412 i += 1
1411 1413 elif c == b'*':
1412 1414 res += b'.*'
1413 1415 elif c == b'?':
1414 1416 res += b'.'
1415 1417 elif c == b'/' and os.altsep:
1416 1418 res += b'[/\\\\]'
1417 1419 else:
1418 1420 res += re.escape(c)
1419 1421 return TTest.rematch(res, l)
1420 1422
1421 1423 @staticmethod
1422 1424 def linematch(el, l):
1423 1425 retry = False
1424 1426 if el == l: # perfect match (fast)
1425 1427 return True
1426 1428 if el:
1427 1429 if el.endswith(b" (?)\n"):
1428 1430 retry = "retry"
1429 1431 el = el[:-5] + b"\n"
1430 1432 else:
1431 1433 m = optline.match(el)
1432 1434 if m:
1433 1435 el = m.group(1) + b"\n"
1434 1436 retry = "retry"
1435 1437
1436 1438 if el.endswith(b" (esc)\n"):
1437 1439 if PYTHON3:
1438 1440 el = el[:-7].decode('unicode_escape') + '\n'
1439 1441 el = el.encode('utf-8')
1440 1442 else:
1441 1443 el = el[:-7].decode('string-escape') + '\n'
1442 1444 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1443 1445 return True
1444 1446 if el.endswith(b" (re)\n"):
1445 1447 return TTest.rematch(el[:-6], l) or retry
1446 1448 if el.endswith(b" (glob)\n"):
1447 1449 # ignore '(glob)' added to l by 'replacements'
1448 1450 if l.endswith(b" (glob)\n"):
1449 1451 l = l[:-8] + b"\n"
1450 1452 return TTest.globmatch(el[:-8], l) or retry
1451 1453 if os.altsep and l.replace(b'\\', b'/') == el:
1452 1454 return b'+glob'
1453 1455 return retry
1454 1456
1455 1457 @staticmethod
1456 1458 def parsehghaveoutput(lines):
1457 1459 '''Parse hghave log lines.
1458 1460
1459 1461 Return tuple of lists (missing, failed):
1460 1462 * the missing/unknown features
1461 1463 * the features for which existence check failed'''
1462 1464 missing = []
1463 1465 failed = []
1464 1466 for line in lines:
1465 1467 if line.startswith(TTest.SKIPPED_PREFIX):
1466 1468 line = line.splitlines()[0]
1467 1469 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1468 1470 elif line.startswith(TTest.FAILED_PREFIX):
1469 1471 line = line.splitlines()[0]
1470 1472 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1471 1473
1472 1474 return missing, failed
1473 1475
1474 1476 @staticmethod
1475 1477 def _escapef(m):
1476 1478 return TTest.ESCAPEMAP[m.group(0)]
1477 1479
1478 1480 @staticmethod
1479 1481 def _stringescape(s):
1480 1482 return TTest.ESCAPESUB(TTest._escapef, s)
1481 1483
1482 1484 iolock = threading.RLock()
1483 1485
1484 1486 class SkipTest(Exception):
1485 1487 """Raised to indicate that a test is to be skipped."""
1486 1488
1487 1489 class IgnoreTest(Exception):
1488 1490 """Raised to indicate that a test is to be ignored."""
1489 1491
1490 1492 class WarnTest(Exception):
1491 1493 """Raised to indicate that a test warned."""
1492 1494
1493 1495 class ReportedTest(Exception):
1494 1496 """Raised to indicate that a test already reported."""
1495 1497
1496 1498 class TestResult(unittest._TextTestResult):
1497 1499 """Holds results when executing via unittest."""
1498 1500 # Don't worry too much about accessing the non-public _TextTestResult.
1499 1501 # It is relatively common in Python testing tools.
1500 1502 def __init__(self, options, *args, **kwargs):
1501 1503 super(TestResult, self).__init__(*args, **kwargs)
1502 1504
1503 1505 self._options = options
1504 1506
1505 1507 # unittest.TestResult didn't have skipped until 2.7. We need to
1506 1508 # polyfill it.
1507 1509 self.skipped = []
1508 1510
1509 1511 # We have a custom "ignored" result that isn't present in any Python
1510 1512 # unittest implementation. It is very similar to skipped. It may make
1511 1513 # sense to map it into skip some day.
1512 1514 self.ignored = []
1513 1515
1514 1516 # We have a custom "warned" result that isn't present in any Python
1515 1517 # unittest implementation. It is very similar to failed. It may make
1516 1518 # sense to map it into fail some day.
1517 1519 self.warned = []
1518 1520
1519 1521 self.times = []
1520 1522 self._firststarttime = None
1521 1523 # Data stored for the benefit of generating xunit reports.
1522 1524 self.successes = []
1523 1525 self.faildata = {}
1524 1526
1525 1527 def addFailure(self, test, reason):
1526 1528 self.failures.append((test, reason))
1527 1529
1528 1530 if self._options.first:
1529 1531 self.stop()
1530 1532 else:
1531 1533 with iolock:
1532 1534 if reason == "timed out":
1533 1535 self.stream.write('t')
1534 1536 else:
1535 1537 if not self._options.nodiff:
1536 1538 self.stream.write('\nERROR: %s output changed\n' % test)
1537 1539 self.stream.write('!')
1538 1540
1539 1541 self.stream.flush()
1540 1542
1541 1543 def addSuccess(self, test):
1542 1544 with iolock:
1543 1545 super(TestResult, self).addSuccess(test)
1544 1546 self.successes.append(test)
1545 1547
1546 1548 def addError(self, test, err):
1547 1549 super(TestResult, self).addError(test, err)
1548 1550 if self._options.first:
1549 1551 self.stop()
1550 1552
1551 1553 # Polyfill.
1552 1554 def addSkip(self, test, reason):
1553 1555 self.skipped.append((test, reason))
1554 1556 with iolock:
1555 1557 if self.showAll:
1556 1558 self.stream.writeln('skipped %s' % reason)
1557 1559 else:
1558 1560 self.stream.write('s')
1559 1561 self.stream.flush()
1560 1562
1561 1563 def addIgnore(self, test, reason):
1562 1564 self.ignored.append((test, reason))
1563 1565 with iolock:
1564 1566 if self.showAll:
1565 1567 self.stream.writeln('ignored %s' % reason)
1566 1568 else:
1567 1569 if reason not in ('not retesting', "doesn't match keyword"):
1568 1570 self.stream.write('i')
1569 1571 else:
1570 1572 self.testsRun += 1
1571 1573 self.stream.flush()
1572 1574
1573 1575 def addWarn(self, test, reason):
1574 1576 self.warned.append((test, reason))
1575 1577
1576 1578 if self._options.first:
1577 1579 self.stop()
1578 1580
1579 1581 with iolock:
1580 1582 if self.showAll:
1581 1583 self.stream.writeln('warned %s' % reason)
1582 1584 else:
1583 1585 self.stream.write('~')
1584 1586 self.stream.flush()
1585 1587
1586 1588 def addOutputMismatch(self, test, ret, got, expected):
1587 1589 """Record a mismatch in test output for a particular test."""
1588 1590 if self.shouldStop:
1589 1591 # don't print, some other test case already failed and
1590 1592 # printed, we're just stale and probably failed due to our
1591 1593 # temp dir getting cleaned up.
1592 1594 return
1593 1595
1594 1596 accepted = False
1595 1597 lines = []
1596 1598
1597 1599 with iolock:
1598 1600 if self._options.nodiff:
1599 1601 pass
1600 1602 elif self._options.view:
1601 1603 v = self._options.view
1602 1604 if PYTHON3:
1603 1605 v = _bytespath(v)
1604 1606 os.system(b"%s %s %s" %
1605 1607 (v, test.refpath, test.errpath))
1606 1608 else:
1607 1609 servefail, lines = getdiff(expected, got,
1608 1610 test.refpath, test.errpath)
1609 1611 if servefail:
1610 1612 self.addFailure(
1611 1613 test,
1612 1614 'server failed to start (HGPORT=%s)' % test._startport)
1613 1615 raise ReportedTest('server failed to start')
1614 1616 else:
1615 1617 self.stream.write('\n')
1616 1618 for line in lines:
1617 1619 if PYTHON3:
1618 1620 self.stream.flush()
1619 1621 self.stream.buffer.write(line)
1620 1622 self.stream.buffer.flush()
1621 1623 else:
1622 1624 self.stream.write(line)
1623 1625 self.stream.flush()
1624 1626
1625 1627 # handle interactive prompt without releasing iolock
1626 1628 if self._options.interactive:
1627 1629 self.stream.write('Accept this change? [n] ')
1628 1630 answer = sys.stdin.readline().strip()
1629 1631 if answer.lower() in ('y', 'yes'):
1630 1632 if test.name.endswith('.t'):
1631 1633 rename(test.errpath, test.path)
1632 1634 else:
1633 1635 rename(test.errpath, '%s.out' % test.path)
1634 1636 accepted = True
1635 1637 if not accepted:
1636 1638 self.faildata[test.name] = b''.join(lines)
1637 1639
1638 1640 return accepted
1639 1641
1640 1642 def startTest(self, test):
1641 1643 super(TestResult, self).startTest(test)
1642 1644
1643 1645 # os.times() returns the user and system CPU time of this process
1644 1646 # and of its children, along with the elapsed real time. The child
1645 1647 # CPU times used below are only meaningful on Unix-like systems;
1646 1648 # on Windows they are reported as zero.
1647 1649 test.started = os.times()
1648 1650 if self._firststarttime is None: # thread racy but irrelevant
1649 1651 self._firststarttime = test.started[4]
1650 1652
1651 1653 def stopTest(self, test, interrupted=False):
1652 1654 super(TestResult, self).stopTest(test)
1653 1655
1654 1656 test.stopped = os.times()
1655 1657
1656 1658 starttime = test.started
1657 1659 endtime = test.stopped
1658 1660 origin = self._firststarttime
1659 1661 self.times.append((test.name,
1660 1662 endtime[2] - starttime[2], # user space CPU time
1661 1663 endtime[3] - starttime[3], # sys space CPU time
1662 1664 endtime[4] - starttime[4], # real time
1663 1665 starttime[4] - origin, # start date in run context
1664 1666 endtime[4] - origin, # end date in run context
1665 1667 ))
1666 1668
1667 1669 if interrupted:
1668 1670 with iolock:
1669 1671 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1670 1672 test.name, self.times[-1][3]))
1671 1673
1672 1674 class TestSuite(unittest.TestSuite):
1673 1675 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1674 1676
1675 1677 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1676 1678 retest=False, keywords=None, loop=False, runs_per_test=1,
1677 1679 loadtest=None, showchannels=False,
1678 1680 *args, **kwargs):
1679 1681 """Create a new instance that can run tests with a configuration.
1680 1682
1681 1683 testdir specifies the directory where tests are executed from. This
1682 1684 is typically the ``tests`` directory from Mercurial's source
1683 1685 repository.
1684 1686
1685 1687 jobs specifies the number of jobs to run concurrently. Each test
1686 1688 executes on its own thread. Tests actually spawn new processes, so
1687 1689 state mutation should not be an issue.
1688 1690
1689 1691 If there is only one job, it will use the main thread.
1690 1692
1691 1693 whitelist and blacklist denote tests that have been whitelisted and
1692 1694 blacklisted, respectively. These arguments don't belong in TestSuite.
1693 1695 Instead, whitelist and blacklist should be handled by the thing that
1694 1696 populates the TestSuite with tests. They are present to preserve
1695 1697 backwards compatible behavior which reports skipped tests as part
1696 1698 of the results.
1697 1699
1698 1700 retest denotes whether to retest failed tests. This arguably belongs
1699 1701 outside of TestSuite.
1700 1702
1701 1703 keywords denotes key words that will be used to filter which tests
1702 1704 to execute. This arguably belongs outside of TestSuite.
1703 1705
1704 1706 loop denotes whether to loop over tests forever.
1705 1707 """
1706 1708 super(TestSuite, self).__init__(*args, **kwargs)
1707 1709
1708 1710 self._jobs = jobs
1709 1711 self._whitelist = whitelist
1710 1712 self._blacklist = blacklist
1711 1713 self._retest = retest
1712 1714 self._keywords = keywords
1713 1715 self._loop = loop
1714 1716 self._runs_per_test = runs_per_test
1715 1717 self._loadtest = loadtest
1716 1718 self._showchannels = showchannels
1717 1719
1718 1720 def run(self, result):
1719 1721 # We have a number of filters that need to be applied. We do this
1720 1722 # here instead of inside Test because it makes the running logic for
1721 1723 # Test simpler.
1722 1724 tests = []
1723 1725 num_tests = [0]
1724 1726 for test in self._tests:
1725 1727 def get():
1726 1728 num_tests[0] += 1
1727 1729 if getattr(test, 'should_reload', False):
1728 1730 return self._loadtest(test, num_tests[0])
1729 1731 return test
1730 1732 if not os.path.exists(test.path):
1731 1733 result.addSkip(test, "Doesn't exist")
1732 1734 continue
1733 1735
1734 1736 if not (self._whitelist and test.name in self._whitelist):
1735 1737 if self._blacklist and test.bname in self._blacklist:
1736 1738 result.addSkip(test, 'blacklisted')
1737 1739 continue
1738 1740
1739 1741 if self._retest and not os.path.exists(test.errpath):
1740 1742 result.addIgnore(test, 'not retesting')
1741 1743 continue
1742 1744
1743 1745 if self._keywords:
1744 1746 f = open(test.path, 'rb')
1745 1747 t = f.read().lower() + test.bname.lower()
1746 1748 f.close()
1747 1749 ignored = False
1748 1750 for k in self._keywords.lower().split():
1749 1751 if k not in t:
1750 1752 result.addIgnore(test, "doesn't match keyword")
1751 1753 ignored = True
1752 1754 break
1753 1755
1754 1756 if ignored:
1755 1757 continue
1756 1758 for _ in xrange(self._runs_per_test):
1757 1759 tests.append(get())
1758 1760
1759 1761 runtests = list(tests)
1760 1762 done = queue.Queue()
1761 1763 running = 0
1762 1764
1763 1765 channels = [""] * self._jobs
1764 1766
1765 1767 def job(test, result):
1766 1768 for n, v in enumerate(channels):
1767 1769 if not v:
1768 1770 channel = n
1769 1771 break
1770 1772 else:
1771 1773 raise ValueError('Could not find output channel')
1772 1774 channels[channel] = "=" + test.name[5:].split(".")[0]
1773 1775 try:
1774 1776 test(result)
1775 1777 done.put(None)
1776 1778 except KeyboardInterrupt:
1777 1779 pass
1778 1780 except: # re-raises
1779 1781 done.put(('!', test, 'run-test raised an error, see traceback'))
1780 1782 raise
1781 1783 finally:
1782 1784 try:
1783 1785 channels[channel] = ''
1784 1786 except IndexError:
1785 1787 pass
1786 1788
1787 1789 def stat():
1788 1790 count = 0
1789 1791 while channels:
1790 1792 d = '\n%03s ' % count
1791 1793 for n, v in enumerate(channels):
1792 1794 if v:
1793 1795 d += v[0]
1794 1796 channels[n] = v[1:] or '.'
1795 1797 else:
1796 1798 d += ' '
1797 1799 d += ' '
1798 1800 with iolock:
1799 1801 sys.stdout.write(d + ' ')
1800 1802 sys.stdout.flush()
1801 1803 for x in xrange(10):
1802 1804 if channels:
1803 1805 time.sleep(.1)
1804 1806 count += 1
1805 1807
1806 1808 stoppedearly = False
1807 1809
1808 1810 if self._showchannels:
1809 1811 statthread = threading.Thread(target=stat, name="stat")
1810 1812 statthread.start()
1811 1813
1812 1814 try:
1813 1815 while tests or running:
1814 1816 if not done.empty() or running == self._jobs or not tests:
1815 1817 try:
1816 1818 done.get(True, 1)
1817 1819 running -= 1
1818 1820 if result and result.shouldStop:
1819 1821 stoppedearly = True
1820 1822 break
1821 1823 except queue.Empty:
1822 1824 continue
1823 1825 if tests and not running == self._jobs:
1824 1826 test = tests.pop(0)
1825 1827 if self._loop:
1826 1828 if getattr(test, 'should_reload', False):
1827 1829 num_tests[0] += 1
1828 1830 tests.append(
1829 1831 self._loadtest(test, num_tests[0]))
1830 1832 else:
1831 1833 tests.append(test)
1832 1834 if self._jobs == 1:
1833 1835 job(test, result)
1834 1836 else:
1835 1837 t = threading.Thread(target=job, name=test.name,
1836 1838 args=(test, result))
1837 1839 t.start()
1838 1840 running += 1
1839 1841
1840 1842 # If we stop early we still need to wait on started tests to
1841 1843 # finish. Otherwise, there is a race between the test completing
1842 1844 # and the test's cleanup code running. This could result in the
1843 1845 # test reporting incorrect results.
1844 1846 if stoppedearly:
1845 1847 while running:
1846 1848 try:
1847 1849 done.get(True, 1)
1848 1850 running -= 1
1849 1851 except queue.Empty:
1850 1852 continue
1851 1853 except KeyboardInterrupt:
1852 1854 for test in runtests:
1853 1855 test.abort()
1854 1856
1855 1857 channels = []
1856 1858
1857 1859 return result
1858 1860
1859 1861 # Save the most recent 5 wall-clock runtimes of each test to a
1860 1862 # human-readable text file named .testtimes. Tests are sorted
1861 1863 # alphabetically, while times for each test are listed from oldest to
1862 1864 # newest.
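# An illustrative (hypothetical) .testtimes file written by savetimes()
# below would look like:
#
#   test-commit.t 1.021 0.987 1.110
#   test-status.t 0.412 0.398 0.405
#
# i.e. one line per test: the test name followed by up to five recent
# wall-clock runtimes, each formatted with '%.3f'.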
1863 1865
1864 1866 def loadtimes(testdir):
1865 1867 times = []
1866 1868 try:
1867 1869 with open(os.path.join(testdir, b'.testtimes')) as fp:
1868 1870 for line in fp:
1869 1871 ts = line.split()
1870 1872 times.append((ts[0], [float(t) for t in ts[1:]]))
1871 1873 except IOError as err:
1872 1874 if err.errno != errno.ENOENT:
1873 1875 raise
1874 1876 return times
1875 1877
1876 1878 def savetimes(testdir, result):
1877 1879 saved = dict(loadtimes(testdir))
1878 1880 maxruns = 5
1879 1881 skipped = set([str(t[0]) for t in result.skipped])
1880 1882 for tdata in result.times:
1881 1883 test, real = tdata[0], tdata[3]
1882 1884 if test not in skipped:
1883 1885 ts = saved.setdefault(test, [])
1884 1886 ts.append(real)
1885 1887 ts[:] = ts[-maxruns:]
1886 1888
1887 1889 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
1888 1890 dir=testdir, text=True)
1889 1891 with os.fdopen(fd, 'w') as fp:
1890 1892 for name, ts in sorted(saved.items()):
1891 1893 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1892 1894 timepath = os.path.join(testdir, b'.testtimes')
1893 1895 try:
1894 1896 os.unlink(timepath)
1895 1897 except OSError:
1896 1898 pass
1897 1899 try:
1898 1900 os.rename(tmpname, timepath)
1899 1901 except OSError:
1900 1902 pass
1901 1903
1902 1904 class TextTestRunner(unittest.TextTestRunner):
1903 1905 """Custom unittest test runner that uses appropriate settings."""
1904 1906
1905 1907 def __init__(self, runner, *args, **kwargs):
1906 1908 super(TextTestRunner, self).__init__(*args, **kwargs)
1907 1909
1908 1910 self._runner = runner
1909 1911
1912 def listtests(self, test):
1913 result = TestResult(self._runner.options, self.stream,
1914 self.descriptions, 0)
1915 test = sorted(test, key=lambda t: t.name)
1916 for t in test:
1917 print(t.name)
1918 result.addSuccess(t)
1919
1920 if self._runner.options.xunit:
1921 with open(self._runner.options.xunit, "wb") as xuf:
1922 self._writexunit(result, xuf)
1923
1924 if self._runner.options.json:
1925 jsonpath = os.path.join(self._runner._testdir, b'report.json')
1926 with open(jsonpath, 'w') as fp:
1927 self._writejson(result, fp)
1928
1929 return result
1930
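# Illustrative use of listtests() above (hypothetical invocation):
#
#   ./run-tests.py --list-tests --json --xunit=tests.xml test-foo*
#
# prints the matching test names in sorted order, records each one via
# addSuccess(), and -- when the --json / --xunit options are given --
# also writes them into report.json in the test directory and into the
# XUnit file.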
1910 1931 def run(self, test):
1911 1932 result = TestResult(self._runner.options, self.stream,
1912 1933 self.descriptions, self.verbosity)
1913 1934
1914 1935 test(result)
1915 1936
1916 1937 failed = len(result.failures)
1917 1938 warned = len(result.warned)
1918 1939 skipped = len(result.skipped)
1919 1940 ignored = len(result.ignored)
1920 1941
1921 1942 with iolock:
1922 1943 self.stream.writeln('')
1923 1944
1924 1945 if not self._runner.options.noskips:
1925 1946 for test, msg in result.skipped:
1926 1947 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1927 1948 for test, msg in result.warned:
1928 1949 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1929 1950 for test, msg in result.failures:
1930 1951 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1931 1952 for test, msg in result.errors:
1932 1953 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1933 1954
1934 1955 if self._runner.options.xunit:
1935 1956 with open(self._runner.options.xunit, "wb") as xuf:
1936 1957 self._writexunit(result, xuf)
1937 1958
1938 1959 if self._runner.options.json:
1939 1960 jsonpath = os.path.join(self._runner._testdir, b'report.json')
1940 1961 with open(jsonpath, 'w') as fp:
1941 1962 self._writejson(result, fp)
1942 1963
1943 1964 self._runner._checkhglib('Tested')
1944 1965
1945 1966 savetimes(self._runner._testdir, result)
1946 1967
1947 1968 if failed and self._runner.options.known_good_rev:
1948 1969 def nooutput(args):
1949 1970 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
1950 1971 stdout=subprocess.PIPE)
1951 1972 p.stdout.read()
1952 1973 p.wait()
1953 1974 for test, msg in result.failures:
1954 1975 nooutput(['hg', 'bisect', '--reset'])
1955 1976 nooutput(['hg', 'bisect', '--bad', '.'])
1956 1977 nooutput(['hg', 'bisect', '--good',
1957 1978 self._runner.options.known_good_rev])
1958 1979 # TODO: we probably need to forward some options
1959 1980 # that alter hg's behavior inside the tests.
1960 1981 rtc = '%s %s %s' % (sys.executable, sys.argv[0], test)
1961 1982 sub = subprocess.Popen(['hg', 'bisect', '--command', rtc],
1962 1983 stderr=subprocess.STDOUT,
1963 1984 stdout=subprocess.PIPE)
1964 1985 data = sub.stdout.read()
1965 1986 sub.wait()
1966 1987 m = re.search(
1967 1988 (r'\nThe first (?P<goodbad>bad|good) revision '
1968 1989 r'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
1969 1990 r'summary: +(?P<summary>[^\n]+)\n'),
1970 1991 data, (re.MULTILINE | re.DOTALL))
1971 1992 if m is None:
1972 1993 self.stream.writeln(
1973 1994 'Failed to identify failure point for %s' % test)
1974 1995 continue
1975 1996 dat = m.groupdict()
1976 1997 verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
1977 1998 self.stream.writeln(
1978 1999 '%s %s by %s (%s)' % (
1979 2000 test, verb, dat['node'], dat['summary']))
1980 2001 self.stream.writeln(
1981 2002 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1982 2003 % (result.testsRun,
1983 2004 skipped + ignored, warned, failed))
1984 2005 if failed:
1985 2006 self.stream.writeln('python hash seed: %s' %
1986 2007 os.environ['PYTHONHASHSEED'])
1987 2008 if self._runner.options.time:
1988 2009 self.printtimes(result.times)
1989 2010
1990 2011 return result
1991 2012
1992 2013 def printtimes(self, times):
1993 2014 # iolock held by run
1994 2015 self.stream.writeln('# Producing time report')
1995 2016 times.sort(key=lambda t: (t[3]))
1996 2017 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1997 2018 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1998 2019 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1999 2020 for tdata in times:
2000 2021 test = tdata[0]
2001 2022 cuser, csys, real, start, end = tdata[1:6]
2002 2023 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2003 2024
2004 2025 @staticmethod
2005 2026 def _writexunit(result, outf):
2006 2027 timesd = dict((t[0], t[3]) for t in result.times)
2007 2028 doc = minidom.Document()
2008 2029 s = doc.createElement('testsuite')
2009 2030 s.setAttribute('name', 'run-tests')
2010 2031 s.setAttribute('tests', str(result.testsRun))
2011 2032 s.setAttribute('errors', "0") # TODO
2012 2033 s.setAttribute('failures', str(len(result.failures)))
2013 2034 s.setAttribute('skipped', str(len(result.skipped) +
2014 2035 len(result.ignored)))
2015 2036 doc.appendChild(s)
2016 2037 for tc in result.successes:
2017 2038 t = doc.createElement('testcase')
2018 2039 t.setAttribute('name', tc.name)
2019 2040 tctime = timesd.get(tc.name)
2020 2041 if tctime is not None:
2021 2042 t.setAttribute('time', '%.3f' % tctime)
2022 2043 s.appendChild(t)
2023 2044 for tc, err in sorted(result.faildata.items()):
2024 2045 t = doc.createElement('testcase')
2025 2046 t.setAttribute('name', tc)
2026 2047 tctime = timesd.get(tc)
2027 2048 if tctime is not None:
2028 2049 t.setAttribute('time', '%.3f' % tctime)
2029 2050 # createCDATASection expects a unicode or it will
2030 2051 # convert using default conversion rules, which will
2031 2052 # fail if string isn't ASCII.
2032 2053 err = cdatasafe(err).decode('utf-8', 'replace')
2033 2054 cd = doc.createCDATASection(err)
2034 2055 t.appendChild(cd)
2035 2056 s.appendChild(t)
2036 2057 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2037 2058
2038 2059 @staticmethod
2039 2060 def _writejson(result, outf):
2040 2061 timesd = {}
2041 2062 for tdata in result.times:
2042 2063 test = tdata[0]
2043 2064 timesd[test] = tdata[1:]
2044 2065
2045 2066 outcome = {}
2046 2067 groups = [('success', ((tc, None)
2047 2068 for tc in result.successes)),
2048 2069 ('failure', result.failures),
2049 2070 ('skip', result.skipped)]
2050 2071 for res, testcases in groups:
2051 2072 for tc, __ in testcases:
2052 2073 if tc.name in timesd:
2053 2074 diff = result.faildata.get(tc.name, b'')
2054 2075 tres = {'result': res,
2055 2076 'time': ('%0.3f' % timesd[tc.name][2]),
2056 2077 'cuser': ('%0.3f' % timesd[tc.name][0]),
2057 2078 'csys': ('%0.3f' % timesd[tc.name][1]),
2058 2079 'start': ('%0.3f' % timesd[tc.name][3]),
2059 2080 'end': ('%0.3f' % timesd[tc.name][4]),
2060 2081 'diff': diff.decode('unicode_escape'),
2061 2082 }
2062 2083 else:
2063 2084 # blacklisted test
2064 2085 tres = {'result': res}
2065 2086
2066 2087 outcome[tc.name] = tres
2067 2088 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2068 2089 separators=(',', ': '))
2069 2090 outf.writelines(("testreport =", jsonout))
2070 2091
2071 2092 class TestRunner(object):
2072 2093 """Holds context for executing tests.
2073 2094
2074 2095 Tests rely on a lot of state. This object holds it for them.
2075 2096 """
2076 2097
2077 2098 # Programs required to run tests.
2078 2099 REQUIREDTOOLS = [
2079 2100 os.path.basename(_bytespath(sys.executable)),
2080 2101 b'diff',
2081 2102 b'grep',
2082 2103 b'unzip',
2083 2104 b'gunzip',
2084 2105 b'bunzip2',
2085 2106 b'sed',
2086 2107 ]
2087 2108
2088 2109 # Maps file extensions to test class.
2089 2110 TESTTYPES = [
2090 2111 (b'.py', PythonTest),
2091 2112 (b'.t', TTest),
2092 2113 ]
2093 2114
2094 2115 def __init__(self):
2095 2116 self.options = None
2096 2117 self._hgroot = None
2097 2118 self._testdir = None
2098 2119 self._hgtmp = None
2099 2120 self._installdir = None
2100 2121 self._bindir = None
2101 2122 self._tmpbinddir = None
2102 2123 self._pythondir = None
2103 2124 self._coveragefile = None
2104 2125 self._createdfiles = []
2105 2126 self._hgcommand = None
2106 2127 self._hgpath = None
2107 2128 self._portoffset = 0
2108 2129 self._ports = {}
2109 2130
2110 2131 def run(self, args, parser=None):
2111 2132 """Run the test suite."""
2112 2133 oldmask = os.umask(0o22)
2113 2134 try:
2114 2135 parser = parser or getparser()
2115 2136 options, args = parseargs(args, parser)
2116 2137 # positional arguments are paths to test files to run, so
2117 2138 # we make sure they're all bytestrings
2118 2139 args = [_bytespath(a) for a in args]
2119 2140 self.options = options
2120 2141
2121 2142 self._checktools()
2122 2143 testdescs = self.findtests(args)
2123 2144 if options.profile_runner:
2124 2145 import statprof
2125 2146 statprof.start()
2126 2147 result = self._run(testdescs)
2127 2148 if options.profile_runner:
2128 2149 statprof.stop()
2129 2150 statprof.display()
2130 2151 return result
2131 2152
2132 2153 finally:
2133 2154 os.umask(oldmask)
2134 2155
2135 2156 def _run(self, testdescs):
2136 2157 if self.options.random:
2137 2158 random.shuffle(testdescs)
2138 2159 else:
2139 2160 # keywords for slow tests
2140 2161 slow = {b'svn': 10,
2141 2162 b'cvs': 10,
2142 2163 b'hghave': 10,
2143 2164 b'largefiles-update': 10,
2144 2165 b'run-tests': 10,
2145 2166 b'corruption': 10,
2146 2167 b'race': 10,
2147 2168 b'i18n': 10,
2148 2169 b'check': 100,
2149 2170 b'gendoc': 100,
2150 2171 b'contrib-perf': 200,
2151 2172 }
2152 2173 perf = {}
2153 2174 def sortkey(f):
2154 2175 # run largest tests first, as they tend to take the longest
2155 2176 f = f['path']
2156 2177 try:
2157 2178 return perf[f]
2158 2179 except KeyError:
2159 2180 try:
2160 2181 val = -os.stat(f).st_size
2161 2182 except OSError as e:
2162 2183 if e.errno != errno.ENOENT:
2163 2184 raise
2164 2185 perf[f] = -1e9 # file does not exist, tell early
2165 2186 return -1e9
2166 2187 for kw, mul in slow.items():
2167 2188 if kw in f:
2168 2189 val *= mul
2169 2190 if f.endswith(b'.py'):
2170 2191 val /= 10.0
2171 2192 perf[f] = val / 1000.0
2172 2193 return perf[f]
2173 2194 testdescs.sort(key=sortkey)
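# Illustrative example of the heuristic above (hypothetical sizes): a
# 40 kB test-check-commit.t matches the b'check' keyword, so
# val = -40000 * 100 = -4000000, stored as -4000.0 after the final
# division by 1000.0; it therefore sorts well ahead of a 2 kB
# test-simple.t whose key is only -2.0.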
2174 2195
2175 2196 self._testdir = osenvironb[b'TESTDIR'] = getattr(
2176 2197 os, 'getcwdb', os.getcwd)()
2177 2198
2178 2199 if 'PYTHONHASHSEED' not in os.environ:
2179 2200 # use a random python hash seed all the time
2180 2201 # we do the randomness ourself to know what seed is used
2181 2202 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
2182 2203
2183 2204 if self.options.tmpdir:
2184 2205 self.options.keep_tmpdir = True
2185 2206 tmpdir = _bytespath(self.options.tmpdir)
2186 2207 if os.path.exists(tmpdir):
2187 2208 # Meaning of tmpdir has changed since 1.3: we used to create
2188 2209 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2189 2210 # tmpdir already exists.
2190 2211 print("error: temp dir %r already exists" % tmpdir)
2191 2212 return 1
2192 2213
2193 2214 # Automatically removing tmpdir sounds convenient, but could
2194 2215 # really annoy anyone in the habit of using "--tmpdir=/tmp"
2195 2216 # or "--tmpdir=$HOME".
2196 2217 #vlog("# Removing temp dir", tmpdir)
2197 2218 #shutil.rmtree(tmpdir)
2198 2219 os.makedirs(tmpdir)
2199 2220 else:
2200 2221 d = None
2201 2222 if os.name == 'nt':
2202 2223 # without this, we get the default temp dir location, but
2203 2224 # in all lowercase, which causes trouble with paths (issue3490)
2204 2225 d = osenvironb.get(b'TMP', None)
2205 2226 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2206 2227
2207 2228 self._hgtmp = osenvironb[b'HGTMP'] = (
2208 2229 os.path.realpath(tmpdir))
2209 2230
2210 2231 if self.options.with_hg:
2211 2232 self._installdir = None
2212 2233 whg = self.options.with_hg
2213 2234 self._bindir = os.path.dirname(os.path.realpath(whg))
2214 2235 assert isinstance(self._bindir, bytes)
2215 2236 self._hgcommand = os.path.basename(whg)
2216 2237 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2217 2238 os.makedirs(self._tmpbindir)
2218 2239
2219 2240 # This looks redundant with how Python initializes sys.path from
2220 2241 # the location of the script being executed. Needed because the
2221 2242 # "hg" specified by --with-hg is not the only Python script
2222 2243 # executed in the test suite that needs to import 'mercurial'
2223 2244 # ... which means it's not really redundant at all.
2224 2245 self._pythondir = self._bindir
2225 2246 else:
2226 2247 self._installdir = os.path.join(self._hgtmp, b"install")
2227 2248 self._bindir = os.path.join(self._installdir, b"bin")
2228 2249 self._hgcommand = b'hg'
2229 2250 self._tmpbindir = self._bindir
2230 2251 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2231 2252
2232 2253 # set CHGHG, then replace "hg" command by "chg"
2233 2254 chgbindir = self._bindir
2234 2255 if self.options.chg or self.options.with_chg:
2235 2256 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2236 2257 else:
2237 2258 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2238 2259 if self.options.chg:
2239 2260 self._hgcommand = b'chg'
2240 2261 elif self.options.with_chg:
2241 2262 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2242 2263 self._hgcommand = os.path.basename(self.options.with_chg)
2243 2264
2244 2265 osenvironb[b"BINDIR"] = self._bindir
2245 2266 osenvironb[b"PYTHON"] = PYTHON
2246 2267
2247 2268 if self.options.with_python3:
2248 2269 osenvironb[b'PYTHON3'] = self.options.with_python3
2249 2270
2250 2271 fileb = _bytespath(__file__)
2251 2272 runtestdir = os.path.abspath(os.path.dirname(fileb))
2252 2273 osenvironb[b'RUNTESTDIR'] = runtestdir
2253 2274 if PYTHON3:
2254 2275 sepb = _bytespath(os.pathsep)
2255 2276 else:
2256 2277 sepb = os.pathsep
2257 2278 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2258 2279 if os.path.islink(__file__):
2259 2280 # test helper will likely be at the end of the symlink
2260 2281 realfile = os.path.realpath(fileb)
2261 2282 realdir = os.path.abspath(os.path.dirname(realfile))
2262 2283 path.insert(2, realdir)
2263 2284 if chgbindir != self._bindir:
2264 2285 path.insert(1, chgbindir)
2265 2286 if self._testdir != runtestdir:
2266 2287 path = [self._testdir] + path
2267 2288 if self._tmpbindir != self._bindir:
2268 2289 path = [self._tmpbindir] + path
2269 2290 osenvironb[b"PATH"] = sepb.join(path)
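# Illustrative resulting order in the simplest case (no chg, no
# symlinked script, testdir == runtestdir, tmpbindir == bindir):
# just bindir, then runtestdir, then the pre-existing PATH entries,
# so the freshly installed hg shadows any system-wide one.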
2270 2291
2271 2292 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2272 2293 # can run .../tests/run-tests.py test-foo where test-foo
2273 2294 # adds an extension to HGRC. Also include run-tests.py directory to
2274 2295 # import modules like heredoctest.
2275 2296 pypath = [self._pythondir, self._testdir, runtestdir]
2276 2297 # We have to augment PYTHONPATH, rather than simply replacing
2277 2298 # it, in case external libraries are only available via current
2278 2299 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2279 2300 # are in /opt/subversion.)
2280 2301 oldpypath = osenvironb.get(IMPL_PATH)
2281 2302 if oldpypath:
2282 2303 pypath.append(oldpypath)
2283 2304 osenvironb[IMPL_PATH] = sepb.join(pypath)
2284 2305
2285 2306 if self.options.pure:
2286 2307 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2287 2308 os.environ["HGMODULEPOLICY"] = "py"
2288 2309
2289 2310 if self.options.allow_slow_tests:
2290 2311 os.environ["HGTEST_SLOW"] = "slow"
2291 2312 elif 'HGTEST_SLOW' in os.environ:
2292 2313 del os.environ['HGTEST_SLOW']
2293 2314
2294 2315 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2295 2316
2296 2317 vlog("# Using TESTDIR", self._testdir)
2297 2318 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2298 2319 vlog("# Using HGTMP", self._hgtmp)
2299 2320 vlog("# Using PATH", os.environ["PATH"])
2300 2321 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2301 2322
2302 2323 try:
2303 2324 return self._runtests(testdescs) or 0
2304 2325 finally:
2305 2326 time.sleep(.1)
2306 2327 self._cleanup()
2307 2328
2308 2329 def findtests(self, args):
2309 2330 """Finds possible test files from arguments.
2310 2331
2311 2332 If you wish to inject custom tests into the test harness, this would
2312 2333 be a good function to monkeypatch or override in a derived class.
2313 2334 """
2314 2335 if not args:
2315 2336 if self.options.changed:
2316 2337 proc = Popen4('hg st --rev "%s" -man0 .' %
2317 2338 self.options.changed, None, 0)
2318 2339 stdout, stderr = proc.communicate()
2319 2340 args = stdout.strip(b'\0').split(b'\0')
2320 2341 else:
2321 2342 args = os.listdir(b'.')
2322 2343
2323 2344 tests = []
2324 2345 for t in args:
2325 2346 if not (os.path.basename(t).startswith(b'test-')
2326 2347 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2327 2348 continue
2328 2349 if t.endswith(b'.t'):
2329 2350 # .t file may contain multiple test cases
2330 2351 cases = sorted(parsettestcases(t))
2331 2352 if cases:
2332 2353 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2333 2354 else:
2334 2355 tests.append({'path': t})
2335 2356 else:
2336 2357 tests.append({'path': t})
2337 2358 return tests
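# Illustrative example for findtests() above (hypothetical file): a
# test-merge.t that declares test cases "abort" and "continue" via
# parsettestcases() expands into two descriptions,
# {'path': b'test-merge.t', 'case': 'abort'} and
# {'path': b'test-merge.t', 'case': 'continue'}, while a plain .t or
# .py test contributes a single {'path': ...} entry.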
2338 2359
2339 2360 def _runtests(self, testdescs):
2340 2361 def _reloadtest(test, i):
2341 2362 # convert a test back to its description dict
2342 2363 desc = {'path': test.path}
2343 2364 case = getattr(test, '_case', None)
2344 2365 if case:
2345 2366 desc['case'] = case
2346 2367 return self._gettest(desc, i)
2347 2368
2348 2369 try:
2349 2370 if self.options.restart:
2350 2371 orig = list(testdescs)
2351 2372 while testdescs:
2352 2373 desc = testdescs[0]
2353 2374 if 'case' in desc:
2354 2375 errpath = b'%s.%s.err' % (desc['path'], desc['case'])
2355 2376 else:
2356 2377 errpath = b'%s.err' % desc['path']
2357 2378 if os.path.exists(errpath):
2358 2379 break
2359 2380 testdescs.pop(0)
2360 2381 if not testdescs:
2361 2382 print("running all tests")
2362 2383 testdescs = orig
2363 2384
2364 2385 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
2365 2386
2366 2387 failed = False
2367 2388 warned = False
2368 2389 kws = self.options.keywords
2369 2390 if kws is not None and PYTHON3:
2370 2391 kws = kws.encode('utf-8')
2371 2392
2372 2393 suite = TestSuite(self._testdir,
2373 2394 jobs=self.options.jobs,
2374 2395 whitelist=self.options.whitelisted,
2375 2396 blacklist=self.options.blacklist,
2376 2397 retest=self.options.retest,
2377 2398 keywords=kws,
2378 2399 loop=self.options.loop,
2379 2400 runs_per_test=self.options.runs_per_test,
2380 2401 showchannels=self.options.showchannels,
2381 2402 tests=tests, loadtest=_reloadtest)
2382 2403 verbosity = 1
2383 2404 if self.options.verbose:
2384 2405 verbosity = 2
2385 2406 runner = TextTestRunner(self, verbosity=verbosity)
2386 2407
2408 if self.options.list_tests:
2409 result = runner.listtests(suite)
2410 else:
2387 2411 if self._installdir:
2388 2412 self._installhg()
2389 2413 self._checkhglib("Testing")
2390 2414 else:
2391 2415 self._usecorrectpython()
2392 2416 if self.options.chg:
2393 2417 assert self._installdir
2394 2418 self._installchg()
2395 2419
2396 2420 result = runner.run(suite)
2397 2421
2398 2422 if result.failures:
2399 2423 failed = True
2400 2424 if result.warned:
2401 2425 warned = True
2402 2426
2403 2427 if self.options.anycoverage:
2404 2428 self._outputcoverage()
2405 2429 except KeyboardInterrupt:
2406 2430 failed = True
2407 2431 print("\ninterrupted!")
2408 2432
2409 2433 if failed:
2410 2434 return 1
2411 2435 if warned:
2412 2436 return 80
2413 2437
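# Illustrative example for _getport() below (hypothetical base port):
# with --port 20059, the first test is assigned 20059 and may use
# 20059-20061; the next test gets the following free block of three
# ports (20062-20064 if available), since each attempt advances the
# offset by three and skips blocks with a port already in use.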
2414 2438 def _getport(self, count):
2415 2439 port = self._ports.get(count) # do we have a cached entry?
2416 2440 if port is None:
2417 2441 portneeded = 3
2418 2442 # after 100 tries we just give up and let the test report the failure
2419 2443 for tries in xrange(100):
2420 2444 allfree = True
2421 2445 port = self.options.port + self._portoffset
2422 2446 for idx in xrange(portneeded):
2423 2447 if not checkportisavailable(port + idx):
2424 2448 allfree = False
2425 2449 break
2426 2450 self._portoffset += portneeded
2427 2451 if allfree:
2428 2452 break
2429 2453 self._ports[count] = port
2430 2454 return port
2431 2455
2432 2456 def _gettest(self, testdesc, count):
2433 2457 """Obtain a Test by looking at its filename.
2434 2458
2435 2459 Returns a Test instance. The Test may not be runnable if it doesn't
2436 2460 map to a known type.
2437 2461 """
2438 2462 path = testdesc['path']
2439 2463 lctest = path.lower()
2440 2464 testcls = Test
2441 2465
2442 2466 for ext, cls in self.TESTTYPES:
2443 2467 if lctest.endswith(ext):
2444 2468 testcls = cls
2445 2469 break
2446 2470
2447 2471 refpath = os.path.join(self._testdir, path)
2448 2472 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2449 2473
2450 2474 # extra keyword parameters. 'case' is used by .t tests
2451 2475 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2452 2476
2453 2477 t = testcls(refpath, tmpdir,
2454 2478 keeptmpdir=self.options.keep_tmpdir,
2455 2479 debug=self.options.debug,
2456 2480 timeout=self.options.timeout,
2457 2481 startport=self._getport(count),
2458 2482 extraconfigopts=self.options.extra_config_opt,
2459 2483 py3kwarnings=self.options.py3k_warnings,
2460 2484 shell=self.options.shell,
2461 2485 hgcommand=self._hgcommand,
2462 2486 usechg=bool(self.options.with_chg or self.options.chg),
2463 2487 useipv6=useipv6, **kwds)
2464 2488 t.should_reload = True
2465 2489 return t
2466 2490
2467 2491 def _cleanup(self):
2468 2492 """Clean up state from this test invocation."""
2469 2493 if self.options.keep_tmpdir:
2470 2494 return
2471 2495
2472 2496 vlog("# Cleaning up HGTMP", self._hgtmp)
2473 2497 shutil.rmtree(self._hgtmp, True)
2474 2498 for f in self._createdfiles:
2475 2499 try:
2476 2500 os.remove(f)
2477 2501 except OSError:
2478 2502 pass
2479 2503
2480 2504 def _usecorrectpython(self):
2481 2505 """Configure the environment to use the appropriate Python in tests."""
2482 2506 # Tests must use the same interpreter as us or bad things will happen.
2483 2507 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2484 2508 if getattr(os, 'symlink', None):
2485 2509 vlog("# Making python executable in test path a symlink to '%s'" %
2486 2510 sys.executable)
2487 2511 mypython = os.path.join(self._tmpbindir, pyexename)
2488 2512 try:
2489 2513 if os.readlink(mypython) == sys.executable:
2490 2514 return
2491 2515 os.unlink(mypython)
2492 2516 except OSError as err:
2493 2517 if err.errno != errno.ENOENT:
2494 2518 raise
2495 2519 if self._findprogram(pyexename) != sys.executable:
2496 2520 try:
2497 2521 os.symlink(sys.executable, mypython)
2498 2522 self._createdfiles.append(mypython)
2499 2523 except OSError as err:
2500 2524 # child processes may race, which is harmless
2501 2525 if err.errno != errno.EEXIST:
2502 2526 raise
2503 2527 else:
2504 2528 exedir, exename = os.path.split(sys.executable)
2505 2529 vlog("# Modifying search path to find %s as %s in '%s'" %
2506 2530 (exename, pyexename, exedir))
2507 2531 path = os.environ['PATH'].split(os.pathsep)
2508 2532 while exedir in path:
2509 2533 path.remove(exedir)
2510 2534 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2511 2535 if not self._findprogram(pyexename):
2512 2536 print("WARNING: Cannot find %s in search path" % pyexename)
2513 2537
2514 2538 def _installhg(self):
2515 2539 """Install hg into the test environment.
2516 2540
2517 2541 This will also configure hg with the appropriate testing settings.
2518 2542 """
2519 2543 vlog("# Performing temporary installation of HG")
2520 2544 installerrs = os.path.join(self._hgtmp, b"install.err")
2521 2545 compiler = ''
2522 2546 if self.options.compiler:
2523 2547 compiler = '--compiler ' + self.options.compiler
2524 2548 if self.options.pure:
2525 2549 pure = b"--pure"
2526 2550 else:
2527 2551 pure = b""
2528 2552
2529 2553 # Run installer in hg root
2530 2554 script = os.path.realpath(sys.argv[0])
2531 2555 exe = sys.executable
2532 2556 if PYTHON3:
2533 2557 compiler = _bytespath(compiler)
2534 2558 script = _bytespath(script)
2535 2559 exe = _bytespath(exe)
2536 2560 hgroot = os.path.dirname(os.path.dirname(script))
2537 2561 self._hgroot = hgroot
2538 2562 os.chdir(hgroot)
2539 2563 nohome = b'--home=""'
2540 2564 if os.name == 'nt':
2541 2565 # The --home="" trick works only on OSes where os.sep == '/'
2542 2566 # because of a distutils convert_path() fast-path. Avoid it at
2543 2567 # least on Windows for now, deal with .pydistutils.cfg bugs
2544 2568 # when they happen.
2545 2569 nohome = b''
2546 2570 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2547 2571 b' build %(compiler)s --build-base="%(base)s"'
2548 2572 b' install --force --prefix="%(prefix)s"'
2549 2573 b' --install-lib="%(libdir)s"'
2550 2574 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2551 2575 % {b'exe': exe, b'pure': pure,
2552 2576 b'compiler': compiler,
2553 2577 b'base': os.path.join(self._hgtmp, b"build"),
2554 2578 b'prefix': self._installdir, b'libdir': self._pythondir,
2555 2579 b'bindir': self._bindir,
2556 2580 b'nohome': nohome, b'logfile': installerrs})
2557 2581
2558 2582 # setuptools requires install directories to exist.
2559 2583 def makedirs(p):
2560 2584 try:
2561 2585 os.makedirs(p)
2562 2586 except OSError as e:
2563 2587 if e.errno != errno.EEXIST:
2564 2588 raise
2565 2589 makedirs(self._pythondir)
2566 2590 makedirs(self._bindir)
2567 2591
2568 2592 vlog("# Running", cmd)
2569 2593 if os.system(cmd) == 0:
2570 2594 if not self.options.verbose:
2571 2595 try:
2572 2596 os.remove(installerrs)
2573 2597 except OSError as e:
2574 2598 if e.errno != errno.ENOENT:
2575 2599 raise
2576 2600 else:
2577 2601 f = open(installerrs, 'rb')
2578 2602 for line in f:
2579 2603 if PYTHON3:
2580 2604 sys.stdout.buffer.write(line)
2581 2605 else:
2582 2606 sys.stdout.write(line)
2583 2607 f.close()
2584 2608 sys.exit(1)
2585 2609 os.chdir(self._testdir)
2586 2610
2587 2611 self._usecorrectpython()
2588 2612
2589 2613 if self.options.py3k_warnings and not self.options.anycoverage:
2590 2614 vlog("# Updating hg command to enable Py3k Warnings switch")
2591 2615 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2592 2616 lines = [line.rstrip() for line in f]
2593 2617 lines[0] += ' -3'
2594 2618 f.close()
2595 2619 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2596 2620 for line in lines:
2597 2621 f.write(line + '\n')
2598 2622 f.close()
2599 2623
2600 2624 hgbat = os.path.join(self._bindir, b'hg.bat')
2601 2625 if os.path.isfile(hgbat):
2602 2626 # hg.bat expects to be put in bin/scripts while run-tests.py
2603 2627 # installation layout puts it in bin/ directly. Fix it
2604 2628 f = open(hgbat, 'rb')
2605 2629 data = f.read()
2606 2630 f.close()
2607 2631 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2608 2632 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2609 2633 b'"%~dp0python" "%~dp0hg" %*')
2610 2634 f = open(hgbat, 'wb')
2611 2635 f.write(data)
2612 2636 f.close()
2613 2637 else:
2614 2638 print('WARNING: cannot fix hg.bat reference to python.exe')
2615 2639
2616 2640 if self.options.anycoverage:
2617 2641 custom = os.path.join(self._testdir, 'sitecustomize.py')
2618 2642 target = os.path.join(self._pythondir, 'sitecustomize.py')
2619 2643 vlog('# Installing coverage trigger to %s' % target)
2620 2644 shutil.copyfile(custom, target)
2621 2645 rc = os.path.join(self._testdir, '.coveragerc')
2622 2646 vlog('# Installing coverage rc to %s' % rc)
2623 2647 os.environ['COVERAGE_PROCESS_START'] = rc
2624 2648 covdir = os.path.join(self._installdir, '..', 'coverage')
2625 2649 try:
2626 2650 os.mkdir(covdir)
2627 2651 except OSError as e:
2628 2652 if e.errno != errno.EEXIST:
2629 2653 raise
2630 2654
2631 2655 os.environ['COVERAGE_DIR'] = covdir
2632 2656
2633 2657 def _checkhglib(self, verb):
2634 2658 """Ensure that the 'mercurial' package imported by python is
2635 2659 the one we expect it to be. If not, print a warning to stderr."""
2636 2660 if ((self._bindir == self._pythondir) and
2637 2661 (self._bindir != self._tmpbindir)):
2638 2662 # The pythondir has been inferred from --with-hg flag.
2639 2663 # We cannot expect anything sensible here.
2640 2664 return
2641 2665 expecthg = os.path.join(self._pythondir, b'mercurial')
2642 2666 actualhg = self._gethgpath()
2643 2667 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2644 2668 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2645 2669 ' (expected %s)\n'
2646 2670 % (verb, actualhg, expecthg))
2647 2671 def _gethgpath(self):
2648 2672 """Return the path to the mercurial package that is actually found by
2649 2673 the current Python interpreter."""
2650 2674 if self._hgpath is not None:
2651 2675 return self._hgpath
2652 2676
2653 2677 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2654 2678 cmd = cmd % PYTHON
2655 2679 if PYTHON3:
2656 2680 cmd = _strpath(cmd)
2657 2681 pipe = os.popen(cmd)
2658 2682 try:
2659 2683 self._hgpath = _bytespath(pipe.read().strip())
2660 2684 finally:
2661 2685 pipe.close()
2662 2686
2663 2687 return self._hgpath
2664 2688
2665 2689 def _installchg(self):
2666 2690 """Install chg into the test environment"""
2667 2691 vlog('# Performing temporary installation of CHG')
2668 2692 assert os.path.dirname(self._bindir) == self._installdir
2669 2693 assert self._hgroot, 'must be called after _installhg()'
2670 2694 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2671 2695 % {b'make': 'make', # TODO: switch by option or environment?
2672 2696 b'prefix': self._installdir})
2673 2697 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2674 2698 vlog("# Running", cmd)
2675 2699 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2676 2700 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2677 2701 stderr=subprocess.STDOUT)
2678 2702 out, _err = proc.communicate()
2679 2703 if proc.returncode != 0:
2680 2704 if PYTHON3:
2681 2705 sys.stdout.buffer.write(out)
2682 2706 else:
2683 2707 sys.stdout.write(out)
2684 2708 sys.exit(1)
2685 2709
2686 2710 def _outputcoverage(self):
2687 2711 """Produce code coverage output."""
2688 2712 import coverage
2689 2713 coverage = coverage.coverage
2690 2714
2691 2715 vlog('# Producing coverage report')
2692 2716 # chdir is the easiest way to get short, relative paths in the
2693 2717 # output.
2694 2718 os.chdir(self._hgroot)
2695 2719 covdir = os.path.join(self._installdir, '..', 'coverage')
2696 2720 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2697 2721
2698 2722 # Map install directory paths back to source directory.
2699 2723 cov.config.paths['srcdir'] = ['.', self._pythondir]
2700 2724
2701 2725 cov.combine()
2702 2726
2703 2727 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2704 2728 cov.report(ignore_errors=True, omit=omit)
2705 2729
2706 2730 if self.options.htmlcov:
2707 2731 htmldir = os.path.join(self._testdir, 'htmlcov')
2708 2732 cov.html_report(directory=htmldir, omit=omit)
2709 2733 if self.options.annotate:
2710 2734 adir = os.path.join(self._testdir, 'annotated')
2711 2735 if not os.path.isdir(adir):
2712 2736 os.mkdir(adir)
2713 2737 cov.annotate(directory=adir, omit=omit)
2714 2738
2715 2739 def _findprogram(self, program):
2716 2740 """Search PATH for a executable program"""
2717 2741 dpb = _bytespath(os.defpath)
2718 2742 sepb = _bytespath(os.pathsep)
2719 2743 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2720 2744 name = os.path.join(p, program)
2721 2745 if os.name == 'nt' or os.access(name, os.X_OK):
2722 2746 return name
2723 2747 return None
2724 2748
2725 2749 def _checktools(self):
2726 2750 """Ensure tools required to run tests are present."""
2727 2751 for p in self.REQUIREDTOOLS:
2728 2752 if os.name == 'nt' and not p.endswith('.exe'):
2729 2753 p += '.exe'
2730 2754 found = self._findprogram(p)
2731 2755 if found:
2732 2756 vlog("# Found prerequisite", p, "at", found)
2733 2757 else:
2734 2758 print("WARNING: Did not find prerequisite tool: %s " %
2735 2759 p.decode("utf-8"))
2736 2760
2737 2761 if __name__ == '__main__':
2738 2762 runner = TestRunner()
2739 2763
2740 2764 try:
2741 2765 import msvcrt
2742 2766 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2743 2767 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2744 2768 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2745 2769 except ImportError:
2746 2770 pass
2747 2771
2748 2772 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1001 +1,1055
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a successful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 basic failing test
123 123 $ cat > test-failure.t << EOF
124 124 > $ echo babar
125 125 > rataxes
126 126 > This is a noop statement so that
127 127 > this test is still more bytes than success.
128 128 > pad pad pad pad............................................................
129 129 > pad pad pad pad............................................................
130 130 > pad pad pad pad............................................................
131 131 > pad pad pad pad............................................................
132 132 > pad pad pad pad............................................................
133 133 > pad pad pad pad............................................................
134 134 > EOF
135 135
136 136 >>> fh = open('test-failure-unicode.t', 'wb')
137 137 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
138 138 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
139 139
140 140 $ rt
141 141
142 142 --- $TESTTMP/test-failure.t
143 143 +++ $TESTTMP/test-failure.t.err
144 144 @@ -1,5 +1,5 @@
145 145 $ echo babar
146 146 - rataxes
147 147 + babar
148 148 This is a noop statement so that
149 149 this test is still more bytes than success.
150 150 pad pad pad pad............................................................
151 151
152 152 ERROR: test-failure.t output changed
153 153 !.
154 154 --- $TESTTMP/test-failure-unicode.t
155 155 +++ $TESTTMP/test-failure-unicode.t.err
156 156 @@ -1,2 +1,2 @@
157 157 $ echo babar\xce\xb1 (esc)
158 158 - l\xce\xb5\xce\xb5t (esc)
159 159 + babar\xce\xb1 (esc)
160 160
161 161 ERROR: test-failure-unicode.t output changed
162 162 !
163 163 Failed test-failure.t: output changed
164 164 Failed test-failure-unicode.t: output changed
165 165 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
166 166 python hash seed: * (glob)
167 167 [1]
168 168
169 169 test --xunit support
170 170 $ rt --xunit=xunit.xml
171 171
172 172 --- $TESTTMP/test-failure.t
173 173 +++ $TESTTMP/test-failure.t.err
174 174 @@ -1,5 +1,5 @@
175 175 $ echo babar
176 176 - rataxes
177 177 + babar
178 178 This is a noop statement so that
179 179 this test is still more bytes than success.
180 180 pad pad pad pad............................................................
181 181
182 182 ERROR: test-failure.t output changed
183 183 !.
184 184 --- $TESTTMP/test-failure-unicode.t
185 185 +++ $TESTTMP/test-failure-unicode.t.err
186 186 @@ -1,2 +1,2 @@
187 187 $ echo babar\xce\xb1 (esc)
188 188 - l\xce\xb5\xce\xb5t (esc)
189 189 + babar\xce\xb1 (esc)
190 190
191 191 ERROR: test-failure-unicode.t output changed
192 192 !
193 193 Failed test-failure.t: output changed
194 194 Failed test-failure-unicode.t: output changed
195 195 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
196 196 python hash seed: * (glob)
197 197 [1]
198 198 $ cat xunit.xml
199 199 <?xml version="1.0" encoding="utf-8"?>
200 200 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
201 201 <testcase name="test-success.t" time="*"/> (glob)
202 202 <testcase name="test-failure-unicode.t" time="*"> (glob)
203 203 <![CDATA[--- $TESTTMP/test-failure-unicode.t
204 204 +++ $TESTTMP/test-failure-unicode.t.err
205 205 @@ -1,2 +1,2 @@
206 206 $ echo babar\xce\xb1 (esc)
207 207 - l\xce\xb5\xce\xb5t (esc)
208 208 + babar\xce\xb1 (esc)
209 209 ]]> </testcase>
210 210 <testcase name="test-failure.t" time="*"> (glob)
211 211 <![CDATA[--- $TESTTMP/test-failure.t
212 212 +++ $TESTTMP/test-failure.t.err
213 213 @@ -1,5 +1,5 @@
214 214 $ echo babar
215 215 - rataxes
216 216 + babar
217 217 This is a noop statement so that
218 218 this test is still more bytes than success.
219 219 pad pad pad pad............................................................
220 220 ]]> </testcase>
221 221 </testsuite>
222 222
223 223 $ cat .testtimes
224 224 test-failure-unicode.t * (glob)
225 225 test-failure.t * (glob)
226 226 test-success.t * (glob)
227
228 $ rt --list-tests
229 test-failure-unicode.t
230 test-failure.t
231 test-success.t
232
233 $ rt --list-tests --json
234 test-failure-unicode.t
235 test-failure.t
236 test-success.t
237 $ cat report.json
238 testreport ={
239 "test-failure-unicode.t": {
240 "result": "success"
241 },
242 "test-failure.t": {
243 "result": "success"
244 },
245 "test-success.t": {
246 "result": "success"
247 }
248 } (no-eol)
249
250 $ rt --list-tests --xunit=xunit.xml
251 test-failure-unicode.t
252 test-failure.t
253 test-success.t
254 $ cat xunit.xml
255 <?xml version="1.0" encoding="utf-8"?>
256 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
257 <testcase name="test-failure-unicode.t"/>
258 <testcase name="test-failure.t"/>
259 <testcase name="test-success.t"/>
260 </testsuite>
261
262 $ rt --list-tests test-failure* --json --xunit=xunit.xml
263 test-failure-unicode.t
264 test-failure.t
265 $ cat report.json
266 testreport ={
267 "test-failure-unicode.t": {
268 "result": "success"
269 },
270 "test-failure.t": {
271 "result": "success"
272 }
273 } (no-eol)
274 $ cat xunit.xml
275 <?xml version="1.0" encoding="utf-8"?>
276 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
277 <testcase name="test-failure-unicode.t"/>
278 <testcase name="test-failure.t"/>
279 </testsuite>
280
227 281 $ rm test-failure-unicode.t
228 282
229 283 test for --retest
230 284 ====================
231 285
232 286 $ rt --retest
233 287
234 288 --- $TESTTMP/test-failure.t
235 289 +++ $TESTTMP/test-failure.t.err
236 290 @@ -1,5 +1,5 @@
237 291 $ echo babar
238 292 - rataxes
239 293 + babar
240 294 This is a noop statement so that
241 295 this test is still more bytes than success.
242 296 pad pad pad pad............................................................
243 297
244 298 ERROR: test-failure.t output changed
245 299 !
246 300 Failed test-failure.t: output changed
247 301 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
248 302 python hash seed: * (glob)
249 303 [1]
250 304
251 305 Selecting Tests To Run
252 306 ======================
253 307
254 308 successful
255 309
256 310 $ rt test-success.t
257 311 .
258 312 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
259 313
260 314 success w/ keyword
261 315 $ rt -k xyzzy
262 316 .
263 317 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
264 318
265 319 failed
266 320
267 321 $ rt test-failure.t
268 322
269 323 --- $TESTTMP/test-failure.t
270 324 +++ $TESTTMP/test-failure.t.err
271 325 @@ -1,5 +1,5 @@
272 326 $ echo babar
273 327 - rataxes
274 328 + babar
275 329 This is a noop statement so that
276 330 this test is still more bytes than success.
277 331 pad pad pad pad............................................................
278 332
279 333 ERROR: test-failure.t output changed
280 334 !
281 335 Failed test-failure.t: output changed
282 336 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
283 337 python hash seed: * (glob)
284 338 [1]
285 339
286 340 failure w/ keyword
287 341 $ rt -k rataxes
288 342
289 343 --- $TESTTMP/test-failure.t
290 344 +++ $TESTTMP/test-failure.t.err
291 345 @@ -1,5 +1,5 @@
292 346 $ echo babar
293 347 - rataxes
294 348 + babar
295 349 This is a noop statement so that
296 350 this test is still more bytes than success.
297 351 pad pad pad pad............................................................
298 352
299 353 ERROR: test-failure.t output changed
300 354 !
301 355 Failed test-failure.t: output changed
302 356 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
303 357 python hash seed: * (glob)
304 358 [1]
305 359
306 360 Verify that when a process fails to start we show a useful message
307 361 ==================================================================
308 362
309 363 $ cat > test-serve-fail.t <<EOF
310 364 > $ echo 'abort: child process failed to start blah'
311 365 > EOF
312 366 $ rt test-serve-fail.t
313 367
314 368 ERROR: test-serve-fail.t output changed
315 369 !
316 370 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
317 371 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
318 372 python hash seed: * (glob)
319 373 [1]
320 374 $ rm test-serve-fail.t
321 375
322 376 Verify that we can try other ports
323 377 ===================================
324 378 $ hg init inuse
325 379 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
326 380 $ cat blocks.pid >> $DAEMON_PIDS
327 381 $ cat > test-serve-inuse.t <<EOF
328 382 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
329 383 > $ cat hg.pid >> \$DAEMON_PIDS
330 384 > EOF
331 385 $ rt test-serve-inuse.t
332 386 .
333 387 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
334 388 $ rm test-serve-inuse.t
335 389 $ killdaemons.py $DAEMON_PIDS
336 390 $ rm $DAEMON_PIDS
337 391
338 392 Running In Debug Mode
339 393 ======================
340 394
341 395 $ rt --debug 2>&1 | grep -v pwd
342 396 + echo *SALT* 0 0 (glob)
343 397 *SALT* 0 0 (glob)
344 398 + echo babar
345 399 babar
346 400 + echo *SALT* 10 0 (glob)
347 401 *SALT* 10 0 (glob)
348 402 *+ echo *SALT* 0 0 (glob)
349 403 *SALT* 0 0 (glob)
350 404 + echo babar
351 405 babar
352 406 + echo *SALT* 2 0 (glob)
353 407 *SALT* 2 0 (glob)
354 408 + echo xyzzy
355 409 xyzzy
356 410 + echo *SALT* 9 0 (glob)
357 411 *SALT* 9 0 (glob)
358 412 + printf *abc\ndef\nxyz\n* (glob)
359 413 abc
360 414 def
361 415 xyz
362 416 + echo *SALT* 15 0 (glob)
363 417 *SALT* 15 0 (glob)
364 418 + printf *zyx\nwvu\ntsr\n* (glob)
365 419 zyx
366 420 wvu
367 421 tsr
368 422 + echo *SALT* 22 0 (glob)
369 423 *SALT* 22 0 (glob)
370 424 .
371 425 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
372 426
373 427 Parallel runs
374 428 ==============
375 429
376 430 (duplicate the failing test to get predictable output)
377 431 $ cp test-failure.t test-failure-copy.t
378 432
379 433 $ rt --jobs 2 test-failure*.t -n
380 434 !!
381 435 Failed test-failure*.t: output changed (glob)
382 436 Failed test-failure*.t: output changed (glob)
383 437 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
384 438 python hash seed: * (glob)
385 439 [1]
386 440
387 441 failures in parallel with --first should only print one failure
388 442 >>> f = open('test-nothing.t', 'w')
389 443 >>> f.write('foo\n' * 1024) and None
390 444 >>> f.write(' $ sleep 1') and None
391 445 $ rt --jobs 2 --first
392 446
393 447 --- $TESTTMP/test-failure*.t (glob)
394 448 +++ $TESTTMP/test-failure*.t.err (glob)
395 449 @@ -1,5 +1,5 @@
396 450 $ echo babar
397 451 - rataxes
398 452 + babar
399 453 This is a noop statement so that
400 454 this test is still more bytes than success.
401 455 pad pad pad pad............................................................
402 456
403 457 Failed test-failure*.t: output changed (glob)
404 458 Failed test-nothing.t: output changed
405 459 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
406 460 python hash seed: * (glob)
407 461 [1]
408 462
409 463
410 464 (delete the duplicated test file)
411 465 $ rm test-failure-copy.t test-nothing.t
412 466
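With -j/--jobs the tests run in parallel worker threads, which is why the two failures
above can finish in either order and the output needs globs, and --first stops the run at
the first failure so only one diff is printed. A comparable direct invocation (a sketch;
the file pattern is only an example):

$ ./run-tests.py -j 2 --first test-failure*.t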
413 467
414 468 Interactive run
415 469 ===============
416 470
417 471 (backup the failing test)
418 472 $ cp test-failure.t backup
419 473
420 474 Refuse the fix
421 475
422 476 $ echo 'n' | rt -i
423 477
424 478 --- $TESTTMP/test-failure.t
425 479 +++ $TESTTMP/test-failure.t.err
426 480 @@ -1,5 +1,5 @@
427 481 $ echo babar
428 482 - rataxes
429 483 + babar
430 484 This is a noop statement so that
431 485 this test is still more bytes than success.
432 486 pad pad pad pad............................................................
433 487 Accept this change? [n]
434 488 ERROR: test-failure.t output changed
435 489 !.
436 490 Failed test-failure.t: output changed
437 491 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
438 492 python hash seed: * (glob)
439 493 [1]
440 494
441 495 $ cat test-failure.t
442 496 $ echo babar
443 497 rataxes
444 498 This is a noop statement so that
445 499 this test is still more bytes than success.
446 500 pad pad pad pad............................................................
447 501 pad pad pad pad............................................................
448 502 pad pad pad pad............................................................
449 503 pad pad pad pad............................................................
450 504 pad pad pad pad............................................................
451 505 pad pad pad pad............................................................
452 506
453 507 Interactive with custom view
454 508
455 509 $ echo 'n' | rt -i --view echo
456 510 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
457 511 Accept this change? [n]* (glob)
458 512 ERROR: test-failure.t output changed
459 513 !.
460 514 Failed test-failure.t: output changed
461 515 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
462 516 python hash seed: * (glob)
463 517 [1]
464 518
465 519 View the fix
466 520
467 521 $ echo 'y' | rt --view echo
468 522 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
469 523
470 524 ERROR: test-failure.t output changed
471 525 !.
472 526 Failed test-failure.t: output changed
473 527 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
474 528 python hash seed: * (glob)
475 529 [1]
476 530
477 531 Accept the fix
478 532
479 533 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
480 534 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
481 535 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
482 536 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
483 537 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
484 538 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
485 539 $ echo 'y' | rt -i 2>&1
486 540
487 541 --- $TESTTMP/test-failure.t
488 542 +++ $TESTTMP/test-failure.t.err
489 543 @@ -1,5 +1,5 @@
490 544 $ echo babar
491 545 - rataxes
492 546 + babar
493 547 This is a noop statement so that
494 548 this test is still more bytes than success.
495 549 pad pad pad pad............................................................
496 550 @@ -9,7 +9,7 @@
497 551 pad pad pad pad............................................................
498 552 pad pad pad pad............................................................
499 553 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
500 554 - saved backup bundle to $TESTTMP/foo.hg
501 555 + saved backup bundle to $TESTTMP/foo.hg* (glob)
502 556 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
503 557 saved backup bundle to $TESTTMP/foo.hg* (glob)
504 558 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
505 559 Accept this change? [n] ..
506 560 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
507 561
508 562 $ sed -e 's,(glob)$,&<,g' test-failure.t
509 563 $ echo babar
510 564 babar
511 565 This is a noop statement so that
512 566 this test is still more bytes than success.
513 567 pad pad pad pad............................................................
514 568 pad pad pad pad............................................................
515 569 pad pad pad pad............................................................
516 570 pad pad pad pad............................................................
517 571 pad pad pad pad............................................................
518 572 pad pad pad pad............................................................
519 573 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
520 574 saved backup bundle to $TESTTMP/foo.hg (glob)<
521 575 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
522 576 saved backup bundle to $TESTTMP/foo.hg (glob)<
523 577 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
524 578 saved backup bundle to $TESTTMP/*.hg (glob)<
525 579
526 580 (reinstall)
527 581 $ mv backup test-failure.t
528 582
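Answering "y" to the interactive prompt rewrites the failing .t file in place with the
observed output, which is why this section works on a backup copy and restores it
afterwards. A minimal sketch of the same workflow outside the rt alias used here:

$ cp test-failure.t test-failure.t.orig
$ echo y | ./run-tests.py -i test-failure.t
$ mv test-failure.t.orig test-failure.t
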
529 583 No Diff
530 584 ===============
531 585
532 586 $ rt --nodiff
533 587 !.
534 588 Failed test-failure.t: output changed
535 589 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
536 590 python hash seed: * (glob)
537 591 [1]
538 592
539 593 test --tmpdir support
540 594 $ rt --tmpdir=$TESTTMP/keep test-success.t
541 595
542 596 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
543 597 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
544 598 .
545 599 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
546 600
547 601 timeouts
548 602 ========
549 603 $ cat > test-timeout.t <<EOF
550 604 > $ sleep 2
551 605 > $ echo pass
552 606 > pass
553 607 > EOF
554 608 > echo '#require slow' > test-slow-timeout.t
555 609 > cat test-timeout.t >> test-slow-timeout.t
556 610 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
557 611 st
558 612 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
559 613 Failed test-timeout.t: timed out
560 614 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
561 615 python hash seed: * (glob)
562 616 [1]
563 617 $ rt --timeout=1 --slowtimeout=3 \
564 618 > test-timeout.t test-slow-timeout.t --allow-slow-tests
565 619 .t
566 620 Failed test-timeout.t: timed out
567 621 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
568 622 python hash seed: * (glob)
569 623 [1]
570 624 $ rm test-timeout.t test-slow-timeout.t
571 625
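--timeout applies to ordinary tests while --slowtimeout applies to tests carrying
"#require slow", which in addition only run at all when --allow-slow-tests is given, as
demonstrated above. Both limits are in seconds; a sketch with more realistic values (the
numbers and the test name are placeholders):

$ ./run-tests.py --timeout 180 --slowtimeout 1500 --allow-slow-tests test-example.t
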
572 626 test for --time
573 627 ==================
574 628
575 629 $ rt test-success.t --time
576 630 .
577 631 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
578 632 # Producing time report
579 633 start end cuser csys real Test
580 634 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
581 635
582 636 test for --time with --jobs enabled
583 637 ====================================
584 638
585 639 $ rt test-success.t --time --jobs 2
586 640 .
587 641 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
588 642 # Producing time report
589 643 start end cuser csys real Test
590 644 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
591 645
592 646 Skips
593 647 ================
594 648 $ cat > test-skip.t <<EOF
595 649 > $ echo xyzzy
596 650 > #require false
597 651 > EOF
598 652 $ rt --nodiff
599 653 !.s
600 654 Skipped test-skip.t: missing feature: nail clipper
601 655 Failed test-failure.t: output changed
602 656 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
603 657 python hash seed: * (glob)
604 658 [1]
605 659
606 660 $ rt --keyword xyzzy
607 661 .s
608 662 Skipped test-skip.t: missing feature: nail clipper
609 663 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
610 664
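A "#require" line asks hghave for the listed features and skips the whole test when one
is missing; "#require false" above uses hghave's always-missing feature, whose description
is the "nail clipper" text in the skip message. A sketch of a more realistic guard,
reusing the execbit feature that also appears later in this file (the file name is
hypothetical):

$ cat > test-needs-execbit.t <<'EOF'
> #require execbit
>   $ touch script
>   $ chmod +x script
> EOF
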
611 665 Skips with xml
612 666 $ rt --keyword xyzzy \
613 667 > --xunit=xunit.xml
614 668 .s
615 669 Skipped test-skip.t: missing feature: nail clipper
616 670 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
617 671 $ cat xunit.xml
618 672 <?xml version="1.0" encoding="utf-8"?>
619 673 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
620 674 <testcase name="test-success.t" time="*"/> (glob)
621 675 </testsuite>
622 676
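The --xunit file follows the common JUnit-style layout (a <testsuite> element with one
<testcase> per executed test plus error/failure/skip counters), so standard tooling can
read it. For instance, assuming xmllint from libxml2 is available, the skip count recorded
above could be extracted with:

$ xmllint --xpath 'string(/testsuite/@skipped)' xunit.xml
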
623 677 Skips for missing or blacklisted tests don't count as executed:
624 678 $ echo test-failure.t > blacklist
625 679 $ rt --blacklist=blacklist --json\
626 680 > test-failure.t test-bogus.t
627 681 ss
628 682 Skipped test-bogus.t: Doesn't exist
629 683 Skipped test-failure.t: blacklisted
630 684 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
631 685 $ cat report.json
632 686 testreport ={
633 687 "test-bogus.t": {
634 688 "result": "skip"
635 689 },
636 690 "test-failure.t": {
637 691 "result": "skip"
638 692 }
639 693 } (no-eol)
640 694
641 695 Whitelist trumps blacklist
642 696 $ echo test-failure.t > whitelist
643 697 $ rt --blacklist=blacklist --whitelist=whitelist --json\
644 698 > test-failure.t test-bogus.t
645 699 s
646 700 --- $TESTTMP/test-failure.t
647 701 +++ $TESTTMP/test-failure.t.err
648 702 @@ -1,5 +1,5 @@
649 703 $ echo babar
650 704 - rataxes
651 705 + babar
652 706 This is a noop statement so that
653 707 this test is still more bytes than success.
654 708 pad pad pad pad............................................................
655 709
656 710 ERROR: test-failure.t output changed
657 711 !
658 712 Skipped test-bogus.t: Doesn't exist
659 713 Failed test-failure.t: output changed
660 714 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
661 715 python hash seed: * (glob)
662 716 [1]
663 717
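Both --blacklist and --whitelist name files containing one test per line; a blacklisted
test is reported as skipped rather than run, and a whitelist entry overrides the
blacklist, as the two runs above show. A sketch with hypothetical file and test names:

$ printf 'test-flaky-one.t\ntest-flaky-two.t\n' > known-flaky
$ ./run-tests.py --blacklist known-flaky test-*.t
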
664 718 test for --json
665 719 ==================
666 720
667 721 $ rt --json
668 722
669 723 --- $TESTTMP/test-failure.t
670 724 +++ $TESTTMP/test-failure.t.err
671 725 @@ -1,5 +1,5 @@
672 726 $ echo babar
673 727 - rataxes
674 728 + babar
675 729 This is a noop statement so that
676 730 this test is still more bytes than success.
677 731 pad pad pad pad............................................................
678 732
679 733 ERROR: test-failure.t output changed
680 734 !.s
681 735 Skipped test-skip.t: missing feature: nail clipper
682 736 Failed test-failure.t: output changed
683 737 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
684 738 python hash seed: * (glob)
685 739 [1]
686 740
687 741 $ cat report.json
688 742 testreport ={
689 743 "test-failure.t": [\{] (re)
690 744 "csys": "\s*[\d\.]{4,5}", ? (re)
691 745 "cuser": "\s*[\d\.]{4,5}", ? (re)
692 746 "diff": "---.+\+\+\+.+", ? (re)
693 747 "end": "\s*[\d\.]{4,5}", ? (re)
694 748 "result": "failure", ? (re)
695 749 "start": "\s*[\d\.]{4,5}", ? (re)
696 750 "time": "\s*[\d\.]{4,5}" (re)
697 751 }, ? (re)
698 752 "test-skip.t": {
699 753 "csys": "\s*[\d\.]{4,5}", ? (re)
700 754 "cuser": "\s*[\d\.]{4,5}", ? (re)
701 755 "diff": "", ? (re)
702 756 "end": "\s*[\d\.]{4,5}", ? (re)
703 757 "result": "skip", ? (re)
704 758 "start": "\s*[\d\.]{4,5}", ? (re)
705 759 "time": "\s*[\d\.]{4,5}" (re)
706 760 }, ? (re)
707 761 "test-success.t": [\{] (re)
708 762 "csys": "\s*[\d\.]{4,5}", ? (re)
709 763 "cuser": "\s*[\d\.]{4,5}", ? (re)
710 764 "diff": "", ? (re)
711 765 "end": "\s*[\d\.]{4,5}", ? (re)
712 766 "result": "success", ? (re)
713 767 "start": "\s*[\d\.]{4,5}", ? (re)
714 768 "time": "\s*[\d\.]{4,5}" (re)
715 769 }
716 770 } (no-eol)
717 771
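The report.json file produced by --json starts with a literal "testreport =" prefix in
front of the JSON object, as the cat above shows; stripping that prefix leaves plain JSON
that ordinary tools can parse, for example:

$ sed 's/^testreport =//' report.json | python -m json.tool
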
718 772 Test that a failed test accepted interactively is properly reported:
719 773
720 774 $ cp test-failure.t backup
721 775 $ echo y | rt --json -i
722 776
723 777 --- $TESTTMP/test-failure.t
724 778 +++ $TESTTMP/test-failure.t.err
725 779 @@ -1,5 +1,5 @@
726 780 $ echo babar
727 781 - rataxes
728 782 + babar
729 783 This is a noop statement so that
730 784 this test is still more bytes than success.
731 785 pad pad pad pad............................................................
732 786 Accept this change? [n] ..s
733 787 Skipped test-skip.t: missing feature: nail clipper
734 788 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
735 789
736 790 $ cat report.json
737 791 testreport ={
738 792 "test-failure.t": [\{] (re)
739 793 "csys": "\s*[\d\.]{4,5}", ? (re)
740 794 "cuser": "\s*[\d\.]{4,5}", ? (re)
741 795 "diff": "", ? (re)
742 796 "end": "\s*[\d\.]{4,5}", ? (re)
743 797 "result": "success", ? (re)
744 798 "start": "\s*[\d\.]{4,5}", ? (re)
745 799 "time": "\s*[\d\.]{4,5}" (re)
746 800 }, ? (re)
747 801 "test-skip.t": {
748 802 "csys": "\s*[\d\.]{4,5}", ? (re)
749 803 "cuser": "\s*[\d\.]{4,5}", ? (re)
750 804 "diff": "", ? (re)
751 805 "end": "\s*[\d\.]{4,5}", ? (re)
752 806 "result": "skip", ? (re)
753 807 "start": "\s*[\d\.]{4,5}", ? (re)
754 808 "time": "\s*[\d\.]{4,5}" (re)
755 809 }, ? (re)
756 810 "test-success.t": [\{] (re)
757 811 "csys": "\s*[\d\.]{4,5}", ? (re)
758 812 "cuser": "\s*[\d\.]{4,5}", ? (re)
759 813 "diff": "", ? (re)
760 814 "end": "\s*[\d\.]{4,5}", ? (re)
761 815 "result": "success", ? (re)
762 816 "start": "\s*[\d\.]{4,5}", ? (re)
763 817 "time": "\s*[\d\.]{4,5}" (re)
764 818 }
765 819 } (no-eol)
766 820 $ mv backup test-failure.t
767 821
768 822 backslash at the end of a line with glob matching is handled properly
769 823
770 824 $ cat > test-glob-backslash.t << EOF
771 825 > $ echo 'foo bar \\'
772 826 > foo * \ (glob)
773 827 > EOF
774 828
775 829 $ rt test-glob-backslash.t
776 830 .
777 831 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
778 832
779 833 $ rm -f test-glob-backslash.t
780 834
781 835 Test globbing of local IP addresses
782 836 $ echo 172.16.18.1
783 837 $LOCALIP (glob)
784 838 $ echo dead:beef::1
785 839 $LOCALIP (glob)
786 840
787 841 Test reusability for third-party tools
788 842 ======================================
789 843
790 844 $ mkdir "$TESTTMP"/anothertests
791 845 $ cd "$TESTTMP"/anothertests
792 846
793 847 test that `run-tests.py` can execute hghave, even when it is not run
794 848 from inside the Mercurial source tree.
795 849
796 850 $ cat > test-hghave.t <<EOF
797 851 > #require true
798 852 > $ echo foo
799 853 > foo
800 854 > EOF
801 855 $ rt test-hghave.t
802 856 .
803 857 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
804 858
805 859 test that RUNTESTDIR refers to the directory in which the currently
806 860 running `run-tests.py` is placed.
807 861
808 862 $ cat > test-runtestdir.t <<EOF
809 863 > - $TESTDIR, in which test-run-tests.t is placed
810 864 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
811 865 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
812 866 >
813 867 > #if windows
814 868 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
815 869 > #else
816 870 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
817 871 > #endif
818 872 > $ test "\$RUNTESTDIR" = "$TESTDIR"
819 873 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
820 874 > #!/usr/bin/env python
821 875 > #
822 876 > # check-code - a style and portability checker for Mercurial
823 877 > EOF
824 878 $ rt test-runtestdir.t
825 879 .
826 880 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
827 881
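The harness exports a few directories to every test: TESTTMP (the per-test scratch
directory the test runs in), TESTDIR (the directory holding the .t file, which is also put
on PATH, as checked in the execbit section just below) and RUNTESTDIR (the directory
holding run-tests.py), all of which the test above exercises. A sketch of a test relying
on them (the file name is hypothetical):

$ cat > test-env-sketch.t <<'EOF'
>   $ test -d "$TESTTMP"
>   $ test -d "$TESTDIR"
>   $ test -d "$RUNTESTDIR"
> EOF
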
828 882 #if execbit
829 883
830 884 test that TESTDIR is included in PATH
831 885
832 886 $ cat > custom-command.sh <<EOF
833 887 > #!/bin/sh
834 888 > echo "hello world"
835 889 > EOF
836 890 $ chmod +x custom-command.sh
837 891 $ cat > test-testdir-path.t <<EOF
838 892 > $ custom-command.sh
839 893 > hello world
840 894 > EOF
841 895 $ rt test-testdir-path.t
842 896 .
843 897 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
844 898
845 899 #endif
846 900
847 901 test support for --allow-slow-tests
848 902 $ cat > test-very-slow-test.t <<EOF
849 903 > #require slow
850 904 > $ echo pass
851 905 > pass
852 906 > EOF
853 907 $ rt test-very-slow-test.t
854 908 s
855 909 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
856 910 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
857 911 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
858 912 .
859 913 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
860 914
861 915 support for running a test outside the current directory
862 916 $ mkdir nonlocal
863 917 $ cat > nonlocal/test-is-not-here.t << EOF
864 918 > $ echo pass
865 919 > pass
866 920 > EOF
867 921 $ rt nonlocal/test-is-not-here.t
868 922 .
869 923 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
870 924
871 925 support for bisecting failed tests automatically
872 926 $ hg init bisect
873 927 $ cd bisect
874 928 $ cat >> test-bisect.t <<EOF
875 929 > $ echo pass
876 930 > pass
877 931 > EOF
878 932 $ hg add test-bisect.t
879 933 $ hg ci -m 'good'
880 934 $ cat >> test-bisect.t <<EOF
881 935 > $ echo pass
882 936 > fail
883 937 > EOF
884 938 $ hg ci -m 'bad'
885 939 $ rt --known-good-rev=0 test-bisect.t
886 940
887 941 --- $TESTTMP/anothertests/bisect/test-bisect.t
888 942 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
889 943 @@ -1,4 +1,4 @@
890 944 $ echo pass
891 945 pass
892 946 $ echo pass
893 947 - fail
894 948 + pass
895 949
896 950 ERROR: test-bisect.t output changed
897 951 !
898 952 Failed test-bisect.t: output changed
899 953 test-bisect.t broken by 72cbf122d116 (bad)
900 954 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
901 955 python hash seed: * (glob)
902 956 [1]
903 957
904 958 $ cd ..
905 959
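--known-good-rev makes the harness search the history between the given good revision and
the current one for the changeset that first broke the failing test, reporting it as
"broken by <node>" above. A roughly equivalent manual workflow (a sketch, not necessarily
what run-tests does internally) drives hg bisect directly:

$ hg bisect --reset
$ hg bisect --bad .
$ hg bisect --good 0
$ hg bisect --command './run-tests.py test-bisect.t'
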
906 960 Test that a broken #if statement doesn't break run-tests threading.
907 961 ==============================================================
908 962 $ mkdir broken
909 963 $ cd broken
910 964 $ cat > test-broken.t <<EOF
911 965 > true
912 966 > #if notarealhghavefeature
913 967 > $ false
914 968 > #endif
915 969 > EOF
916 970 $ for f in 1 2 3 4 ; do
917 971 > cat > test-works-$f.t <<EOF
918 972 > This is test case $f
919 973 > $ sleep 1
920 974 > EOF
921 975 > done
922 976 $ rt -j 2
923 977 ....
924 978 # Ran 5 tests, 0 skipped, 0 warned, 0 failed.
925 979 skipped: unknown feature: notarealhghavefeature
926 980
927 981 $ cd ..
928 982 $ rm -rf broken
929 983
930 984 Test cases in .t files
931 985 ======================
932 986 $ mkdir cases
933 987 $ cd cases
934 988 $ cat > test-cases-abc.t <<'EOF'
935 989 > #testcases A B C
936 990 > $ V=B
937 991 > #if A
938 992 > $ V=A
939 993 > #endif
940 994 > #if C
941 995 > $ V=C
942 996 > #endif
943 997 > $ echo $V | sed 's/A/C/'
944 998 > C
945 999 > #if C
946 1000 > $ [ $V = C ]
947 1001 > #endif
948 1002 > #if A
949 1003 > $ [ $V = C ]
950 1004 > [1]
951 1005 > #endif
952 1006 > #if no-C
953 1007 > $ [ $V = C ]
954 1008 > [1]
955 1009 > #endif
956 1010 > $ [ $V = D ]
957 1011 > [1]
958 1012 > EOF
959 1013 $ rt
960 1014 .
961 1015 --- $TESTTMP/anothertests/cases/test-cases-abc.t
962 1016 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
963 1017 @@ -7,7 +7,7 @@
964 1018 $ V=C
965 1019 #endif
966 1020 $ echo $V | sed 's/A/C/'
967 1021 - C
968 1022 + B
969 1023 #if C
970 1024 $ [ $V = C ]
971 1025 #endif
972 1026
973 1027 ERROR: test-cases-abc.t (case B) output changed
974 1028 !.
975 1029 Failed test-cases-abc.t (case B): output changed
976 1030 # Ran 3 tests, 0 skipped, 0 warned, 1 failed.
977 1031 python hash seed: * (glob)
978 1032 [1]
979 1033
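Each name on a "#testcases" line makes the file run as a separate test with that case's
"#if" blocks enabled; failures are labelled per case and the saved output goes to a
per-case err file (test-cases-abc.t.B.err above). A minimal sketch with hypothetical case
names:

$ cat > test-cases-sketch.t <<'EOF'
> #testcases quick thorough
>   $ echo running
>   running
> #if thorough
>   $ echo extra checks
>   extra checks
> #endif
> EOF
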
980 1034 --restart works
981 1035
982 1036 $ rt --restart
983 1037
984 1038 --- $TESTTMP/anothertests/cases/test-cases-abc.t
985 1039 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
986 1040 @@ -7,7 +7,7 @@
987 1041 $ V=C
988 1042 #endif
989 1043 $ echo $V | sed 's/A/C/'
990 1044 - C
991 1045 + B
992 1046 #if C
993 1047 $ [ $V = C ]
994 1048 #endif
995 1049
996 1050 ERROR: test-cases-abc.t (case B) output changed
997 1051 !.
998 1052 Failed test-cases-abc.t (case B): output changed
999 1053 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
1000 1054 python hash seed: * (glob)
1001 1055 [1]