##// END OF EJS Templates
run-tests: write JSON reports to output dir
Siddharth Agarwal -
r32718:23287562 default
parent child Browse files
Show More
@@ -1,2798 +1,2798 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
# Optional strict-unicode mode: with RTUNICODEPEDANTRY set, make implicit
# str<->unicode coercion fail loudly on Python 2 so latent encoding bugs
# surface. reload()/setdefaultencoding don't exist on Python 3 (NameError),
# in which case this is a no-op.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# os.environb (bytes environment) only exists on Python 3; fall back to
# os.environ on Python 2, where it is already bytes.
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess.Popen calls (see Popen4) so fd inheritance between
# concurrently spawned children stays predictable.
processlock = threading.Lock()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    # On Python 3, paths handled by this script are bytes; these helpers
    # convert between bytes paths and native (str) paths.
    def _bytespath(p):
        return p.encode('utf-8')

    def _strpath(p):
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported: bail out early.
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
107 107
108 108 # For Windows support
109 109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
110 110
111 111 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.

    Returns False when the named family does not exist on this platform
    or cannot be bound; re-raises unexpected socket errors.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone else is listening there, which still proves the
            # address family works on this host.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    finally:
        # Always release the probe socket; the original leaked it when
        # bind() raised. (Also drops an unreachable `else: return False`
        # that followed a try block which always returns or raises.)
        if s is not None:
            s.close()
135 135
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost

    Uses IPv6 when the module-level ``useipv6`` flag is set (by
    parseargs), IPv4 otherwise. Unexpected socket errors are re-raised.
    """
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False
    finally:
        # Always release the probe socket; the original leaked it when
        # bind() raised (e.g. EADDRINUSE).
        if s is not None:
            s.close()
155 155
# close_fds is only safe/useful on POSIX; on Windows it would conflict with
# the redirected std handles.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell in directory *wd* and return the Popen.

    The returned object gets popen2-style aliases (fromchild/tochild/
    childerr) plus a ``timeout`` flag. When *timeout* is non-zero, a
    watchdog thread polls the process and terminates it once *timeout*
    seconds elapse, setting ``p.timeout = True``.
    """
    # Serialize Popen calls so concurrently started children don't
    # inherit each other's file descriptors in surprising ways.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen2-compatible attribute names used by callers.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll rather than join so a finished process ends the wait
            # early; returncode is set by the main thread's wait()/poll().
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            # NOTE(review): timeout is flagged even when the loop exited
            # because the process finished; callers appear to consult it
            # only for still-running processes — confirm.
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
181 181
# Path of the running interpreter, as bytes with forward slashes (keeps
# Windows paths usable inside the shell-based tests).
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable through which the hg package is put on the child
# interpreter's import path; Jython uses its own variable.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults, overridable per-option through the named environment
# variable (applied in getparser()).
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
194 194
def canonpath(path):
    """Return *path* with ``~`` expanded and symlinks/relative parts resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
197 197
def parselistfiles(files, listtype, warn=True):
    """Parse list files (e.g. blacklist/whitelist) into a dict.

    *files* is an iterable of file names; each file lists one entry per
    line, with ``#`` starting a comment. Returns a dict mapping each
    (bytes) entry to the file it came from. Missing files are skipped,
    with a warning naming *listtype* when *warn* is true; any other
    IOError propagates.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # "with" guarantees the handle is closed even if reading raises;
        # the original leaked it on a read error. Iterating the file
        # directly also avoids materializing readlines().
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
218 218
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    cases = set()
    try:
        fp = open(path, 'rb')
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        return cases
    with fp:
        for line in fp:
            # "#testcases a b c" declares the cases a, b and c.
            if line.startswith(b'#testcases '):
                cases.update(line[11:].split())
    return cases
234 234
def getparser():
    """Obtain the OptionParser used by the CLI.

    Also folds environment-variable overrides (see the module-level
    ``defaults`` table) into the parser's defaults.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("--list-tests", action="store_true",
        help="list tests instead of running them")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("--outputdir", type="string",
        help="directory to write error logs to (default=test directory)")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    parser.add_option("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    parser.add_option("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    parser.add_option("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')
    parser.add_option('--known-good-rev', type="string",
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))

    # Resolve each (envvar, default) pair to a concrete value, coercing the
    # env override to the default's type. Note this mutates the module-level
    # ``defaults`` dict in place.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
350 350
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns (options, args). Incompatible option combinations abort via
    parser.error(). Also sets the module-level ``useipv6`` and ``verbose``
    globals as side effects.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local means: use the hg (and optionally chg) from the repository
        # this tests directory lives in.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # An empty-string prefix enables log()/vlog() output (see vlog).
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug streams output directly, so timeouts are meaningless.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        # NOTE(review): this branch only runs under Python 2, where ``out``
        # is a str, so the startswith/slicing below is consistent — confirm
        # if this ever becomes reachable under Python 3.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # The channel display shares the screen with diff output; disable
        # diffs to keep it readable.
        options.nodiff = True

    return (options, args)
476 476
def rename(src, dst):
    """Move *src* to *dst* via copy-and-delete.

    Unlike os.rename(), this tolerates an existing destination and open
    file handles, at the cost of atomicity.
    """
    shutil.copy(src, dst)
    os.unlink(src)
483 483
# Diff helper working on bytes: on Python 3, difflib.unified_diff only
# accepts str, so wrap it with difflib.diff_bytes to diff bytes lines.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
488 488
def getdiff(expected, output, ref, err):
    """Diff *expected* against *output* (bytes lines) and return
    (servefail, lines).

    Header lines get backslashes normalized to forward slashes and
    trailing ' \\n' trimmed. servefail becomes True when the diff contains
    the 'child process failed to start' abort marker.
    """
    servefail = False
    lines = []
    for rawline in _unified_diff(expected, output, ref, err):
        if rawline.startswith(b'+++') or rawline.startswith(b'---'):
            rawline = rawline.replace(b'\\', b'/')
        if rawline.endswith(b' \n'):
            rawline = rawline[:-2] + b'\n'
        lines.append(rawline)
        if (not servefail and
                rawline.startswith(b'+ abort: child process failed to start')):
            servefail = True
    return servefail, lines
503 503
# Verbosity flag: False means quiet; parseargs() swaps in a string prefix
# when --verbose is given.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is not False:
        return log(*msg)
511 511
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.+) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
531 531
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.

    Output is serialized through the global ``iolock`` so messages from
    concurrent test threads don't interleave. When ``verbose`` holds a
    string prefix, it is printed first.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
544 544
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # The process may already be gone; that is fine.
        pass
552 552
def killdaemons(pidfile):
    """Kill daemon processes listed in *pidfile*, removing the file.

    Delegates to the killdaemons helper module that lives next to this
    script in the tests directory (hence the function-local import).
    """
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
557 557
558 558 class Test(unittest.TestCase):
559 559 """Encapsulates a single, runnable test.
560 560
561 561 While this class conforms to the unittest.TestCase API, it differs in that
562 562 instances need to be instantiated manually. (Typically, unittest.TestCase
563 563 classes are instantiated automatically by scanning modules.)
564 564 """
565 565
566 566 # Status code reserved for skipped tests (used by hghave).
567 567 SKIPPED_STATUS = 80
568 568
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=defaults['slowtimeout'], usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        outputdir is the directory where the test's .err output file is
        written.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Per-run state, (re)initialized in setUp()/run().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []
646 646
    # needed to get base class __repr__ running; unittest expects every
    # TestCase to expose the name of its test method.
    @property
    def _testMethodName(self):
        return self.name
651 651
    def __str__(self):
        # The test's file name doubles as its display string.
        return self.name
654 654
    def shortDescription(self):
        # Used by unittest result reporting; just the test name.
        return self.name
657 657
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so a Test instance can be run repeatedly.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The per-thread directory may already exist when several tests
        # share it; only a real failure should propagate.
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # chg needs a dedicated directory for its server socket files.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
690 690
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest as e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest as e:
                result.addWarn(self, str(e))
            except ReportedTest as e:
                # Failure already recorded by the result object; nothing
                # more to report here.
                pass
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                # A tearDown error invalidates a previously successful run.
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
754 754
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
819 819
    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemon processes the test started.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            # ignore_errors=True: best-effort cleanup.
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the failing output as a .err file for later diffing.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
847 847
    def _run(self, env):
        # This should be implemented in child classes to run tests.
        # Must return a (returncode, output-lines) pair.
        raise SkipTest('unknown test type')
851 851
    def abort(self):
        """Terminate execution of this test."""
        # run() checks this flag to report the test as interrupted.
        self._aborted = True
855 855
856 856 def _portmap(self, i):
857 857 offset = b'' if i == 0 else b'%d' % i
858 858 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
859 859
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.

        Returns a list of (regex, replacement) byte-string pairs.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Tolerate backup-bundle paths with or without a (glob) marker.
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            # Replace the literal local IP, but not when it is part of a
            # longer number (hence the leading non-digit capture).
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            # Transaction IDs are random 40-hex-digit values.
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
            ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        return r
880 880
881 881 def _escapepath(self, p):
882 882 if os.name == 'nt':
883 883 return (
884 884 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
885 885 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
886 886 for c in p))
887 887 )
888 888 else:
889 889 return re.escape(p)
890 890
891 891 def _localip(self):
892 892 if self._useipv6:
893 893 return b'::1'
894 894 else:
895 895 return b'127.0.0.1'
896 896
897 897 def _getenv(self):
898 898 """Obtain environment variables to use during test execution."""
899 899 def defineport(i):
900 900 offset = '' if i == 0 else '%s' % i
901 901 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
902 902 env = os.environ.copy()
903 903 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
904 904 env['HGEMITWARNINGS'] = '1'
905 905 env['TESTTMP'] = self._testtmp
906 906 env['HOME'] = self._testtmp
907 907 # This number should match portneeded in _getport
908 908 for port in xrange(3):
909 909 # This list should be parallel to _portmap in _getreplacements
910 910 defineport(port)
911 911 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
912 912 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
913 913 env["HGEDITOR"] = ('"' + sys.executable + '"'
914 914 + ' -c "import sys; sys.exit(0)"')
915 915 env["HGMERGE"] = "internal:merge"
916 916 env["HGUSER"] = "test"
917 917 env["HGENCODING"] = "ascii"
918 918 env["HGENCODINGMODE"] = "strict"
919 919 env['HGIPV6'] = str(int(self._useipv6))
920 920
921 921 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
922 922 # IP addresses.
923 923 env['LOCALIP'] = self._localip()
924 924
925 925 # Reset some environment variables to well-known values so that
926 926 # the tests produce repeatable output.
927 927 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
928 928 env['TZ'] = 'GMT'
929 929 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
930 930 env['COLUMNS'] = '80'
931 931 env['TERM'] = 'xterm'
932 932
933 933 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
934 934 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
935 935 'NO_PROXY CHGDEBUG').split():
936 936 if k in env:
937 937 del env[k]
938 938
939 939 # unset env related to hooks
940 940 for k in env.keys():
941 941 if k.startswith('HG_'):
942 942 del env[k]
943 943
944 944 if self._usechg:
945 945 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
946 946
947 947 return env
948 948
949 949 def _createhgrc(self, path):
950 950 """Create an hgrc file for this test."""
951 951 hgrc = open(path, 'wb')
952 952 hgrc.write(b'[ui]\n')
953 953 hgrc.write(b'slash = True\n')
954 954 hgrc.write(b'interactive = False\n')
955 955 hgrc.write(b'mergemarkers = detailed\n')
956 956 hgrc.write(b'promptecho = True\n')
957 957 hgrc.write(b'[defaults]\n')
958 958 hgrc.write(b'[devel]\n')
959 959 hgrc.write(b'all-warnings = true\n')
960 960 hgrc.write(b'default-date = 0 0\n')
961 961 hgrc.write(b'[largefiles]\n')
962 962 hgrc.write(b'usercache = %s\n' %
963 963 (os.path.join(self._testtmp, b'.cache/largefiles')))
964 964 hgrc.write(b'[web]\n')
965 965 hgrc.write(b'address = localhost\n')
966 966 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
967 967
968 968 for opt in self._extraconfigopts:
969 969 section, key = opt.split('.', 1)
970 970 assert '=' in key, ('extra config opt %s must '
971 971 'have an = for assignment' % opt)
972 972 hgrc.write(b'[%s]\n%s\n' % (section, key))
973 973 hgrc.close()
974 974
975 975 def fail(self, msg):
976 976 # unittest differentiates between errored and failed.
977 977 # Failed is denoted by AssertionError (by default at least).
978 978 raise AssertionError(msg)
979 979
980 980 def _runcommand(self, cmd, env, normalizenewlines=False):
981 981 """Run command in a sub-process, capturing the output (stdout and
982 982 stderr).
983 983
984 984 Return a tuple (exitcode, output). output is None in debug mode.
985 985 """
986 986 if self._debug:
987 987 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
988 988 env=env)
989 989 ret = proc.wait()
990 990 return (ret, None)
991 991
992 992 proc = Popen4(cmd, self._testtmp, self._timeout, env)
993 993 def cleanup():
994 994 terminate(proc)
995 995 ret = proc.wait()
996 996 if ret == 0:
997 997 ret = signal.SIGTERM << 8
998 998 killdaemons(env['DAEMON_PIDS'])
999 999 return ret
1000 1000
1001 1001 output = ''
1002 1002 proc.tochild.close()
1003 1003
1004 1004 try:
1005 1005 output = proc.fromchild.read()
1006 1006 except KeyboardInterrupt:
1007 1007 vlog('# Handling keyboard interrupt')
1008 1008 cleanup()
1009 1009 raise
1010 1010
1011 1011 ret = proc.wait()
1012 1012 if wifexited(ret):
1013 1013 ret = os.WEXITSTATUS(ret)
1014 1014
1015 1015 if proc.timeout:
1016 1016 ret = 'timeout'
1017 1017
1018 1018 if ret:
1019 1019 killdaemons(env['DAEMON_PIDS'])
1020 1020
1021 1021 for s, r in self._getreplacements():
1022 1022 output = re.sub(s, r, output)
1023 1023
1024 1024 if normalizenewlines:
1025 1025 output = output.replace('\r\n', '\n')
1026 1026
1027 1027 return ret, output.splitlines(True)
1028 1028
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives alongside the test as "<name>.out".
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        switches = b''
        if self._py3kwarnings:
            switches = b' -3'
        cmd = b'%s%s "%s"' % (PYTHON, switches, self.path)
        vlog("# Running", cmd)
        # Python on Windows emits \r\n line endings; normalize them so the
        # captured output compares cleanly against the reference file.
        res = self._runcommand(cmd, env,
                               normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()
        return res
1047 1047
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
# These are bytes regexes, matched against an expected-output line that
# already compared equal without its (glob) suffix (see TTest.globmatch).
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1061 1061
# bchr(i) -> a single byte. On Python 3, indexing/iterating bytes yields
# ints, so building a one-byte bytes object needs bytes([i]); on Python 2,
# chr() already produces a 1-character str (== bytes).
if PYTHON3:
    bchr = lambda x: bytes([x])
else:
    bchr = chr
1065 1065
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Bytes that cannot appear verbatim in expected output and therefore
    # need an " (esc)" representation.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map each byte to its \xNN escape; backslash and CR get readable forms.
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = None
        if 'case' in kwds:
            case = kwds.pop('case')
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the display name, .err path and tmp name unique per case.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case

    @property
    def refpath(self):
        # For .t tests the test file itself holds the expected output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        """Translate the .t file to a shell script, run it, and merge the
        actual output back with the expected output."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Check feature requirements by spawning the hghave script.

        Returns (True, None) if all of reqs are available, otherwise
        (False, hghave stdout). Exits the runner if hghave itself is
        broken (exit code 2).
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        # "slow" tests get the more generous timeout.
        if 'slow' in reqs:
            self._timeout = self._slowtimeout
        return True, None

    def _iftest(self, args):
        # implements "#if"
        # Each arg is either a test-case name (matched against the active
        # case), its "no-" negation, or a feature delegated to hghave.
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        """Translate .t source lines into a shell script.

        Returns (salt, script, after, expected) where script is the list of
        shell lines to run, after maps source positions to non-executed
        lines, and expected maps source positions to expected output lines.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        # NOTE(review): the ' !!! ...' marker strings below are str, not
        # bytes; on Python 3 they end up mixed with bytes lines in the
        # 'after' lists — verify against how the merged output is joined.
        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append(' !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(' !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(' !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                # Rewrite bare "cd" so a failing cd aborts the script
                # instead of running later commands in the wrong directory.
                if len(cmd) == 2 and cmd[0] == b'cd':
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(' !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge actual output with expected output into a unified test.

        Returns (exitcode, postout); exitcode becomes False when only
        warn-level mismatches occurred.
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = TTest.linematch(el, lout)
                    # NOTE(review): linematch returns b'+glob' (bytes) while
                    # the comparisons below use str literals — on Python 3
                    # these branches cannot match; verify intended behavior.
                    if isinstance(r, str):
                        if r == '+glob':
                            lout = el[:-1] + ' (glob)\n'
                            r = '' # Warn only this line.
                        elif r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(' ')]

                                if self._hghave(conditions)[0]:
                                    lout = el
                                else:
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                    break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if (not optline.match(el)
                            and not el.endswith(b" (?)\n")):
                            break
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout

    @staticmethod
    def rematch(el, l):
        """Match expected-regex line el against output line l."""
        try:
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        """Match expected-glob line el against output line l."""
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        while i < n:
            # One-byte slice so the code works on Python 2 and 3 alike.
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    @staticmethod
    def linematch(el, l):
        """Compare one expected line against one output line.

        Returns True on a match, a falsy value on a mismatch, "retry" for
        optional lines, or a glob hint for the caller.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    el = m.group(1) + b"\n"
                    retry = "retry"

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep and l.replace(b'\\', b'/') == el:
                return b'+glob'
        return retry

    @staticmethod
    def parsehghaveoutput(lines):
        '''Parse hghave log lines.

        Return tuple of lists (missing, failed):
        * the missing/unknown features
        * the features for which existence check failed'''
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
                failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))

        return missing, failed

    @staticmethod
    def _escapef(m):
        # re.sub callback: translate one matched byte via ESCAPEMAP.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Escape all non-printable bytes in s for safe embedding in a .t file.
        return TTest.ESCAPESUB(TTest._escapef, s)
1486 1486
# Serializes console/stream writes across worker threads. An RLock (rather
# than a plain Lock) so a holder may re-acquire it without deadlocking.
iolock = threading.RLock()
1488 1488
class SkipTest(Exception):
    """Raised to indicate that a test is to be skipped.

    Raised by Test._run() (and overrides); recorded as "skipped" rather
    than failed.
    """
1491 1491
class IgnoreTest(Exception):
    """Raised to indicate that a test is to be ignored.

    Ignored tests are recorded separately from skipped ones (see
    TestResult.addIgnore).
    """
1494 1494
class WarnTest(Exception):
    """Raised to indicate that a test warned.

    A warn-level result is recorded separately from a failure (see
    TestResult.addWarn).
    """
1497 1497
class ReportedTest(Exception):
    """Raised to indicate that a test already reported.

    Used to unwind without double-reporting (see
    TestResult.addOutputMismatch).
    """
1500 1500
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        # Per-test timing tuples; see stopTest for the field layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        """Record a failed test, printing a short marker unless --first."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\nERROR: %s output changed\n' % test)
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        """Record a passing test (also kept for xunit reporting)."""
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record an errored (as opposed to failed) test."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test and emit an 's' (or the reason with -v)."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        """Record an ignored test; common reasons don't count toward the
        run total."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addWarn(self, test, reason):
        """Record a warn-level result and emit a '~' marker."""
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        with iolock:
            if self.showAll:
                self.stream.writeln('warned %s' % reason)
            else:
                self.stream.write('~')
            self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Hand expected/actual files to an external diff viewer.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    self.addFailure(
                        test,
                        'server failed to start (HGPORT=%s)' % test._startport)
                    raise ReportedTest('server failed to start')
                else:
                    self.stream.write('\n')
                    for line in lines:
                        # getdiff produces bytes; on Python 3 they must go
                        # through the underlying binary buffer.
                        if PYTHON3:
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                self.stream.write('Accept this change? [n] ')
                answer = sys.stdin.readline().strip()
                if answer.lower() in ('y', 'yes'):
                    if test.name.endswith('.t'):
                        rename(test.errpath, test.path)
                    else:
                        rename(test.errpath, '%s.out' % test.path)
                    accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1676 1676
1677 1677 class TestSuite(unittest.TestSuite):
1678 1678 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1679 1679
    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test is how many times each selected test is scheduled.

        loadtest, when provided, is a callable used to (re)load a test
        instance; it is called with the test and a run counter.

        showchannels enables the live per-job status display.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels
1722 1722
    def run(self, result):
        """Run the suite's tests, feeding outcomes into *result*.

        Applies the whitelist/blacklist/retest/keyword filters up front,
        then dispatches tests either inline (one job) or on worker
        threads, tracking completion through a queue.
        """
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # hand out a (possibly freshly reloaded) test instance,
                # numbering each run so reloads stay distinguishable
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # whitelisted tests bypass the blacklist/retest/keyword filters
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # match keywords against the test body and its name
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # one display slot per job for the --showchannels progress output
        channels = [""] * self._jobs

        def job(test, result):
            # claim the first free output channel for this test
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # progress ticker for --showchannels; exits once the main loop
            # clears `channels`
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # drain a completion when all jobs are busy or nothing is
                # left to start
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test (reloading if supported)
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # signal the stat() thread (if any) to exit
        channels = []

        return result
1863 1863
1864 1864 # Save the most recent 5 wall-clock runtimes of each test to a
1865 1865 # human-readable text file named .testtimes. Tests are sorted
1866 1866 # alphabetically, while times for each test are listed from oldest to
1867 1867 # newest.
1868 1868
def loadtimes(outputdir):
    """Load the saved per-test timing history from *outputdir*.

    Each line of the history file holds a test name followed by its
    recent wall-clock runtimes (oldest first).  Returns a list of
    ``(testname, [runtime, ...])`` tuples; a missing file yields ``[]``.
    Any IOError other than ENOENT is propagated.
    """
    times = []
    try:
        # savetimes() renames its temporary file to '.testtimes'; the old
        # name '.testtimes-' (trailing dash) never exists, so the history
        # was silently ignored on every run.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
1880 1880
def savetimes(outputdir, result):
    """Merge this run's wall-clock timings into the saved history.

    Skipped tests are left untouched; every other test keeps at most its
    five most recent runtimes.  The history file is replaced atomically
    via a temporary file and rename.
    """
    maxruns = 5
    history = dict(loadtimes(outputdir))
    skipped = set(str(t[0]) for t in result.skipped)
    for tdata in result.times:
        testname, real = tdata[0], tdata[3]
        if testname in skipped:
            continue
        runs = history.setdefault(testname, [])
        runs.append(real)
        # retain only the most recent maxruns entries
        del runs[:-maxruns]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, runs in sorted(history.items()):
            durations = ' '.join('%.3f' % (r,) for r in runs)
            fp.write('%s %s\n' % (name, durations))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
1906 1906
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # TestRunner instance holding options and shared run state
        self._runner = runner

    def listtests(self, test):
        """Print the sorted names of the tests without running them.

        Each listed test is recorded as a success so that the xunit/json
        reports (when requested) enumerate every test.  Returns the
        TestResult.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run *test*, print a summary, and write any requested reports.

        With --known-good-rev, each failed test is bisected with
        ``hg bisect`` to identify the changeset that broke it.  Returns
        the TestResult.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    self.stream.writeln('Skipped %s: %s' % (test.name, msg))
            for test, msg in result.warned:
                self.stream.writeln('Warned %s: %s' % (test.name, msg))
            for test, msg in result.failures:
                self.stream.writeln('Failed %s: %s' % (test.name, msg))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir,
                                        b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                def nooutput(args):
                    # run a command, discarding its combined output
                    p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                         stdout=subprocess.PIPE)
                    p.stdout.read()
                    p.wait()
                for test, msg in result.failures:
                    nooutput(['hg', 'bisect', '--reset']),
                    nooutput(['hg', 'bisect', '--bad', '.'])
                    nooutput(['hg', 'bisect', '--good',
                              self._runner.options.known_good_rev])
                    # TODO: we probably need to forward some options
                    # that alter hg's behavior inside the tests.
                    rtc = '%s %s %s' % (sys.executable, sys.argv[0], test)
                    sub = subprocess.Popen(['hg', 'bisect', '--command', rtc],
                                           stderr=subprocess.STDOUT,
                                           stdout=subprocess.PIPE)
                    data = sub.stdout.read()
                    sub.wait()
                    # parse the changeset hg bisect identified
                    m = re.search(
                        (r'\nThe first (?P<goodbad>bad|good) revision '
                         r'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                         r'summary: +(?P<summary>[^\n]+)\n'),
                        data, (re.MULTILINE | re.DOTALL))
                    if m is None:
                        self.stream.writeln(
                            'Failed to identify failure point for %s' % test)
                        continue
                    dat = m.groupdict()
                    verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
                    self.stream.writeln(
                        '%s %s by %s (%s)' % (
                            test, verb, dat['node'], dat['summary']))
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d warned, %d failed.'
                % (result.testsRun,
                   skipped + ignored, warned, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

        return result

    def printtimes(self, times):
        """Print a per-test timing table, sorted by real time ascending."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write *result* to *outf* as xunit-style XML."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            message = cdatasafe(message).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write *result* to *outf* as a ``testreport = {...}`` JS blob."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            # tdata[1:] is (cuser, csys, real, start, end) per printtimes
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff.decode('unicode_escape'),
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2112 2112
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2134 2134
2135 2135 def __init__(self):
2136 2136 self.options = None
2137 2137 self._hgroot = None
2138 2138 self._testdir = None
2139 2139 self._outputdir = None
2140 2140 self._hgtmp = None
2141 2141 self._installdir = None
2142 2142 self._bindir = None
2143 2143 self._tmpbinddir = None
2144 2144 self._pythondir = None
2145 2145 self._coveragefile = None
2146 2146 self._createdfiles = []
2147 2147 self._hgcommand = None
2148 2148 self._hgpath = None
2149 2149 self._portoffset = 0
2150 2150 self._ports = {}
2151 2151
2152 2152 def run(self, args, parser=None):
2153 2153 """Run the test suite."""
2154 2154 oldmask = os.umask(0o22)
2155 2155 try:
2156 2156 parser = parser or getparser()
2157 2157 options, args = parseargs(args, parser)
2158 2158 # positional arguments are paths to test files to run, so
2159 2159 # we make sure they're all bytestrings
2160 2160 args = [_bytespath(a) for a in args]
2161 2161 self.options = options
2162 2162
2163 2163 self._checktools()
2164 2164 testdescs = self.findtests(args)
2165 2165 if options.profile_runner:
2166 2166 import statprof
2167 2167 statprof.start()
2168 2168 result = self._run(testdescs)
2169 2169 if options.profile_runner:
2170 2170 statprof.stop()
2171 2171 statprof.display()
2172 2172 return result
2173 2173
2174 2174 finally:
2175 2175 os.umask(oldmask)
2176 2176
    def _run(self, testdescs):
        """Order the tests, set up directories and environment, and run.

        Returns the exit status from _runtests() (0 on success), or 1 on
        a bad --tmpdir.  Always sleeps briefly and cleans up afterwards.
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # reports (.testtimes, report.json, ...) go to --outputdir,
        # defaulting to the test directory itself
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            # Automatically removing tmpdir sounds convenient, but could
            # really annoy anyone in the habit of using "--tmpdir=/tmp"
            # or "--tmpdir=$HOME".
            #vlog("# Removing temp dir", tmpdir)
            #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        # build PATH so the temporary hg (and chg) shadow any system one
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            time.sleep(.1)
            self._cleanup()
2354 2354
2355 2355 def findtests(self, args):
2356 2356 """Finds possible test files from arguments.
2357 2357
2358 2358 If you wish to inject custom tests into the test harness, this would
2359 2359 be a good function to monkeypatch or override in a derived class.
2360 2360 """
2361 2361 if not args:
2362 2362 if self.options.changed:
2363 2363 proc = Popen4('hg st --rev "%s" -man0 .' %
2364 2364 self.options.changed, None, 0)
2365 2365 stdout, stderr = proc.communicate()
2366 2366 args = stdout.strip(b'\0').split(b'\0')
2367 2367 else:
2368 2368 args = os.listdir(b'.')
2369 2369
2370 2370 tests = []
2371 2371 for t in args:
2372 2372 if not (os.path.basename(t).startswith(b'test-')
2373 2373 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2374 2374 continue
2375 2375 if t.endswith(b'.t'):
2376 2376 # .t file may contain multiple test cases
2377 2377 cases = sorted(parsettestcases(t))
2378 2378 if cases:
2379 2379 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2380 2380 else:
2381 2381 tests.append({'path': t})
2382 2382 else:
2383 2383 tests.append({'path': t})
2384 2384 return tests
2385 2385
    def _runtests(self, testdescs):
        """Build the TestSuite from *testdescs* and execute (or list) it.

        Returns None on success, 1 if any test failed (or the run was
        interrupted), 80 if tests only warned.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # skip ahead to the first test with a leftover .err file
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            warned = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True
                if result.warned:
                    warned = True

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
        if warned:
            return 80
2463 2463
2464 2464 def _getport(self, count):
2465 2465 port = self._ports.get(count) # do we have a cached entry?
2466 2466 if port is None:
2467 2467 portneeded = 3
2468 2468 # above 100 tries we just give up and let test reports failure
2469 2469 for tries in xrange(100):
2470 2470 allfree = True
2471 2471 port = self.options.port + self._portoffset
2472 2472 for idx in xrange(portneeded):
2473 2473 if not checkportisavailable(port + idx):
2474 2474 allfree = False
2475 2475 break
2476 2476 self._portoffset += portneeded
2477 2477 if allfree:
2478 2478 break
2479 2479 self._ports[count] = port
2480 2480 return port
2481 2481
2482 2482 def _gettest(self, testdesc, count):
2483 2483 """Obtain a Test by looking at its filename.
2484 2484
2485 2485 Returns a Test instance. The Test may not be runnable if it doesn't
2486 2486 map to a known type.
2487 2487 """
2488 2488 path = testdesc['path']
2489 2489 lctest = path.lower()
2490 2490 testcls = Test
2491 2491
2492 2492 for ext, cls in self.TESTTYPES:
2493 2493 if lctest.endswith(ext):
2494 2494 testcls = cls
2495 2495 break
2496 2496
2497 2497 refpath = os.path.join(self._testdir, path)
2498 2498 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2499 2499
2500 2500 # extra keyword parameters. 'case' is used by .t tests
2501 2501 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2502 2502
2503 2503 t = testcls(refpath, self._outputdir, tmpdir,
2504 2504 keeptmpdir=self.options.keep_tmpdir,
2505 2505 debug=self.options.debug,
2506 2506 timeout=self.options.timeout,
2507 2507 startport=self._getport(count),
2508 2508 extraconfigopts=self.options.extra_config_opt,
2509 2509 py3kwarnings=self.options.py3k_warnings,
2510 2510 shell=self.options.shell,
2511 2511 hgcommand=self._hgcommand,
2512 2512 usechg=bool(self.options.with_chg or self.options.chg),
2513 2513 useipv6=useipv6, **kwds)
2514 2514 t.should_reload = True
2515 2515 return t
2516 2516
2517 2517 def _cleanup(self):
2518 2518 """Clean up state from this test invocation."""
2519 2519 if self.options.keep_tmpdir:
2520 2520 return
2521 2521
2522 2522 vlog("# Cleaning up HGTMP", self._hgtmp)
2523 2523 shutil.rmtree(self._hgtmp, True)
2524 2524 for f in self._createdfiles:
2525 2525 try:
2526 2526 os.remove(f)
2527 2527 except OSError:
2528 2528 pass
2529 2529
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            # preferred approach: symlink "python" in the temp bin dir to
            # the interpreter running this script
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # no symlink support: rearrange PATH so the interpreter's own
            # directory is searched first
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2563 2563
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            # install succeeded; drop the log unless the user wants it
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # install failed: dump the captured log and abort
            f = open(installerrs, 'rb')
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            # NOTE(review): mixes 'rb' reads with str concatenation —
            # looks Python-2-only; verify before running under py3
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # arrange for every spawned python to start the coverage hook
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2682 2682
2683 2683 def _checkhglib(self, verb):
2684 2684 """Ensure that the 'mercurial' package imported by python is
2685 2685 the one we expect it to be. If not, print a warning to stderr."""
2686 2686 if ((self._bindir == self._pythondir) and
2687 2687 (self._bindir != self._tmpbindir)):
2688 2688 # The pythondir has been inferred from --with-hg flag.
2689 2689 # We cannot expect anything sensible here.
2690 2690 return
2691 2691 expecthg = os.path.join(self._pythondir, b'mercurial')
2692 2692 actualhg = self._gethgpath()
2693 2693 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2694 2694 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2695 2695 ' (expected %s)\n'
2696 2696 % (verb, actualhg, expecthg))
2697 2697 def _gethgpath(self):
2698 2698 """Return the path to the mercurial package that is actually found by
2699 2699 the current Python interpreter."""
2700 2700 if self._hgpath is not None:
2701 2701 return self._hgpath
2702 2702
2703 2703 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2704 2704 cmd = cmd % PYTHON
2705 2705 if PYTHON3:
2706 2706 cmd = _strpath(cmd)
2707 2707 pipe = os.popen(cmd)
2708 2708 try:
2709 2709 self._hgpath = _bytespath(pipe.read().strip())
2710 2710 finally:
2711 2711 pipe.close()
2712 2712
2713 2713 return self._hgpath
2714 2714
2715 2715 def _installchg(self):
2716 2716 """Install chg into the test environment"""
2717 2717 vlog('# Performing temporary installation of CHG')
2718 2718 assert os.path.dirname(self._bindir) == self._installdir
2719 2719 assert self._hgroot, 'must be called after _installhg()'
2720 2720 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2721 2721 % {b'make': 'make', # TODO: switch by option or environment?
2722 2722 b'prefix': self._installdir})
2723 2723 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2724 2724 vlog("# Running", cmd)
2725 2725 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2726 2726 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2727 2727 stderr=subprocess.STDOUT)
2728 2728 out, _err = proc.communicate()
2729 2729 if proc.returncode != 0:
2730 2730 if PYTHON3:
2731 2731 sys.stdout.buffer.write(out)
2732 2732 else:
2733 2733 sys.stdout.write(out)
2734 2734 sys.exit(1)
2735 2735
2736 2736 def _outputcoverage(self):
2737 2737 """Produce code coverage output."""
2738 2738 import coverage
2739 2739 coverage = coverage.coverage
2740 2740
2741 2741 vlog('# Producing coverage report')
2742 2742 # chdir is the easiest way to get short, relative paths in the
2743 2743 # output.
2744 2744 os.chdir(self._hgroot)
2745 2745 covdir = os.path.join(self._installdir, '..', 'coverage')
2746 2746 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2747 2747
2748 2748 # Map install directory paths back to source directory.
2749 2749 cov.config.paths['srcdir'] = ['.', self._pythondir]
2750 2750
2751 2751 cov.combine()
2752 2752
2753 2753 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2754 2754 cov.report(ignore_errors=True, omit=omit)
2755 2755
2756 2756 if self.options.htmlcov:
2757 2757 htmldir = os.path.join(self._testdir, 'htmlcov')
2758 2758 cov.html_report(directory=htmldir, omit=omit)
2759 2759 if self.options.annotate:
2760 2760 adir = os.path.join(self._testdir, 'annotated')
2761 2761 if not os.path.isdir(adir):
2762 2762 os.mkdir(adir)
2763 2763 cov.annotate(directory=adir, omit=omit)
2764 2764
2765 2765 def _findprogram(self, program):
2766 2766 """Search PATH for a executable program"""
2767 2767 dpb = _bytespath(os.defpath)
2768 2768 sepb = _bytespath(os.pathsep)
2769 2769 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2770 2770 name = os.path.join(p, program)
2771 2771 if os.name == 'nt' or os.access(name, os.X_OK):
2772 2772 return name
2773 2773 return None
2774 2774
2775 2775 def _checktools(self):
2776 2776 """Ensure tools required to run tests are present."""
2777 2777 for p in self.REQUIREDTOOLS:
2778 2778 if os.name == 'nt' and not p.endswith('.exe'):
2779 2779 p += '.exe'
2780 2780 found = self._findprogram(p)
2781 2781 if found:
2782 2782 vlog("# Found prerequisite", p, "at", found)
2783 2783 else:
2784 2784 print("WARNING: Did not find prerequisite tool: %s " %
2785 2785 p.decode("utf-8"))
2786 2786
if __name__ == '__main__':
    # On Windows, switch the standard streams to binary mode so test
    # output is not mangled by newline translation; elsewhere msvcrt is
    # absent and this is a no-op.
    try:
        import msvcrt
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        pass

    runner = TestRunner()
    sys.exit(runner.run(sys.argv[1:]))
@@ -1,1123 +1,1185 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a successful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 basic failing test
123 123 $ cat > test-failure.t << EOF
124 124 > $ echo babar
125 125 > rataxes
126 126 > This is a noop statement so that
127 127 > this test is still more bytes than success.
128 128 > pad pad pad pad............................................................
129 129 > pad pad pad pad............................................................
130 130 > pad pad pad pad............................................................
131 131 > pad pad pad pad............................................................
132 132 > pad pad pad pad............................................................
133 133 > pad pad pad pad............................................................
134 134 > EOF
135 135
136 136 >>> fh = open('test-failure-unicode.t', 'wb')
137 137 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
138 138 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
139 139
140 140 $ rt
141 141
142 142 --- $TESTTMP/test-failure.t
143 143 +++ $TESTTMP/test-failure.t.err
144 144 @@ -1,5 +1,5 @@
145 145 $ echo babar
146 146 - rataxes
147 147 + babar
148 148 This is a noop statement so that
149 149 this test is still more bytes than success.
150 150 pad pad pad pad............................................................
151 151
152 152 ERROR: test-failure.t output changed
153 153 !.
154 154 --- $TESTTMP/test-failure-unicode.t
155 155 +++ $TESTTMP/test-failure-unicode.t.err
156 156 @@ -1,2 +1,2 @@
157 157 $ echo babar\xce\xb1 (esc)
158 158 - l\xce\xb5\xce\xb5t (esc)
159 159 + babar\xce\xb1 (esc)
160 160
161 161 ERROR: test-failure-unicode.t output changed
162 162 !
163 163 Failed test-failure.t: output changed
164 164 Failed test-failure-unicode.t: output changed
165 165 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
166 166 python hash seed: * (glob)
167 167 [1]
168 168
169 169 test --outputdir
170 170 $ mkdir output
171 171 $ rt --outputdir output
172 172
173 173 --- $TESTTMP/test-failure.t
174 174 +++ $TESTTMP/output/test-failure.t.err
175 175 @@ -1,5 +1,5 @@
176 176 $ echo babar
177 177 - rataxes
178 178 + babar
179 179 This is a noop statement so that
180 180 this test is still more bytes than success.
181 181 pad pad pad pad............................................................
182 182
183 183 ERROR: test-failure.t output changed
184 184 !.
185 185 --- $TESTTMP/test-failure-unicode.t
186 186 +++ $TESTTMP/output/test-failure-unicode.t.err
187 187 @@ -1,2 +1,2 @@
188 188 $ echo babar\xce\xb1 (esc)
189 189 - l\xce\xb5\xce\xb5t (esc)
190 190 + babar\xce\xb1 (esc)
191 191
192 192 ERROR: test-failure-unicode.t output changed
193 193 !
194 194 Failed test-failure.t: output changed
195 195 Failed test-failure-unicode.t: output changed
196 196 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
197 197 python hash seed: * (glob)
198 198 [1]
199 199 $ ls -a output
200 200 .
201 201 ..
202 202 .testtimes
203 203 test-failure-unicode.t.err
204 204 test-failure.t.err
205 205
206 206 test --xunit support
207 207 $ rt --xunit=xunit.xml
208 208
209 209 --- $TESTTMP/test-failure.t
210 210 +++ $TESTTMP/test-failure.t.err
211 211 @@ -1,5 +1,5 @@
212 212 $ echo babar
213 213 - rataxes
214 214 + babar
215 215 This is a noop statement so that
216 216 this test is still more bytes than success.
217 217 pad pad pad pad............................................................
218 218
219 219 ERROR: test-failure.t output changed
220 220 !.
221 221 --- $TESTTMP/test-failure-unicode.t
222 222 +++ $TESTTMP/test-failure-unicode.t.err
223 223 @@ -1,2 +1,2 @@
224 224 $ echo babar\xce\xb1 (esc)
225 225 - l\xce\xb5\xce\xb5t (esc)
226 226 + babar\xce\xb1 (esc)
227 227
228 228 ERROR: test-failure-unicode.t output changed
229 229 !
230 230 Failed test-failure.t: output changed
231 231 Failed test-failure-unicode.t: output changed
232 232 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
233 233 python hash seed: * (glob)
234 234 [1]
235 235 $ cat xunit.xml
236 236 <?xml version="1.0" encoding="utf-8"?>
237 237 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
238 238 <testcase name="test-success.t" time="*"/> (glob)
239 239 <testcase name="test-failure-unicode.t" time="*"> (glob)
240 240 <failure message="output changed" type="output-mismatch">
241 241 <![CDATA[--- $TESTTMP/test-failure-unicode.t
242 242 +++ $TESTTMP/test-failure-unicode.t.err
243 243 @@ -1,2 +1,2 @@
244 244 $ echo babar\xce\xb1 (esc)
245 245 - l\xce\xb5\xce\xb5t (esc)
246 246 + babar\xce\xb1 (esc)
247 247 ]]> </failure>
248 248 </testcase>
249 249 <testcase name="test-failure.t" time="*"> (glob)
250 250 <failure message="output changed" type="output-mismatch">
251 251 <![CDATA[--- $TESTTMP/test-failure.t
252 252 +++ $TESTTMP/test-failure.t.err
253 253 @@ -1,5 +1,5 @@
254 254 $ echo babar
255 255 - rataxes
256 256 + babar
257 257 This is a noop statement so that
258 258 this test is still more bytes than success.
259 259 pad pad pad pad............................................................
260 260 ]]> </failure>
261 261 </testcase>
262 262 </testsuite>
263 263
264 264 $ cat .testtimes
265 265 test-failure-unicode.t * (glob)
266 266 test-failure.t * (glob)
267 267 test-success.t * (glob)
268 268
269 269 $ rt --list-tests
270 270 test-failure-unicode.t
271 271 test-failure.t
272 272 test-success.t
273 273
274 274 $ rt --list-tests --json
275 275 test-failure-unicode.t
276 276 test-failure.t
277 277 test-success.t
278 278 $ cat report.json
279 279 testreport ={
280 280 "test-failure-unicode.t": {
281 281 "result": "success"
282 282 },
283 283 "test-failure.t": {
284 284 "result": "success"
285 285 },
286 286 "test-success.t": {
287 287 "result": "success"
288 288 }
289 289 } (no-eol)
290 290
291 291 $ rt --list-tests --xunit=xunit.xml
292 292 test-failure-unicode.t
293 293 test-failure.t
294 294 test-success.t
295 295 $ cat xunit.xml
296 296 <?xml version="1.0" encoding="utf-8"?>
297 297 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
298 298 <testcase name="test-failure-unicode.t"/>
299 299 <testcase name="test-failure.t"/>
300 300 <testcase name="test-success.t"/>
301 301 </testsuite>
302 302
303 $ rt --list-tests test-failure* --json --xunit=xunit.xml
303 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
304 304 test-failure-unicode.t
305 305 test-failure.t
306 $ cat report.json
306 $ cat output/report.json
307 307 testreport ={
308 308 "test-failure-unicode.t": {
309 309 "result": "success"
310 310 },
311 311 "test-failure.t": {
312 312 "result": "success"
313 313 }
314 314 } (no-eol)
315 315 $ cat xunit.xml
316 316 <?xml version="1.0" encoding="utf-8"?>
317 317 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
318 318 <testcase name="test-failure-unicode.t"/>
319 319 <testcase name="test-failure.t"/>
320 320 </testsuite>
321 321
322 322 $ rm test-failure-unicode.t
323 323
324 324 test for --retest
325 325 ====================
326 326
327 327 $ rt --retest
328 328
329 329 --- $TESTTMP/test-failure.t
330 330 +++ $TESTTMP/test-failure.t.err
331 331 @@ -1,5 +1,5 @@
332 332 $ echo babar
333 333 - rataxes
334 334 + babar
335 335 This is a noop statement so that
336 336 this test is still more bytes than success.
337 337 pad pad pad pad............................................................
338 338
339 339 ERROR: test-failure.t output changed
340 340 !
341 341 Failed test-failure.t: output changed
342 342 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
343 343 python hash seed: * (glob)
344 344 [1]
345 345
346 346 --retest works with --outputdir
347 347 $ rm -r output
348 348 $ mkdir output
349 349 $ mv test-failure.t.err output
350 350 $ rt --retest --outputdir output
351 351
352 352 --- $TESTTMP/test-failure.t
353 353 +++ $TESTTMP/output/test-failure.t.err
354 354 @@ -1,5 +1,5 @@
355 355 $ echo babar
356 356 - rataxes
357 357 + babar
358 358 This is a noop statement so that
359 359 this test is still more bytes than success.
360 360 pad pad pad pad............................................................
361 361
362 362 ERROR: test-failure.t output changed
363 363 !
364 364 Failed test-failure.t: output changed
365 365 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
366 366 python hash seed: * (glob)
367 367 [1]
368 368
369 369 Selecting Tests To Run
370 370 ======================
371 371
372 372 successful
373 373
374 374 $ rt test-success.t
375 375 .
376 376 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
377 377
378 378 success w/ keyword
379 379 $ rt -k xyzzy
380 380 .
381 381 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
382 382
383 383 failed
384 384
385 385 $ rt test-failure.t
386 386
387 387 --- $TESTTMP/test-failure.t
388 388 +++ $TESTTMP/test-failure.t.err
389 389 @@ -1,5 +1,5 @@
390 390 $ echo babar
391 391 - rataxes
392 392 + babar
393 393 This is a noop statement so that
394 394 this test is still more bytes than success.
395 395 pad pad pad pad............................................................
396 396
397 397 ERROR: test-failure.t output changed
398 398 !
399 399 Failed test-failure.t: output changed
400 400 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
401 401 python hash seed: * (glob)
402 402 [1]
403 403
404 404 failure w/ keyword
405 405 $ rt -k rataxes
406 406
407 407 --- $TESTTMP/test-failure.t
408 408 +++ $TESTTMP/test-failure.t.err
409 409 @@ -1,5 +1,5 @@
410 410 $ echo babar
411 411 - rataxes
412 412 + babar
413 413 This is a noop statement so that
414 414 this test is still more bytes than success.
415 415 pad pad pad pad............................................................
416 416
417 417 ERROR: test-failure.t output changed
418 418 !
419 419 Failed test-failure.t: output changed
420 420 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
421 421 python hash seed: * (glob)
422 422 [1]
423 423
424 424 Verify that when a process fails to start we show a useful message
425 425 ==================================================================
426 426
427 427 $ cat > test-serve-fail.t <<EOF
428 428 > $ echo 'abort: child process failed to start blah'
429 429 > EOF
430 430 $ rt test-serve-fail.t
431 431
432 432 ERROR: test-serve-fail.t output changed
433 433 !
434 434 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
435 435 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
436 436 python hash seed: * (glob)
437 437 [1]
438 438 $ rm test-serve-fail.t
439 439
440 440 Verify that we can try other ports
441 441 ===================================
442 442 $ hg init inuse
443 443 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
444 444 $ cat blocks.pid >> $DAEMON_PIDS
445 445 $ cat > test-serve-inuse.t <<EOF
446 446 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
447 447 > $ cat hg.pid >> \$DAEMON_PIDS
448 448 > EOF
449 449 $ rt test-serve-inuse.t
450 450 .
451 451 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
452 452 $ rm test-serve-inuse.t
453 453 $ killdaemons.py $DAEMON_PIDS
454 454 $ rm $DAEMON_PIDS
455 455
456 456 Running In Debug Mode
457 457 ======================
458 458
459 459 $ rt --debug 2>&1 | grep -v pwd
460 460 + echo *SALT* 0 0 (glob)
461 461 *SALT* 0 0 (glob)
462 462 + echo babar
463 463 babar
464 464 + echo *SALT* 10 0 (glob)
465 465 *SALT* 10 0 (glob)
466 466 *+ echo *SALT* 0 0 (glob)
467 467 *SALT* 0 0 (glob)
468 468 + echo babar
469 469 babar
470 470 + echo *SALT* 2 0 (glob)
471 471 *SALT* 2 0 (glob)
472 472 + echo xyzzy
473 473 xyzzy
474 474 + echo *SALT* 9 0 (glob)
475 475 *SALT* 9 0 (glob)
476 476 + printf *abc\ndef\nxyz\n* (glob)
477 477 abc
478 478 def
479 479 xyz
480 480 + echo *SALT* 15 0 (glob)
481 481 *SALT* 15 0 (glob)
482 482 + printf *zyx\nwvu\ntsr\n* (glob)
483 483 zyx
484 484 wvu
485 485 tsr
486 486 + echo *SALT* 22 0 (glob)
487 487 *SALT* 22 0 (glob)
488 488 .
489 489 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
490 490
491 491 Parallel runs
492 492 ==============
493 493
494 494 (duplicate the failing test to get predictable output)
495 495 $ cp test-failure.t test-failure-copy.t
496 496
497 497 $ rt --jobs 2 test-failure*.t -n
498 498 !!
499 499 Failed test-failure*.t: output changed (glob)
500 500 Failed test-failure*.t: output changed (glob)
501 501 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
502 502 python hash seed: * (glob)
503 503 [1]
504 504
505 505 failures in parallel with --first should only print one failure
506 506 >>> f = open('test-nothing.t', 'w')
507 507 >>> f.write('foo\n' * 1024) and None
508 508 >>> f.write(' $ sleep 1') and None
509 509 $ rt --jobs 2 --first
510 510
511 511 --- $TESTTMP/test-failure*.t (glob)
512 512 +++ $TESTTMP/test-failure*.t.err (glob)
513 513 @@ -1,5 +1,5 @@
514 514 $ echo babar
515 515 - rataxes
516 516 + babar
517 517 This is a noop statement so that
518 518 this test is still more bytes than success.
519 519 pad pad pad pad............................................................
520 520
521 521 Failed test-failure*.t: output changed (glob)
522 522 Failed test-nothing.t: output changed
523 523 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
524 524 python hash seed: * (glob)
525 525 [1]
526 526
527 527
528 528 (delete the duplicated test file)
529 529 $ rm test-failure-copy.t test-nothing.t
530 530
531 531
532 532 Interactive run
533 533 ===============
534 534
535 535 (backup the failing test)
536 536 $ cp test-failure.t backup
537 537
538 538 Refuse the fix
539 539
540 540 $ echo 'n' | rt -i
541 541
542 542 --- $TESTTMP/test-failure.t
543 543 +++ $TESTTMP/test-failure.t.err
544 544 @@ -1,5 +1,5 @@
545 545 $ echo babar
546 546 - rataxes
547 547 + babar
548 548 This is a noop statement so that
549 549 this test is still more bytes than success.
550 550 pad pad pad pad............................................................
551 551 Accept this change? [n]
552 552 ERROR: test-failure.t output changed
553 553 !.
554 554 Failed test-failure.t: output changed
555 555 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
556 556 python hash seed: * (glob)
557 557 [1]
558 558
559 559 $ cat test-failure.t
560 560 $ echo babar
561 561 rataxes
562 562 This is a noop statement so that
563 563 this test is still more bytes than success.
564 564 pad pad pad pad............................................................
565 565 pad pad pad pad............................................................
566 566 pad pad pad pad............................................................
567 567 pad pad pad pad............................................................
568 568 pad pad pad pad............................................................
569 569 pad pad pad pad............................................................
570 570
571 571 Interactive with custom view
572 572
573 573 $ echo 'n' | rt -i --view echo
574 574 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
575 575 Accept this change? [n]* (glob)
576 576 ERROR: test-failure.t output changed
577 577 !.
578 578 Failed test-failure.t: output changed
579 579 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
580 580 python hash seed: * (glob)
581 581 [1]
582 582
583 583 View the fix
584 584
585 585 $ echo 'y' | rt --view echo
586 586 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
587 587
588 588 ERROR: test-failure.t output changed
589 589 !.
590 590 Failed test-failure.t: output changed
591 591 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
592 592 python hash seed: * (glob)
593 593 [1]
594 594
595 595 Accept the fix
596 596
597 597 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
598 598 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
599 599 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
600 600 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
601 601 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
602 602 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
603 603 $ echo 'y' | rt -i 2>&1
604 604
605 605 --- $TESTTMP/test-failure.t
606 606 +++ $TESTTMP/test-failure.t.err
607 607 @@ -1,5 +1,5 @@
608 608 $ echo babar
609 609 - rataxes
610 610 + babar
611 611 This is a noop statement so that
612 612 this test is still more bytes than success.
613 613 pad pad pad pad............................................................
614 614 @@ -9,7 +9,7 @@
615 615 pad pad pad pad............................................................
616 616 pad pad pad pad............................................................
617 617 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
618 618 - saved backup bundle to $TESTTMP/foo.hg
619 619 + saved backup bundle to $TESTTMP/foo.hg* (glob)
620 620 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
621 621 saved backup bundle to $TESTTMP/foo.hg* (glob)
622 622 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
623 623 Accept this change? [n] ..
624 624 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
625 625
626 626 $ sed -e 's,(glob)$,&<,g' test-failure.t
627 627 $ echo babar
628 628 babar
629 629 This is a noop statement so that
630 630 this test is still more bytes than success.
631 631 pad pad pad pad............................................................
632 632 pad pad pad pad............................................................
633 633 pad pad pad pad............................................................
634 634 pad pad pad pad............................................................
635 635 pad pad pad pad............................................................
636 636 pad pad pad pad............................................................
637 637 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
638 638 saved backup bundle to $TESTTMP/foo.hg (glob)<
639 639 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
640 640 saved backup bundle to $TESTTMP/foo.hg (glob)<
641 641 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
642 642 saved backup bundle to $TESTTMP/*.hg (glob)<
643 643
644 644 (reinstall)
645 645 $ mv backup test-failure.t
646 646
647 647 No Diff
648 648 ===============
649 649
650 650 $ rt --nodiff
651 651 !.
652 652 Failed test-failure.t: output changed
653 653 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
654 654 python hash seed: * (glob)
655 655 [1]
656 656
657 657 test --tmpdir support
658 658 $ rt --tmpdir=$TESTTMP/keep test-success.t
659 659
660 660 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
661 661 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
662 662 .
663 663 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
664 664
665 665 timeouts
666 666 ========
667 667 $ cat > test-timeout.t <<EOF
668 668 > $ sleep 2
669 669 > $ echo pass
670 670 > pass
671 671 > EOF
672 672 > echo '#require slow' > test-slow-timeout.t
673 673 > cat test-timeout.t >> test-slow-timeout.t
674 674 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
675 675 st
676 676 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
677 677 Failed test-timeout.t: timed out
678 678 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
679 679 python hash seed: * (glob)
680 680 [1]
681 681 $ rt --timeout=1 --slowtimeout=3 \
682 682 > test-timeout.t test-slow-timeout.t --allow-slow-tests
683 683 .t
684 684 Failed test-timeout.t: timed out
685 685 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
686 686 python hash seed: * (glob)
687 687 [1]
688 688 $ rm test-timeout.t test-slow-timeout.t
689 689
690 690 test for --time
691 691 ==================
692 692
693 693 $ rt test-success.t --time
694 694 .
695 695 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
696 696 # Producing time report
697 697 start end cuser csys real Test
698 698 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
699 699
700 700 test for --time with --job enabled
701 701 ====================================
702 702
703 703 $ rt test-success.t --time --jobs 2
704 704 .
705 705 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
706 706 # Producing time report
707 707 start end cuser csys real Test
708 708 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
709 709
710 710 Skips
711 711 ================
712 712 $ cat > test-skip.t <<EOF
713 713 > $ echo xyzzy
714 714 > #require false
715 715 > EOF
716 716 $ rt --nodiff
717 717 !.s
718 718 Skipped test-skip.t: missing feature: nail clipper
719 719 Failed test-failure.t: output changed
720 720 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
721 721 python hash seed: * (glob)
722 722 [1]
723 723
724 724 $ rt --keyword xyzzy
725 725 .s
726 726 Skipped test-skip.t: missing feature: nail clipper
727 727 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
728 728
729 729 Skips with xml
730 730 $ rt --keyword xyzzy \
731 731 > --xunit=xunit.xml
732 732 .s
733 733 Skipped test-skip.t: missing feature: nail clipper
734 734 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
735 735 $ cat xunit.xml
736 736 <?xml version="1.0" encoding="utf-8"?>
737 737 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
738 738 <testcase name="test-success.t" time="*"/> (glob)
739 739 <testcase name="test-skip.t">
740 740 <skipped>
741 741 <![CDATA[missing feature: nail clipper]]> </skipped>
742 742 </testcase>
743 743 </testsuite>
744 744
745 745 Missing skips or blacklisted skips don't count as executed:
746 746 $ echo test-failure.t > blacklist
747 747 $ rt --blacklist=blacklist --json\
748 748 > test-failure.t test-bogus.t
749 749 ss
750 750 Skipped test-bogus.t: Doesn't exist
751 751 Skipped test-failure.t: blacklisted
752 752 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
753 753 $ cat report.json
754 754 testreport ={
755 755 "test-bogus.t": {
756 756 "result": "skip"
757 757 },
758 758 "test-failure.t": {
759 759 "result": "skip"
760 760 }
761 761 } (no-eol)
762 762
763 763 Whitelist trumps blacklist
764 764 $ echo test-failure.t > whitelist
765 765 $ rt --blacklist=blacklist --whitelist=whitelist --json\
766 766 > test-failure.t test-bogus.t
767 767 s
768 768 --- $TESTTMP/test-failure.t
769 769 +++ $TESTTMP/test-failure.t.err
770 770 @@ -1,5 +1,5 @@
771 771 $ echo babar
772 772 - rataxes
773 773 + babar
774 774 This is a noop statement so that
775 775 this test is still more bytes than success.
776 776 pad pad pad pad............................................................
777 777
778 778 ERROR: test-failure.t output changed
779 779 !
780 780 Skipped test-bogus.t: Doesn't exist
781 781 Failed test-failure.t: output changed
782 782 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
783 783 python hash seed: * (glob)
784 784 [1]
785 785
786 786 test for --json
787 787 ==================
788 788
789 789 $ rt --json
790 790
791 791 --- $TESTTMP/test-failure.t
792 792 +++ $TESTTMP/test-failure.t.err
793 793 @@ -1,5 +1,5 @@
794 794 $ echo babar
795 795 - rataxes
796 796 + babar
797 797 This is a noop statement so that
798 798 this test is still more bytes than success.
799 799 pad pad pad pad............................................................
800 800
801 801 ERROR: test-failure.t output changed
802 802 !.s
803 803 Skipped test-skip.t: missing feature: nail clipper
804 804 Failed test-failure.t: output changed
805 805 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
806 806 python hash seed: * (glob)
807 807 [1]
808 808
809 809 $ cat report.json
810 810 testreport ={
811 811 "test-failure.t": [\{] (re)
812 812 "csys": "\s*[\d\.]{4,5}", ? (re)
813 813 "cuser": "\s*[\d\.]{4,5}", ? (re)
814 814 "diff": "---.+\+\+\+.+", ? (re)
815 815 "end": "\s*[\d\.]{4,5}", ? (re)
816 816 "result": "failure", ? (re)
817 817 "start": "\s*[\d\.]{4,5}", ? (re)
818 818 "time": "\s*[\d\.]{4,5}" (re)
819 819 }, ? (re)
820 820 "test-skip.t": {
821 821 "csys": "\s*[\d\.]{4,5}", ? (re)
822 822 "cuser": "\s*[\d\.]{4,5}", ? (re)
823 823 "diff": "", ? (re)
824 824 "end": "\s*[\d\.]{4,5}", ? (re)
825 825 "result": "skip", ? (re)
826 826 "start": "\s*[\d\.]{4,5}", ? (re)
827 827 "time": "\s*[\d\.]{4,5}" (re)
828 828 }, ? (re)
829 829 "test-success.t": [\{] (re)
830 830 "csys": "\s*[\d\.]{4,5}", ? (re)
831 831 "cuser": "\s*[\d\.]{4,5}", ? (re)
832 832 "diff": "", ? (re)
833 833 "end": "\s*[\d\.]{4,5}", ? (re)
834 834 "result": "success", ? (re)
835 835 "start": "\s*[\d\.]{4,5}", ? (re)
836 836 "time": "\s*[\d\.]{4,5}" (re)
837 837 }
838 838 } (no-eol)
839 --json with --outputdir
840
841 $ rm report.json
842 $ rm -r output
843 $ mkdir output
844 $ rt --json --outputdir output
845
846 --- $TESTTMP/test-failure.t
847 +++ $TESTTMP/output/test-failure.t.err
848 @@ -1,5 +1,5 @@
849 $ echo babar
850 - rataxes
851 + babar
852 This is a noop statement so that
853 this test is still more bytes than success.
854 pad pad pad pad............................................................
855
856 ERROR: test-failure.t output changed
857 !.s
858 Skipped test-skip.t: missing feature: nail clipper
859 Failed test-failure.t: output changed
860 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
861 python hash seed: * (glob)
862 [1]
863 $ f report.json
864 report.json: file not found
865 $ cat output/report.json
866 testreport ={
867 "test-failure.t": [\{] (re)
868 "csys": "\s*[\d\.]{4,5}", ? (re)
869 "cuser": "\s*[\d\.]{4,5}", ? (re)
870 "diff": "---.+\+\+\+.+", ? (re)
871 "end": "\s*[\d\.]{4,5}", ? (re)
872 "result": "failure", ? (re)
873 "start": "\s*[\d\.]{4,5}", ? (re)
874 "time": "\s*[\d\.]{4,5}" (re)
875 }, ? (re)
876 "test-skip.t": {
877 "csys": "\s*[\d\.]{4,5}", ? (re)
878 "cuser": "\s*[\d\.]{4,5}", ? (re)
879 "diff": "", ? (re)
880 "end": "\s*[\d\.]{4,5}", ? (re)
881 "result": "skip", ? (re)
882 "start": "\s*[\d\.]{4,5}", ? (re)
883 "time": "\s*[\d\.]{4,5}" (re)
884 }, ? (re)
885 "test-success.t": [\{] (re)
886 "csys": "\s*[\d\.]{4,5}", ? (re)
887 "cuser": "\s*[\d\.]{4,5}", ? (re)
888 "diff": "", ? (re)
889 "end": "\s*[\d\.]{4,5}", ? (re)
890 "result": "success", ? (re)
891 "start": "\s*[\d\.]{4,5}", ? (re)
892 "time": "\s*[\d\.]{4,5}" (re)
893 }
894 } (no-eol)
895 $ ls -a output
896 .
897 ..
898 .testtimes
899 report.json
900 test-failure.t.err
839 901
840 902 Test that failed test accepted through interactive are properly reported:
841 903
842 904 $ cp test-failure.t backup
843 905 $ echo y | rt --json -i
844 906
845 907 --- $TESTTMP/test-failure.t
846 908 +++ $TESTTMP/test-failure.t.err
847 909 @@ -1,5 +1,5 @@
848 910 $ echo babar
849 911 - rataxes
850 912 + babar
851 913 This is a noop statement so that
852 914 this test is still more bytes than success.
853 915 pad pad pad pad............................................................
854 916 Accept this change? [n] ..s
855 917 Skipped test-skip.t: missing feature: nail clipper
856 918 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
857 919
858 920 $ cat report.json
859 921 testreport ={
860 922 "test-failure.t": [\{] (re)
861 923 "csys": "\s*[\d\.]{4,5}", ? (re)
862 924 "cuser": "\s*[\d\.]{4,5}", ? (re)
863 925 "diff": "", ? (re)
864 926 "end": "\s*[\d\.]{4,5}", ? (re)
865 927 "result": "success", ? (re)
866 928 "start": "\s*[\d\.]{4,5}", ? (re)
867 929 "time": "\s*[\d\.]{4,5}" (re)
868 930 }, ? (re)
869 931 "test-skip.t": {
870 932 "csys": "\s*[\d\.]{4,5}", ? (re)
871 933 "cuser": "\s*[\d\.]{4,5}", ? (re)
872 934 "diff": "", ? (re)
873 935 "end": "\s*[\d\.]{4,5}", ? (re)
874 936 "result": "skip", ? (re)
875 937 "start": "\s*[\d\.]{4,5}", ? (re)
876 938 "time": "\s*[\d\.]{4,5}" (re)
877 939 }, ? (re)
878 940 "test-success.t": [\{] (re)
879 941 "csys": "\s*[\d\.]{4,5}", ? (re)
880 942 "cuser": "\s*[\d\.]{4,5}", ? (re)
881 943 "diff": "", ? (re)
882 944 "end": "\s*[\d\.]{4,5}", ? (re)
883 945 "result": "success", ? (re)
884 946 "start": "\s*[\d\.]{4,5}", ? (re)
885 947 "time": "\s*[\d\.]{4,5}" (re)
886 948 }
887 949 } (no-eol)
888 950 $ mv backup test-failure.t
889 951
890 952 backslash on end of line with glob matching is handled properly
891 953
892 954 $ cat > test-glob-backslash.t << EOF
893 955 > $ echo 'foo bar \\'
894 956 > foo * \ (glob)
895 957 > EOF
896 958
897 959 $ rt test-glob-backslash.t
898 960 .
899 961 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
900 962
901 963 $ rm -f test-glob-backslash.t
902 964
903 965 Test globbing of local IP addresses
904 966 $ echo 172.16.18.1
905 967 $LOCALIP (glob)
906 968 $ echo dead:beef::1
907 969 $LOCALIP (glob)
908 970
909 971 Test reusability for third party tools
910 972 ======================================
911 973
912 974 $ mkdir "$TESTTMP"/anothertests
913 975 $ cd "$TESTTMP"/anothertests
914 976
915 977 test that `run-tests.py` can execute hghave, even if it runs not in
916 978 Mercurial source tree.
917 979
918 980 $ cat > test-hghave.t <<EOF
919 981 > #require true
920 982 > $ echo foo
921 983 > foo
922 984 > EOF
923 985 $ rt test-hghave.t
924 986 .
925 987 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
926 988
927 989 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
928 990 running is placed.
929 991
930 992 $ cat > test-runtestdir.t <<EOF
931 993 > - $TESTDIR, in which test-run-tests.t is placed
932 994 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
933 995 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
934 996 >
935 997 > #if windows
936 998 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
937 999 > #else
938 1000 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
939 1001 > #endif
940 1002 > $ test "\$RUNTESTDIR" = "$TESTDIR"
941 1003 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
942 1004 > #!/usr/bin/env python
943 1005 > #
944 1006 > # check-code - a style and portability checker for Mercurial
945 1007 > EOF
946 1008 $ rt test-runtestdir.t
947 1009 .
948 1010 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
949 1011
950 1012 #if execbit
951 1013
952 1014 test that TESTDIR is referred in PATH
953 1015
954 1016 $ cat > custom-command.sh <<EOF
955 1017 > #!/bin/sh
956 1018 > echo "hello world"
957 1019 > EOF
958 1020 $ chmod +x custom-command.sh
959 1021 $ cat > test-testdir-path.t <<EOF
960 1022 > $ custom-command.sh
961 1023 > hello world
962 1024 > EOF
963 1025 $ rt test-testdir-path.t
964 1026 .
965 1027 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
966 1028
967 1029 #endif
968 1030
969 1031 test support for --allow-slow-tests
970 1032 $ cat > test-very-slow-test.t <<EOF
971 1033 > #require slow
972 1034 > $ echo pass
973 1035 > pass
974 1036 > EOF
975 1037 $ rt test-very-slow-test.t
976 1038 s
977 1039 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
978 1040 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
979 1041 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
980 1042 .
981 1043 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
982 1044
983 1045 support for running a test outside the current directory
984 1046 $ mkdir nonlocal
985 1047 $ cat > nonlocal/test-is-not-here.t << EOF
986 1048 > $ echo pass
987 1049 > pass
988 1050 > EOF
989 1051 $ rt nonlocal/test-is-not-here.t
990 1052 .
991 1053 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
992 1054
993 1055 support for bisecting failed tests automatically
994 1056 $ hg init bisect
995 1057 $ cd bisect
996 1058 $ cat >> test-bisect.t <<EOF
997 1059 > $ echo pass
998 1060 > pass
999 1061 > EOF
1000 1062 $ hg add test-bisect.t
1001 1063 $ hg ci -m 'good'
1002 1064 $ cat >> test-bisect.t <<EOF
1003 1065 > $ echo pass
1004 1066 > fail
1005 1067 > EOF
1006 1068 $ hg ci -m 'bad'
1007 1069 $ rt --known-good-rev=0 test-bisect.t
1008 1070
1009 1071 --- $TESTTMP/anothertests/bisect/test-bisect.t
1010 1072 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1011 1073 @@ -1,4 +1,4 @@
1012 1074 $ echo pass
1013 1075 pass
1014 1076 $ echo pass
1015 1077 - fail
1016 1078 + pass
1017 1079
1018 1080 ERROR: test-bisect.t output changed
1019 1081 !
1020 1082 Failed test-bisect.t: output changed
1021 1083 test-bisect.t broken by 72cbf122d116 (bad)
1022 1084 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
1023 1085 python hash seed: * (glob)
1024 1086 [1]
1025 1087
1026 1088 $ cd ..
1027 1089
1028 1090 Test a broken #if statement doesn't break run-tests threading.
1029 1091 ==============================================================
1030 1092 $ mkdir broken
1031 1093 $ cd broken
1032 1094 $ cat > test-broken.t <<EOF
1033 1095 > true
1034 1096 > #if notarealhghavefeature
1035 1097 > $ false
1036 1098 > #endif
1037 1099 > EOF
1038 1100 $ for f in 1 2 3 4 ; do
1039 1101 > cat > test-works-$f.t <<EOF
1040 1102 > This is test case $f
1041 1103 > $ sleep 1
1042 1104 > EOF
1043 1105 > done
1044 1106 $ rt -j 2
1045 1107 ....
1046 1108 # Ran 5 tests, 0 skipped, 0 warned, 0 failed.
1047 1109 skipped: unknown feature: notarealhghavefeature
1048 1110
1049 1111 $ cd ..
1050 1112 $ rm -rf broken
1051 1113
1052 1114 Test cases in .t files
1053 1115 ======================
1054 1116 $ mkdir cases
1055 1117 $ cd cases
1056 1118 $ cat > test-cases-abc.t <<'EOF'
1057 1119 > #testcases A B C
1058 1120 > $ V=B
1059 1121 > #if A
1060 1122 > $ V=A
1061 1123 > #endif
1062 1124 > #if C
1063 1125 > $ V=C
1064 1126 > #endif
1065 1127 > $ echo $V | sed 's/A/C/'
1066 1128 > C
1067 1129 > #if C
1068 1130 > $ [ $V = C ]
1069 1131 > #endif
1070 1132 > #if A
1071 1133 > $ [ $V = C ]
1072 1134 > [1]
1073 1135 > #endif
1074 1136 > #if no-C
1075 1137 > $ [ $V = C ]
1076 1138 > [1]
1077 1139 > #endif
1078 1140 > $ [ $V = D ]
1079 1141 > [1]
1080 1142 > EOF
1081 1143 $ rt
1082 1144 .
1083 1145 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1084 1146 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1085 1147 @@ -7,7 +7,7 @@
1086 1148 $ V=C
1087 1149 #endif
1088 1150 $ echo $V | sed 's/A/C/'
1089 1151 - C
1090 1152 + B
1091 1153 #if C
1092 1154 $ [ $V = C ]
1093 1155 #endif
1094 1156
1095 1157 ERROR: test-cases-abc.t (case B) output changed
1096 1158 !.
1097 1159 Failed test-cases-abc.t (case B): output changed
1098 1160 # Ran 3 tests, 0 skipped, 0 warned, 1 failed.
1099 1161 python hash seed: * (glob)
1100 1162 [1]
1101 1163
1102 1164 --restart works
1103 1165
1104 1166 $ rt --restart
1105 1167
1106 1168 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1107 1169 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1108 1170 @@ -7,7 +7,7 @@
1109 1171 $ V=C
1110 1172 #endif
1111 1173 $ echo $V | sed 's/A/C/'
1112 1174 - C
1113 1175 + B
1114 1176 #if C
1115 1177 $ [ $V = C ]
1116 1178 #endif
1117 1179
1118 1180 ERROR: test-cases-abc.t (case B) output changed
1119 1181 !.
1120 1182 Failed test-cases-abc.t (case B): output changed
1121 1183 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
1122 1184 python hash seed: * (glob)
1123 1185 [1]
General Comments 0
You need to be logged in to leave comments. Login now