##// END OF EJS Templates
run-tests: fix -i when "#testcases" is used in .t test...
Jun Wu -
r32982:573baab2 default
parent child Browse files
Show More
@@ -1,2769 +1,2769 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import difflib
49 49 import distutils.version as version
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import random
55 55 import re
56 56 import shutil
57 57 import signal
58 58 import socket
59 59 import subprocess
60 60 import sys
61 61 import sysconfig
62 62 import tempfile
63 63 import threading
64 64 import time
65 65 import unittest
66 66 import xml.dom.minidom as minidom
67 67
68 68 try:
69 69 import Queue as queue
70 70 except ImportError:
71 71 import queue
72 72
# Optionally force the "undefined" codec as the default encoding so that
# any implicit str<->unicode conversion raises (Python 2 only; reload()
# does not exist on Python 3, hence the NameError guard).
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# os.environb is only available on Python 3 POSIX; fall back to os.environ.
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess creation (see Popen4) so fds are not inherited races.
processlock = threading.Lock()
82 82
# Version gate: only Python 2.7 and Python 3.5+ are supported.  The
# _bytespath/_strpath helpers paper over the str/bytes split so path
# handling code reads the same on both major versions.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        """Encode a native str path to bytes (UTF-8)."""
        return p.encode('utf-8')

    def _strpath(p):
        """Decode a bytes path back to a native str (UTF-8)."""
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support: os.WIFEXITED does not exist there, so default to
# a callable that always reports "not exited by wait()".
wifexited = getattr(os, "WIFEXITED", lambda x: False)
111 111 # Whether to use IPv6
# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """Return True if we can listen on localhost using family=name.

    name should be either 'AF_INET' or 'AF_INET6'.  The port already
    being in use still counts as success - EADDRINUSE proves the
    address family works on this host.
    """
    family = getattr(socket, name, None)
    if family is None:
        # This Python build does not even define the family constant.
        return False
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Busy port still demonstrates the family is usable.
            return True
        if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        raise
    return True
135 135
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """Return True if a port seems free to bind on localhost."""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.bind(('localhost', port))
        sock.close()
        return True
    except socket.error as exc:
        # These errnos merely mean "not available here", not a real bug.
        benign = (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                  errno.EPROTONOSUPPORT)
        if exc.errno not in benign:
            raise
        return False
155 155
# Closing inherited fds is only safe/cheap on POSIX.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through the shell in directory `wd`, popen2-style.

    Returns the Popen object augmented with popen2-compatible aliases
    (fromchild/tochild/childerr) and a `timeout` flag.  When `timeout`
    is nonzero, a watchdog thread terminates the process and sets
    p.timeout = True once the deadline passes.
    """
    # Hold processlock so concurrent spawns don't race over inherited fds.
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # popen2-style aliases kept for callers written against that API.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll returncode until the deadline; note the flag is set
            # unconditionally after the loop, then checked against
            # returncode to decide whether to actually kill.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
181 181
# Interpreter path with forward slashes so it is shell-safe on Windows.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults as (environment variable, fallback) pairs.  NOTE:
# getparser() flattens these tuples in place to concrete values, so this
# dict holds scalars after the parser has been built.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
194 194
def canonpath(path):
    """Return `path` with ~ expanded and symlinks fully resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
197 197
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a single mapping.

    Each file lists one test name (bytes) per line; anything after a
    '#' is a comment.  Returns a dict mapping each listed name to the
    filename it came from (later files win on duplicates).  Missing
    files are skipped, with a warning unless warn is False; any other
    IOError propagates.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Context manager guarantees the file is closed even if a read
        # raises (the original f.close() leaked the handle on error).
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
218 218
def parsettestcases(path):
    """Read a .t test file and return the set of test case names.

    Case names are the space-separated words following '#testcases'
    directives.  If path does not exist an empty set is returned; any
    other IOError propagates.
    """
    cases = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(b'#testcases '):
                    # Everything after the 11-byte directive prefix.
                    cases.update(line[11:].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases
234 234
def getparser():
    """Obtain the OptionParser used by the CLI.

    Also resolves the module-level `defaults` dict: each (envvar,
    fallback) tuple is replaced IN PLACE by the effective scalar value,
    so this function is only safe to call once per process.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("--list-tests", action="store_true",
        help="list tests instead of running them")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("--outputdir", type="string",
        help="directory to write error logs to (default=test directory)")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    parser.add_option("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    parser.add_option("--ipv6", action="store_true",
        help="prefer IPv6 to IPv4 for network related tests")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.7+")
    # This option should be deleted once test-check-py3-compat.t and other
    # Python 3 tests run with Python 3.
    parser.add_option("--with-python3", metavar="PYTHON3",
        help="Python 3 interpreter (if running under Python 2)"
             " (TEMPORARY)")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')
    parser.add_option('--known-good-rev', type="string",
        metavar="known_good_rev",
        help=("Automatically bisect any failures using this "
              "revision as a known-good revision."))

    # Resolve each (envvar, fallback) pair against the environment,
    # coercing to the fallback's type; this mutates `defaults` in place.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
350 350
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Canonicalizes paths (--with-hg/--with-chg/--tmpdir), rejects
    incompatible option combinations, sets the module globals `useipv6`
    and `verbose`, and converts blacklist/whitelist file arguments into
    parsed dicts.  Returns the (options, args) pair.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local derives binary locations from the repository layout:
        # <repo>/hg and, with --chg, <repo>/contrib/chg/chg.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # The empty string is truthy for vlog's "verbose is False" check
        # while printing nothing extra as a prefix.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return (options, args)
476 476
def rename(src, dst):
    """Move src to dst, replacing dst if it already exists.

    Unlike os.rename() this is neither atomic nor friendly to open
    files; in exchange it works when the destination exists (copy then
    delete the source).
    """
    shutil.copy(src, dst)
    os.unlink(src)
483 483
# Pick a unified-diff function that accepts bytes: on Python 3,
# difflib.unified_diff only takes str, so wrap it with diff_bytes.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
488 488
def getdiff(expected, output, ref, err):
    """Diff expected vs. actual output; return (servefail, lines).

    '+++'/'---' header lines are normalized (backslashes to forward
    slashes, trailing blank stripped) so diffs are stable across
    platforms.  servefail becomes True when the diff shows that a child
    server process failed to start.
    """
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith((b'+++', b'---')):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if line.startswith(b'+ abort: child process failed to start'):
            servefail = True

    return servefail, lines
503 503
# verbose is False until parseargs() replaces it with '' under -v; the
# explicit "is False" test below lets the empty string count as verbose.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
511 511
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(b'(.+) \((.+?) !\)\n$')

def cdatasafe(data):
    """Return `data` made safe to embed in an XML CDATA block.

    Control characters that are illegal even inside CDATA are replaced
    with '?', and any ']]>' sequence is broken up with a space so it
    cannot terminate the block early.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
531 531
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock is a module-level lock defined elsewhere in this file (not
    # visible in this chunk); it serializes output from worker threads.
    with iolock:
        if verbose:
            # verbose is '' when enabled, so this prints only a space
            # prefix; kept for compatibility with the original behavior.
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
544 544
def terminate(proc):
    """Terminate subprocess (best effort: an already-dead process's
    OSError is swallowed)."""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        pass
552 552
def killdaemons(pidfile):
    """Kill the daemons listed in `pidfile` and remove the file.

    Delegates to the killdaemons helper module that lives next to this
    script in the tests directory.
    """
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
557 557
558 558 class Test(unittest.TestCase):
559 559 """Encapsulates a single, runnable test.
560 560
561 561 While this class conforms to the unittest.TestCase API, it differs in that
562 562 instances need to be instantiated manually. (Typically, unittest.TestCase
563 563 classes are instantiated automatically by scanning modules.)
564 564 """
565 565
566 566 # Status code reserved for skipped tests (used by hghave).
567 567 SKIPPED_STATUS = 80
568 568
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=defaults['slowtimeout'], usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        # Identity: bytes path/name plus a str name for unittest APIs.
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        # Configuration captured from the constructor arguments.
        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # Mutable run state, (re)initialized by setUp()/run().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        # Reference ("expected") output, or None in debug mode.
        self._refout = self.readrefout()
637 637
638 638 def readrefout(self):
639 639 """read reference output"""
640 640 # If we're not in --debug mode and reference output file exists,
641 641 # check test output against it.
642 642 if self._debug:
643 643 return None # to match "out is None"
644 644 elif os.path.exists(self.refpath):
645 645 with open(self.refpath, 'rb') as f:
646 646 return f.read().splitlines(True)
647 647 else:
648 648 return []
649 649
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # unittest calls this for one-line test descriptions in output.
        return self.name
660 660
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so an instance can be run more than once.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread directory may already exist from a previous
            # test on the same thread; anything else is a real error.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # Dedicated socket dir for the chg server of this test.
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
693 693
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown failures demote an otherwise-successful run.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
748 748
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable summary of a process exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
813 813
    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemons the test started, then forget their pid files.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist the failing output so diffs/interactive mode can use it.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
841 841
    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        # Checked by run() via stopTest(..., interrupted=self._aborted).
        self._aborted = True
849 849
850 850 def _portmap(self, i):
851 851 offset = b'' if i == 0 else b'%d' % i
852 852 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
853 853
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            # Normalize backup-bundle paths to always carry a (glob) marker.
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            # Replace the literal loopback address with $LOCALIP; the
            # leading non-digit guard avoids matching inside larger numbers.
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
            ]
        # Replace the per-test temp directory with $TESTTMP.
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        return r
874 874
    def _escapepath(self, p):
        """Return a regex pattern (bytes) matching path ``p``.

        On Windows, letters match case-insensitively and '/' and '\\' are
        treated as interchangeable; elsewhere the path is regex-escaped.
        """
        if os.name == 'nt':
            # NOTE(review): iterating bytes on Python 3 yields ints, so the
            # c.isalpha()/b'' concatenation below presumably only works under
            # Python 2 semantics -- confirm before relying on it on py3.
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                          c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                          for c in p))
            )
        else:
            return re.escape(p)
884 884
885 885 def _localip(self):
886 886 if self._useipv6:
887 887 return b'::1'
888 888 else:
889 889 return b'127.0.0.1'
890 890
891 891 def _getenv(self):
892 892 """Obtain environment variables to use during test execution."""
893 893 def defineport(i):
894 894 offset = '' if i == 0 else '%s' % i
895 895 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
896 896 env = os.environ.copy()
897 897 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
898 898 env['HGEMITWARNINGS'] = '1'
899 899 env['TESTTMP'] = self._testtmp
900 900 env['HOME'] = self._testtmp
901 901 # This number should match portneeded in _getport
902 902 for port in xrange(3):
903 903 # This list should be parallel to _portmap in _getreplacements
904 904 defineport(port)
905 905 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
906 906 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
907 907 env["HGEDITOR"] = ('"' + sys.executable + '"'
908 908 + ' -c "import sys; sys.exit(0)"')
909 909 env["HGMERGE"] = "internal:merge"
910 910 env["HGUSER"] = "test"
911 911 env["HGENCODING"] = "ascii"
912 912 env["HGENCODINGMODE"] = "strict"
913 913 env['HGIPV6'] = str(int(self._useipv6))
914 914
915 915 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
916 916 # IP addresses.
917 917 env['LOCALIP'] = self._localip()
918 918
919 919 # Reset some environment variables to well-known values so that
920 920 # the tests produce repeatable output.
921 921 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
922 922 env['TZ'] = 'GMT'
923 923 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
924 924 env['COLUMNS'] = '80'
925 925 env['TERM'] = 'xterm'
926 926
927 927 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
928 928 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
929 929 'NO_PROXY CHGDEBUG').split():
930 930 if k in env:
931 931 del env[k]
932 932
933 933 # unset env related to hooks
934 934 for k in env.keys():
935 935 if k.startswith('HG_'):
936 936 del env[k]
937 937
938 938 if self._usechg:
939 939 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
940 940
941 941 return env
942 942
943 943 def _createhgrc(self, path):
944 944 """Create an hgrc file for this test."""
945 945 hgrc = open(path, 'wb')
946 946 hgrc.write(b'[ui]\n')
947 947 hgrc.write(b'slash = True\n')
948 948 hgrc.write(b'interactive = False\n')
949 949 hgrc.write(b'mergemarkers = detailed\n')
950 950 hgrc.write(b'promptecho = True\n')
951 951 hgrc.write(b'[defaults]\n')
952 952 hgrc.write(b'[devel]\n')
953 953 hgrc.write(b'all-warnings = true\n')
954 954 hgrc.write(b'default-date = 0 0\n')
955 955 hgrc.write(b'[largefiles]\n')
956 956 hgrc.write(b'usercache = %s\n' %
957 957 (os.path.join(self._testtmp, b'.cache/largefiles')))
958 958 hgrc.write(b'[web]\n')
959 959 hgrc.write(b'address = localhost\n')
960 960 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
961 961
962 962 for opt in self._extraconfigopts:
963 963 section, key = opt.split('.', 1)
964 964 assert '=' in key, ('extra config opt %s must '
965 965 'have an = for assignment' % opt)
966 966 hgrc.write(b'[%s]\n%s\n' % (section, key))
967 967 hgrc.close()
968 968
969 969 def fail(self, msg):
970 970 # unittest differentiates between errored and failed.
971 971 # Failed is denoted by AssertionError (by default at least).
972 972 raise AssertionError(msg)
973 973
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In debug mode the child inherits our streams directly;
            # nothing is captured or normalized.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; report SIGTERM if the child
            # otherwise exited cleanly.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            # Non-zero exit: make sure no stray daemons outlive the test.
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, temp paths, etc. so output is comparable.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1022 1022
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test in a .out file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Optionally pass -3 so the interpreter emits py3k warnings.
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output needs \r\n -> \n normalization for comparison.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1041 1041
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i): build a one-byte bytes object from an int, portably across
# Python 2 (where chr suffices) and Python 3 (where bytes([i]) is needed).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1059 1059
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Matches bytes that cannot be shown verbatim and need (esc) encoding.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = None
        if 'case' in kwds:
            case = kwds.pop('case')
        self._case = case
        self._allcases = parsettestcases(path)
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the display name, .err file and tmp dir unique per case.
            self.name = '%s (case %s)' % (self.name, _strpath(case))
            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
            self._tmpname += b'-%s' % case

    @property
    def refpath(self):
        # A .t file is its own reference output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        # .t file is both reference output and the test input, keep reference
        # output updated with the the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Check feature requirements by invoking tests/hghave.

        Returns (result, message); message carries hghave's output when
        the requirements are not met.
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself is unusable: abort the whole test run.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            return False, stdout

        if 'slow' in reqs:
            # Slow tests get the longer timeout.
            self._timeout = self._slowtimeout
        return True, None

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            # Arguments naming a declared "#testcases" case select on the
            # active case; everything else is delegated to hghave.
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] == self._case:
                    return False
            elif arg in self._allcases:
                if arg != self._case:
                    return False
            else:
                reqs.append(arg)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    # NOTE(review): these '!!!' markers are str while the
                    # rest of 'after' holds bytes -- presumably only an
                    # issue when joined on Python 3; confirm.
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    # Requirements not met: replace the whole script with
                    # an exit-80 (skip) stub.
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Make a failed cd abort the script instead of running
                    # the remaining commands in the wrong directory.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Line carries a SALT marker: split output from the
                # line-number/exit-code bookkeeping part.
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                i = 0
                optional = []
                while i < len(els):
                    el = els[i]

                    r = TTest.linematch(el, lout)
                    if isinstance(r, str):
                        if r == '+glob':
                            lout = el[:-1] + ' (glob)\n'
                            r = '' # Warn only this line.
                        elif r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            # Optional line did not match; emit it and try
                            # the next expected line against this output.
                            postout.append(b'  ' + el)
                            els.pop(i)
                            break
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(' ')]

                                if self._hghave(conditions)[0]:
                                    lout = el
                                else:
                                    optional.append(i)

                    i += 1

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if (not optline.match(el)
                            and not el.endswith(b" (?)\n")):
                            break
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout

    @staticmethod
    def rematch(el, l):
        try:
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped glob metacharacter: keep it literal.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    @staticmethod
    def linematch(el, l):
        """Match expected line ``el`` against output line ``l``.

        Returns True on a match, a falsy value on a mismatch, or the
        marker strings '+glob'/'-glob'/'retry' understood by
        _processoutput.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                el = el[:-5] + b"\n"
            else:
                m = optline.match(el)
                if m:
                    el = m.group(1) + b"\n"
                    retry = "retry"

            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l) or retry
            if os.altsep and l.replace(b'\\', b'/') == el:
                return b'+glob'
        return retry

    @staticmethod
    def parsehghaveoutput(lines):
        '''Parse hghave log lines.

        Return tuple of lists (missing, failed):
        * the missing/unknown features
        * the features for which existence check failed'''
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
                failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))

        return missing, failed

    @staticmethod
    def _escapef(m):
        # Substitution callback for ESCAPESUB.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Encode unprintable bytes as \xNN sequences for (esc) lines.
        return TTest.ESCAPESUB(TTest._escapef, s)
1486 1486
1487 1487 iolock = threading.RLock()
1488 1488
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples; see stopTest for the layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run at the first failure.
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\nERROR: %s output changed\n' % test)
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Returns True when the user interactively accepted the new output
        (the mismatch should then not be treated as a failure).
        """
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Show the diff in an external viewer instead of inline.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        if PYTHON3:
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        # Use the file path, not the display name: with
                        # "#testcases" the name carries a "(case ...)"
                        # suffix and would not end in ".t".
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1649 1649
1650 1650 class TestSuite(unittest.TestSuite):
1651 1651 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1652 1652
    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test is how many times each selected test is queued.

        loadtest, when set, is a callable used to (re)load a test instance.

        showchannels enables the live per-job status display.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels
1695 1695
1696 1696 def run(self, result):
1697 1697 # We have a number of filters that need to be applied. We do this
1698 1698 # here instead of inside Test because it makes the running logic for
1699 1699 # Test simpler.
1700 1700 tests = []
1701 1701 num_tests = [0]
1702 1702 for test in self._tests:
1703 1703 def get():
1704 1704 num_tests[0] += 1
1705 1705 if getattr(test, 'should_reload', False):
1706 1706 return self._loadtest(test, num_tests[0])
1707 1707 return test
1708 1708 if not os.path.exists(test.path):
1709 1709 result.addSkip(test, "Doesn't exist")
1710 1710 continue
1711 1711
1712 1712 if not (self._whitelist and test.name in self._whitelist):
1713 1713 if self._blacklist and test.bname in self._blacklist:
1714 1714 result.addSkip(test, 'blacklisted')
1715 1715 continue
1716 1716
1717 1717 if self._retest and not os.path.exists(test.errpath):
1718 1718 result.addIgnore(test, 'not retesting')
1719 1719 continue
1720 1720
1721 1721 if self._keywords:
1722 1722 f = open(test.path, 'rb')
1723 1723 t = f.read().lower() + test.bname.lower()
1724 1724 f.close()
1725 1725 ignored = False
1726 1726 for k in self._keywords.lower().split():
1727 1727 if k not in t:
1728 1728 result.addIgnore(test, "doesn't match keyword")
1729 1729 ignored = True
1730 1730 break
1731 1731
1732 1732 if ignored:
1733 1733 continue
1734 1734 for _ in xrange(self._runs_per_test):
1735 1735 tests.append(get())
1736 1736
1737 1737 runtests = list(tests)
1738 1738 done = queue.Queue()
1739 1739 running = 0
1740 1740
1741 1741 channels = [""] * self._jobs
1742 1742
1743 1743 def job(test, result):
1744 1744 for n, v in enumerate(channels):
1745 1745 if not v:
1746 1746 channel = n
1747 1747 break
1748 1748 else:
1749 1749 raise ValueError('Could not find output channel')
1750 1750 channels[channel] = "=" + test.name[5:].split(".")[0]
1751 1751 try:
1752 1752 test(result)
1753 1753 done.put(None)
1754 1754 except KeyboardInterrupt:
1755 1755 pass
1756 1756 except: # re-raises
1757 1757 done.put(('!', test, 'run-test raised an error, see traceback'))
1758 1758 raise
1759 1759 finally:
1760 1760 try:
1761 1761 channels[channel] = ''
1762 1762 except IndexError:
1763 1763 pass
1764 1764
1765 1765 def stat():
1766 1766 count = 0
1767 1767 while channels:
1768 1768 d = '\n%03s ' % count
1769 1769 for n, v in enumerate(channels):
1770 1770 if v:
1771 1771 d += v[0]
1772 1772 channels[n] = v[1:] or '.'
1773 1773 else:
1774 1774 d += ' '
1775 1775 d += ' '
1776 1776 with iolock:
1777 1777 sys.stdout.write(d + ' ')
1778 1778 sys.stdout.flush()
1779 1779 for x in xrange(10):
1780 1780 if channels:
1781 1781 time.sleep(.1)
1782 1782 count += 1
1783 1783
1784 1784 stoppedearly = False
1785 1785
1786 1786 if self._showchannels:
1787 1787 statthread = threading.Thread(target=stat, name="stat")
1788 1788 statthread.start()
1789 1789
1790 1790 try:
1791 1791 while tests or running:
1792 1792 if not done.empty() or running == self._jobs or not tests:
1793 1793 try:
1794 1794 done.get(True, 1)
1795 1795 running -= 1
1796 1796 if result and result.shouldStop:
1797 1797 stoppedearly = True
1798 1798 break
1799 1799 except queue.Empty:
1800 1800 continue
1801 1801 if tests and not running == self._jobs:
1802 1802 test = tests.pop(0)
1803 1803 if self._loop:
1804 1804 if getattr(test, 'should_reload', False):
1805 1805 num_tests[0] += 1
1806 1806 tests.append(
1807 1807 self._loadtest(test, num_tests[0]))
1808 1808 else:
1809 1809 tests.append(test)
1810 1810 if self._jobs == 1:
1811 1811 job(test, result)
1812 1812 else:
1813 1813 t = threading.Thread(target=job, name=test.name,
1814 1814 args=(test, result))
1815 1815 t.start()
1816 1816 running += 1
1817 1817
                # If we stop early we still need to wait on started tests to
                # finish. Otherwise, there is a race between the test completing
                # and the test's cleanup code running. This could result in the
                # test being reported incorrectly.
1822 1822 if stoppedearly:
1823 1823 while running:
1824 1824 try:
1825 1825 done.get(True, 1)
1826 1826 running -= 1
1827 1827 except queue.Empty:
1828 1828 continue
1829 1829 except KeyboardInterrupt:
1830 1830 for test in runtests:
1831 1831 test.abort()
1832 1832
1833 1833 channels = []
1834 1834
1835 1835 return result
1836 1836
1837 1837 # Save the most recent 5 wall-clock runtimes of each test to a
1838 1838 # human-readable text file named .testtimes. Tests are sorted
1839 1839 # alphabetically, while times for each test are listed from oldest to
1840 1840 # newest.
1841 1841
def loadtimes(outputdir):
    """Return previously saved test timings from *outputdir*.

    The timings are stored in a text file named ``.testtimes`` (written
    by savetimes() below), one test per line: the test name followed by
    up to five space-separated wall-clock durations, oldest first.

    Returns a list of ``(testname, [duration, ...])`` tuples.  A missing
    file yields an empty list; other I/O errors propagate.
    """
    times = []
    try:
        # Fixed: this used to open b'.testtimes-' (trailing dash), which
        # never matched the b'.testtimes' file savetimes() writes, so
        # saved timings were silently ignored.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
1853 1853
def savetimes(outputdir, result):
    """Merge this run's wall-clock times into outputdir's .testtimes.

    Existing timings are loaded first; each non-skipped test in *result*
    gets its latest real time appended, keeping at most the five most
    recent runs per test.  The file is rewritten via a temporary file
    and a rename, best-effort (rename failures are ignored).
    """
    history = dict(loadtimes(outputdir))
    keep = 5
    skippednames = set(str(entry[0]) for entry in result.skipped)
    for entry in result.times:
        name, walltime = entry[0], entry[3]
        if name in skippednames:
            continue
        runs = history.setdefault(name, [])
        runs.append(walltime)
        # retain only the most recent runs
        del runs[:-keep]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, runs in sorted(history.items()):
            cols = ' '.join('%.3f' % (r,) for r in runs)
            fp.write('%s %s\n' % (name, cols))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
1879 1879
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        # 'runner' is the TestRunner that holds harness-wide options and
        # the output directory; remaining arguments go to unittest.
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

    def listtests(self, test):
        """Print the test names in *test* without running anything.

        Every listed test is recorded as a success so the optional
        xunit/json reports still contain one entry per test.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, 0)
        test = sorted(test, key=lambda t: t.name)
        for t in test:
            print(t.name)
            result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(result, fp)

        return result

    def run(self, test):
        """Run *test*, print the summary, write optional reports.

        Also drives 'hg bisect' over each failure when a known good
        revision was supplied.  Returns the populated TestResult.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # hold iolock so worker output cannot interleave with the summary
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    self.stream.writeln('Skipped %s: %s' % (test.name, msg))
            for test, msg in result.failures:
                self.stream.writeln('Failed %s: %s' % (test.name, msg))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                # Bisect each failing test between the known-good revision
                # and '.' to point at the changeset that broke (or fixed) it.
                def nooutput(args):
                    # run a command, discarding its combined output
                    p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                         stdout=subprocess.PIPE)
                    p.stdout.read()
                    p.wait()
                for test, msg in result.failures:
                    nooutput(['hg', 'bisect', '--reset']),
                    nooutput(['hg', 'bisect', '--bad', '.'])
                    nooutput(['hg', 'bisect', '--good',
                              self._runner.options.known_good_rev])
                    # TODO: we probably need to forward some options
                    # that alter hg's behavior inside the tests.
                    rtc = '%s %s %s' % (sys.executable, sys.argv[0], test)
                    sub = subprocess.Popen(['hg', 'bisect', '--command', rtc],
                                           stderr=subprocess.STDOUT,
                                           stdout=subprocess.PIPE)
                    data = sub.stdout.read()
                    sub.wait()
                    # scrape the culprit changeset out of bisect's output
                    m = re.search(
                        (r'\nThe first (?P<goodbad>bad|good) revision '
                         r'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                         r'summary: +(?P<summary>[^\n]+)\n'),
                        data, (re.MULTILINE | re.DOTALL))
                    if m is None:
                        self.stream.writeln(
                            'Failed to identify failure point for %s' % test)
                        continue
                    dat = m.groupdict()
                    verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
                    self.stream.writeln(
                        '%s %s by %s (%s)' % (
                            test, verb, dat['node'], dat['summary']))
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)
            self.stream.flush()

        return result

    def printtimes(self, times):
        """Print a start/end/cuser/csys/real table, fastest real first."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Serialize *result* as xunit-style XML to binary file *outf*."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            message = cdatasafe(message).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write *result* to *outf* as 'testreport =' plus a JSON object.

        The object is keyed by test name; each entry records the
        outcome, timings and output diff (tests with no recorded time,
        e.g. blacklisted ones, record only the outcome).
        """
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                               for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2086 2086
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    # These external commands must be reachable on PATH (presumably
    # verified by _checktools() before the run starts -- confirm).
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    # Scanned in order by _gettest(); paths with an unknown extension
    # fall back to the base Test class there.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2108 2108
2109 2109 def __init__(self):
2110 2110 self.options = None
2111 2111 self._hgroot = None
2112 2112 self._testdir = None
2113 2113 self._outputdir = None
2114 2114 self._hgtmp = None
2115 2115 self._installdir = None
2116 2116 self._bindir = None
2117 2117 self._tmpbinddir = None
2118 2118 self._pythondir = None
2119 2119 self._coveragefile = None
2120 2120 self._createdfiles = []
2121 2121 self._hgcommand = None
2122 2122 self._hgpath = None
2123 2123 self._portoffset = 0
2124 2124 self._ports = {}
2125 2125
2126 2126 def run(self, args, parser=None):
2127 2127 """Run the test suite."""
2128 2128 oldmask = os.umask(0o22)
2129 2129 try:
2130 2130 parser = parser or getparser()
2131 2131 options, args = parseargs(args, parser)
2132 2132 # positional arguments are paths to test files to run, so
2133 2133 # we make sure they're all bytestrings
2134 2134 args = [_bytespath(a) for a in args]
2135 2135 self.options = options
2136 2136
2137 2137 self._checktools()
2138 2138 testdescs = self.findtests(args)
2139 2139 if options.profile_runner:
2140 2140 import statprof
2141 2141 statprof.start()
2142 2142 result = self._run(testdescs)
2143 2143 if options.profile_runner:
2144 2144 statprof.stop()
2145 2145 statprof.display()
2146 2146 return result
2147 2147
2148 2148 finally:
2149 2149 os.umask(oldmask)
2150 2150
    def _run(self, testdescs):
        """Prepare the environment and execute *testdescs*.

        Orders the tests (random or longest-first), sets up HGTMP and
        the install/bin/python directories, rewrites PATH and
        PYTHONPATH so the hg under test is the one that runs, then
        delegates to _runtests() and finally cleans up.  Returns the
        harness exit code (0 on success, 1 on error).
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

                # Automatically removing tmpdir sounds convenient, but could
                # really annoy anyone in the habit of using "--tmpdir=/tmp"
                # or "--tmpdir=$HOME".
                #vlog("# Removing temp dir", tmpdir)
                #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        # build a PATH that finds the hg under test (and helpers) first
        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # give straggling child processes a moment before cleanup
            time.sleep(.1)
            self._cleanup()
2328 2328
2329 2329 def findtests(self, args):
2330 2330 """Finds possible test files from arguments.
2331 2331
2332 2332 If you wish to inject custom tests into the test harness, this would
2333 2333 be a good function to monkeypatch or override in a derived class.
2334 2334 """
2335 2335 if not args:
2336 2336 if self.options.changed:
2337 2337 proc = Popen4('hg st --rev "%s" -man0 .' %
2338 2338 self.options.changed, None, 0)
2339 2339 stdout, stderr = proc.communicate()
2340 2340 args = stdout.strip(b'\0').split(b'\0')
2341 2341 else:
2342 2342 args = os.listdir(b'.')
2343 2343
2344 2344 tests = []
2345 2345 for t in args:
2346 2346 if not (os.path.basename(t).startswith(b'test-')
2347 2347 and (t.endswith(b'.py') or t.endswith(b'.t'))):
2348 2348 continue
2349 2349 if t.endswith(b'.t'):
2350 2350 # .t file may contain multiple test cases
2351 2351 cases = sorted(parsettestcases(t))
2352 2352 if cases:
2353 2353 tests += [{'path': t, 'case': c} for c in sorted(cases)]
2354 2354 else:
2355 2355 tests.append({'path': t})
2356 2356 else:
2357 2357 tests.append({'path': t})
2358 2358 return tests
2359 2359
    def _runtests(self, testdescs):
        """Build Test objects from *testdescs* and run them all.

        Implements --restart (drop tests that precede the first one with
        a leftover .err file) and --list-tests, installs hg when needed,
        and triggers coverage output.  Returns 1 on failure or keyboard
        interrupt, None otherwise (the caller maps None to 0).
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                # a temporary install is only made when not reusing a
                # local hg via --with-hg/--local
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

            if result.failures:
                failed = True

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2434 2434
2435 2435 def _getport(self, count):
2436 2436 port = self._ports.get(count) # do we have a cached entry?
2437 2437 if port is None:
2438 2438 portneeded = 3
2439 2439 # above 100 tries we just give up and let test reports failure
2440 2440 for tries in xrange(100):
2441 2441 allfree = True
2442 2442 port = self.options.port + self._portoffset
2443 2443 for idx in xrange(portneeded):
2444 2444 if not checkportisavailable(port + idx):
2445 2445 allfree = False
2446 2446 break
2447 2447 self._portoffset += portneeded
2448 2448 if allfree:
2449 2449 break
2450 2450 self._ports[count] = port
2451 2451 return port
2452 2452
2453 2453 def _gettest(self, testdesc, count):
2454 2454 """Obtain a Test by looking at its filename.
2455 2455
2456 2456 Returns a Test instance. The Test may not be runnable if it doesn't
2457 2457 map to a known type.
2458 2458 """
2459 2459 path = testdesc['path']
2460 2460 lctest = path.lower()
2461 2461 testcls = Test
2462 2462
2463 2463 for ext, cls in self.TESTTYPES:
2464 2464 if lctest.endswith(ext):
2465 2465 testcls = cls
2466 2466 break
2467 2467
2468 2468 refpath = os.path.join(self._testdir, path)
2469 2469 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2470 2470
2471 2471 # extra keyword parameters. 'case' is used by .t tests
2472 2472 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2473 2473
2474 2474 t = testcls(refpath, self._outputdir, tmpdir,
2475 2475 keeptmpdir=self.options.keep_tmpdir,
2476 2476 debug=self.options.debug,
2477 2477 timeout=self.options.timeout,
2478 2478 startport=self._getport(count),
2479 2479 extraconfigopts=self.options.extra_config_opt,
2480 2480 py3kwarnings=self.options.py3k_warnings,
2481 2481 shell=self.options.shell,
2482 2482 hgcommand=self._hgcommand,
2483 2483 usechg=bool(self.options.with_chg or self.options.chg),
2484 2484 useipv6=useipv6, **kwds)
2485 2485 t.should_reload = True
2486 2486 return t
2487 2487
2488 2488 def _cleanup(self):
2489 2489 """Clean up state from this test invocation."""
2490 2490 if self.options.keep_tmpdir:
2491 2491 return
2492 2492
2493 2493 vlog("# Cleaning up HGTMP", self._hgtmp)
2494 2494 shutil.rmtree(self._hgtmp, True)
2495 2495 for f in self._createdfiles:
2496 2496 try:
2497 2497 os.remove(f)
2498 2498 except OSError:
2499 2499 pass
2500 2500
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # refresh the symlink unless it already points at us
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # no symlink support: make the interpreter's own directory
            # the first PATH entry instead
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2534 2534
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.

        Runs setup.py from the hg root into the temporary install area,
        exits the harness (sys.exit(1)) when the install fails, and then
        applies post-install fixups: python symlink, optional -3 switch,
        hg.bat path repair on Windows, and coverage hooks.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # dump the captured install log and bail out
            f = open(installerrs, 'rb')
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            # NOTE(review): the file is read as bytes but ' -3' and '\n'
            # are str; this branch looks Python-2-only -- confirm
            vlog("# Updating hg command to enable Py3k Warnings switch")
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2653 2653
2654 2654 def _checkhglib(self, verb):
2655 2655 """Ensure that the 'mercurial' package imported by python is
2656 2656 the one we expect it to be. If not, print a warning to stderr."""
2657 2657 if ((self._bindir == self._pythondir) and
2658 2658 (self._bindir != self._tmpbindir)):
2659 2659 # The pythondir has been inferred from --with-hg flag.
2660 2660 # We cannot expect anything sensible here.
2661 2661 return
2662 2662 expecthg = os.path.join(self._pythondir, b'mercurial')
2663 2663 actualhg = self._gethgpath()
2664 2664 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2665 2665 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2666 2666 ' (expected %s)\n'
2667 2667 % (verb, actualhg, expecthg))
2668 2668 def _gethgpath(self):
2669 2669 """Return the path to the mercurial package that is actually found by
2670 2670 the current Python interpreter."""
2671 2671 if self._hgpath is not None:
2672 2672 return self._hgpath
2673 2673
2674 2674 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2675 2675 cmd = cmd % PYTHON
2676 2676 if PYTHON3:
2677 2677 cmd = _strpath(cmd)
2678 2678 pipe = os.popen(cmd)
2679 2679 try:
2680 2680 self._hgpath = _bytespath(pipe.read().strip())
2681 2681 finally:
2682 2682 pipe.close()
2683 2683
2684 2684 return self._hgpath
2685 2685
2686 2686 def _installchg(self):
2687 2687 """Install chg into the test environment"""
2688 2688 vlog('# Performing temporary installation of CHG')
2689 2689 assert os.path.dirname(self._bindir) == self._installdir
2690 2690 assert self._hgroot, 'must be called after _installhg()'
2691 2691 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2692 2692 % {b'make': 'make', # TODO: switch by option or environment?
2693 2693 b'prefix': self._installdir})
2694 2694 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2695 2695 vlog("# Running", cmd)
2696 2696 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2697 2697 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2698 2698 stderr=subprocess.STDOUT)
2699 2699 out, _err = proc.communicate()
2700 2700 if proc.returncode != 0:
2701 2701 if PYTHON3:
2702 2702 sys.stdout.buffer.write(out)
2703 2703 else:
2704 2704 sys.stdout.write(out)
2705 2705 sys.exit(1)
2706 2706
2707 2707 def _outputcoverage(self):
2708 2708 """Produce code coverage output."""
2709 2709 import coverage
2710 2710 coverage = coverage.coverage
2711 2711
2712 2712 vlog('# Producing coverage report')
2713 2713 # chdir is the easiest way to get short, relative paths in the
2714 2714 # output.
2715 2715 os.chdir(self._hgroot)
2716 2716 covdir = os.path.join(self._installdir, '..', 'coverage')
2717 2717 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2718 2718
2719 2719 # Map install directory paths back to source directory.
2720 2720 cov.config.paths['srcdir'] = ['.', self._pythondir]
2721 2721
2722 2722 cov.combine()
2723 2723
2724 2724 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2725 2725 cov.report(ignore_errors=True, omit=omit)
2726 2726
2727 2727 if self.options.htmlcov:
2728 2728 htmldir = os.path.join(self._outputdir, 'htmlcov')
2729 2729 cov.html_report(directory=htmldir, omit=omit)
2730 2730 if self.options.annotate:
2731 2731 adir = os.path.join(self._outputdir, 'annotated')
2732 2732 if not os.path.isdir(adir):
2733 2733 os.mkdir(adir)
2734 2734 cov.annotate(directory=adir, omit=omit)
2735 2735
2736 2736 def _findprogram(self, program):
2737 2737 """Search PATH for a executable program"""
2738 2738 dpb = _bytespath(os.defpath)
2739 2739 sepb = _bytespath(os.pathsep)
2740 2740 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2741 2741 name = os.path.join(p, program)
2742 2742 if os.name == 'nt' or os.access(name, os.X_OK):
2743 2743 return name
2744 2744 return None
2745 2745
2746 2746 def _checktools(self):
2747 2747 """Ensure tools required to run tests are present."""
2748 2748 for p in self.REQUIREDTOOLS:
2749 2749 if os.name == 'nt' and not p.endswith('.exe'):
2750 2750 p += '.exe'
2751 2751 found = self._findprogram(p)
2752 2752 if found:
2753 2753 vlog("# Found prerequisite", p, "at", found)
2754 2754 else:
2755 2755 print("WARNING: Did not find prerequisite tool: %s " %
2756 2756 p.decode("utf-8"))
2757 2757
2758 2758 if __name__ == '__main__':
2759 2759 runner = TestRunner()
2760 2760
2761 2761 try:
2762 2762 import msvcrt
2763 2763 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2764 2764 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2765 2765 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2766 2766 except ImportError:
2767 2767 pass
2768 2768
2769 2769 sys.exit(runner.run(sys.argv[1:]))
@@ -1,1237 +1,1289 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9
10 10 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > run-tests.py --with-hg=`which hg` "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27
28 28 # Ran 0 tests, 0 skipped, 0 failed.
29 29 $ rm hg
30 30 #endif
31 31
32 32 #if execbit
33 33 $ touch hg
34 34 $ run-tests.py --with-hg=./hg
35 35 Usage: run-tests.py [options] [tests]
36 36
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 .
61 61 # Ran 1 tests, 0 skipped, 0 failed.
62 62 $ rm test-empty.t
63 63
64 64 a succesful test
65 65 =======================
66 66
67 67 $ cat > test-success.t << EOF
68 68 > $ echo babar
69 69 > babar
70 70 > $ echo xyzzy
71 71 > dont_print (?)
72 72 > nothing[42]line (re) (?)
73 73 > never*happens (glob) (?)
74 74 > more_nothing (?)
75 75 > xyzzy
76 76 > nor this (?)
77 77 > $ printf 'abc\ndef\nxyz\n'
78 78 > 123 (?)
79 79 > abc
80 80 > def (?)
81 81 > 456 (?)
82 82 > xyz
83 83 > $ printf 'zyx\nwvu\ntsr\n'
84 84 > abc (?)
85 85 > zyx (custom !)
86 86 > wvu
87 87 > no_print (no-custom !)
88 88 > tsr (no-missing !)
89 89 > missing (missing !)
90 90 > EOF
91 91
92 92 $ rt
93 93 .
94 94 # Ran 1 tests, 0 skipped, 0 failed.
95 95
96 96 failing test
97 97 ==================
98 98
99 99 test churn with globs
100 100 $ cat > test-failure.t <<EOF
101 101 > $ echo "bar-baz"; echo "bar-bad"
102 102 > bar*bad (glob)
103 103 > bar*baz (glob)
104 104 > EOF
105 105 $ rt test-failure.t
106 106
107 107 --- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,3 +1,3 @@
110 110 $ echo "bar-baz"; echo "bar-bad"
111 111 + bar*baz (glob)
112 112 bar*bad (glob)
113 113 - bar*baz (glob)
114 114
115 115 ERROR: test-failure.t output changed
116 116 !
117 117 Failed test-failure.t: output changed
118 118 # Ran 1 tests, 0 skipped, 1 failed.
119 119 python hash seed: * (glob)
120 120 [1]
121 121
122 122 basic failing test
123 123 $ cat > test-failure.t << EOF
124 124 > $ echo babar
125 125 > rataxes
126 126 > This is a noop statement so that
127 127 > this test is still more bytes than success.
128 128 > pad pad pad pad............................................................
129 129 > pad pad pad pad............................................................
130 130 > pad pad pad pad............................................................
131 131 > pad pad pad pad............................................................
132 132 > pad pad pad pad............................................................
133 133 > pad pad pad pad............................................................
134 134 > EOF
135 135
136 136 >>> fh = open('test-failure-unicode.t', 'wb')
137 137 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
138 138 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
139 139
140 140 $ rt
141 141
142 142 --- $TESTTMP/test-failure.t
143 143 +++ $TESTTMP/test-failure.t.err
144 144 @@ -1,5 +1,5 @@
145 145 $ echo babar
146 146 - rataxes
147 147 + babar
148 148 This is a noop statement so that
149 149 this test is still more bytes than success.
150 150 pad pad pad pad............................................................
151 151
152 152 ERROR: test-failure.t output changed
153 153 !.
154 154 --- $TESTTMP/test-failure-unicode.t
155 155 +++ $TESTTMP/test-failure-unicode.t.err
156 156 @@ -1,2 +1,2 @@
157 157 $ echo babar\xce\xb1 (esc)
158 158 - l\xce\xb5\xce\xb5t (esc)
159 159 + babar\xce\xb1 (esc)
160 160
161 161 ERROR: test-failure-unicode.t output changed
162 162 !
163 163 Failed test-failure.t: output changed
164 164 Failed test-failure-unicode.t: output changed
165 165 # Ran 3 tests, 0 skipped, 2 failed.
166 166 python hash seed: * (glob)
167 167 [1]
168 168
169 169 test --outputdir
170 170 $ mkdir output
171 171 $ rt --outputdir output
172 172
173 173 --- $TESTTMP/test-failure.t
174 174 +++ $TESTTMP/output/test-failure.t.err
175 175 @@ -1,5 +1,5 @@
176 176 $ echo babar
177 177 - rataxes
178 178 + babar
179 179 This is a noop statement so that
180 180 this test is still more bytes than success.
181 181 pad pad pad pad............................................................
182 182
183 183 ERROR: test-failure.t output changed
184 184 !.
185 185 --- $TESTTMP/test-failure-unicode.t
186 186 +++ $TESTTMP/output/test-failure-unicode.t.err
187 187 @@ -1,2 +1,2 @@
188 188 $ echo babar\xce\xb1 (esc)
189 189 - l\xce\xb5\xce\xb5t (esc)
190 190 + babar\xce\xb1 (esc)
191 191
192 192 ERROR: test-failure-unicode.t output changed
193 193 !
194 194 Failed test-failure.t: output changed
195 195 Failed test-failure-unicode.t: output changed
196 196 # Ran 3 tests, 0 skipped, 2 failed.
197 197 python hash seed: * (glob)
198 198 [1]
199 199 $ ls -a output
200 200 .
201 201 ..
202 202 .testtimes
203 203 test-failure-unicode.t.err
204 204 test-failure.t.err
205 205
206 206 test --xunit support
207 207 $ rt --xunit=xunit.xml
208 208
209 209 --- $TESTTMP/test-failure.t
210 210 +++ $TESTTMP/test-failure.t.err
211 211 @@ -1,5 +1,5 @@
212 212 $ echo babar
213 213 - rataxes
214 214 + babar
215 215 This is a noop statement so that
216 216 this test is still more bytes than success.
217 217 pad pad pad pad............................................................
218 218
219 219 ERROR: test-failure.t output changed
220 220 !.
221 221 --- $TESTTMP/test-failure-unicode.t
222 222 +++ $TESTTMP/test-failure-unicode.t.err
223 223 @@ -1,2 +1,2 @@
224 224 $ echo babar\xce\xb1 (esc)
225 225 - l\xce\xb5\xce\xb5t (esc)
226 226 + babar\xce\xb1 (esc)
227 227
228 228 ERROR: test-failure-unicode.t output changed
229 229 !
230 230 Failed test-failure.t: output changed
231 231 Failed test-failure-unicode.t: output changed
232 232 # Ran 3 tests, 0 skipped, 2 failed.
233 233 python hash seed: * (glob)
234 234 [1]
235 235 $ cat xunit.xml
236 236 <?xml version="1.0" encoding="utf-8"?>
237 237 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
238 238 <testcase name="test-success.t" time="*"/> (glob)
239 239 <testcase name="test-failure-unicode.t" time="*"> (glob)
240 240 <failure message="output changed" type="output-mismatch">
241 241 <![CDATA[--- $TESTTMP/test-failure-unicode.t
242 242 +++ $TESTTMP/test-failure-unicode.t.err
243 243 @@ -1,2 +1,2 @@
244 244 $ echo babar\xce\xb1 (esc)
245 245 - l\xce\xb5\xce\xb5t (esc)
246 246 + babar\xce\xb1 (esc)
247 247 ]]> </failure>
248 248 </testcase>
249 249 <testcase name="test-failure.t" time="*"> (glob)
250 250 <failure message="output changed" type="output-mismatch">
251 251 <![CDATA[--- $TESTTMP/test-failure.t
252 252 +++ $TESTTMP/test-failure.t.err
253 253 @@ -1,5 +1,5 @@
254 254 $ echo babar
255 255 - rataxes
256 256 + babar
257 257 This is a noop statement so that
258 258 this test is still more bytes than success.
259 259 pad pad pad pad............................................................
260 260 ]]> </failure>
261 261 </testcase>
262 262 </testsuite>
263 263
264 264 $ cat .testtimes
265 265 test-failure-unicode.t * (glob)
266 266 test-failure.t * (glob)
267 267 test-success.t * (glob)
268 268
269 269 $ rt --list-tests
270 270 test-failure-unicode.t
271 271 test-failure.t
272 272 test-success.t
273 273
274 274 $ rt --list-tests --json
275 275 test-failure-unicode.t
276 276 test-failure.t
277 277 test-success.t
278 278 $ cat report.json
279 279 testreport ={
280 280 "test-failure-unicode.t": {
281 281 "result": "success"
282 282 },
283 283 "test-failure.t": {
284 284 "result": "success"
285 285 },
286 286 "test-success.t": {
287 287 "result": "success"
288 288 }
289 289 } (no-eol)
290 290
291 291 $ rt --list-tests --xunit=xunit.xml
292 292 test-failure-unicode.t
293 293 test-failure.t
294 294 test-success.t
295 295 $ cat xunit.xml
296 296 <?xml version="1.0" encoding="utf-8"?>
297 297 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
298 298 <testcase name="test-failure-unicode.t"/>
299 299 <testcase name="test-failure.t"/>
300 300 <testcase name="test-success.t"/>
301 301 </testsuite>
302 302
303 303 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
304 304 test-failure-unicode.t
305 305 test-failure.t
306 306 $ cat output/report.json
307 307 testreport ={
308 308 "test-failure-unicode.t": {
309 309 "result": "success"
310 310 },
311 311 "test-failure.t": {
312 312 "result": "success"
313 313 }
314 314 } (no-eol)
315 315 $ cat xunit.xml
316 316 <?xml version="1.0" encoding="utf-8"?>
317 317 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
318 318 <testcase name="test-failure-unicode.t"/>
319 319 <testcase name="test-failure.t"/>
320 320 </testsuite>
321 321
322 322 $ rm test-failure-unicode.t
323 323
324 324 test for --retest
325 325 ====================
326 326
327 327 $ rt --retest
328 328
329 329 --- $TESTTMP/test-failure.t
330 330 +++ $TESTTMP/test-failure.t.err
331 331 @@ -1,5 +1,5 @@
332 332 $ echo babar
333 333 - rataxes
334 334 + babar
335 335 This is a noop statement so that
336 336 this test is still more bytes than success.
337 337 pad pad pad pad............................................................
338 338
339 339 ERROR: test-failure.t output changed
340 340 !
341 341 Failed test-failure.t: output changed
342 342 # Ran 2 tests, 1 skipped, 1 failed.
343 343 python hash seed: * (glob)
344 344 [1]
345 345
346 346 --retest works with --outputdir
347 347 $ rm -r output
348 348 $ mkdir output
349 349 $ mv test-failure.t.err output
350 350 $ rt --retest --outputdir output
351 351
352 352 --- $TESTTMP/test-failure.t
353 353 +++ $TESTTMP/output/test-failure.t.err
354 354 @@ -1,5 +1,5 @@
355 355 $ echo babar
356 356 - rataxes
357 357 + babar
358 358 This is a noop statement so that
359 359 this test is still more bytes than success.
360 360 pad pad pad pad............................................................
361 361
362 362 ERROR: test-failure.t output changed
363 363 !
364 364 Failed test-failure.t: output changed
365 365 # Ran 2 tests, 1 skipped, 1 failed.
366 366 python hash seed: * (glob)
367 367 [1]
368 368
369 369 Selecting Tests To Run
370 370 ======================
371 371
372 372 successful
373 373
374 374 $ rt test-success.t
375 375 .
376 376 # Ran 1 tests, 0 skipped, 0 failed.
377 377
378 378 success w/ keyword
379 379 $ rt -k xyzzy
380 380 .
381 381 # Ran 2 tests, 1 skipped, 0 failed.
382 382
383 383 failed
384 384
385 385 $ rt test-failure.t
386 386
387 387 --- $TESTTMP/test-failure.t
388 388 +++ $TESTTMP/test-failure.t.err
389 389 @@ -1,5 +1,5 @@
390 390 $ echo babar
391 391 - rataxes
392 392 + babar
393 393 This is a noop statement so that
394 394 this test is still more bytes than success.
395 395 pad pad pad pad............................................................
396 396
397 397 ERROR: test-failure.t output changed
398 398 !
399 399 Failed test-failure.t: output changed
400 400 # Ran 1 tests, 0 skipped, 1 failed.
401 401 python hash seed: * (glob)
402 402 [1]
403 403
404 404 failure w/ keyword
405 405 $ rt -k rataxes
406 406
407 407 --- $TESTTMP/test-failure.t
408 408 +++ $TESTTMP/test-failure.t.err
409 409 @@ -1,5 +1,5 @@
410 410 $ echo babar
411 411 - rataxes
412 412 + babar
413 413 This is a noop statement so that
414 414 this test is still more bytes than success.
415 415 pad pad pad pad............................................................
416 416
417 417 ERROR: test-failure.t output changed
418 418 !
419 419 Failed test-failure.t: output changed
420 420 # Ran 2 tests, 1 skipped, 1 failed.
421 421 python hash seed: * (glob)
422 422 [1]
423 423
424 424 Verify that when a process fails to start we show a useful message
425 425 ==================================================================
426 426
427 427 $ cat > test-serve-fail.t <<EOF
428 428 > $ echo 'abort: child process failed to start blah'
429 429 > EOF
430 430 $ rt test-serve-fail.t
431 431
432 432 ERROR: test-serve-fail.t output changed
433 433 !
434 434 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
435 435 # Ran 1 tests, 0 skipped, 1 failed.
436 436 python hash seed: * (glob)
437 437 [1]
438 438 $ rm test-serve-fail.t
439 439
440 440 Verify that we can try other ports
441 441 ===================================
442 442 $ hg init inuse
443 443 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
444 444 $ cat blocks.pid >> $DAEMON_PIDS
445 445 $ cat > test-serve-inuse.t <<EOF
446 446 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
447 447 > $ cat hg.pid >> \$DAEMON_PIDS
448 448 > EOF
449 449 $ rt test-serve-inuse.t
450 450 .
451 451 # Ran 1 tests, 0 skipped, 0 failed.
452 452 $ rm test-serve-inuse.t
453 453 $ killdaemons.py $DAEMON_PIDS
454 454 $ rm $DAEMON_PIDS
455 455
456 456 Running In Debug Mode
457 457 ======================
458 458
459 459 $ rt --debug 2>&1 | grep -v pwd
460 460 + echo *SALT* 0 0 (glob)
461 461 *SALT* 0 0 (glob)
462 462 + echo babar
463 463 babar
464 464 + echo *SALT* 10 0 (glob)
465 465 *SALT* 10 0 (glob)
466 466 *+ echo *SALT* 0 0 (glob)
467 467 *SALT* 0 0 (glob)
468 468 + echo babar
469 469 babar
470 470 + echo *SALT* 2 0 (glob)
471 471 *SALT* 2 0 (glob)
472 472 + echo xyzzy
473 473 xyzzy
474 474 + echo *SALT* 9 0 (glob)
475 475 *SALT* 9 0 (glob)
476 476 + printf *abc\ndef\nxyz\n* (glob)
477 477 abc
478 478 def
479 479 xyz
480 480 + echo *SALT* 15 0 (glob)
481 481 *SALT* 15 0 (glob)
482 482 + printf *zyx\nwvu\ntsr\n* (glob)
483 483 zyx
484 484 wvu
485 485 tsr
486 486 + echo *SALT* 22 0 (glob)
487 487 *SALT* 22 0 (glob)
488 488 .
489 489 # Ran 2 tests, 0 skipped, 0 failed.
490 490
491 491 Parallel runs
492 492 ==============
493 493
494 494 (duplicate the failing test to get predictable output)
495 495 $ cp test-failure.t test-failure-copy.t
496 496
497 497 $ rt --jobs 2 test-failure*.t -n
498 498 !!
499 499 Failed test-failure*.t: output changed (glob)
500 500 Failed test-failure*.t: output changed (glob)
501 501 # Ran 2 tests, 0 skipped, 2 failed.
502 502 python hash seed: * (glob)
503 503 [1]
504 504
505 505 failures in parallel with --first should only print one failure
506 506 >>> f = open('test-nothing.t', 'w')
507 507 >>> f.write('foo\n' * 1024) and None
508 508 >>> f.write(' $ sleep 1') and None
509 509 $ rt --jobs 2 --first
510 510
511 511 --- $TESTTMP/test-failure*.t (glob)
512 512 +++ $TESTTMP/test-failure*.t.err (glob)
513 513 @@ -1,5 +1,5 @@
514 514 $ echo babar
515 515 - rataxes
516 516 + babar
517 517 This is a noop statement so that
518 518 this test is still more bytes than success.
519 519 pad pad pad pad............................................................
520 520
521 521 Failed test-failure*.t: output changed (glob)
522 522 Failed test-nothing.t: output changed
523 523 # Ran 2 tests, 0 skipped, 2 failed.
524 524 python hash seed: * (glob)
525 525 [1]
526 526
527 527
528 528 (delete the duplicated test file)
529 529 $ rm test-failure-copy.t test-nothing.t
530 530
531 531
532 532 Interactive run
533 533 ===============
534 534
535 535 (backup the failing test)
536 536 $ cp test-failure.t backup
537 537
538 538 Refuse the fix
539 539
540 540 $ echo 'n' | rt -i
541 541
542 542 --- $TESTTMP/test-failure.t
543 543 +++ $TESTTMP/test-failure.t.err
544 544 @@ -1,5 +1,5 @@
545 545 $ echo babar
546 546 - rataxes
547 547 + babar
548 548 This is a noop statement so that
549 549 this test is still more bytes than success.
550 550 pad pad pad pad............................................................
551 551 Accept this change? [n]
552 552 ERROR: test-failure.t output changed
553 553 !.
554 554 Failed test-failure.t: output changed
555 555 # Ran 2 tests, 0 skipped, 1 failed.
556 556 python hash seed: * (glob)
557 557 [1]
558 558
559 559 $ cat test-failure.t
560 560 $ echo babar
561 561 rataxes
562 562 This is a noop statement so that
563 563 this test is still more bytes than success.
564 564 pad pad pad pad............................................................
565 565 pad pad pad pad............................................................
566 566 pad pad pad pad............................................................
567 567 pad pad pad pad............................................................
568 568 pad pad pad pad............................................................
569 569 pad pad pad pad............................................................
570 570
571 571 Interactive with custom view
572 572
573 573 $ echo 'n' | rt -i --view echo
574 574 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
575 575 Accept this change? [n]* (glob)
576 576 ERROR: test-failure.t output changed
577 577 !.
578 578 Failed test-failure.t: output changed
579 579 # Ran 2 tests, 0 skipped, 1 failed.
580 580 python hash seed: * (glob)
581 581 [1]
582 582
583 583 View the fix
584 584
585 585 $ echo 'y' | rt --view echo
586 586 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
587 587
588 588 ERROR: test-failure.t output changed
589 589 !.
590 590 Failed test-failure.t: output changed
591 591 # Ran 2 tests, 0 skipped, 1 failed.
592 592 python hash seed: * (glob)
593 593 [1]
594 594
595 595 Accept the fix
596 596
597 597 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
598 598 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
599 599 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
600 600 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
601 601 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
602 602 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
603 603 $ echo 'y' | rt -i 2>&1
604 604
605 605 --- $TESTTMP/test-failure.t
606 606 +++ $TESTTMP/test-failure.t.err
607 607 @@ -1,5 +1,5 @@
608 608 $ echo babar
609 609 - rataxes
610 610 + babar
611 611 This is a noop statement so that
612 612 this test is still more bytes than success.
613 613 pad pad pad pad............................................................
614 614 @@ -9,7 +9,7 @@
615 615 pad pad pad pad............................................................
616 616 pad pad pad pad............................................................
617 617 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
618 618 - saved backup bundle to $TESTTMP/foo.hg
619 619 + saved backup bundle to $TESTTMP/foo.hg* (glob)
620 620 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
621 621 saved backup bundle to $TESTTMP/foo.hg* (glob)
622 622 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
623 623 Accept this change? [n] ..
624 624 # Ran 2 tests, 0 skipped, 0 failed.
625 625
626 626 $ sed -e 's,(glob)$,&<,g' test-failure.t
627 627 $ echo babar
628 628 babar
629 629 This is a noop statement so that
630 630 this test is still more bytes than success.
631 631 pad pad pad pad............................................................
632 632 pad pad pad pad............................................................
633 633 pad pad pad pad............................................................
634 634 pad pad pad pad............................................................
635 635 pad pad pad pad............................................................
636 636 pad pad pad pad............................................................
637 637 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
638 638 saved backup bundle to $TESTTMP/foo.hg (glob)<
639 639 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
640 640 saved backup bundle to $TESTTMP/foo.hg (glob)<
641 641 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
642 642 saved backup bundle to $TESTTMP/*.hg (glob)<
643 643
644 644 Race condition - test file was modified when test is running
645 645
646 646 $ TESTRACEDIR=`pwd`
647 647 $ export TESTRACEDIR
648 648 $ cat > test-race.t <<EOF
649 649 > $ echo 1
650 650 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
651 651 > EOF
652 652
653 653 $ rt -i test-race.t
654 654
655 655 --- $TESTTMP/test-race.t
656 656 +++ $TESTTMP/test-race.t.err
657 657 @@ -1,2 +1,3 @@
658 658 $ echo 1
659 659 + 1
660 660 $ echo "# a new line" >> $TESTTMP/test-race.t
661 661 Reference output has changed (run again to prompt changes)
662 662 ERROR: test-race.t output changed
663 663 !
664 664 Failed test-race.t: output changed
665 665 # Ran 1 tests, 0 skipped, 1 failed.
666 666 python hash seed: * (glob)
667 667 [1]
668 668
669 669 $ rm test-race.t
670 670
671 When "#testcases" is used in .t files
672
673 $ cat >> test-cases.t <<EOF
674 > #testcases a b
675 > #if a
676 > $ echo 1
677 > #endif
678 > #if b
679 > $ echo 2
680 > #endif
681 > EOF
682
683 $ cat <<EOF | rt -i test-cases.t 2>&1
684 > y
685 > y
686 > EOF
687
688 --- $TESTTMP/test-cases.t
689 +++ $TESTTMP/test-cases.t.a.err
690 @@ -1,6 +1,7 @@
691 #testcases a b
692 #if a
693 $ echo 1
694 + 1
695 #endif
696 #if b
697 $ echo 2
698 Accept this change? [n] .
699 --- $TESTTMP/test-cases.t
700 +++ $TESTTMP/test-cases.t.b.err
701 @@ -5,4 +5,5 @@
702 #endif
703 #if b
704 $ echo 2
705 + 2
706 #endif
707 Accept this change? [n] .
708 # Ran 2 tests, 0 skipped, 0 failed.
709
710 $ cat test-cases.t
711 #testcases a b
712 #if a
713 $ echo 1
714 1
715 #endif
716 #if b
717 $ echo 2
718 2
719 #endif
720
721 $ rm test-cases.t
722
671 723 (reinstall)
672 724 $ mv backup test-failure.t
673 725
674 726 No Diff
675 727 ===============
676 728
677 729 $ rt --nodiff
678 730 !.
679 731 Failed test-failure.t: output changed
680 732 # Ran 2 tests, 0 skipped, 1 failed.
681 733 python hash seed: * (glob)
682 734 [1]
683 735
684 736 test --tmpdir support
685 737 $ rt --tmpdir=$TESTTMP/keep test-success.t
686 738
687 739 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
688 740 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
689 741 .
690 742 # Ran 1 tests, 0 skipped, 0 failed.
691 743
692 744 timeouts
693 745 ========
694 746 $ cat > test-timeout.t <<EOF
695 747 > $ sleep 2
696 748 > $ echo pass
697 749 > pass
698 750 > EOF
699 751 > echo '#require slow' > test-slow-timeout.t
700 752 > cat test-timeout.t >> test-slow-timeout.t
701 753 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
702 754 st
703 755 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
704 756 Failed test-timeout.t: timed out
705 757 # Ran 1 tests, 1 skipped, 1 failed.
706 758 python hash seed: * (glob)
707 759 [1]
708 760 $ rt --timeout=1 --slowtimeout=3 \
709 761 > test-timeout.t test-slow-timeout.t --allow-slow-tests
710 762 .t
711 763 Failed test-timeout.t: timed out
712 764 # Ran 2 tests, 0 skipped, 1 failed.
713 765 python hash seed: * (glob)
714 766 [1]
715 767 $ rm test-timeout.t test-slow-timeout.t
716 768
717 769 test for --time
718 770 ==================
719 771
720 772 $ rt test-success.t --time
721 773 .
722 774 # Ran 1 tests, 0 skipped, 0 failed.
723 775 # Producing time report
724 776 start end cuser csys real Test
725 777 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
726 778
727 779 test for --time with --job enabled
728 780 ====================================
729 781
730 782 $ rt test-success.t --time --jobs 2
731 783 .
732 784 # Ran 1 tests, 0 skipped, 0 failed.
733 785 # Producing time report
734 786 start end cuser csys real Test
735 787 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
736 788
737 789 Skips
738 790 ================
739 791 $ cat > test-skip.t <<EOF
740 792 > $ echo xyzzy
741 793 > #require false
742 794 > EOF
743 795 $ rt --nodiff
744 796 !.s
745 797 Skipped test-skip.t: missing feature: nail clipper
746 798 Failed test-failure.t: output changed
747 799 # Ran 2 tests, 1 skipped, 1 failed.
748 800 python hash seed: * (glob)
749 801 [1]
750 802
751 803 $ rt --keyword xyzzy
752 804 .s
753 805 Skipped test-skip.t: missing feature: nail clipper
754 806 # Ran 2 tests, 2 skipped, 0 failed.
755 807
756 808 Skips with xml
757 809 $ rt --keyword xyzzy \
758 810 > --xunit=xunit.xml
759 811 .s
760 812 Skipped test-skip.t: missing feature: nail clipper
761 813 # Ran 2 tests, 2 skipped, 0 failed.
762 814 $ cat xunit.xml
763 815 <?xml version="1.0" encoding="utf-8"?>
764 816 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
765 817 <testcase name="test-success.t" time="*"/> (glob)
766 818 <testcase name="test-skip.t">
767 819 <skipped>
768 820 <![CDATA[missing feature: nail clipper]]> </skipped>
769 821 </testcase>
770 822 </testsuite>
771 823
772 824 Missing skips or blacklisted skips don't count as executed:
773 825 $ echo test-failure.t > blacklist
774 826 $ rt --blacklist=blacklist --json\
775 827 > test-failure.t test-bogus.t
776 828 ss
777 829 Skipped test-bogus.t: Doesn't exist
778 830 Skipped test-failure.t: blacklisted
779 831 # Ran 0 tests, 2 skipped, 0 failed.
780 832 $ cat report.json
781 833 testreport ={
782 834 "test-bogus.t": {
783 835 "result": "skip"
784 836 },
785 837 "test-failure.t": {
786 838 "result": "skip"
787 839 }
788 840 } (no-eol)
789 841
790 842 Whitelist trumps blacklist
791 843 $ echo test-failure.t > whitelist
792 844 $ rt --blacklist=blacklist --whitelist=whitelist --json\
793 845 > test-failure.t test-bogus.t
794 846 s
795 847 --- $TESTTMP/test-failure.t
796 848 +++ $TESTTMP/test-failure.t.err
797 849 @@ -1,5 +1,5 @@
798 850 $ echo babar
799 851 - rataxes
800 852 + babar
801 853 This is a noop statement so that
802 854 this test is still more bytes than success.
803 855 pad pad pad pad............................................................
804 856
805 857 ERROR: test-failure.t output changed
806 858 !
807 859 Skipped test-bogus.t: Doesn't exist
808 860 Failed test-failure.t: output changed
809 861 # Ran 1 tests, 1 skipped, 1 failed.
810 862 python hash seed: * (glob)
811 863 [1]
812 864
813 865 test for --json
814 866 ==================
815 867
816 868 $ rt --json
817 869
818 870 --- $TESTTMP/test-failure.t
819 871 +++ $TESTTMP/test-failure.t.err
820 872 @@ -1,5 +1,5 @@
821 873 $ echo babar
822 874 - rataxes
823 875 + babar
824 876 This is a noop statement so that
825 877 this test is still more bytes than success.
826 878 pad pad pad pad............................................................
827 879
828 880 ERROR: test-failure.t output changed
829 881 !.s
830 882 Skipped test-skip.t: missing feature: nail clipper
831 883 Failed test-failure.t: output changed
832 884 # Ran 2 tests, 1 skipped, 1 failed.
833 885 python hash seed: * (glob)
834 886 [1]
835 887
836 888 $ cat report.json
837 889 testreport ={
838 890 "test-failure.t": [\{] (re)
839 891 "csys": "\s*[\d\.]{4,5}", ? (re)
840 892 "cuser": "\s*[\d\.]{4,5}", ? (re)
841 893 "diff": "---.+\+\+\+.+", ? (re)
842 894 "end": "\s*[\d\.]{4,5}", ? (re)
843 895 "result": "failure", ? (re)
844 896 "start": "\s*[\d\.]{4,5}", ? (re)
845 897 "time": "\s*[\d\.]{4,5}" (re)
846 898 }, ? (re)
847 899 "test-skip.t": {
848 900 "csys": "\s*[\d\.]{4,5}", ? (re)
849 901 "cuser": "\s*[\d\.]{4,5}", ? (re)
850 902 "diff": "", ? (re)
851 903 "end": "\s*[\d\.]{4,5}", ? (re)
852 904 "result": "skip", ? (re)
853 905 "start": "\s*[\d\.]{4,5}", ? (re)
854 906 "time": "\s*[\d\.]{4,5}" (re)
855 907 }, ? (re)
856 908 "test-success.t": [\{] (re)
857 909 "csys": "\s*[\d\.]{4,5}", ? (re)
858 910 "cuser": "\s*[\d\.]{4,5}", ? (re)
859 911 "diff": "", ? (re)
860 912 "end": "\s*[\d\.]{4,5}", ? (re)
861 913 "result": "success", ? (re)
862 914 "start": "\s*[\d\.]{4,5}", ? (re)
863 915 "time": "\s*[\d\.]{4,5}" (re)
864 916 }
865 917 } (no-eol)
866 918 --json with --outputdir
867 919
868 920 $ rm report.json
869 921 $ rm -r output
870 922 $ mkdir output
871 923 $ rt --json --outputdir output
872 924
873 925 --- $TESTTMP/test-failure.t
874 926 +++ $TESTTMP/output/test-failure.t.err
875 927 @@ -1,5 +1,5 @@
876 928 $ echo babar
877 929 - rataxes
878 930 + babar
879 931 This is a noop statement so that
880 932 this test is still more bytes than success.
881 933 pad pad pad pad............................................................
882 934
883 935 ERROR: test-failure.t output changed
884 936 !.s
885 937 Skipped test-skip.t: missing feature: nail clipper
886 938 Failed test-failure.t: output changed
887 939 # Ran 2 tests, 1 skipped, 1 failed.
888 940 python hash seed: * (glob)
889 941 [1]
890 942 $ f report.json
891 943 report.json: file not found
892 944 $ cat output/report.json
893 945 testreport ={
894 946 "test-failure.t": [\{] (re)
895 947 "csys": "\s*[\d\.]{4,5}", ? (re)
896 948 "cuser": "\s*[\d\.]{4,5}", ? (re)
897 949 "diff": "---.+\+\+\+.+", ? (re)
898 950 "end": "\s*[\d\.]{4,5}", ? (re)
899 951 "result": "failure", ? (re)
900 952 "start": "\s*[\d\.]{4,5}", ? (re)
901 953 "time": "\s*[\d\.]{4,5}" (re)
902 954 }, ? (re)
903 955 "test-skip.t": {
904 956 "csys": "\s*[\d\.]{4,5}", ? (re)
905 957 "cuser": "\s*[\d\.]{4,5}", ? (re)
906 958 "diff": "", ? (re)
907 959 "end": "\s*[\d\.]{4,5}", ? (re)
908 960 "result": "skip", ? (re)
909 961 "start": "\s*[\d\.]{4,5}", ? (re)
910 962 "time": "\s*[\d\.]{4,5}" (re)
911 963 }, ? (re)
912 964 "test-success.t": [\{] (re)
913 965 "csys": "\s*[\d\.]{4,5}", ? (re)
914 966 "cuser": "\s*[\d\.]{4,5}", ? (re)
915 967 "diff": "", ? (re)
916 968 "end": "\s*[\d\.]{4,5}", ? (re)
917 969 "result": "success", ? (re)
918 970 "start": "\s*[\d\.]{4,5}", ? (re)
919 971 "time": "\s*[\d\.]{4,5}" (re)
920 972 }
921 973 } (no-eol)
922 974 $ ls -a output
923 975 .
924 976 ..
925 977 .testtimes
926 978 report.json
927 979 test-failure.t.err
928 980
929 981 Test that failed test accepted through interactive are properly reported:
930 982
931 983 $ cp test-failure.t backup
932 984 $ echo y | rt --json -i
933 985
934 986 --- $TESTTMP/test-failure.t
935 987 +++ $TESTTMP/test-failure.t.err
936 988 @@ -1,5 +1,5 @@
937 989 $ echo babar
938 990 - rataxes
939 991 + babar
940 992 This is a noop statement so that
941 993 this test is still more bytes than success.
942 994 pad pad pad pad............................................................
943 995 Accept this change? [n] ..s
944 996 Skipped test-skip.t: missing feature: nail clipper
945 997 # Ran 2 tests, 1 skipped, 0 failed.
946 998
947 999 $ cat report.json
948 1000 testreport ={
949 1001 "test-failure.t": [\{] (re)
950 1002 "csys": "\s*[\d\.]{4,5}", ? (re)
951 1003 "cuser": "\s*[\d\.]{4,5}", ? (re)
952 1004 "diff": "", ? (re)
953 1005 "end": "\s*[\d\.]{4,5}", ? (re)
954 1006 "result": "success", ? (re)
955 1007 "start": "\s*[\d\.]{4,5}", ? (re)
956 1008 "time": "\s*[\d\.]{4,5}" (re)
957 1009 }, ? (re)
958 1010 "test-skip.t": {
959 1011 "csys": "\s*[\d\.]{4,5}", ? (re)
960 1012 "cuser": "\s*[\d\.]{4,5}", ? (re)
961 1013 "diff": "", ? (re)
962 1014 "end": "\s*[\d\.]{4,5}", ? (re)
963 1015 "result": "skip", ? (re)
964 1016 "start": "\s*[\d\.]{4,5}", ? (re)
965 1017 "time": "\s*[\d\.]{4,5}" (re)
966 1018 }, ? (re)
967 1019 "test-success.t": [\{] (re)
968 1020 "csys": "\s*[\d\.]{4,5}", ? (re)
969 1021 "cuser": "\s*[\d\.]{4,5}", ? (re)
970 1022 "diff": "", ? (re)
971 1023 "end": "\s*[\d\.]{4,5}", ? (re)
972 1024 "result": "success", ? (re)
973 1025 "start": "\s*[\d\.]{4,5}", ? (re)
974 1026 "time": "\s*[\d\.]{4,5}" (re)
975 1027 }
976 1028 } (no-eol)
977 1029 $ mv backup test-failure.t
978 1030
979 1031 backslash on end of line with glob matching is handled properly
980 1032
981 1033 $ cat > test-glob-backslash.t << EOF
982 1034 > $ echo 'foo bar \\'
983 1035 > foo * \ (glob)
984 1036 > EOF
985 1037
986 1038 $ rt test-glob-backslash.t
987 1039 .
988 1040 # Ran 1 tests, 0 skipped, 0 failed.
989 1041
990 1042 $ rm -f test-glob-backslash.t
991 1043
992 1044 Test globbing of local IP addresses
993 1045 $ echo 172.16.18.1
994 1046 $LOCALIP (glob)
995 1047 $ echo dead:beef::1
996 1048 $LOCALIP (glob)
997 1049
998 1050 Test reusability for third party tools
999 1051 ======================================
1000 1052
1001 1053 $ mkdir "$TESTTMP"/anothertests
1002 1054 $ cd "$TESTTMP"/anothertests
1003 1055
1004 1056 test that `run-tests.py` can execute hghave, even if it runs not in
1005 1057 Mercurial source tree.
1006 1058
1007 1059 $ cat > test-hghave.t <<EOF
1008 1060 > #require true
1009 1061 > $ echo foo
1010 1062 > foo
1011 1063 > EOF
1012 1064 $ rt test-hghave.t
1013 1065 .
1014 1066 # Ran 1 tests, 0 skipped, 0 failed.
1015 1067
1016 1068 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1017 1069 running is placed.
1018 1070
1019 1071 $ cat > test-runtestdir.t <<EOF
1020 1072 > - $TESTDIR, in which test-run-tests.t is placed
1021 1073 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1022 1074 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1023 1075 >
1024 1076 > #if windows
1025 1077 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1026 1078 > #else
1027 1079 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1028 1080 > #endif
1029 1081 > $ test "\$RUNTESTDIR" = "$TESTDIR"
1030 1082 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python@#!USRBINENVPY@'
1031 1083 > #!USRBINENVPY
1032 1084 > #
1033 1085 > # check-code - a style and portability checker for Mercurial
1034 1086 > EOF
1035 1087 $ rt test-runtestdir.t
1036 1088 .
1037 1089 # Ran 1 tests, 0 skipped, 0 failed.
1038 1090
1039 1091 #if execbit
1040 1092
1041 1093 test that TESTDIR is referred in PATH
1042 1094
1043 1095 $ cat > custom-command.sh <<EOF
1044 1096 > #!/bin/sh
1045 1097 > echo "hello world"
1046 1098 > EOF
1047 1099 $ chmod +x custom-command.sh
1048 1100 $ cat > test-testdir-path.t <<EOF
1049 1101 > $ custom-command.sh
1050 1102 > hello world
1051 1103 > EOF
1052 1104 $ rt test-testdir-path.t
1053 1105 .
1054 1106 # Ran 1 tests, 0 skipped, 0 failed.
1055 1107
1056 1108 #endif
1057 1109
1058 1110 test support for --allow-slow-tests
1059 1111 $ cat > test-very-slow-test.t <<EOF
1060 1112 > #require slow
1061 1113 > $ echo pass
1062 1114 > pass
1063 1115 > EOF
1064 1116 $ rt test-very-slow-test.t
1065 1117 s
1066 1118 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1067 1119 # Ran 0 tests, 1 skipped, 0 failed.
1068 1120 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1069 1121 .
1070 1122 # Ran 1 tests, 0 skipped, 0 failed.
1071 1123
1072 1124 support for running a test outside the current directory
1073 1125 $ mkdir nonlocal
1074 1126 $ cat > nonlocal/test-is-not-here.t << EOF
1075 1127 > $ echo pass
1076 1128 > pass
1077 1129 > EOF
1078 1130 $ rt nonlocal/test-is-not-here.t
1079 1131 .
1080 1132 # Ran 1 tests, 0 skipped, 0 failed.
1081 1133
1082 1134 support for bisecting failed tests automatically
1083 1135 $ hg init bisect
1084 1136 $ cd bisect
1085 1137 $ cat >> test-bisect.t <<EOF
1086 1138 > $ echo pass
1087 1139 > pass
1088 1140 > EOF
1089 1141 $ hg add test-bisect.t
1090 1142 $ hg ci -m 'good'
1091 1143 $ cat >> test-bisect.t <<EOF
1092 1144 > $ echo pass
1093 1145 > fail
1094 1146 > EOF
1095 1147 $ hg ci -m 'bad'
1096 1148 $ rt --known-good-rev=0 test-bisect.t
1097 1149
1098 1150 --- $TESTTMP/anothertests/bisect/test-bisect.t
1099 1151 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1100 1152 @@ -1,4 +1,4 @@
1101 1153 $ echo pass
1102 1154 pass
1103 1155 $ echo pass
1104 1156 - fail
1105 1157 + pass
1106 1158
1107 1159 ERROR: test-bisect.t output changed
1108 1160 !
1109 1161 Failed test-bisect.t: output changed
1110 1162 test-bisect.t broken by 72cbf122d116 (bad)
1111 1163 # Ran 1 tests, 0 skipped, 1 failed.
1112 1164 python hash seed: * (glob)
1113 1165 [1]
1114 1166
1115 1167 $ cd ..
1116 1168
1117 1169 Test a broken #if statement doesn't break run-tests threading.
1118 1170 ==============================================================
1119 1171 $ mkdir broken
1120 1172 $ cd broken
1121 1173 $ cat > test-broken.t <<EOF
1122 1174 > true
1123 1175 > #if notarealhghavefeature
1124 1176 > $ false
1125 1177 > #endif
1126 1178 > EOF
1127 1179 $ for f in 1 2 3 4 ; do
1128 1180 > cat > test-works-$f.t <<EOF
1129 1181 > This is test case $f
1130 1182 > $ sleep 1
1131 1183 > EOF
1132 1184 > done
1133 1185 $ rt -j 2
1134 1186 ....
1135 1187 # Ran 5 tests, 0 skipped, 0 failed.
1136 1188 skipped: unknown feature: notarealhghavefeature
1137 1189
1138 1190 $ cd ..
1139 1191 $ rm -rf broken
1140 1192
1141 1193 Test cases in .t files
1142 1194 ======================
1143 1195 $ mkdir cases
1144 1196 $ cd cases
1145 1197 $ cat > test-cases-abc.t <<'EOF'
1146 1198 > #testcases A B C
1147 1199 > $ V=B
1148 1200 > #if A
1149 1201 > $ V=A
1150 1202 > #endif
1151 1203 > #if C
1152 1204 > $ V=C
1153 1205 > #endif
1154 1206 > $ echo $V | sed 's/A/C/'
1155 1207 > C
1156 1208 > #if C
1157 1209 > $ [ $V = C ]
1158 1210 > #endif
1159 1211 > #if A
1160 1212 > $ [ $V = C ]
1161 1213 > [1]
1162 1214 > #endif
1163 1215 > #if no-C
1164 1216 > $ [ $V = C ]
1165 1217 > [1]
1166 1218 > #endif
1167 1219 > $ [ $V = D ]
1168 1220 > [1]
1169 1221 > EOF
1170 1222 $ rt
1171 1223 .
1172 1224 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1173 1225 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1174 1226 @@ -7,7 +7,7 @@
1175 1227 $ V=C
1176 1228 #endif
1177 1229 $ echo $V | sed 's/A/C/'
1178 1230 - C
1179 1231 + B
1180 1232 #if C
1181 1233 $ [ $V = C ]
1182 1234 #endif
1183 1235
1184 1236 ERROR: test-cases-abc.t (case B) output changed
1185 1237 !.
1186 1238 Failed test-cases-abc.t (case B): output changed
1187 1239 # Ran 3 tests, 0 skipped, 1 failed.
1188 1240 python hash seed: * (glob)
1189 1241 [1]
1190 1242
1191 1243 --restart works
1192 1244
1193 1245 $ rt --restart
1194 1246
1195 1247 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1196 1248 +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
1197 1249 @@ -7,7 +7,7 @@
1198 1250 $ V=C
1199 1251 #endif
1200 1252 $ echo $V | sed 's/A/C/'
1201 1253 - C
1202 1254 + B
1203 1255 #if C
1204 1256 $ [ $V = C ]
1205 1257 #endif
1206 1258
1207 1259 ERROR: test-cases-abc.t (case B) output changed
1208 1260 !.
1209 1261 Failed test-cases-abc.t (case B): output changed
1210 1262 # Ran 2 tests, 0 skipped, 1 failed.
1211 1263 python hash seed: * (glob)
1212 1264 [1]
1213 1265
1214 1266 --restart works with outputdir
1215 1267
1216 1268 $ mkdir output
1217 1269 $ mv test-cases-abc.t.B.err output
1218 1270 $ rt --restart --outputdir output
1219 1271
1220 1272 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1221 1273 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
1222 1274 @@ -7,7 +7,7 @@
1223 1275 $ V=C
1224 1276 #endif
1225 1277 $ echo $V | sed 's/A/C/'
1226 1278 - C
1227 1279 + B
1228 1280 #if C
1229 1281 $ [ $V = C ]
1230 1282 #endif
1231 1283
1232 1284 ERROR: test-cases-abc.t (case B) output changed
1233 1285 !.
1234 1286 Failed test-cases-abc.t (case B): output changed
1235 1287 # Ran 2 tests, 0 skipped, 1 failed.
1236 1288 python hash seed: * (glob)
1237 1289 [1]
General Comments 0
You need to be logged in to leave comments. Login now