run-tests: report skipped tests as "skipped" - they might still be "relevant"
Mads Kiilerich
r22292:102f0e92 default
@@ -1,1922 +1,1922 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 #
39 39 # (You could use any subset of the tests: test-s* happens to match
40 40 # enough that it's worth doing parallel runs, few enough that it
41 41 # completes fairly quickly, includes both shell and Python scripts, and
42 42 # includes some scripts that run daemon processes.)
43 43
44 44 from distutils import version
45 45 import difflib
46 46 import errno
47 47 import optparse
48 48 import os
49 49 import shutil
50 50 import subprocess
51 51 import signal
52 52 import sys
53 53 import tempfile
54 54 import time
55 55 import random
56 56 import re
57 57 import threading
58 58 import killdaemons as killmod
59 59 import Queue as queue
60 60 from xml.dom import minidom
61 61 import unittest
62 62
63 63 processlock = threading.Lock()
64 64
65 65 # subprocess._cleanup can race with any Popen.wait or Popen.poll on py24
66 66 # See http://bugs.python.org/issue1731717 for details. We shouldn't be producing
67 67 # zombies but it's pretty harmless even if we do.
68 68 if sys.version_info < (2, 5):
69 69 subprocess._cleanup = lambda: None
70 70
71 71 closefds = os.name == 'posix'
72 72 def Popen4(cmd, wd, timeout, env=None):
73 73 processlock.acquire()
74 74 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
75 75 close_fds=closefds,
76 76 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
77 77 stderr=subprocess.STDOUT)
78 78 processlock.release()
79 79
80 80 p.fromchild = p.stdout
81 81 p.tochild = p.stdin
82 82 p.childerr = p.stderr
83 83
84 84 p.timeout = False
85 85 if timeout:
86 86 def t():
87 87 start = time.time()
88 88 while time.time() - start < timeout and p.returncode is None:
89 89 time.sleep(.1)
90 90 p.timeout = True
91 91 if p.returncode is None:
92 92 terminate(p)
93 93 threading.Thread(target=t).start()
94 94
95 95 return p
96 96
97 97 PYTHON = sys.executable.replace('\\', '/')
98 98 IMPL_PATH = 'PYTHONPATH'
99 99 if 'java' in sys.platform:
100 100 IMPL_PATH = 'JYTHONPATH'
101 101
102 102 TESTDIR = HGTMP = INST = BINDIR = TMPBINDIR = PYTHONDIR = None
103 103
104 104 defaults = {
105 105 'jobs': ('HGTEST_JOBS', 1),
106 106 'timeout': ('HGTEST_TIMEOUT', 180),
107 107 'port': ('HGTEST_PORT', 20059),
108 108 'shell': ('HGTEST_SHELL', 'sh'),
109 109 }
110 110
111 111 def parselistfiles(files, listtype, warn=True):
112 112 entries = dict()
113 113 for filename in files:
114 114 try:
115 115 path = os.path.expanduser(os.path.expandvars(filename))
116 116 f = open(path, "rb")
117 117 except IOError, err:
118 118 if err.errno != errno.ENOENT:
119 119 raise
120 120 if warn:
121 121 print "warning: no such %s file: %s" % (listtype, filename)
122 122 continue
123 123
124 124 for line in f.readlines():
125 125 line = line.split('#', 1)[0].strip()
126 126 if line:
127 127 entries[line] = filename
128 128
129 129 f.close()
130 130 return entries
131 131
132 132 def getparser():
133 133 """Obtain the OptionParser used by the CLI."""
134 134 parser = optparse.OptionParser("%prog [options] [tests]")
135 135
136 136 # keep these sorted
137 137 parser.add_option("--blacklist", action="append",
138 138 help="skip tests listed in the specified blacklist file")
139 139 parser.add_option("--whitelist", action="append",
140 140 help="always run tests listed in the specified whitelist file")
141 141 parser.add_option("--changed", type="string",
142 142 help="run tests that are changed in parent rev or working directory")
143 143 parser.add_option("-C", "--annotate", action="store_true",
144 144 help="output files annotated with coverage")
145 145 parser.add_option("-c", "--cover", action="store_true",
146 146 help="print a test coverage report")
147 147 parser.add_option("-d", "--debug", action="store_true",
148 148 help="debug mode: write output of test scripts to console"
149 149 " rather than capturing and diffing it (disables timeout)")
150 150 parser.add_option("-f", "--first", action="store_true",
151 151 help="exit on the first test failure")
152 152 parser.add_option("-H", "--htmlcov", action="store_true",
153 153 help="create an HTML report of the coverage of the files")
154 154 parser.add_option("-i", "--interactive", action="store_true",
155 155 help="prompt to accept changed output")
156 156 parser.add_option("-j", "--jobs", type="int",
157 157 help="number of jobs to run in parallel"
158 158 " (default: $%s or %d)" % defaults['jobs'])
159 159 parser.add_option("--keep-tmpdir", action="store_true",
160 160 help="keep temporary directory after running tests")
161 161 parser.add_option("-k", "--keywords",
162 162 help="run tests matching keywords")
163 163 parser.add_option("-l", "--local", action="store_true",
164 164 help="shortcut for --with-hg=<testdir>/../hg")
165 165 parser.add_option("--loop", action="store_true",
166 166 help="loop tests repeatedly")
167 167 parser.add_option("-n", "--nodiff", action="store_true",
168 168 help="skip showing test changes")
169 169 parser.add_option("-p", "--port", type="int",
170 170 help="port on which servers should listen"
171 171 " (default: $%s or %d)" % defaults['port'])
172 172 parser.add_option("--compiler", type="string",
173 173 help="compiler to build with")
174 174 parser.add_option("--pure", action="store_true",
175 175 help="use pure Python code instead of C extensions")
176 176 parser.add_option("-R", "--restart", action="store_true",
177 177 help="restart at last error")
178 178 parser.add_option("-r", "--retest", action="store_true",
179 179 help="retest failed tests")
180 180 parser.add_option("-S", "--noskips", action="store_true",
181 181 help="don't report skipped tests verbosely")
182 182 parser.add_option("--shell", type="string",
183 183 help="shell to use (default: $%s or %s)" % defaults['shell'])
184 184 parser.add_option("-t", "--timeout", type="int",
185 185 help="kill errant tests after TIMEOUT seconds"
186 186 " (default: $%s or %d)" % defaults['timeout'])
187 187 parser.add_option("--time", action="store_true",
188 188 help="time how long each test takes")
189 189 parser.add_option("--tmpdir", type="string",
190 190 help="run tests in the given temporary directory"
191 191 " (implies --keep-tmpdir)")
192 192 parser.add_option("-v", "--verbose", action="store_true",
193 193 help="output verbose messages")
194 194 parser.add_option("--xunit", type="string",
195 195 help="record xunit results at specified path")
196 196 parser.add_option("--view", type="string",
197 197 help="external diff viewer")
198 198 parser.add_option("--with-hg", type="string",
199 199 metavar="HG",
200 200 help="test using specified hg script rather than a "
201 201 "temporary installation")
202 202 parser.add_option("-3", "--py3k-warnings", action="store_true",
203 203 help="enable Py3k warnings on Python 2.6+")
204 204 parser.add_option('--extra-config-opt', action="append",
205 205 help='set the given config opt in the test hgrc')
206 206 parser.add_option('--random', action="store_true",
207 207 help='run tests in random order')
208 208
209 209 for option, (envvar, default) in defaults.items():
210 210 defaults[option] = type(default)(os.environ.get(envvar, default))
211 211 parser.set_defaults(**defaults)
212 212
213 213 return parser
214 214
215 215 def parseargs(args, parser):
216 216 """Parse arguments with our OptionParser and validate results."""
217 217 (options, args) = parser.parse_args(args)
218 218
219 219 # jython is always pure
220 220 if 'java' in sys.platform or '__pypy__' in sys.modules:
221 221 options.pure = True
222 222
223 223 if options.with_hg:
224 224 options.with_hg = os.path.expanduser(options.with_hg)
225 225 if not (os.path.isfile(options.with_hg) and
226 226 os.access(options.with_hg, os.X_OK)):
227 227 parser.error('--with-hg must specify an executable hg script')
228 228 if not os.path.basename(options.with_hg) == 'hg':
229 229 sys.stderr.write('warning: --with-hg should specify an hg script\n')
230 230 if options.local:
231 231 testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
232 232 hgbin = os.path.join(os.path.dirname(testdir), 'hg')
233 233 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
234 234 parser.error('--local specified, but %r not found or not executable'
235 235 % hgbin)
236 236 options.with_hg = hgbin
237 237
238 238 options.anycoverage = options.cover or options.annotate or options.htmlcov
239 239 if options.anycoverage:
240 240 try:
241 241 import coverage
242 242 covver = version.StrictVersion(coverage.__version__).version
243 243 if covver < (3, 3):
244 244 parser.error('coverage options require coverage 3.3 or later')
245 245 except ImportError:
246 246 parser.error('coverage options now require the coverage package')
247 247
248 248 if options.anycoverage and options.local:
249 249 # this needs some path mangling somewhere, I guess
250 250 parser.error("sorry, coverage options do not work when --local "
251 251 "is specified")
252 252
253 253 global verbose
254 254 if options.verbose:
255 255 verbose = ''
256 256
257 257 if options.tmpdir:
258 258 options.tmpdir = os.path.expanduser(options.tmpdir)
259 259
260 260 if options.jobs < 1:
261 261 parser.error('--jobs must be positive')
262 262 if options.interactive and options.debug:
263 263 parser.error("-i/--interactive and -d/--debug are incompatible")
264 264 if options.debug:
265 265 if options.timeout != defaults['timeout']:
266 266 sys.stderr.write(
267 267 'warning: --timeout option ignored with --debug\n')
268 268 options.timeout = 0
269 269 if options.py3k_warnings:
270 270 if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
271 271 parser.error('--py3k-warnings can only be used on Python 2.6+')
272 272 if options.blacklist:
273 273 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
274 274 if options.whitelist:
275 275 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
276 276 else:
277 277 options.whitelisted = {}
278 278
279 279 return (options, args)
280 280
281 281 def rename(src, dst):
282 282 """Like os.rename(), but trades atomicity and friendliness to open
283 283 files for the ability to overwrite an existing destination.
284 284 """
285 285 shutil.copy(src, dst)
286 286 os.remove(src)
287 287
288 288 def getdiff(expected, output, ref, err):
289 289 servefail = False
290 290 lines = []
291 291 for line in difflib.unified_diff(expected, output, ref, err):
292 292 if line.startswith('+++') or line.startswith('---'):
293 293 if line.endswith(' \n'):
294 294 line = line[:-2] + '\n'
295 295 lines.append(line)
296 296 if not servefail and line.startswith(
297 297 '+ abort: child process failed to start'):
298 298 servefail = True
299 299
300 300 return servefail, lines
301 301
302 302 verbose = False
303 303 def vlog(*msg):
304 304 """Log only when in verbose mode."""
305 305 if verbose is False:
306 306 return
307 307
308 308 return log(*msg)
309 309
310 310 # Bytes that break XML even in a CDATA block: control characters 0-31
311 311 # sans \t, \n and \r
312 312 CDATA_EVIL = re.compile(r"[\000-\010\013\014\016-\037]")
313 313
314 314 def cdatasafe(data):
315 315 """Make a string safe to include in a CDATA block.
316 316
317 317 Certain control characters are illegal in a CDATA block, and
318 318 there's no way to include a ]]> in a CDATA either. This function
319 319 replaces illegal bytes with ? and adds a space between the ]] so
320 320 that it won't break the CDATA block.
321 321 """
322 322 return CDATA_EVIL.sub('?', data).replace(']]>', '] ]>')
323 323
324 324 def log(*msg):
325 325 """Log something to stdout.
326 326
327 327 Arguments are strings to print.
328 328 """
329 329 iolock.acquire()
330 330 if verbose:
331 331 print verbose,
332 332 for m in msg:
333 333 print m,
334 334 print
335 335 sys.stdout.flush()
336 336 iolock.release()
337 337
338 338 def terminate(proc):
339 339 """Terminate subprocess (with fallback for Python versions < 2.6)"""
340 340 vlog('# Terminating process %d' % proc.pid)
341 341 try:
342 342 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
343 343 except OSError:
344 344 pass
345 345
346 346 def killdaemons(pidfile):
347 347 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
348 348 logfn=vlog)
349 349
350 350 class Test(unittest.TestCase):
351 351 """Encapsulates a single, runnable test.
352 352
353 353 While this class conforms to the unittest.TestCase API, it differs in that
354 354 instances need to be instantiated manually. (Typically, unittest.TestCase
355 355 classes are instantiated automatically by scanning modules.)
356 356 """
357 357
358 358 # Status code reserved for skipped tests (used by hghave).
359 359 SKIPPED_STATUS = 80
360 360
361 361 def __init__(self, path, tmpdir, keeptmpdir=False,
362 362 debug=False,
363 363 timeout=defaults['timeout'],
364 364 startport=defaults['port'], extraconfigopts=None,
365 365 py3kwarnings=False, shell=None):
366 366 """Create a test from parameters.
367 367
368 368 path is the full path to the file defining the test.
369 369
370 370 tmpdir is the main temporary directory to use for this test.
371 371
372 372 keeptmpdir determines whether to keep the test's temporary directory
373 373 after execution. It defaults to removal (False).
374 374
375 375 debug mode will make the test execute verbosely, with unfiltered
376 376 output.
377 377
378 378 timeout controls the maximum run time of the test. It is ignored when
379 379 debug is True.
380 380
381 381 startport controls the starting port number to use for this test. Each
382 382 test will reserve 3 port numbers for execution. It is the caller's
383 383 responsibility to allocate a non-overlapping port range to Test
384 384 instances.
385 385
386 386 extraconfigopts is an iterable of extra hgrc config options. Values
387 387 must have the form "key=value" (something understood by hgrc). Values
388 388 of the form "foo.key=value" will result in "[foo] key=value".
389 389
390 390 py3kwarnings enables Py3k warnings.
391 391
392 392 shell is the shell to execute tests in.
393 393 """
394 394
395 395 self.path = path
396 396 self.name = os.path.basename(path)
397 397 self._testdir = os.path.dirname(path)
398 398 self.errpath = os.path.join(self._testdir, '%s.err' % self.name)
399 399
400 400 self._threadtmp = tmpdir
401 401 self._keeptmpdir = keeptmpdir
402 402 self._debug = debug
403 403 self._timeout = timeout
404 404 self._startport = startport
405 405 self._extraconfigopts = extraconfigopts or []
406 406 self._py3kwarnings = py3kwarnings
407 407 self._shell = shell
408 408
409 409 self._aborted = False
410 410 self._daemonpids = []
411 411 self._finished = None
412 412 self._ret = None
413 413 self._out = None
414 414 self._skipped = None
415 415 self._testtmp = None
416 416
417 417 # If we're not in --debug mode and reference output file exists,
418 418 # check test output against it.
419 419 if debug:
420 420 self._refout = None # to match "out is None"
421 421 elif os.path.exists(self.refpath):
422 422 f = open(self.refpath, 'rb')
423 423 self._refout = f.read().splitlines(True)
424 424 f.close()
425 425 else:
426 426 self._refout = []
427 427
428 428 def __str__(self):
429 429 return self.name
430 430
431 431 def shortDescription(self):
432 432 return self.name
433 433
434 434 def setUp(self):
435 435 """Tasks to perform before run()."""
436 436 self._finished = False
437 437 self._ret = None
438 438 self._out = None
439 439 self._skipped = None
440 440
441 441 try:
442 442 os.mkdir(self._threadtmp)
443 443 except OSError, e:
444 444 if e.errno != errno.EEXIST:
445 445 raise
446 446
447 447 self._testtmp = os.path.join(self._threadtmp,
448 448 os.path.basename(self.path))
449 449 os.mkdir(self._testtmp)
450 450
451 451 # Remove any previous output files.
452 452 if os.path.exists(self.errpath):
453 453 os.remove(self.errpath)
454 454
455 455 def run(self, result):
456 456 """Run this test and report results against a TestResult instance."""
457 457 # This function is extremely similar to unittest.TestCase.run(). Once
458 458 # we require Python 2.7 (or at least its version of unittest), this
459 459 # function can largely go away.
460 460 self._result = result
461 461 result.startTest(self)
462 462 try:
463 463 try:
464 464 self.setUp()
465 465 except (KeyboardInterrupt, SystemExit):
466 466 self._aborted = True
467 467 raise
468 468 except Exception:
469 469 result.addError(self, sys.exc_info())
470 470 return
471 471
472 472 success = False
473 473 try:
474 474 self.runTest()
475 475 except KeyboardInterrupt:
476 476 self._aborted = True
477 477 raise
478 478 except SkipTest, e:
479 479 result.addSkip(self, str(e))
480 480 # The base class will have already counted this as a
481 481 # test we "ran", but we want to exclude skipped tests
482 482 # from the count of tests actually run.
483 483 result.testsRun -= 1
484 484 except IgnoreTest, e:
485 485 result.addIgnore(self, str(e))
486 486 # As with skips, ignores also should be excluded from
487 487 # the number of tests executed.
488 488 result.testsRun -= 1
489 489 except WarnTest, e:
490 490 result.addWarn(self, str(e))
491 491 except self.failureException, e:
492 492 # This differs from unittest in that we don't capture
493 493 # the stack trace. This is for historical reasons and
494 494 # this decision could be revisited in the future,
495 495 # especially for PythonTest instances.
496 496 if result.addFailure(self, str(e)):
497 497 success = True
498 498 except Exception:
499 499 result.addError(self, sys.exc_info())
500 500 else:
501 501 success = True
502 502
503 503 try:
504 504 self.tearDown()
505 505 except (KeyboardInterrupt, SystemExit):
506 506 self._aborted = True
507 507 raise
508 508 except Exception:
509 509 result.addError(self, sys.exc_info())
510 510 success = False
511 511
512 512 if success:
513 513 result.addSuccess(self)
514 514 finally:
515 515 result.stopTest(self, interrupted=self._aborted)
516 516
517 517 def runTest(self):
518 518 """Run this test instance.
519 519
520 520 This will return a tuple describing the result of the test.
521 521 """
522 522 replacements = self._getreplacements()
523 523 env = self._getenv()
524 524 self._daemonpids.append(env['DAEMON_PIDS'])
525 525 self._createhgrc(env['HGRCPATH'])
526 526
527 527 vlog('# Test', self.name)
528 528
529 529 ret, out = self._run(replacements, env)
530 530 self._finished = True
531 531 self._ret = ret
532 532 self._out = out
533 533
534 534 def describe(ret):
535 535 if ret < 0:
536 536 return 'killed by signal: %d' % -ret
537 537 return 'returned error code %d' % ret
538 538
539 539 self._skipped = False
540 540
541 541 if ret == self.SKIPPED_STATUS:
542 542 if out is None: # Debug mode, nothing to parse.
543 543 missing = ['unknown']
544 544 failed = None
545 545 else:
546 546 missing, failed = TTest.parsehghaveoutput(out)
547 547
548 548 if not missing:
549 - missing = ['irrelevant']
549 + missing = ['skipped']
550 550
551 551 if failed:
552 552 self.fail('hghave failed checking for %s' % failed[-1])
553 553 else:
554 554 self._skipped = True
555 555 raise SkipTest(missing[-1])
556 556 elif ret == 'timeout':
557 557 self.fail('timed out')
558 558 elif ret is False:
559 559 raise WarnTest('no result code from test')
560 560 elif out != self._refout:
561 561 # Diff generation may rely on written .err file.
562 562 if (ret != 0 or out != self._refout) and not self._skipped \
563 563 and not self._debug:
564 564 f = open(self.errpath, 'wb')
565 565 for line in out:
566 566 f.write(line)
567 567 f.close()
568 568
569 569 # The result object handles diff calculation for us.
570 570 if self._result.addOutputMismatch(self, ret, out, self._refout):
571 571 # change was accepted, skip failing
572 572 return
573 573
574 574 if ret:
575 575 msg = 'output changed and ' + describe(ret)
576 576 else:
577 577 msg = 'output changed'
578 578
579 579 self.fail(msg)
580 580 elif ret:
581 581 self.fail(describe(ret))
582 582
583 583 def tearDown(self):
584 584 """Tasks to perform after run()."""
585 585 for entry in self._daemonpids:
586 586 killdaemons(entry)
587 587 self._daemonpids = []
588 588
589 589 if not self._keeptmpdir:
590 590 shutil.rmtree(self._testtmp, True)
591 591 shutil.rmtree(self._threadtmp, True)
592 592
593 593 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
594 594 and not self._debug and self._out:
595 595 f = open(self.errpath, 'wb')
596 596 for line in self._out:
597 597 f.write(line)
598 598 f.close()
599 599
600 600 vlog("# Ret was:", self._ret)
601 601
602 602 def _run(self, replacements, env):
603 603 # This should be implemented in child classes to run tests.
604 604 raise SkipTest('unknown test type')
605 605
606 606 def abort(self):
607 607 """Terminate execution of this test."""
608 608 self._aborted = True
609 609
610 610 def _getreplacements(self):
611 611 """Obtain a mapping of text replacements to apply to test output.
612 612
613 613 Test output needs to be normalized so it can be compared to expected
614 614 output. This function defines how some of that normalization will
615 615 occur.
616 616 """
617 617 r = [
618 618 (r':%s\b' % self._startport, ':$HGPORT'),
619 619 (r':%s\b' % (self._startport + 1), ':$HGPORT1'),
620 620 (r':%s\b' % (self._startport + 2), ':$HGPORT2'),
621 621 ]
622 622
623 623 if os.name == 'nt':
624 624 r.append(
625 625 (''.join(c.isalpha() and '[%s%s]' % (c.lower(), c.upper()) or
626 626 c in '/\\' and r'[/\\]' or c.isdigit() and c or '\\' + c
627 627 for c in self._testtmp), '$TESTTMP'))
628 628 else:
629 629 r.append((re.escape(self._testtmp), '$TESTTMP'))
630 630
631 631 return r
632 632
633 633 def _getenv(self):
634 634 """Obtain environment variables to use during test execution."""
635 635 env = os.environ.copy()
636 636 env['TESTTMP'] = self._testtmp
637 637 env['HOME'] = self._testtmp
638 638 env["HGPORT"] = str(self._startport)
639 639 env["HGPORT1"] = str(self._startport + 1)
640 640 env["HGPORT2"] = str(self._startport + 2)
641 641 env["HGRCPATH"] = os.path.join(self._threadtmp, '.hgrc')
642 642 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, 'daemon.pids')
643 643 env["HGEDITOR"] = sys.executable + ' -c "import sys; sys.exit(0)"'
644 644 env["HGMERGE"] = "internal:merge"
645 645 env["HGUSER"] = "test"
646 646 env["HGENCODING"] = "ascii"
647 647 env["HGENCODINGMODE"] = "strict"
648 648
649 649 # Reset some environment variables to well-known values so that
650 650 # the tests produce repeatable output.
651 651 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
652 652 env['TZ'] = 'GMT'
653 653 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
654 654 env['COLUMNS'] = '80'
655 655 env['TERM'] = 'xterm'
656 656
657 657 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
658 658 'NO_PROXY').split():
659 659 if k in env:
660 660 del env[k]
661 661
662 662 # unset env related to hooks
663 663 for k in env.keys():
664 664 if k.startswith('HG_'):
665 665 del env[k]
666 666
667 667 return env
668 668
669 669 def _createhgrc(self, path):
670 670 """Create an hgrc file for this test."""
671 671 hgrc = open(path, 'wb')
672 672 hgrc.write('[ui]\n')
673 673 hgrc.write('slash = True\n')
674 674 hgrc.write('interactive = False\n')
675 675 hgrc.write('mergemarkers = detailed\n')
676 676 hgrc.write('[defaults]\n')
677 677 hgrc.write('backout = -d "0 0"\n')
678 678 hgrc.write('commit = -d "0 0"\n')
679 679 hgrc.write('shelve = --date "0 0"\n')
680 680 hgrc.write('tag = -d "0 0"\n')
681 681 for opt in self._extraconfigopts:
682 682 section, key = opt.split('.', 1)
683 683 assert '=' in key, ('extra config opt %s must '
684 684 'have an = for assignment' % opt)
685 685 hgrc.write('[%s]\n%s\n' % (section, key))
686 686 hgrc.close()
687 687
688 688 def fail(self, msg):
689 689 # unittest differentiates between errored and failed.
690 690 # Failed is denoted by AssertionError (by default at least).
691 691 raise AssertionError(msg)
692 692
693 693 class PythonTest(Test):
694 694 """A Python-based test."""
695 695
696 696 @property
697 697 def refpath(self):
698 698 return os.path.join(self._testdir, '%s.out' % self.name)
699 699
700 700 def _run(self, replacements, env):
701 701 py3kswitch = self._py3kwarnings and ' -3' or ''
702 702 cmd = '%s%s "%s"' % (PYTHON, py3kswitch, self.path)
703 703 vlog("# Running", cmd)
704 704 if os.name == 'nt':
705 705 replacements.append((r'\r\n', '\n'))
706 706 result = run(cmd, self._testtmp, replacements, env,
707 707 debug=self._debug, timeout=self._timeout)
708 708 if self._aborted:
709 709 raise KeyboardInterrupt()
710 710
711 711 return result
712 712
713 713 class TTest(Test):
714 714 """A "t test" is a test backed by a .t file."""
715 715
716 716 SKIPPED_PREFIX = 'skipped: '
717 717 FAILED_PREFIX = 'hghave check failed: '
718 718 NEEDESCAPE = re.compile(r'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
719 719
720 720 ESCAPESUB = re.compile(r'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
721 721 ESCAPEMAP = dict((chr(i), r'\x%02x' % i) for i in range(256))
722 722 ESCAPEMAP.update({'\\': '\\\\', '\r': r'\r'})
723 723
724 724 @property
725 725 def refpath(self):
726 726 return os.path.join(self._testdir, self.name)
727 727
728 728 def _run(self, replacements, env):
729 729 f = open(self.path, 'rb')
730 730 lines = f.readlines()
731 731 f.close()
732 732
733 733 salt, script, after, expected = self._parsetest(lines)
734 734
735 735 # Write out the generated script.
736 736 fname = '%s.sh' % self._testtmp
737 737 f = open(fname, 'wb')
738 738 for l in script:
739 739 f.write(l)
740 740 f.close()
741 741
742 742 cmd = '%s "%s"' % (self._shell, fname)
743 743 vlog("# Running", cmd)
744 744
745 745 exitcode, output = run(cmd, self._testtmp, replacements, env,
746 746 debug=self._debug, timeout=self._timeout)
747 747
748 748 if self._aborted:
749 749 raise KeyboardInterrupt()
750 750
751 751 # Do not merge output if skipped. Return hghave message instead.
752 752 # Similarly, with --debug, output is None.
753 753 if exitcode == self.SKIPPED_STATUS or output is None:
754 754 return exitcode, output
755 755
756 756 return self._processoutput(exitcode, output, salt, after, expected)
757 757
758 758 def _hghave(self, reqs):
759 759 # TODO do something smarter when all other uses of hghave are gone.
760 760 tdir = self._testdir.replace('\\', '/')
761 761 proc = Popen4('%s -c "%s/hghave %s"' %
762 762 (self._shell, tdir, ' '.join(reqs)),
763 763 self._testtmp, 0)
764 764 stdout, stderr = proc.communicate()
765 765 ret = proc.wait()
766 766 if wifexited(ret):
767 767 ret = os.WEXITSTATUS(ret)
768 768 if ret == 2:
769 769 print stdout
770 770 sys.exit(1)
771 771
772 772 return ret == 0
773 773
774 774 def _parsetest(self, lines):
775 775 # We generate a shell script which outputs unique markers to line
776 776 # up script results with our source. These markers include input
777 777 # line number and the last return code.
778 778 salt = "SALT" + str(time.time())
779 779 def addsalt(line, inpython):
780 780 if inpython:
781 781 script.append('%s %d 0\n' % (salt, line))
782 782 else:
783 783 script.append('echo %s %s $?\n' % (salt, line))
784 784
785 785 script = []
786 786
787 787 # After we run the shell script, we re-unify the script output
788 788 # with non-active parts of the source, with synchronization by our
789 789 # SALT line number markers. The after table contains the non-active
790 790 # components, ordered by line number.
791 791 after = {}
792 792
793 793 # Expected shell script output.
794 794 expected = {}
795 795
796 796 pos = prepos = -1
797 797
798 798 # True or False when in a true or false conditional section
799 799 skipping = None
800 800
801 801 # We keep track of whether or not we're in a Python block so we
802 802 # can generate the surrounding doctest magic.
803 803 inpython = False
804 804
805 805 if self._debug:
806 806 script.append('set -x\n')
807 807 if os.getenv('MSYSTEM'):
808 808 script.append('alias pwd="pwd -W"\n')
809 809
810 810 for n, l in enumerate(lines):
811 811 if not l.endswith('\n'):
812 812 l += '\n'
813 813 if l.startswith('#require'):
814 814 lsplit = l.split()
815 815 if len(lsplit) < 2 or lsplit[0] != '#require':
816 816 after.setdefault(pos, []).append(' !!! invalid #require\n')
817 817 if not self._hghave(lsplit[1:]):
818 818 script = ["exit 80\n"]
819 819 break
820 820 after.setdefault(pos, []).append(l)
821 821 elif l.startswith('#if'):
822 822 lsplit = l.split()
823 823 if len(lsplit) < 2 or lsplit[0] != '#if':
824 824 after.setdefault(pos, []).append(' !!! invalid #if\n')
825 825 if skipping is not None:
826 826 after.setdefault(pos, []).append(' !!! nested #if\n')
827 827 skipping = not self._hghave(lsplit[1:])
828 828 after.setdefault(pos, []).append(l)
829 829 elif l.startswith('#else'):
830 830 if skipping is None:
831 831 after.setdefault(pos, []).append(' !!! missing #if\n')
832 832 skipping = not skipping
833 833 after.setdefault(pos, []).append(l)
834 834 elif l.startswith('#endif'):
835 835 if skipping is None:
836 836 after.setdefault(pos, []).append(' !!! missing #if\n')
837 837 skipping = None
838 838 after.setdefault(pos, []).append(l)
839 839 elif skipping:
840 840 after.setdefault(pos, []).append(l)
841 841 elif l.startswith(' >>> '): # python inlines
842 842 after.setdefault(pos, []).append(l)
843 843 prepos = pos
844 844 pos = n
845 845 if not inpython:
846 846 # We've just entered a Python block. Add the header.
847 847 inpython = True
848 848 addsalt(prepos, False) # Make sure we report the exit code.
849 849 script.append('%s -m heredoctest <<EOF\n' % PYTHON)
850 850 addsalt(n, True)
851 851 script.append(l[2:])
852 852 elif l.startswith(' ... '): # python inlines
853 853 after.setdefault(prepos, []).append(l)
854 854 script.append(l[2:])
855 855 elif l.startswith(' $ '): # commands
856 856 if inpython:
857 857 script.append('EOF\n')
858 858 inpython = False
859 859 after.setdefault(pos, []).append(l)
860 860 prepos = pos
861 861 pos = n
862 862 addsalt(n, False)
863 863 cmd = l[4:].split()
864 864 if len(cmd) == 2 and cmd[0] == 'cd':
865 865 l = ' $ cd %s || exit 1\n' % cmd[1]
866 866 script.append(l[4:])
867 867 elif l.startswith(' > '): # continuations
868 868 after.setdefault(prepos, []).append(l)
869 869 script.append(l[4:])
870 870 elif l.startswith(' '): # results
871 871 # Queue up a list of expected results.
872 872 expected.setdefault(pos, []).append(l[2:])
873 873 else:
874 874 if inpython:
875 875 script.append('EOF\n')
876 876 inpython = False
877 877 # Non-command/result. Queue up for merged output.
878 878 after.setdefault(pos, []).append(l)
879 879
880 880 if inpython:
881 881 script.append('EOF\n')
882 882 if skipping is not None:
883 883 after.setdefault(pos, []).append(' !!! missing #endif\n')
884 884 addsalt(n + 1, False)
885 885
886 886 return salt, script, after, expected
887 887
888 888 def _processoutput(self, exitcode, output, salt, after, expected):
889 889 # Merge the script output back into a unified test.
890 890 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
891 891 if exitcode != 0:
892 892 warnonly = 3
893 893
894 894 pos = -1
895 895 postout = []
896 896 for l in output:
897 897 lout, lcmd = l, None
898 898 if salt in l:
899 899 lout, lcmd = l.split(salt, 1)
900 900
901 901 if lout:
902 902 if not lout.endswith('\n'):
903 903 lout += ' (no-eol)\n'
904 904
905 905 # Find the expected output at the current position.
906 906 el = None
907 907 if expected.get(pos, None):
908 908 el = expected[pos].pop(0)
909 909
910 910 r = TTest.linematch(el, lout)
911 911 if isinstance(r, str):
912 912 if r == '+glob':
913 913 lout = el[:-1] + ' (glob)\n'
914 914 r = '' # Warn only this line.
915 915 elif r == '-glob':
916 916 lout = ''.join(el.rsplit(' (glob)', 1))
917 917 r = '' # Warn only this line.
918 918 else:
919 919 log('\ninfo, unknown linematch result: %r\n' % r)
920 920 r = False
921 921 if r:
922 922 postout.append(' ' + el)
923 923 else:
924 924 if self.NEEDESCAPE(lout):
925 925 lout = TTest._stringescape('%s (esc)\n' %
926 926 lout.rstrip('\n'))
927 927 postout.append(' ' + lout) # Let diff deal with it.
928 928 if r != '': # If line failed.
929 929 warnonly = 3 # for sure not
930 930 elif warnonly == 1: # Is "not yet" and line is warn only.
931 931 warnonly = 2 # Yes do warn.
932 932
933 933 if lcmd:
934 934 # Add on last return code.
935 935 ret = int(lcmd.split()[1])
936 936 if ret != 0:
937 937 postout.append(' [%s]\n' % ret)
938 938 if pos in after:
939 939 # Merge in non-active test bits.
940 940 postout += after.pop(pos)
941 941 pos = int(lcmd.split()[0])
942 942
943 943 if pos in after:
944 944 postout += after.pop(pos)
945 945
946 946 if warnonly == 2:
947 947 exitcode = False # Set exitcode to warned.
948 948
949 949 return exitcode, postout
950 950
951 951 @staticmethod
952 952 def rematch(el, l):
953 953 try:
954 954 # use \Z to ensure that the regex matches to the end of the string
955 955 if os.name == 'nt':
956 956 return re.match(el + r'\r?\n\Z', l)
957 957 return re.match(el + r'\n\Z', l)
958 958 except re.error:
959 959 # el is an invalid regex
960 960 return False
961 961
962 962 @staticmethod
963 963 def globmatch(el, l):
964 964 # The only supported special characters are * and ? plus / which also
965 965 # matches \ on windows. Escaping of these characters is supported.
966 966 if el + '\n' == l:
967 967 if os.altsep:
968 968 # matching on "/" is not needed for this line
969 969 return '-glob'
970 970 return True
971 971 i, n = 0, len(el)
972 972 res = ''
973 973 while i < n:
974 974 c = el[i]
975 975 i += 1
976 976 if c == '\\' and el[i] in '*?\\/':
977 977 res += el[i - 1:i + 1]
978 978 i += 1
979 979 elif c == '*':
980 980 res += '.*'
981 981 elif c == '?':
982 982 res += '.'
983 983 elif c == '/' and os.altsep:
984 984 res += '[/\\\\]'
985 985 else:
986 986 res += re.escape(c)
987 987 return TTest.rematch(res, l)
988 988
989 989 @staticmethod
990 990 def linematch(el, l):
991 991 if el == l: # perfect match (fast)
992 992 return True
993 993 if el:
994 994 if el.endswith(" (esc)\n"):
995 995 el = el[:-7].decode('string-escape') + '\n'
996 996 if el == l or os.name == 'nt' and el[:-1] + '\r\n' == l:
997 997 return True
998 998 if el.endswith(" (re)\n"):
999 999 return TTest.rematch(el[:-6], l)
1000 1000 if el.endswith(" (glob)\n"):
1001 1001 return TTest.globmatch(el[:-8], l)
1002 1002 if os.altsep and l.replace('\\', '/') == el:
1003 1003 return '+glob'
1004 1004 return False
1005 1005
1006 1006 @staticmethod
1007 1007 def parsehghaveoutput(lines):
1008 1008 '''Parse hghave log lines.
1009 1009
1010 1010 Return tuple of lists (missing, failed):
1011 1011 * the missing/unknown features
1012 1012 * the features for which existence check failed'''
1013 1013 missing = []
1014 1014 failed = []
1015 1015 for line in lines:
1016 1016 if line.startswith(TTest.SKIPPED_PREFIX):
1017 1017 line = line.splitlines()[0]
1018 1018 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1019 1019 elif line.startswith(TTest.FAILED_PREFIX):
1020 1020 line = line.splitlines()[0]
1021 1021 failed.append(line[len(TTest.FAILED_PREFIX):])
1022 1022
1023 1023 return missing, failed
1024 1024
1025 1025 @staticmethod
1026 1026 def _escapef(m):
1027 1027 return TTest.ESCAPEMAP[m.group(0)]
1028 1028
1029 1029 @staticmethod
1030 1030 def _stringescape(s):
1031 1031 return TTest.ESCAPESUB(TTest._escapef, s)
1032 1032
1033 1033
1034 1034 wifexited = getattr(os, "WIFEXITED", lambda x: False)
1035 1035 def run(cmd, wd, replacements, env, debug=False, timeout=None):
1036 1036 """Run command in a sub-process, capturing the output (stdout and stderr).
1037 1037 Return a tuple (exitcode, output). output is None in debug mode."""
1038 1038 if debug:
1039 1039 proc = subprocess.Popen(cmd, shell=True, cwd=wd, env=env)
1040 1040 ret = proc.wait()
1041 1041 return (ret, None)
1042 1042
1043 1043 proc = Popen4(cmd, wd, timeout, env)
1044 1044 def cleanup():
1045 1045 terminate(proc)
1046 1046 ret = proc.wait()
1047 1047 if ret == 0:
1048 1048 ret = signal.SIGTERM << 8
1049 1049 killdaemons(env['DAEMON_PIDS'])
1050 1050 return ret
1051 1051
1052 1052 output = ''
1053 1053 proc.tochild.close()
1054 1054
1055 1055 try:
1056 1056 output = proc.fromchild.read()
1057 1057 except KeyboardInterrupt:
1058 1058 vlog('# Handling keyboard interrupt')
1059 1059 cleanup()
1060 1060 raise
1061 1061
1062 1062 ret = proc.wait()
1063 1063 if wifexited(ret):
1064 1064 ret = os.WEXITSTATUS(ret)
1065 1065
1066 1066 if proc.timeout:
1067 1067 ret = 'timeout'
1068 1068
1069 1069 if ret:
1070 1070 killdaemons(env['DAEMON_PIDS'])
1071 1071
1072 1072 for s, r in replacements:
1073 1073 output = re.sub(s, r, output)
1074 1074 return ret, output.splitlines(True)
1075 1075
1076 1076 iolock = threading.RLock()
1077 1077
1078 1078 class SkipTest(Exception):
1079 1079 """Raised to indicate that a test is to be skipped."""
1080 1080
1081 1081 class IgnoreTest(Exception):
1082 1082 """Raised to indicate that a test is to be ignored."""
1083 1083
1084 1084 class WarnTest(Exception):
1085 1085 """Raised to indicate that a test warned."""
1086 1086
1087 1087 class TestResult(unittest._TextTestResult):
1088 1088 """Holds results when executing via unittest."""
1089 1089 # Don't worry too much about accessing the non-public _TextTestResult.
1090 1090 # It is relatively common in Python testing tools.
1091 1091 def __init__(self, options, *args, **kwargs):
1092 1092 super(TestResult, self).__init__(*args, **kwargs)
1093 1093
1094 1094 self._options = options
1095 1095
1096 1096 # unittest.TestResult didn't have skipped until 2.7. We need to
1097 1097 # polyfill it.
1098 1098 self.skipped = []
1099 1099
1100 1100 # We have a custom "ignored" result that isn't present in any Python
1101 1101 # unittest implementation. It is very similar to skipped. It may make
1102 1102 # sense to map it into skip some day.
1103 1103 self.ignored = []
1104 1104
1105 1105 # We have a custom "warned" result that isn't present in any Python
1106 1106 # unittest implementation. It is very similar to failed. It may make
1107 1107 # sense to map it into fail some day.
1108 1108 self.warned = []
1109 1109
1110 1110 self.times = []
1111 1111 self._started = {}
1112 1112 self._stopped = {}
1113 1113 # Data stored for the benefit of generating xunit reports.
1114 1114 self.successes = []
1115 1115 self.faildata = {}
1116 1116
1117 1117 def addFailure(self, test, reason):
1118 1118 self.failures.append((test, reason))
1119 1119
1120 1120 if self._options.first:
1121 1121 self.stop()
1122 1122 else:
1123 1123 iolock.acquire()
1124 1124 if not self._options.nodiff:
1125 1125 self.stream.write('\nERROR: %s output changed\n' % test)
1126 1126
1127 1127 self.stream.write('!')
1128 1128 self.stream.flush()
1129 1129 iolock.release()
1130 1130
1131 1131 def addSuccess(self, test):
1132 1132 iolock.acquire()
1133 1133 super(TestResult, self).addSuccess(test)
1134 1134 iolock.release()
1135 1135 self.successes.append(test)
1136 1136
1137 1137 def addError(self, test, err):
1138 1138 super(TestResult, self).addError(test, err)
1139 1139 if self._options.first:
1140 1140 self.stop()
1141 1141
1142 1142 # Polyfill.
1143 1143 def addSkip(self, test, reason):
1144 1144 self.skipped.append((test, reason))
1145 1145 iolock.acquire()
1146 1146 if self.showAll:
1147 1147 self.stream.writeln('skipped %s' % reason)
1148 1148 else:
1149 1149 self.stream.write('s')
1150 1150 self.stream.flush()
1151 1151 iolock.release()
1152 1152
1153 1153 def addIgnore(self, test, reason):
1154 1154 self.ignored.append((test, reason))
1155 1155 iolock.acquire()
1156 1156 if self.showAll:
1157 1157 self.stream.writeln('ignored %s' % reason)
1158 1158 else:
1159 1159 if reason != 'not retesting' and reason != "doesn't match keyword":
1160 1160 self.stream.write('i')
1161 1161 else:
1162 1162 self.testsRun += 1
1163 1163 self.stream.flush()
1164 1164 iolock.release()
1165 1165
1166 1166 def addWarn(self, test, reason):
1167 1167 self.warned.append((test, reason))
1168 1168
1169 1169 if self._options.first:
1170 1170 self.stop()
1171 1171
1172 1172 iolock.acquire()
1173 1173 if self.showAll:
1174 1174 self.stream.writeln('warned %s' % reason)
1175 1175 else:
1176 1176 self.stream.write('~')
1177 1177 self.stream.flush()
1178 1178 iolock.release()
1179 1179
1180 1180 def addOutputMismatch(self, test, ret, got, expected):
1181 1181 """Record a mismatch in test output for a particular test."""
1182 1182
1183 1183 accepted = False
1184 1184 failed = False
1185 1185 lines = []
1186 1186
1187 1187 iolock.acquire()
1188 1188 if self._options.nodiff:
1189 1189 pass
1190 1190 elif self._options.view:
1191 1191 os.system("%s %s %s" %
1192 1192 (self._options.view, test.refpath, test.errpath))
1193 1193 else:
1194 1194 failed, lines = getdiff(expected, got,
1195 1195 test.refpath, test.errpath)
1196 1196 if failed:
1197 1197 self.addFailure(test, 'diff generation failed')
1198 1198 else:
1199 1199 self.stream.write('\n')
1200 1200 for line in lines:
1201 1201 self.stream.write(line)
1202 1202 self.stream.flush()
1203 1203
1204 1204 # handle interactive prompt without releasing iolock
1205 1205 if self._options.interactive:
1206 1206 self.stream.write('Accept this change? [n] ')
1207 1207 answer = sys.stdin.readline().strip()
1208 1208 if answer.lower() in ('y', 'yes'):
1209 1209 if test.name.endswith('.t'):
1210 1210 rename(test.errpath, test.path)
1211 1211 else:
1212 1212 rename(test.errpath, '%s.out' % test.path)
1213 1213 accepted = True
1214 1214 if not accepted and not failed:
1215 1215 self.faildata[test.name] = ''.join(lines)
1216 1216 iolock.release()
1217 1217
1218 1218 return accepted
1219 1219
1220 1220 def startTest(self, test):
1221 1221 super(TestResult, self).startTest(test)
1222 1222
1223 1223 # os.times() reports the user and system time consumed by child
1224 1224 # processes, along with the real elapsed time of the process.
1225 1225 # Its one limitation: the child-time fields are only populated on
1226 1226 # Unix-like systems, not on Windows.
1227 1227 self._started[test.name] = os.times()
1228 1228
1229 1229 def stopTest(self, test, interrupted=False):
1230 1230 super(TestResult, self).stopTest(test)
1231 1231
1232 1232 self._stopped[test.name] = os.times()
1233 1233
1234 1234 starttime = self._started[test.name]
1235 1235 endtime = self._stopped[test.name]
1236 1236 self.times.append((test.name, endtime[2] - starttime[2],
1237 1237 endtime[3] - starttime[3], endtime[4] - starttime[4]))
1238 1238
1239 1239 del self._started[test.name]
1240 1240 del self._stopped[test.name]
1241 1241
1242 1242 if interrupted:
1243 1243 iolock.acquire()
1244 1244 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1245 1245 test.name, self.times[-1][3]))
1246 1246 iolock.release()
1247 1247
1248 1248 class TestSuite(unittest.TestSuite):
1249 1249 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1250 1250
1251 1251 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1252 1252 retest=False, keywords=None, loop=False,
1253 1253 *args, **kwargs):
1254 1254 """Create a new instance that can run tests with a configuration.
1255 1255
1256 1256 testdir specifies the directory where tests are executed from. This
1257 1257 is typically the ``tests`` directory from Mercurial's source
1258 1258 repository.
1259 1259
1260 1260 jobs specifies the number of jobs to run concurrently. Each test
1261 1261 executes on its own thread. Tests actually spawn new processes, so
1262 1262 state mutation should not be an issue.
1263 1263
1264 1264 whitelist and blacklist denote tests that have been whitelisted and
1265 1265 blacklisted, respectively. These arguments don't belong in TestSuite.
1266 1266 Instead, whitelist and blacklist should be handled by the thing that
1267 1267 populates the TestSuite with tests. They are present to preserve
1268 1268 backwards compatible behavior which reports skipped tests as part
1269 1269 of the results.
1270 1270
1271 1271 retest denotes whether to retest failed tests. This arguably belongs
1272 1272 outside of TestSuite.
1273 1273
1274 1274 keywords denotes key words that will be used to filter which tests
1275 1275 to execute. This arguably belongs outside of TestSuite.
1276 1276
1277 1277 loop denotes whether to loop over tests forever.
1278 1278 """
1279 1279 super(TestSuite, self).__init__(*args, **kwargs)
1280 1280
1281 1281 self._jobs = jobs
1282 1282 self._whitelist = whitelist
1283 1283 self._blacklist = blacklist
1284 1284 self._retest = retest
1285 1285 self._keywords = keywords
1286 1286 self._loop = loop
1287 1287
1288 1288 def run(self, result):
1289 1289 # We have a number of filters that need to be applied. We do this
1290 1290 # here instead of inside Test because it makes the running logic for
1291 1291 # Test simpler.
1292 1292 tests = []
1293 1293 for test in self._tests:
1294 1294 if not os.path.exists(test.path):
1295 1295 result.addSkip(test, "Doesn't exist")
1296 1296 continue
1297 1297
1298 1298 if not (self._whitelist and test.name in self._whitelist):
1299 1299 if self._blacklist and test.name in self._blacklist:
1300 1300 result.addSkip(test, 'blacklisted')
1301 1301 continue
1302 1302
1303 1303 if self._retest and not os.path.exists(test.errpath):
1304 1304 result.addIgnore(test, 'not retesting')
1305 1305 continue
1306 1306
1307 1307 if self._keywords:
1308 1308 f = open(test.path, 'rb')
1309 1309 t = f.read().lower() + test.name.lower()
1310 1310 f.close()
1311 1311 ignored = False
1312 1312 for k in self._keywords.lower().split():
1313 1313 if k not in t:
1314 1314 result.addIgnore(test, "doesn't match keyword")
1315 1315 ignored = True
1316 1316 break
1317 1317
1318 1318 if ignored:
1319 1319 continue
1320 1320
1321 1321 tests.append(test)
1322 1322
1323 1323 runtests = list(tests)
1324 1324 done = queue.Queue()
1325 1325 running = 0
1326 1326
1327 1327 def job(test, result):
1328 1328 try:
1329 1329 test(result)
1330 1330 done.put(None)
1331 1331 except KeyboardInterrupt:
1332 1332 pass
1333 1333 except: # re-raises
1334 1334 done.put(('!', test, 'run-test raised an error, see traceback'))
1335 1335 raise
1336 1336
1337 1337 try:
1338 1338 while tests or running:
1339 1339 if not done.empty() or running == self._jobs or not tests:
1340 1340 try:
1341 1341 done.get(True, 1)
1342 1342 if result and result.shouldStop:
1343 1343 break
1344 1344 except queue.Empty:
1345 1345 continue
1346 1346 running -= 1
1347 1347 if tests and not running == self._jobs:
1348 1348 test = tests.pop(0)
1349 1349 if self._loop:
1350 1350 tests.append(test)
1351 1351 t = threading.Thread(target=job, name=test.name,
1352 1352 args=(test, result))
1353 1353 t.start()
1354 1354 running += 1
1355 1355 except KeyboardInterrupt:
1356 1356 for test in runtests:
1357 1357 test.abort()
1358 1358
1359 1359 return result
1360 1360
1361 1361 class TextTestRunner(unittest.TextTestRunner):
1362 1362 """Custom unittest test runner that uses appropriate settings."""
1363 1363
1364 1364 def __init__(self, runner, *args, **kwargs):
1365 1365 super(TextTestRunner, self).__init__(*args, **kwargs)
1366 1366
1367 1367 self._runner = runner
1368 1368
1369 1369 def run(self, test):
1370 1370 result = TestResult(self._runner.options, self.stream,
1371 1371 self.descriptions, self.verbosity)
1372 1372
1373 1373 test(result)
1374 1374
1375 1375 failed = len(result.failures)
1376 1376 warned = len(result.warned)
1377 1377 skipped = len(result.skipped)
1378 1378 ignored = len(result.ignored)
1379 1379
1380 1380 iolock.acquire()
1381 1381 self.stream.writeln('')
1382 1382
1383 1383 if not self._runner.options.noskips:
1384 1384 for test, msg in result.skipped:
1385 1385 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1386 1386 for test, msg in result.warned:
1387 1387 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1388 1388 for test, msg in result.failures:
1389 1389 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1390 1390 for test, msg in result.errors:
1391 1391 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1392 1392
1393 1393 if self._runner.options.xunit:
1394 1394 xuf = open(self._runner.options.xunit, 'wb')
1395 1395 try:
1396 1396 timesd = dict(
1397 1397 (test, real) for test, cuser, csys, real in result.times)
1398 1398 doc = minidom.Document()
1399 1399 s = doc.createElement('testsuite')
1400 1400 s.setAttribute('name', 'run-tests')
1401 1401 s.setAttribute('tests', str(result.testsRun))
1402 1402 s.setAttribute('errors', "0") # TODO
1403 1403 s.setAttribute('failures', str(failed))
1404 1404 s.setAttribute('skipped', str(skipped + ignored))
1405 1405 doc.appendChild(s)
1406 1406 for tc in result.successes:
1407 1407 t = doc.createElement('testcase')
1408 1408 t.setAttribute('name', tc.name)
1409 1409 t.setAttribute('time', '%.3f' % timesd[tc.name])
1410 1410 s.appendChild(t)
1411 1411 for tc, err in sorted(result.faildata.iteritems()):
1412 1412 t = doc.createElement('testcase')
1413 1413 t.setAttribute('name', tc)
1414 1414 t.setAttribute('time', '%.3f' % timesd[tc])
1415 1415 cd = doc.createCDATASection(cdatasafe(err))
1416 1416 t.appendChild(cd)
1417 1417 s.appendChild(t)
1418 1418 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1419 1419 finally:
1420 1420 xuf.close()
1421 1421
1422 1422 self._runner._checkhglib('Tested')
1423 1423
1424 1424 self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
1425 1425 % (result.testsRun,
1426 1426 skipped + ignored, warned, failed))
1427 1427 if failed:
1428 1428 self.stream.writeln('python hash seed: %s' %
1429 1429 os.environ['PYTHONHASHSEED'])
1430 1430 if self._runner.options.time:
1431 1431 self.printtimes(result.times)
1432 1432
1433 1433 iolock.release()
1434 1434
1435 1435 return result
1436 1436
1437 1437 def printtimes(self, times):
1438 1438 # iolock held by run
1439 1439 self.stream.writeln('# Producing time report')
1440 1440 times.sort(key=lambda t: (t[3]))
1441 1441 cols = '%7.3f %7.3f %7.3f %s'
1442 1442 self.stream.writeln('%-7s %-7s %-7s %s' % ('cuser', 'csys', 'real',
1443 1443 'Test'))
1444 1444 for test, cuser, csys, real in times:
1445 1445 self.stream.writeln(cols % (cuser, csys, real, test))
1446 1446
1447 1447 class TestRunner(object):
1448 1448 """Holds context for executing tests.
1449 1449
1450 1450 Tests rely on a lot of state. This object holds it for them.
1451 1451 """
1452 1452
1453 1453 # Programs required to run tests.
1454 1454 REQUIREDTOOLS = [
1455 1455 os.path.basename(sys.executable),
1456 1456 'diff',
1457 1457 'grep',
1458 1458 'unzip',
1459 1459 'gunzip',
1460 1460 'bunzip2',
1461 1461 'sed',
1462 1462 ]
1463 1463
1464 1464 # Maps file extensions to test class.
1465 1465 TESTTYPES = [
1466 1466 ('.py', PythonTest),
1467 1467 ('.t', TTest),
1468 1468 ]
1469 1469
1470 1470 def __init__(self):
1471 1471 self.options = None
1472 1472 self._testdir = None
1473 1473 self._hgtmp = None
1474 1474 self._installdir = None
1475 1475 self._bindir = None
1476 1476 self._tmpbinddir = None
1477 1477 self._pythondir = None
1478 1478 self._coveragefile = None
1479 1479 self._createdfiles = []
1480 1480 self._hgpath = None
1481 1481
1482 1482 def run(self, args, parser=None):
1483 1483 """Run the test suite."""
1484 1484 oldmask = os.umask(022)
1485 1485 try:
1486 1486 parser = parser or getparser()
1487 1487 options, args = parseargs(args, parser)
1488 1488 self.options = options
1489 1489
1490 1490 self._checktools()
1491 1491 tests = self.findtests(args)
1492 1492 return self._run(tests)
1493 1493 finally:
1494 1494 os.umask(oldmask)
1495 1495
1496 1496 def _run(self, tests):
1497 1497 if self.options.random:
1498 1498 random.shuffle(tests)
1499 1499 else:
1500 1500 # keywords for slow tests
1501 1501 slow = 'svn gendoc check-code-hg'.split()
1502 1502 def sortkey(f):
1503 1503 # run largest tests first, as they tend to take the longest
1504 1504 try:
1505 1505 val = -os.stat(f).st_size
1506 1506 except OSError, e:
1507 1507 if e.errno != errno.ENOENT:
1508 1508 raise
1509 1509 return -1e9 # file does not exist, tell early
1510 1510 for kw in slow:
1511 1511 if kw in f:
1512 1512 val *= 10
1513 1513 return val
1514 1514 tests.sort(key=sortkey)
1515 1515
1516 1516 self._testdir = os.environ['TESTDIR'] = os.getcwd()
1517 1517
1518 1518 if 'PYTHONHASHSEED' not in os.environ:
1519 1519 # use a random python hash seed all the time
1520 1520 # we do the randomness ourselves so we know which seed is used
1521 1521 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1522 1522
1523 1523 if self.options.tmpdir:
1524 1524 self.options.keep_tmpdir = True
1525 1525 tmpdir = self.options.tmpdir
1526 1526 if os.path.exists(tmpdir):
1527 1527 # Meaning of tmpdir has changed since 1.3: we used to create
1528 1528 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1529 1529 # tmpdir already exists.
1530 1530 print "error: temp dir %r already exists" % tmpdir
1531 1531 return 1
1532 1532
1533 1533 # Automatically removing tmpdir sounds convenient, but could
1534 1534 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1535 1535 # or "--tmpdir=$HOME".
1536 1536 #vlog("# Removing temp dir", tmpdir)
1537 1537 #shutil.rmtree(tmpdir)
1538 1538 os.makedirs(tmpdir)
1539 1539 else:
1540 1540 d = None
1541 1541 if os.name == 'nt':
1542 1542 # without this, we get the default temp dir location, but
1543 1543 # in all lowercase, which causes troubles with paths (issue3490)
1544 1544 d = os.getenv('TMP')
1545 1545 tmpdir = tempfile.mkdtemp('', 'hgtests.', d)
1546 1546 self._hgtmp = os.environ['HGTMP'] = os.path.realpath(tmpdir)
1547 1547
1548 1548 if self.options.with_hg:
1549 1549 self._installdir = None
1550 1550 self._bindir = os.path.dirname(os.path.realpath(
1551 1551 self.options.with_hg))
1552 1552 self._tmpbindir = os.path.join(self._hgtmp, 'install', 'bin')
1553 1553 os.makedirs(self._tmpbindir)
1554 1554
1555 1555 # This looks redundant with how Python initializes sys.path from
1556 1556 # the location of the script being executed. Needed because the
1557 1557 # "hg" specified by --with-hg is not the only Python script
1558 1558 # executed in the test suite that needs to import 'mercurial'
1559 1559 # ... which means it's not really redundant at all.
1560 1560 self._pythondir = self._bindir
1561 1561 else:
1562 1562 self._installdir = os.path.join(self._hgtmp, "install")
1563 1563 self._bindir = os.environ["BINDIR"] = \
1564 1564 os.path.join(self._installdir, "bin")
1565 1565 self._tmpbindir = self._bindir
1566 1566 self._pythondir = os.path.join(self._installdir, "lib", "python")
1567 1567
1568 1568 os.environ["BINDIR"] = self._bindir
1569 1569 os.environ["PYTHON"] = PYTHON
1570 1570
1571 1571 path = [self._bindir] + os.environ["PATH"].split(os.pathsep)
1572 1572 if self._tmpbindir != self._bindir:
1573 1573 path = [self._tmpbindir] + path
1574 1574 os.environ["PATH"] = os.pathsep.join(path)
1575 1575
1576 1576 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1577 1577 # can run .../tests/run-tests.py test-foo where test-foo
1578 1578 # adds an extension to HGRC. Also include run-tests.py's directory to
1579 1579 # import modules like heredoctest.
1580 1580 pypath = [self._pythondir, self._testdir,
1581 1581 os.path.abspath(os.path.dirname(__file__))]
1582 1582 # We have to augment PYTHONPATH, rather than simply replacing
1583 1583 # it, in case external libraries are only available via current
1584 1584 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1585 1585 # are in /opt/subversion.)
1586 1586 oldpypath = os.environ.get(IMPL_PATH)
1587 1587 if oldpypath:
1588 1588 pypath.append(oldpypath)
1589 1589 os.environ[IMPL_PATH] = os.pathsep.join(pypath)
1590 1590
1591 1591 self._coveragefile = os.path.join(self._testdir, '.coverage')
1592 1592
1593 1593 vlog("# Using TESTDIR", self._testdir)
1594 1594 vlog("# Using HGTMP", self._hgtmp)
1595 1595 vlog("# Using PATH", os.environ["PATH"])
1596 1596 vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])
1597 1597
1598 1598 try:
1599 1599 return self._runtests(tests) or 0
1600 1600 finally:
1601 1601 time.sleep(.1)
1602 1602 self._cleanup()
1603 1603
1604 1604 def findtests(self, args):
1605 1605 """Finds possible test files from arguments.
1606 1606
1607 1607 If you wish to inject custom tests into the test harness, this would
1608 1608 be a good function to monkeypatch or override in a derived class.
1609 1609 """
1610 1610 if not args:
1611 1611 if self.options.changed:
1612 1612 proc = Popen4('hg st --rev "%s" -man0 .' %
1613 1613 self.options.changed, None, 0)
1614 1614 stdout, stderr = proc.communicate()
1615 1615 args = stdout.strip('\0').split('\0')
1616 1616 else:
1617 1617 args = os.listdir('.')
1618 1618
1619 1619 return [t for t in args
1620 1620 if os.path.basename(t).startswith('test-')
1621 1621 and (t.endswith('.py') or t.endswith('.t'))]
1622 1622
1623 1623 def _runtests(self, tests):
1624 1624 try:
1625 1625 if self._installdir:
1626 1626 self._installhg()
1627 1627 self._checkhglib("Testing")
1628 1628 else:
1629 1629 self._usecorrectpython()
1630 1630
1631 1631 if self.options.restart:
1632 1632 orig = list(tests)
1633 1633 while tests:
1634 1634 if os.path.exists(tests[0] + ".err"):
1635 1635 break
1636 1636 tests.pop(0)
1637 1637 if not tests:
1638 1638 print "running all tests"
1639 1639 tests = orig
1640 1640
1641 1641 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
1642 1642
1643 1643 failed = False
1644 1644 warned = False
1645 1645
1646 1646 suite = TestSuite(self._testdir,
1647 1647 jobs=self.options.jobs,
1648 1648 whitelist=self.options.whitelisted,
1649 1649 blacklist=self.options.blacklist,
1650 1650 retest=self.options.retest,
1651 1651 keywords=self.options.keywords,
1652 1652 loop=self.options.loop,
1653 1653 tests=tests)
1654 1654 verbosity = 1
1655 1655 if self.options.verbose:
1656 1656 verbosity = 2
1657 1657 runner = TextTestRunner(self, verbosity=verbosity)
1658 1658 result = runner.run(suite)
1659 1659
1660 1660 if result.failures:
1661 1661 failed = True
1662 1662 if result.warned:
1663 1663 warned = True
1664 1664
1665 1665 if self.options.anycoverage:
1666 1666 self._outputcoverage()
1667 1667 except KeyboardInterrupt:
1668 1668 failed = True
1669 1669 print "\ninterrupted!"
1670 1670
1671 1671 if failed:
1672 1672 return 1
1673 1673 if warned:
1674 1674 return 80
1675 1675
1676 1676 def _gettest(self, test, count):
1677 1677 """Obtain a Test by looking at its filename.
1678 1678
1679 1679 Returns a Test instance. The Test may not be runnable if it doesn't
1680 1680 map to a known type.
1681 1681 """
1682 1682 lctest = test.lower()
1683 1683 testcls = Test
1684 1684
1685 1685 for ext, cls in self.TESTTYPES:
1686 1686 if lctest.endswith(ext):
1687 1687 testcls = cls
1688 1688 break
1689 1689
1690 1690 refpath = os.path.join(self._testdir, test)
1691 1691 tmpdir = os.path.join(self._hgtmp, 'child%d' % count)
1692 1692
1693 1693 return testcls(refpath, tmpdir,
1694 1694 keeptmpdir=self.options.keep_tmpdir,
1695 1695 debug=self.options.debug,
1696 1696 timeout=self.options.timeout,
1697 1697 startport=self.options.port + count * 3,
1698 1698 extraconfigopts=self.options.extra_config_opt,
1699 1699 py3kwarnings=self.options.py3k_warnings,
1700 1700 shell=self.options.shell)
1701 1701
1702 1702 def _cleanup(self):
1703 1703 """Clean up state from this test invocation."""
1704 1704
1705 1705 if self.options.keep_tmpdir:
1706 1706 return
1707 1707
1708 1708 vlog("# Cleaning up HGTMP", self._hgtmp)
1709 1709 shutil.rmtree(self._hgtmp, True)
1710 1710 for f in self._createdfiles:
1711 1711 try:
1712 1712 os.remove(f)
1713 1713 except OSError:
1714 1714 pass
1715 1715
1716 1716 def _usecorrectpython(self):
1717 1717 """Configure the environment to use the appropriate Python in tests."""
1718 1718 # Tests must use the same interpreter as us or bad things will happen.
1719 1719 pyexename = sys.platform == 'win32' and 'python.exe' or 'python'
1720 1720 if getattr(os, 'symlink', None):
1721 1721 vlog("# Making python executable in test path a symlink to '%s'" %
1722 1722 sys.executable)
1723 1723 mypython = os.path.join(self._tmpbindir, pyexename)
1724 1724 try:
1725 1725 if os.readlink(mypython) == sys.executable:
1726 1726 return
1727 1727 os.unlink(mypython)
1728 1728 except OSError, err:
1729 1729 if err.errno != errno.ENOENT:
1730 1730 raise
1731 1731 if self._findprogram(pyexename) != sys.executable:
1732 1732 try:
1733 1733 os.symlink(sys.executable, mypython)
1734 1734 self._createdfiles.append(mypython)
1735 1735 except OSError, err:
1736 1736 # child processes may race, which is harmless
1737 1737 if err.errno != errno.EEXIST:
1738 1738 raise
1739 1739 else:
1740 1740 exedir, exename = os.path.split(sys.executable)
1741 1741 vlog("# Modifying search path to find %s as %s in '%s'" %
1742 1742 (exename, pyexename, exedir))
1743 1743 path = os.environ['PATH'].split(os.pathsep)
1744 1744 while exedir in path:
1745 1745 path.remove(exedir)
1746 1746 os.environ['PATH'] = os.pathsep.join([exedir] + path)
1747 1747 if not self._findprogram(pyexename):
1748 1748 print "WARNING: Cannot find %s in search path" % pyexename
1749 1749
1750 1750 def _installhg(self):
1751 1751 """Install hg into the test environment.
1752 1752
1753 1753 This will also configure hg with the appropriate testing settings.
1754 1754 """
1755 1755 vlog("# Performing temporary installation of HG")
1756 1756 installerrs = os.path.join("tests", "install.err")
1757 1757 compiler = ''
1758 1758 if self.options.compiler:
1759 1759 compiler = '--compiler ' + self.options.compiler
1760 1760 pure = self.options.pure and "--pure" or ""
1761 1761 py3 = ''
1762 1762 if sys.version_info[0] == 3:
1763 1763 py3 = '--c2to3'
1764 1764
1765 1765 # Run installer in hg root
1766 1766 script = os.path.realpath(sys.argv[0])
1767 1767 hgroot = os.path.dirname(os.path.dirname(script))
1768 1768 os.chdir(hgroot)
1769 1769 nohome = '--home=""'
1770 1770 if os.name == 'nt':
1771 1771             # The --home="" trick works only on OSes where os.sep == '/'
1772 1772 # because of a distutils convert_path() fast-path. Avoid it at
1773 1773 # least on Windows for now, deal with .pydistutils.cfg bugs
1774 1774 # when they happen.
1775 1775 nohome = ''
1776 1776 cmd = ('%(exe)s setup.py %(py3)s %(pure)s clean --all'
1777 1777 ' build %(compiler)s --build-base="%(base)s"'
1778 1778 ' install --force --prefix="%(prefix)s"'
1779 1779 ' --install-lib="%(libdir)s"'
1780 1780 ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
1781 1781 % {'exe': sys.executable, 'py3': py3, 'pure': pure,
1782 1782 'compiler': compiler,
1783 1783 'base': os.path.join(self._hgtmp, "build"),
1784 1784 'prefix': self._installdir, 'libdir': self._pythondir,
1785 1785 'bindir': self._bindir,
1786 1786 'nohome': nohome, 'logfile': installerrs})
1787 1787 vlog("# Running", cmd)
1788 1788 if os.system(cmd) == 0:
1789 1789 if not self.options.verbose:
1790 1790 os.remove(installerrs)
1791 1791 else:
1792 1792 f = open(installerrs, 'rb')
1793 1793 for line in f:
1794 1794 print line
1795 1795 f.close()
1796 1796 sys.exit(1)
1797 1797 os.chdir(self._testdir)
1798 1798
1799 1799 self._usecorrectpython()
1800 1800
1801 1801 if self.options.py3k_warnings and not self.options.anycoverage:
1802 1802 vlog("# Updating hg command to enable Py3k Warnings switch")
1803 1803 f = open(os.path.join(self._bindir, 'hg'), 'rb')
1804 1804 lines = [line.rstrip() for line in f]
1805 1805 lines[0] += ' -3'
1806 1806 f.close()
1807 1807 f = open(os.path.join(self._bindir, 'hg'), 'wb')
1808 1808 for line in lines:
1809 1809 f.write(line + '\n')
1810 1810 f.close()
1811 1811
1812 1812 hgbat = os.path.join(self._bindir, 'hg.bat')
1813 1813 if os.path.isfile(hgbat):
1814 1814 # hg.bat expects to be put in bin/scripts while run-tests.py
1815 1815             # installation layout puts it in bin/ directly. Fix it
1816 1816 f = open(hgbat, 'rb')
1817 1817 data = f.read()
1818 1818 f.close()
1819 1819 if '"%~dp0..\python" "%~dp0hg" %*' in data:
1820 1820 data = data.replace('"%~dp0..\python" "%~dp0hg" %*',
1821 1821 '"%~dp0python" "%~dp0hg" %*')
1822 1822 f = open(hgbat, 'wb')
1823 1823 f.write(data)
1824 1824 f.close()
1825 1825 else:
1826 1826 print 'WARNING: cannot fix hg.bat reference to python.exe'
1827 1827
1828 1828 if self.options.anycoverage:
1829 1829 custom = os.path.join(self._testdir, 'sitecustomize.py')
1830 1830 target = os.path.join(self._pythondir, 'sitecustomize.py')
1831 1831 vlog('# Installing coverage trigger to %s' % target)
1832 1832 shutil.copyfile(custom, target)
1833 1833 rc = os.path.join(self._testdir, '.coveragerc')
1834 1834 vlog('# Installing coverage rc to %s' % rc)
1835 1835 os.environ['COVERAGE_PROCESS_START'] = rc
1836 1836 fn = os.path.join(self._installdir, '..', '.coverage')
1837 1837 os.environ['COVERAGE_FILE'] = fn
1838 1838
1839 1839 def _checkhglib(self, verb):
1840 1840 """Ensure that the 'mercurial' package imported by python is
1841 1841 the one we expect it to be. If not, print a warning to stderr."""
1842 1842 if ((self._bindir == self._pythondir) and
1843 1843 (self._bindir != self._tmpbindir)):
1844 1844             # The pythondir has been inferred from the --with-hg flag.
1845 1845 # We cannot expect anything sensible here
1846 1846 return
1847 1847 expecthg = os.path.join(self._pythondir, 'mercurial')
1848 1848 actualhg = self._gethgpath()
1849 1849 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
1850 1850 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
1851 1851 ' (expected %s)\n'
1852 1852 % (verb, actualhg, expecthg))
1853 1853 def _gethgpath(self):
1854 1854 """Return the path to the mercurial package that is actually found by
1855 1855 the current Python interpreter."""
1856 1856 if self._hgpath is not None:
1857 1857 return self._hgpath
1858 1858
1859 1859 cmd = '%s -c "import mercurial; print (mercurial.__path__[0])"'
1860 1860 pipe = os.popen(cmd % PYTHON)
1861 1861 try:
1862 1862 self._hgpath = pipe.read().strip()
1863 1863 finally:
1864 1864 pipe.close()
1865 1865
1866 1866 return self._hgpath
1867 1867
1868 1868 def _outputcoverage(self):
1869 1869 """Produce code coverage output."""
1870 1870 vlog('# Producing coverage report')
1871 1871 os.chdir(self._pythondir)
1872 1872
1873 1873 def covrun(*args):
1874 1874 cmd = 'coverage %s' % ' '.join(args)
1875 1875 vlog('# Running: %s' % cmd)
1876 1876 os.system(cmd)
1877 1877
1878 1878 covrun('-c')
1879 1879 omit = ','.join(os.path.join(x, '*') for x in
1880 1880 [self._bindir, self._testdir])
1881 1881 covrun('-i', '-r', '"--omit=%s"' % omit) # report
1882 1882 if self.options.htmlcov:
1883 1883 htmldir = os.path.join(self._testdir, 'htmlcov')
1884 1884 covrun('-i', '-b', '"--directory=%s"' % htmldir,
1885 1885 '"--omit=%s"' % omit)
1886 1886 if self.options.annotate:
1887 1887 adir = os.path.join(self._testdir, 'annotated')
1888 1888 if not os.path.isdir(adir):
1889 1889 os.mkdir(adir)
1890 1890 covrun('-i', '-a', '"--directory=%s"' % adir, '"--omit=%s"' % omit)
1891 1891
1892 1892 def _findprogram(self, program):
1893 1893         """Search PATH for an executable program"""
1894 1894 for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
1895 1895 name = os.path.join(p, program)
1896 1896 if os.name == 'nt' or os.access(name, os.X_OK):
1897 1897 return name
1898 1898 return None
1899 1899
1900 1900 def _checktools(self):
1901 1901 """Ensure tools required to run tests are present."""
1902 1902 for p in self.REQUIREDTOOLS:
1903 1903 if os.name == 'nt' and not p.endswith('.exe'):
1904 1904 p += '.exe'
1905 1905 found = self._findprogram(p)
1906 1906 if found:
1907 1907 vlog("# Found prerequisite", p, "at", found)
1908 1908 else:
1909 1909 print "WARNING: Did not find prerequisite tool: %s " % p
1910 1910
1911 1911 if __name__ == '__main__':
1912 1912 runner = TestRunner()
1913 1913
1914 1914 try:
1915 1915 import msvcrt
1916 1916 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
1917 1917 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
1918 1918 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
1919 1919 except ImportError:
1920 1920 pass
1921 1921
1922 1922 sys.exit(runner.run(sys.argv[1:]))
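
The findtests docstring above suggests overriding it in a derived class when custom tests need to be injected into the harness. A minimal sketch of what such a subclass could look like, assuming run-tests.py is loaded via imp.load_source; the CustomRunner name and the extra-tests subdirectory are illustrative, not part of this changeset:

    import imp
    import os
    import sys

    # the file name contains a hyphen, so load it under an importable name
    runtests = imp.load_source('runtests', 'run-tests.py')

    class CustomRunner(runtests.TestRunner):
        def findtests(self, args):
            # keep the stock discovery of test-*.t and test-*.py files ...
            tests = runtests.TestRunner.findtests(self, args)
            # ... and also pick up tests kept in a subdirectory of tests/
            extradir = 'extra-tests'  # illustrative location
            extra = [os.path.join(extradir, t) for t in os.listdir(extradir)
                     if t.startswith('test-')
                     and (t.endswith('.t') or t.endswith('.py'))]
            return tests + extra

    if __name__ == '__main__':
        sys.exit(CustomRunner().run(sys.argv[1:]))
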
@@ -1,359 +1,359 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Smoke test
4 4 ============
5 5
6 6 $ $TESTDIR/run-tests.py
7 7
8 8 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
9 9
10 10 a successful test
11 11 =======================
12 12
13 13 $ cat > test-success.t << EOF
14 14 > $ echo babar
15 15 > babar
16 16 > $ echo xyzzy
17 17 > xyzzy
18 18 > EOF
19 19
20 20 $ $TESTDIR/run-tests.py --with-hg=`which hg`
21 21 .
22 22 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
23 23
24 24 failing test
25 25 ==================
26 26
27 27 $ cat > test-failure.t << EOF
28 28 > $ echo babar
29 29 > rataxes
30 30 > This is a noop statement so that
31 31 > this test is still more bytes than success.
32 32 > EOF
33 33
34 34 $ $TESTDIR/run-tests.py --with-hg=`which hg`
35 35
36 36 --- $TESTTMP/test-failure.t (glob)
37 37 +++ $TESTTMP/test-failure.t.err (glob)
38 38 @@ -1,4 +1,4 @@
39 39 $ echo babar
40 40 - rataxes
41 41 + babar
42 42 This is a noop statement so that
43 43 this test is still more bytes than success.
44 44
45 45 ERROR: test-failure.t output changed
46 46 !.
47 47 Failed test-failure.t: output changed
48 48 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
49 49 python hash seed: * (glob)
50 50 [1]
51 51 test --xunit support
52 52 $ $TESTDIR/run-tests.py --with-hg=`which hg` --xunit=xunit.xml
53 53
54 54 --- $TESTTMP/test-failure.t
55 55 +++ $TESTTMP/test-failure.t.err
56 56 @@ -1,4 +1,4 @@
57 57 $ echo babar
58 58 - rataxes
59 59 + babar
60 60 This is a noop statement so that
61 61 this test is still more bytes than success.
62 62
63 63 ERROR: test-failure.t output changed
64 64 !.
65 65 Failed test-failure.t: output changed
66 66 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
67 67 python hash seed: * (glob)
68 68 [1]
69 69 $ cat xunit.xml
70 70 <?xml version="1.0" encoding="utf-8"?>
71 71 <testsuite errors="0" failures="1" name="run-tests" skipped="0" tests="2">
72 72 <testcase name="test-success.t" time="*"/> (glob)
73 73 <testcase name="test-failure.t" time="*"> (glob)
74 74 <![CDATA[--- $TESTTMP/test-failure.t
75 75 +++ $TESTTMP/test-failure.t.err
76 76 @@ -1,4 +1,4 @@
77 77 $ echo babar
78 78 - rataxes
79 79 + babar
80 80 This is a noop statement so that
81 81 this test is still more bytes than success.
82 82 ]]> </testcase>
83 83 </testsuite>
84 84
85 85 test for --retest
86 86 ====================
87 87
88 88 $ $TESTDIR/run-tests.py --with-hg=`which hg` --retest
89 89
90 90 --- $TESTTMP/test-failure.t (glob)
91 91 +++ $TESTTMP/test-failure.t.err (glob)
92 92 @@ -1,4 +1,4 @@
93 93 $ echo babar
94 94 - rataxes
95 95 + babar
96 96 This is a noop statement so that
97 97 this test is still more bytes than success.
98 98
99 99 ERROR: test-failure.t output changed
100 100 !
101 101 Failed test-failure.t: output changed
102 102 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
103 103 python hash seed: * (glob)
104 104 [1]
105 105
106 106 Selecting Tests To Run
107 107 ======================
108 108
109 109 successful
110 110
111 111 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t
112 112 .
113 113 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
114 114
115 115 success w/ keyword
116 116 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k xyzzy
117 117 .
118 118 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
119 119
120 120 failed
121 121
122 122 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-failure.t
123 123
124 124 --- $TESTTMP/test-failure.t (glob)
125 125 +++ $TESTTMP/test-failure.t.err (glob)
126 126 @@ -1,4 +1,4 @@
127 127 $ echo babar
128 128 - rataxes
129 129 + babar
130 130 This is a noop statement so that
131 131 this test is still more bytes than success.
132 132
133 133 ERROR: test-failure.t output changed
134 134 !
135 135 Failed test-failure.t: output changed
136 136 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
137 137 python hash seed: * (glob)
138 138 [1]
139 139
140 140 failure w/ keyword
141 141 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k rataxes
142 142
143 143 --- $TESTTMP/test-failure.t
144 144 +++ $TESTTMP/test-failure.t.err
145 145 @@ -1,4 +1,4 @@
146 146 $ echo babar
147 147 - rataxes
148 148 + babar
149 149 This is a noop statement so that
150 150 this test is still more bytes than success.
151 151
152 152 ERROR: test-failure.t output changed
153 153 !
154 154 Failed test-failure.t: output changed
155 155 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
156 156 python hash seed: * (glob)
157 157 [1]
158 158
159 159 Running In Debug Mode
160 160 ======================
161 161
162 162 $ $TESTDIR/run-tests.py --with-hg=`which hg` --debug 2>&1 | grep -v pwd
163 163 + echo SALT* 0 0 (glob)
164 164 SALT* 0 0 (glob)
165 165 + echo babar
166 166 babar
167 167 + echo SALT* 4 0 (glob)
168 168 SALT* 4 0 (glob)
169 169 .+ echo SALT* 0 0 (glob)
170 170 SALT* 0 0 (glob)
171 171 + echo babar
172 172 babar
173 173 + echo SALT* 2 0 (glob)
174 174 SALT* 2 0 (glob)
175 175 + echo xyzzy
176 176 xyzzy
177 177 + echo SALT* 4 0 (glob)
178 178 SALT* 4 0 (glob)
179 179 .
180 180 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
181 181
182 182 Parallel runs
183 183 ==============
184 184
185 185 (duplicate the failing test to get predictable output)
186 186 $ cp test-failure.t test-failure-copy.t
187 187
188 188 $ $TESTDIR/run-tests.py --with-hg=`which hg` --jobs 2 test-failure*.t
189 189
190 190 --- $TESTTMP/test-failure*.t (glob)
191 191 +++ $TESTTMP/test-failure*.t.err (glob)
192 192 @@ -1,4 +1,4 @@
193 193 $ echo babar
194 194 - rataxes
195 195 + babar
196 196 This is a noop statement so that
197 197 this test is still more bytes than success.
198 198
199 199 ERROR: test-failure*.t output changed (glob)
200 200 !
201 201 --- $TESTTMP/test-failure*.t (glob)
202 202 +++ $TESTTMP/test-failure*.t.err (glob)
203 203 @@ -1,4 +1,4 @@
204 204 $ echo babar
205 205 - rataxes
206 206 + babar
207 207 This is a noop statement so that
208 208 this test is still more bytes than success.
209 209
210 210 ERROR: test-failure*.t output changed (glob)
211 211 !
212 212 Failed test-failure*.t: output changed (glob)
213 213 Failed test-failure*.t: output changed (glob)
214 214 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
215 215 python hash seed: * (glob)
216 216 [1]
217 217
218 218 (delete the duplicated test file)
219 219 $ rm test-failure-copy.t
220 220
221 221
222 222 Interactive run
223 223 ===============
224 224
225 225 (backup the failing test)
226 226 $ cp test-failure.t backup
227 227
228 228 Refuse the fix
229 229
230 230 $ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i
231 231
232 232 --- $TESTTMP/test-failure.t
233 233 +++ $TESTTMP/test-failure.t.err
234 234 @@ -1,4 +1,4 @@
235 235 $ echo babar
236 236 - rataxes
237 237 + babar
238 238 This is a noop statement so that
239 239 this test is still more bytes than success.
240 240 Accept this change? [n]
241 241 ERROR: test-failure.t output changed
242 242 !.
243 243 Failed test-failure.t: output changed
244 244 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
245 245 python hash seed: * (glob)
246 246 [1]
247 247
248 248 $ cat test-failure.t
249 249 $ echo babar
250 250 rataxes
251 251 This is a noop statement so that
252 252 this test is still more bytes than success.
253 253
254 254 View the fix
255 255
256 256 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` --view echo
257 257 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
258 258
259 259 ERROR: test-failure.t output changed
260 260 !.
261 261 Failed test-failure.t: output changed
262 262 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
263 263 python hash seed: * (glob)
264 264 [1]
265 265
266 266 Accept the fix
267 267
268 268 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` -i
269 269
270 270 --- $TESTTMP/test-failure.t
271 271 +++ $TESTTMP/test-failure.t.err
272 272 @@ -1,4 +1,4 @@
273 273 $ echo babar
274 274 - rataxes
275 275 + babar
276 276 This is a noop statement so that
277 277 this test is still more bytes than success.
278 278 Accept this change? [n] ..
279 279 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
280 280
281 281 $ cat test-failure.t
282 282 $ echo babar
283 283 babar
284 284 This is a noop statement so that
285 285 this test is still more bytes than success.
286 286
287 287 (reinstall)
288 288 $ mv backup test-failure.t
289 289
290 290 No Diff
291 291 ===============
292 292
293 293 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
294 294 !.
295 295 Failed test-failure.t: output changed
296 296 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
297 297 python hash seed: * (glob)
298 298 [1]
299 299
300 300 test for --time
301 301 ==================
302 302
303 303 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time
304 304 .
305 305 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
306 306 # Producing time report
307 307 cuser csys real Test
308 308 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
309 309
310 310 test for --time with --job enabled
311 311 ====================================
312 312
313 313 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time --jobs 2
314 314 .
315 315 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
316 316 # Producing time report
317 317 cuser csys real Test
318 318 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
319 319
320 320 Skips
321 321 ================
322 322 $ cat > test-skip.t <<EOF
323 323 > $ echo xyzzy
324 324 > #require false
325 325 > EOF
326 326 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
327 327 !.s
328 Skipped test-skip.t: irrelevant
328 Skipped test-skip.t: skipped
329 329 Failed test-failure.t: output changed
330 330 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
331 331 python hash seed: * (glob)
332 332 [1]
333 333
334 334 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy
335 335 .s
336 Skipped test-skip.t: irrelevant
336 Skipped test-skip.t: skipped
337 337 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
338 338
339 339 Skips with xml
340 340 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy \
341 341 > --xunit=xunit.xml
342 342 .s
343 Skipped test-skip.t: irrelevant
343 Skipped test-skip.t: skipped
344 344 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
345 345 $ cat xunit.xml
346 346 <?xml version="1.0" encoding="utf-8"?>
347 347 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
348 348 <testcase name="test-success.t" time="*"/> (glob)
349 349 </testsuite>
350 350
351 351 Missing skips or blacklisted skips don't count as executed:
352 352 $ echo test-failure.t > blacklist
353 353 $ $TESTDIR/run-tests.py --with-hg=`which hg` --blacklist=blacklist \
354 354 > test-failure.t test-bogus.t
355 355 ss
356 356 Skipped test-bogus.t: Doesn't exist
357 357 Skipped test-failure.t: blacklisted
358 358 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
359 359
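
The "python hash seed" line printed at the end of the failing runs above comes from the PYTHONHASHSEED handling in run-tests.py: a random seed is generated only when the variable is not already set, so a failure that depends on hash ordering can be replayed by exporting the reported seed before re-running. A sketch of such a re-run, with an illustrative seed value:

  $ PYTHONHASHSEED=2071630871 $TESTDIR/run-tests.py --with-hg=`which hg` test-failure.t
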