diff --git a/tests/basic_test_result.py b/tests/basic_test_result.py
new file mode 100644
--- /dev/null
+++ b/tests/basic_test_result.py
@@ -0,0 +1,46 @@
+from __future__ import print_function
+
+import unittest
+
+class TestResult(unittest._TextTestResult):
+
+    def __init__(self, options, *args, **kwargs):
+        super(TestResult, self).__init__(*args, **kwargs)
+        self._options = options
+
+        # unittest.TestResult didn't have skipped until 2.7. We need to
+        # polyfill it.
+        self.skipped = []
+
+        # We have a custom "ignored" result that isn't present in any Python
+        # unittest implementation. It is very similar to skipped. It may make
+        # sense to map it into skip some day.
+        self.ignored = []
+
+        self.times = []
+        self._firststarttime = None
+        # Data stored for the benefit of generating xunit reports.
+        self.successes = []
+        self.faildata = {}
+
+    def addFailure(self, test, reason):
+        print("FAILURE!", test, reason)
+
+    def addSuccess(self, test):
+        print("SUCCESS!", test)
+
+    def addError(self, test, err):
+        print("ERR!", test, err)
+
+    # Polyfill.
+    def addSkip(self, test, reason):
+        print("SKIP!", test, reason)
+
+    def addIgnore(self, test, reason):
+        print("IGNORE!", test, reason)
+
+    def addOutputMismatch(self, test, ret, got, expected):
+        return False
+
+    def stopTest(self, test, interrupted=False):
+        super(TestResult, self).stopTest(test)
diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1851,6 +1851,16 @@ class TestResult(unittest._TextTestResul
                 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                     test.name, self.times[-1][3]))
 
+def getTestResult():
+    """
+    Returns the relevant test result
+    """
+    if "CUSTOM_TEST_RESULT" in os.environ:
+        testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
+        return testresultmodule.TestResult
+    else:
+        return TestResult
+
 class TestSuite(unittest.TestSuite):
     """Custom unittest TestSuite that knows how to execute Mercurial
     tests."""
@@ -2090,8 +2100,8 @@ class TextTestRunner(unittest.TextTestRu
         self._runner = runner
 
     def listtests(self, test):
-        result = TestResult(self._runner.options, self.stream,
-                            self.descriptions, 0)
+        result = getTestResult()(self._runner.options, self.stream,
+                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
         for t in test:
             print(t.name)
@@ -2109,9 +2119,8 @@ class TextTestRunner(unittest.TextTestRu
         return result
 
     def run(self, test):
-        result = TestResult(self._runner.options, self.stream,
-                            self.descriptions, self.verbosity)
-
+        result = getTestResult()(self._runner.options, self.stream,
+                                 self.descriptions, self.verbosity)
         test(result)
 
         failed = len(result.failures)
diff --git a/tests/test-run-tests.t b/tests/test-run-tests.t
--- a/tests/test-run-tests.t
+++ b/tests/test-run-tests.t
@@ -1246,6 +1246,15 @@ Test globbing of local IP addresses
   $ echo dead:beef::1
   $LOCALIP (glob)
 
+Add support for external test formatter
+=======================================
+
+  $ CUSTOM_TEST_RESULT=basic_test_result $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
+
+  # Ran 2 tests, 0 skipped, 0 failed.
+  FAILURE! test-failure.t output changed
+  SUCCESS! test-success.t
+
 Test reusability for third party tools
 ======================================
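
Note on using the hook (not part of the patch): getTestResult() resolves
CUSTOM_TEST_RESULT with a plain __import__, so the named module must be
importable; the test above relies on basic_test_result.py sitting next to
run-tests.py, whose directory Python prepends to sys.path when the script is
run directly. Below is a sketch of what a third-party formatter could look
like. The module name custom_json_result, the JSON output format, and the
choice to subclass unittest.TextTestResult (instead of the older
_TextTestResult alias) are all assumptions for illustration, not part of this
change.

    # custom_json_result.py - hypothetical example, not part of this patch.
    from __future__ import print_function

    import json
    import unittest

    class TestResult(unittest.TextTestResult):
        """Emit one JSON object per finished test.

        run-tests.py instantiates the class as
        TestResult(options, stream, descriptions, verbosity), so the
        runner's options object comes first.
        """

        def __init__(self, options, *args, **kwargs):
            super(TestResult, self).__init__(*args, **kwargs)
            self._options = options
            # Attributes that basic_test_result.py also initializes and that
            # the harness may read from a result object.
            self.skipped = []
            self.ignored = []
            self.times = []
            self.successes = []
            self.faildata = {}

        def _record(self, status, test, detail=None):
            # One JSON line per outcome; format is illustrative only.
            print(json.dumps({'test': str(test), 'status': status,
                              'detail': detail}))

        def addSuccess(self, test):
            self._record('pass', test)

        def addFailure(self, test, reason):
            self._record('fail', test, reason)

        def addError(self, test, err):
            self._record('error', test, str(err))

        def addSkip(self, test, reason):
            self._record('skip', test, reason)

        def addIgnore(self, test, reason):
            self._record('ignore', test, reason)

        def addOutputMismatch(self, test, ret, got, expected):
            # Mirror basic_test_result.py: returning False leaves the
            # mismatch to be reported as a regular failure.
            return False

        def stopTest(self, test, interrupted=False):
            super(TestResult, self).stopTest(test)

With such a file on sys.path (e.g. next to run-tests.py, or via PYTHONPATH),
CUSTOM_TEST_RESULT=custom_json_result would select it in exactly the same way
the basic_test_result module is selected in the test above.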