# HG changeset patch
# User anuraggoel
# Date 2014-09-08 14:41:00
# Node ID c42e69268f5b3a1c5ba97be212dd2eda621a15b3
# Parent  e2806b8613cae110276280c0151a6775949fd7d6
run-tests: add '--json' functionality to store test result in json file

This patch adds a new option, '--json'. When '--json' is enabled, test
result data is stored in a newly created "report.json" file inside the
test directory, in the following format:

testreport ={
    "test-success.t": {
        "result": "success",
        "time": "2.041"
    },
    "test-failure.t": {
        "result": "failure",
        "time": "4.430"
    },
    "test-skip.t": {
        "result": "skip",
        "time": "3.754"
    }
}

If '--json' is enabled but the json module is not installed, an
ImportError is raised with the message "json module not installed".

The "report.json" file is later read by an HTML/JavaScript page to draw
graphs of the results.

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -60,6 +60,14 @@ import Queue as queue
 from xml.dom import minidom
 import unittest
 
+try:
+    if sys.version_info < (2, 7):
+        import simplejson as json
+    else:
+        import json
+except ImportError:
+    json = None
+
 processlock = threading.Lock()
 
 # subprocess._cleanup can race with any Popen.wait or Popen.poll on py24
@@ -186,6 +194,8 @@ def getparser():
                            " (default: $%s or %d)" % defaults['timeout'])
     parser.add_option("--time", action="store_true",
                       help="time how long each test takes")
+    parser.add_option("--json", action="store_true",
+                      help="store test result data in 'report.json' file")
     parser.add_option("--tmpdir", type="string",
                       help="run tests in the given temporary directory"
                            " (implies --keep-tmpdir)")
@@ -1419,6 +1429,37 @@ class TextTestRunner(unittest.TextTestRu
             finally:
                 xuf.close()
 
+        if self._runner.options.json:
+            if json is None:
+                raise ImportError("json module not installed")
+            jsonpath = os.path.join(self._runner._testdir, 'report.json')
+            fp = open(jsonpath, 'w')
+            try:
+                timesd = {}
+                for test, cuser, csys, real in result.times:
+                    timesd[test] = real
+
+                outcome = {}
+                for tc in result.successes:
+                    testresult = {'result': 'success',
+                                  'time': ('%0.3f' % timesd[tc.name])}
+                    outcome[tc.name] = testresult
+
+                for tc, err in sorted(result.faildata.iteritems()):
+                    testresult = {'result': 'failure',
+                                  'time': ('%0.3f' % timesd[tc])}
+                    outcome[tc] = testresult
+
+                for tc, reason in result.skipped:
+                    testresult = {'result': 'skip',
+                                  'time': ('%0.3f' % timesd[tc.name])}
+                    outcome[tc.name] = testresult
+
+                jsonout = json.dumps(outcome, sort_keys=True, indent=4)
+                fp.writelines(("testreport =", jsonout))
+            finally:
+                fp.close()
+
         self._runner._checkhglib('Tested')
 
         self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
diff --git a/tests/test-run-tests.t b/tests/test-run-tests.t
--- a/tests/test-run-tests.t
+++ b/tests/test-run-tests.t
@@ -369,3 +369,40 @@ Missing skips or blacklisted skips don't
   Skipped test-failure.t: blacklisted
   # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
 
+test for --json
+==================
+
+  $ $TESTDIR/run-tests.py --with-hg=`which hg` --json
+  
+  --- $TESTTMP/test-failure.t
+  +++ $TESTTMP/test-failure.t.err
+  @@ -1,4 +1,4 @@
+     $ echo babar
+  -  rataxes
+  +  babar
+   This is a noop statement so that
+   this test is still more bytes than success.
+  
+  ERROR: test-failure.t output changed
+  !.s
+  Skipped test-skip.t: skipped
+  Failed test-failure.t: output changed
+  # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+  $ cat report.json
+  testreport ={
+      "test-failure.t": [\{] (re)
+          "result": "failure",
+          "time": "\s*[\d\.]{5}" (re)
+      },
+      "test-skip.t": {
+          "result": "skip",
+          "time": "\s*[\d\.]{5}" (re)
+      },
+      "test-success.t": [\{] (re)
+          "result": "success",
+          "time": "\s*[\d\.]{5}" (re)
+      }
+  } (no-eol)
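
Note on consuming "report.json": the runner writes the literal prefix
"testreport =" ahead of the JSON dump (see the fp.writelines call in the
run-tests.py hunk above), so the file is convenient to source from a
JavaScript page but is not pure JSON. The following is a minimal,
illustrative Python sketch, not part of this patch; the helper name
loadreport and the default path are assumptions.

    import json

    def loadreport(path='report.json'):
        # read the file produced by run-tests.py --json
        # (hypothetical helper, not part of run-tests.py)
        with open(path) as fp:
            data = fp.read()
        # drop the leading "testreport =" so the remainder parses as JSON
        return json.loads(data.split('=', 1)[1])

    report = loadreport()
    for name, res in sorted(report.items()):
        # each entry maps a test name to its 'result' and 'time' strings
        print('%s: %s (%ss)' % (name, res['result'], res['time']))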