author    | dtu@chromium.org | 2014-07-17 22:28:12 +0000
committer | dtu@chromium.org | 2014-07-17 22:28:12 +0000
commit    | 2dfa4bfe9584a7d01c8b9223577ccdf99203d16a (patch)
tree      | 19adda911bfd06e892b1a7ba1d914b81bb8496e6 /tools
parent    | 9401b48e48ad82e9018dd3fb0ca70a2e9e1cd42e (diff)
[telemetry] OutputFormatter for unit tests.
BUG=346956
TEST=tools/telemetry/run_tests && tools/perf/run_tests && content/test/gpu/run_unittests
R=kbr@chromium.org, nednguyen@google.com, tonyg@chromium.org
Review URL: https://codereview.chromium.org/382433002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@283907 0039d316-1c4b-4281-b951-d872f2087c98
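
The embedder-facing change, in brief: the module-level run_tests.environment / Environment pair becomes run_tests.config / Config, and callers now construct the output formatters themselves. A minimal wrapper script under the new API, mirroring the run_tests changes in the diff below (a sketch, not an additional file in this commit):

```python
#!/usr/bin/env python
# Sketch of an embedder script under the new API (mirrors the updated
# tools/telemetry/run_tests below).
import os
import sys

from telemetry.unittest import gtest_output_formatter
from telemetry.unittest import run_tests

if __name__ == '__main__':
  base_dir = os.path.dirname(os.path.realpath(__file__))
  # Formatters are injected by the caller; GTestOutputFormatter reproduces
  # the old gtest-style console output.
  output_formatters = [
      gtest_output_formatter.GTestOutputFormatter(sys.stdout)]
  run_tests.config = run_tests.Config(base_dir, [base_dir], output_formatters)
  sys.exit(run_tests.RunTestsCommand.main())
```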
Diffstat (limited to 'tools')
-rw-r--r-- | tools/perf/benchmarks/benchmark_unittest.py | 4
-rwxr-xr-x | tools/perf/run_tests | 7
-rwxr-xr-x | tools/telemetry/run_tests | 5
-rw-r--r-- | tools/telemetry/telemetry/unittest/gtest_output_formatter.py | 87
-rw-r--r-- | tools/telemetry/telemetry/unittest/gtest_output_formatter_unittest.py | 125
-rw-r--r-- | tools/telemetry/telemetry/unittest/gtest_unittest_results.py | 129
-rw-r--r-- | tools/telemetry/telemetry/unittest/gtest_unittest_results_unittest.py | 112
-rw-r--r-- | tools/telemetry/telemetry/unittest/options_for_unittests.py | 19
-rw-r--r-- | tools/telemetry/telemetry/unittest/output_formatter.py | 130
-rw-r--r-- | tools/telemetry/telemetry/unittest/output_formatter_unittest.py | 54
-rw-r--r-- | tools/telemetry/telemetry/unittest/run_tests.py | 30
11 files changed, 436 insertions, 266 deletions
diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py
index 196c15b..5c8707e 100644
--- a/tools/perf/benchmarks/benchmark_unittest.py
+++ b/tools/perf/benchmarks/benchmark_unittest.py
@@ -14,8 +14,8 @@ import unittest
 from telemetry import benchmark as benchmark_module
 from telemetry.core import discover
 from telemetry.page import page_measurement
-from telemetry.unittest import gtest_unittest_results
 from telemetry.unittest import options_for_unittests
+from telemetry.unittest import output_formatter


 def SmokeTestGenerator(benchmark):
@@ -55,7 +55,7 @@ def SmokeTestGenerator(benchmark):


 def load_tests(_, _2, _3):
-  suite = gtest_unittest_results.GTestTestSuite()
+  suite = output_formatter.TestSuite()

   benchmarks_dir = os.path.dirname(__file__)
   top_level_dir = os.path.dirname(benchmarks_dir)
diff --git a/tools/perf/run_tests b/tools/perf/run_tests
index ed1429c..e5dbf09 100755
--- a/tools/perf/run_tests
+++ b/tools/perf/run_tests
@@ -5,7 +5,7 @@

 """This script runs unit tests of the code in the perf directory.

-This script DOES NOT run benchmarks. run_benchmark and run_measurement do that.
+This script DOES NOT run benchmarks. run_benchmark does that.
 """

 import os
@@ -13,10 +13,13 @@ import sys

 sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))

+from telemetry.unittest import gtest_output_formatter
 from telemetry.unittest import run_tests


 if __name__ == '__main__':
   base_dir = os.path.dirname(os.path.realpath(__file__))
-  run_tests.environment = run_tests.Environment(base_dir, [base_dir])
+  output_formatters = [
+      gtest_output_formatter.GTestOutputFormatter(sys.stdout)]
+  run_tests.config = run_tests.Config(base_dir, [base_dir], output_formatters)
   sys.exit(run_tests.RunTestsCommand.main())
diff --git a/tools/telemetry/run_tests b/tools/telemetry/run_tests
index 3fd9702..7a217f2 100755
--- a/tools/telemetry/run_tests
+++ b/tools/telemetry/run_tests
@@ -6,10 +6,13 @@
 import os
 import sys

+from telemetry.unittest import gtest_output_formatter
 from telemetry.unittest import run_tests


 if __name__ == '__main__':
   base_dir = os.path.dirname(os.path.realpath(__file__))
-  run_tests.environment = run_tests.Environment(base_dir, [base_dir])
+  output_formatters = [
+      gtest_output_formatter.GTestOutputFormatter(sys.stdout)]
+  run_tests.config = run_tests.Config(base_dir, [base_dir], output_formatters)
   sys.exit(run_tests.RunTestsCommand.main())
diff --git a/tools/telemetry/telemetry/unittest/gtest_output_formatter.py b/tools/telemetry/telemetry/unittest/gtest_output_formatter.py
new file mode 100644
index 0000000..cdf1a52
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/gtest_output_formatter.py
@@ -0,0 +1,87 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import unittest
+
+from telemetry.unittest import output_formatter
+from telemetry.util import exception_formatter
+
+
+def _FormatTestName(test):
+  chunks = test.id().split('.')[2:]
+  return '.'.join(chunks)
+
+
+class GTestOutputFormatter(output_formatter.OutputFormatter):
+  def __init__(self, output_stream):
+    super(GTestOutputFormatter, self).__init__(output_stream)
+    self._suite_start_time = None
+    self._test_start_time = None
+
+  def _Print(self, *args):
+    print >> self._output_stream, ' '.join(map(str, args))
+    self._output_stream.flush()
+
+  def _TestTimeMs(self):
+    return (time.time() - self._test_start_time) * 1000
+
+  def StartTest(self, test):
+    self._Print('[ RUN      ]', _FormatTestName(test))
+    self._test_start_time = time.time()
+
+  def StartTestSuite(self, suite):
+    contains_test_suites = any(isinstance(test, unittest.TestSuite)
+                               for test in suite)
+    if not contains_test_suites:
+      test_count = len([test for test in suite])
+      unit = 'test' if test_count == 1 else 'tests'
+      self._Print('[----------]', test_count, unit)
+    self._suite_start_time = time.time()
+
+  def StopTestSuite(self, suite):
+    contains_test_suites = any(isinstance(test, unittest.TestSuite)
+                               for test in suite)
+    if not contains_test_suites:
+      test_count = len([test for test in suite])
+      unit = 'test' if test_count == 1 else 'tests'
+      elapsed_ms = (time.time() - self._suite_start_time) * 1000
+      self._Print('[----------]', test_count, unit,
+                  '(%d ms total)' % elapsed_ms)
+      self._Print()
+
+  def StopTestRun(self, result):
+    unit = 'test' if len(result.successes) == 1 else 'tests'
+    self._Print('[  PASSED  ]', len(result.successes), '%s.' % unit)
+    if result.errors or result.failures:
+      all_errors = result.errors[:]
+      all_errors.extend(result.failures)
+      unit = 'test' if len(all_errors) == 1 else 'tests'
+      self._Print('[  FAILED  ]', len(all_errors), '%s, listed below:' % unit)
+      for test, _ in all_errors:
+        self._Print('[  FAILED  ] ', _FormatTestName(test))
+    if not result.wasSuccessful():
+      self._Print()
+      count = len(result.errors) + len(result.failures)
+      unit = 'TEST' if count == 1 else 'TESTS'
+      self._Print(count, 'FAILED', unit)
+    self._Print()
+
+  def Error(self, test, err):
+    self.Failure(test, err)
+
+  def Failure(self, test, err):
+    exception_formatter.PrintFormattedException(*err)
+    test_name = _FormatTestName(test)
+    self._Print('[  FAILED  ]', test_name, '(%0.f ms)' % self._TestTimeMs())
+
+  def Success(self, test):
+    test_name = _FormatTestName(test)
+    self._Print('[       OK ]', test_name, '(%0.f ms)' % self._TestTimeMs())
+
+  def Skip(self, test, reason):
+    test_name = _FormatTestName(test)
+    logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
+    self.Success(test)
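
Because all gtest-specific printing now lives behind the OutputFormatter interface (defined in output_formatter.py further down this diff), alternative reporters can be plugged in without touching the runner. As a hypothetical illustration, not part of this commit, a dot-progress formatter could be written as:

```python
# Hypothetical example, not part of this commit: a minimal alternative
# reporter built on the OutputFormatter interface from output_formatter.py.
# It prints one character per test instead of gtest-style lines.
from telemetry.unittest import output_formatter


class DotProgressFormatter(output_formatter.OutputFormatter):
  def Success(self, test):
    self._output_stream.write('.')
    self._output_stream.flush()

  def Failure(self, test, err):
    self._output_stream.write('F')
    self._output_stream.flush()

  def Error(self, test, err):
    self._output_stream.write('E')
    self._output_stream.flush()

  def StopTestRun(self, result):
    # TestResult exposes successes and failures_and_errors, so the summary
    # needs no bookkeeping of its own.
    self._output_stream.write('\n%d passed, %d failed or errored\n' % (
        len(result.successes), len(result.failures_and_errors)))
```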
diff --git a/tools/telemetry/telemetry/unittest/gtest_output_formatter_unittest.py b/tools/telemetry/telemetry/unittest/gtest_output_formatter_unittest.py
new file mode 100644
index 0000000..0eff246
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/gtest_output_formatter_unittest.py
@@ -0,0 +1,125 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+import sys
+
+from telemetry.unittest import gtest_output_formatter
+from telemetry.unittest import simple_mock
+
+
+class DummyException(Exception):
+  pass
+
+try:
+  raise DummyException('Dummy exception')
+except DummyException:
+  DUMMY_EXCEPTION = sys.exc_info()
+
+
+class TestFoo(unittest.TestCase):
+  # Test method doesn't have test- prefix intentionally. This is so that
+  # run_test script won't run this test.
+  def runTezt(self):
+    pass
+
+
+class TestOutputStream(object):
+  def __init__(self):
+    self._output_data = []
+
+  @property
+  def output_data(self):
+    return ''.join(self._output_data)
+
+  def write(self, data):
+    self._output_data.append(data)
+
+  def flush(self):
+    pass
+
+
+class TestResultWithSuccesses(unittest.TestResult):
+  def __init__(self):
+    super(TestResultWithSuccesses, self).__init__()
+    self.successes = []
+
+  def addSuccess(self, test):
+    super(TestResultWithSuccesses, self).addSuccess(test)
+    self.successes.append(test)
+
+
+class GTestOutputFormatterTest(unittest.TestCase):
+  def setUp(self):
+    super(GTestOutputFormatterTest, self).setUp()
+    self._stream = TestOutputStream()
+    self._formatter = gtest_output_formatter.GTestOutputFormatter(self._stream)
+
+    self._mock_timer = simple_mock.MockTimer()
+    self._real_time_time = gtest_output_formatter.time.time
+    gtest_output_formatter.time.time = self._mock_timer.GetTime
+
+  def tearDown(self):
+    gtest_output_formatter.time.time = self._real_time_time
+
+  def testTestSuiteWithWrapperSuite(self):
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.TestSuite())
+    self._formatter.StartTestSuite(suite)
+    self._formatter.StopTestSuite(suite)
+
+    self.assertEqual(self._stream.output_data, '')
+
+  def testTestSuiteWithTestCase(self):
+    suite = unittest.TestSuite()
+    suite.addTest(TestFoo(methodName='runTezt'))
+    self._formatter.StartTestSuite(suite)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.StopTestSuite(suite)
+
+    expected = ('[----------] 1 test\n'
+                '[----------] 1 test (42 ms total)\n\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testCaseFailure(self):
+    test = TestFoo(methodName='runTezt')
+    self._formatter.StartTest(test)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.Failure(test, DUMMY_EXCEPTION)
+
+    expected = (
+        '[ RUN      ] gtest_output_formatter_unittest.TestFoo.runTezt\n'
+        '[  FAILED  ] gtest_output_formatter_unittest.TestFoo.runTezt '
+        '(42 ms)\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testCaseSuccess(self):
+    test = TestFoo(methodName='runTezt')
+    self._formatter.StartTest(test)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.Success(test)
+
+    expected = (
+        '[ RUN      ] gtest_output_formatter_unittest.TestFoo.runTezt\n'
+        '[       OK ] gtest_output_formatter_unittest.TestFoo.runTezt '
+        '(42 ms)\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testStopTestRun(self):
+    result = TestResultWithSuccesses()
+    self._formatter.StopTestRun(result)
+
+    expected = '[  PASSED  ] 0 tests.\n\n'
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testStopTestRunWithFailureAndSuccess(self):
+    test = TestFoo(methodName='runTezt')
+    result = TestResultWithSuccesses()
+    result.addSuccess(test)
+    result.addFailure(test, DUMMY_EXCEPTION)
+    self._formatter.StopTestRun(result)
+
+    expected = (
+        '[  PASSED  ] 1 test.\n'
+        '[  FAILED  ] 1 test, listed below:\n'
+        '[  FAILED  ]  gtest_output_formatter_unittest.TestFoo.runTezt\n\n'
+        '1 FAILED TEST\n\n')
+    self.assertEqual(self._stream.output_data, expected)
diff --git a/tools/telemetry/telemetry/unittest/gtest_unittest_results.py b/tools/telemetry/telemetry/unittest/gtest_unittest_results.py
deleted file mode 100644
index 6f53eee..0000000
--- a/tools/telemetry/telemetry/unittest/gtest_unittest_results.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import sys
-import time
-import unittest
-
-from telemetry.core import util
-from telemetry.unittest import options_for_unittests
-
-
-class GTestTestSuite(unittest.TestSuite):
-  def run(self, result):  # pylint: disable=W0221
-    result.StartTestSuite(self)
-    result = super(GTestTestSuite, self).run(result)
-    result.StopTestSuite(self)
-    return result
-
-
-class GTestTestRunner(object):
-  def run(self, test, repeat_count, args):
-    util.AddDirToPythonPath(util.GetUnittestDataDir())
-    result = GTestUnittestResults(sys.stdout)
-    try:
-      options_for_unittests.Set(args)
-      for _ in xrange(repeat_count):
-        test(result)
-    finally:
-      options_for_unittests.Set(None)
-
-    result.PrintSummary()
-    return result
-
-
-def _FormatTestName(test):
-  chunks = test.id().split('.')[2:]
-  return '.'.join(chunks)
-
-
-class GTestUnittestResults(unittest.TestResult):
-  def __init__(self, output_stream):
-    super(GTestUnittestResults, self).__init__()
-    self._output_stream = output_stream
-    self._test_start_time = None
-    self._test_suite_start_time = None
-    self.successes = []
-
-  @property
-  def failures_and_errors(self):
-    return self.failures + self.errors
-
-  def _GetMs(self):
-    return (time.time() - self._test_start_time) * 1000
-
-  def _EmitFailure(self, test, err):
-    print >> self._output_stream, self._exc_info_to_string(err, test)
-    print >> self._output_stream, '[  FAILED  ]', _FormatTestName(test), (
-        '(%0.f ms)' % self._GetMs())
-    sys.stdout.flush()
-
-  def addError(self, test, err):
-    super(GTestUnittestResults, self).addError(test, err)
-    self._EmitFailure(test, err)
-
-  def addFailure(self, test, err):
-    super(GTestUnittestResults, self).addFailure(test, err)
-    self._EmitFailure(test, err)
-
-  def startTest(self, test):
-    super(GTestUnittestResults, self).startTest(test)
-    print >> self._output_stream, '[ RUN      ]', _FormatTestName(test)
-    sys.stdout.flush()
-    self._test_start_time = time.time()
-
-  def addSuccess(self, test):
-    super(GTestUnittestResults, self).addSuccess(test)
-    self.successes.append(test)
-    print >> self._output_stream, '[       OK ]', _FormatTestName(test), (
-        '(%0.f ms)' % self._GetMs())
-    sys.stdout.flush()
-
-  def addSkip(self, test, reason):
-    super(GTestUnittestResults, self).addSkip(test, reason)
-    logging.warning('===== SKIPPING TEST %s: %s =====',
-                    _FormatTestName(test), reason)
-    if self._test_start_time == None:
-      self._test_start_time = time.time()
-    print >> self._output_stream, '[       OK ]', _FormatTestName(test), (
-        '(%0.f ms)' % self._GetMs())
-    sys.stdout.flush()
-
-  def StartTestSuite(self, suite):
-    contains_test_suites = any(isinstance(test, unittest.TestSuite)
-                               for test in suite)
-    if not contains_test_suites:
-      test_count = len([test for test in suite])
-      unit = 'test' if test_count == 1 else 'tests'
-      print '[----------]', test_count, unit
-    self._test_suite_start_time = time.time()
-
-  def StopTestSuite(self, suite):
-    contains_test_suites = any(isinstance(test, unittest.TestSuite)
-                               for test in suite)
-    if not contains_test_suites:
-      elapsed_ms = (time.time() - self._test_suite_start_time) * 1000
-      test_count = len([test for test in suite])
-      unit = 'test' if test_count == 1 else 'tests'
-      print '[----------]', test_count, unit, '(%d ms total)' % elapsed_ms
-      print
-
-  def PrintSummary(self):
-    unit = 'test' if len(self.successes) == 1 else 'tests'
-    print >> self._output_stream, '[  PASSED  ]', (
-        '%d %s.' % (len(self.successes), unit))
-    if not self.wasSuccessful():
-      failure_and_error_count = len(self.failures_and_errors)
-      unit = 'test' if failure_and_error_count == 1 else 'tests'
-      print >> self._output_stream, '[  FAILED  ]', (
-          '%d %s, listed below:' % (failure_and_error_count, unit))
-      for test, _ in self.failures_and_errors:
-        print >> self._output_stream, '[  FAILED  ] ', _FormatTestName(test)
-      print >> self._output_stream
-
-      unit = 'TEST' if failure_and_error_count == 1 else 'TESTS'
-      print >> self._output_stream, failure_and_error_count, 'FAILED', unit
-      print >> self._output_stream
-    sys.stdout.flush()
diff --git a/tools/telemetry/telemetry/unittest/gtest_unittest_results_unittest.py b/tools/telemetry/telemetry/unittest/gtest_unittest_results_unittest.py
deleted file mode 100644
index 239378e..0000000
--- a/tools/telemetry/telemetry/unittest/gtest_unittest_results_unittest.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import unittest
-
-from telemetry.unittest import gtest_unittest_results
-from telemetry.unittest import simple_mock
-
-
-class TestFoo(unittest.TestCase):
-
-  def __init__(self, methodName, mock_timer):
-    super(TestFoo, self).__init__(methodName)
-    self._mock_timer = mock_timer
-
-  # Test method doesn't have test- prefix intentionally. This is so that
-  # run_test script won't run this test.
-  def runTezt(self):
-    self._mock_timer.SetTime(0.007)
-    self.assertTrue(True)
-
-
-class TestBar(unittest.TestCase):
-
-  def __init__(self, methodName, mock_timer):
-    super(TestBar, self).__init__(methodName)
-    self._mock_timer = mock_timer
-
-  # Test method doesn't have test- prefix intentionally. This is so that
-  # run_test script won't run this test.
-  def runTezt(self):
-    self._mock_timer.SetTime(0.010)
-    self.assertTrue(False)
-
-
-class TestOutputStream(object):
-
-  def __init__(self):
-    self.output_data = []
-
-  def write(self, data):
-    self.output_data.append(data)
-
-
-class SummaryGtestUnittestResults(
-    gtest_unittest_results.GTestUnittestResults):
-
-  def __init__(self):
-    super(SummaryGtestUnittestResults, self).__init__(TestOutputStream())
-
-  @property
-  def output(self):
-    return ''.join(self._output_stream.output_data)
-
-
-class GTestUnittestResultsTest(unittest.TestCase):
-
-  def setUp(self):
-    super(GTestUnittestResultsTest, self).setUp()
-    self._mock_timer = simple_mock.MockTimer()
-    self._real_gtest_time_time = gtest_unittest_results.time.time
-    gtest_unittest_results.time.time = self._mock_timer.GetTime
-
-  def testResultsOfSinglePassTest(self):
-    test = TestFoo(methodName='runTezt', mock_timer=self._mock_timer)
-    results = SummaryGtestUnittestResults()
-    test(results)
-
-    results.PrintSummary()
-    expected = (
-        '[ RUN      ] gtest_unittest_results_unittest.TestFoo.runTezt\n'
-        '[       OK ] gtest_unittest_results_unittest.TestFoo.runTezt (7 ms)\n'
-        '[  PASSED  ] 1 test.\n\n')
-    self.assertEquals(expected, results.output)
-
-  def testResultsOfSingleFailTest(self):
-    test = TestBar(methodName='runTezt', mock_timer=self._mock_timer)
-    results = SummaryGtestUnittestResults()
-    test(results)
-
-    results.PrintSummary()
-    # Ignore trace info in the middle of results.output.
-    self.assertTrue(results.output.startswith(
-        '[ RUN      ] gtest_unittest_results_unittest.TestBar.runTezt\n'))
-    self.assertTrue(results.output.endswith(
-        '[  FAILED  ] gtest_unittest_results_unittest.TestBar.runTezt (10 ms)\n'
-        '[  PASSED  ] 0 tests.\n'
-        '[  FAILED  ] 1 test, listed below:\n'
-        '[  FAILED  ]  gtest_unittest_results_unittest.TestBar.runTezt\n\n'
-        '1 FAILED TEST\n\n'))
-
-  def testResultsOfMixedFailAndPassTestSuite(self):
-    test = unittest.TestSuite()
-    test.addTest(TestFoo(methodName='runTezt', mock_timer=self._mock_timer))
-    test.addTest(TestBar(methodName='runTezt', mock_timer=self._mock_timer))
-    results = SummaryGtestUnittestResults()
-    test(results)
-    results.PrintSummary()
-    # Ignore trace info in the middle of results.output.
-    self.assertTrue(results.output.startswith(
-        '[ RUN      ] gtest_unittest_results_unittest.TestFoo.runTezt\n'
-        '[       OK ] gtest_unittest_results_unittest.TestFoo.runTezt (7 ms)\n'
-        '[ RUN      ] gtest_unittest_results_unittest.TestBar.runTezt\n'))
-    self.assertTrue(results.output.endswith(
-        '[  FAILED  ] gtest_unittest_results_unittest.TestBar.runTezt (3 ms)\n'
-        '[  PASSED  ] 1 test.\n'
-        '[  FAILED  ] 1 test, listed below:\n'
-        '[  FAILED  ]  gtest_unittest_results_unittest.TestBar.runTezt\n\n'
-        '1 FAILED TEST\n\n'))
-
-  def tearDown(self):
-    gtest_unittest_results.time.time = self._real_gtest_time_time
diff --git a/tools/telemetry/telemetry/unittest/options_for_unittests.py b/tools/telemetry/telemetry/unittest/options_for_unittests.py
index 331e250..1c47fbf 100644
--- a/tools/telemetry/telemetry/unittest/options_for_unittests.py
+++ b/tools/telemetry/telemetry/unittest/options_for_unittests.py
@@ -11,23 +11,22 @@
 This allows multiple unit tests to use a specific browser, in face of multiple
 options."""

-_options = None
+_options = []


-def Set(options):
-  global _options
-
-  _options = options
+def Push(options):
+  _options.append(options)
+
+
+def Pop():
+  return _options.pop()


 def GetCopy():
-  if not _options:
+  if not AreSet():
     return None
-
-  return _options.Copy()
+  return _options[-1].Copy()


 def AreSet():
-  if _options:
-    return True
-  return False
+  return bool(_options)
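
The Set(...)/Set(None) pair becomes a stack, so nested callers (for example, a test run started from within another test run) restore the previous options instead of clobbering them. A hypothetical usage sketch, not from this commit, with a stand-in options object since only Copy() matters here:

```python
# Hypothetical usage sketch, not from this commit. _FakeOptions stands in
# for the real browser options object; only its Copy() method is needed.
from telemetry.unittest import options_for_unittests


class _FakeOptions(object):
  def Copy(self):
    return self


outer = _FakeOptions()
inner = _FakeOptions()

options_for_unittests.Push(outer)
options_for_unittests.Push(inner)             # shadow the outer options
assert options_for_unittests.GetCopy() is inner
options_for_unittests.Pop()                   # outer options visible again
assert options_for_unittests.GetCopy() is outer
options_for_unittests.Pop()
assert not options_for_unittests.AreSet()     # stack is empty again
```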
diff --git a/tools/telemetry/telemetry/unittest/output_formatter.py b/tools/telemetry/telemetry/unittest/output_formatter.py
new file mode 100644
index 0000000..c69faa3
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/output_formatter.py
@@ -0,0 +1,130 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.core import util
+from telemetry.unittest import options_for_unittests
+
+
+class OutputFormatter(object):
+  def __init__(self, output_stream):
+    self._output_stream = output_stream
+
+  def StartTest(self, test):
+    pass
+
+  def StartTestSuite(self, suite):
+    pass
+
+  def StartTestRun(self):
+    pass
+
+  def StopTest(self, test):
+    pass
+
+  def StopTestSuite(self, suite):
+    pass
+
+  def StopTestRun(self, result):
+    pass
+
+  def Error(self, test, err):
+    pass
+
+  def Failure(self, test, err):
+    pass
+
+  def Success(self, test):
+    pass
+
+  def Skip(self, test, reason):
+    pass
+
+
+class TestSuite(unittest.TestSuite):
+  """TestSuite that can delegate start and stop calls to a TestResult object."""
+
+  def run(self, result):  # pylint: disable=W0221
+    if hasattr(result, 'startTestSuite'):
+      result.startTestSuite(self)
+    result = super(TestSuite, self).run(result)
+    if hasattr(result, 'stopTestSuite'):
+      result.stopTestSuite(self)
+    return result
+
+
+class TestRunner(object):
+  def run(self, test, output_formatters, repeat_count, args):
+    util.AddDirToPythonPath(util.GetUnittestDataDir())
+    result = TestResult(output_formatters)
+    result.startTestRun()
+    try:
+      options_for_unittests.Push(args)
+      for _ in xrange(repeat_count):
+        test(result)
+    finally:
+      options_for_unittests.Pop()
+      result.stopTestRun()
+
+    return result
+
+
+class TestResult(unittest.TestResult):
+  def __init__(self, output_formatters):
+    super(TestResult, self).__init__()
+    self.successes = []
+    self._output_formatters = output_formatters
+
+  @property
+  def failures_and_errors(self):
+    return self.failures + self.errors
+
+  def startTest(self, test):
+    super(TestResult, self).startTest(test)
+    for output_formatter in self._output_formatters:
+      output_formatter.StartTest(test)
+
+  def startTestSuite(self, suite):
+    for output_formatter in self._output_formatters:
+      output_formatter.StartTestSuite(suite)
+
+  def startTestRun(self):
+    super(TestResult, self).startTestRun()
+    for output_formatter in self._output_formatters:
+      output_formatter.StartTestRun()
+
+  def stopTest(self, test):
+    super(TestResult, self).stopTest(test)
+    for output_formatter in self._output_formatters:
+      output_formatter.StopTest(test)
+
+  def stopTestSuite(self, suite):
+    for output_formatter in self._output_formatters:
+      output_formatter.StopTestSuite(suite)
+
+  def stopTestRun(self):
+    super(TestResult, self).stopTestRun()
+    for output_formatter in self._output_formatters:
+      output_formatter.StopTestRun(self)
+
+  def addError(self, test, err):
+    super(TestResult, self).addError(test, err)
+    for output_formatter in self._output_formatters:
+      output_formatter.Error(test, err)
+
+  def addFailure(self, test, err):
+    super(TestResult, self).addFailure(test, err)
+    for output_formatter in self._output_formatters:
+      output_formatter.Failure(test, err)
+
+  def addSuccess(self, test):
+    super(TestResult, self).addSuccess(test)
+    self.successes.append(test)
+    for output_formatter in self._output_formatters:
+      output_formatter.Success(test)
+
+  def addSkip(self, test, reason):
+    super(TestResult, self).addSkip(test, reason)
+    for output_formatter in self._output_formatters:
+      output_formatter.Skip(test, reason)
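
TestResult fans every unittest event out to each formatter in turn, which is why the runner accepts a sequence rather than a single formatter: one run can report to several sinks at once. A hypothetical sketch, not part of this commit, duplicating gtest-style output to stdout and a log file:

```python
# Hypothetical sketch, not part of this commit: because TestResult fans each
# event out to every formatter, one run can feed several sinks at once.
# 'test_run.log' is an assumed path for the second output copy.
import sys
import unittest

from telemetry.unittest import gtest_output_formatter
from telemetry.unittest import output_formatter


class ExampleTest(unittest.TestCase):
  def testNothing(self):
    self.assertTrue(True)


suite = output_formatter.TestSuite()
suite.addTest(ExampleTest(methodName='testNothing'))

log_file = open('test_run.log', 'w')
formatters = [
    gtest_output_formatter.GTestOutputFormatter(sys.stdout),
    gtest_output_formatter.GTestOutputFormatter(log_file),
]
# repeat_count=1; args=None since these tests need no browser options.
result = output_formatter.TestRunner().run(suite, formatters, 1, None)
log_file.close()
print '%d failures or errors' % len(result.failures_and_errors)
```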
diff --git a/tools/telemetry/telemetry/unittest/output_formatter_unittest.py b/tools/telemetry/telemetry/unittest/output_formatter_unittest.py
new file mode 100644
index 0000000..571062b
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/output_formatter_unittest.py
@@ -0,0 +1,54 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.unittest import output_formatter
+
+
+class TestFoo(unittest.TestCase):
+  # Test method doesn't have test- prefix intentionally. This is so that
+  # run_test script won't run this test.
+  def RunPassingTest(self):
+    pass
+
+  def RunFailingTest(self):
+    self.fail('expected failure')
+
+
+class LoggingOutputFormatter(object):
+  def __init__(self):
+    self._call_log = []
+
+  @property
+  def call_log(self):
+    return tuple(self._call_log)
+
+  def __getattr__(self, name):
+    def wrapper(*_):
+      self._call_log.append(name)
+    return wrapper
+
+
+class OutputFormatterTest(unittest.TestCase):
+  def testTestRunner(self):
+    suite = output_formatter.TestSuite()
+    suite.addTest(TestFoo(methodName='RunPassingTest'))
+    suite.addTest(TestFoo(methodName='RunFailingTest'))
+
+    formatter = LoggingOutputFormatter()
+    runner = output_formatter.TestRunner()
+    output_formatters = (formatter,)
+    result = runner.run(suite, output_formatters, 1, None)
+
+    self.assertEqual(len(result.successes), 1)
+    self.assertEqual(len(result.failures), 1)
+    self.assertEqual(len(result.failures_and_errors), 1)
+    expected = (
+        'StartTestRun', 'StartTestSuite',
+        'StartTest', 'Success', 'StopTest',
+        'StartTest', 'Failure', 'StopTest',
+        'StopTestSuite', 'StopTestRun',
+    )
+    self.assertEqual(formatter.call_log, expected)
diff --git a/tools/telemetry/telemetry/unittest/run_tests.py b/tools/telemetry/telemetry/unittest/run_tests.py
index 3521341..83f605b 100644
--- a/tools/telemetry/telemetry/unittest/run_tests.py
+++ b/tools/telemetry/telemetry/unittest/run_tests.py
@@ -10,13 +10,14 @@ from telemetry.core import browser_finder
 from telemetry.core import browser_options
 from telemetry.core import command_line
 from telemetry.core import discover
-from telemetry.unittest import gtest_unittest_results
+from telemetry.unittest import output_formatter


-class Environment(object):
-  def __init__(self, top_level_dir, test_dirs):
+class Config(object):
+  def __init__(self, top_level_dir, test_dirs, output_formatters):
     self._top_level_dir = top_level_dir
     self._test_dirs = tuple(test_dirs)
+    self._output_formatters = tuple(output_formatters)

   @property
   def top_level_dir(self):
@@ -26,10 +27,14 @@ class Environment(object):
   def test_dirs(self):
     return self._test_dirs

+  @property
+  def output_formatters(self):
+    return self._output_formatters
+

 def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
   loader = unittest.defaultTestLoader
-  loader.suiteClass = gtest_unittest_results.GTestTestSuite
+  loader.suiteClass = output_formatter.TestSuite

   test_suites = []
   modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
@@ -76,7 +81,7 @@ def DiscoverTests(search_dirs, top_level_dir, possible_browser,
       method = getattr(test, test._testMethodName)
       return decorators.IsEnabled(method, possible_browser)

-  wrapper_suite = gtest_unittest_results.GTestTestSuite()
+  wrapper_suite = output_formatter.TestSuite()
   for search_dir in search_dirs:
     wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py'))
   return FilterSuite(wrapper_suite, IsTestSelected)
@@ -97,7 +102,7 @@ def RestoreLoggingLevel(func):
     return _LoggingRestoreWrapper


-environment = None
+config = None


 class RunTestsCommand(command_line.OptparseCommand):
@@ -136,12 +141,17 @@ class RunTestsCommand(command_line.OptparseCommand):
           'Re-run with --browser=list to see '
           'available browser types.' % args.browser_type)

-  @RestoreLoggingLevel
   def Run(self, args):
     possible_browser = browser_finder.FindBrowser(args)
     test_suite = DiscoverTests(
-        environment.test_dirs, environment.top_level_dir, possible_browser,
+        config.test_dirs, config.top_level_dir, possible_browser,
         args.positional_args, args.run_disabled_tests)
-    runner = gtest_unittest_results.GTestTestRunner()
-    result = runner.run(test_suite, args.repeat_count, args)
+    runner = output_formatter.TestRunner()
+    result = runner.run(
+        test_suite, config.output_formatters, args.repeat_count, args)
     return len(result.failures_and_errors)
+
+  @classmethod
+  @RestoreLoggingLevel
+  def main(cls, args=None):
+    return super(RunTestsCommand, cls).main(args)