author     jgraettinger@chromium.org <jgraettinger@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-06-27 16:32:35 +0000
committer  jgraettinger@chromium.org <jgraettinger@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-06-27 16:32:35 +0000
commit     83dd150d2fdd0bdf3d66e1e1e7244a47cd7e340f
tree       87a25dd948210114fbbb0dcca47b48adc6230ec4
parent     0015bc533d9955ab9dfcb1b2d10b62083ab6cd88
Revert of [telemetry] Refactor run_tests to remove old cruft. (https://codereview.chromium.org/228073002/)
Reason for revert: Causing builder failures, eg:
http://build.chromium.org/p/chromiumos.chromium/builders/AMD64%20%28chromium%29/builds/333

Error is missing gtest_testrunner module, removed in this CL:

ERROR:root:selenium module failed to be imported.
autotest-chrome-0.0.1-r3848: ERROR:root:telemetry_UnitTests import error: cannot import name gtest_testrunner. Skipping telemetry_UnitTests
autotest-chrome-0.0.1-r3848: Traceback (most recent call last):
autotest-chrome-0.0.1-r3848:   File "/build/amd64-generic/tmp/portage/chromeos-base/autotest-chrome-0.0.1-r3848/work/autotest-work/client/bin/setup_job.py", line 72, in init_test
autotest-chrome-0.0.1-r3848:     exec import_stmt + '\n' + init_stmt in locals_dict, globals_dict
autotest-chrome-0.0.1-r3848:   File "<string>", line 1, in <module>
autotest-chrome-0.0.1-r3848:   File "/build/amd64-generic/tmp/portage/chromeos-base/autotest-chrome-0.0.1-r3848/work/autotest-work/client/site_tests/telemetry_UnitTests/telemetry_UnitTests.py", line 7, in <module>
autotest-chrome-0.0.1-r3848:     from telemetry.unittest import gtest_testrunner, run_tests
autotest-chrome-0.0.1-r3848: ImportError: cannot import name gtest_testrunner

TBR=nduca, tonyg, Ken Russell, dtu
NOTRY=true
NOTREECHECKS=true
BUG=346956

Original issue's description:
> [telemetry] Refactor run_tests to remove old cruft.
>
> - Remove dead code and circuitous code paths.
> - Make its API consistent with run_benchmark, taking advantage of
>   command_line.OptparseCommand.
>
> BUG=346956
> TEST=tools/telemetry/run_tests && tools/perf/run_tests
>
> Committed: https://src.chromium.org/viewvc/chrome?view=rev&revision=280232

Review URL: https://codereview.chromium.org/355203002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@280340 0039d316-1c4b-4281-b951-d872f2087c98
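The failure reproduces outside the builders: with the reverted CL applied, telemetry no longer ships gtest_testrunner, so autotest's telemetry_UnitTests wrapper fails at import time. A minimal sketch of that failing import, assuming a chromium checkout with tools/telemetry added to sys.path (the path handling below is illustrative, not the actual autotest setup):

    import os
    import sys

    # Assumed checkout-relative location of the telemetry package.
    sys.path.append(os.path.join('tools', 'telemetry'))

    # This is the import at telemetry_UnitTests.py line 7; with the reverted CL
    # applied it raises: ImportError: cannot import name gtest_testrunner
    from telemetry.unittest import gtest_testrunner, run_tests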
-rwxr-xr-x  content/test/gpu/run_unittests                                     13
-rw-r--r--  tools/perf/benchmarks/benchmark_unittest.py                         4
-rwxr-xr-x  tools/perf/run_tests                                               16
-rwxr-xr-x  tools/telemetry/run_tests                                          13
-rw-r--r--  tools/telemetry/telemetry/core/command_line.py                      2
-rw-r--r--  tools/telemetry/telemetry/results/base_test_results_unittest.py     3
-rw-r--r--  tools/telemetry/telemetry/results/gtest_test_results.py            23
-rwxr-xr-x  tools/telemetry/telemetry/unittest/gtest_testrunner.py             48
-rw-r--r--  tools/telemetry/telemetry/unittest/gtest_unittest_results.py       38
-rw-r--r--  tools/telemetry/telemetry/unittest/options_for_unittests.py        14
-rw-r--r--  tools/telemetry/telemetry/unittest/run_tests.py                    183
-rw-r--r--  tools/telemetry/telemetry/unittest/run_tests_unittest.py            9
12 files changed, 192 insertions, 174 deletions
diff --git a/content/test/gpu/run_unittests b/content/test/gpu/run_unittests
index 781a073..ad8abfc 100755
--- a/content/test/gpu/run_unittests
+++ b/content/test/gpu/run_unittests
@@ -15,10 +15,17 @@ This script DOES NOT run benchmarks. run_gpu_tests does that.
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))
+from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
- base_dir = os.path.dirname(os.path.realpath(__file__))
- run_tests.environment = run_tests.Environment(base_dir, [base_dir])
- sys.exit(run_tests.RunTestsCommand.main())
+ top_level_dir = os.path.abspath(os.path.dirname(__file__))
+ runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
+ ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
+
+ if runner.result:
+ runner.result.PrintSummary()
+ sys.exit(min(ret + runner.result.num_errors, 255))
+ else:
+ sys.exit(ret)
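The restored wrappers fold the runner's error count into the process exit status and cap it at 255. The cap matters because exit statuses are truncated to 8 bits, so a large error count could wrap around to 0 and read as success. A standalone sketch of that arithmetic (the helper name _ExitStatus is illustrative, not Chromium code):

    import sys

    def _ExitStatus(ret, num_errors):
        # Exit statuses are 8-bit; without the cap, 256 errors would wrap to 0
        # and the run would look green despite the failures.
        return min(ret + num_errors, 255)

    if __name__ == '__main__':
        print _ExitStatus(0, 300)  # 255; sys.exit(300) would report status 44
        sys.exit(0)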
diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py
index 196c15b..6027a33 100644
--- a/tools/perf/benchmarks/benchmark_unittest.py
+++ b/tools/perf/benchmarks/benchmark_unittest.py
@@ -14,7 +14,7 @@ import unittest
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.page import page_measurement
-from telemetry.unittest import gtest_unittest_results
+from telemetry.unittest import gtest_testrunner
from telemetry.unittest import options_for_unittests
@@ -55,7 +55,7 @@ def SmokeTestGenerator(benchmark):
def load_tests(_, _2, _3):
- suite = gtest_unittest_results.GTestTestSuite()
+ suite = gtest_testrunner.GTestTestSuite()
benchmarks_dir = os.path.dirname(__file__)
top_level_dir = os.path.dirname(benchmarks_dir)
diff --git a/tools/perf/run_tests b/tools/perf/run_tests
index ed1429c..77a5366 100755
--- a/tools/perf/run_tests
+++ b/tools/perf/run_tests
@@ -5,18 +5,24 @@
"""This script runs unit tests of the code in the perf directory.
-This script DOES NOT run benchmarks. run_benchmark and run_measurement do that.
+This script DOES NOT run benchmarks. run_benchmarks and run_measurement do that.
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
-
+from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
- base_dir = os.path.dirname(os.path.realpath(__file__))
- run_tests.environment = run_tests.Environment(base_dir, [base_dir])
- sys.exit(run_tests.RunTestsCommand.main())
+ top_level_dir = os.path.abspath(os.path.dirname(__file__))
+ runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
+ ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
+
+ if runner.result:
+ runner.result.PrintSummary()
+ sys.exit(min(ret + runner.result.num_errors, 255))
+ else:
+ sys.exit(ret)
diff --git a/tools/telemetry/run_tests b/tools/telemetry/run_tests
index 3fd9702..e5c13e9 100755
--- a/tools/telemetry/run_tests
+++ b/tools/telemetry/run_tests
@@ -6,10 +6,17 @@
import os
import sys
+from telemetry.unittest import gtest_testrunner
from telemetry.unittest import run_tests
if __name__ == '__main__':
- base_dir = os.path.dirname(os.path.realpath(__file__))
- run_tests.environment = run_tests.Environment(base_dir, [base_dir])
- sys.exit(run_tests.RunTestsCommand.main())
+ top_level_dir = os.path.abspath(os.path.dirname(__file__))
+ runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
+ ret = run_tests.Main(sys.argv[1:], top_level_dir, top_level_dir, runner)
+
+ if runner.result:
+ runner.result.PrintSummary()
+ sys.exit(min(ret + runner.result.num_errors, 255))
+ else:
+ sys.exit(ret)
diff --git a/tools/telemetry/telemetry/core/command_line.py b/tools/telemetry/telemetry/core/command_line.py
index 9652cc3..0f21d4b 100644
--- a/tools/telemetry/telemetry/core/command_line.py
+++ b/tools/telemetry/telemetry/core/command_line.py
@@ -108,4 +108,4 @@ class SubcommandCommand(Command):
args.command.ProcessCommandLineArgs(parser, args)
def Run(self, args):
- return args.command().Run(args)
+ args.command().Run(args)
diff --git a/tools/telemetry/telemetry/results/base_test_results_unittest.py b/tools/telemetry/telemetry/results/base_test_results_unittest.py
index 9f24c85..2312acc 100644
--- a/tools/telemetry/telemetry/results/base_test_results_unittest.py
+++ b/tools/telemetry/telemetry/results/base_test_results_unittest.py
@@ -16,9 +16,6 @@ class TestOutputStream(object):
assert isinstance(data, str)
self.output_data.append(data)
- def flush(self):
- pass
-
class BaseTestResultsUnittest(unittest.TestCase):
diff --git a/tools/telemetry/telemetry/results/gtest_test_results.py b/tools/telemetry/telemetry/results/gtest_test_results.py
index 33fa003..5f30a7b 100644
--- a/tools/telemetry/telemetry/results/gtest_test_results.py
+++ b/tools/telemetry/telemetry/results/gtest_test_results.py
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import logging
+import sys
import time
from telemetry.results import page_test_results
@@ -24,7 +25,7 @@ class GTestTestResults(page_test_results.PageTestResults):
print >> self._output_stream, self._GetStringFromExcInfo(err)
print >> self._output_stream, '[ FAILED ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
- self._output_stream.flush()
+ sys.stdout.flush()
def AddError(self, page, err):
super(GTestTestResults, self).AddError(page, err)
@@ -36,25 +37,27 @@ class GTestTestResults(page_test_results.PageTestResults):
def StartTest(self, page):
super(GTestTestResults, self).StartTest(page)
- print >> self._output_stream, '[ RUN ]', page.display_name
- self._output_stream.flush()
+ print >> self._output_stream, '[ RUN ]', (
+ page.display_name)
+ sys.stdout.flush()
self._timestamp = time.time()
def AddSuccess(self, page):
super(GTestTestResults, self).AddSuccess(page)
- print >> self._output_stream, '[ OK ]', page.display_name, (
+ test_name = page.display_name
+ print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
- self._output_stream.flush()
+ sys.stdout.flush()
def AddSkip(self, page, reason):
super(GTestTestResults, self).AddSkip(page, reason)
- logging.warning('===== SKIPPING TEST %s: %s =====',
- page.display_name, reason)
+ test_name = page.display_name
+ logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
if self._timestamp == None:
self._timestamp = time.time()
- print >> self._output_stream, '[ OK ]', page.display_name, (
+ print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
- self._output_stream.flush()
+ sys.stdout.flush()
def PrintSummary(self):
unit = 'test' if len(self.successes) == 1 else 'tests'
@@ -74,4 +77,4 @@ class GTestTestResults(page_test_results.PageTestResults):
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
- self._output_stream.flush()
+ sys.stdout.flush()
diff --git a/tools/telemetry/telemetry/unittest/gtest_testrunner.py b/tools/telemetry/telemetry/unittest/gtest_testrunner.py
new file mode 100755
index 0000000..784e874
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/gtest_testrunner.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements a unittest TestRunner with GTest output.
+
+This output is ported from gtest.cc's PrettyUnitTestResultPrinter, but
+designed to be a drop-in replacement for unittest's TextTestRunner.
+"""
+
+import sys
+import time
+import unittest
+
+from telemetry.unittest import gtest_unittest_results
+
+
+class GTestTestSuite(unittest.TestSuite):
+ def __call__(self, *args, **kwargs):
+ result = args[0]
+ timestamp = time.time()
+ unit = 'test' if len(self._tests) == 1 else 'tests'
+ if not any(isinstance(x, unittest.TestSuite) for x in self._tests):
+ print '[----------] %d %s' % (len(self._tests), unit)
+ for test in self._tests:
+ if result.shouldStop:
+ break
+ test(result)
+ endts = time.time()
+ ms = (endts - timestamp) * 1000
+ if not any(isinstance(x, unittest.TestSuite) for x in self._tests):
+ print '[----------] %d %s (%d ms total)' % (len(self._tests), unit, ms)
+ print
+ return result
+
+
+class GTestTestRunner(object):
+ def __init__(self, print_result_after_run=True):
+ self.print_result_after_run = print_result_after_run
+ self.result = gtest_unittest_results.GTestUnittestResults(sys.stdout)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ test(self.result)
+ if self.print_result_after_run:
+ self.result.PrintSummary()
+ return self.result
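For reference, the restored runner is driven the same way the wrapper scripts above use it. A small sketch with a hypothetical test case (_SampleTest is made up for illustration; tools/telemetry is assumed to be on sys.path):

    import unittest

    from telemetry.unittest import gtest_testrunner

    class _SampleTest(unittest.TestCase):  # hypothetical test case
        def testAddition(self):
            self.assertEqual(1 + 1, 2)

    suite = gtest_testrunner.GTestTestSuite()
    suite.addTest(_SampleTest('testAddition'))

    # print_result_after_run=False mirrors the wrapper scripts, which call
    # PrintSummary() themselves after inspecting runner.result.
    runner = gtest_testrunner.GTestTestRunner(print_result_after_run=False)
    result = runner.run(suite)  # [----------] framing from the suite, [ RUN ]/[ OK ] from the result
    result.PrintSummary()       # [ PASSED ] / failure summary
    print result.num_errors     # 0 on success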
diff --git a/tools/telemetry/telemetry/unittest/gtest_unittest_results.py b/tools/telemetry/telemetry/unittest/gtest_unittest_results.py
index 3a9fdc1..f68cbd9 100644
--- a/tools/telemetry/telemetry/unittest/gtest_unittest_results.py
+++ b/tools/telemetry/telemetry/unittest/gtest_unittest_results.py
@@ -8,20 +8,11 @@ import time
import unittest
-class GTestTestSuite(unittest.TestSuite):
- def run(self, result): # pylint: disable=W0221
- result.StartTestSuite(self)
- result = super(GTestTestSuite, self).run(result)
- result.StopTestSuite(self)
- return result
-
-
class GTestUnittestResults(unittest.TestResult):
def __init__(self, output_stream):
super(GTestUnittestResults, self).__init__()
self._output_stream = output_stream
- self._test_start_time = None
- self._test_suite_start_time = None
+ self._timestamp = None
self._successes_count = 0
@property
@@ -29,7 +20,7 @@ class GTestUnittestResults(unittest.TestResult):
return self._successes_count
def _GetMs(self):
- return (time.time() - self._test_start_time) * 1000
+ return (time.time() - self._timestamp) * 1000
@property
def num_errors(self):
@@ -60,7 +51,7 @@ class GTestUnittestResults(unittest.TestResult):
print >> self._output_stream, '[ RUN ]', (
GTestUnittestResults._formatTestname(test))
sys.stdout.flush()
- self._test_start_time = time.time()
+ self._timestamp = time.time()
def addSuccess(self, test):
super(GTestUnittestResults, self).addSuccess(test)
@@ -74,31 +65,12 @@ class GTestUnittestResults(unittest.TestResult):
super(GTestUnittestResults, self).addSkip(test, reason)
test_name = GTestUnittestResults._formatTestname(test)
logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
- if self._test_start_time == None:
- self._test_start_time = time.time()
+ if self._timestamp == None:
+ self._timestamp = time.time()
print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
- def StartTestSuite(self, suite):
- contains_test_suites = any(isinstance(test, unittest.TestSuite)
- for test in suite)
- if not contains_test_suites:
- test_count = len([test for test in suite])
- unit = 'test' if test_count == 1 else 'tests'
- print '[----------]', test_count, unit
- self._test_suite_start_time = time.time()
-
- def StopTestSuite(self, suite):
- contains_test_suites = any(isinstance(test, unittest.TestSuite)
- for test in suite)
- if not contains_test_suites:
- elapsed_ms = (time.time() - self._test_suite_start_time) * 1000
- test_count = len([test for test in suite])
- unit = 'test' if test_count == 1 else 'tests'
- print '[----------]', test_count, unit, '(%d ms total)' % elapsed_ms
- print
-
def PrintSummary(self):
unit = 'test' if self._successes_count == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (
diff --git a/tools/telemetry/telemetry/unittest/options_for_unittests.py b/tools/telemetry/telemetry/unittest/options_for_unittests.py
index 331e250..fd87bda 100644
--- a/tools/telemetry/telemetry/unittest/options_for_unittests.py
+++ b/tools/telemetry/telemetry/unittest/options_for_unittests.py
@@ -9,16 +9,14 @@ if unit tests are not running.
This allows multiple unit tests to use a specific
browser, in face of multiple options."""
-
-
_options = None
-
-
-def Set(options):
+_browser_type = None
+def Set(options, browser_type):
global _options
+ global _browser_type
_options = options
-
+ _browser_type = browser_type
def GetCopy():
if not _options:
@@ -26,8 +24,10 @@ def GetCopy():
return _options.Copy()
-
def AreSet():
if _options:
return True
return False
+
+def GetBrowserType():
+ return _browser_type
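The restored two-argument Set() records the chosen browser type alongside the options. A sketch of the round trip as the harness and an individual test would see it, assuming tools/telemetry is on sys.path; the 'any' browser type matches the default that run_tests.Main sets up, and the try/finally mirrors how Main clears the options:

    from telemetry.core import browser_options
    from telemetry.unittest import options_for_unittests

    options = browser_options.BrowserFinderOptions()
    options.browser_type = 'any'

    # The harness installs the options before running the suite ...
    options_for_unittests.Set(options, options.browser_type)
    try:
        # ... and individual tests read them back:
        assert options_for_unittests.AreSet()
        per_test_options = options_for_unittests.GetCopy()      # a Copy() of the options
        browser_type = options_for_unittests.GetBrowserType()   # 'any'
    finally:
        options_for_unittests.Set(None, None)  # cleared when the run finishes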
diff --git a/tools/telemetry/telemetry/unittest/run_tests.py b/tools/telemetry/telemetry/unittest/run_tests.py
index 727bfc8..2605f34 100644
--- a/tools/telemetry/telemetry/unittest/run_tests.py
+++ b/tools/telemetry/telemetry/unittest/run_tests.py
@@ -3,86 +3,73 @@
# found in the LICENSE file.
import logging
-import sys
import unittest
from telemetry import decorators
-from telemetry.core import browser_finder
from telemetry.core import browser_options
-from telemetry.core import command_line
from telemetry.core import discover
from telemetry.core import util
-from telemetry.unittest import gtest_unittest_results
+from telemetry.unittest import gtest_testrunner
from telemetry.unittest import options_for_unittests
-class Environment(object):
- def __init__(self, top_level_dir, test_dirs):
- self._top_level_dir = top_level_dir
- self._test_dirs = tuple(test_dirs)
-
- @property
- def top_level_dir(self):
- return self._top_level_dir
-
- @property
- def test_dirs(self):
- return self._test_dirs
-
-
def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
loader = unittest.defaultTestLoader
- loader.suiteClass = gtest_unittest_results.GTestTestSuite
+ loader.suiteClass = gtest_testrunner.GTestTestSuite
+ subsuites = []
- test_suites = []
modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
for module in modules:
if hasattr(module, 'suite'):
- suite = module.suite()
+ new_suite = module.suite()
else:
- suite = loader.loadTestsFromModule(module)
- if suite.countTestCases():
- test_suites.append(suite)
- return test_suites
+ new_suite = loader.loadTestsFromModule(module)
+ if new_suite.countTestCases():
+ subsuites.append(new_suite)
+ return gtest_testrunner.GTestTestSuite(subsuites)
def FilterSuite(suite, predicate):
new_suite = suite.__class__()
- for test in suite:
- if isinstance(test, unittest.TestSuite):
- subsuite = FilterSuite(test, predicate)
- if subsuite.countTestCases():
- new_suite.addTest(subsuite)
- else:
- assert isinstance(test, unittest.TestCase)
- if predicate(test):
- new_suite.addTest(test)
+ for x in suite:
+ if isinstance(x, unittest.TestSuite):
+ subsuite = FilterSuite(x, predicate)
+ if subsuite.countTestCases() == 0:
+ continue
+
+ new_suite.addTest(subsuite)
+ continue
+
+ assert isinstance(x, unittest.TestCase)
+ if predicate(x):
+ new_suite.addTest(x)
return new_suite
-def DiscoverTests(search_dirs, top_level_dir, possible_browser,
- selected_tests=None, run_disabled_tests=False):
+def DiscoverAndRunTests(dir_name, args, top_level_dir, possible_browser,
+ default_options, runner):
+ if not runner:
+ runner = gtest_testrunner.GTestTestRunner(print_result_after_run=True)
+ suite = Discover(dir_name, top_level_dir, '*_unittest.py')
def IsTestSelected(test):
- if selected_tests:
+ if len(args) != 0:
found = False
- for name in selected_tests:
+ for name in args:
if name in test.id():
found = True
if not found:
return False
- if run_disabled_tests:
+ if default_options.run_disabled_tests:
return True
# pylint: disable=W0212
if not hasattr(test, '_testMethodName'):
return True
method = getattr(test, test._testMethodName)
return decorators.IsEnabled(method, possible_browser)
-
- wrapper_suite = gtest_unittest_results.GTestTestSuite()
- for search_dir in search_dirs:
- wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py'))
- return FilterSuite(wrapper_suite, IsTestSelected)
+ filtered_suite = FilterSuite(suite, IsTestSelected)
+ test_result = runner.run(filtered_suite)
+ return test_result
def RestoreLoggingLevel(func):
@@ -100,61 +87,53 @@ def RestoreLoggingLevel(func):
return _LoggingRestoreWrapper
-environment = None
-
-
-class RunTestsCommand(command_line.OptparseCommand):
- """Run unit tests"""
-
- usage = '[test_name ...] [<options>]'
-
- @classmethod
- def CreateParser(cls):
- options = browser_options.BrowserFinderOptions()
- options.browser_type = 'any'
- parser = options.CreateParser('%%prog %s' % cls.usage)
- return parser
-
- @classmethod
- def AddCommandLineArgs(cls, parser):
- parser.add_option('--repeat-count', dest='run_test_repeat_count',
- type='int', default=1,
- help='Repeats each a provided number of times.')
- parser.add_option('-d', '--also-run-disabled-tests',
- dest='run_disabled_tests',
- action='store_true', default=False,
- help='Ignore @Disabled and @Enabled restrictions.')
-
- @classmethod
- def ProcessCommandLineArgs(cls, parser, args):
- if args.verbosity == 0:
- logging.getLogger().setLevel(logging.WARN)
-
- try:
- possible_browser = browser_finder.FindBrowser(args)
- except browser_finder.BrowserFinderException, ex:
- parser.error(ex)
-
- if not possible_browser:
- parser.error('No browser found of type %s. Cannot run tests.\n'
- 'Re-run with --browser=list to see '
- 'available browser types.' % args.browser_type)
-
- cls.test_suite = DiscoverTests(
- environment.test_dirs, environment.top_level_dir, possible_browser,
- args.positional_args, args.run_disabled_tests)
-
- @RestoreLoggingLevel
- def Run(self, args):
- util.AddDirToPythonPath(util.GetUnittestDataDir())
-
- result = gtest_unittest_results.GTestUnittestResults(sys.stdout)
- try:
- options_for_unittests.Set(args)
- for _ in xrange(args.run_test_repeat_count):
- self.test_suite(result)
- finally:
- options_for_unittests.Set(None)
-
- result.PrintSummary()
- return len(result.failures) + len(result.errors)
+@RestoreLoggingLevel
+def Main(args, start_dir, top_level_dir, runner=None):
+ """Unit test suite that collects all test cases for telemetry."""
+ # Add unittest_data to the path so we can import packages from it.
+ util.AddDirToPythonPath(util.GetUnittestDataDir())
+
+ default_options = browser_options.BrowserFinderOptions()
+ default_options.browser_type = 'any'
+
+ parser = default_options.CreateParser('run_tests [options] [test names]')
+ parser.add_option('--repeat-count', dest='run_test_repeat_count',
+ type='int', default=1,
+ help='Repeats each a provided number of times.')
+ parser.add_option('-d', '--also-run-disabled-tests',
+ dest='run_disabled_tests',
+ action='store_true', default=False,
+ help='Ignore @Disabled and @Enabled restrictions.')
+
+ _, args = parser.parse_args(args)
+
+ if default_options.verbosity == 0:
+ logging.getLogger().setLevel(logging.WARN)
+
+ from telemetry.core import browser_finder
+ try:
+ browser_to_create = browser_finder.FindBrowser(default_options)
+ except browser_finder.BrowserFinderException, ex:
+ logging.error(str(ex))
+ return 1
+
+ if browser_to_create == None:
+ logging.error('No browser found of type %s. Cannot run tests.',
+ default_options.browser_type)
+ logging.error('Re-run with --browser=list to see available browser types.')
+ return 1
+
+ options_for_unittests.Set(default_options,
+ browser_to_create.browser_type)
+ try:
+ success = True
+ for _ in xrange(default_options.run_test_repeat_count):
+ success = success and DiscoverAndRunTests(
+ start_dir, args, top_level_dir, browser_to_create, default_options,
+ runner)
+ if success:
+ return 0
+ finally:
+ options_for_unittests.Set(None, None)
+
+ return 1
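When runner is None, Main builds its own GTestTestRunner(print_result_after_run=True), so an embedding script only has to turn the return value into an exit status. A minimal sketch of that default-runner invocation (the checkout-relative 'tools/telemetry' path is an assumption):

    import os
    import sys

    # Assumed checkout-relative path to the telemetry package.
    sys.path.append(os.path.join('tools', 'telemetry'))

    from telemetry.unittest import run_tests

    if __name__ == '__main__':
        telemetry_dir = os.path.abspath(os.path.join('tools', 'telemetry'))
        # With runner=None, Main creates the GTestTestRunner itself and prints
        # the summary; it returns 0 on success and 1 on failure.
        sys.exit(run_tests.Main(sys.argv[1:], telemetry_dir, telemetry_dir))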
diff --git a/tools/telemetry/telemetry/unittest/run_tests_unittest.py b/tools/telemetry/telemetry/unittest/run_tests_unittest.py
index ee02d93..20bc88d 100644
--- a/tools/telemetry/telemetry/unittest/run_tests_unittest.py
+++ b/tools/telemetry/telemetry/unittest/run_tests_unittest.py
@@ -32,9 +32,8 @@ class MockPlatform(object):
class RunTestsUnitTest(unittest.TestCase):
def setUp(self):
- self.suite = unittest.TestSuite()
- self.suite.addTests(run_tests.Discover(
- util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py'))
+ self.suite = run_tests.Discover(
+ util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py')
def _GetEnabledTests(self, browser_type, os_name, os_version_name,
supports_tab_control):
@@ -46,8 +45,8 @@ class RunTestsUnitTest(unittest.TestCase):
enabled_tests = set()
for i in run_tests.FilterSuite(self.suite, MockPredicate)._tests:
- for j in i:
- for k in j:
+ for j in i._tests:
+ for k in j._tests:
enabled_tests.add(k._testMethodName)
return enabled_tests