author    jbudorick <jbudorick@chromium.org>    2014-10-23 18:50:13 -0700
committer Commit bot <commit-bot@chromium.org>  2014-10-24 01:50:37 +0000
commit    256fd53752aff2c311a46e7c254b9ff037088aeb (patch)
tree      1e95d28d1f2e2ea029c25f16adf98399fe4b1aaa
parent    680be408457f4272b468e72468eb22e1c0f0b67a (diff)
[Android] Test runner for python unit tests.
BUG=424113

Review URL: https://codereview.chromium.org/660023002

Cr-Commit-Position: refs/heads/master@{#301025}
-rwxr-xr-x  build/android/buildbot/bb_device_steps.py    9
-rwxr-xr-x  build/android/buildbot/bb_run_bot.py          3
-rw-r--r--  build/android/pylib/constants.py             17
-rwxr-xr-x  build/android/test_runner.py                 91
4 files changed, 102 insertions(+), 18 deletions(-)
diff --git a/build/android/buildbot/bb_device_steps.py b/build/android/buildbot/bb_device_steps.py
index e95a552..59dd94c 100755
--- a/build/android/buildbot/bb_device_steps.py
+++ b/build/android/buildbot/bb_device_steps.py
@@ -77,7 +77,7 @@ INSTRUMENTATION_TESTS = dict((suite.name, suite) for suite in [
VALID_TESTS = set(['chromedriver', 'chrome_proxy', 'gpu', 'mojo', 'sync',
'telemetry_perf_unittests', 'ui', 'unit', 'webkit',
- 'webkit_layout'])
+ 'webkit_layout', 'python_unittests'])
RunCmd = bb_utils.RunCmd
@@ -534,12 +534,19 @@ def RunGPUTests(options):
EscapeBuilderName(builder_name)])
+def RunPythonUnitTests(_options):
+ for suite in constants.PYTHON_UNIT_TEST_SUITES:
+ bb_annotations.PrintNamedStep(suite)
+ RunCmd(['build/android/test_runner.py', 'python', '-s', suite])
+
+
def GetTestStepCmds():
return [
('chromedriver', RunChromeDriverTests),
('chrome_proxy', RunChromeProxyTests),
('gpu', RunGPUTests),
('mojo', RunMojoTests),
+ ('python_unittests', RunPythonUnitTests),
('sync', RunChromeSyncShellTests),
('telemetry_perf_unittests', RunTelemetryPerfUnitTests),
('ui', RunInstrumentationTests),
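
Note: the RunPythonUnitTests step above shells out to test_runner.py once per suite listed in constants.PYTHON_UNIT_TEST_SUITES. A minimal local sketch of the equivalent invocation, assuming it is run from the src/ root of a Chromium checkout (the single suite name here matches the entry added in pylib/constants.py below):

    # Sketch only: mirrors the RunCmd call made by RunPythonUnitTests for each
    # suite key in constants.PYTHON_UNIT_TEST_SUITES. Assumes cwd is src/.
    import subprocess

    for suite in ['pylib_py_unittests']:
        subprocess.check_call(
            ['build/android/test_runner.py', 'python', '-s', suite])
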
diff --git a/build/android/buildbot/bb_run_bot.py b/build/android/buildbot/bb_run_bot.py
index 586287b8..d237380 100755
--- a/build/android/buildbot/bb_run_bot.py
+++ b/build/android/buildbot/bb_run_bot.py
@@ -117,6 +117,7 @@ def GetBotStepMap():
compile_step = ['compile']
chrome_proxy_tests = ['chrome_proxy']
chrome_sync_shell_tests = ['sync']
+ python_unittests = ['python_unittests']
std_host_tests = ['check_webview_licenses', 'findbugs']
emma_coverage_tests = [x for x in std_host_tests if x is not 'findbugs']
std_build_steps = ['compile', 'zip_build']
@@ -167,7 +168,7 @@ def GetBotStepMap():
H(compile_step + std_host_tests, target_arch='ia32')),
B('fyi-builder-rel', H(std_build_steps, experimental)),
B('fyi-tests', H(std_test_steps),
- T(std_tests + chrome_sync_shell_tests,
+ T(std_tests + chrome_sync_shell_tests + python_unittests,
['--experimental', flakiness_server,
'--coverage-bucket', CHROMIUM_COVERAGE_BUCKET,
'--cleanup'])),
diff --git a/build/android/pylib/constants.py b/build/android/pylib/constants.py
index f54d498..f6edfed 100644
--- a/build/android/pylib/constants.py
+++ b/build/android/pylib/constants.py
@@ -182,6 +182,23 @@ UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
+PYTHON_UNIT_TEST_SUITES = {
+ 'pylib_py_unittests': {
+ 'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
+ 'test_modules': [
+ 'pylib.device.device_utils_test',
+ ]
+ },
+# TODO(mkosiba) Enable after fixing these tests.
+# 'gyp_py_unittests': {
+# 'path': os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
+# 'test_modules': [
+# 'java_cpp_enum_tests'
+# ]
+# },
+}
+
+
def GetBuildType():
try:
return os.environ['BUILDTYPE']
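
Note: PYTHON_UNIT_TEST_SUITES maps a suite name to the directory that must be prepended to sys.path and the dotted names of the unittest modules to load from it. A hypothetical sketch of what an additional entry would look like (the suite name, path, and module below are placeholders for illustration, not part of this change):

    # Hypothetical entry illustrating the expected shape; 'example_py_unittests',
    # its path, and 'example_module_test' are placeholders only.
    PYTHON_UNIT_TEST_SUITES['example_py_unittests'] = {
        'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'example'),
        'test_modules': [
            'example_module_test',  # importable once 'path' is on sys.path
        ],
    }
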
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 92c9798..2f5058f 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -14,6 +14,7 @@ import shutil
import signal
import sys
import threading
+import unittest
from pylib import android_commands
from pylib import constants
@@ -44,6 +45,9 @@ from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper
+HOST_TESTS = ['junit', 'python']
+
+
def AddCommonOptions(option_parser):
"""Adds all common options to |option_parser|."""
@@ -60,9 +64,6 @@ def AddCommonOptions(option_parser):
group.add_option('--build-directory', dest='build_directory',
help=('Path to the directory in which build files are'
' located (should not include build type)'))
- group.add_option('-c', dest='cleanup_test_files',
- help='Cleanup test files on the device after run',
- action='store_true')
group.add_option('--num_retries', dest='num_retries', type='int',
default=2,
help=('Number of retries for a test before '
@@ -73,22 +74,10 @@ def AddCommonOptions(option_parser):
default=0,
action='count',
help='Verbose level (multiple times for more)')
- group.add_option('--tool',
- dest='tool',
- help=('Run the test under a tool '
- '(use --tool help to list them)'))
group.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
- group.add_option('--skip-deps-push', dest='push_deps',
- action='store_false', default=True,
- help=('Do not push dependencies to the device. '
- 'Use this at own risk for speeding up test '
- 'execution on local machine.'))
- group.add_option('-d', '--device', dest='test_device',
- help=('Target device for the test suite '
- 'to run on.'))
option_parser.add_option_group(group)
@@ -100,6 +89,26 @@ def ProcessCommonOptions(options):
constants.SetBuildDirectory(options.build_directory)
+def AddDeviceOptions(option_parser):
+ group = optparse.OptionGroup(option_parser, 'Device Options')
+ group.add_option('-c', dest='cleanup_test_files',
+ help='Cleanup test files on the device after run',
+ action='store_true')
+ group.add_option('--tool',
+ dest='tool',
+ help=('Run the test under a tool '
+ '(use --tool help to list them)'))
+ group.add_option('--skip-deps-push', dest='push_deps',
+ action='store_false', default=True,
+ help=('Do not push dependencies to the device. '
+ 'Use this at own risk for speeding up test '
+ 'execution on local machine.'))
+ group.add_option('-d', '--device', dest='test_device',
+ help=('Target device for the test suite '
+ 'to run on.'))
+ option_parser.add_option_group(group)
+
+
def AddGTestOptions(option_parser):
"""Adds gtest options to |option_parser|."""
@@ -133,6 +142,7 @@ def AddGTestOptions(option_parser):
# TODO(gkanwar): Move these to Common Options once we have the plumbing
# in our other test types to handle these commands
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
def AddLinkerTestOptions(option_parser):
@@ -143,6 +153,7 @@ def AddLinkerTestOptions(option_parser):
option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
help='googletest-style filter string.')
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
def ProcessGTestOptions(options):
@@ -226,6 +237,7 @@ def AddInstrumentationTestOptions(option_parser):
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
option_parser.add_option('-j', '--java-only', action='store_true',
default=False, help='Run only the Java tests.')
@@ -334,6 +346,7 @@ def AddUIAutomatorTestOptions(option_parser):
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
@@ -447,6 +460,7 @@ def AddMonkeyTestOptions(option_parser):
'[default: "%default"].'))
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
@@ -520,6 +534,7 @@ def AddPerfTestOptions(option_parser):
action='store_true',
help='Just print the steps without executing.')
AddCommonOptions(option_parser)
+ AddDeviceOptions(option_parser)
def ProcessPerfTestOptions(options, args, error_func):
@@ -547,6 +562,24 @@ def ProcessPerfTestOptions(options, args, error_func):
options.dry_run, single_step)
+def AddPythonTestOptions(option_parser):
+ option_parser.add_option('-s', '--suite', dest='suite_name',
+ help=('Name of the test suite to run '
+ '(use -s help to list them).'))
+ AddCommonOptions(option_parser)
+
+
+def ProcessPythonTestOptions(options, error_func):
+ if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES:
+ available = ('Available test suites: [%s]' %
+ ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys()))
+ if options.suite_name == 'help':
+ print available
+ else:
+ error_func('"%s" is not a valid suite. %s' %
+ (options.suite_name, available))
+
+
def _RunGTests(options, devices):
"""Subcommand of RunTestsCommands which runs gtests."""
ProcessGTestOptions(options)
@@ -737,6 +770,25 @@ def _RunPerfTests(options, args, error_func):
return 0
+def _RunPythonTests(options, error_func):
+ """Subcommand of RunTestsCommand which runs python unit tests."""
+ ProcessPythonTestOptions(options, error_func)
+
+ suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name]
+ suite_path = suite_vars['path']
+ suite_test_modules = suite_vars['test_modules']
+
+ sys.path = [suite_path] + sys.path
+ try:
+ suite = unittest.TestSuite()
+ suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
+ for m in suite_test_modules)
+ runner = unittest.TextTestRunner(verbosity=1+options.verbose_count)
+ return 0 if runner.run(suite).wasSuccessful() else 1
+ finally:
+ sys.path = sys.path[1:]
+
+
def _GetAttachedDevices(test_device=None):
"""Get all attached devices.
@@ -790,7 +842,10 @@ def RunTestsCommand(command, options, args, option_parser):
ProcessCommonOptions(options)
- devices = _GetAttachedDevices(options.test_device)
+ if command in HOST_TESTS:
+ devices = []
+ else:
+ devices = _GetAttachedDevices(options.test_device)
forwarder.Forwarder.RemoveHostLog()
if not ports.ResetTestServerPortAllocation():
@@ -810,6 +865,8 @@ def RunTestsCommand(command, options, args, option_parser):
return _RunMonkeyTests(options, option_parser.error, devices)
elif command == 'perf':
return _RunPerfTests(options, args, option_parser.error)
+ elif command == 'python':
+ return _RunPythonTests(options, option_parser.error)
else:
raise Exception('Unknown test type.')
@@ -872,6 +929,8 @@ VALID_COMMANDS = {
AddMonkeyTestOptions, RunTestsCommand),
'perf': CommandFunctionTuple(
AddPerfTestOptions, RunTestsCommand),
+ 'python': CommandFunctionTuple(
+ AddPythonTestOptions, RunTestsCommand),
'linker': CommandFunctionTuple(
AddLinkerTestOptions, RunTestsCommand),
'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
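
Note: the new _RunPythonTests helper follows the standard unittest loading pattern: prepend the suite's path to sys.path, load each listed module by name with the default loader, run the combined suite with TextTestRunner, and restore sys.path afterwards. A self-contained sketch of that pattern outside the Chromium tree (the directory and module names passed in are illustrative only):

    # Standalone sketch of the unittest loading pattern used by _RunPythonTests.
    import sys
    import unittest

    def run_module_tests(tests_dir, module_names, verbosity=1):
        # Make the test modules importable, mirroring the 'path' entry of a suite.
        sys.path = [tests_dir] + sys.path
        try:
            suite = unittest.TestSuite()
            suite.addTests(unittest.defaultTestLoader.loadTestsFromName(name)
                           for name in module_names)
            result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
            return 0 if result.wasSuccessful() else 1
        finally:
            sys.path = sys.path[1:]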