Diffstat (limited to 'build')
-rw-r--r--  build/android/pylib/gtest/setup.py                  |  37
-rw-r--r--  build/android/pylib/gtest/test_options.py           |  17
-rw-r--r--  build/android/pylib/gtest/test_runner.py            |  23
-rw-r--r--  build/android/pylib/host_driven/python_test_base.py |  27
-rw-r--r--  build/android/pylib/host_driven/run_python_tests.py |  24
-rw-r--r--  build/android/pylib/instrumentation/setup.py         |  41
-rw-r--r--  build/android/pylib/instrumentation/test_options.py |  24
-rw-r--r--  build/android/pylib/instrumentation/test_runner.py  |  46
-rw-r--r--  build/android/pylib/uiautomator/setup.py             |  39
-rw-r--r--  build/android/pylib/uiautomator/test_options.py      |  23
-rw-r--r--  build/android/pylib/uiautomator/test_runner.py       |  42
-rwxr-xr-x  build/android/test_runner.py                         | 105
12 files changed, 264 insertions(+), 184 deletions(-)
diff --git a/build/android/pylib/gtest/setup.py b/build/android/pylib/gtest/setup.py
index a7271e5..2e70dd0 100644
--- a/build/android/pylib/gtest/setup.py
+++ b/build/android/pylib/gtest/setup.py
@@ -257,20 +257,11 @@ def _GetTestsFiltered(suite_name, gtest_filter, runner_factory, devices):
return tests
-def Setup(suite_name, test_arguments, timeout,
- cleanup_test_files, tool, build_type, push_deps,
- gtest_filter):
+def Setup(test_options):
"""Create the test runner factory and tests.
Args:
- suite_name: The suite name specified on the command line.
- test_arguments: Additional arguments to pass to the test binary.
- timeout: Timeout for each test.
- cleanup_test_files: Whether or not to cleanup test files on device.
- tool: Name of the Valgrind tool.
- build_type: 'Release' or 'Debug'.
- push_deps: If True, push all dependencies to the device.
- gtest_filter: Filter for tests.
+ test_options: A GTestOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
@@ -279,34 +270,32 @@ def Setup(suite_name, test_arguments, timeout,
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
- test_package = test_package_apk.TestPackageApk(suite_name, build_type)
+ test_package = test_package_apk.TestPackageApk(test_options.suite_name,
+ test_options.build_type)
if not os.path.exists(test_package.suite_path):
test_package = test_package_exe.TestPackageExecutable(
- suite_name, build_type)
+ test_options.suite_name, test_options.build_type)
if not os.path.exists(test_package.suite_path):
raise Exception(
- 'Did not find %s target. Ensure it has been built.' % suite_name)
+ 'Did not find %s target. Ensure it has been built.'
+ % test_options.suite_name)
logging.warning('Found target %s', test_package.suite_path)
- _GenerateDepsDirUsingIsolate(suite_name, build_type)
+ _GenerateDepsDirUsingIsolate(test_options.suite_name,
+ test_options.build_type)
# Constructs a new TestRunner with the current options.
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
+ test_options,
device,
- test_package,
- test_arguments,
- timeout,
- cleanup_test_files,
- tool,
- build_type,
- push_deps)
+ test_package)
attached_devices = android_commands.GetAttachedDevices()
- tests = _GetTestsFiltered(suite_name, gtest_filter,
+ tests = _GetTestsFiltered(test_options.suite_name, test_options.gtest_filter,
TestRunnerFactory, attached_devices)
# Coalesce unit tests into a single test per device
- if suite_name != 'content_browsertests':
+ if test_options.suite_name != 'content_browsertests':
num_devices = len(attached_devices)
tests = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
tests = [t for t in tests if t]
diff --git a/build/android/pylib/gtest/test_options.py b/build/android/pylib/gtest/test_options.py
new file mode 100644
index 0000000..c414671
--- /dev/null
+++ b/build/android/pylib/gtest/test_options.py
@@ -0,0 +1,17 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the GTestOptions named tuple."""
+
+import collections
+
+GTestOptions = collections.namedtuple('GTestOptions', [
+ 'build_type',
+ 'tool',
+ 'cleanup_test_files',
+ 'push_deps',
+ 'gtest_filter',
+ 'test_arguments',
+ 'timeout',
+ 'suite_name'])
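For illustration, a minimal sketch of how the new tuple gets built and handed to gtest_setup.Setup (all values are placeholders; the real call site is in build/android/test_runner.py further down in this change):

    from pylib.gtest import setup as gtest_setup
    from pylib.gtest import test_options as gtest_test_options

    # Positional construction, so the argument order must match the field
    # order defined above (values here are placeholders).
    gtest_options = gtest_test_options.GTestOptions(
        'Debug',            # build_type
        '',                 # tool
        False,              # cleanup_test_files
        True,               # push_deps
        'FooTest.*',        # gtest_filter
        '',                 # test_arguments
        0,                  # timeout (0 falls back to 60s in TestRunner)
        'base_unittests')   # suite_name

    # Setup() needs a built suite and an attached device; it returns the
    # (factory, tests) pair that the dispatcher consumes.
    runner_factory, tests = gtest_setup.Setup(gtest_options)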
diff --git a/build/android/pylib/gtest/test_runner.py b/build/android/pylib/gtest/test_runner.py
index 2a93e37..b9b1616 100644
--- a/build/android/pylib/gtest/test_runner.py
+++ b/build/android/pylib/gtest/test_runner.py
@@ -23,30 +23,31 @@ def _TestSuiteRequiresMockTestServer(suite_name):
class TestRunner(base_test_runner.BaseTestRunner):
- def __init__(self, device, test_package, test_arguments, timeout,
- cleanup_test_files, tool_name, build_type, push_deps):
+ def __init__(self, test_options, device, test_package):
"""Single test suite attached to a single device.
Args:
+ test_options: A GTestOptions object.
device: Device to run the tests.
test_package: An instance of TestPackage class.
- test_arguments: Additional arguments to pass to the test binary.
- timeout: Timeout for each test.
- cleanup_test_files: Whether or not to cleanup test files on device.
- tool_name: Name of the Valgrind tool.
- build_type: 'Release' or 'Debug'.
- push_deps: If True, push all dependencies to the device.
"""
- super(TestRunner, self).__init__(device, tool_name, build_type, push_deps,
- cleanup_test_files)
+
+ super(TestRunner, self).__init__(device, test_options.tool,
+ test_options.build_type,
+ test_options.push_deps,
+ test_options.cleanup_test_files)
+
self.test_package = test_package
self.test_package.tool = self.tool
- self._test_arguments = test_arguments
+ self._test_arguments = test_options.test_arguments
+
+ timeout = test_options.timeout
if timeout == 0:
timeout = 60
# On a VM (e.g. chromium buildbots), this timeout is way too small.
if os.environ.get('BUILDBOT_SLAVENAME'):
timeout = timeout * 2
+
self._timeout = timeout * self.tool.GetTimeoutScale()
#override
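The effective per-test timeout assembled above can be traced with a small standalone sketch; effective_timeout and the scale value are illustrative, only the arithmetic mirrors the runner:

    import os

    def effective_timeout(timeout, tool_scale=1):
        # Mirrors TestRunner.__init__ above: 0 means "use the 60s default".
        if timeout == 0:
            timeout = 60
        # On a VM (e.g. chromium buildbots) the timeout is doubled.
        if os.environ.get('BUILDBOT_SLAVENAME'):
            timeout = timeout * 2
        # Finally scaled by the Valgrind tool's own factor.
        return timeout * tool_scale

    # Locally, GTestOptions(..., timeout=0, ...) ends up as 60s; on a buildbot
    # it becomes 120s before the tool scale is applied.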
diff --git a/build/android/pylib/host_driven/python_test_base.py b/build/android/pylib/host_driven/python_test_base.py
index 14f71ce..42a6326 100644
--- a/build/android/pylib/host_driven/python_test_base.py
+++ b/build/android/pylib/host_driven/python_test_base.py
@@ -25,6 +25,7 @@ import time
from pylib import android_commands
from pylib.base import base_test_result
+from pylib.instrumentation import test_options
from pylib.instrumentation import test_package
from pylib.instrumentation import test_result
from pylib.instrumentation import test_runner
@@ -78,15 +79,23 @@ class PythonTestBase(object):
test = self._ComposeFullTestName(fname, suite, test)
test_pkg = test_package.TestPackage(
self.options.test_apk_path, self.options.test_apk_jar_path)
- java_test_runner = test_runner.TestRunner(self.options.build_type,
- self.options.test_data,
- self.options.save_perf_json,
- self.options.screenshot_failures,
- self.options.tool,
- self.options.wait_for_debugger,
- self.options.disable_assertions,
- self.options.push_deps,
- self.options.cleanup_test_files,
+ instrumentation_options = test_options.InstrumentationOptions(
+ self.options.build_type,
+ self.options.tool,
+ self.options.cleanup_test_files,
+ self.options.push_deps,
+ self.options.annotations,
+ self.options.exclude_annotations,
+ self.options.test_filter,
+ self.options.test_data,
+ self.options.save_perf_json,
+ self.options.screenshot_failures,
+ self.options.disable_assertions,
+ self.options.wait_for_debugger,
+ self.options.test_apk,
+ self.options.test_apk_path,
+ self.options.test_apk_jar_path)
+ java_test_runner = test_runner.TestRunner(instrumentation_options,
self.device_id,
self.shard_index, test_pkg,
self.ports_to_forward)
diff --git a/build/android/pylib/host_driven/run_python_tests.py b/build/android/pylib/host_driven/run_python_tests.py
index 3ce8857..09f2aa1 100644
--- a/build/android/pylib/host_driven/run_python_tests.py
+++ b/build/android/pylib/host_driven/run_python_tests.py
@@ -11,6 +11,7 @@ import types
from pylib import android_commands
from pylib.base import base_test_result
+from pylib.instrumentation import test_options
from pylib.instrumentation import test_package
from pylib.instrumentation import test_runner
from pylib.utils import report_results
@@ -87,11 +88,24 @@ def DispatchPythonTests(options):
logging.debug('Pushing files to device %s', device_id)
test_pkg = test_package.TestPackage(options.test_apk_path,
options.test_apk_jar_path)
- test_files_copier = test_runner.TestRunner(
- options.build_type, options.test_data, options.save_perf_json,
- options.screenshot_failures, options.tool, options.wait_for_debugger,
- options.disable_assertions, options.push_deps,
- options.cleanup_test_files, device_id, 0, test_pkg, [])
+ instrumentation_options = test_options.InstrumentationOptions(
+ options.build_type,
+ options.tool,
+ options.cleanup_test_files,
+ options.push_deps,
+ options.annotations,
+ options.exclude_annotations,
+ options.test_filter,
+ options.test_data,
+ options.save_perf_json,
+ options.screenshot_failures,
+ options.disable_assertions,
+ options.wait_for_debugger,
+ options.test_apk,
+ options.test_apk_path,
+ options.test_apk_jar_path)
+ test_files_copier = test_runner.TestRunner(instrumentation_options,
+ device_id, 0, test_pkg, [])
test_files_copier.InstallTestPackage()
if options.push_deps:
logging.info('Pushing data deps to device.')
diff --git a/build/android/pylib/instrumentation/setup.py b/build/android/pylib/instrumentation/setup.py
index 1c09acf..3b24188 100644
--- a/build/android/pylib/instrumentation/setup.py
+++ b/build/android/pylib/instrumentation/setup.py
@@ -5,52 +5,31 @@
"""Generates test runner factory and tests for instrumentation tests."""
import logging
-import os
-
-from pylib import android_commands
-from pylib import constants
-from pylib.base import base_test_result
-from pylib.utils import report_results
import test_package
import test_runner
-def Setup(test_apk_path, test_apk_jar_path, annotations, exclude_annotations,
- test_filter, build_type, test_data, save_perf_json,
- screenshot_failures, tool, wait_for_debugger, disable_assertions,
- push_deps, cleanup_test_files):
+def Setup(test_options):
"""Create and return the test runner factory and tests.
Args:
- test_apk_path: Path to the test apk file.
- test_apk_jar_path: Path to the jar associated with the test apk.
- annotations: Annotations for the tests.
- exclude_annotations: Any annotations to exclude from running.
- test_filter: Filter string for tests.
- build_type: 'Release' or 'Debug'.
- test_data: Location of the test data.
- save_perf_json: Whether or not to save the JSON file from UI perf tests.
- screenshot_failures: Take a screenshot for a test failure
- tool: Name of the Valgrind tool.
- wait_for_debugger: blocks until the debugger is connected.
- disable_assertions: Whether to disable java assertions on the device.
- push_deps: If True, push all dependencies to the device.
- cleanup_test_files: Whether or not to cleanup test files on device.
+ test_options: An InstrumentationOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
- test_pkg = test_package.TestPackage(test_apk_path, test_apk_jar_path)
- tests = test_pkg._GetAllMatchingTests(annotations, exclude_annotations,
- test_filter)
+ test_pkg = test_package.TestPackage(test_options.test_apk_path,
+ test_options.test_apk_jar_path)
+ tests = test_pkg._GetAllMatchingTests(
+ test_options.annotations,
+ test_options.exclude_annotations,
+ test_options.test_filter)
if not tests:
logging.error('No instrumentation tests to run with current args.')
def TestRunnerFactory(device, shard_index):
- return test_runner.TestRunner(
- build_type, test_data, save_perf_json, screenshot_failures,
- tool, wait_for_debugger, disable_assertions, push_deps,
- cleanup_test_files, device, shard_index, test_pkg, [])
+ return test_runner.TestRunner(test_options, device, shard_index,
+ test_pkg, [])
return (TestRunnerFactory, tests)
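On the consuming side only one object is threaded through now; a rough sketch (the device serial is made up, and instrumentation_options stands for the tuple returned by ProcessInstrumentationOptions later in this change):

    from pylib.instrumentation import setup as instrumentation_setup

    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    # test_dispatcher invokes the factory once per device/shard; done by hand:
    runner = runner_factory('0123456789abcdef', 0)   # (device, shard_index)
    runner.InstallTestPackage()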
diff --git a/build/android/pylib/instrumentation/test_options.py b/build/android/pylib/instrumentation/test_options.py
new file mode 100644
index 0000000..fec3d9c
--- /dev/null
+++ b/build/android/pylib/instrumentation/test_options.py
@@ -0,0 +1,24 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the InstrumentationOptions named tuple."""
+
+import collections
+
+InstrumentationOptions = collections.namedtuple('InstrumentationOptions', [
+ 'build_type',
+ 'tool',
+ 'cleanup_test_files',
+ 'push_deps',
+ 'annotations',
+ 'exclude_annotations',
+ 'test_filter',
+ 'test_data',
+ 'save_perf_json',
+ 'screenshot_failures',
+ 'disable_assertions',
+ 'wait_for_debugger',
+ 'test_apk',
+ 'test_apk_path',
+ 'test_apk_jar_path'])
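Since the namedtuple declares no defaults, every one of the fifteen fields must be supplied; constructing it with keyword arguments guards against the ordering mistakes the long positional lists elsewhere in this change are prone to. A placeholder-valued sketch:

    from pylib.instrumentation.test_options import InstrumentationOptions

    options = InstrumentationOptions(
        build_type='Debug', tool='', cleanup_test_files=False, push_deps=True,
        annotations=['SmallTest'], exclude_annotations=[], test_filter=None,
        test_data=[], save_perf_json=False, screenshot_failures=False,
        disable_assertions=False, wait_for_debugger=False,
        test_apk='FooTest', test_apk_path='/path/to/FooTest-debug.apk',
        test_apk_jar_path='/path/to/FooTest.jar')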
diff --git a/build/android/pylib/instrumentation/test_runner.py b/build/android/pylib/instrumentation/test_runner.py
index d6c7c78..4464d72 100644
--- a/build/android/pylib/instrumentation/test_runner.py
+++ b/build/android/pylib/instrumentation/test_runner.py
@@ -7,12 +7,9 @@
import logging
import os
import re
-import shutil
-import sys
import time
from pylib import android_commands
-from pylib import cmd_helper
from pylib import constants
from pylib import json_perf_parser
from pylib import perf_tests_helper
@@ -52,38 +49,25 @@ class TestRunner(base_test_runner.BaseTestRunner):
'/chrome-profile*')
_DEVICE_HAS_TEST_FILES = {}
- def __init__(self, build_type, test_data, save_perf_json, screenshot_failures,
- tool, wait_for_debugger, disable_assertions, push_deps,
- cleanup_test_files, device, shard_index, test_pkg,
+ def __init__(self, test_options, device, shard_index, test_pkg,
ports_to_forward):
"""Create a new TestRunner.
Args:
- build_type: 'Release' or 'Debug'.
- test_data: Location of the test data.
- save_perf_json: Whether or not to save the JSON file from UI perf tests.
- screenshot_failures: Take a screenshot for a test failure
- tool: Name of the Valgrind tool.
- wait_for_debugger: Blocks until the debugger is connected.
- disable_assertions: Whether to disable java assertions on the device.
- push_deps: If True, push all dependencies to the device.
- cleanup_test_files: Whether or not to cleanup test files on device.
+ test_options: An InstrumentationOptions object.
device: Attached android device.
shard_index: Shard index.
test_pkg: A TestPackage object.
ports_to_forward: A list of port numbers for which to set up forwarders.
- Can be optionally requested by a test case.
+ Can be optionally requested by a test case.
"""
- super(TestRunner, self).__init__(device, tool, build_type, push_deps,
- cleanup_test_files)
+ super(TestRunner, self).__init__(device, test_options.tool,
+ test_options.build_type,
+ test_options.push_deps,
+ test_options.cleanup_test_files)
self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
- self.build_type = build_type
- self.test_data = test_data
- self.save_perf_json = save_perf_json
- self.screenshot_failures = screenshot_failures
- self.wait_for_debugger = wait_for_debugger
- self.disable_assertions = disable_assertions
+ self.options = test_options
self.test_pkg = test_pkg
self.ports_to_forward = ports_to_forward
@@ -111,7 +95,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
# TODO(frankf): Specify test data in this file as opposed to passing
# as command-line.
- for dest_host_pair in self.test_data:
+ for dest_host_pair in self.options.test_data:
dst_src = dest_host_pair.split(':',1)
dst_layer = dst_src[0]
host_src = dst_src[1]
@@ -125,7 +109,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
def _GetInstrumentationArgs(self):
ret = {}
- if self.wait_for_debugger:
+ if self.options.wait_for_debugger:
ret['debug'] = 'true'
return ret
@@ -142,7 +126,8 @@ class TestRunner(base_test_runner.BaseTestRunner):
logging.warning('Unable to enable java asserts for %s, non rooted device',
self.device)
else:
- if self.adb.SetJavaAssertsEnabled(enable=not self.disable_assertions):
+ if self.adb.SetJavaAssertsEnabled(
+ enable=not self.options.disable_assertions):
self.adb.Reboot(full_reboot=False)
# We give different default value to launch HTTP server based on shard index
@@ -250,7 +235,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
else:
raise Exception('Perf file does not exist or is empty')
- if self.save_perf_json:
+ if self.options.save_perf_json:
json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
with open(json_local_file, 'w') as f:
f.write(json_string)
@@ -286,7 +271,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
if scale_match:
timeout_scale = int(scale_match.group(1))
- if self.wait_for_debugger:
+ if self.options.wait_for_debugger:
timeout_scale *= 100
return timeout_scale
@@ -330,7 +315,8 @@ class TestRunner(base_test_runner.BaseTestRunner):
log = raw_result.GetFailureReason()
if not log:
log = 'No information.'
- if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
+ if (self.options.screenshot_failures or
+ log.find('INJECT_EVENTS perm') >= 0):
self._TakeScreenshot(test)
result = test_result.InstrumentationTestResult(
test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms,
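One side effect of keeping everything in self.options: namedtuples are immutable, so per-run adjustments either stay in local variables (as the gtest runner does with its timeout) or go through _replace(), which returns a fresh tuple. A throwaway illustration, given some InstrumentationOptions instance named options:

    # Direct assignment raises AttributeError on a namedtuple:
    #   options.wait_for_debugger = True
    # so a modified copy is made instead:
    debug_options = options._replace(wait_for_debugger=True)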
diff --git a/build/android/pylib/uiautomator/setup.py b/build/android/pylib/uiautomator/setup.py
index ee7b2f9..24bc465 100644
--- a/build/android/pylib/uiautomator/setup.py
+++ b/build/android/pylib/uiautomator/setup.py
@@ -5,54 +5,31 @@
"""Generates test runner factory and tests for uiautomator tests."""
import logging
-import os
-
-from pylib import android_commands
-from pylib import constants
-from pylib.base import base_test_result
-from pylib.utils import report_results
import test_package
import test_runner
-def Setup(uiautomator_jar, uiautomator_info_jar, annotations,
- exclude_annotations, test_filter, package_name, build_type, test_data,
- save_perf_json, screenshot_failures, tool, disable_assertions,
- push_deps, cleanup_test_files):
+def Setup(test_options):
"""Runs uiautomator tests on connected device(s).
Args:
- uiautomator_jar: Location of the jar file with the uiautomator test suite.
- uiautomator_info_jar: Info jar accompanying the jar.
- annotations: Annotations for the tests.
- exclude_annotations: Any annotations to exclude from running.
- test_filter: Filter string for tests.
- package_name: Application package name under test.
- build_type: 'Release' or 'Debug'.
- test_data: Location of the test data.
- save_perf_json: Whether or not to save the JSON file from UI perf tests.
- screenshot_failures: Take a screenshot for a test failure
- tool: Name of the Valgrind tool.
- disable_assertions: Whether to disable java assertions on the device.
- push_deps: If True, push all dependencies to the device.
- cleanup_test_files: Whether or not to cleanup test files on device.
+ test_options: A UIAutomatorOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
- test_pkg = test_package.TestPackage(
- uiautomator_jar, uiautomator_info_jar)
- tests = test_pkg._GetAllMatchingTests(
- annotations, exclude_annotations, test_filter)
+ test_pkg = test_package.TestPackage(test_options.uiautomator_jar,
+ test_options.uiautomator_info_jar)
+ tests = test_pkg._GetAllMatchingTests(test_options.annotations,
+ test_options.exclude_annotations,
+ test_options.test_filter)
if not tests:
logging.error('No uiautomator tests to run with current args.')
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
- package_name, build_type, test_data, save_perf_json,
- screenshot_failures, tool, False, disable_assertions, push_deps,
- cleanup_test_files, device, shard_index, test_pkg, [])
+ test_options, device, shard_index, test_pkg, [])
return (TestRunnerFactory, tests)
diff --git a/build/android/pylib/uiautomator/test_options.py b/build/android/pylib/uiautomator/test_options.py
new file mode 100644
index 0000000..ddc2ae0
--- /dev/null
+++ b/build/android/pylib/uiautomator/test_options.py
@@ -0,0 +1,23 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the UIAutomatorOptions named tuple."""
+
+import collections
+
+UIAutomatorOptions = collections.namedtuple('UIAutomatorOptions', [
+ 'build_type',
+ 'tool',
+ 'cleanup_test_files',
+ 'push_deps',
+ 'annotations',
+ 'exclude_annotations',
+ 'test_filter',
+ 'test_data',
+ 'save_perf_json',
+ 'screenshot_failures',
+ 'disable_assertions',
+ 'uiautomator_jar',
+ 'uiautomator_info_jar',
+ 'package_name'])
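The tuple mirrors InstrumentationOptions minus the apk/debugger fields, which the uiautomator runner below fills in with fixed values, plus three uiautomator-specific entries. A quick check (element order of the printed sets may vary):

    from pylib.instrumentation.test_options import InstrumentationOptions
    from pylib.uiautomator.test_options import UIAutomatorOptions

    print set(InstrumentationOptions._fields) - set(UIAutomatorOptions._fields)
    # set(['wait_for_debugger', 'test_apk', 'test_apk_path', 'test_apk_jar_path'])
    print set(UIAutomatorOptions._fields) - set(InstrumentationOptions._fields)
    # set(['uiautomator_jar', 'uiautomator_info_jar', 'package_name'])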
diff --git a/build/android/pylib/uiautomator/test_runner.py b/build/android/pylib/uiautomator/test_runner.py
index 309fa1f..39ed5ec 100644
--- a/build/android/pylib/uiautomator/test_runner.py
+++ b/build/android/pylib/uiautomator/test_runner.py
@@ -4,32 +4,48 @@
"""Class for running uiautomator tests on a single device."""
+from pylib.instrumentation import test_options as instr_test_options
from pylib.instrumentation import test_runner as instr_test_runner
class TestRunner(instr_test_runner.TestRunner):
"""Responsible for running a series of tests connected to a single device."""
- def __init__(self, package_name, build_type, test_data, save_perf_json,
- screenshot_failures, tool, wait_for_debugger,
- disable_assertions, push_deps, cleanup_test_files, device,
- shard_index, test_pkg, ports_to_forward):
+ def __init__(self, test_options, device, shard_index, test_pkg,
+ ports_to_forward):
"""Create a new TestRunner.
Args:
- package_name: Application package name under test.
- See the super class for all other args.
+ test_options: A UIAutomatorOptions object.
+ device: Attached android device.
+ shard_index: Shard index.
+ test_pkg: A TestPackage object.
+ ports_to_forward: A list of port numbers for which to set up forwarders.
+ Can be optionally requested by a test case.
"""
- super(TestRunner, self).__init__(
- build_type, test_data, save_perf_json, screenshot_failures, tool,
- wait_for_debugger, disable_assertions, push_deps, cleanup_test_files,
- device, shard_index, test_pkg, ports_to_forward)
-
- self.package_name = package_name
+ # Create an InstrumentationOptions object to pass to the super class
+    instrumentation_options = instr_test_options.InstrumentationOptions(
+ test_options.build_type,
+ test_options.tool,
+ test_options.cleanup_test_files,
+ test_options.push_deps,
+ test_options.annotations,
+ test_options.exclude_annotations,
+ test_options.test_filter,
+ test_options.test_data,
+ test_options.save_perf_json,
+ test_options.screenshot_failures,
+ test_options.disable_assertions,
+ wait_for_debugger=False,
+        test_apk=None,
+        test_apk_path=None,
+        test_apk_jar_path=None)
+ super(TestRunner, self).__init__(instrumentation_options, device,
+ shard_index, test_pkg, ports_to_forward)
+
+ self.package_name = test_options.package_name
#override
def InstallTestPackage(self):
- self.test_pkg.Install(self.adb)
+ self.test_pkg.Install(self.adb)
#override
def PushDataDeps(self):
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index b51effb..6e2a1e4 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -16,16 +16,18 @@ import os
import shutil
import sys
-from pylib import cmd_helper
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
-from pylib.gtest import setup as gtest_setup
from pylib.gtest import gtest_config
+from pylib.gtest import setup as gtest_setup
+from pylib.gtest import test_options as gtest_test_options
from pylib.host_driven import run_python_tests as python_dispatch
from pylib.instrumentation import setup as instrumentation_setup
+from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.uiautomator import setup as uiautomator_setup
+from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import report_results
from pylib.utils import run_tests_helper
@@ -125,16 +127,13 @@ def ProcessGTestOptions(options):
Args:
options: Command line options.
-
- Returns:
- True if the command should continue.
"""
if options.suite_name == 'help':
print 'Available test suites are:'
for test_suite in (gtest_config.STABLE_TEST_SUITES +
gtest_config.EXPERIMENTAL_TEST_SUITES):
print test_suite
- return False
+ sys.exit(0)
# Convert to a list, assuming all test suites if nothing was specified.
# TODO(gkanwar): Require having a test suite
@@ -142,7 +141,6 @@ def ProcessGTestOptions(options):
options.suite_name = [options.suite_name]
else:
options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
- return True
def AddJavaTestOptions(option_parser):
@@ -253,7 +251,16 @@ def AddInstrumentationTestOptions(option_parser):
def ProcessInstrumentationOptions(options, error_func):
- """Processes options/arguments and populate |options| with defaults."""
+  """Processes options/arguments and populates |options| with defaults.
+
+ Args:
+ options: optparse.Options object.
+ error_func: Function to call with the error message in case of an error.
+
+ Returns:
+ An InstrumentationOptions named tuple which contains all options relevant to
+ instrumentation tests.
+ """
ProcessJavaTestOptions(options, error_func)
@@ -274,6 +281,23 @@ def ProcessInstrumentationOptions(options, error_func):
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
+ return instrumentation_test_options.InstrumentationOptions(
+ options.build_type,
+ options.tool,
+ options.cleanup_test_files,
+ options.push_deps,
+ options.annotations,
+ options.exclude_annotations,
+ options.test_filter,
+ options.test_data,
+ options.save_perf_json,
+ options.screenshot_failures,
+ options.disable_assertions,
+ options.wait_for_debugger,
+ options.test_apk,
+ options.test_apk_path,
+ options.test_apk_jar_path)
+
def AddUIAutomatorTestOptions(option_parser):
"""Adds UI Automator test options to |option_parser|."""
@@ -297,7 +321,16 @@ def AddUIAutomatorTestOptions(option_parser):
def ProcessUIAutomatorOptions(options, error_func):
- """Processes UIAutomator options/arguments."""
+ """Processes UIAutomator options/arguments.
+
+ Args:
+ options: optparse.Options object.
+ error_func: Function to call with the error message in case of an error.
+
+ Returns:
+ A UIAutomatorOptions named tuple which contains all options relevant to
+    uiautomator tests.
+ """
ProcessJavaTestOptions(options, error_func)
@@ -318,18 +351,41 @@ def ProcessUIAutomatorOptions(options, error_func):
options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
'_java.jar')
+ return uiautomator_test_options.UIAutomatorOptions(
+ options.build_type,
+ options.tool,
+ options.cleanup_test_files,
+ options.push_deps,
+ options.annotations,
+ options.exclude_annotations,
+ options.test_filter,
+ options.test_data,
+ options.save_perf_json,
+ options.screenshot_failures,
+ options.disable_assertions,
+ options.uiautomator_jar,
+ options.uiautomator_info_jar,
+ options.package_name)
+
def _RunGTests(options, error_func):
"""Subcommand of RunTestsCommands which runs gtests."""
- if not ProcessGTestOptions(options):
- return 0
+ ProcessGTestOptions(options)
exit_code = 0
for suite_name in options.suite_name:
- runner_factory, tests = gtest_setup.Setup(
- suite_name, options.test_arguments,
- options.timeout, options.cleanup_test_files, options.tool,
- options.build_type, options.push_deps, options.test_filter)
+ # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
+ # the gtest command.
+ gtest_options = gtest_test_options.GTestOptions(
+ options.build_type,
+ options.tool,
+ options.cleanup_test_files,
+ options.push_deps,
+ options.test_filter,
+ options.test_arguments,
+ options.timeout,
+ suite_name)
+ runner_factory, tests = gtest_setup.Setup(gtest_options)
results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, False, options.test_device,
@@ -356,18 +412,13 @@ def _RunGTests(options, error_func):
def _RunInstrumentationTests(options, error_func):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
- ProcessInstrumentationOptions(options, error_func)
+ instrumentation_options = ProcessInstrumentationOptions(options, error_func)
results = base_test_result.TestRunResults()
exit_code = 0
if options.run_java_tests:
- runner_factory, tests = instrumentation_setup.Setup(
- options.test_apk_path, options.test_apk_jar_path, options.annotations,
- options.exclude_annotations, options.test_filter, options.build_type,
- options.test_data, options.save_perf_json, options.screenshot_failures,
- options.tool, options.wait_for_debugger, options.disable_assertions,
- options.push_deps, options.cleanup_test_files)
+ runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)
test_results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, options.wait_for_debugger,
@@ -402,19 +453,13 @@ def _RunInstrumentationTests(options, error_func):
def _RunUIAutomatorTests(options, error_func):
"""Subcommand of RunTestsCommands which runs uiautomator tests."""
- ProcessUIAutomatorOptions(options, error_func)
+ uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
results = base_test_result.TestRunResults()
exit_code = 0
if options.run_java_tests:
- runner_factory, tests = uiautomator_setup.Setup(
- options.uiautomator_jar, options.uiautomator_info_jar,
- options.annotations, options.exclude_annotations, options.test_filter,
- options.package_name, options.build_type, options.test_data,
- options.save_perf_json, options.screenshot_failures, options.tool,
- options.disable_assertions, options.push_deps,
- options.cleanup_test_files)
+ runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
test_results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, False, options.test_device,