author     gkanwar@google.com <gkanwar@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-10 04:57:10 +0000
committer  gkanwar@google.com <gkanwar@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-10 04:57:10 +0000
commit     b38738937b4ae8b133ff7ed9a08f2effc1a4aefa (patch)
tree       99d50c9574d08b40c8ec55d08001ceed3726d149
parent     c28050a0a9bcdeb5e509b9ae20fd17981d9e97f8 (diff)
Updates the test runner script exit codes
The script now returns an exit code that reflects what happened
during the test run. Exit codes:
0 -- normal
1 -- fail/crash
88 -- warning
In addition, the scripts now handle DeviceUnresponsiveError by
returning a warning exit code overall, rather than silently moving
on.
BUG=170477, 258171
Review URL: https://chromiumcodereview.appspot.com/18323020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@210749 0039d316-1c4b-4281-b951-d872f2087c98
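
To illustrate the contract from the consumer side: a buildbot-side wrapper could translate these exit codes into step annotations roughly as below. This is a hypothetical sketch, not code from this change; run_test_step is an invented name, and the annotation strings and the value 88 are assumed to mirror bb_annotations.py and pylib/constants.py in the diff.

    import subprocess

    WARNING_EXIT_CODE = 88  # assumption: mirrors pylib/constants.py

    def run_test_step(cmd):
      # Run the test script and convert its exit code into a buildbot
      # annotation: 0 -> success (no annotation), 88 -> warning,
      # anything else -> failure.
      code = subprocess.call(cmd)
      if code == WARNING_EXIT_CODE:
        print '@@@STEP_WARNINGS@@@'
      elif code != 0:
        print '@@@STEP_FAILURE@@@'
      return code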
-rw-r--r--   build/android/buildbot/bb_annotations.py             46
-rwxr-xr-x   build/android/buildbot/bb_device_steps.py            26
-rwxr-xr-x   build/android/buildbot/bb_host_steps.py              20
-rw-r--r--   build/android/buildbot/bb_utils.py                   10
-rw-r--r--   build/android/pylib/base/base_test_result.py          3
-rw-r--r--   build/android/pylib/base/shard.py                    36
-rw-r--r--   build/android/pylib/base/shard_unittest.py           16
-rw-r--r--   build/android/pylib/browsertests/dispatch.py         22
-rw-r--r--   build/android/pylib/buildbot_report.py                8
-rw-r--r--   build/android/pylib/constants.py                      4
-rw-r--r--   build/android/pylib/gtest/dispatch.py                36
-rw-r--r--   build/android/pylib/gtest/test_runner.py              5
-rw-r--r--   build/android/pylib/host_driven/run_python_tests.py   5
-rw-r--r--   build/android/pylib/instrumentation/dispatch.py       2
-rw-r--r--   build/android/pylib/uiautomator/dispatch.py           2
-rw-r--r--   build/android/pylib/utils/report_results.py           8
-rwxr-xr-x   build/android/run_monkey_test.py                      6
-rwxr-xr-x   build/android/test_runner.py                         60
18 files changed, 189 insertions(+), 126 deletions(-)
diff --git a/build/android/buildbot/bb_annotations.py b/build/android/buildbot/bb_annotations.py
new file mode 100644
index 0000000..059d673
--- /dev/null
+++ b/build/android/buildbot/bb_annotations.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions to print buildbot messages."""
+
+def PrintLink(label, url):
+  """Adds a link with name |label| linking to |url| to current buildbot step.
+
+  Args:
+    label: A string with the name of the label.
+    url: A string of the URL.
+  """
+  print '@@@STEP_LINK@%s@%s@@@' % (label, url)
+
+
+def PrintMsg(msg):
+  """Appends |msg| to the current buildbot step text.
+
+  Args:
+    msg: String to be appended.
+  """
+  print '@@@STEP_TEXT@%s@@@' % msg
+
+
+def PrintSummaryText(msg):
+  """Appends |msg| to main build summary. Visible from waterfall.
+
+  Args:
+    msg: String to be appended.
+  """
+  print '@@@STEP_SUMMARY_TEXT@%s@@@' % msg
+
+
+def PrintError():
+  """Marks the current step as failed."""
+  print '@@@STEP_FAILURE@@@'
+
+
+def PrintWarning():
+  """Marks the current step with a warning."""
+  print '@@@STEP_WARNINGS@@@'
+
+
+def PrintNamedStep(step):
+  print '@@@BUILD_STEP %s@@@' % step
diff --git a/build/android/buildbot/bb_device_steps.py b/build/android/buildbot/bb_device_steps.py
index 74943b4..440c150 100755
--- a/build/android/buildbot/bb_device_steps.py
+++ b/build/android/buildbot/bb_device_steps.py
@@ -11,10 +11,10 @@ import shutil
 import sys
 
 import bb_utils
+import bb_annotations
 
 sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
 from pylib import android_commands
-from pylib import buildbot_report
 from pylib import constants
 from pylib.gtest import gtest_config
@@ -94,7 +94,7 @@ def RebootDevices():
       print '%s failed to startup.' % device
 
   if any(results):
-    buildbot_report.PrintWarning()
+    bb_annotations.PrintWarning()
   else:
     print 'Reboots complete.'
@@ -112,7 +112,7 @@ def RunTestSuites(options, suites):
   if options.asan:
     args.append('--tool=asan')
   for suite in suites:
-    buildbot_report.PrintNamedStep(suite.name)
+    bb_annotations.PrintNamedStep(suite.name)
     cmd = ['build/android/test_runner.py', 'gtest', '-s', suite.name] + args
     if suite.is_suite_exe:
       cmd.append('--exe')
@@ -129,12 +129,12 @@ def RunBrowserTestSuite(options):
     args.append('--release')
   if options.asan:
     args.append('--tool=asan')
-  buildbot_report.PrintNamedStep(constants.BROWSERTEST_SUITE_NAME)
+  bb_annotations.PrintNamedStep(constants.BROWSERTEST_SUITE_NAME)
   RunCmd(['build/android/test_runner.py', 'content_browsertests'] + args)
 
 
 def RunChromeDriverTests(_):
   """Run all the steps for running chromedriver tests."""
-  buildbot_report.PrintNamedStep('chromedriver_annotation')
+  bb_annotations.PrintNamedStep('chromedriver_annotation')
   RunCmd(['chrome/test/chromedriver/run_buildbot_steps.py',
           '--android-package=%s' % constants.CHROMIUM_TEST_SHELL_PACKAGE])
@@ -147,7 +147,7 @@ def InstallApk(options, test, print_step=False):
     print_step: Print a buildbot step
   """
   if print_step:
-    buildbot_report.PrintNamedStep('install_%s' % test.name.lower())
+    bb_annotations.PrintNamedStep('install_%s' % test.name.lower())
   args = ['--apk', test.apk, '--apk_package', test.apk_package]
   if options.target == 'Release':
     args.append('--release')
@@ -162,7 +162,7 @@ def RunInstrumentationSuite(options, test):
     options: options object
     test: An I_TEST namedtuple
   """
-  buildbot_report.PrintNamedStep('%s_instrumentation_tests' % test.name.lower())
+  bb_annotations.PrintNamedStep('%s_instrumentation_tests' % test.name.lower())
 
   InstallApk(options, test)
   args = ['--test-apk', test.test_apk, '--test_data', test.test_data,
@@ -188,7 +188,7 @@ def RunInstrumentationSuite(options, test):
 
 def RunWebkitLint(target):
   """Lint WebKit's TestExpectation files."""
-  buildbot_report.PrintNamedStep('webkit_lint')
+  bb_annotations.PrintNamedStep('webkit_lint')
   RunCmd(['webkit/tools/layout_tests/run_webkit_tests.py',
           '--lint-test-files',
          '--chromium',
@@ -197,7 +197,7 @@ def RunWebkitLint(target):
 
 def RunWebkitLayoutTests(options):
   """Run layout tests on an actual device."""
-  buildbot_report.PrintNamedStep('webkit_tests')
+  bb_annotations.PrintNamedStep('webkit_tests')
   cmd_args = [
       '--no-show-results',
      '--no-new-test-results',
@@ -248,14 +248,14 @@ def ProvisionDevices(options):
   RunCmd(['adb', 'start-server'])
   RunCmd(['sleep', '1'])
 
-  buildbot_report.PrintNamedStep('provision_devices')
+  bb_annotations.PrintNamedStep('provision_devices')
   if options.reboot:
     RebootDevices()
   RunCmd(['build/android/provision_devices.py', '-t', options.target])
 
 
 def DeviceStatusCheck(_):
-  buildbot_report.PrintNamedStep('device_status_check')
+  bb_annotations.PrintNamedStep('device_status_check')
   RunCmd(['build/android/device_status_check.py'], halt_on_failure=True)
@@ -292,7 +292,7 @@ def GetTestStepCmds():
 
 def LogcatDump(options):
   # Print logcat, kill logcat monitor
-  buildbot_report.PrintNamedStep('logcat_dump')
+  bb_annotations.PrintNamedStep('logcat_dump')
   logcat_file = os.path.join(CHROME_SRC, 'out', options.target, 'full_log')
   with open(logcat_file, 'w') as f:
     RunCmd([
@@ -302,7 +302,7 @@ def LogcatDump(options):
 
 
 def GenerateTestReport(options):
-  buildbot_report.PrintNamedStep('test_report')
+  bb_annotations.PrintNamedStep('test_report')
   for report in glob.glob(
       os.path.join(CHROME_SRC, 'out', options.target, 'test_logs', '*.log')):
     RunCmd(['cat', report])
diff --git a/build/android/buildbot/bb_host_steps.py b/build/android/buildbot/bb_host_steps.py
index 43c2d1b..6dbaac1 100755
--- a/build/android/buildbot/bb_host_steps.py
+++ b/build/android/buildbot/bb_host_steps.py
@@ -7,9 +7,9 @@ import os
 import sys
 
 import bb_utils
+import bb_annotations
 
 sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-from pylib import buildbot_report
 from pylib import constants
@@ -26,7 +26,7 @@ def SrcPath(*path):
 
 def CheckWebViewLicenses(_):
-  buildbot_report.PrintNamedStep('check_licenses')
+  bb_annotations.PrintNamedStep('check_licenses')
   RunCmd([SrcPath('android_webview', 'tools', 'webview_licenses.py'), 'scan'],
          warning_code=1)
@@ -37,14 +37,14 @@ def RunHooks(build_type):
   landmine_path = os.path.join(build_path, '.landmines_triggered')
   clobber_env = os.environ.get('BUILDBOT_CLOBBER')
   if clobber_env or os.path.isfile(landmine_path):
-    buildbot_report.PrintNamedStep('Clobber')
+    bb_annotations.PrintNamedStep('Clobber')
     if not clobber_env:
       print 'Clobbering due to triggered landmines:'
       with open(landmine_path) as f:
         print f.read()
     RunCmd(['rm', '-rf', build_path])
 
-  buildbot_report.PrintNamedStep('runhooks')
+  bb_annotations.PrintNamedStep('runhooks')
   RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
@@ -56,17 +56,17 @@ def Compile(options):
          '--target=%s' % options.target,
          '--goma-dir=%s' % bb_utils.GOMA_DIR]
   build_targets = options.build_targets.split(',')
-  buildbot_report.PrintNamedStep('compile')
+  bb_annotations.PrintNamedStep('compile')
   for build_target in build_targets:
     RunCmd(cmd + ['--build-args=%s' % build_target], halt_on_failure=True)
   if options.experimental:
     for compile_target in EXPERIMENTAL_TARGETS:
-      buildbot_report.PrintNamedStep('Experimental Compile %s' % compile_target)
+      bb_annotations.PrintNamedStep('Experimental Compile %s' % compile_target)
       RunCmd(cmd + ['--build-args=%s' % compile_target], flunk_on_failure=False)
 
 
 def ZipBuild(options):
-  buildbot_report.PrintNamedStep('zip_build')
+  bb_annotations.PrintNamedStep('zip_build')
   RunCmd([
       os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py'),
       '--src-dir', constants.DIR_SOURCE_ROOT,
@@ -76,7 +76,7 @@ def ZipBuild(options):
 
 
 def ExtractBuild(options):
-  buildbot_report.PrintNamedStep('extract_build')
+  bb_annotations.PrintNamedStep('extract_build')
   RunCmd(
       [os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py'),
        '--build-dir', SrcPath('build'), '--build-output-dir',
@@ -85,7 +85,7 @@ def ExtractBuild(options):
 
 
 def FindBugs(options):
-  buildbot_report.PrintNamedStep('findbugs')
+  bb_annotations.PrintNamedStep('findbugs')
   build_type = []
   if options.target == 'Release':
     build_type = ['--release-build']
@@ -96,7 +96,7 @@ def FindBugs(options):
 
 
 def BisectPerfRegression(_):
-  buildbot_report.PrintNamedStep('Bisect Perf Regression')
+  bb_annotations.PrintNamedStep('Bisect Perf Regression')
   RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
           '-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)])
   RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
diff --git a/build/android/buildbot/bb_utils.py b/build/android/buildbot/bb_utils.py
index 7813a16..2e28aaa 100644
--- a/build/android/buildbot/bb_utils.py
+++ b/build/android/buildbot/bb_utils.py
@@ -9,8 +9,10 @@ import pipes
 import subprocess
 import sys
 
+import bb_annotations
+
 sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-from pylib import buildbot_report
+from pylib import constants
 
 TESTING = 'BUILDBOT_TESTING' in os.environ
@@ -43,16 +45,16 @@ def SpawnCmd(command, stdout=None):
 
 def RunCmd(command, flunk_on_failure=True, halt_on_failure=False,
-           warning_code=88, stdout=None):
+           warning_code=constants.WARNING_EXIT_CODE, stdout=None):
   """Run a command relative to the chrome source root."""
   code = SpawnCmd(command, stdout).wait()
   print '<', CommandToString(command)
   if code != 0:
     print 'ERROR: process exited with code %d' % code
     if code != warning_code and flunk_on_failure:
-      buildbot_report.PrintError()
+      bb_annotations.PrintError()
     else:
-      buildbot_report.PrintWarning()
+      bb_annotations.PrintWarning()
     # Allow steps to have both halting (i.e. 1) and non-halting exit codes.
     if code != warning_code and halt_on_failure:
       print 'FATAL %d != %d' % (code, warning_code)
diff --git a/build/android/pylib/base/base_test_result.py b/build/android/pylib/base/base_test_result.py
index ba438e1..ebf9e71 100644
--- a/build/android/pylib/base/base_test_result.py
+++ b/build/android/pylib/base/base_test_result.py
@@ -4,7 +4,6 @@
 
 """Module containing base test results classes."""
 
-
 class ResultType(object):
   """Class enumerating test types."""
   PASS = 'PASS'
@@ -22,6 +21,7 @@ class ResultType(object):
 
 class BaseTestResult(object):
   """Base class for a single test result."""
+
   def __init__(self, name, test_type, log=''):
     """Construct a BaseTestResult.
 
@@ -64,6 +64,7 @@ class BaseTestResult(object):
 
 class TestRunResults(object):
   """Set of results for a test run."""
+
   def __init__(self):
     self._results = set()
diff --git a/build/android/pylib/base/shard.py b/build/android/pylib/base/shard.py
index 89b84f6..8c429f7 100644
--- a/build/android/pylib/base/shard.py
+++ b/build/android/pylib/base/shard.py
@@ -8,6 +8,7 @@ import logging
 import threading
 
 from pylib import android_commands
+from pylib import constants
 from pylib import forwarder
 from pylib.utils import reraiser_thread
 from pylib.utils import watchdog_timer
@@ -92,7 +93,7 @@ class _TestCollection(object):
     """Add an test to the collection.
 
     Args:
-      item: A test to add.
+      test: A test to add.
     """
     with self._lock:
       self._tests.append(test)
@@ -117,7 +118,7 @@ class _TestCollection(object):
 
 def _RunTestsFromQueue(runner, test_collection, out_results, watcher,
-                      num_retries):
+                       num_retries):
   """Runs tests from the test_collection until empty using the given runner.
 
   Adds TestRunResults objects to the out_results list and may add tests to the
@@ -150,12 +151,6 @@ def _RunTestsFromQueue(runner, test_collection, out_results, watcher,
       else:
         # All tests passed or retry limit reached. Either way, record results.
         out_results.append(result)
-    except android_commands.errors.DeviceUnresponsiveError:
-      # Device is unresponsive, stop handling tests on this device and ensure
-      # current test gets runs by another device. Don't reraise this exception
-      # on the main thread.
-      test_collection.add(test)
-      return
     except:
       # An unhandleable exception, ensure tests get run by another device and
       # reraise this exception on the main thread.
@@ -199,12 +194,13 @@ def _RunAllTests(runners, tests, num_retries, timeout=None):
     timeout: watchdog timeout in seconds, defaults to the default timeout.
 
   Returns:
-    A TestRunResults object.
+    A tuple of (TestRunResults object, exit code)
   """
   logging.warning('Running %s tests with %s test runners.'
                   % (len(tests), len(runners)))
   tests_collection = _TestCollection([_Test(t) for t in tests])
   results = []
+  exit_code = 0
   watcher = watchdog_timer.WatchdogTimer(timeout)
   workers = reraiser_thread.ReraiserThreadGroup(
       [reraiser_thread.ReraiserThread(
@@ -212,12 +208,21 @@
           [r, tests_collection, results, watcher, num_retries],
           name=r.device[-4:])
       for r in runners])
-  workers.StartAll()
-  workers.JoinAll(watcher)
   run_results = base_test_result.TestRunResults()
+  workers.StartAll()
+
+  # Catch DeviceUnresponsiveErrors and set a warning exit code
+  try:
+    workers.JoinAll(watcher)
+  except android_commands.errors.DeviceUnresponsiveError as e:
+    logging.error(e)
+    exit_code = constants.WARNING_EXIT_CODE
+
   for r in results:
     run_results.AddTestRunResults(r)
-  return run_results
+  if not run_results.DidRunPass():
+    exit_code = constants.ERROR_EXIT_CODE
+  return (run_results, exit_code)
@@ -250,6 +255,7 @@ def _CreateRunners(runner_factory, devices, timeout=None):
 
 def _TearDownRunners(runners, timeout=None):
   """Calls TearDown() for each test runner in parallel.
+
   Args:
     runners: a list of TestRunner objects.
     timeout: watchdog timeout in seconds, defaults to the default timeout.
@@ -280,11 +286,11 @@ def ShardAndRunTests(runner_factory, devices, tests, build_type='Debug',
     num_retries: number of retries for a test.
 
   Returns:
-    A base_test_result.TestRunResults object.
+    A tuple of (base_test_result.TestRunResults object, exit code).
   """
   if not tests:
-    logging.warning('No tests to run.')
-    return base_test_result.TestRunResults()
+    logging.error('No tests to run.')
+    return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
 
   logging.info('Will run %d tests: %s', len(tests), str(tests))
   forwarder.Forwarder.KillHost(build_type)
diff --git a/build/android/pylib/base/shard_unittest.py b/build/android/pylib/base/shard_unittest.py
index 25695be..5f8b990 100644
--- a/build/android/pylib/base/shard_unittest.py
+++ b/build/android/pylib/base/shard_unittest.py
@@ -14,6 +14,7 @@ sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
 # Mock out android_commands.GetAttachedDevices().
 from pylib import android_commands
 android_commands.GetAttachedDevices = lambda: ['0', '1']
+from pylib import constants
 from pylib.utils import watchdog_timer
 
 import base_test_result
@@ -133,8 +134,9 @@ class TestThreadGroupFunctions(unittest.TestCase):
 
   def testRun(self):
     runners = [MockRunner('0'), MockRunner('1')]
-    results = shard._RunAllTests(runners, self.tests, 0)
+    results, exit_code = shard._RunAllTests(runners, self.tests, 0)
     self.assertEqual(len(results.GetPass()), len(self.tests))
+    self.assertEqual(exit_code, 0)
 
   def testTearDown(self):
     runners = [MockRunner('0'), MockRunner('1')]
@@ -144,8 +146,9 @@ class TestThreadGroupFunctions(unittest.TestCase):
 
   def testRetry(self):
     runners = shard._CreateRunners(MockRunnerFail, ['0', '1'])
-    results = shard._RunAllTests(runners, self.tests, 0)
+    results, exit_code = shard._RunAllTests(runners, self.tests, 0)
     self.assertEqual(len(results.GetFail()), len(self.tests))
+    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
 
   def testReraise(self):
     runners = shard._CreateRunners(MockRunnerException, ['0', '1'])
@@ -160,17 +163,20 @@ class TestShard(unittest.TestCase):
     return shard.ShardAndRunTests(runner_factory, ['0', '1'], ['a', 'b', 'c'])
 
   def testShard(self):
-    results = TestShard._RunShard(MockRunner)
+    results, exit_code = TestShard._RunShard(MockRunner)
     self.assertEqual(len(results.GetPass()), 3)
+    self.assertEqual(exit_code, 0)
 
   def testFailing(self):
-    results = TestShard._RunShard(MockRunnerFail)
+    results, exit_code = TestShard._RunShard(MockRunnerFail)
     self.assertEqual(len(results.GetPass()), 0)
     self.assertEqual(len(results.GetFail()), 3)
+    self.assertEqual(exit_code, 0)
 
   def testNoTests(self):
-    results = shard.ShardAndRunTests(MockRunner, ['0', '1'], [])
+    results, exit_code = shard.ShardAndRunTests(MockRunner, ['0', '1'], [])
     self.assertEqual(len(results.GetAll()), 0)
+    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
 
 
 if __name__ == '__main__':
diff --git a/build/android/pylib/browsertests/dispatch.py b/build/android/pylib/browsertests/dispatch.py
index 8457cf2..3867dc3 100644
--- a/build/android/pylib/browsertests/dispatch.py
+++ b/build/android/pylib/browsertests/dispatch.py
@@ -23,7 +23,15 @@ from common import unittest_util
 
 def Dispatch(options):
-  """Dispatches all content_browsertests."""
+  """Dispatches all content_browsertests.
+
+  Args:
+    options: optparse.Options object containing command-line options
+  Returns:
+    A tuple of (base_test_result.TestRunResults object, exit code).
+  Raises:
+    Exception: Failed to reset the test server port.
+  """
 
   attached_devices = []
   if options.test_device:
@@ -75,20 +83,18 @@ def Dispatch(options):
   # TODO(nileshagrawal): remove this abnormally long setup timeout once fewer
   # files are pushed to the devices for content_browsertests: crbug.com/138275
   setup_timeout = 20 * 60  # 20 minutes
-  test_results = shard.ShardAndRunTests(RunnerFactory, attached_devices,
-                                        all_tests, options.build_type,
-                                        setup_timeout=setup_timeout,
-                                        test_timeout=None,
-                                        num_retries=options.num_retries)
+  test_results, exit_code = shard.ShardAndRunTests(
+      RunnerFactory, attached_devices, all_tests, options.build_type,
+      setup_timeout=setup_timeout, test_timeout=None,
+      num_retries=options.num_retries)
   report_results.LogFull(
       results=test_results,
       test_type='Unit test',
       test_package=constants.BROWSERTEST_SUITE_NAME,
       build_type=options.build_type,
       flakiness_server=options.flakiness_dashboard_server)
-  report_results.PrintAnnotation(test_results)
 
-  return len(test_results.GetNotPass())
+  return (test_results, exit_code)
 
 
 def _FilterTests(all_enabled_tests):
diff --git a/build/android/pylib/buildbot_report.py b/build/android/pylib/buildbot_report.py
index 8e7db8d..fe3fcd6 100644
--- a/build/android/pylib/buildbot_report.py
+++ b/build/android/pylib/buildbot_report.py
@@ -44,11 +44,3 @@ def PrintWarning():
 
 def PrintNamedStep(step):
   print '@@@BUILD_STEP %s@@@' % step
-
-
-def PrintStepResultIfNeeded(options, result):
-  if result:
-    if options.buildbot_step_failure:
-      PrintError()
-    else:
-      PrintWarning()
diff --git a/build/android/pylib/constants.py b/build/android/pylib/constants.py
index 2888232..c3ec825 100644
--- a/build/android/pylib/constants.py
+++ b/build/android/pylib/constants.py
@@ -98,3 +98,7 @@ def _GetADBPath():
 
 
 ADB_PATH = _GetADBPath()
+
+# Exit codes
+ERROR_EXIT_CODE = 1
+WARNING_EXIT_CODE = 88
diff --git a/build/android/pylib/gtest/dispatch.py b/build/android/pylib/gtest/dispatch.py
index 3c11c00..cf30ffa 100644
--- a/build/android/pylib/gtest/dispatch.py
+++ b/build/android/pylib/gtest/dispatch.py
@@ -13,6 +13,7 @@ from pylib import android_commands
 from pylib import cmd_helper
 from pylib import constants
 from pylib import ports
+from pylib.base import base_test_result
 from pylib.base import shard
 from pylib.utils import emulator
 from pylib.utils import report_results
@@ -35,6 +36,9 @@ def _FullyQualifiedTestSuites(exe, option_test_suite, build_type):
     Ex. ('content_unittests',
          '/tmp/chrome/src/out/Debug/content_unittests_apk/'
          'content_unittests-debug.apk')
+
+  Raises:
+    Exception: If test suite not found.
   """
   def GetQualifiedSuite(suite):
     if suite.is_suite_exe:
@@ -93,7 +97,8 @@ def GetAllEnabledTests(runner_factory, devices):
   Returns:
     List of all enabled tests.
 
-  Raises Exception if all devices failed.
+  Raises:
+    Exception: If no devices available.
   """
   for device in devices:
     try:
@@ -118,9 +123,12 @@ def _RunATestSuite(options, suite_name):
     suite_name: name of the test suite being run.
 
   Returns:
-    0 if successful, number of failing tests otherwise.
+    A tuple of (base_test_result.TestRunResult object, exit code).
+
+  Raises:
+    Exception: For various reasons including device failure or failing to reset
+      the test server port.
   """
-  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
-
   attached_devices = []
   buildbot_emulators = []
@@ -168,9 +176,9 @@ def _RunATestSuite(options, suite_name):
   tests = [t for t in tests if t]
 
   # Run tests.
-  test_results = shard.ShardAndRunTests(RunnerFactory, attached_devices, tests,
-                                        options.build_type, test_timeout=None,
-                                        num_retries=options.num_retries)
+  test_results, exit_code = shard.ShardAndRunTests(
+      RunnerFactory, attached_devices, tests, options.build_type,
+      test_timeout=None, num_retries=options.num_retries)
 
   report_results.LogFull(
       results=test_results,
@@ -178,12 +186,11 @@ def _RunATestSuite(options, suite_name):
       test_package=suite_name,
       build_type=options.build_type,
       flakiness_server=options.flakiness_dashboard_server)
-  report_results.PrintAnnotation(test_results)
 
   for buildbot_emulator in buildbot_emulators:
     buildbot_emulator.Shutdown()
 
-  return len(test_results.GetNotPass())
+  return (test_results, exit_code)
 
 
 def _ListTestSuites():
@@ -203,7 +210,7 @@ def Dispatch(options):
     options: options for running the tests.
 
   Returns:
-    0 if successful, number of failing tests otherwise.
+    base_test_result.TestRunResults object with the results of running the tests
   """
   if options.test_suite == 'help':
     _ListTestSuites()
@@ -215,13 +222,18 @@ def Dispatch(options):
 
   all_test_suites = _FullyQualifiedTestSuites(options.exe, options.test_suite,
                                               options.build_type)
-  failures = 0
+  results = base_test_result.TestRunResults()
+  exit_code = 0
   for suite_name, suite_path in all_test_suites:
     # Give each test suite its own copy of options.
    test_options = copy.deepcopy(options)
     test_options.test_suite = suite_path
-    failures += _RunATestSuite(test_options, suite_name)
+    test_results, test_exit_code = _RunATestSuite(test_options, suite_name)
+    results.AddTestRunResults(test_results)
+    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+      exit_code = test_exit_code
 
   if options.use_xvfb:
     framebuffer.Stop()
-  return failures
+
+  return (results, exit_code)
diff --git a/build/android/pylib/gtest/test_runner.py b/build/android/pylib/gtest/test_runner.py
index 8ab2f0d..fa9a08c 100644
--- a/build/android/pylib/gtest/test_runner.py
+++ b/build/android/pylib/gtest/test_runner.py
@@ -368,11 +368,6 @@ class TestRunner(base_test_runner.BaseTestRunner):
       self.test_package.ClearApplicationState()
       self.test_package.CreateTestRunnerScript(test, self._test_arguments)
       test_results = self.test_package.RunTestsAndListResults()
-    except errors.DeviceUnresponsiveError as e:
-      # Make sure this device is not attached
-      logging.warning(e)
-      if android_commands.IsDeviceAttached(self.device):
-        raise
     finally:
       self.CleanupSpawningServerState()
     # Calculate unknown test results.
diff --git a/build/android/pylib/host_driven/run_python_tests.py b/build/android/pylib/host_driven/run_python_tests.py
index e36df0c..30a6312 100644
--- a/build/android/pylib/host_driven/run_python_tests.py
+++ b/build/android/pylib/host_driven/run_python_tests.py
@@ -105,7 +105,10 @@ def DispatchPythonTests(options):
 
   sharder = PythonTestSharder(attached_devices, available_tests, options)
   test_results = sharder.RunShardedTests()
-  return test_results
+  if not test_results.DidRunPass():
+    return (test_results, 1)
+
+  return (test_results, 0)
 
 
 def _GetTestModules(python_test_root, is_official_build):
diff --git a/build/android/pylib/instrumentation/dispatch.py b/build/android/pylib/instrumentation/dispatch.py
index d718ea5..df245c73 100644
--- a/build/android/pylib/instrumentation/dispatch.py
+++ b/build/android/pylib/instrumentation/dispatch.py
@@ -26,7 +26,7 @@ def Dispatch(options):
     options: Command line options.
 
   Returns:
-    Test results in a base_test_result.TestRunResults object.
+    A tuple of (base_test_result.TestRunResults object, exit code).
 
   Raises:
     Exception: when there are no attached devices.
diff --git a/build/android/pylib/uiautomator/dispatch.py b/build/android/pylib/uiautomator/dispatch.py
index 82abd84..3837af8 100644
--- a/build/android/pylib/uiautomator/dispatch.py
+++ b/build/android/pylib/uiautomator/dispatch.py
@@ -26,7 +26,7 @@ def Dispatch(options):
     options: Command line options.
 
   Returns:
-    Test results in a base_test_result.TestRunResults object.
+    A tuple of (base_test_result.TestRunResults object, exit code)
 
   Raises:
     Exception: when there are no attached devices.
diff --git a/build/android/pylib/utils/report_results.py b/build/android/pylib/utils/report_results.py
index 83b80de..84bbb4d 100644
--- a/build/android/pylib/utils/report_results.py
+++ b/build/android/pylib/utils/report_results.py
@@ -110,11 +110,3 @@ def LogFull(results, test_type, test_package, annotation=None,
   if flakiness_server:
     _LogToFlakinessDashboard(results, test_type, test_package,
                              flakiness_server)
-
-
-def PrintAnnotation(results):
-  """Print buildbot annotations for test results."""
-  if not results.DidRunPass():
-    buildbot_report.PrintError()
-  else:
-    print 'Step success!'  # No annotation needed
diff --git a/build/android/run_monkey_test.py b/build/android/run_monkey_test.py
index 3ec952d..3160641 100755
--- a/build/android/run_monkey_test.py
+++ b/build/android/run_monkey_test.py
@@ -113,7 +113,11 @@ def DispatchPythonTests(options):
       test_type='Monkey',
       test_package='Monkey',
       build_type=options.build_type)
-  report_results.PrintAnnotation(results)
+  # TODO(gkanwar): After the host-driven tests have been refactored, they
+  # should use the common exit code system (part of pylib/base/shard.py)
+  if not results.DidRunPass():
+    return 1
+  return 0
 
 
 def main():
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 66e70d8..08a0be3 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -108,19 +108,6 @@ def AddCommonOptions(option_parser):
                            help=('Do not push dependencies to the device. '
                                  'Use this at own risk for speeding up test '
                                  'execution on local machine.'))
-  # TODO(gkanwar): This option is deprecated. Remove it in the future.
-  option_parser.add_option('--exit-code', action='store_true',
-                           help=('(DEPRECATED) If set, the exit code will be '
-                                 'total number of failures.'))
-  # TODO(gkanwar): This option is deprecated. It is currently used to run tests
-  # with the FlakyTest annotation to prevent the bots going red downstream. We
-  # should instead use exit codes and let the Buildbot scripts deal with test
-  # failures appropriately. See crbug.com/170477.
-  option_parser.add_option('--buildbot-step-failure',
-                           action='store_true',
-                           help=('(DEPRECATED) If present, will set the '
-                                 'buildbot status as STEP_FAILURE, otherwise '
-                                 'as STEP_WARNINGS when test(s) fail.'))
   option_parser.add_option('-d', '--device', dest='test_device',
                            help=('Target device for the test suite '
                                  'to run on.'))
@@ -376,25 +363,35 @@ def RunTestsCommand(command, options, args, option_parser):
   Returns:
     Integer indicated exit code.
+
+  Raises:
+    Exception: Unknown command name passed in, or an exception from an
+      individual test runner.
   """
   ProcessCommonOptions(options)
 
-  total_failed = 0
   if command == 'gtest':
     # TODO(gkanwar): See the emulator TODO above -- this call should either go
     # away or become generalized.
     ProcessEmulatorOptions(options)
-    total_failed = gtest_dispatch.Dispatch(options)
+    results, exit_code = gtest_dispatch.Dispatch(options)
   elif command == 'content_browsertests':
-    total_failed = browsertests_dispatch.Dispatch(options)
+    results, exit_code = browsertests_dispatch.Dispatch(options)
   elif command == 'instrumentation':
     ProcessInstrumentationOptions(options, option_parser.error)
     results = base_test_result.TestRunResults()
+    exit_code = 0
     if options.run_java_tests:
-      results.AddTestRunResults(instrumentation_dispatch.Dispatch(options))
+      test_results, exit_code = instrumentation_dispatch.Dispatch(options)
+      results.AddTestRunResults(test_results)
     if options.run_python_tests:
-      results.AddTestRunResults(python_dispatch.DispatchPythonTests(options))
+      test_results, test_exit_code = (python_dispatch.
+                                      DispatchPythonTests(options))
+      results.AddTestRunResults(test_results)
+      # Only allow exit code escalation
+      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+        exit_code = test_exit_code
     report_results.LogFull(
         results=results,
         test_type='Instrumentation',
@@ -402,14 +399,20 @@ def RunTestsCommand(command, options, args, option_parser):
         annotation=options.annotations,
         build_type=options.build_type,
         flakiness_server=options.flakiness_dashboard_server)
-    total_failed += len(results.GetNotPass())
   elif command == 'uiautomator':
     ProcessUIAutomatorOptions(options, option_parser.error)
     results = base_test_result.TestRunResults()
+    exit_code = 0
     if options.run_java_tests:
-      results.AddTestRunResults(uiautomator_dispatch.Dispatch(options))
+      test_results, exit_code = uiautomator_dispatch.Dispatch(options)
+      results.AddTestRunResults(test_results)
     if options.run_python_tests:
-      results.AddTestRunResults(python_dispatch.Dispatch(options))
+      test_results, test_exit_code = (python_dispatch.
+                                      DispatchPythonTests(options))
+      results.AddTestRunResults(test_results)
+      # Only allow exit code escalation
+      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+        exit_code = test_exit_code
     report_results.LogFull(
         results=results,
         test_type='UIAutomator',
@@ -417,11 +420,10 @@ def RunTestsCommand(command, options, args, option_parser):
         annotation=options.annotations,
         build_type=options.build_type,
         flakiness_server=options.flakiness_dashboard_server)
-    total_failed += len(results.GetNotPass())
   else:
     raise Exception('Unknown test type state')
 
-  return total_failed
+  return exit_code
 
 
 def HelpCommand(command, options, args, option_parser):
@@ -504,6 +506,7 @@ class CommandOptionParser(optparse.OptionParser):
       return '\nExample:\n  %s\n' % self.example
     return ''
 
+
 def main(argv):
   option_parser = CommandOptionParser(
       usage='Usage: %prog <command> [options]',
@@ -515,18 +518,9 @@ def main(argv):
   command = argv[1]
   VALID_COMMANDS[command].add_options_func(option_parser)
   options, args = option_parser.parse_args(argv)
-  exit_code = VALID_COMMANDS[command].run_command_func(
+  return VALID_COMMANDS[command].run_command_func(
       command, options, args, option_parser)
 
-  # Failures of individual test suites are communicated by printing a
-  # STEP_FAILURE message.
-  # Returning a success exit status also prevents the buildbot from incorrectly
-  # marking the last suite as failed if there were failures in other suites in
-  # the batch (this happens because the exit status is a sum of all failures
-  # from all suites, but the buildbot associates the exit status only with the
-  # most recent step).
-  return exit_code
-
 
 if __name__ == '__main__':
   sys.exit(main(sys.argv))
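
A closing note on the pattern this change introduces: each dispatcher now returns a (TestRunResults, exit code) tuple, and callers merge exit codes so that an error is never downgraded by a later warning or success. A minimal standalone sketch of that merge rule follows; escalate_exit_code is a hypothetical helper name, and the constants are assumed to mirror pylib/constants.py.

    ERROR_EXIT_CODE = 1    # assumption: mirrors pylib/constants.py
    WARNING_EXIT_CODE = 88

    def escalate_exit_code(current, new):
      # Mirrors the 'Only allow exit code escalation' checks above: once an
      # error is recorded it is sticky; otherwise any nonzero code (warning
      # or error) replaces the current value.
      if current == ERROR_EXIT_CODE:
        return current
      return new or current

    # Example: a warning followed by an error stays an error.
    code = escalate_exit_code(0, WARNING_EXIT_CODE)   # -> 88
    code = escalate_exit_code(code, ERROR_EXIT_CODE)  # -> 1
    code = escalate_exit_code(code, 0)                # -> 1 (sticky)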