author    | frankf@chromium.org <frankf@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-01-23 19:12:16 +0000
committer | frankf@chromium.org <frankf@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-01-23 19:12:16 +0000
commit    | 5af39e2cff3afbe376c8e1a3efb657618cc560e5 (patch)
tree      | 5b5410a27dbb4a8a73d3a112ddd5077804c107ab /build/android
parent    | b7e1aab1f34b1082db8613892889d9753d8af356 (diff)
download  | chromium_src-5af39e2cff3afbe376c8e1a3efb657618cc560e5.zip
          | chromium_src-5af39e2cff3afbe376c8e1a3efb657618cc560e5.tar.gz
          | chromium_src-5af39e2cff3afbe376c8e1a3efb657618cc560e5.tar.bz2
[Android] Keep track of unknown test results at the TestRunner layer.
If the entire gtest run crashes/fails/times out, the tests that
were not run are marked as 'unknown'. These tests are reported
by GetAllBroken() and are retried by BaseTestSharder.
Get rid of the overall_* flags, which are now redundant.
BUG=171466
Review URL: https://codereview.chromium.org/11896050
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@178345 0039d316-1c4b-4281-b951-d872f2087c98
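For context, the bookkeeping described in the commit message can be sketched as follows. This is a minimal, self-contained illustration, not the patch itself: `BaseTestResult`/`TestResults` below are simplified stand-ins for the real pylib classes, and `MarkUnknown` is a hypothetical helper standing in for the `finally` block the patch adds to `SingleTestRunner`.

```python
# Simplified stand-ins for pylib's BaseTestResult/TestResults (illustration only).
class BaseTestResult(object):
  def __init__(self, name, log):
    self.name = name
    self.log = log


class TestResults(object):
  def __init__(self):
    self.ok = []
    self.failed = []
    self.crashed = []
    self.timed_out = []
    self.unknown = []

  def GetAllBroken(self):
    """Everything that did not pass, including tests that never ran."""
    return self.failed + self.crashed + self.unknown + self.timed_out

  def GetAll(self):
    return self.ok + self.GetAllBroken()


def MarkUnknown(results, gtest_filter):
  """Hypothetical helper: mark filter entries with no recorded result as 'unknown'."""
  all_tests = set(gtest_filter.split(':'))
  ran = set(t.name for t in results.GetAll())
  results.unknown = [BaseTestResult(name, '') for name in all_tests - ran]


if __name__ == '__main__':
  results = TestResults()
  results.ok = [BaseTestResult('FooTest.Bar', '')]
  # Suppose the runner crashed before FooTest.Baz ever started.
  MarkUnknown(results, 'FooTest.Bar:FooTest.Baz')
  print [t.name for t in results.unknown]         # ['FooTest.Baz']
  print [t.name for t in results.GetAllBroken()]  # what the sharder retries
```

The point is that a test which never produced any result still shows up in GetAllBroken(), so it is retried instead of being silently dropped.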
Diffstat (limited to 'build/android')
-rw-r--r-- | build/android/pylib/base/base_test_sharder.py                     | 17
-rw-r--r-- | build/android/pylib/base/test_result.py                           | 46
-rw-r--r-- | build/android/pylib/gtest/single_test_runner.py                   | 18
-rw-r--r-- | build/android/pylib/gtest/test_package.py                         | 10
-rw-r--r-- | build/android/pylib/utils/flakiness_dashboard_results_uploader.py |  5
-rwxr-xr-x | build/android/run_tests.py                                        |  3
6 files changed, 37 insertions, 62 deletions
diff --git a/build/android/pylib/base/base_test_sharder.py b/build/android/pylib/base/base_test_sharder.py
index e33ff87..bce273d 100644
--- a/build/android/pylib/base/base_test_sharder.py
+++ b/build/android/pylib/base/base_test_sharder.py
@@ -47,6 +47,13 @@ class BaseTestSharder(object):
     self.attached_devices = attached_devices
     # Worst case scenario: a device will drop offline per run, so we need
     # to retry until we're out of devices.
+
+    # TODO(frankf): There are two sources of flakiness:
+    # 1. Device flakiness
+    # 2. Test/product flakiness
+    # We should differentiate between these. Otherwise, blindly retrying tests
+    # might mask test/product flakiness. For type 2, we should follow the
+    # general chrome best practices.
     self.retries = len(self.attached_devices)
     self.tests = []
     self.build_type = build_type
@@ -90,6 +97,8 @@ class BaseTestSharder(object):
     self._KillHostForwarder()
     for retry in xrange(self.retries):
       logging.warning('Try %d of %d', retry + 1, self.retries)
+      logging.warning('Attempting to run %d tests: %s'
+                      % (len(self.tests), self.tests))
       self.SetupSharding(self.tests)
       test_runners = []
 
@@ -128,9 +137,7 @@ class BaseTestSharder(object):
       # Retry on devices that didn't have any exception.
       self.attached_devices = list(retry_devices)
 
-      # TODO(frankf): Fix the retry logic:
-      # - GetAllBroken() should report all tests not ran.
-      # - Do not break TestResults encapsulation.
+      # TODO(frankf): Do not break TestResults encapsulation.
       if (retry == self.retries - 1 or
           len(self.attached_devices) == 0):
         all_passed = final_results.ok + test_results.ok
@@ -139,10 +146,6 @@ class BaseTestSharder(object):
         break
       else:
         final_results.ok += test_results.ok
-        final_results.overall_timed_out = (final_results.overall_timed_out or
-                                           test_results.overall_timed_out)
-        final_results.overall_fail = (final_results.overall_fail or
-                                      test_results.overall_fail)
 
         self.tests = []
         for t in test_results.GetAllBroken():
diff --git a/build/android/pylib/base/test_result.py b/build/android/pylib/base/test_result.py
index ee2421e..9d85f74 100644
--- a/build/android/pylib/base/test_result.py
+++ b/build/android/pylib/base/test_result.py
@@ -55,22 +55,15 @@ class TestResults(object):
     self.crashed = []
     self.unknown = []
     self.timed_out = []
-    self.overall_timed_out = False
-    self.overall_fail = False
     self.device_exception = None
 
   @staticmethod
-  def FromRun(ok=None, failed=None, crashed=None, timed_out=None,
-              overall_timed_out=False, overall_fail=False,
-              device_exception=None):
+  def FromRun(ok=None, failed=None, crashed=None, timed_out=None):
     ret = TestResults()
     ret.ok = ok or []
     ret.failed = failed or []
     ret.crashed = crashed or []
     ret.timed_out = timed_out or []
-    ret.overall_timed_out = overall_timed_out
-    ret.overall_fail = overall_fail
-    ret.device_exception = device_exception
     return ret
 
   @staticmethod
@@ -83,10 +76,6 @@ class TestResults(object):
       ret.crashed += t.crashed
       ret.unknown += t.unknown
       ret.timed_out += t.timed_out
-      if t.overall_timed_out:
-        ret.overall_timed_out = True
-      if t.overall_fail:
-        ret.overall_fail = True
     return ret
 
   @staticmethod
@@ -128,9 +117,13 @@ class TestResults(object):
         logging.critical(t.log)
 
   def GetAllBroken(self):
-    """Returns the all broken tests."""
+    """Returns all the broken tests."""
     return self.failed + self.crashed + self.unknown + self.timed_out
 
+  def GetAll(self):
+    """Returns all the tests."""
+    return self.ok + self.GetAllBroken()
+
   def _LogToFile(self, test_type, test_suite, build_type):
     """Log results to local files which can be used for aggregation later."""
     # TODO(frankf): Report tests that failed to run here too.
@@ -201,7 +194,7 @@ class TestResults(object):
       logging.error(e)
 
   def LogFull(self, test_type, test_package, annotation=None,
-              build_type='Debug', all_tests=None, flakiness_server=None):
+              build_type='Debug', flakiness_server=None):
     """Log the tests results for the test suite.
 
     The results will be logged three different ways:
@@ -217,9 +210,6 @@ class TestResults(object):
       annotation: If instrumenation test type, this is a list of annotations
         (e.g. ['Smoke', 'SmallTest']).
       build_type: Release/Debug
-      all_tests: A list of all tests that were supposed to run.
-        This is used to determine which tests have failed to run.
-        If None, we assume all tests ran.
       flakiness_server: If provider, upload the results to flakiness
         dashboard with this URL.
     """
@@ -242,31 +232,20 @@ class TestResults(object):
       logging.critical('Passed')
 
     # Summarize in the test output.
-    logging.critical('*' * 80)
-    summary = ['Summary:\n']
-    if all_tests:
-      summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)]
-    num_tests_ran = (len(self.ok) + len(self.failed) +
-                     len(self.crashed) + len(self.unknown) +
-                     len(self.timed_out))
+    num_tests_ran = len(self.GetAll())
     tests_passed = [t.name for t in self.ok]
     tests_failed = [t.name for t in self.failed]
     tests_crashed = [t.name for t in self.crashed]
-    tests_unknown = [t.name for t in self.unknown]
     tests_timed_out = [t.name for t in self.timed_out]
+    tests_unknown = [t.name for t in self.unknown]
+    logging.critical('*' * 80)
+    summary = ['Summary:\n']
     summary += ['RAN=%d\n' % (num_tests_ran),
                 'PASSED=%d\n' % len(tests_passed),
                 'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                 'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out),
                 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
-    if all_tests and num_tests_ran != len(all_tests):
-      # Add the list of tests we failed to run.
-      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
-                                 set(tests_failed) - set(tests_crashed) -
-                                 set(tests_unknown) - set(tests_timed_out))
-      summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
-                                             tests_failed_to_run)]
     summary_string = ''.join(summary)
     logging.critical(summary_string)
     logging.critical('*' * 80)
@@ -285,8 +264,7 @@ class TestResults(object):
 
   def PrintAnnotation(self):
     """Print buildbot annotations for test results."""
-    if (self.failed or self.crashed or self.overall_fail or
-        self.overall_timed_out):
+    if self.GetAllBroken():
       buildbot_report.PrintError()
     else:
       print 'Step success!'  # No annotation needed
diff --git a/build/android/pylib/gtest/single_test_runner.py b/build/android/pylib/gtest/single_test_runner.py
index 43959ee..61521e0 100644
--- a/build/android/pylib/gtest/single_test_runner.py
+++ b/build/android/pylib/gtest/single_test_runner.py
@@ -253,17 +253,17 @@ class SingleTestRunner(BaseTestRunner):
       self.test_results = self.test_package.RunTestsAndListResults()
     except errors.DeviceUnresponsiveError as e:
       # Make sure this device is not attached
+      logging.warning(e)
       if android_commands.IsDeviceAttached(self.device):
         raise e
-
-      # TODO(frankf): We should report these as "skipped" not "failures".
-      # Wrap the results
-      logging.warning(e)
-      failed_tests = []
-      for t in self._gtest_filter.split(':'):
-        failed_tests += [BaseTestResult(t, '')]
-      self.test_results = TestResults.FromRun(
-          failed=failed_tests, device_exception=self.device)
+      self.test_results.device_exception = device_exception
+      # Calculate unknown test results.
+    finally:
+      # TODO(frankf): Do not break TestResults encapsulation.
+      all_tests = set(self._gtest_filter.split(':'))
+      all_tests_ran = set([t.name for t in self.test_results.GetAll()])
+      unknown_tests = all_tests - all_tests_ran
+      self.test_results.unknown = [BaseTestResult(t, '') for t in unknown_tests]
 
     return self.test_results
 
diff --git a/build/android/pylib/gtest/test_package.py b/build/android/pylib/gtest/test_package.py
index cf325c0..d5e14f7 100644
--- a/build/android/pylib/gtest/test_package.py
+++ b/build/android/pylib/gtest/test_package.py
@@ -122,8 +122,6 @@ class TestPackage(object):
     failed_tests = []
     crashed_tests = []
     timed_out_tests = []
-    overall_fail = False
-    overall_timed_out = False
 
     # Test case statuses.
     re_run = re.compile('\[ RUN \] ?(.*)\r\n')
@@ -146,7 +144,6 @@ class TestPackage(object):
         if found == 1:  # re_passed
           break
         elif found == 2:  # re_runner_fail
-          overall_fail = True
           break
         else:  # re_run
           if self.dump_debug_info:
@@ -159,7 +156,6 @@ class TestPackage(object):
             ok_tests += [BaseTestResult(full_test_name, p.before)]
           elif found == 2:  # re_crash
             crashed_tests += [BaseTestResult(full_test_name, p.before)]
-            overall_fail = True
             break
           else:  # re_fail
             failed_tests += [BaseTestResult(full_test_name, p.before)]
@@ -169,7 +165,6 @@ class TestPackage(object):
     except pexpect.TIMEOUT:
       logging.error('Test terminated after %d second timeout.',
                     self.timeout)
-      overall_timed_out = True
       if full_test_name:
         timed_out_tests += [BaseTestResult(full_test_name, p.before)]
     finally:
@@ -180,10 +175,7 @@ class TestPackage(object):
         logging.critical(
             'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
             ret_code, p.before, p.after)
-        overall_fail = True
 
     # Create TestResults and return
     return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
-                               crashed=crashed_tests, timed_out=timed_out_tests,
-                               overall_fail=overall_fail,
-                               overall_timed_out=overall_timed_out)
+                               crashed=crashed_tests, timed_out=timed_out_tests)
diff --git a/build/android/pylib/utils/flakiness_dashboard_results_uploader.py b/build/android/pylib/utils/flakiness_dashboard_results_uploader.py
index 8a47e94..bfd9196 100644
--- a/build/android/pylib/utils/flakiness_dashboard_results_uploader.py
+++ b/build/android/pylib/utils/flakiness_dashboard_results_uploader.py
@@ -151,13 +151,16 @@ class ResultsUploader(object):
     self._test_results_map = {}
 
   def AddResults(self, test_results):
+    # TODO(frankf): Differentiate between fail/crash/timeouts.
     conversion_map = [
         (test_results.ok, False,
          json_results_generator.JSONResultsGeneratorBase.PASS_RESULT),
         (test_results.failed, True,
          json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.crashed, True,
-         "C"),
+         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
+        (test_results.timed_out, True,
+         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.unknown, True,
         json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT),
     ]
diff --git a/build/android/run_tests.py b/build/android/run_tests.py
index 14d54d1..5c80bf3 100755
--- a/build/android/run_tests.py
+++ b/build/android/run_tests.py
@@ -196,7 +196,6 @@ class TestSharder(BaseTestSharder):
         test_type='Unit test',
         test_package=test_runners[0].test_package.test_suite_basename,
         build_type=self.build_type,
-        all_tests=self.all_tests,
        flakiness_server=self.flakiness_server)
     test_results.PrintAnnotation()
 
@@ -265,7 +264,7 @@ def _RunATestSuite(options):
   for buildbot_emulator in buildbot_emulators:
     buildbot_emulator.Shutdown()
 
-  return len(test_results.failed)
+  return len(test_results.GetAllBroken())
 
 def Dispatch(options):
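To tie the pieces together, here is a rough sketch of the retry behavior the commit message refers to. It reuses the simplified `TestResults` from the earlier sketch; `run_shard` is a placeholder callable, not the real `BaseTestSharder` API, and device bookkeeping is omitted.

```python
def RunWithRetries(test_names, retries, run_shard):
  """Keep retrying whatever GetAllBroken() reports, up to 'retries' attempts."""
  final_ok = []
  tests = list(test_names)
  for attempt in xrange(retries):
    results = run_shard(tests)        # one attempt over the remaining tests
    final_ok += results.ok
    broken = results.GetAllBroken()   # failed + crashed + unknown + timed_out
    if not broken or attempt == retries - 1:
      break
    # Since a crashed or timed-out run now marks never-started tests as
    # 'unknown', they are retried here instead of being silently dropped.
    tests = [t.name for t in broken]
  return final_ok
```

With 'unknown' folded into GetAllBroken(), the overall_fail/overall_timed_out flags are no longer needed to decide whether to retry or to flag the buildbot step as failed.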