author     phajdan.jr@chromium.org <phajdan.jr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-12 00:09:27 +0000
committer  phajdan.jr@chromium.org <phajdan.jr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-12 00:09:27 +0000
commit     2b8de11a43f1abc73cb9242f4fa5c5db91eb740e (patch)
tree       ec9ffa413e97d36f130021d14299b8087fb4e482 /base
parent     4b616c0c01b45caf88f0683a68c4a10c045b8b98 (diff)
GTTF: Test launcher - correctly handle non-zero exit code with all tests passing

BUG=317752
R=earthdok@chromium.org

Review URL: https://codereview.chromium.org/66293006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@234338 0039d316-1c4b-4281-b951-d872f2087c98
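
In short: when the results file says every test in a batch passed but the child process still exited with a non-zero code (this happens e.g. under memory tools that report leaks at shutdown), the launcher now marks a lone test as TEST_FAILURE_ON_EXIT and reruns a multi-test batch one test at a time until the culprit is isolated. A minimal standalone sketch of that policy follows; the types and function name here are made up for illustration and are not the real launcher API.

// Standalone sketch of the policy this commit adds; the real logic lives in
// ProcessTestResults() in unit_test_launcher.cc below. Names here (Result,
// ResolveNonZeroExitCode) are illustrative, not Chromium APIs.
#include <cstddef>
#include <string>
#include <vector>

enum Status { SUCCESS, FAILURE, FAILURE_ON_EXIT };

struct Result {
  std::string name;
  Status status;
};

// Returns true when |results| can be reported as-is. Returns false and fills
// |tests_to_relaunch| when the batch must be rerun one test per process to
// find out which test caused the non-zero exit code.
bool ResolveNonZeroExitCode(int exit_code,
                            std::vector<Result>* results,
                            std::vector<std::string>* tests_to_relaunch) {
  bool all_passed = true;
  for (size_t i = 0; i < results->size(); i++) {
    if ((*results)[i].status != SUCCESS)
      all_passed = false;
  }
  if (!all_passed || exit_code == 0)
    return true;  // Nothing unusual: report the parsed results directly.

  if (results->size() == 1) {
    // Only one test ran, so it must be responsible for the exit code.
    (*results)[0].status = FAILURE_ON_EXIT;
    return true;
  }

  // Several tests passed yet the process failed (e.g. a memory tool reported
  // a leak at shutdown). Discard the results and rerun every test in its own
  // process; each rerun then hits the single-test branch above.
  for (size_t i = 0; i < results->size(); i++)
    tests_to_relaunch->push_back((*results)[i].name);
  return false;
}
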
Diffstat (limited to 'base')
-rw-r--r--  base/test/launcher/test_result.cc           |  2
-rw-r--r--  base/test/launcher/test_result.h            | 17
-rw-r--r--  base/test/launcher/test_results_tracker.cc  |  6
-rw-r--r--  base/test/launcher/unit_test_launcher.cc    | 79
4 files changed, 80 insertions(+), 24 deletions(-)
diff --git a/base/test/launcher/test_result.cc b/base/test/launcher/test_result.cc
index 39b641e..70d7a80 100644
--- a/base/test/launcher/test_result.cc
+++ b/base/test/launcher/test_result.cc
@@ -22,6 +22,8 @@ std::string TestResult::StatusAsString() const {
return "SUCCESS";
case TEST_FAILURE:
return "FAILURE";
+ case TEST_FAILURE_ON_EXIT:
+ return "FAILURE_ON_EXIT";
case TEST_CRASH:
return "CRASH";
case TEST_TIMEOUT:
diff --git a/base/test/launcher/test_result.h b/base/test/launcher/test_result.h
index cbae3d2..b61cdd4 100644
--- a/base/test/launcher/test_result.h
+++ b/base/test/launcher/test_result.h
@@ -14,12 +14,13 @@ namespace base {
// Structure containing result of a single test.
struct TestResult {
enum Status {
- TEST_UNKNOWN, // Status not set.
- TEST_SUCCESS, // Test passed.
- TEST_FAILURE, // Assertion failure (think EXPECT_TRUE, not DCHECK).
- TEST_TIMEOUT, // Test timed out and was killed.
- TEST_CRASH, // Test crashed (includes CHECK/DCHECK failures).
- TEST_SKIPPED, // Test skipped (not run at all).
+ TEST_UNKNOWN, // Status not set.
+ TEST_SUCCESS, // Test passed.
+ TEST_FAILURE, // Assertion failure (think EXPECT_TRUE, not DCHECK).
+ TEST_FAILURE_ON_EXIT, // Test passed but executable exit code was non-zero.
+ TEST_TIMEOUT, // Test timed out and was killed.
+ TEST_CRASH, // Test crashed (includes CHECK/DCHECK failures).
+ TEST_SKIPPED, // Test skipped (not run at all).
};
TestResult();
@@ -38,7 +39,9 @@ struct TestResult {
// normally, possibly with an exit code indicating failure, but didn't crash
// or time out in the middle of the test).
bool completed() const {
- return status == TEST_SUCCESS || status == TEST_FAILURE;
+ return status == TEST_SUCCESS ||
+ status == TEST_FAILURE ||
+ status == TEST_FAILURE_ON_EXIT;
}
// Full name of the test (e.g. "A.B").
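
A quick usage sketch of the new enum value (assuming base/test/launcher/test_result.h and base/logging.h are included; this snippet is illustrative and not part of the change):

// The new status counts as a "completed" run: the test itself finished and
// only the process exit code was wrong.
base::TestResult result;
result.full_name = "A.B";
result.status = base::TestResult::TEST_FAILURE_ON_EXIT;
DCHECK(result.completed());
DCHECK_EQ("FAILURE_ON_EXIT", result.StatusAsString());
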
diff --git a/base/test/launcher/test_results_tracker.cc b/base/test/launcher/test_results_tracker.cc
index 0760dff..905b0f1 100644
--- a/base/test/launcher/test_results_tracker.cc
+++ b/base/test/launcher/test_results_tracker.cc
@@ -175,6 +175,9 @@ void TestResultsTracker::PrintSummaryOfCurrentIteration() const {
PrintTests(tests_by_status[TestResult::TEST_FAILURE].begin(),
tests_by_status[TestResult::TEST_FAILURE].end(),
"failed");
+ PrintTests(tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].begin(),
+ tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].end(),
+ "failed on exit");
PrintTests(tests_by_status[TestResult::TEST_TIMEOUT].begin(),
tests_by_status[TestResult::TEST_TIMEOUT].end(),
"timed out");
@@ -211,6 +214,9 @@ void TestResultsTracker::PrintSummaryOfAllIterations() const {
PrintTests(tests_by_status[TestResult::TEST_FAILURE].begin(),
tests_by_status[TestResult::TEST_FAILURE].end(),
"failed");
+ PrintTests(tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].begin(),
+ tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].end(),
+ "failed on exit");
PrintTests(tests_by_status[TestResult::TEST_TIMEOUT].begin(),
tests_by_status[TestResult::TEST_TIMEOUT].end(),
"timed out");
diff --git a/base/test/launcher/unit_test_launcher.cc b/base/test/launcher/unit_test_launcher.cc
index 7ab96fc..c5dbc4d 100644
--- a/base/test/launcher/unit_test_launcher.cc
+++ b/base/test/launcher/unit_test_launcher.cc
@@ -228,17 +228,23 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
bool was_timeout,
const std::string& output) {
DCHECK(thread_checker_.CalledOnValidThread());
- std::vector<std::string> tests_to_relaunch_after_interruption;
+ std::vector<std::string> tests_to_relaunch;
ProcessTestResults(callback_state.test_launcher,
callback_state.test_names,
callback_state.output_file,
output,
exit_code,
was_timeout,
- &tests_to_relaunch_after_interruption);
-
- RunBatch(callback_state.test_launcher,
- tests_to_relaunch_after_interruption);
+ &tests_to_relaunch);
+
+ // Relaunch requested tests in parallel, but only use single
+ // test per batch for more precise results (crashes, test passes
+ // but non-zero exit codes etc).
+ for (size_t i = 0; i < tests_to_relaunch.size(); i++) {
+ std::vector<std::string> batch;
+ batch.push_back(tests_to_relaunch[i]);
+ RunBatch(callback_state.test_launcher, batch);
+ }
// The temporary file's directory is also temporary.
DeleteFile(callback_state.output_file.DirName(), true);
@@ -251,7 +257,7 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
bool was_timeout,
const std::string& output) {
DCHECK(thread_checker_.CalledOnValidThread());
- std::vector<std::string> tests_to_relaunch_after_interruption;
+ std::vector<std::string> tests_to_relaunch;
bool called_any_callbacks =
ProcessTestResults(callback_state.test_launcher,
callback_state.test_names,
@@ -259,11 +265,11 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
output,
exit_code,
was_timeout,
- &tests_to_relaunch_after_interruption);
+ &tests_to_relaunch);
// There is only one test, there cannot be other tests to relaunch
// due to a crash.
- DCHECK(tests_to_relaunch_after_interruption.empty());
+ DCHECK(tests_to_relaunch.empty());
// There is only one test, we should have called back with its result.
DCHECK(called_any_callbacks);
@@ -286,7 +292,7 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
const std::string& output,
int exit_code,
bool was_timeout,
- std::vector<std::string>* tests_to_relaunch_after_interruption) {
+ std::vector<std::string>* tests_to_relaunch) {
std::vector<TestResult> test_results;
bool crashed = false;
bool have_test_results =
@@ -303,6 +309,9 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
bool had_interrupted_test = false;
+ // Results to be reported back to the test launcher.
+ std::vector<TestResult> final_results;
+
for (size_t i = 0; i < test_names.size(); i++) {
if (ContainsKey(results_map, test_names[i])) {
TestResult test_result = results_map[test_names[i]];
@@ -329,10 +338,9 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
}
test_result.output_snippet =
GetTestOutputSnippet(test_result, output);
- test_launcher->OnTestFinished(test_result);
- called_any_callback = true;
+ final_results.push_back(test_result);
} else if (had_interrupted_test) {
- tests_to_relaunch_after_interruption->push_back(test_names[i]);
+ tests_to_relaunch->push_back(test_names[i]);
} else {
// TODO(phajdan.jr): Explicitly pass the info that the test didn't
// run for a mysterious reason.
@@ -342,17 +350,54 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
test_result.status = TestResult::TEST_UNKNOWN;
test_result.output_snippet =
GetTestOutputSnippet(test_result, output);
- test_launcher->OnTestFinished(test_result);
- called_any_callback = true;
+ final_results.push_back(test_result);
}
}
// TODO(phajdan.jr): Handle the case where processing XML output
// indicates a crash but none of the test results is marked as crashing.
- // TODO(phajdan.jr): Handle the case where the exit code is non-zero
- // but results file indicates that all tests passed (e.g. crash during
- // shutdown).
+ if (final_results.empty())
+ return false;
+
+ bool has_non_success_test = false;
+ for (size_t i = 0; i < final_results.size(); i++) {
+ if (final_results[i].status != TestResult::TEST_SUCCESS) {
+ has_non_success_test = true;
+ break;
+ }
+ }
+
+ if (!has_non_success_test && exit_code != 0) {
+ // This is a bit surprising case: all tests are marked as successful,
+ // but the exit code was not zero. This can happen e.g. under memory
+ // tools that report leaks this way.
+
+ if (final_results.size() == 1) {
+ // Easy case. One test only so we know the non-zero exit code
+ // was caused by that one test.
+ final_results[0].status = TestResult::TEST_FAILURE_ON_EXIT;
+ } else {
+ // Harder case. Discard the results and request relaunching all
+ // tests without batching. This will trigger above branch on
+ // relaunch leading to more precise results.
+ LOG(WARNING) << "Not sure which test caused non-zero exit code, "
+ << "relaunching all of them without batching.";
+
+ for (size_t i = 0; i < final_results.size(); i++)
+ tests_to_relaunch->push_back(final_results[i].full_name);
+
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < final_results.size(); i++) {
+ // Fix the output snippet after possible changes to the test result.
+ final_results[i].output_snippet =
+ GetTestOutputSnippet(final_results[i], output);
+ test_launcher->OnTestFinished(final_results[i]);
+ called_any_callback = true;
+ }
} else {
fprintf(stdout,
"Failed to get out-of-band test success data, "