author     csharp@chromium.org <csharp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-08-16 14:58:43 +0000
committer  csharp@chromium.org <csharp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-08-16 14:58:43 +0000
commit     546714888e40856480f088b130d228bbb4bf6cfc (patch)
tree       319afc948234f1aa3471c2867810ff61924e5574 /tools
parent     64a83ce1e6a4b72722ba7db7ad31ef98c11c9a8c (diff)
Repeat Failed Tests in Serial
Before considering a test failed in run_test_cases.py, try running the failed tests serially, since they may have failed only because they conflicted with other tests running at the same time.

BUG=
Review URL: https://chromiumcodereview.appspot.com/10831330

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151893 0039d316-1c4b-4281-b951-d872f2087c98
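The retry policy this change introduces, as a minimal standalone sketch (run_parallel and run_one are hypothetical stand-ins, not the actual run_test_cases.py API): run everything in parallel first, then re-run only the failures one at a time, and reclassify a test that passes on its serial run as flaky rather than failed.

# Minimal sketch of the retry-serially policy described above. The helpers
# run_parallel() and run_one() are hypothetical; run_one(test_case) is assumed
# to return the test's exit code (0 on success).
def classify(test_cases, run_parallel, run_one):
  fail = run_parallel(test_cases)   # test cases that failed when run together
  flaky = []
  for test_case in list(fail):
    if run_one(test_case) == 0:     # passes when nothing else is running...
      fail.remove(test_case)
      flaky.append(test_case)       # ...so count it as flaky, not failed
  return fail, flaky

Anything still failing after the serial pass is reported as a real failure in the new 'Summary:' output.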
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/isolate/run_test_cases.py              40
-rwxr-xr-x  tools/isolate/run_test_cases_smoke_test.py   80
2 files changed, 99 insertions, 21 deletions
diff --git a/tools/isolate/run_test_cases.py b/tools/isolate/run_test_cases.py
index 9beccc8..3200c02 100755
--- a/tools/isolate/run_test_cases.py
+++ b/tools/isolate/run_test_cases.py
@@ -454,13 +454,13 @@ def list_test_cases(executable, index, shards, disabled, fails, flaky):
 class Runner(object):
-  def __init__(self, executable, cwd_dir, timeout, progress):
+  def __init__(self, executable, cwd_dir, timeout, progress, retry_count=3):
     # Constants
     self.executable = executable
     self.cwd_dir = cwd_dir
     self.timeout = timeout
     self.progress = progress
-    self.retry_count = 3
+    self.retry_count = retry_count
     # It is important to remove the shard environment variables since it could
     # conflict with --gtest_filter.
     self.env = os.environ.copy()
@@ -546,6 +546,14 @@ def get_test_cases(executable, whitelist, blacklist, index, shards):
   return tests
+def LogResults(result_file, results):
+  """Write the results out to a file if one is given."""
+  if not result_file:
+    return
+  with open(result_file, 'wb') as f:
+    json.dump(results, f, sort_keys=True, indent=2)
+
+
 def run_test_cases(executable, test_cases, jobs, timeout, result_file):
   """Traces test cases one by one."""
   progress = Progress(len(test_cases))
@@ -556,9 +564,7 @@ def run_test_cases(executable, test_cases, jobs, timeout, result_file):
   results = pool.join(progress, 0.1)
   duration = time.time() - progress.start
   results = dict((item[0]['test_case'], item) for item in results)
-  if result_file:
-    with open(result_file, 'wb') as f:
-      json.dump(results, f, sort_keys=True, indent=2)
+  LogResults(result_file, results)
   sys.stdout.write('\n')
   total = len(results)
   if not total:
@@ -581,10 +587,34 @@ def run_test_cases(executable, test_cases, jobs, timeout, result_file):
     else:
       assert False, items
+  # Retry all the failures serially to see if they are just flaky when
+  # run at the same time.
+  if fail:
+    print 'Retrying failed tests serially.'
+    progress = Progress(len(fail))
+    function = Runner(
+        executable, os.getcwd(), timeout, progress, retry_count=1).map
+    test_cases_retry = fail[:]
+
+    for test_case in test_cases_retry:
+      output = function(test_case)
+      progress.print_update()
+      results[output[0]['test_case']].append(output)
+      if not output[0]['returncode']:
+        fail.remove(test_case)
+        flaky.append(test_case)
+
+    LogResults(result_file, results)
+    sys.stdout.write('\n')
+
+  print 'Summary:'
   for test_case in sorted(flaky):
     items = results[test_case]
     print '%s is flaky (tried %d times)' % (test_case, len(items))
+  for test_case in sorted(fail):
+    print '%s failed' % (test_case)
+
   print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
   print 'Flaky: %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
   print 'Fail: %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
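For orientation, the shape of the structure that LogResults dumps, pieced together from the hunks above: each key is a test case name and its value is the list of recorded attempts, with a serial retry appended as a nested list; that is why the smoke test below expects 4 entries for Baz.Fail (3 parallel attempts plus 1 serial retry). The snippet is a hypothetical illustration, not output copied from the tool, and real attempt dicts carry more fields than the two shown.

# Hypothetical illustration of the results structure passed to LogResults,
# inferred from the diff above; only 'test_case' and 'returncode' are shown.
example_results = {
  'Foo.Bar1': [
    {'test_case': 'Foo.Bar1', 'returncode': 0},    # passed on the first try
  ],
  'Baz.Fail': [
    {'test_case': 'Baz.Fail', 'returncode': 1},    # 3 attempts from the parallel pass
    {'test_case': 'Baz.Fail', 'returncode': 1},
    {'test_case': 'Baz.Fail', 'returncode': 1},
    [{'test_case': 'Baz.Fail', 'returncode': 1}],  # serial retry, appended as a list
  ],
}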
diff --git a/tools/isolate/run_test_cases_smoke_test.py b/tools/isolate/run_test_cases_smoke_test.py
index 62ba92ef..2174468 100755
--- a/tools/isolate/run_test_cases_smoke_test.py
+++ b/tools/isolate/run_test_cases_smoke_test.py
@@ -3,6 +3,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import json
 import logging
 import os
 import re
@@ -16,14 +17,19 @@ sys.path.append(os.path.join(ROOT_DIR, 'data', 'gtest_fake'))
 import gtest_fake_base
-def RunTest(test_file):
+def RunTest(test_file, dump_file=None):
   target = os.path.join(ROOT_DIR, 'data', 'gtest_fake', test_file)
   cmd = [
       sys.executable,
       os.path.join(ROOT_DIR, 'run_test_cases.py'),
-      '--no-dump',
-      target,
   ]
+
+  if dump_file:
+    cmd.extend(['--result', dump_file])
+  else:
+    cmd.append('--no-dump')
+
+  cmd.append(target)
   logging.debug(' '.join(cmd))
   proc = subprocess.Popen(
       cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -39,6 +45,12 @@ class TraceTestCases(unittest.TestCase):
     os.environ.pop('GTEST_SHARD_INDEX', '')
     os.environ.pop('GTEST_TOTAL_SHARDS', '')
+    self.filename = 'test.results'
+
+  def tearDown(self):
+    if os.path.exists(self.filename):
+      os.remove(self.filename)
+
   def _check_results(self, expected_out_re, out, err):
     if sys.platform == 'win32':
       out = out.replace('\r\n', '\n')
@@ -52,8 +64,20 @@ class TraceTestCases(unittest.TestCase):
     self.assertEquals([], lines)
     self.assertEquals('', err)
+  def _check_results_file(self, expected_file_contents_entries):
+    self.assertTrue(os.path.exists(self.filename))
+
+    with open(self.filename) as f:
+      file_contents = json.load(f)
+
+    self.assertEqual(len(expected_file_contents_entries), len(file_contents))
+    for (entry_name, entry_count) in expected_file_contents_entries:
+      self.assertTrue(entry_name in file_contents)
+      self.assertEqual(entry_count, len(file_contents[entry_name]))
+
   def test_simple_pass(self):
-    out, err, return_code = RunTest('gtest_fake_pass.py')
+    out, err, return_code = RunTest('gtest_fake_pass.py',
+                                    dump_file=self.filename)
     self.assertEquals(0, return_code)
@@ -61,37 +85,53 @@ class TraceTestCases(unittest.TestCase):
         r'\[\d/\d\] \d\.\d\ds .+',
         r'\[\d/\d\] \d\.\d\ds .+',
         r'\[\d/\d\] \d\.\d\ds .+',
+        re.escape('Summary:'),
         re.escape('Success: 3 100.00%'),
         re.escape('Flaky: 0 0.00%'),
         re.escape('Fail: 0 0.00%'),
         r'\d+\.\ds Done running 3 tests with 3 executions. \d+\.\d test/s',
     ]
-
     self._check_results(expected_out_re, out, err)
+    expected_result_file_entries = [
+        ('Foo.Bar1', 1),
+        ('Foo.Bar2', 1),
+        ('Foo.Bar3', 1)
+    ]
+    self._check_results_file(expected_result_file_entries)
+
   def test_simple_fail(self):
-    out, err, return_code = RunTest('gtest_fake_fail.py')
+    out, err, return_code = RunTest('gtest_fake_fail.py', self.filename)
     self.assertEquals(1, return_code)
-    expected_out_re = [
-        r'\[\d/\d\] \d\.\d\ds .+',
-        r'\[\d/\d\] \d\.\d\ds .+',
-        r'\[\d/\d\] \d\.\d\ds .+',
-        r'\[\d/\d\] \d\.\d\ds .+',
-        r'\[\d/\d\] \d\.\d\ds .+',
-        r'\[\d/\d\] \d\.\d\ds .+',
+    test_fail_output = [
         re.escape('Note: Google Test filter = Baz.Fail'),
         r'',
     ] + [
-        re.escape(l) for l in
-            gtest_fake_base.get_test_output('Baz.Fail').splitlines()
+        re.escape(l) for l in
+        gtest_fake_base.get_test_output('Baz.Fail').splitlines()
     ] + [
         '',
     ] + [
         re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
     ] + [
-        '',
+        ''
+    ]
+
+    expected_out_re = [
+        r'\[\d/\d\] \d\.\d\ds .+',
+        r'\[\d/\d\] \d\.\d\ds .+',
+        r'\[\d/\d\] \d\.\d\ds .+',
+        r'\[\d/\d\] \d\.\d\ds .+',
+        r'\[\d/\d\] \d\.\d\ds .+',
+        r'\[\d/\d\] \d\.\d\ds .+',
+    ] + test_fail_output + [
+        re.escape('Retrying failed tests serially.'),
+        r'\[\d/\d\] \d\.\d\ds .+',
+    ] + test_fail_output + [
+        re.escape('Summary:'),
+        re.escape('Baz.Fail failed'),
         re.escape('Success: 3 75.00%'),
         re.escape('Flaky: 0 0.00%'),
         re.escape('Fail: 1 25.00%'),
@@ -99,6 +139,14 @@ class TraceTestCases(unittest.TestCase):
     ]
     self._check_results(expected_out_re, out, err)
+    expected_result_file_entries = [
+        ('Foo.Bar1', 1),
+        ('Foo.Bar2', 1),
+        ('Foo.Bar3', 1),
+        ('Baz.Fail', 4)
+    ]
+    self._check_results_file(expected_result_file_entries)
+
   def test_simple_gtest_list_error(self):
     out, err, return_code = RunTest('gtest_fake_error.py')