Diffstat (limited to 'webkit')
-rw-r--r--  webkit/tools/layout_tests/layout_package/test_expectations.py |  20
-rw-r--r--  webkit/tools/layout_tests/layout_package/test_shell_thread.py |  91
-rwxr-xr-x  webkit/tools/layout_tests/run_webkit_tests.py                 | 161
-rw-r--r--  webkit/tools/layout_tests/test_expectations.txt               |  14
-rw-r--r--  webkit/tools/test_shell/test_shell_main.cc                    |  12
5 files changed, 198 insertions, 100 deletions
diff --git a/webkit/tools/layout_tests/layout_package/test_expectations.py b/webkit/tools/layout_tests/layout_package/test_expectations.py
index cd46d2a..7ba9ace 100644
--- a/webkit/tools/layout_tests/layout_package/test_expectations.py
+++ b/webkit/tools/layout_tests/layout_package/test_expectations.py
@@ -15,7 +15,7 @@ import compare_failures
# Test expectation and modifier constants.
-(PASS, FAIL, TIMEOUT, CRASH, SKIP, WONTFIX, DEFER, NONE) = range(8)
+(PASS, FAIL, TIMEOUT, CRASH, SKIP, WONTFIX, DEFER, SLOW, NONE) = range(9)
class TestExpectations:
TEST_LIST = "test_expectations.txt"
@@ -112,6 +112,9 @@ class TestExpectations:
def IsIgnored(self, test):
return self._expected_failures.HasModifier(test, WONTFIX)
+ def HasModifier(self, test, modifier):
+ return self._expected_failures.HasModifier(test, modifier)
+
def StripComments(line):
"""Strips comments from a line and return None if the line is empty
or else the contents of line with leading and trailing spaces removed
@@ -145,14 +148,19 @@ class TestExpectationsFile:
DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
SKIP: Doesn't run the test.
+  SLOW: The test takes a long time to run, but does not time out indefinitely.
WONTFIX: For tests that we never intend to pass on a given platform.
DEFER: Test does not count in our statistics for the current release.
DEBUG: Expectations apply only to the debug build.
RELEASE: Expectations apply only to release build.
LINUX/WIN/MAC: Expectations apply only to these platforms.
- A test can be included twice, but not via the same path. If a test is included
- twice, then the more precise path wins.
+ Notes:
+ -A test cannot be both SLOW and TIMEOUT
+ -A test cannot be both DEFER and WONTFIX
+ -A test can be included twice, but not via the same path.
+ -If a test is included twice, then the more precise path wins.
+ -CRASH tests cannot be DEFER or WONTFIX
"""
EXPECTATIONS = { 'pass': PASS,
@@ -167,6 +175,7 @@ class TestExpectationsFile:
MODIFIERS = { 'skip': SKIP,
'wontfix': WONTFIX,
'defer': DEFER,
+ 'slow': SLOW,
'none': NONE }
def __init__(self, path, full_test_list, platform, is_debug_mode):
@@ -298,6 +307,11 @@ class TestExpectationsFile:
expectations = self._ParseExpectations(tests_and_expecation_parts[1],
lineno, test_list_path)
+ if 'slow' in options and TIMEOUT in expectations:
+ self._AddError(lineno, 'A test cannot be both slow and timeout. If the '
'test times out indefinitely, then it should be listed as timeout.',
+ test_and_expectations)
+
full_path = os.path.join(path_utils.LayoutDataDir(), test_list_path)
full_path = os.path.normpath(full_path)
# WebKit's way of skipping tests is to add a -disabled suffix.
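
For concreteness, here is a hedged sketch of entries the new SLOW modifier is meant to permit and reject; the bug number and test path are hypothetical, and the 10x timeout multiplier comes from the run_webkit_tests.py change further down:

  // A SLOW test keeps its normal expectations; it just runs with 10x the usual timeout.
  BUG12345 SLOW WIN LINUX : LayoutTests/fast/js/hypothetical-long-test.html = PASS
  // Rejected by the new check above: SLOW contradicts an expected TIMEOUT.
  BUG12345 SLOW : LayoutTests/fast/js/hypothetical-long-test.html = TIMEOUT
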
diff --git a/webkit/tools/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
index c3ef1db..da69393 100644
--- a/webkit/tools/layout_tests/layout_package/test_shell_thread.py
+++ b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
@@ -25,13 +25,13 @@ import path_utils
import platform_utils
import test_failures
-def ProcessOutput(proc, filename, test_uri, test_types, test_args, target):
+def ProcessOutput(proc, test_info, test_types, test_args, target):
"""Receives the output from a test_shell process, subjects it to a number
of tests, and returns a list of failure types the test produced.
Args:
proc: an active test_shell process
- filename: path of the test file being run
+ test_info: Object containing the test filename, uri and timeout
test_types: list of test types to subject the output to
test_args: arguments to be passed to each test
target: Debug or Release
@@ -69,10 +69,11 @@ def ProcessOutput(proc, filename, test_uri, test_types, test_args, target):
# Don't include #URL lines in our output
if line.startswith("#URL:"):
+ test_string = test_info.uri.strip()
url = line.rstrip()[5:]
- if url != test_uri:
+ if url != test_string:
logging.fatal("Test got out of sync:\n|%s|\n|%s|" %
- (url, test_uri))
+ (url, test_string))
raise AssertionError("test out of sync")
elif line.startswith("#MD5:"):
local_test_args.hash = line.rstrip()[5:]
@@ -90,7 +91,8 @@ def ProcessOutput(proc, filename, test_uri, test_types, test_args, target):
time_for_diffs = {}
for test_type in test_types:
start_diff_time = time.time()
- new_failures = test_type.CompareOutput(filename, proc,
+ new_failures = test_type.CompareOutput(test_info.filename,
+ proc,
''.join(outlines),
local_test_args,
target)
@@ -103,8 +105,8 @@ def ProcessOutput(proc, filename, test_uri, test_types, test_args, target):
total_time_for_all_diffs = time.time() - end_test_time
test_run_time = end_test_time - start_time
- return TestStats(filename, failures, test_run_time, total_time_for_all_diffs,
- time_for_diffs)
+ return TestStats(test_info.filename, failures, test_run_time,
+ total_time_for_all_diffs, time_for_diffs)
def StartTestShell(command, args):
@@ -132,28 +134,27 @@ class TestStats:
class SingleTestThread(threading.Thread):
"""Thread wrapper for running a single test file."""
- def __init__(self, test_shell_command, shell_args, test_uri, filename,
- test_types, test_args, target):
+ def __init__(self, test_shell_command, shell_args, test_info, test_types,
+ test_args, target):
"""
Args:
- test_uri: full file:// or http:// URI of the test file to be run
- filename: absolute local path to the test file
+ test_info: Object containing the test filename, uri and timeout
See TestShellThread for documentation of the remaining arguments.
"""
threading.Thread.__init__(self)
self._command = test_shell_command
self._shell_args = shell_args
- self._test_uri = test_uri
- self._filename = filename
+ self._test_info = test_info
self._test_types = test_types
self._test_args = test_args
self._target = target
def run(self):
- proc = StartTestShell(self._command, self._shell_args + [self._test_uri])
- self._test_stats = ProcessOutput(proc, self._filename, self._test_uri,
- self._test_types, self._test_args, self._target)
+ proc = StartTestShell(self._command, self._shell_args +
+        ["--time-out-ms=" + str(self._test_info.timeout), self._test_info.uri])
+ self._test_stats = ProcessOutput(proc, self._test_info, self._test_types,
+ self._test_args, self._target)
def GetTestStats(self):
return self._test_stats
@@ -197,16 +198,6 @@ class TestShellThread(threading.Thread):
# Time at which we started running tests from self._current_dir.
self._current_dir_start_time = None
- if self._options.run_singly:
- # When we're running one test per test_shell process, we can enforce
- # a hard timeout. test_shell uses a default of 10 seconds if no
- # time-out-ms is given, and the test_shell watchdog uses 2.5x the
- # test_shell's value. We want to be larger than that.
- self._time_out_sec = int(self._options.time_out_ms) * 3.0 / 1000.0
- logging.info("Setting Python per-test timeout to %s ms (%s sec)" %
- (1000 * self._time_out_sec, self._time_out_sec))
-
-
def GetFailures(self):
"""Returns a dictionary mapping test filename to a list of
TestFailures."""
@@ -277,14 +268,16 @@ class TestShellThread(threading.Thread):
self._num_tests_in_current_dir = len(self._filename_list)
self._current_dir_start_time = time.time()
- filename, test_uri = self._filename_list.pop()
+ test_info = self._filename_list.pop()
# We have a url, run tests.
batch_count += 1
if self._options.run_singly:
- failures = self._RunTestSingly(filename, test_uri)
+ failures = self._RunTestSingly(test_info)
else:
- failures = self._RunTest(filename, test_uri)
+ failures = self._RunTest(test_info)
+
+ filename = test_info.filename
if failures:
# Check and kill test shell if we need too.
if len([1 for f in failures if f.ShouldKillTestShell()]):
@@ -299,28 +292,39 @@ class TestShellThread(threading.Thread):
self._failures[filename] = failures
else:
logging.debug(path_utils.RelativeTestFilename(filename) + " passed")
+
if batch_size > 0 and batch_count > batch_size:
# Bounce the shell and reset count.
self._KillTestShell()
batch_count = 0
- def _RunTestSingly(self, filename, test_uri):
+ def _RunTestSingly(self, test_info):
"""Run a test in a separate thread, enforcing a hard time limit.
Since we can only detect the termination of a thread, not any internal
state or progress, we can only run per-test timeouts when running test
files singly.
+
+ Args:
+ test_info: Object containing the test filename, uri and timeout
+
+ Return:
+ A list of TestFailure objects describing the error.
"""
worker = SingleTestThread(self._test_shell_command,
self._shell_args,
- test_uri,
- filename,
+ test_info,
self._test_types,
self._test_args,
self._options.target)
+
worker.start()
- worker.join(self._time_out_sec)
+
+ # When we're running one test per test_shell process, we can enforce
+    # a hard timeout. The test_shell watchdog uses 2.5x the timeout;
+    # we want to be larger than that.
+ worker.join(int(test_info.timeout) * 3.0 / 1000.0)
if worker.isAlive():
# If join() returned with the thread still running, the test_shell.exe is
# completely hung and there's nothing more we can do with it. We have
@@ -339,25 +343,25 @@ class TestShellThread(threading.Thread):
failures = stats.failures
except AttributeError, e:
failures = []
- logging.error('Cannot get results of test: %s' % filename)
+ logging.error('Cannot get results of test: %s' % test_info.filename)
return failures
-
- def _RunTest(self, filename, test_uri):
+ def _RunTest(self, test_info):
"""Run a single test file using a shared test_shell process.
Args:
- filename: The absolute filename of the test
- test_uri: The URI version of the filename
+ test_info: Object containing the test filename, uri and timeout
Return:
A list of TestFailure objects describing the error.
"""
self._EnsureTestShellIsRunning()
+    # Each line written to test_shell is a space-separated "uri timeout" pair,
+    # or just a uri to use the default timeout specified in run_webkit_tests.
+ self._test_shell_proc.stdin.write(("%s %s\n" %
+ (test_info.uri, test_info.timeout)))
- # Ok, load the test URL...
- self._test_shell_proc.stdin.write(test_uri + "\n")
# If the test shell is dead, the above may cause an IOError as we
# try to write onto the broken pipe. If this is the first test for
# this test shell process, than the test shell did not
@@ -366,8 +370,8 @@ class TestShellThread(threading.Thread):
# try to recover here.
self._test_shell_proc.stdin.flush()
- stats = ProcessOutput(self._test_shell_proc, filename, test_uri,
- self._test_types, self._test_args, self._options.target)
+ stats = ProcessOutput(self._test_shell_proc, test_info, self._test_types,
+ self._test_args, self._options.target)
self._test_stats.append(stats)
return stats.failures
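
As a rough sketch of the stdin protocol introduced here (the helper name and values below are illustrative, not part of this change), the driver writes one "uri timeout" pair per line and may omit the timeout to keep test_shell's default:

  # Sketch of the per-test line protocol used by _RunTest above.
  def write_test_command(stdin, uri, timeout_ms=None):
    """Writes one test request to a running test_shell process.

    If timeout_ms is None, only the uri is sent and test_shell keeps its
    default layout-test timeout.
    """
    if timeout_ms is None:
      stdin.write("%s\n" % uri)
    else:
      stdin.write("%s %s\n" % (uri, timeout_ms))
    stdin.flush()

  # e.g. write_test_command(proc.stdin,
  #                         "file:///tmp/LayoutTests/http/tests/misc/acid3.html",
  #                         timeout_ms=100000)
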
@@ -390,7 +394,4 @@ class TestShellThread(threading.Thread):
self._test_shell_proc.stdout.close()
if self._test_shell_proc.stderr:
self._test_shell_proc.stderr.close()
- if sys.platform not in ('win32', 'cygwin'):
- # Closing stdin/stdout/stderr hangs sometimes on OS X.
- subprocess.Popen(["kill", "-9", str(self._test_shell_proc.pid)])
self._test_shell_proc = None
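
The watchdog arithmetic above can be made concrete with a small illustrative sketch (the helper name is invented; the 2.5x factor and the 10-second default come from the comments in this change):

  # Per-test hard-timeout math used when running tests singly.
  def python_join_timeout_sec(test_timeout_ms):
    """Returns how long the Python driver waits on the worker thread.

    test_shell's watchdog allows roughly 2.5x the test timeout, so the
    driver waits 3x to give test_shell and its watchdog the first chance
    to kill a hung test.
    """
    return int(test_timeout_ms) * 3.0 / 1000.0

  # With the default 10000 ms timeout, the watchdog fires around 25 s and
  # the Python join waits 30 s.
  assert python_join_timeout_sec(10000) == 30.0
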
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index f149c34..9ef0e2b 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -48,6 +48,18 @@ from test_types import test_type_base
from test_types import text_diff
from test_types import simplified_text_diff
+class TestInfo:
+ """Groups information about a test for easy passing of data."""
+ def __init__(self, filename, timeout):
+ """Generates the URI and stores the filename and timeout for this test.
+ Args:
+ filename: Full path to the test.
+ timeout: Timeout for running the test in TestShell.
+ """
+ self.filename = filename
+ self.uri = path_utils.FilenameToUri(filename)
+ self.timeout = timeout
+
class TestRunner:
"""A class for managing running a series of tests on a series of test
@@ -339,15 +351,20 @@ class TestRunner:
cross-tests dependencies tend to occur within the same directory.
Return:
- The Queue of lists of (test file, test uri) tuples.
+ The Queue of lists of TestInfo objects.
"""
tests_by_dir = {}
for test_file in test_files:
directory = self._GetDirForTestFile(test_file)
if directory not in tests_by_dir:
tests_by_dir[directory] = []
- tests_by_dir[directory].append((test_file,
- path_utils.FilenameToUri(test_file)))
+
+ if self._expectations.HasModifier(test_file, test_expectations.SLOW):
+        timeout = 10 * int(self._options.time_out_ms)
+ else:
+ timeout = self._options.time_out_ms
+
+ tests_by_dir[directory].append(TestInfo(test_file, timeout))
# Sort by the number of tests in the dir so that the ones with the most
# tests get run first in order to maximize parallelization. Number of tests
@@ -398,10 +415,6 @@ class TestRunner:
if self._options.gp_fault_error_box:
shell_args.append('--gp-fault-error-box')
- # larger timeout if page heap is enabled.
- if self._options.time_out_ms:
- shell_args.append('--time-out-ms=' + self._options.time_out_ms)
-
return (test_args, shell_args)
def _InstantiateTestShellThreads(self, test_shell_binary):
@@ -479,7 +492,7 @@ class TestRunner:
threads = self._InstantiateTestShellThreads(test_shell_binary)
# Wait for the threads to finish and collect test failures.
- test_failures = {}
+ failures = {}
test_timings = {}
individual_test_timings = []
try:
@@ -490,7 +503,7 @@ class TestRunner:
# suffices to not use an indefinite blocking join for it to
# be interruptible by KeyboardInterrupt.
thread.join(1.0)
- test_failures.update(thread.GetFailures())
+ failures.update(thread.GetFailures())
test_timings.update(thread.GetDirectoryTimingStats())
individual_test_timings.extend(thread.GetIndividualTestStats())
except KeyboardInterrupt:
@@ -511,27 +524,27 @@ class TestRunner:
logging.info("%f total testing time" % (end_time - start_time))
print
- self._PrintTimingStatistics(test_timings, individual_test_timings)
+ self._PrintTimingStatistics(test_timings, individual_test_timings, failures)
print "-" * 78
# Tests are done running. Compare failures with expected failures.
- regressions = self._CompareFailures(test_failures)
+ regressions = self._CompareFailures(failures)
print "-" * 78
# Write summaries to stdout.
- self._PrintResults(test_failures, sys.stdout)
+ self._PrintResults(failures, sys.stdout)
# Write the same data to a log file.
out_filename = os.path.join(self._options.results_directory, "score.txt")
output_file = open(out_filename, "w")
- self._PrintResults(test_failures, output_file)
+ self._PrintResults(failures, output_file)
output_file.close()
# Write the summary to disk (results.html) and maybe open the test_shell
# to this file.
- wrote_results = self._WriteResultsHtmlFile(test_failures, regressions)
+ wrote_results = self._WriteResultsHtmlFile(failures, regressions)
if not self._options.noshow_results and wrote_results:
self._ShowResultsHtmlFile()
@@ -540,7 +553,17 @@ class TestRunner:
return len(regressions)
def _PrintTimingStatistics(self, directory_test_timings,
- individual_test_timings):
+ individual_test_timings, failures):
+ self._PrintAggregateTestStatistics(individual_test_timings)
+ self._PrintIndividualTestTimes(individual_test_timings, failures)
+ self._PrintDirectoryTimings(directory_test_timings)
+
+ def _PrintAggregateTestStatistics(self, individual_test_timings):
+ """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+ Args:
+ individual_test_timings: List of test_shell_thread.TestStats for all
+ tests.
+ """
test_types = individual_test_timings[0].time_for_diffs.keys()
times_for_test_shell = []
times_for_diff_processing = []
@@ -555,27 +578,72 @@ class TestRunner:
for test_type in test_types:
times_per_test_type[test_type].append(time_for_diffs[test_type])
- logging.debug("PER TEST TIME IN TESTSHELL (seconds):")
- self._PrintStatisticsForTestTimings(times_for_test_shell)
- logging.debug("PER TEST DIFF PROCESSING TIMES (seconds):")
- self._PrintStatisticsForTestTimings(times_for_diff_processing)
+ self._PrintStatisticsForTestTimings(
+ "PER TEST TIME IN TESTSHELL (seconds):",
+ times_for_test_shell)
+ self._PrintStatisticsForTestTimings(
+ "PER TEST DIFF PROCESSING TIMES (seconds):",
+ times_for_diff_processing)
for test_type in test_types:
- logging.debug("TEST TYPE: %s" % test_type)
- self._PrintStatisticsForTestTimings(times_per_test_type[test_type])
+ self._PrintStatisticsForTestTimings(
+ "PER TEST TIMES BY TEST TYPE: %s" % test_type,
+ times_per_test_type[test_type])
+ def _PrintIndividualTestTimes(self, individual_test_timings, failures):
+    """Prints the run times for slow, timed-out, and crashed tests.
+ Args:
+ individual_test_timings: List of test_shell_thread.TestStats for all
+ tests.
+ failures: Dictionary mapping test filenames to list of test_failures.
+ """
# Reverse-sort by the time spent in test_shell.
individual_test_timings.sort(lambda a, b:
cmp(b.test_run_time, a.test_run_time))
- slowests_tests = (
- individual_test_timings[:self._options.num_slow_tests_to_log] )
- logging.debug("%s slowest tests:" % self._options.num_slow_tests_to_log)
- for test in slowests_tests:
- logging.debug("%s took %s seconds" % (test.filename,
- round(test.test_run_time, 1)))
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
+ for test_tuple in individual_test_timings:
+ filename = test_tuple.filename
+ is_timeout_crash_or_slow = False
+ if self._expectations.HasModifier(filename, test_expectations.SLOW):
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if filename in failures:
+ for failure in failures[filename]:
+ if (failure.__class__ == test_failures.FailureTimeout or
+ failure.__class__ == test_failures.FailureCrash):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+ break
+
+ if (not is_timeout_crash_or_slow and
+ num_printed < self._options.num_slow_tests_to_log):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
print
+ self._PrintTestListTiming("%s slowest tests that are not marked as SLOW "
+        "and did not time out or crash:" % self._options.num_slow_tests_to_log,
+ unexpected_slow_tests)
+ print
+ self._PrintTestListTiming("Tests marked as SLOW:", slow_tests)
+ print
+ self._PrintTestListTiming("Tests that timed out or crashed:",
+ timeout_or_crash_tests)
+ print
+
+ def _PrintTestListTiming(self, title, test_list):
+ logging.debug(title)
+ for test_tuple in test_list:
+ filename = test_tuple.filename[len(path_utils.LayoutDataDir()) + 1:]
+ filename = filename.replace('\\', '/')
+ test_run_time = round(test_tuple.test_run_time, 1)
+ logging.debug("%s took %s seconds" % (filename, test_run_time))
+ def _PrintDirectoryTimings(self, directory_test_timings):
timings = []
for directory in directory_test_timings:
num_tests, time_for_directory = directory_test_timings[directory]
@@ -587,12 +655,19 @@ class TestRunner:
logging.debug("%s took %s seconds to run %s tests." % \
(timing[1], timing[0], timing[2]))
- def _PrintStatisticsForTestTimings(self, timings):
+ def _PrintStatisticsForTestTimings(self, title, timings):
"""Prints the median, mean and standard deviation of the values in timings.
Args:
+ title: Title for these timings.
timings: A list of floats representing times.
"""
+ logging.debug(title)
+ timings.sort()
+
num_tests = len(timings)
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
if num_tests % 2 == 1:
median = timings[((num_tests - 1) / 2) - 1]
else:
@@ -606,14 +681,15 @@ class TestRunner:
sum_of_deviations = math.pow(time - mean, 2)
std_deviation = math.sqrt(sum_of_deviations / num_tests)
- logging.debug(("Median: %s, Mean %s, Standard deviation: %s\n" %
- (median, mean, std_deviation)))
+ logging.debug(("Median: %s, Mean: %s, 90th percentile: %s, "
+ "99th percentile: %s, Standard deviation: %s\n" % (
+ median, mean, percentile90, percentile99, std_deviation)))
- def _PrintResults(self, test_failures, output):
+ def _PrintResults(self, failures, output):
"""Print a short summary to stdout about how many tests passed.
Args:
- test_failures is a dictionary mapping the test filename to a list of
+ failures is a dictionary mapping the test filename to a list of
TestFailure objects if the test failed
output is the file descriptor to write the results to. For example,
@@ -636,7 +712,7 @@ class TestRunner:
else:
dictionary[key] = 1
- for test, failures in test_failures.iteritems():
+ for test, failures in failures.iteritems():
for failure in failures:
AddFailure(failure_counts, failure.__class__)
if self._expectations.IsDeferred(test):
@@ -682,7 +758,7 @@ class TestRunner:
skipped |= self._expectations.GetWontFixSkipped()
self._PrintResultSummary("=> All tests",
self._test_files,
- test_failures,
+ failures,
failure_counts,
skipped,
output)
@@ -721,19 +797,19 @@ class TestRunner:
'percent' : float(count) * 100 / total,
'message' : message }))
- def _CompareFailures(self, test_failures):
+ def _CompareFailures(self, failures):
"""Determine how the test failures from this test run differ from the
previous test run and print results to stdout and a file.
Args:
- test_failures is a dictionary mapping the test filename to a list of
+ failures is a dictionary mapping the test filename to a list of
TestFailure objects if the test failed
Return:
A set of regressions (unexpected failures, hangs, or crashes)
"""
cf = compare_failures.CompareFailures(self._test_files,
- test_failures,
+ failures,
self._expectations)
if not self._options.nocompare_failures:
@@ -747,11 +823,11 @@ class TestRunner:
return cf.GetRegressions()
- def _WriteResultsHtmlFile(self, test_failures, regressions):
+ def _WriteResultsHtmlFile(self, failures, regressions):
"""Write results.html which is a summary of tests that failed.
Args:
- test_failures: a dictionary mapping the test filename to a list of
+ failures: a dictionary mapping the test filename to a list of
TestFailure objects if the test failed
regressions: a set of test filenames that regressed
@@ -760,7 +836,7 @@ class TestRunner:
"""
# test failures
if self._options.full_results_html:
- test_files = test_failures.keys()
+ test_files = failures.keys()
else:
test_files = list(regressions)
if not len(test_files):
@@ -780,7 +856,7 @@ class TestRunner:
test_files.sort()
for test_file in test_files:
- if test_file in test_failures: failures = test_failures[test_file]
+ if test_file in failures: failures = failures[test_file]
else: failures = [] # unexpected passes
out_file.write("<p><a href='%s'>%s</a><br />\n"
% (path_utils.FilenameToUri(test_file),
@@ -981,8 +1057,7 @@ if '__main__' == __name__:
"newly pass or fail.")
option_parser.add_option("", "--num-test-shells",
help="Number of testshells to run in parallel.")
- option_parser.add_option("", "--time-out-ms",
- default=None,
+ option_parser.add_option("", "--time-out-ms", default=None,
help="Set the timeout for each test")
option_parser.add_option("", "--run-singly", action="store_true",
default=False,
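
The timing summary now reports 90th and 99th percentiles alongside the median, mean, and standard deviation. A minimal self-contained sketch of those statistics, using the same int(.9 * n) / int(.99 * n) index convention as the code above (the function name is illustrative; the runner itself sorts in place and reports via logging.debug):

  import math

  def timing_stats(timings):
    """Returns (median, mean, p90, p99, stddev) for a non-empty list of floats."""
    values = sorted(timings)
    n = len(values)
    if n % 2 == 1:
      median = values[n // 2]
    else:
      median = (values[n // 2 - 1] + values[n // 2]) / 2.0
    mean = sum(values) / n
    p90 = values[int(.9 * n)]
    p99 = values[int(.99 * n)]
    stddev = math.sqrt(sum((v - mean) ** 2 for v in values) / n)
    return median, mean, p90, p99, stddev

  # e.g. timing_stats([0.2, 0.4, 0.4, 0.5, 12.8])
  #      -> (0.4, 2.86, 12.8, 12.8, ~4.97)
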
diff --git a/webkit/tools/layout_tests/test_expectations.txt b/webkit/tools/layout_tests/test_expectations.txt
index 4344e10..599d71a 100644
--- a/webkit/tools/layout_tests/test_expectations.txt
+++ b/webkit/tools/layout_tests/test_expectations.txt
@@ -19,6 +19,7 @@
// BUG[0-9]+: See this bug for more information. Every test that isn't marked
// WONTFIX should have a BUG annotation.
// SKIP: Doesn't run the test.
+// SLOW: The test takes a long time to run, but does not time out indefinitely.
// WONTFIX: For tests that we never intend to pass on a given platform.
// DEFER: Test does not count in our statistics for the current release.
// DEBUG: Expectations apply only to the debug build.
@@ -34,6 +35,7 @@
// The above means that all the media tests are flaky, but only on Linux.
//
// Notes:
+// -A test cannot be both SLOW and TIMEOUT
// -A test cannot be both DEFER and WONTFIX
// -A test can be included twice, but not via the same path.
// -If a test is included twice, then the more precise path wins.
@@ -1042,10 +1044,8 @@ BUG10322 DEFER WIN LINUX : LayoutTests/fast/gradients/generated-gradients.html =
// Linux pixeltest failure: radial gradients don't have offset focal point
// New test. We should fix it, but it doesn't need to block the current release
BUG10322 DEFER WIN LINUX : LayoutTests/fast/gradients/simple-gradients.html = FAIL
-// This test isn't hanging, it just takes 12-13 seconds to run, which is more
-// than test_shell allows. However, we shouldn't look into the speed of the test
-// until we have reached PASS status.
-BUG10322 : LayoutTests/http/tests/misc/acid3.html = TIMEOUT FAIL
+// This test isn't hanging; it just takes 12-13 seconds to run.
+BUG10322 SLOW : LayoutTests/http/tests/misc/acid3.html = FAIL
// Expectations for this test changed upstream. We should fix this test, but
// it doesn't need to block the current release
@@ -1254,9 +1254,9 @@ BUG10353 MAC LINUX : LayoutTests/fast/replaced/width100percent-textarea.html = F
BUG10353 LINUX : LayoutTests/fast/text/drawBidiText.html = FAIL
BUG10353 LINUX : LayoutTests/http/tests/security/dataURL/xss-DENIED-from-data-url-in-foreign-domain-subframe.html = TIMEOUT PASS
chrome/fast/dom/xss-DENIED-javascript-variations.html = FAIL
-// This test can end up taking longer than the amount of time we give
-// each test. DEFERd until we find a better way to deal with it.
-BUG10353 DEFER WIN LINUX : LayoutTests/http/tests/misc/dns-prefetch-control.html = TIMEOUT PASS
+
+BUG_OJAN WIN LINUX SLOW : LayoutTests/http/tests/misc/dns-prefetch-control.html = PASS
+
// This is failing on the mac debug only, not sure why, but it's pretty repeatable on the bots, so we
// go to the complexity to split out that one case to track any other changes for the other platforms.
BUG10353 DEBUG MAC : LayoutTests/http/tests/misc/dns-prefetch-control.html = TIMEOUT FAIL PASS
diff --git a/webkit/tools/test_shell/test_shell_main.cc b/webkit/tools/test_shell/test_shell_main.cc
index 58e202d..a0ce4a9 100644
--- a/webkit/tools/test_shell/test_shell_main.cc
+++ b/webkit/tools/test_shell/test_shell_main.cc
@@ -263,15 +263,23 @@ int main(int argc, char* argv[]) {
// Watch stdin for URLs.
char filenameBuffer[kPathBufSize];
while (fgets(filenameBuffer, sizeof(filenameBuffer), stdin)) {
- char *newLine = strchr(filenameBuffer, '\n');
+ char* newLine = strchr(filenameBuffer, '\n');
if (newLine)
*newLine = '\0';
if (!*filenameBuffer)
continue;
- params.test_url = filenameBuffer;
+ params.test_url = strtok(filenameBuffer, " ");
+
+ char* timeout = strtok(NULL, " ");
+ int old_timeout_ms = TestShell::GetLayoutTestTimeout();
+ if (timeout)
+ TestShell::SetFileTestTimeout(atoi(timeout));
+
if (!TestShell::RunFileTest(params))
break;
+
+ TestShell::SetFileTestTimeout(old_timeout_ms);
}
} else {
params.test_url = WideToUTF8(uri).c_str();
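
To close the loop on the protocol, a hedged Python rendering of what the strtok-based parsing above does with one stdin line (the function name and paths are illustrative; the real code stores the url in params.test_url and passes the timeout to TestShell::SetFileTestTimeout):

  # Illustrative parse of one "url [timeout]" line from stdin.
  def parse_test_line(line, default_timeout_ms=10000):
    """Splits a test request; a missing timeout keeps the default."""
    parts = line.strip().split(" ", 1)
    url = parts[0]
    if len(parts) > 1 and parts[1]:
      timeout_ms = int(parts[1])
    else:
      timeout_ms = default_timeout_ms
    return url, timeout_ms

  # parse_test_line("file:///tmp/acid3.html 100000") -> ("file:///tmp/acid3.html", 100000)
  # parse_test_line("file:///tmp/fast.html")         -> ("file:///tmp/fast.html", 10000)
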