summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--webkit/tools/layout_tests/layout_package/test_shell_thread.py52
-rwxr-xr-xwebkit/tools/layout_tests/run_webkit_tests.py193
2 files changed, 183 insertions, 62 deletions
diff --git a/webkit/tools/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
index d76ebee..5e49300 100644
--- a/webkit/tools/layout_tests/layout_package/test_shell_thread.py
+++ b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
@@ -19,6 +19,7 @@ import subprocess
import sys
import thread
import threading
+import time
import path_utils
import platform_utils
@@ -151,23 +152,24 @@ class SingleTestThread(threading.Thread):
class TestShellThread(threading.Thread):
- def __init__(self, filename_queue, test_shell_command, test_types,
+ def __init__(self, filename_list_queue, test_shell_command, test_types,
test_args, shell_args, options):
"""Initialize all the local state for this test shell thread.
Args:
- filename_queue: A thread safe Queue class that contains tuples of
- (filename, uri) pairs.
+ filename_list_queue: A thread safe Queue class that contains lists of
+ tuples of (filename, uri) pairs.
test_shell_command: A list specifying the command+args for test_shell
test_types: A list of TestType objects to run the test output against.
test_args: A TestArguments object to pass to each TestType.
shell_args: Any extra arguments to be passed to test_shell.exe.
options: A property dictionary as produced by optparse. The command-line
- options should match those expected by run_webkit_tests; they
- are typically passed via the run_webkit_tests.TestRunner class.
+ options should match those expected by run_webkit_tests; they
+ are typically passed via the run_webkit_tests.TestRunner class.
"""
threading.Thread.__init__(self)
- self._filename_queue = filename_queue
+ self._filename_list_queue = filename_list_queue
+ self._filename_list = []
self._test_shell_command = test_shell_command
self._test_types = test_types
self._test_args = test_args
@@ -177,6 +179,14 @@ class TestShellThread(threading.Thread):
self._failures = {}
self._canceled = False
self._exception_info = None
+ self._timing_stats = {}
+
+ # Current directory of tests we're running.
+ self._current_dir = None
+ # Number of tests in self._current_dir.
+ self._num_tests_in_current_dir = None
+ # Time at which we started running tests from self._current_dir.
+ self._current_dir_start_time = None
if self._options.run_singly:
# When we're running one test per test_shell process, we can enforce
@@ -195,6 +205,11 @@ class TestShellThread(threading.Thread):
TestFailures."""
return self._failures
+ def GetTimingStats(self):
+ """Returns a dictionary mapping test directory to a tuple of
+ (number of tests in that directory, time to run the tests)"""
+    return self._timing_stats
+
def Cancel(self):
"""Set a flag telling this thread to quit."""
self._canceled = True
@@ -232,12 +247,25 @@ class TestShellThread(threading.Thread):
if self._canceled:
logging.info('Testing canceled')
return
- try:
- filename, test_uri = self._filename_queue.get_nowait()
- except Queue.Empty:
- self._KillTestShell()
- logging.debug("queue empty, quitting test shell thread")
- return
+
+      if not self._filename_list:
+ if self._current_dir is not None:
+ self._timing_stats[self._current_dir] = \
+ (self._num_tests_in_current_dir,
+ time.time() - self._current_dir_start_time)
+
+ try:
+ self._current_dir, self._filename_list = \
+ self._filename_list_queue.get_nowait()
+ except Queue.Empty:
+ self._KillTestShell()
+ logging.debug("queue empty, quitting test shell thread")
+ return
+
+ self._num_tests_in_current_dir = len(self._filename_list)
+ self._current_dir_start_time = time.time()
+
+ filename, test_uri = self._filename_list.pop()
# We have a url, run tests.
batch_count += 1
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index d0bd3a4..cfde9e2 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -58,6 +58,9 @@ class TestRunner:
# When collecting test cases, skip these directories
_skipped_directories = set(['.svn', '_svn', 'resources'])
+ # Top-level directories to shard when running tests.
+ _shardable_directories = set(['chrome', 'LayoutTests'])
+
HTTP_SUBDIR = os.sep.join(['', 'http', ''])
def __init__(self, options, paths):
@@ -293,19 +296,100 @@ class TestRunner:
return cmp(y_is_http, x_is_http)
return cmp(x, y)
- def Run(self):
- """Run all our tests on all our test files.
+ def _GetDirForTestFile(self, test_file):
+ """Returns the highest-level directory by which to shard the given test
+ file."""
+ # TODO(ojan): See if we can grab the lowest level directory. That will
+ # provide better parallelization. We should at least be able to do so
+ # for some directories (e.g. LayoutTests/dom).
+ index = test_file.rfind('/LayoutTests/')
+    if index == -1:
+ index = test_file.rfind('/chrome/')
+
+ test_file = test_file[index + 1:]
+ test_file_parts = test_file.split('/', 1)
+ directory = test_file_parts[0]
+ test_file = test_file_parts[1]
+
+ return_value = directory
+ while directory in TestRunner._shardable_directories:
+ test_file_parts = test_file.split('/', 1)
+ directory = test_file_parts[0]
+ return_value = os.path.join(return_value, directory)
+ test_file = test_file_parts[1]
+
+ return return_value
+
+ def _GetTestFileQueue(self, test_files):
+ """Create the thread safe queue of lists of (test filenames, test URIs)
+ tuples. Each TestShellThread pulls a list from this queue and runs those
+ tests in order before grabbing the next available list.
+
+ Shard the lists by directory. This helps ensure that tests that depend
+ on each other (aka bad tests!) continue to run together as most
+  cross-test dependencies tend to occur within the same directory.
- For each test file, we run each test type. If there are any failures, we
- collect them for reporting.
+ Return:
+ The Queue of lists of (test file, test uri) tuples.
+ """
+ tests_by_dir = {}
+ for test_file in test_files:
+ directory = self._GetDirForTestFile(test_file)
+ if directory not in tests_by_dir:
+ tests_by_dir[directory] = []
+ tests_by_dir[directory].append((test_file,
+ path_utils.FilenameToUri(test_file)))
+
+ # Sort by the number of tests in the dir so that the ones with the most
+ # tests get run first in order to maximize parallelization. Number of tests
+ # is a good enough, but not perfect, approximation of how long that set of
+ # tests will take to run. We can't just use a PriorityQueue until we move
+ # to Python 2.6.
+ test_lists = []
+ for directory in tests_by_dir:
+ test_list = tests_by_dir[directory]
+ # Keep the tests in alphabetical order.
+ # TODO: Remove once tests are fixed so they can be run in any order.
+ test_list.reverse()
+ test_lists.append((directory, test_list))
+    test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+ filename_queue = Queue.Queue()
+ for item in test_lists:
+ filename_queue.put(item)
+ return filename_queue
+
+  def _GetTestShellArgs(self, index=0):
+    """Returns the tuple of (test_args, shell_args) for thread |index|."""
+    shell_args = []
+    test_args = test_type_base.TestArguments()
+    if not self._options.no_pixel_tests:
+      png_path = os.path.join(self._options.results_directory,
+                              "png_result%s.png" % index)
+ shell_args.append("--pixel-tests=" + png_path)
+ test_args.png_path = png_path
+
+ test_args.new_baseline = self._options.new_baseline
+ test_args.show_sources = self._options.sources
+
+ if self._options.startup_dialog:
+ shell_args.append('--testshell-startup-dialog')
+
+ if self._options.gp_fault_error_box:
+ shell_args.append('--gp-fault-error-box')
+
+ # larger timeout if page heap is enabled.
+ if self._options.time_out_ms:
+ shell_args.append('--time-out-ms=' + self._options.time_out_ms)
+
+ return (test_args, shell_args)
+
+ def _InstantiateTestShellThreads(self, test_shell_binary):
+    """Instantiates and starts the TestShellThread(s).
Return:
- We return nonzero if there are regressions compared to the last run.
+ The list of threads.
"""
- if not self._test_files:
- return 0
- start_time = time.time()
- test_shell_binary = path_utils.TestShellBinaryPath(self._options.target)
test_shell_command = [test_shell_binary]
if self._options.wrapper:
@@ -315,27 +399,8 @@ class TestRunner:
# about it anyway.
test_shell_command = self._options.wrapper.split() + test_shell_command
- # Check that the system dependencies (themes, fonts, ...) are correct.
- if not self._options.nocheck_sys_deps:
- proc = subprocess.Popen([test_shell_binary,
- "--check-layout-test-sys-deps"])
- if proc.wait() != 0:
- logging.info("Aborting because system dependencies check failed.\n"
- "To override, invoke with --nocheck-sys-deps")
- sys.exit(1)
-
- logging.info("Starting tests")
-
- # Create the output directory if it doesn't already exist.
- google.path_utils.MaybeMakeDirectory(self._options.results_directory)
-
test_files = self._test_files_list
-
- # Create the thread safe queue of (test filenames, test URIs) tuples. Each
- # TestShellThread pulls values from this queue.
- filename_queue = Queue.Queue()
- for test_file in test_files:
- filename_queue.put((test_file, path_utils.FilenameToUri(test_file)))
+ filename_queue = self._GetTestFileQueue(test_files)
# If we have http tests, the first one will be an http test.
if test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0:
@@ -344,33 +409,13 @@ class TestRunner:
# Instantiate TestShellThreads and start them.
threads = []
for i in xrange(int(self._options.num_test_shells)):
- shell_args = []
- test_args = test_type_base.TestArguments()
- if not self._options.no_pixel_tests:
- png_path = os.path.join(self._options.results_directory,
- "png_result%s.png" % i)
- shell_args.append("--pixel-tests=" + png_path)
- test_args.png_path = png_path
-
- test_args.new_baseline = self._options.new_baseline
- test_args.show_sources = self._options.sources
-
# Create separate TestTypes instances for each thread.
test_types = []
for t in self._test_types:
test_types.append(t(self._options.platform,
self._options.results_directory))
- if self._options.startup_dialog:
- shell_args.append('--testshell-startup-dialog')
-
- if self._options.gp_fault_error_box:
- shell_args.append('--gp-fault-error-box')
-
- # larger timeout if page heap is enabled.
- if self._options.time_out_ms:
- shell_args.append('--time-out-ms=' + self._options.time_out_ms)
-
+ test_args, shell_args = self._GetTestShellArgs()
thread = test_shell_thread.TestShellThread(filename_queue,
test_shell_command,
test_types,
@@ -380,8 +425,41 @@ class TestRunner:
thread.start()
threads.append(thread)
+ return threads
+
+ def Run(self):
+ """Run all our tests on all our test files.
+
+ For each test file, we run each test type. If there are any failures, we
+ collect them for reporting.
+
+ Return:
+ We return nonzero if there are regressions compared to the last run.
+ """
+ if not self._test_files:
+ return 0
+ start_time = time.time()
+ test_shell_binary = path_utils.TestShellBinaryPath(self._options.target)
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ proc = subprocess.Popen([test_shell_binary,
+ "--check-layout-test-sys-deps"])
+ if proc.wait() != 0:
+ logging.info("Aborting because system dependencies check failed.\n"
+ "To override, invoke with --nocheck-sys-deps")
+ sys.exit(1)
+
+ logging.info("Starting tests")
+
+ # Create the output directory if it doesn't already exist.
+ google.path_utils.MaybeMakeDirectory(self._options.results_directory)
+
+ threads = self._InstantiateTestShellThreads(test_shell_binary)
+
# Wait for the threads to finish and collect test failures.
test_failures = {}
+ test_timings = {}
try:
for thread in threads:
while thread.isAlive():
@@ -391,6 +469,7 @@ class TestRunner:
# be interruptible by KeyboardInterrupt.
thread.join(1.0)
test_failures.update(thread.GetFailures())
+ test_timings.update(thread.GetTimingStats())
except KeyboardInterrupt:
for thread in threads:
thread.Cancel()
@@ -408,6 +487,8 @@ class TestRunner:
end_time = time.time()
logging.info("%f total testing time" % (end_time - start_time))
+ self._PrintTimingsForRuns(test_timings)
+
print "-" * 78
# Tests are done running. Compare failures with expected failures.
@@ -434,6 +515,18 @@ class TestRunner:
sys.stderr.flush()
return len(regressions)
+ def _PrintTimingsForRuns(self, test_timings):
+ timings = []
+ for directory in test_timings:
+      num_tests, run_time = test_timings[directory]
+      timings.append((round(run_time, 1), directory, num_tests))
+ timings.sort()
+
+    logging.debug("Time to process each subdirectory:")
+ for timing in timings:
+ logging.debug("%s took %s seconds to run %s tests." % \
+ (timing[1], timing[0], timing[2]))
+
def _PrintResults(self, test_failures, output):
"""Print a short summary to stdout about how many tests passed.