Diffstat (limited to 'webkit/tools/layout_tests')
-rw-r--r-- | webkit/tools/layout_tests/layout_package/test_shell_thread.py | 52
-rwxr-xr-x | webkit/tools/layout_tests/run_webkit_tests.py                 | 55
2 files changed, 17 insertions, 90 deletions
diff --git a/webkit/tools/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
index 827a8e9..d76ebee 100644
--- a/webkit/tools/layout_tests/layout_package/test_shell_thread.py
+++ b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
@@ -19,7 +19,6 @@ import subprocess
 import sys
 import thread
 import threading
-import time

 import path_utils
 import platform_utils
@@ -152,24 +151,23 @@ class SingleTestThread(threading.Thread):


 class TestShellThread(threading.Thread):
-  def __init__(self, filename_list_queue, test_shell_command, test_types,
+  def __init__(self, filename_queue, test_shell_command, test_types,
                test_args, shell_args, options):
     """Initialize all the local state for this test shell thread.

     Args:
-      filename_list_queue: A thread safe Queue class that contains lists of
-          tuples of (filename, uri) pairs.
+      filename_queue: A thread safe Queue class that contains tuples of
+          (filename, uri) pairs.
       test_shell_command: A list specifying the command+args for test_shell
       test_types: A list of TestType objects to run the test output against.
       test_args: A TestArguments object to pass to each TestType.
       shell_args: Any extra arguments to be passed to test_shell.exe.
       options: A property dictionary as produced by optparse. The command-line
-        options should match those expected by run_webkit_tests; they
-        are typically passed via the run_webkit_tests.TestRunner class.
+          options should match those expected by run_webkit_tests; they
+          are typically passed via the run_webkit_tests.TestRunner class.
     """
     threading.Thread.__init__(self)
-    self._filename_list_queue = filename_list_queue
-    self._filename_list = []
+    self._filename_queue = filename_queue
     self._test_shell_command = test_shell_command
     self._test_types = test_types
     self._test_args = test_args
@@ -179,14 +177,6 @@ class TestShellThread(threading.Thread):
     self._failures = {}
     self._canceled = False
     self._exception_info = None
-    self._timing_stats = {}
-
-    # Current directory of tests we're running.
-    self._current_dir = None
-    # Number of tests in self._current_dir.
-    self._num_tests_in_current_dir = None
-    # Time at which we started running tests from self._current_dir.
-    self._current_dir_start_time = None

     if self._options.run_singly:
       # When we're running one test per test_shell process, we can enforce
@@ -204,11 +194,6 @@
     """Returns a dictionary mapping test filename to a list of TestFailures."""
     return self._failures
-
-  def GetTimingStats(self):
-    """Returns a dictionary mapping test directory to a tuple of
-    (number of tests in that directory, time to run the tests)"""
-    return self._timing_stats;

   def Cancel(self):
     """Set a flag telling this thread to quit."""
@@ -247,25 +232,12 @@
       if self._canceled:
         logging.info('Testing canceled')
         return
-
-      if len(self._filename_list) is 0:
-        if self._current_dir is not None:
-          self._timing_stats[self._current_dir] = \
-              (self._num_tests_in_current_dir,
-               time.time() - self._current_dir_start_time)
-
-        try:
-          self._current_dir, self._filename_list = \
-              self._filename_list_queue.get_nowait()
-        except Queue.Empty:
-          self._KillTestShell()
-          logging.debug("queue empty, quitting test shell thread")
-          return
-
-        self._num_tests_in_current_dir = len(self._filename_list)
-        self._current_dir_start_time = time.time()
-
-      filename, test_uri = self._filename_list.pop()
+      try:
+        filename, test_uri = self._filename_queue.get_nowait()
+      except Queue.Empty:
+        self._KillTestShell()
+        logging.debug("queue empty, quitting test shell thread")
+        return

       # We have a url, run tests.
       batch_count += 1
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index 7a04814..d0bd3a4 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -331,41 +331,11 @@ class TestRunner:

     test_files = self._test_files_list

-    # Create the thread safe queue of lists of (test filenames, test URIs)
-    # tuples. Each TestShellThread pulls a list from this queue and runs those
-    # tests in order before grabbing the next available list.
-    #
-    # Shard the lists by directory. This helps ensure that tests that depend
-    # on each other (aka bad tests!) continue to run together as most
-    # cross-tests dependencies tend to occur within the same directory.
-    self._tests_by_dir = {}
-    for test_file in test_files:
-      # Cut off the filename to grab the second to lowest level directory.
-      # TODO(ojan): See if we can grab the lowest level directory. That will
-      # provide better parallelization. We should at least be able to do so
-      # for some directories (e.g. LayoutTests/dom).
-      dir = test_file.rsplit('/', 2)[0]
-      if dir not in self._tests_by_dir:
-        self._tests_by_dir[dir] = []
-      self._tests_by_dir[dir].append((test_file,
-                                      path_utils.FilenameToUri(test_file)))
-
-    # Sort by the number of tests in the dir so that the ones with the most
-    # tests get run first in order to maximize parallelization. Number of tests
-    # is a good enough, but not perfect, approximation of how long that set of
-    # tests will take to run. We can't just use a PriorityQueue until we move
-    # to Python 2.6.
-    test_lists = []
-    for directory in self._tests_by_dir:
-      test_list = self._tests_by_dir[directory]
-      # Put the length of the list first so that the queue is sorted
-      # by that length and the largest lists are dequeued first.
-      test_lists.append((directory, test_list))
-    test_lists.sort(lambda a, b: cmp(len(b), len(a)))
-
+    # Create the thread safe queue of (test filenames, test URIs) tuples. Each
+    # TestShellThread pulls values from this queue.
     filename_queue = Queue.Queue()
-    for item in test_lists:
-      filename_queue.put(item)
+    for test_file in test_files:
+      filename_queue.put((test_file, path_utils.FilenameToUri(test_file)))

     # If we have http tests, the first one will be an http test.
     if test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0:
@@ -412,7 +382,6 @@ class TestRunner:

     # Wait for the threads to finish and collect test failures.
     test_failures = {}
-    test_timings = {}
     try:
       for thread in threads:
         while thread.isAlive():
@@ -422,7 +391,6 @@ class TestRunner:
           # be interruptible by KeyboardInterrupt.
           thread.join(1.0)
         test_failures.update(thread.GetFailures())
-        test_timings.update(thread.GetTimingStats())
     except KeyboardInterrupt:
       for thread in threads:
         thread.Cancel()
@@ -449,7 +417,6 @@ class TestRunner:

     # Write summaries to stdout.
     self._PrintResults(test_failures, sys.stdout)
-    self._PrintTimingsForRuns(test_timings)

     # Write the same data to a log file.
     out_filename = os.path.join(self._options.results_directory, "score.txt")
@@ -467,18 +434,6 @@ class TestRunner:
     sys.stderr.flush()
     return len(regressions)

-  def _PrintTimingsForRuns(self, test_timings):
-    timings = []
-    for directory in test_timings:
-      num_tests, time = test_timings[directory]
-      timings.append((round(time, 1), directory, num_tests))
-    timings.sort()
-
-    logging.debug("Time to process each each subdirectory:")
-    for timing in timings:
-      logging.debug("%s took %s seconds to run %s tests." % \
-          (timing[1], timing[0], timing[2]))
-
   def _PrintResults(self, test_failures, output):
     """Print a short summary to stdout about how many tests passed.

@@ -725,7 +680,7 @@ def main(options, args):

   if not options.num_test_shells:
     # Only run stable configurations with multiple test_shells by default.
-    if sys.platform in ('win32', 'cygwin') and options.target == 'Release':
+    if False:
       cpus = 1
       if sys.platform in ('win32', 'cygwin'):
         cpus = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
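For context, the model this patch moves to is easy to see in isolation: one flat Queue.Queue holds a (filename, uri) tuple per test, and every worker thread drains it with get_nowait() until Queue.Empty is raised. The sketch below is a minimal, self-contained illustration of that pattern under stated assumptions, not the real harness: FakeWorker and fake_filename_to_uri are hypothetical stand-ins for TestShellThread and path_utils.FilenameToUri; only the queue handling mirrors the patched code.

# Minimal sketch of the flat work-queue pattern used above (Python 2, like the
# scripts in this change). FakeWorker and fake_filename_to_uri are illustrative
# stand-ins, not part of the layout-test harness.

import Queue
import threading


def fake_filename_to_uri(filename):
  # Stand-in for path_utils.FilenameToUri().
  return 'file:///' + filename


class FakeWorker(threading.Thread):
  """Pulls (filename, uri) tuples until the shared queue is empty."""

  def __init__(self, filename_queue, results):
    threading.Thread.__init__(self)
    self._filename_queue = filename_queue
    self._results = results

  def run(self):
    while True:
      try:
        filename, test_uri = self._filename_queue.get_nowait()
      except Queue.Empty:
        # Queue drained; the patched TestShellThread also shuts down its
        # test_shell process here before returning.
        return
      # The real thread would run the test; here we just record it.
      self._results.append((self.getName(), filename, test_uri))


if __name__ == '__main__':
  test_files = ['LayoutTests/fast/dom/a.html',
                'LayoutTests/fast/dom/b.html',
                'LayoutTests/http/tests/c.html']

  # Producer side, mirroring the simplified TestRunner code: no per-directory
  # sharding, just one tuple per test file.
  filename_queue = Queue.Queue()
  for test_file in test_files:
    filename_queue.put((test_file, fake_filename_to_uri(test_file)))

  results = []
  workers = [FakeWorker(filename_queue, results) for _ in range(2)]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()

  for name, filename, uri in results:
    print '%s ran %s (%s)' % (name, filename, uri)

The trade-off, as the deleted comments note, is that a flat queue gives finer-grained load balancing across threads than the old per-directory lists, but it no longer keeps all tests from one directory on a single thread, so per-directory timing stats and directory-level ordering go away with it.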