author     dpranke@google.com <dpranke@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-09-30 02:47:19 +0000
committer  dpranke@google.com <dpranke@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-09-30 02:47:19 +0000
commit     f9ea28ab6b3b02152b9a583dc91d51e05301eec6 (patch)
tree       a388aa4c74a8028c6a087e2f377d42a8af5251ad
parent     84b0cbf0fc3b8eafa444be319fd10bb33b5f0f79 (diff)
Tweak the layout_test driver scripts to shard the tests completely and then
add better logging about the amount of parallelism we're getting.
Now, if you add the flag --experimental-fully-parallel, we will shard the test files at
the granularity of single files (i.e., all the files will be dumped into
a single shared Queue and each thread will pull one file out at a time).
This appears to produce a 2x - 3x speedup over the default behavior but isn't
100% stable yet.
Also, add the --no-log-errors flag to shut off error logging every time a
test fails.
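
For illustration, the fully parallel sharding described above amounts to the pattern sketched below: one shared queue of individual test files that worker threads drain one entry at a time. This is a minimal sketch in the Python 2 style of the codebase, not the actual patch; shard_fully_parallel, worker, and run_one_test are made-up names, and the real code queues ('.', [TestInfo]) tuples rather than bare filenames.

# Minimal sketch of per-file sharding through a single shared queue (Python 2).
# Illustrative only: these helper names do not exist in the real
# TestRunner/TestShellThread code.
import Queue
import threading

def shard_fully_parallel(test_files, num_threads, run_one_test):
  """Put every test file into one shared queue; each worker thread pulls one
  file at a time, so faster threads naturally pick up more of the work."""
  filename_queue = Queue.Queue()
  for test_file in test_files:
    filename_queue.put(test_file)

  def worker():
    while True:
      try:
        test_file = filename_queue.get_nowait()
      except Queue.Empty:
        return  # No work left; let the thread exit.
      run_one_test(test_file)

  threads = [threading.Thread(target=worker) for _ in xrange(num_threads)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()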
R=ojan
TEST=none
BUG=none
Review URL: http://codereview.chromium.org/214004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27588 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  webkit/tools/layout_tests/layout_package/test_shell_thread.py | 20
-rwxr-xr-x  webkit/tools/layout_tests/run_webkit_tests.py                 | 48
2 files changed, 58 insertions, 10 deletions
diff --git a/webkit/tools/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
index c6eccdb..c6bf2d3 100644
--- a/webkit/tools/layout_tests/layout_package/test_shell_thread.py
+++ b/webkit/tools/layout_tests/layout_package/test_shell_thread.py
@@ -189,6 +189,9 @@ class TestShellThread(threading.Thread):
     self._exception_info = None
     self._directory_timing_stats = {}
     self._test_stats = []
+    self._num_tests = 0
+    self._start_time = 0
+    self._stop_time = 0
 
     # Current directory of tests we're running.
     self._current_dir = None
@@ -223,16 +226,31 @@ class TestShellThread(threading.Thread):
     joining this thread."""
     return self._exception_info
 
+  def GetTotalTime(self):
+    return max(self._stop_time - self._start_time, 0.0)
+
+  def GetNumTests(self):
+    return self._num_tests
+
   def run(self):
     """Delegate main work to a helper method and watch for uncaught
     exceptions."""
+    self._start_time = time.time()
+    self._num_tests = 0
     try:
+      logging.debug('thread %s starting' % (self.getName()))
       self._Run()
+      logging.debug('thread %s done (%d tests)' % (self.getName(),
+                    self.GetNumTests()))
     except:
      # Save the exception for our caller to see.
       self._exception_info = sys.exc_info()
+      self._stop_time = time.time()
       # Re-raise it and die.
+      logging.error('thread %s dying: %s' % (self.getName(),
+                    self._exception_info))
       raise
+    self._stop_time = time.time()
 
   def _Run(self):
     """Main work entry point of the thread. Basically we pull urls from the
@@ -280,6 +298,7 @@ class TestShellThread(threading.Thread):
 
       # We have a url, run tests.
       batch_count += 1
+      self._num_tests += 1
       if self._options.run_singly:
         failures = self._RunTestSingly(test_info)
       else:
@@ -307,7 +326,6 @@ class TestShellThread(threading.Thread):
         self._KillTestShell()
         batch_count = 0
 
-
   def _RunTestSingly(self, test_info):
     """Run a test in a separate thread, enforcing a hard time limit.
 
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index edd78b6..cde3fa2 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -337,6 +337,17 @@ class TestRunner:
 
     return return_value
 
+  def _GetTestInfoForFile(self, test_file):
+    """Returns the appropriate TestInfo object for the file. Mostly this
+    means that we look up the timeout value (in ms) to use for the given
+    test file. By default we use TestRunner.DEFAULT_TEST_TIMEOUT_MS
+    (currently 10 secs), but that can be overridden with the
+    --timeout-ms command line argument. Tests marked SLOW are allowed to
+    be up to ten times slower than the normal timeout value."""
+    if self._expectations.HasModifier(test_file, test_expectations.SLOW):
+      return TestInfo(test_file, str(10 * int(options.time_out_ms)))
+    return TestInfo(test_file, self._options.time_out_ms)
+
   def _GetTestFileQueue(self, test_files):
     """Create the thread safe queue of lists of (test filenames, test URIs)
     tuples. Each TestShellThread pulls a list from this queue and runs those
@@ -349,18 +360,18 @@ class TestRunner:
     Return:
       The Queue of lists of TestInfo objects.
     """
+    if self._options.experimental_fully_parallel:
+      filename_queue = Queue.Queue()
+      for test_file in test_files:
+        filename_queue.put(('.', [self._GetTestInfoForFile(test_file)]))
+      return filename_queue
+
     tests_by_dir = {}
     for test_file in test_files:
       directory = self._GetDirForTestFile(test_file)
       if directory not in tests_by_dir:
         tests_by_dir[directory] = []
-
-      if self._expectations.HasModifier(test_file, test_expectations.SLOW):
-        timeout = str(10 * int(options.time_out_ms))
-      else:
-        timeout = self._options.time_out_ms
-
-      tests_by_dir[directory].append(TestInfo(test_file, timeout))
+      tests_by_dir[directory].append(self._GetTestInfoForFile(test_file))
 
     # Sort by the number of tests in the dir so that the ones with the most
     # tests get run first in order to maximize parallelization. Number of tests
@@ -432,7 +443,8 @@ class TestRunner:
     filename_queue = self._GetTestFileQueue(test_files)
 
     # If we have http tests, the first one will be an http test.
-    if test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0:
+    if ((test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0)
+        or self._options.randomize_order):
       self._http_server.Start()
 
     # Instantiate TestShellThreads and start them.
@@ -509,6 +521,7 @@ class TestRunner:
     failures = {}
     test_timings = {}
     individual_test_timings = []
+    thread_timings = []
     try:
       for thread in threads:
         while thread.isAlive():
@@ -518,6 +531,9 @@ class TestRunner:
          # be interruptible by KeyboardInterrupt.
          thread.join(1.0)
         failures.update(thread.GetFailures())
+        thread_timings.append({ 'name': thread.getName(),
+                                'num_tests': thread.GetNumTests(),
+                                'total_time': thread.GetTotalTime()});
         test_timings.update(thread.GetDirectoryTimingStats())
         individual_test_timings.extend(thread.GetIndividualTestStats())
     except KeyboardInterrupt:
@@ -537,7 +553,18 @@ class TestRunner:
       print
 
     end_time = time.time()
-    logging.info("%f total testing time" % (end_time - start_time))
+
+    logging.info("%6.2f total testing time" % (end_time - start_time))
+    cuml_time = 0
+    logging.debug("Thread timing:")
+    for t in thread_timings:
+      logging.debug("    %10s: %5d tests, %6.2f secs" %
+                    (t['name'], t['num_tests'], t['total_time']))
+      cuml_time += t['total_time']
+    logging.debug("")
+
+    logging.debug("    %6.2f cumulative, %6.2f optimal" %
+                  (cuml_time, cuml_time / int(self._options.num_test_shells)))
     print
     self._PrintTimingStatistics(test_timings, individual_test_timings,
                                 failures)
@@ -1205,5 +1232,8 @@ if '__main__' == __name__:
                            default=False,
                            help="Prints a table mapping tests to their "
                                 "expected results")
+  option_parser.add_option("", "--experimental-fully-parallel",
+                           action="store_true", default=False,
+                           help="run all tests in parallel")
   options, args = option_parser.parse_args()
   main(options, args)