diff options
author | dpranke@google.com <dpranke@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-09-30 00:43:29 +0000 |
---|---|---|
committer | dpranke@google.com <dpranke@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-09-30 00:43:29 +0000 |
commit | d38c8cb35e8c9860981e2df77a98a43b32c9df8f (patch) | |
tree | ed0c0be5e654567e79160f317163576162cb2418 | |
parent | 3e10e93146c326ab1368452dfdc84fb5d5165d33 (diff) | |
download | chromium_src-d38c8cb35e8c9860981e2df77a98a43b32c9df8f.zip chromium_src-d38c8cb35e8c9860981e2df77a98a43b32c9df8f.tar.gz chromium_src-d38c8cb35e8c9860981e2df77a98a43b32c9df8f.tar.bz2 |
Refactor run_webkit_tests a bit so we can extract the list of test files
separately for the layout test task force reporting. We can now build out the
full expectations list by calling functions in layout_package/test_files
and layout_package/test_expectations (previously you needed to call private
routines in run_webkit_tests).
BUG=none
TEST=none
R=ojan@chromium.org
Review URL: http://codereview.chromium.org/254011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27576 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r-- | webkit/tools/layout_tests/layout_package/test_files.py | 71 | ||||
-rwxr-xr-x | webkit/tools/layout_tests/run_webkit_tests.py | 101 |
2 files changed, 95 insertions, 77 deletions
diff --git a/webkit/tools/layout_tests/layout_package/test_files.py b/webkit/tools/layout_tests/layout_package/test_files.py new file mode 100644 index 0000000..45c0dc4 --- /dev/null +++ b/webkit/tools/layout_tests/layout_package/test_files.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This module is used to find all of the layout test files used by Chromium +(across all platforms). It exposes one public function - GatherTestFiles() - +which takes an optional list of paths. If a list is passed in, the returned +list of test files is constrained to those found under the paths passed in, +i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files +under that directory.""" + +import os +import path_utils + +# When collecting test cases, we include any file with these extensions. +_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', + '.php', '.svg']) +# When collecting test cases, skip these directories +_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) + +# Top-level directories to shard when running tests. +SHARDABLE_DIRECTORIES = set(['chrome', 'LayoutTests', 'pending']) + +def GatherTestFiles(paths): + """Generate a set of test files and return them. + + Args: + paths: a list of command line paths relative to the webkit/tests + directory. glob patterns are ok. + """ + paths_to_walk = set() + # if paths is empty, provide a pre-defined list. + if not paths: + paths = SHARDABLE_DIRECTORIES + for path in paths: + # If there's an * in the name, assume it's a glob pattern. 
+ path = os.path.join(path_utils.LayoutTestsDir(path), path) + if path.find('*') > -1: + filenames = glob.glob(path) + paths_to_walk.update(filenames) + else: + paths_to_walk.add(path) + + # Now walk all the paths passed in on the command line and get filenames + test_files = set() + for path in paths_to_walk: + if os.path.isfile(path) and _HasSupportedExtension(path): + test_files.add(os.path.normpath(path)) + continue + + for root, dirs, files in os.walk(path): + # don't walk skipped directories and sub directories + if os.path.basename(root) in _skipped_directories: + del dirs[:] + continue + + for filename in files: + if _HasSupportedExtension(filename): + filename = os.path.join(root, filename) + filename = os.path.normpath(filename) + test_files.add(filename) + + return test_files + +def _HasSupportedExtension(filename): + """Return true if filename is one of the file extensions we want to run a + test on.""" + extension = os.path.splitext(filename)[1] + return extension in _supported_file_extensions + diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py index d9b6559..91a772c 100755 --- a/webkit/tools/layout_tests/run_webkit_tests.py +++ b/webkit/tools/layout_tests/run_webkit_tests.py @@ -42,6 +42,7 @@ from layout_package import json_results_generator from layout_package import path_utils from layout_package import test_failures from layout_package import test_shell_thread +from layout_package import test_files from test_types import fuzzy_image_diff from test_types import image_diff from test_types import test_type_base @@ -111,15 +112,6 @@ class TestRunner: """A class for managing running a series of tests on a series of test files.""" - # When collecting test cases, we include any file with these extensions. 
- _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', - '.php', '.svg']) - # When collecting test cases, skip these directories - _skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) - - # Top-level directories to shard when running tests. - _shardable_directories = set(['chrome', 'LayoutTests', 'pending']) - HTTP_SUBDIR = os.sep.join(['', 'http', '']) # The per-test timeout in milliseconds, if no --time-out-ms option was given @@ -127,12 +119,11 @@ class TestRunner: # test_shell.exe. DEFAULT_TEST_TIMEOUT_MS = 10 * 1000 - def __init__(self, options, paths): - """Collect a list of files to test. + def __init__(self, options): + """Initialize test runner data structures. Args: options: a dictionary of command line options - paths: a list of paths to crawl looking for test files """ self._options = options @@ -145,18 +136,6 @@ class TestRunner: self._test_files_list = None self._file_dir = path_utils.GetAbsolutePath(os.path.dirname(sys.argv[0])) - if options.lint_test_files: - # Creating the expecations for each platform/target pair does all the - # test list parsing and ensures it's correct syntax (e.g. no dupes). - for platform in test_expectations.TestExpectationsFile.PLATFORMS: - self._ParseExpectations(platform, is_debug_mode=True) - self._ParseExpectations(platform, is_debug_mode=False) - else: - self._GatherTestFiles(paths) - self._expectations = self._ParseExpectations(options.platform, - options.target == 'Debug') - self._PrepareListsAndPrintOutput() - def __del__(self): logging.info("flushing stdout") sys.stdout.flush() @@ -166,47 +145,15 @@ class TestRunner: # Stop the http server. self._http_server.Stop() - def _GatherTestFiles(self, paths): - """Generate a set of test files and place them in self._test_files - - Args: - paths: a list of command line paths relative to the webkit/tests - directory. glob patterns are ok. - """ - paths_to_walk = set() - # if paths is empty, provide a pre-defined list. 
- if not paths: - paths = TestRunner._shardable_directories - for path in paths: - # If there's an * in the name, assume it's a glob pattern. - path = os.path.join(path_utils.LayoutTestsDir(path), path) - if path.find('*') > -1: - filenames = glob.glob(path) - paths_to_walk.update(filenames) - else: - paths_to_walk.add(path) - - # Now walk all the paths passed in on the command line and get filenames - for path in paths_to_walk: - if os.path.isfile(path) and self._HasSupportedExtension(path): - self._test_files.add(os.path.normpath(path)) - continue - - for root, dirs, files in os.walk(path): - # don't walk skipped directories and sub directories - if os.path.basename(root) in TestRunner._skipped_directories: - del dirs[:] - continue - - for filename in files: - if self._HasSupportedExtension(filename): - filename = os.path.join(root, filename) - filename = os.path.normpath(filename) - self._test_files.add(filename) + def GatherFilePaths(self, paths): + """Find all the files to test. + args: + paths: a list of globs to use instead of the defaults.""" + self._test_files = test_files.GatherTestFiles(paths) logging.info('Found: %d tests' % len(self._test_files)) - def _ParseExpectations(self, platform, is_debug_mode): + def ParseExpectations(self, platform, is_debug_mode): """Parse the expectations from the test_list files and return a data structure holding them. Throws an error if the test_list files have invalid syntax. @@ -217,17 +164,15 @@ class TestRunner: test_files = self._test_files try: - return test_expectations.TestExpectations(test_files, - self._file_dir, - platform, - is_debug_mode) + self._expectations = test_expectations.TestExpectations(test_files, + self._file_dir, platform, is_debug_mode) except Exception, err: if self._options.lint_test_files: print str(err) else: raise err - def _PrepareListsAndPrintOutput(self): + def PrepareListsAndPrintOutput(self): """Create appropriate subsets of test lists and print test counts. 
Create appropriate subsets of self._tests_files in @@ -329,7 +274,7 @@ class TestRunner: tests_run_file.close() # update expectations so that the stats are calculated correctly - self._expectations = self._ParseExpectations( + self._expectations = self.ParseExpectations( path_utils.PlatformName(), options.target == 'Debug') else: logging.info('Run: %d tests' % len(self._test_files)) @@ -352,12 +297,6 @@ class TestRunner: logging.info('Expected crashes: %d fixable tests' % len(self._expectations.GetFixableCrashes())) - def _HasSupportedExtension(self, filename): - """Return true if filename is one of the file extensions we want to run a - test on.""" - extension = os.path.splitext(filename)[1] - return extension in TestRunner._supported_file_extensions - def AddTestType(self, test_type): """Add a TestType to the TestRunner.""" self._test_types.append(test_type) @@ -389,7 +328,7 @@ class TestRunner: test_file = test_file_parts[1] return_value = directory - while directory in TestRunner._shardable_directories: + while directory in test_files.SHARDABLE_DIRECTORIES: test_file_parts = test_file.split(os.sep, 1) directory = test_file_parts[0] return_value = os.path.join(return_value, directory) @@ -1083,13 +1022,21 @@ def main(options, args): # Create the output directory if it doesn't already exist. path_utils.MaybeMakeDirectory(options.results_directory) - test_runner = TestRunner(options, paths) + test_runner = TestRunner(options) + test_runner.GatherFilePaths(paths) if options.lint_test_files: - # Just creating the TestRunner checks the syntax of the test lists. + # Creating the expecations for each platform/target pair does all the + # test list parsing and ensures it's correct syntax (e.g. no dupes). 
+ for platform in test_expectations.TestExpectationsFile.PLATFORMS: + test_runner.ParseExpectations(platform, is_debug_mode=True) + test_runner.ParseExpectations(platform, is_debug_mode=False) print ("If there are no fail messages, errors or exceptions, then the " "lint succeeded.") return + else: + test_runner.ParseExpectations(options.platform, options.target == 'Debug') + test_runner.PrepareListsAndPrintOutput() if options.find_baselines: # Record where we found each baseline, then exit. |