From a5831ecb281c966c5e3edb61d33d9194c99b2d77 Mon Sep 17 00:00:00 2001 From: "dpranke@chromium.org" Date: Thu, 21 Jan 2010 06:30:26 +0000 Subject: Move the layout test scripts into a 'webkitpy' subdirectory in preparation for upstreaming them to WebKit/WebKitTools/Scripts/webkitpy . This also involves changing run_webkit_tests.py to run_chromium_webkit_tests.py to minimize confusion with the existing run-webkit-tests perl script upstream. There should be no user-visible impact as the existing shell and batch scripts have been updated for the new paths. BUG=none TEST=none R=eseidel@chromium.org Review URL: http://codereview.chromium.org/545145 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@36739 0039d316-1c4b-4281-b951-d872f2087c98 --- webkit/tools/layout_tests/dedup-tests.py | 49 - .../tools/layout_tests/layout_package/__init__.py | 0 .../layout_package/apache_http_server.py | 203 --- .../tools/layout_tests/layout_package/failure.py | 200 --- .../layout_tests/layout_package/failure_finder.py | 892 ----------- .../layout_package/failure_finder_test.py | 374 ----- .../layout_tests/layout_package/html_generator.py | 230 --- .../layout_tests/layout_package/http_server.bat | 1 - .../layout_tests/layout_package/http_server.py | 255 --- .../layout_tests/layout_package/http_server.sh | 23 - .../layout_package/http_server_base.py | 42 - .../json_layout_results_generator.py | 159 -- .../layout_package/json_results_generator.py | 390 ----- .../layout_tests/layout_package/lighttpd.conf | 89 -- .../layout_tests/layout_package/metered_stream.py | 72 - .../layout_tests/layout_package/path_utils.py | 372 ----- .../layout_tests/layout_package/platform_utils.py | 25 - .../layout_package/platform_utils_linux.py | 223 --- .../layout_package/platform_utils_mac.py | 175 --- .../layout_package/platform_utils_win.py | 184 --- .../layout_package/test_expectations.py | 783 --------- .../layout_tests/layout_package/test_failures.py | 241 --- .../layout_tests/layout_package/test_files.py | 71 - .../layout_package/test_shell_thread.py | 488 ------ .../layout_package/websocket_server.py | 250 --- webkit/tools/layout_tests/rebaseline.bat | 2 +- webkit/tools/layout_tests/rebaseline.py | 1011 ------------ webkit/tools/layout_tests/rebaseline.sh | 2 +- webkit/tools/layout_tests/run_webkit_tests.bat | 2 +- webkit/tools/layout_tests/run_webkit_tests.py | 1657 -------------------- webkit/tools/layout_tests/run_webkit_tests.sh | 2 +- .../tools/layout_tests/test_output_formatter.bat | 2 +- webkit/tools/layout_tests/test_output_formatter.py | 105 -- webkit/tools/layout_tests/test_output_formatter.sh | 2 +- .../tools/layout_tests/test_output_xml_to_json.py | 135 -- webkit/tools/layout_tests/test_types/__init__.py | 0 .../layout_tests/test_types/fuzzy_image_diff.py | 47 - webkit/tools/layout_tests/test_types/image_diff.py | 199 --- .../layout_tests/test_types/test_type_base.py | 241 --- webkit/tools/layout_tests/test_types/text_diff.py | 96 -- .../update_expectations_from_dashboard.py | 476 ------ .../update_expectations_from_dashboard_unittest.py | 353 ----- webkit/tools/layout_tests/webkitpy/dedup-tests.py | 49 + .../webkitpy/layout_package/__init__.py | 0 .../webkitpy/layout_package/apache_http_server.py | 203 +++ .../webkitpy/layout_package/failure.py | 200 +++ .../webkitpy/layout_package/failure_finder.py | 892 +++++++++++ .../webkitpy/layout_package/failure_finder_test.py | 374 +++++ .../webkitpy/layout_package/html_generator.py | 230 +++ .../webkitpy/layout_package/http_server.py | 255 +++ 
.../webkitpy/layout_package/http_server_base.py | 42 + .../json_layout_results_generator.py | 159 ++ .../layout_package/json_results_generator.py | 390 +++++ .../webkitpy/layout_package/metered_stream.py | 72 + .../webkitpy/layout_package/path_utils.py | 372 +++++ .../webkitpy/layout_package/platform_utils.py | 25 + .../layout_package/platform_utils_linux.py | 223 +++ .../webkitpy/layout_package/platform_utils_mac.py | 175 +++ .../webkitpy/layout_package/platform_utils_win.py | 184 +++ .../webkitpy/layout_package/test_expectations.py | 783 +++++++++ .../webkitpy/layout_package/test_failures.py | 241 +++ .../webkitpy/layout_package/test_files.py | 71 + .../webkitpy/layout_package/test_shell_thread.py | 488 ++++++ .../webkitpy/layout_package/websocket_server.py | 250 +++ webkit/tools/layout_tests/webkitpy/rebaseline.py | 983 ++++++++++++ .../webkitpy/run_chromium_webkit_tests.py | 1608 +++++++++++++++++++ .../layout_tests/webkitpy/test_output_formatter.py | 105 ++ .../webkitpy/test_output_xml_to_json.py | 135 ++ .../layout_tests/webkitpy/test_types/__init__.py | 0 .../webkitpy/test_types/fuzzy_image_diff.py | 47 + .../layout_tests/webkitpy/test_types/image_diff.py | 199 +++ .../webkitpy/test_types/test_type_base.py | 241 +++ .../layout_tests/webkitpy/test_types/text_diff.py | 96 ++ .../webkitpy/update_expectations_from_dashboard.py | 476 ++++++ .../update_expectations_from_dashboard_unittest.py | 353 +++++ 75 files changed, 9927 insertions(+), 10117 deletions(-) delete mode 100755 webkit/tools/layout_tests/dedup-tests.py delete mode 100644 webkit/tools/layout_tests/layout_package/__init__.py delete mode 100644 webkit/tools/layout_tests/layout_package/apache_http_server.py delete mode 100644 webkit/tools/layout_tests/layout_package/failure.py delete mode 100644 webkit/tools/layout_tests/layout_package/failure_finder.py delete mode 100644 webkit/tools/layout_tests/layout_package/failure_finder_test.py delete mode 100644 webkit/tools/layout_tests/layout_package/html_generator.py delete mode 100644 webkit/tools/layout_tests/layout_package/http_server.bat delete mode 100755 webkit/tools/layout_tests/layout_package/http_server.py delete mode 100755 webkit/tools/layout_tests/layout_package/http_server.sh delete mode 100644 webkit/tools/layout_tests/layout_package/http_server_base.py delete mode 100644 webkit/tools/layout_tests/layout_package/json_layout_results_generator.py delete mode 100644 webkit/tools/layout_tests/layout_package/json_results_generator.py delete mode 100644 webkit/tools/layout_tests/layout_package/lighttpd.conf delete mode 100644 webkit/tools/layout_tests/layout_package/metered_stream.py delete mode 100644 webkit/tools/layout_tests/layout_package/path_utils.py delete mode 100644 webkit/tools/layout_tests/layout_package/platform_utils.py delete mode 100644 webkit/tools/layout_tests/layout_package/platform_utils_linux.py delete mode 100644 webkit/tools/layout_tests/layout_package/platform_utils_mac.py delete mode 100644 webkit/tools/layout_tests/layout_package/platform_utils_win.py delete mode 100644 webkit/tools/layout_tests/layout_package/test_expectations.py delete mode 100644 webkit/tools/layout_tests/layout_package/test_failures.py delete mode 100644 webkit/tools/layout_tests/layout_package/test_files.py delete mode 100644 webkit/tools/layout_tests/layout_package/test_shell_thread.py delete mode 100644 webkit/tools/layout_tests/layout_package/websocket_server.py delete mode 100644 webkit/tools/layout_tests/rebaseline.py delete mode 100755 
webkit/tools/layout_tests/run_webkit_tests.py delete mode 100755 webkit/tools/layout_tests/test_output_formatter.py delete mode 100755 webkit/tools/layout_tests/test_output_xml_to_json.py delete mode 100644 webkit/tools/layout_tests/test_types/__init__.py delete mode 100644 webkit/tools/layout_tests/test_types/fuzzy_image_diff.py delete mode 100644 webkit/tools/layout_tests/test_types/image_diff.py delete mode 100644 webkit/tools/layout_tests/test_types/test_type_base.py delete mode 100644 webkit/tools/layout_tests/test_types/text_diff.py delete mode 100644 webkit/tools/layout_tests/update_expectations_from_dashboard.py delete mode 100644 webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py create mode 100755 webkit/tools/layout_tests/webkitpy/dedup-tests.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/__init__.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/apache_http_server.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/failure.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/failure_finder.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/failure_finder_test.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/html_generator.py create mode 100755 webkit/tools/layout_tests/webkitpy/layout_package/http_server.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/http_server_base.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/json_layout_results_generator.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/json_results_generator.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/metered_stream.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/path_utils.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/platform_utils.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_linux.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_mac.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_win.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/test_expectations.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/test_failures.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/test_files.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/test_shell_thread.py create mode 100644 webkit/tools/layout_tests/webkitpy/layout_package/websocket_server.py create mode 100644 webkit/tools/layout_tests/webkitpy/rebaseline.py create mode 100755 webkit/tools/layout_tests/webkitpy/run_chromium_webkit_tests.py create mode 100755 webkit/tools/layout_tests/webkitpy/test_output_formatter.py create mode 100755 webkit/tools/layout_tests/webkitpy/test_output_xml_to_json.py create mode 100644 webkit/tools/layout_tests/webkitpy/test_types/__init__.py create mode 100644 webkit/tools/layout_tests/webkitpy/test_types/fuzzy_image_diff.py create mode 100644 webkit/tools/layout_tests/webkitpy/test_types/image_diff.py create mode 100644 webkit/tools/layout_tests/webkitpy/test_types/test_type_base.py create mode 100644 webkit/tools/layout_tests/webkitpy/test_types/text_diff.py create mode 100644 webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard.py create mode 100644 webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard_unittest.py 
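Note (not part of the patch): the wrapper scripts in the diffstat above (run_webkit_tests.sh/.bat, rebaseline.sh/.bat, test_output_formatter.sh/.bat) each change by a single line to point at the new webkitpy paths, but their updated contents are not shown in this excerpt. The sketch below is only a hypothetical Python equivalent of that forwarding step; the target path is taken from the diffstat, everything else is an assumption rather than the wrappers' actual code.

    import os
    import subprocess
    import sys

    # Relocated entry point, as listed in the diffstat above.
    SCRIPT = os.path.join('webkit', 'tools', 'layout_tests', 'webkitpy',
                          'run_chromium_webkit_tests.py')

    if __name__ == '__main__':
        # Hypothetical forwarding step: re-invoke the moved script with the
        # same interpreter and arguments, propagating its exit code.
        sys.exit(subprocess.call([sys.executable, SCRIPT] + sys.argv[1:]))
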
(limited to 'webkit/tools') diff --git a/webkit/tools/layout_tests/dedup-tests.py b/webkit/tools/layout_tests/dedup-tests.py deleted file mode 100755 index 0165e40..0000000 --- a/webkit/tools/layout_tests/dedup-tests.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""dedup-tests -- print test results duplicated between win and linux. - -Because the outputs are very similar, we fall back on Windows outputs -if there isn't an expected output for Linux layout tests. This means -that any file that is duplicated between the Linux and Windows directories -is redundant. - -This command dumps out all such files. You can use it like: - dedup-tests.py # print out the bad files - dedup-tests.py | xargs git rm # delete the bad files -""" - -import collections -import os.path -import subprocess -import sys - -# A map of file hash => set of all files with that hash. -hashes = collections.defaultdict(set) - -# Fill in the map. -cmd = ['git', 'ls-tree', '-r', 'HEAD', 'webkit/data/layout_tests/'] -try: - git = subprocess.Popen(cmd, stdout=subprocess.PIPE) -except OSError, e: - if e.errno == 2: # No such file or directory. - print >> sys.stderr, "Error: 'No such file' when running git." - print >> sys.stderr, "This script requires git." - sys.exit(1) - raise e - -for line in git.stdout: - attrs, file = line.strip().split('\t') - _, _, hash = attrs.split(' ') - hashes[hash].add(file) - -# Dump out duplicated files. -for cluster in hashes.values(): - if len(cluster) < 2: - continue - for file in cluster: - if '/chromium-linux/' in file: - if file.replace('/chromium-linux/', '/chromium-win/') in cluster: - print file diff --git a/webkit/tools/layout_tests/layout_package/__init__.py b/webkit/tools/layout_tests/layout_package/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/webkit/tools/layout_tests/layout_package/apache_http_server.py b/webkit/tools/layout_tests/layout_package/apache_http_server.py deleted file mode 100644 index d11906d..0000000 --- a/webkit/tools/layout_tests/layout_package/apache_http_server.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A class to start/stop the apache http server used by layout tests.""" - -import logging -import optparse -import os -import re -import subprocess -import sys - -import http_server_base -import path_utils -import platform_utils - - -class LayoutTestApacheHttpd(http_server_base.HttpServerBase): - - def __init__(self, output_dir): - """Args: - output_dir: the absolute path to the layout test result directory - """ - self._output_dir = output_dir - self._httpd_proc = None - path_utils.MaybeMakeDirectory(output_dir) - - self.mappings = [{'port': 8000}, - {'port': 8080}, - {'port': 8081}, - {'port': 8443, 'sslcert': True}] - - # The upstream .conf file assumed the existence of /tmp/WebKit for - # placing apache files like the lock file there. - self._runtime_path = os.path.join("/tmp", "WebKit") - path_utils.MaybeMakeDirectory(self._runtime_path) - - # The PID returned when Apache is started goes away (due to dropping - # privileges?). The proper controlling PID is written to a file in the - # apache runtime directory. 
- self._pid_file = os.path.join(self._runtime_path, 'httpd.pid') - - test_dir = path_utils.PathFromBase('third_party', 'WebKit', - 'LayoutTests') - js_test_resources_dir = self._CygwinSafeJoin(test_dir, "fast", "js", - "resources") - mime_types_path = self._CygwinSafeJoin(test_dir, "http", "conf", - "mime.types") - cert_file = self._CygwinSafeJoin(test_dir, "http", "conf", - "webkit-httpd.pem") - access_log = self._CygwinSafeJoin(output_dir, "access_log.txt") - error_log = self._CygwinSafeJoin(output_dir, "error_log.txt") - document_root = self._CygwinSafeJoin(test_dir, "http", "tests") - - executable = platform_utils.ApacheExecutablePath() - if self._IsCygwin(): - executable = self._GetCygwinPath(executable) - - cmd = [executable, - '-f', self._GetApacheConfigFilePath(test_dir, output_dir), - '-C', "\'DocumentRoot %s\'" % document_root, - '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir, - '-C', "\'Listen %s\'" % "127.0.0.1:8000", - '-C', "\'Listen %s\'" % "127.0.0.1:8081", - '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, - '-c', "\'CustomLog \"%s\" common\'" % access_log, - '-c', "\'ErrorLog \"%s\"\'" % error_log, - '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", - os.environ.get("USER", ""))] - - if self._IsCygwin(): - cygbin = path_utils.PathFromBase('third_party', 'cygwin', 'bin') - # Not entirely sure why, but from cygwin we need to run the - # httpd command through bash. - self._start_cmd = [ - os.path.join(cygbin, 'bash.exe'), - '-c', - 'PATH=%s %s' % (self._GetCygwinPath(cygbin), " ".join(cmd)), - ] - else: - # TODO(ojan): When we get cygwin using Apache 2, use set the - # cert file for cygwin as well. - cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) - # Join the string here so that Cygwin/Windows and Mac/Linux - # can use the same code. Otherwise, we could remove the single - # quotes above and keep cmd as a sequence. - self._start_cmd = " ".join(cmd) - - def _IsCygwin(self): - return sys.platform in ("win32", "cygwin") - - def _CygwinSafeJoin(self, *parts): - """Returns a platform appropriate path.""" - path = os.path.join(*parts) - if self._IsCygwin(): - return self._GetCygwinPath(path) - return path - - def _GetCygwinPath(self, path): - """Convert a Windows path to a cygwin path. - - The cygpath utility insists on converting paths that it thinks are - Cygwin root paths to what it thinks the correct roots are. So paths - such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin" - are converted to plain "/usr/bin". To avoid this, we - do the conversion manually. - - The path is expected to be an absolute path, on any drive. - """ - drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE) - - def LowerDrive(matchobj): - return '/cygdrive/%s/' % matchobj.group(1).lower() - path = drive_regexp.sub(LowerDrive, path) - return path.replace('\\', '/') - - def _GetApacheConfigFilePath(self, test_dir, output_dir): - """Returns the path to the apache config file to use. - Args: - test_dir: absolute path to the LayoutTests directory. - output_dir: absolute path to the layout test results directory. - """ - httpd_config = platform_utils.ApacheConfigFilePath() - httpd_config_copy = os.path.join(output_dir, "httpd.conf") - httpd_conf = open(httpd_config).read() - if self._IsCygwin(): - # This is a gross hack, but it lets us use the upstream .conf file - # and our checked in cygwin. This tells the server the root - # directory to look in for .so modules. It will use this path - # plus the relative paths to the .so files listed in the .conf - # file. 
We have apache/cygwin checked into our tree so - # people don't have to install it into their cygwin. - cygusr = path_utils.PathFromBase('third_party', 'cygwin', 'usr') - httpd_conf = httpd_conf.replace('ServerRoot "/usr"', - 'ServerRoot "%s"' % self._GetCygwinPath(cygusr)) - - # TODO(ojan): Instead of writing an extra file, checkin a conf file - # upstream. Or, even better, upstream/delete all our chrome http - # tests so we don't need this special-cased DocumentRoot and then - # just use the upstream - # conf file. - chrome_document_root = path_utils.PathFromBase('webkit', 'data', - 'layout_tests') - if self._IsCygwin(): - chrome_document_root = self._GetCygwinPath(chrome_document_root) - httpd_conf = (httpd_conf + - self._GetVirtualHostConfig(chrome_document_root, 8081)) - - f = open(httpd_config_copy, 'wb') - f.write(httpd_conf) - f.close() - - if self._IsCygwin(): - return self._GetCygwinPath(httpd_config_copy) - return httpd_config_copy - - def _GetVirtualHostConfig(self, document_root, port, ssl=False): - """Returns a directive block for an httpd.conf file. - It will listen to 127.0.0.1 on each of the given port. - """ - return '\n'.join(('' % port, - 'DocumentRoot %s' % document_root, - ssl and 'SSLEngine On' or '', - '', '')) - - def _StartHttpdProcess(self): - """Starts the httpd process and returns whether there were errors.""" - # Use shell=True because we join the arguments into a string for - # the sake of Window/Cygwin and it needs quoting that breaks - # shell=False. - self._httpd_proc = subprocess.Popen(self._start_cmd, - stderr=subprocess.PIPE, - shell=True) - err = self._httpd_proc.stderr.read() - if len(err): - logging.debug(err) - return False - return True - - def Start(self): - """Starts the apache http server.""" - # Stop any currently running servers. - self.Stop() - - logging.debug("Starting apache http server") - server_started = self.WaitForAction(self._StartHttpdProcess) - if server_started: - logging.debug("Apache started. Testing ports") - server_started = self.WaitForAction(self.IsServerRunningOnAllPorts) - - if server_started: - logging.debug("Server successfully started") - else: - raise Exception('Failed to start http server') - - def Stop(self): - """Stops the apache http server.""" - logging.debug("Shutting down any running http servers") - httpd_pid = None - if os.path.exists(self._pid_file): - httpd_pid = int(open(self._pid_file).readline()) - path_utils.ShutDownHTTPServer(httpd_pid) diff --git a/webkit/tools/layout_tests/layout_package/failure.py b/webkit/tools/layout_tests/layout_package/failure.py deleted file mode 100644 index 50ef743..0000000 --- a/webkit/tools/layout_tests/layout_package/failure.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -CHROMIUM_WIN = "chromium-win" -CHROMIUM_MAC = "chromium-mac" -CHROMIUM_LINUX = "chromium-linux" -WEBKIT_WIN_TITLE = "WebKit Win" -WEBKIT_MAC_TITLE = "WebKit Mac" -WEBKIT_TITLE = "WebKit" -UNKNOWN = "Unknown" - -EXPECTED_IMAGE_FILE_ENDING = "-expected.png" -ACTUAL_IMAGE_FILE_ENDING = "-actual.png" -UPSTREAM_IMAGE_FILE_ENDING = "-expected-upstream.png" -EXPECTED_TEXT_FILE_ENDING = "-expected.txt" -ACTUAL_TEXT_FILE_ENDING = "-actual.txt" -DIFF_IMAGE_FILE_ENDING = "-diff.png" -DIFF_TEXT_FILE_ENDING = "-diff.txt" - -CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/" -CHROMIUM_TRAC_HOME = CHROMIUM_SRC_HOME + "data/layout_tests/" -WEBKIT_TRAC_HOME = "http://trac.webkit.org/browser/trunk/LayoutTests/" -WEBKIT_SVN_HOSTNAME = "svn.webkit.org" -THIRD_PARTY = "third_party" - -WEBKIT_PLATFORM_URL_BASE = WEBKIT_TRAC_HOME + "platform" -WEBKIT_LAYOUT_TEST_BASE_URL = "http://svn.webkit.org/repository/webkit/trunk/" -WEBKIT_IMAGE_BASELINE_BASE_URL_WIN = (WEBKIT_LAYOUT_TEST_BASE_URL + - "LayoutTests/platform/win/") -WEBKIT_IMAGE_BASELINE_BASE_URL_MAC = (WEBKIT_LAYOUT_TEST_BASE_URL + - "LayoutTests/platform/mac/") -WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_MAC = WEBKIT_PLATFORM_URL_BASE + "/mac/" -WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_WIN = WEBKIT_PLATFORM_URL_BASE + "/win/" - -LAYOUT_TEST_RESULTS_DIR = "layout-test-results" - -FAIL = "FAIL" -TIMEOUT = "TIMEOUT" -CRASH = "CRASH" -PASS = "PASS" -WONTFIX = "WONTFIX" - - -class Failure(object): - """ - This class represents a failure in the test output, and is - intended as a data model object. - """ - - def __init__(self): - self.platform = "" - self.test_path = "" - self.text_diff_mismatch = False - self.image_mismatch = False - self.timeout = False - self.crashed = False - self.text_baseline_url = "" - self.image_baseline_url = "" - self.text_baseline_age = "" - self.image_baseline_age = "" - self.test_age = "" - self.text_baseline_local = "" - self.image_baseline_local = "" - self.text_actual_local = "" - self.image_actual_local = "" - self.image_baseline_upstream_url = "" - self.image_baseline_upstream_local = "" - self.test_expectations_line = "" - self.flakiness = 0 - - def GetExpectedImageFilename(self): - return self._RenameEndOfTestPath(EXPECTED_IMAGE_FILE_ENDING) - - def GetActualImageFilename(self): - return self._RenameEndOfTestPath(ACTUAL_IMAGE_FILE_ENDING) - - def GetExpectedTextFilename(self): - return self._RenameEndOfTestPath(EXPECTED_TEXT_FILE_ENDING) - - def GetActualTextFilename(self): - return self._RenameEndOfTestPath(ACTUAL_TEXT_FILE_ENDING) - - def GetImageDiffFilename(self): - return self._RenameEndOfTestPath(DIFF_IMAGE_FILE_ENDING) - - def GetTextDiffFilename(self): - return self._RenameEndOfTestPath(DIFF_TEXT_FILE_ENDING) - - def GetImageUpstreamFilename(self): - return self._RenameEndOfTestPath(UPSTREAM_IMAGE_FILE_ENDING) - - def _RenameEndOfTestPath(self, suffix): - last_index = self.test_path.rfind(".") - if last_index == -1: - return self.test_path - return self.test_path[0:last_index] + suffix - - def GetTestHome(self): - if self.test_path.startswith("chrome"): - return CHROMIUM_TRAC_HOME + self.test_path - return WEBKIT_TRAC_HOME + self.test_path - - def GetImageBaselineTracHome(self): - if self.IsImageBaselineInWebkit(): - return self._GetTracHome(self.image_baseline_url) - return self.image_baseline_url - - def GetTextBaselineTracHome(self): - if self.text_baseline_url and self.IsTextBaselineInWebkit(): - return self._GetTracHome(self.text_baseline_url) - return self.text_baseline_url - - def 
_GetTracHome(self, file): - return WEBKIT_TRAC_HOME + file[file.find("LayoutTests"):] - - def GetTextBaselineLocation(self): - return self._GetFileLocation(self.text_baseline_url) - - def GetImageBaselineLocation(self): - return self._GetFileLocation(self.image_baseline_url) - - # TODO(gwilson): Refactor this logic so it can be used by multiple scripts. - # TODO(gwilson): Change this so that it respects the fallback order of - # different platforms. (If platform is mac, the fallback should be - # different.) - - def _GetFileLocation(self, file): - if not file: - return None - if file.find(CHROMIUM_WIN) > -1: - return CHROMIUM_WIN - if file.find(CHROMIUM_MAC) > -1: - return CHROMIUM_MAC - if file.find(CHROMIUM_LINUX) > -1: - return CHROMIUM_LINUX - if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_WIN): - return WEBKIT_WIN_TITLE - if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_MAC): - return WEBKIT_MAC_TITLE - # TODO(gwilson): Add mac-snowleopard, mac-leopard, mac-tiger here. - if file.startswith(WEBKIT_LAYOUT_TEST_BASE_URL): - return WEBKIT_TITLE - return UNKNOWN - - def _IsFileInWebKit(self, file): - return file != None and (file.find(WEBKIT_SVN_HOSTNAME) > -1 or - file.find(THIRD_PARTY) > -1) - - def IsImageBaselineInChromium(self): - return not self.IsImageBaselineInWebkit() - - def IsImageBaselineInWebkit(self): - return self._IsFileInWebKit(self.image_baseline_url) - - def IsTextBaselineInChromium(self): - return not self.IsTextBaselineInWebkit() - - def IsTextBaselineInWebkit(self): - return self._IsFileInWebKit(self.text_baseline_url) - - def GetTextResultLocationInZipFile(self): - return self._GetFileLocationInZipFile(self.GetActualTextFilename()) - - def GetImageResultLocationInZipFile(self): - return self._GetFileLocationInZipFile(self.GetActualImageFilename()) - - def _GetFileLocationInZipFile(self, file): - return "%s/%s" % (LAYOUT_TEST_RESULTS_DIR, file) - - # TODO(gwilson): implement this method. - def GetAllBaselineLocations(self): - return None - - # This method determines whether the test is actually expected to fail, - # in order to know whether to retrieve expected test results for it. - # (test results dont exist for tests expected to fail/crash.) - - def IsExpectedToFail(self): - return self._FindKeywordInExpectations(FAIL) - - def IsExpectedToTimeout(self): - return self._FindKeywordInExpectations(TIMEOUT) - - def IsExpectedToCrash(self): - return self._FindKeywordInExpectations(CRASH) - - def IsExpectedToPass(self): - return self._FindKeywordInExpectations(PASS) - - def IsWontFix(self): - return self._FindKeywordInExpectations(WONTFIX) - - def _FindKeywordInExpectations(self, keyword): - if (not self.test_expectations_line or - len(self.test_expectations_line) == 0): - return False - if self.test_expectations_line.find(keyword) > -1: - return True - return False diff --git a/webkit/tools/layout_tests/layout_package/failure_finder.py b/webkit/tools/layout_tests/layout_package/failure_finder.py deleted file mode 100644 index d8aa34f..0000000 --- a/webkit/tools/layout_tests/layout_package/failure_finder.py +++ /dev/null @@ -1,892 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# TODO(gwilson): 1. Change text differs to use external utils. -# 2. Change text_expectations parsing to existing -# logic in layout_pagckage.test_expectations. 
- -import difflib -import errno -import os -import path_utils -import platform_utils -import re -import shutil -import subprocess -import sys -import urllib2 -import zipfile - -from failure import Failure - -WEBKIT_TRAC_HOSTNAME = "trac.webkit.org" -WEBKIT_LAYOUT_TEST_BASE_URL = ("http://svn.webkit.org/repository/" - "webkit/trunk/LayoutTests/") -WEBKIT_PLATFORM_BASELINE_URL = (WEBKIT_LAYOUT_TEST_BASE_URL + - "platform/%s/") - -BUILDBOT_BASE = "http://build.chromium.org/buildbot/" -WEBKIT_BUILDER_BASE = BUILDBOT_BASE + "waterfall/builders/%s" -FYI_BUILDER_BASE = BUILDBOT_BASE + "waterfall.fyi/builders/%s" -RESULTS_URL_BASE = "/builds/%s/steps/webkit_tests/logs/stdio" -ARCHIVE_URL_BASE = "/builds/%s/steps/archive_webkit_tests_results/logs/stdio" -ZIP_FILE_URL_BASE = (BUILDBOT_BASE + - "layout_test_results/%s/%s/layout-test-results.zip") -CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/" -LAYOUT_TEST_REPO_BASE_URL = CHROMIUM_SRC_HOME + "data/layout_tests/" - -# TODO(gwilson): Put flaky test dashboard URL here when ready. -FLAKY_TEST_URL = "" -FLAKY_TEST_REGEX = "%s(\d+)" - -TEST_EXPECTATIONS_URL = (CHROMIUM_SRC_HOME + - "tools/layout_tests/test_expectations.txt") - -# Failure types as found in builder stdio. -TEXT_DIFF_MISMATCH = "Text diff mismatch" -IMAGE_MISMATCH = "Image mismatch" -TEST_TIMED_OUT = "Test timed out" -TEST_SHELL_CRASHED = "Test shell crashed" - -CHROMIUM_WIN = "chromium-win" -CHROMIUM_WIN_XP = "chromium-win-xp" -CHROMIUM_WIN_VISTA = "chromium-win-vista" -CHROMIUM_WIN_7 = "chromium-win-7" -CHROMIUM_MAC = "chromium-mac" -CHROMIUM_LINUX = "chromium-linux" -PLATFORM = "platform" -LAYOUTTESTS = "LayoutTests" - -# These platform dirs must be in order of their precedence. -# TODO(gwilson): This is not the same fallback order as test_shell. This list -# should be reversed, and we need to add detection for the type of OS that -# the given builder is running. -WEBKIT_MAC_PLATFORM_DIRS = ["mac-leopard", "mac-snowleopard", "mac"] -WEBKIT_WIN_PLATFORM_DIRS = ["win", "mac"] -CHROMIUM_MAC_PLATFORM_DIRS = [CHROMIUM_MAC] -CHROMIUM_WIN_PLATFORM_DIRS = [CHROMIUM_WIN_XP, CHROMIUM_WIN_VISTA, - CHROMIUM_WIN_7, CHROMIUM_WIN] -CHROMIUM_LINUX_PLATFORM_DIRS = [CHROMIUM_LINUX, CHROMIUM_WIN] - -ARCHIVE_URL_REGEX = "last.*change: (\d+)" -BUILD_NAME_REGEX = "build name: ([^\s]*)" -CHROMIUM_FILE_AGE_REGEX = '
\s*Modified\s*.* \((.*)\) by' -TEST_PATH_REGEX = "[^\s]+?" -FAILED_REGEX = ("DEBUG (" + TEST_PATH_REGEX + ") failed:\s*" - "(" + TEXT_DIFF_MISMATCH + ")?\s*" - "(" + IMAGE_MISMATCH + ")?\s*" - "(" + TEST_TIMED_OUT + ")?\s*" - "(" + TEST_SHELL_CRASHED + ")?") -FAILED_UNEXPECTED_REGEX = " [^\s]+(?: = .*?)?\n" -LAST_BUILD_REGEX = ("

Recent Builds:

" - "[\s\S]*?") -# Sometimes the lines of hyphens gets interrupted with multiple processes -# outputting to stdio, so don't rely on them being contiguous. -SUMMARY_REGEX = ("\d+ tests ran as expected, " - "\d+ didn't:(.*?)-{78}") # -{78} --> 78 dashes in a row. -SUMMARY_REGRESSIONS = "Regressions:.*?\n((?: [^\s]+(?: = .*?)?\n)+)" -TEST_EXPECTATIONS_PLATFORM_REGEX = "((WONTFIX |BUG.* )+.* %s.* : %s = [^\n]*)" -TEST_EXPECTATIONS_NO_PLATFORM_REGEX = ("((WONTFIX |BUG.* )+.*" - "(?!WIN)(?!LINUX)(?!MAC).* :" - " %s = [^\n]*)") - -WEBKIT_FILE_AGE_REGEX = ('.*?.' - '*?\s*' - '(.*?)') - -LOCAL_BASELINE_REGEXES = [ - ".*/third_party/Webkit/LayoutTests/platform/.*?(/.*)", - ".*/third_party/Webkit/LayoutTests(/.*)", - ".*/webkit/data/layout_tests/platform/.*?/LayoutTests(/.*)", - ".*/webkit/data/layout_tests/platform/.*?(/.*)", - ".*/webkit/data/layout_tests(/.*)", - "(/.*)"] - -UPSTREAM_IMAGE_FILE_ENDING = "-upstream.png" - -TEST_EXPECTATIONS_WONTFIX = "WONTFIX" - -TEMP_ZIP_DIR = "temp-zip-dir" - -TARGETS = ["Release", "Debug"] - - -def GetURLBase(use_fyi): - if use_fyi: - return FYI_BUILDER_BASE - return WEBKIT_BUILDER_BASE - - -def GetResultsURL(build, platform, use_fyi=False): - return (GetURLBase(use_fyi) + RESULTS_URL_BASE) % (platform, build) - - -def GetArchiveURL(build, platform, use_fyi=False): - return (GetURLBase(use_fyi) + ARCHIVE_URL_BASE) % (platform, build) - - -def GetZipFileURL(build, platform): - return ZIP_FILE_URL_BASE % (platform, build) - - -def GetBuilderURL(platform, use_fyi=False): - return GetURLBase(use_fyi) % platform - - -# TODO(gwilson): Once the new flakiness dashboard is done, connect it here. -def GetFlakyTestURL(platform): - return "" - - -# TODO(gwilson): can we refactor these into the resourcegatherer? -def IsLinuxPlatform(platform): - return (platform and platform.find("Linux") > -1) - - -def IsMacPlatform(platform): - return (platform and platform.find("Mac") > -1) - - -def CreateDirectory(dir): - """ - Method that creates the directory structure given. - This will create directories recursively until the given dir exists. - """ - if not os.path.exists(dir): - os.makedirs(dir, 0777) - - -def ExtractFirstValue(string, regex): - m = re.search(regex, string) - if m and m.group(1): - return m.group(1) - return None - - -def ExtractSingleRegexAtURL(url, regex): - content = ScrapeURL(url) - m = re.search(regex, content, re.DOTALL) - if m and m.group(1): - return m.group(1) - return None - - -def ScrapeURL(url): - return urllib2.urlopen(urllib2.Request(url)).read() - - -def GetImageDiffExecutable(): - for target in TARGETS: - try: - return path_utils.ImageDiffPath(target) - except Exception, e: - continue - # This build target did not exist, try the next one. - raise Exception("No image diff executable could be found. You may need " - "to build the image diff project under at least one build " - "target to create image diffs.") - - -def GeneratePNGDiff(file1, file2, output_file): - _compare_available = False - try: - executable = GetImageDiffExecutable() - cmd = [executable, '--diff', file1, file2, output_file] - _compare_available = True - except Exception, e: - print "No command line to compare %s and %s : %s" % (file1, file2, e) - - result = 1 - if _compare_available: - try: - result = subprocess.call(cmd) - except OSError, e: - if e.errno == errno.ENOENT or e.errno == errno.EACCES: - _compare_available = False - print "No possible comparison between %s and %s." % ( - file1, file2) - else: - raise e - if not result: - print "The given PNG images were the same!" 
- return _compare_available - - -# TODO(gwilson): Change this to use the pretty print differs. -def GenerateTextDiff(file1, file2, output_file): - # Open up expected and actual text files and use difflib to compare them. - dataA = open(file1, 'r').read() - dataB = open(file2, 'r').read() - d = difflib.Differ() - diffs = list(d.compare(dataA.split("\n"), dataB.split("\n"))) - output = open(output_file, 'w') - output.write("\n".join(diffs)) - output.close() - - -class BaselineCandidate(object): - """Simple data object for holding the URL and local file path of a - possible baseline. The local file path is meant to refer to the locally- - cached version of the file at the URL.""" - - def __init__(self, local, url): - self.local_file = local - self.baseline_url = url - - def IsValid(self): - return self.local_file != None and self.baseline_url != None - - -class FailureFinder(object): - - def __init__(self, - build, - builder_name, - exclude_known_failures, - test_regex, - output_dir, - max_failures, - verbose, - builder_output_log_file=None, - archive_step_log_file=None, - zip_file=None, - test_expectations_file=None): - self.build = build - # TODO(gwilson): add full url-encoding for the platform. - self.SetPlatform(builder_name) - self.exclude_known_failures = exclude_known_failures - self.exclude_wontfix = True - self.test_regex = test_regex - self.output_dir = output_dir - self.max_failures = max_failures - self.verbose = verbose - self.fyi_builder = False - self._flaky_test_cache = {} - self._test_expectations_cache = None - # If true, scraping will still happen but no files will be downloaded. - self.dont_download = False - # Local caches of log files. If set, the finder will use these files - # rather than scraping them from the buildbot. - self.builder_output_log_file = builder_output_log_file - self.archive_step_log_file = archive_step_log_file - self.zip_file = zip_file - self.test_expectations_file = test_expectations_file - self.delete_zip_file = True - # Determines if the script should scrape the baselines from webkit.org - # and chromium.org, or if it should use local baselines in the current - # checkout. - self.use_local_baselines = False - - def SetPlatform(self, platform): - self.platform = platform.replace(" ", "%20") - - # TODO(gwilson): Change this to get the last build that finished - # successfully. - - def GetLastBuild(self): - """ - Returns the last build number for this platform. - If use_fyi is true, this only looks at the fyi builder. - """ - try: - return ExtractSingleRegexAtURL(GetBuilderURL(self.platform, - self.fyi_builder), - LAST_BUILD_REGEX) - except urllib2.HTTPError: - if not self.fyi_builder: - self.fyi_builder = True - return self.GetLastBuild() - - def GetFailures(self): - if not self.build: - self.build = self.GetLastBuild() - if self.verbose: - print "Using build number %s" % self.build - - if self.use_local_baselines: - self._BuildBaselineIndexes() - self.failures = self._GetFailuresFromBuilder() - if (self.failures and - (self._DownloadResultResources() or self.dont_download)): - return self.failures - return None - - def _GetFailuresFromBuilder(self): - """ - Returns a list of failures for the given build and platform by scraping - the buildbots and parsing their results. - The list returned contains Failure class objects. - """ - if self.verbose: - print "Fetching failures from buildbot..." 
- - content = self._ScrapeBuilderOutput() - if not content: - return None - matches = self._FindMatchesInBuilderOutput(content) - - if self.verbose: - print "%s failures found." % len(matches) - - failures = [] - matches.sort() - for match in matches: - if (len(failures) < self.max_failures and - (not self.test_regex or match[0].find(self.test_regex) > -1)): - failure = self._CreateFailureFromMatch(match) - if self.verbose: - print failure.test_path - failures.append(failure) - - return failures - - def _ScrapeBuilderOutput(self): - # If the build log file is specified, use that instead of scraping. - if self.builder_output_log_file: - log = open(self.builder_output_log_file, 'r') - return "".join(log.readlines()) - - # Scrape the failures from the buildbot for this revision. - try: - - return ScrapeURL(GetResultsURL(self.build, - self.platform, - self.fyi_builder)) - except: - # If we hit a problem, and we're not on the FYI builder, try it - # again on the FYI builder. - if not self.fyi_builder: - if self.verbose: - print ("Could not find builder on waterfall, trying fyi " - "waterfall...") - self.fyi_builder = True - return self._ScrapeBuilderOutput() - print "I could not find that builder, or build did not compile." - print "Check that the builder name matches exactly " - print "(case sensitive), and wrap quotes around builder names " - print "that have spaces." - return None - - # TODO(gwilson): The type of failure is now output in the summary, so no - # matching between the summary and the earlier output is necessary. - # Change this method and others to derive failure types from summary only. - - def _FindMatchesInBuilderOutput(self, output): - matches = [] - matches = re.findall(FAILED_REGEX, output, re.MULTILINE) - if self.exclude_known_failures: - summary = re.search(SUMMARY_REGEX, output, re.DOTALL) - regressions = [] - if summary: - regressions = self._FindRegressionsInSummary(summary.group(1)) - matches = self._MatchRegressionsToFailures(regressions, matches) - return matches - - def _CreateFailureFromMatch(self, match): - failure = Failure() - failure.text_diff_mismatch = match[1] != '' - failure.image_mismatch = match[2] != '' - failure.crashed = match[4] != '' - failure.timeout = match[3] != '' - failure.test_path = match[0] - failure.platform = self.platform - return failure - - def _FindRegressionsInSummary(self, summary): - regressions = [] - if not summary or not len(summary): - return regressions - matches = re.findall(SUMMARY_REGRESSIONS, summary, re.DOTALL) - for match in matches: - lines = re.findall(FAILED_UNEXPECTED_REGEX, match, re.DOTALL) - for line in lines: - clipped = line.strip() - if clipped.find("=") > -1: - clipped = clipped[:clipped.find("=") - 1] - regressions.append(clipped) - return regressions - - def _MatchRegressionsToFailures(self, regressions, failures): - matches = [] - for regression in regressions: - for failure in failures: - if failure[0].find(regression) > -1: - matches.append(failure) - break - return matches - - # TODO(gwilson): add support for multiple conflicting build numbers by - # renaming the zip file and naming the directory appropriately. - - def _DownloadResultResources(self): - """ - Finds and downloads/extracts all of the test results (pixel/text - output) for all of the given failures. 
- """ - - target_zip = "%s/layout-test-results-%s.zip" % (self.output_dir, - self.build) - if self.zip_file: - filename = self.zip_file - self.delete_zip_file = False - else: - revision, build_name = self._GetRevisionAndBuildFromArchiveStep() - zip_url = GetZipFileURL(revision, build_name) - if self.verbose: - print "Downloading zip file from %s to %s" % (zip_url, - target_zip) - filename = self._DownloadFile(zip_url, target_zip, "b") - if not filename: - if self.verbose: - print ("Could not download zip file from %s. " - "Does it exist?" % zip_url) - return False - - if zipfile.is_zipfile(filename): - zip = zipfile.ZipFile(filename) - if self.verbose: - print 'Extracting files...' - directory = "%s/layout-test-results-%s" % (self.output_dir, - self.build) - CreateDirectory(directory) - self._UnzipZipfile(zip, TEMP_ZIP_DIR) - - for failure in self.failures: - failure.test_expectations_line = ( - self._GetTestExpectationsLine(failure.test_path)) - if self.exclude_wontfix and failure.IsWontFix(): - self.failures.remove(failure) - continue - if failure.text_diff_mismatch: - self._PopulateTextFailure(failure, directory, zip) - if failure.image_mismatch: - self._PopulateImageFailure(failure, directory, zip) - if not self.use_local_baselines: - failure.test_age = self._GetFileAge(failure.GetTestHome()) - failure.flakiness = self._GetFlakiness(failure.test_path, - self.platform) - zip.close() - if self.verbose: - print "Files extracted." - if self.delete_zip_file: - if self.verbose: - print "Cleaning up zip file..." - path_utils.RemoveDirectory(TEMP_ZIP_DIR) - os.remove(filename) - return True - else: - if self.verbose: - print ("Downloaded file '%s' doesn't look like a zip file." - % filename) - return False - - def _UnzipZipfile(self, zip, base_dir): - for i, name in enumerate(zip.namelist()): - if not name.endswith('/'): - extracted_file_path = os.path.join(base_dir, name) - try: - (path, filename) = os.path.split(extracted_file_path) - os.makedirs(path, 0777) - except: - pass - outfile = open(extracted_file_path, 'wb') - outfile.write(zip.read(name)) - outfile.flush() - outfile.close() - os.chmod(extracted_file_path, 0777) - - def _GetRevisionAndBuildFromArchiveStep(self): - if self.archive_step_log_file: - log = open(self.archive_step_log_file, 'r') - content = "".join(log.readlines()) - else: - content = ScrapeURL(GetArchiveURL(self.build, - self.platform, - self.fyi_builder)) - revision = ExtractFirstValue(content, ARCHIVE_URL_REGEX) - build_name = ExtractFirstValue(content, BUILD_NAME_REGEX) - return (revision, build_name) - - def _PopulateTextFailure(self, failure, directory, zip): - baseline = self._GetBaseline(failure.GetExpectedTextFilename(), - directory) - failure.text_baseline_local = baseline.local_file - failure.text_baseline_url = baseline.baseline_url - failure.text_baseline_age = ( - self._GetFileAge(failure.GetTextBaselineTracHome())) - failure.text_actual_local = "%s/%s" % (directory, - failure.GetActualTextFilename()) - if (baseline and baseline.IsValid() and not self.dont_download): - self._CopyFileFromZipDir(failure.GetTextResultLocationInZipFile(), - failure.text_actual_local) - GenerateTextDiff(failure.text_baseline_local, - failure.text_actual_local, - directory + "/" + failure.GetTextDiffFilename()) - - def _PopulateImageFailure(self, failure, directory, zip): - baseline = self._GetBaseline(failure.GetExpectedImageFilename(), - directory) - failure.image_baseline_local = baseline.local_file - failure.image_baseline_url = baseline.baseline_url - if baseline and 
baseline.IsValid(): - failure.image_baseline_age = ( - self._GetFileAge(failure.GetImageBaselineTracHome())) - failure.image_actual_local = "%s/%s" % (directory, - failure.GetActualImageFilename()) - self._CopyFileFromZipDir(failure.GetImageResultLocationInZipFile(), - failure.image_actual_local) - if (not GeneratePNGDiff(failure.image_baseline_local, - failure.image_actual_local, - "%s/%s" % - (directory, failure.GetImageDiffFilename())) - and self.verbose): - print "Could not generate PNG diff for %s" % failure.test_path - if failure.IsImageBaselineInChromium() or self.use_local_baselines: - upstream_baseline = ( - self._GetUpstreamBaseline(failure.GetExpectedImageFilename(), - directory)) - failure.image_baseline_upstream_local = \ - upstream_baseline.local_file - failure.image_baseline_upstream_url = \ - upstream_baseline.baseline_url - - def _GetBaseline(self, filename, directory, upstream_only=False): - """ Search and download the baseline for the given test (put it in the - directory given.)""" - - local_filename = os.path.join(directory, filename) - local_directory = local_filename[:local_filename.rfind("/")] - if upstream_only: - last_index = local_filename.rfind(".") - if last_index > -1: - local_filename = (local_filename[:last_index] + - UPSTREAM_IMAGE_FILE_ENDING) - - download_file_modifiers = "" - if local_filename.endswith(".png"): - download_file_modifiers = "b" # binary file - - if not self.dont_download: - CreateDirectory(local_directory) - - local_baseline = None - url_of_baseline = None - - if self.use_local_baselines: - test_path_key = self._NormalizeBaselineIdentifier(filename) - dict = self.baseline_dict - if upstream_only: - dict = self.webkit_baseline_dict - if test_path_key in dict: - local_baseline = dict[test_path_key] - url_of_baseline = local_baseline - shutil.copy(local_baseline, local_directory) - elif self.verbose: - print ("Baseline %s does not exist in the index." % - test_path_key) - else: - index = 0 - possible_files = self._GetPossibleFileList(filename, upstream_only) - # Download the baselines from the webkit.org site. - while local_baseline == None and index < len(possible_files): - local_baseline = self._DownloadFile(possible_files[index], - local_filename, - download_file_modifiers, - True) - if local_baseline: - url_of_baseline = possible_files[index] - index += 1 - - if not local_baseline: - if self.verbose: - print "Could not find any baseline for %s" % filename - else: - local_baseline = os.path.normpath(local_baseline) - if local_baseline and self.verbose: - print "Found baseline: %s" % url_of_baseline - - return BaselineCandidate(local_baseline, url_of_baseline) - - def _AddBaselinePaths(self, list, base_path, directories): - for dir in directories: - list.append(os.path.join(base_path, dir)) - - # TODO(gwilson): Refactor this method to use - # platform_utils_*.BaselineSearchPath instead of custom logic. - - def _BuildBaselineIndexes(self): - """ Builds an index of all the known local baselines in both chromium - and webkit. Two baselines are created, a webkit-specific (no chromium - baseline) dictionary and an overall (both) dictionary. Each one has a - structure like: "/fast/dom/one-expected.txt" -> - "C:\\path\\to\\fast\\dom\\one-expected.txt" - """ - if self.verbose: - print "Building index of all local baselines..." 
- - self.baseline_dict = {} - self.webkit_baseline_dict = {} - - base = os.path.abspath(os.path.curdir) - webkit_base = path_utils.PathFromBase('third_party', 'Webkit', - 'LayoutTests') - chromium_base = path_utils.PathFromBase('webkit', 'data', - 'layout_tests') - chromium_base_platform = os.path.join(chromium_base, PLATFORM) - webkit_base_platform = os.path.join(webkit_base, PLATFORM) - - possible_chromium_files = [] - possible_webkit_files = [] - - if IsMacPlatform(self.platform): - self._AddBaselinePaths(possible_chromium_files, - chromium_base_platform, - CHROMIUM_MAC_PLATFORM_DIRS) - self._AddBaselinePaths(possible_chromium_files, - webkit_base_platform, - WEBKIT_MAC_PLATFORM_DIRS) - self._AddBaselinePaths(possible_webkit_files, - webkit_base_platform, - WEBKIT_MAC_PLATFORM_DIRS) - elif IsLinuxPlatform(self.platform): - self._AddBaselinePaths(possible_chromium_files, - chromium_base_platform, - CHROMIUM_LINUX_PLATFORM_DIRS) - else: - self._AddBaselinePaths(possible_chromium_files, - chromium_base_platform, - CHROMIUM_WIN_PLATFORM_DIRS) - - if not IsMacPlatform(self.platform): - self._AddBaselinePaths(possible_webkit_files, - webkit_base_platform, - WEBKIT_WIN_PLATFORM_DIRS) - - possible_webkit_files.append(webkit_base) - - self._PopulateBaselineDict(possible_webkit_files, - self.webkit_baseline_dict) - self._PopulateBaselineDict(possible_chromium_files, - self.baseline_dict) - for key in self.webkit_baseline_dict.keys(): - if not key in self.baseline_dict: - self.baseline_dict[key] = self.webkit_baseline_dict[key] - - return True - - def _PopulateBaselineDict(self, directories, dictionary): - for dir in directories: - os.path.walk(dir, self._VisitBaselineDir, dictionary) - - def _VisitBaselineDir(self, dict, dirname, names): - """ Method intended to be called by os.path.walk to build up an index - of where all the test baselines exist. """ - # Exclude .svn from the walk, since we don't care what is in these - # dirs. - if '.svn' in names: - names.remove('.svn') - for name in names: - if name.find("-expected.") > -1: - test_path_key = os.path.join(dirname, name) - # Fix path separators to match the separators used on - # the buildbots. - test_path_key = test_path_key.replace("\\", "/") - test_path_key = self._NormalizeBaselineIdentifier( - test_path_key) - if not test_path_key in dict: - dict[test_path_key] = os.path.join(dirname, name) - - # TODO(gwilson): Simplify identifier creation to not rely so heavily on - # directory and path names. - - def _NormalizeBaselineIdentifier(self, test_path): - """ Given either a baseline path (i.e. /LayoutTests/platform/mac/...) - or a test path (i.e. /LayoutTests/fast/dom/....) will normalize - to a unique identifier. This is basically a hashing function for - layout test paths.""" - - for regex in LOCAL_BASELINE_REGEXES: - value = ExtractFirstValue(test_path, regex) - if value: - return value - return test_path - - def _AddBaselineURLs(self, list, base_url, platforms): - # If the base URL doesn't contain any platform in its path, only add - # the base URL to the list. This happens with the chrome/ dir. - if base_url.find("%s") == -1: - list.append(base_url) - return - for platform in platforms: - list.append(base_url % platform) - - # TODO(gwilson): Refactor this method to use - # platform_utils_*.BaselineSearchPath instead of custom logic. This may - # require some kind of wrapper since this method looks for URLs instead - # of local paths. 
- - def _GetPossibleFileList(self, filename, only_webkit): - """ Returns a list of possible filename locations for the given file. - Uses the platform of the class to determine the order. - """ - - possible_chromium_files = [] - possible_webkit_files = [] - - chromium_platform_url = LAYOUT_TEST_REPO_BASE_URL - if not filename.startswith("chrome"): - chromium_platform_url += "platform/%s/" - chromium_platform_url += filename - - webkit_platform_url = WEBKIT_PLATFORM_BASELINE_URL + filename - - if IsMacPlatform(self.platform): - self._AddBaselineURLs(possible_chromium_files, - chromium_platform_url, - CHROMIUM_MAC_PLATFORM_DIRS) - self._AddBaselineURLs(possible_webkit_files, - webkit_platform_url, - WEBKIT_MAC_PLATFORM_DIRS) - elif IsLinuxPlatform(self.platform): - self._AddBaselineURLs(possible_chromium_files, - chromium_platform_url, - CHROMIUM_LINUX_PLATFORM_DIRS) - else: - self._AddBaselineURLs(possible_chromium_files, - chromium_platform_url, - CHROMIUM_WIN_PLATFORM_DIRS) - - if not IsMacPlatform(self.platform): - self._AddBaselineURLs(possible_webkit_files, - webkit_platform_url, - WEBKIT_WIN_PLATFORM_DIRS) - possible_webkit_files.append(WEBKIT_LAYOUT_TEST_BASE_URL + filename) - - if only_webkit: - return possible_webkit_files - return possible_chromium_files + possible_webkit_files - - # Like _GetBaseline, but only retrieves the baseline from upstream (skip - # looking in chromium). - - def _GetUpstreamBaseline(self, filename, directory): - return self._GetBaseline(filename, directory, upstream_only=True) - - def _GetFileAge(self, url): - # Check if the given URL is really a local file path. - if not url or not url.startswith("http"): - return None - try: - if url.find(WEBKIT_TRAC_HOSTNAME) > -1: - return ExtractSingleRegexAtURL(url[:url.rfind("/")], - WEBKIT_FILE_AGE_REGEX % - url[url.find("/browser"):]) - else: - return ExtractSingleRegexAtURL(url + "?view=log", - CHROMIUM_FILE_AGE_REGEX) - except: - if self.verbose: - print "Could not find age for %s. Does the file exist?" % url - return None - - # Returns a flakiness on a scale of 1-50. - # TODO(gwilson): modify this to also return which of the last 10 - # builds failed for this test. - - def _GetFlakiness(self, test_path, target_platform): - url = GetFlakyTestURL(target_platform) - if url == "": - return None - - if url in self._flaky_test_cache: - content = self._flaky_test_cache[url] - else: - content = urllib2.urlopen(urllib2.Request(url)).read() - self._flaky_test_cache[url] = content - - flakiness = ExtractFirstValue(content, FLAKY_TEST_REGEX % test_path) - return flakiness - - def _GetTestExpectations(self): - if not self._test_expectations_cache: - try: - if self.test_expectations_file: - log = open(self.test_expectations_file, 'r') - self._test_expectations_cache = "\n".join(log.readlines()) - else: - self._test_expectations_cache = ScrapeURL( - TEST_EXPECTATIONS_URL) - except HTTPError: - print ("Could not find test_expectations.txt at %s" % - TEST_EXPECTATIONS_URL) - - return self._test_expectations_cache - - def _GetTestExpectationsLine(self, test_path): - content = self._GetTestExpectations() - - if not content: - return None - - for match in content.splitlines(): - line = re.search(".*? : (.*?) 
= .*", match) - if line and test_path.find(line.group(1)) > -1: - return match - - return None - - def _CopyFileFromZipDir(self, file_in_zip, file_to_create): - modifiers = "" - if file_to_create.endswith(".png"): - modifiers = "b" - dir = os.path.join(os.path.split(file_to_create)[0:-1])[0] - CreateDirectory(dir) - file = os.path.normpath(os.path.join(TEMP_ZIP_DIR, file_in_zip)) - shutil.copy(file, dir) - - def _ExtractFileFromZip(self, zip, file_in_zip, file_to_create): - modifiers = "" - if file_to_create.endswith(".png"): - modifiers = "b" - try: - CreateDirectory(file_to_create[0:file_to_create.rfind("/")]) - localFile = open(file_to_create, "w%s" % modifiers) - localFile.write(zip.read(file_in_zip)) - localFile.close() - os.chmod(file_to_create, 0777) - return True - except KeyError: - print "File %s does not exist in zip file." % (file_in_zip) - except AttributeError: - print "File %s does not exist in zip file." % (file_in_zip) - print "Is this zip file assembled correctly?" - return False - - def _DownloadFile(self, url, local_filename=None, modifiers="", - force=False): - """ - Copy the contents of a file from a given URL - to a local file. - """ - try: - if local_filename == None: - local_filename = url.split('/')[-1] - if os.path.isfile(local_filename) and not force: - if self.verbose: - print "File at %s already exists." % local_filename - return local_filename - if self.dont_download: - return local_filename - webFile = urllib2.urlopen(url) - localFile = open(local_filename, ("w%s" % modifiers)) - localFile.write(webFile.read()) - webFile.close() - localFile.close() - os.chmod(local_filename, 0777) - except urllib2.HTTPError: - return None - except urllib2.URLError: - print "The url %s is malformed." % url - return None - return localFile.name diff --git a/webkit/tools/layout_tests/layout_package/failure_finder_test.py b/webkit/tools/layout_tests/layout_package/failure_finder_test.py deleted file mode 100644 index 97fbed5..0000000 --- a/webkit/tools/layout_tests/layout_package/failure_finder_test.py +++ /dev/null @@ -1,374 +0,0 @@ -#!/bin/env/python -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import os -import zipfile - -from failure_finder import FailureFinder - -TEST_BUILDER_OUTPUT = """090723 10:38:22 test_shell_thread.py:289 - ERROR chrome/fast/forms/textarea-metrics.html failed: - Text diff mismatch - 090723 10:38:21 test_shell_thread.py:289 - ERROR chrome/fast/dom/xss-DENIED-javascript-variations.html failed: - Text diff mismatch - 090723 10:37:58 test_shell_thread.py:289 - ERROR LayoutTests/plugins/bindings-test.html failed: - Text diff mismatch - ------------------------------------------------------------------------------- -Expected to crash, but passed (1): - chrome/fast/forms/textarea-metrics.html - -Regressions: Unexpected failures (2): - chrome/fast/dom/xss-DENIED-javascript-variations.html = FAIL - LayoutTests/plugins/bindings-test.html = FAIL ------------------------------------------------------------------------------- -""" - -TEST_FAILURE_1 = ("layout-test-results/chrome/fast/forms/" - "textarea-metrics-actual.txt") -TEST_FAILURE_2 = ("layout-test-results/chrome/fast/dom/" - "xss-DENIED-javascript-variations-actual.txt") -TEST_FAILURE_3 = ("layout-test-results/LayoutTests/plugins/" - "bindings-test-actual.txt") - -TEST_ARCHIVE_OUTPUT = """ -Adding layout-test-results\pending\fast\repaint\not-real-actual.checksum -Adding layout-test-results\pending\fast\repaint\not-real-actual.png -Adding layout-test-results\pending\fast\repaint\not-real-actual.txt -last change: 22057 -build name: webkit-rel -host name: codf138 -saving results to \\my\test\location\webkit-rel\22057 -program finished with exit code 0 -""" - -TEST_TEST_EXPECTATIONS = """ -BUG1234 chrome/fast/forms/textarea-metrics.html = CRASH -""" - -TEST_BUILDER_LOG_FILE = "TEST_builder.log" -TEST_ARCHIVE_LOG_FILE = "TEST_archive.log" -TEST_DUMMY_ZIP_FILE = "TEST_zipfile.zip" -TEST_EXPECTATIONS_FILE = "TEST_expectations.txt" - -WEBKIT_BUILDER_NUMBER = "9800" -WEBKIT_FAILURES = ( - ["LayoutTests/fast/backgrounds/animated-svg-as-mask.html", - "LayoutTests/fast/backgrounds/background-clip-text.html", - "LayoutTests/fast/backgrounds/mask-composite.html", - "LayoutTests/fast/backgrounds/repeat/mask-negative-offset-repeat.html", - "LayoutTests/fast/backgrounds/svg-as-background-3.html", - "LayoutTests/fast/backgrounds/svg-as-background-6.html", - "LayoutTests/fast/backgrounds/svg-as-mask.html", - "LayoutTests/fast/block/float/013.html", - "LayoutTests/fast/block/float/nested-clearance.html", - "LayoutTests/fast/block/positioning/047.html"]) - -CHROMIUM_BASELINE = "chrome/fast/forms/basic-buttons.html" -EXPECTED_CHROMIUM_LOCAL_BASELINE = "./chrome/fast/forms/basic-buttons.html" -EXPECTED_CHROMIUM_URL_BASELINE = ("http://src.chromium.org/viewvc/chrome/" - "trunk/src/webkit/data/layout_tests/chrome/" - "fast/forms/basic-buttons.html") - -WEBKIT_BASELINE = "LayoutTests/fast/forms/11423.html" -EXPECTED_WEBKIT_LOCAL_BASELINE = "./LayoutTests/fast/forms/11423.html" -EXPECTED_WEBKIT_URL_BASELINE = ( - "http://svn.webkit.org/repository/webkit/trunk/" - "LayoutTests/fast/forms/11423.html") - -TEST_ZIP_FILE = ("http://build.chromium.org/buildbot/layout_test_results/" - "webkit-rel/21432/layout-test-results.zip") - -EXPECTED_REVISION = "20861" -EXPECTED_BUILD_NAME = "webkit-rel" - -SVG_TEST_EXPECTATION = ( - "LayoutTests/svg/custom/foreign-object-skew-expected.png") -SVG_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/svg/custom/" - "foreign-object-skew-expected-upstream.png") -WEBARCHIVE_TEST_EXPECTATION = ("LayoutTests/webarchive/adopt-attribute-" - "styled-body-webarchive-expected.webarchive") -DOM_TEST_EXPECTATION = 
("LayoutTests/fast/dom/" - "attribute-downcast-right-expected.txt") -DOM_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/fast/dom/" - "attribute-downcast-right-" - "expected-upstream.png") - -TEST_EXPECTATIONS = """ -BUG1234 WONTFIX : LayoutTests/fast/backgrounds/svg-as-background-3.html = FAIL -BUG3456 WIN : LayoutTests/fast/backgrounds/svg-as-background-6.html = CRASH -BUG4567 : LayoutTests/fast/backgrounds/svg-as-mask.html = PASS -WONTFIX : LayoutTests/fast/block/ = FAIL -""" - -EXPECT_EXACT_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-6.html" -EXPECT_GENERAL_MATCH = "LayoutTests/fast/block/float/013.html" -EXPECT_NO_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-99.html" - -WEBKIT_ORG = "webkit.org" -CHROMIUM_ORG = "chromium.org" - - -class FailureFinderTest(object): - - def runTests(self): - all_tests_passed = True - - tests = ["testWhitespaceInBuilderName", - "testGetLastBuild", - "testFindMatchesInBuilderOutput", - "testScrapeBuilderOutput", - "testGetChromiumBaseline", - "testGetWebkitBaseline", - "testZipDownload", - "testUseLocalOutput", - "testTranslateBuildToZip", - "testGetBaseline", - "testFindTestExpectations", - "testFull"] - - for test in tests: - try: - result = eval(test + "()") - if result: - print "[ OK ] %s" % test - else: - all_tests_passed = False - print "[ FAIL ] %s" % test - except: - print "[ ERROR ] %s" % test - return all_tests_passed - - -def _getBasicFailureFinder(): - return FailureFinder(None, "Webkit", False, "", ".", 10, False) - - -def _testLastBuild(failure_finder): - try: - last_build = failure_finder.GetLastBuild() - # Verify that last_build is not empty and is a number. - build = int(last_build) - return (build > 0) - except: - return False - - -def testGetLastBuild(): - test = _getBasicFailureFinder() - return _testLastBuild(test) - - -def testWhitespaceInBuilderName(): - test = _getBasicFailureFinder() - test.SetPlatform("Webkit (webkit.org)") - return _testLastBuild(test) - - -def testScrapeBuilderOutput(): - - # Try on the default builder. - test = _getBasicFailureFinder() - test.build = "9800" - output = test._ScrapeBuilderOutput() - if not output: - return False - - # Try on a crazy builder on the FYI waterfall. - test = _getBasicFailureFinder() - test.build = "1766" - test.SetPlatform("Webkit Linux (webkit.org)") - output = test._ScrapeBuilderOutput() - if not output: - return False - - return True - - -def testFindMatchesInBuilderOutput(): - test = _getBasicFailureFinder() - test.exclude_known_failures = True - matches = test._FindMatchesInBuilderOutput(TEST_BUILDER_OUTPUT) - # Verify that we found x matches. - if len(matches) != 2: - print "Did not find all unexpected failures." - return False - - test.exclude_known_failures = False - matches = test._FindMatchesInBuilderOutput(TEST_BUILDER_OUTPUT) - if len(matches) != 3: - print "Did not find all failures." - return False - return True - - -def _testBaseline(test_name, expected_local, expected_url): - test = _getBasicFailureFinder() - # Test baseline that is obviously in Chromium's tree. 
- baseline = test._GetBaseline(test_name, ".", False) - try: - os.remove(baseline.local_file) - if (baseline.local_file != expected_local or - baseline.baseline_url != expected_url): - return False - except: - return False - return True - - -def testGetChromiumBaseline(): - return _testBaseline(CHROMIUM_BASELINE, EXPECTED_CHROMIUM_LOCAL_BASELINE, - EXPECTED_CHROMIUM_URL_BASELINE) - - -def testGetWebkitBaseline(): - return _testBaseline(WEBKIT_BASELINE, EXPECTED_WEBKIT_LOCAL_BASELINE, - EXPECTED_WEBKIT_URL_BASELINE) - - -def testUseLocalOutput(): - test_result = True - try: - _writeFile(TEST_BUILDER_LOG_FILE, TEST_BUILDER_OUTPUT) - _writeFile(TEST_ARCHIVE_LOG_FILE, TEST_ARCHIVE_OUTPUT) - _writeFile(TEST_EXPECTATIONS_FILE, TEST_TEST_EXPECTATIONS) - zip = zipfile.ZipFile(TEST_DUMMY_ZIP_FILE, 'w') - zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_1) - zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_2) - zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_3) - zip.close() - test = _getBasicFailureFinder() - test.archive_step_log_file = TEST_ARCHIVE_LOG_FILE - test.builder_output_log_file = TEST_BUILDER_LOG_FILE - test.test_expectations_file = TEST_EXPECTATIONS_FILE - test.zip_file = TEST_DUMMY_ZIP_FILE - test.dont_download = True - test.exclude_known_failures = True - test.delete_zip_file = False - failures = test.GetFailures() - if not failures or len(failures) != 2: - print "Did not get expected number of failures :" - for failure in failures: - print failure.test_path - test_result = False - finally: - os.remove(TEST_BUILDER_LOG_FILE) - os.remove(TEST_ARCHIVE_LOG_FILE) - os.remove(TEST_EXPECTATIONS_FILE) - os.remove(TEST_DUMMY_ZIP_FILE) - return test_result - - -def _writeFile(filename, contents): - myfile = open(filename, 'w') - myfile.write(contents) - myfile.close() - - -def testZipDownload(): - test = _getBasicFailureFinder() - try: - test._DownloadFile(TEST_ZIP_FILE, "test.zip", "b") # "b" -> binary - os.remove("test.zip") - return True - except: - return False - - -def testTranslateBuildToZip(): - test = _getBasicFailureFinder() - test.build = WEBKIT_BUILDER_NUMBER - revision, build_name = test._GetRevisionAndBuildFromArchiveStep() - if revision != EXPECTED_REVISION or build_name != EXPECTED_BUILD_NAME: - return False - return True - - -def testGetBaseline(): - test = _getBasicFailureFinder() - result = True - test.platform = "chromium-mac" - baseline = test._GetBaseline(WEBARCHIVE_TEST_EXPECTATION, ".") - if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1: - result = False - print "Webarchive layout test not found at webkit.org: %s" % url - test.platform = "chromium-win" - baseline = test._GetBaseline(SVG_TEST_EXPECTATION, ".") - if (not baseline.local_file or - baseline.baseline_url.find(CHROMIUM_ORG) == -1): - result = False - print "SVG layout test found at %s, not chromium.org" % url - baseline = test._GetBaseline(SVG_TEST_EXPECTATION, ".", True) - if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1: - result = False - print "Upstream SVG layout test NOT found at webkit.org!" 
- baseline = test._GetBaseline(DOM_TEST_EXPECTATION, ".", True) - if (not baseline.local_file or - baseline.baseline_url.find("/platform/") > -1): - result = False - print ("Upstream SVG layout test found in a " - "platform directory: %s" % url) - os.remove(WEBARCHIVE_TEST_EXPECTATION) - os.remove(SVG_TEST_EXPECTATION) - os.remove(SVG_TEST_EXPECTATION_UPSTREAM) - os.remove(DOM_TEST_EXPECTATION_UPSTREAM) - deleteDir("LayoutTests") - return result - - -def deleteDir(directory): - """ Recursively deletes empty directories given a root. - This method will throw an exception if they are not empty. """ - for root, dirs, files in os.walk(directory, topdown=False): - for d in dirs: - try: - os.rmdir(os.path.join(root, d)) - except: - pass - os.rmdir(directory) - - -def testFull(): - """ Verifies that the entire system works end-to-end. """ - test = _getBasicFailureFinder() - test.build = WEBKIT_BUILDER_NUMBER - test.dont_download = True # Dry run only, no downloading needed. - failures = test.GetFailures() - # Verify that the max failures parameter works. - if not failures or len(failures) > 10: - "Got no failures or too many failures." - return False - - # Verify the failures match the list of expected failures. - for failure in failures: - if not (failure.test_path in WEBKIT_FAILURES): - print "Found a failure I did not expect to see." - return False - - return True - - -def testFindTestExpectations(): - test = _getBasicFailureFinder() - test._test_expectations_cache = TEST_EXPECTATIONS - match = test._GetTestExpectationsLine(EXPECT_EXACT_MATCH) - if not match: - return False - match = test._GetTestExpectationsLine(EXPECT_GENERAL_MATCH) - if not match: - return False - match = test._GetTestExpectationsLine(EXPECT_NO_MATCH) - return not match - - -if __name__ == "__main__": - fft = FailureFinderTest() - result = fft.runTests() - if result: - print "All tests passed." - else: - print "Not all tests passed." diff --git a/webkit/tools/layout_tests/layout_package/html_generator.py b/webkit/tools/layout_tests/layout_package/html_generator.py deleted file mode 100644 index b93166b..0000000 --- a/webkit/tools/layout_tests/layout_package/html_generator.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import re - -from failure import Failure - -CHROMIUM_BUG_URL = "http://crbug.com/" - - -def ExtractFirstValue(string, regex): - m = re.search(regex, string) - if m and m.group(1): - return m.group(1) - return None - -# TODO(gwilson): Refactor HTML generation into a HTML templating system like -# Django templates. - - -class HTMLGenerator(object): - - def __init__(self, failures, output_dir, build, platform, - exclude_known_failures): - self.failures = failures - self.output_dir = output_dir - self.build = build - self.platform = platform - self.exclude_known_failures = exclude_known_failures - self.image_size = "200px" - - def GenerateHTML(self): - html = "" - html += """ - - - - - - """ - title = "All failures" - if self.exclude_known_failures: - title = "Regressions" - - html += """ -

%s for build %s (%s)

- """ % (title, self.build, self.platform) - - test_number = 0 - - # TODO(gwilson): Refactor this to do a join() on an array of HTML, - # rather than appending strings in a loop. - for failure in self.failures: - test_number += 1 - html += """ - - - -
- %s.  %s
  Last modified: %s - """ % (test_number, failure.test_path, failure.GetTestHome(), - failure.test_age) - html += "
" - html += "
%s
" % \ - (self._GenerateLinkifiedTextExpectations(failure)) - - html += self._GenerateFlakinessHTML(failure) - - if failure.crashed: - html += "
Test CRASHED
" - elif failure.timeout: - html += "
Test TIMED OUT
" - else: - html += """ - - - - - - - - - """ - - if failure.text_diff_mismatch: - html += self._GenerateTextFailureHTML(failure) - - if failure.image_mismatch: - html += self._GenerateImageFailureHTML(failure) - - html += "
 ExpectedActualDifferenceUpstream
" - html += "

" - html += """""" - - # TODO(gwilson): Change this filename to be passed in as an argument. - html_filename = "%s/index-%s.html" % (self.output_dir, self.build) - htmlFile = open(html_filename, 'w') - htmlFile.write(html) - htmlFile.close() - return html_filename - - def _GenerateLinkifiedTextExpectations(self, failure): - if not failure.test_expectations_line: - return "" - bug_number = ExtractFirstValue(failure.test_expectations_line, - "BUG(\d+)") - if not bug_number or bug_number == "": - return "" - return failure.test_expectations_line.replace( - "BUG" + bug_number, - "BUG%s" % (CHROMIUM_BUG_URL, bug_number, - bug_number)) - - # TODO(gwilson): Fix this so that it shows the last ten runs - # not just a "meter" of flakiness. - - def _GenerateFlakinessHTML(self, failure): - html = "" - if not failure.flakiness: - return html - html += """ - - - - """ % (failure.flakiness) - - flaky_red = int(round(int(failure.flakiness) / 5)) - flaky_green = 10 - flaky_red - for i in range(0, flaky_green): - html += """ - - """ - for i in range(0, flaky_red): - html += """ - - """ - html += """ - -
Flakiness: (%s)    

- """ - return html - - def _GenerateTextFailureHTML(self, failure): - html = "" - if not failure.GetTextBaselineLocation(): - return """This test likely does not have any - TEXT baseline for this platform, or one could not - be found.""" - html += """ - - - Render Tree Dump
- %s baseline
- Age: %s
- - """ % (failure.text_baseline_url, - failure.GetTextBaselineLocation(), - failure.text_baseline_age) - html += self._GenerateTextFailureTD(failure.GetExpectedTextFilename(), - "expected text") - html += self._GenerateTextFailureTD(failure.GetActualTextFilename(), - "actual text") - html += self._GenerateTextFailureTD(failure.GetTextDiffFilename(), - "text diff") - html += " " - html += "" - return html - - def _GenerateTextFailureTD(self, file_path, anchor_text): - return ("" - "%s") % ( - self.build, file_path, anchor_text) - - def _GenerateImageFailureHTML(self, failure): - if not failure.GetImageBaselineLocation(): - return """This test likely does not have any - IMAGE baseline for this platform, or one could not be - found.""" - html = """ - - Pixel Dump
- %s baseline
Age: %s - """ % (failure.image_baseline_url, - failure.GetImageBaselineLocation(), - failure.image_baseline_age) - html += self._GenerateImageFailureTD( - failure.GetExpectedImageFilename()) - html += self._GenerateImageFailureTD( - failure.GetActualImageFilename()) - html += self._GenerateImageFailureTD( - failure.GetImageDiffFilename()) - if (failure.image_baseline_upstream_local and - failure.image_baseline_upstream_local != ""): - html += self._GenerateImageFailureTD( - failure.GetImageUpstreamFilename()) - else: - html += """ -   - """ - html += "" - return html - - def _GenerateImageFailureTD(self, filename): - return ("" - "" - "") % (self.build, filename, self.image_size, - self.build, filename) diff --git a/webkit/tools/layout_tests/layout_package/http_server.bat b/webkit/tools/layout_tests/layout_package/http_server.bat deleted file mode 100644 index 6fddd29..0000000 --- a/webkit/tools/layout_tests/layout_package/http_server.bat +++ /dev/null @@ -1 +0,0 @@ -%~dp0..\..\..\..\third_party\python_24\python.exe %~dp0http_server.py %* diff --git a/webkit/tools/layout_tests/layout_package/http_server.py b/webkit/tools/layout_tests/layout_package/http_server.py deleted file mode 100755 index 6c279d6..0000000 --- a/webkit/tools/layout_tests/layout_package/http_server.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A class to help start/stop the lighttpd server used by layout tests.""" - - -import logging -import optparse -import os -import shutil -import subprocess -import sys -import tempfile -import time -import urllib - -import http_server_base -import path_utils - -# So we can import httpd_utils below to make ui_tests happy. 
-sys.path.append(path_utils.PathFromBase('tools', 'python')) -import google.httpd_utils - - -def RemoveLogFiles(folder, starts_with): - files = os.listdir(folder) - for file in files: - if file.startswith(starts_with): - full_path = os.path.join(folder, file) - os.remove(full_path) - - -class Lighttpd(http_server_base.HttpServerBase): - # Webkit tests - try: - _webkit_tests = path_utils.PathFromBase('third_party', 'WebKit', - 'LayoutTests', 'http', 'tests') - _js_test_resource = path_utils.PathFromBase('third_party', 'WebKit', - 'LayoutTests', 'fast', - 'js', 'resources') - except path_utils.PathNotFound: - _webkit_tests = None - _js_test_resource = None - - # Path where we can access all of the tests - _all_tests = path_utils.PathFromBase('webkit', 'data', 'layout_tests') - # Self generated certificate for SSL server (for client cert get - # \chrome\test\data\ssl\certs\root_ca_cert.crt) - _pem_file = path_utils.PathFromBase('tools', 'python', 'google', - 'httpd_config', 'httpd2.pem') - # One mapping where we can get to everything - VIRTUALCONFIG = [{'port': 8081, 'docroot': _all_tests}] - - if _webkit_tests: - VIRTUALCONFIG.extend( - # Three mappings (one with SSL enabled) for LayoutTests http tests - [{'port': 8000, 'docroot': _webkit_tests}, - {'port': 8080, 'docroot': _webkit_tests}, - {'port': 8443, 'docroot': _webkit_tests, 'sslcert': _pem_file}]) - - def __init__(self, output_dir, background=False, port=None, - root=None, register_cygwin=None, run_background=None): - """Args: - output_dir: the absolute path to the layout test result directory - """ - self._output_dir = output_dir - self._process = None - self._port = port - self._root = root - self._register_cygwin = register_cygwin - self._run_background = run_background - if self._port: - self._port = int(self._port) - - def IsRunning(self): - return self._process != None - - def Start(self): - if self.IsRunning(): - raise 'Lighttpd already running' - - base_conf_file = path_utils.PathFromBase('webkit', - 'tools', 'layout_tests', 'layout_package', 'lighttpd.conf') - out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf') - time_str = time.strftime("%d%b%Y-%H%M%S") - access_file_name = "access.log-" + time_str + ".txt" - access_log = os.path.join(self._output_dir, access_file_name) - log_file_name = "error.log-" + time_str + ".txt" - error_log = os.path.join(self._output_dir, log_file_name) - - # Remove old log files. We only need to keep the last ones. - RemoveLogFiles(self._output_dir, "access.log-") - RemoveLogFiles(self._output_dir, "error.log-") - - # Write out the config - f = file(base_conf_file, 'rb') - base_conf = f.read() - f.close() - - f = file(out_conf_file, 'wb') - f.write(base_conf) - - # Write out our cgi handlers. Run perl through env so that it - # processes the #! line and runs perl with the proper command - # line arguments. Emulate apache's mod_asis with a cat cgi handler. - f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n' - ' ".pl" => "/usr/bin/env",\n' - ' ".asis" => "/bin/cat",\n' - ' ".php" => "%s" )\n\n') % - path_utils.LigHTTPdPHPPath()) - - # Setup log files - f.write(('server.errorlog = "%s"\n' - 'accesslog.filename = "%s"\n\n') % (error_log, access_log)) - - # Setup upload folders. Upload folder is to hold temporary upload files - # and also POST data. This is used to support XHR layout tests that - # does POST. 
- f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir)) - - # Setup a link to where the js test templates are stored - f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') % - (self._js_test_resource)) - - # dump out of virtual host config at the bottom. - if self._root: - if self._port: - # Have both port and root dir. - mappings = [{'port': self._port, 'docroot': self._root}] - else: - # Have only a root dir - set the ports as for LayoutTests. - # This is used in ui_tests to run http tests against a browser. - - # default set of ports as for LayoutTests but with a - # specified root. - mappings = [{'port': 8000, 'docroot': self._root}, - {'port': 8080, 'docroot': self._root}, - {'port': 8443, 'docroot': self._root, - 'sslcert': Lighttpd._pem_file}] - else: - mappings = self.VIRTUALCONFIG - for mapping in mappings: - ssl_setup = '' - if 'sslcert' in mapping: - ssl_setup = (' ssl.engine = "enable"\n' - ' ssl.pemfile = "%s"\n' % mapping['sslcert']) - - f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n' - ' server.document-root = "%s"\n' + - ssl_setup + - '}\n\n') % (mapping['port'], mapping['docroot'])) - f.close() - - executable = path_utils.LigHTTPdExecutablePath() - module_path = path_utils.LigHTTPdModulePath() - start_cmd = [executable, - # Newly written config file - '-f', path_utils.PathFromBase(self._output_dir, - 'lighttpd.conf'), - # Where it can find its module dynamic libraries - '-m', module_path] - - if not self._run_background: - start_cmd.append(# Don't background - '-D') - - # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the - # bug that mod_alias.so loads it from the hard coded path. - if sys.platform == 'darwin': - tmp_module_path = '/tmp/lighttpd/lib' - if not os.path.exists(tmp_module_path): - os.makedirs(tmp_module_path) - lib_file = 'liblightcomp.dylib' - shutil.copyfile(os.path.join(module_path, lib_file), - os.path.join(tmp_module_path, lib_file)) - - # Put the cygwin directory first in the path to find cygwin1.dll - env = os.environ - if sys.platform in ('cygwin', 'win32'): - env['PATH'] = '%s;%s' % ( - path_utils.PathFromBase('third_party', 'cygwin', 'bin'), - env['PATH']) - - if sys.platform == 'win32' and self._register_cygwin: - setup_mount = path_utils.PathFromBase('third_party', 'cygwin', - 'setup_mount.bat') - subprocess.Popen(setup_mount).wait() - - logging.debug('Starting http server') - self._process = subprocess.Popen(start_cmd, env=env) - - # Wait for server to start. - self.mappings = mappings - server_started = self.WaitForAction(self.IsServerRunningOnAllPorts) - - # Our process terminated already - if not server_started or self._process.returncode != None: - raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.') - - logging.debug("Server successfully started") - - # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are - # probably not being flushed, etc... why doesn't our python have os.kill ? - - def Stop(self, force=False): - if not force and not self.IsRunning(): - return - - httpd_pid = None - if self._process: - httpd_pid = self._process.pid - path_utils.ShutDownHTTPServer(httpd_pid) - - if self._process: - self._process.wait() - self._process = None - -if '__main__' == __name__: - # Provide some command line params for starting/stopping the http server - # manually. Also used in ui_tests to run http layout tests in a browser. 
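The per-port loop in Lighttpd.Start() above turns each mapping dict into a lighttpd socket block. A standalone sketch of that transformation follows; the port number, docroot, and pem path in the usage comment are made up, while the 'port', 'docroot', and 'sslcert' keys mirror the mappings used above.

# Illustrative sketch (not part of the original file): how one port/docroot
# mapping becomes a $SERVER["socket"] block, as written out by
# Lighttpd.Start() above.
def socket_block(mapping):
    ssl_setup = ''
    if 'sslcert' in mapping:
        ssl_setup = ('  ssl.engine = "enable"\n'
                     '  ssl.pemfile = "%s"\n' % mapping['sslcert'])
    return ('$SERVER["socket"] == "127.0.0.1:%d" {\n'
            '  server.document-root = "%s"\n'
            '%s'
            '}\n\n') % (mapping['port'], mapping['docroot'], ssl_setup)

# e.g. socket_block({'port': 8443, 'docroot': '/tmp/layout-tests',
#                    'sslcert': '/tmp/httpd2.pem'})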
- option_parser = optparse.OptionParser() - option_parser.add_option('-k', '--server', - help='Server action (start|stop)') - option_parser.add_option('-p', '--port', - help='Port to listen on (overrides layout test ports)') - option_parser.add_option('-r', '--root', - help='Absolute path to DocumentRoot (overrides layout test roots)') - option_parser.add_option('--register_cygwin', action="store_true", - dest="register_cygwin", help='Register Cygwin paths (on Win try bots)') - option_parser.add_option('--run_background', action="store_true", - dest="run_background", - help='Run on background (for running as UI test)') - options, args = option_parser.parse_args() - - if not options.server: - print ('Usage: %s --server {start|stop} [--root=root_dir]' - ' [--port=port_number]' % sys.argv[0]) - else: - if (options.root is None) and (options.port is not None): - # specifying root but not port means we want httpd on default - # set of ports that LayoutTest use, but pointing to a different - # source of tests. Specifying port but no root does not seem - # meaningful. - raise 'Specifying port requires also a root.' - httpd = Lighttpd(tempfile.gettempdir(), - port=options.port, - root=options.root, - register_cygwin=options.register_cygwin, - run_background=options.run_background) - if 'start' == options.server: - httpd.Start() - else: - httpd.Stop(force=True) diff --git a/webkit/tools/layout_tests/layout_package/http_server.sh b/webkit/tools/layout_tests/layout_package/http_server.sh deleted file mode 100755 index b3f4b4b..0000000 --- a/webkit/tools/layout_tests/layout_package/http_server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -exec_dir=$(dirname $0) - -if [ "$OSTYPE" = "cygwin" ]; then - system_root=`cygpath "$SYSTEMROOT"` - PATH="/usr/bin:$system_root/system32:$system_root:$system_root/system32/WBEM" - export PATH - python_prog="$exec_dir/../../../../third_party/python_24/python.exe" -else - python_prog=python - # When not using the included python, we don't get automatic site.py paths. - # Specifically, run_webkit_tests needs the paths in: - # third_party/python_24/Lib/site-packages/google.pth - PYTHONPATH="${exec_dir}/../../../../tools/python:$PYTHONPATH" - export PYTHONPATH -fi - -"$python_prog" "$exec_dir/http_server.py" "$@" diff --git a/webkit/tools/layout_tests/layout_package/http_server_base.py b/webkit/tools/layout_tests/layout_package/http_server_base.py deleted file mode 100644 index daf0978..0000000 --- a/webkit/tools/layout_tests/layout_package/http_server_base.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Base class with common routines between the Apache and Lighttpd servers.""" - -import logging -import time -import urllib - - -class HttpServerBase(object): - - def WaitForAction(self, action): - """Repeat the action for 20 seconds or until it succeeds. 
Returns - whether it succeeded.""" - start_time = time.time() - while time.time() - start_time < 20: - if action(): - return True - time.sleep(1) - - return False - - def IsServerRunningOnAllPorts(self): - """Returns whether the server is running on all the desired ports.""" - for mapping in self.mappings: - if 'sslcert' in mapping: - http_suffix = 's' - else: - http_suffix = '' - - url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port']) - - try: - response = urllib.urlopen(url) - logging.debug("Server running at %s" % url) - except IOError: - logging.debug("Server NOT running at %s" % url) - return False - - return True diff --git a/webkit/tools/layout_tests/layout_package/json_layout_results_generator.py b/webkit/tools/layout_tests/layout_package/json_layout_results_generator.py deleted file mode 100644 index f62075e..0000000 --- a/webkit/tools/layout_tests/layout_package/json_layout_results_generator.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2010 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import logging -import os - -from layout_package import json_results_generator -from layout_package import path_utils -from layout_package import test_expectations -from layout_package import test_failures - - -class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator): - """A JSON results generator for layout tests.""" - - LAYOUT_TESTS_PATH = "LayoutTests" - - # Additional JSON fields. - WONTFIX = "wontfixCounts" - DEFERRED = "deferredCounts" - - def __init__(self, builder_name, build_name, build_number, - results_file_base_path, builder_base_url, - test_timings, expectations, result_summary, all_tests): - """Modifies the results.json file. Grabs it off the archive directory - if it is not found locally. - - Args: - result_summary: ResultsSummary object storing the summary of the test - results. - (see the comment of JSONResultsGenerator.__init__ for other Args) - """ - - self._builder_name = builder_name - self._build_name = build_name - self._build_number = build_number - self._builder_base_url = builder_base_url - self._results_file_path = os.path.join(results_file_base_path, - self.RESULTS_FILENAME) - self._expectations = expectations - - # We don't use self._skipped_tests and self._passed_tests as we - # override _InsertFailureSummaries. - - # We want relative paths to LayoutTest root for JSON output. - path_to_name = self._GetPathRelativeToLayoutTestRoot - self._result_summary = result_summary - self._failures = dict( - (path_to_name(test), test_failures.DetermineResultType(failures)) - for (test, failures) in result_summary.failures.iteritems()) - self._all_tests = [path_to_name(test) for test in all_tests] - self._test_timings = dict( - (path_to_name(test_tuple.filename), test_tuple.test_run_time) - for test_tuple in test_timings) - - self._GenerateJSONOutput() - - def _GetPathRelativeToLayoutTestRoot(self, test): - """Returns the path of the test relative to the layout test root. - For example, for: - src/third_party/WebKit/LayoutTests/fast/forms/foo.html - We would return - fast/forms/foo.html - """ - index = test.find(self.LAYOUT_TESTS_PATH) - if index is not -1: - index += len(self.LAYOUT_TESTS_PATH) - - if index is -1: - # Already a relative path. - relativePath = test - else: - relativePath = test[index + 1:] - - # Make sure all paths are unix-style. 
- return relativePath.replace('\\', '/') - - # override - def _ConvertJSONToCurrentVersion(self, results_json): - archive_version = None - if self.VERSION_KEY in results_json: - archive_version = results_json[self.VERSION_KEY] - - super(JSONLayoutResultsGenerator, self)._ConvertJSONToCurrentVersion( - results_json) - - # version 2->3 - if archive_version == 2: - for results_for_builder in results_json.itervalues(): - try: - test_results = results_for_builder[self.TESTS] - except: - continue - - for test in test_results: - # Make sure all paths are relative - test_path = self._GetPathRelativeToLayoutTestRoot(test) - if test_path != test: - test_results[test_path] = test_results[test] - del test_results[test] - - # override - def _InsertFailureSummaries(self, results_for_builder): - summary = self._result_summary - - self._InsertItemIntoRawList(results_for_builder, - len((set(summary.failures.keys()) | - summary.tests_by_expectation[test_expectations.SKIP]) & - summary.tests_by_timeline[test_expectations.NOW]), - self.FIXABLE_COUNT) - self._InsertItemIntoRawList(results_for_builder, - self._GetFailureSummaryEntry(test_expectations.NOW), - self.FIXABLE) - self._InsertItemIntoRawList(results_for_builder, - len(self._expectations.GetTestsWithTimeline( - test_expectations.NOW)), self.ALL_FIXABLE_COUNT) - self._InsertItemIntoRawList(results_for_builder, - self._GetFailureSummaryEntry(test_expectations.DEFER), - self.DEFERRED) - self._InsertItemIntoRawList(results_for_builder, - self._GetFailureSummaryEntry(test_expectations.WONTFIX), - self.WONTFIX) - - # override - def _NormalizeResultsJSON(self, test, test_name, tests): - super(JSONLayoutResultsGenerator, self)._NormalizeResultsJSON( - test, test_name, tests) - - # Remove tests that don't exist anymore. - full_path = os.path.join(path_utils.LayoutTestsDir(), test_name) - full_path = os.path.normpath(full_path) - if not os.path.exists(full_path): - del tests[test_name] - - def _GetFailureSummaryEntry(self, timeline): - """Creates a summary object to insert into the JSON. - - Args: - summary ResultSummary object with test results - timeline current test_expectations timeline to build entry for - (e.g., test_expectations.NOW, etc.) - """ - entry = {} - summary = self._result_summary - timeline_tests = summary.tests_by_timeline[timeline] - entry[self.SKIP_RESULT] = len( - summary.tests_by_expectation[test_expectations.SKIP] & - timeline_tests) - entry[self.PASS_RESULT] = len( - summary.tests_by_expectation[test_expectations.PASS] & - timeline_tests) - for failure_type in summary.tests_by_expectation.keys(): - if failure_type not in self.FAILURE_TO_CHAR: - continue - count = len(summary.tests_by_expectation[failure_type] & - timeline_tests) - entry[self.FAILURE_TO_CHAR[failure_type]] = count - return entry diff --git a/webkit/tools/layout_tests/layout_package/json_results_generator.py b/webkit/tools/layout_tests/layout_package/json_results_generator.py deleted file mode 100644 index 9bd0ad3..0000000 --- a/webkit/tools/layout_tests/layout_package/json_results_generator.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
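As the docstring of _GetPathRelativeToLayoutTestRoot() above illustrates, test paths are normalized to LayoutTests-relative, unix-style names before they go into the JSON. A minimal sketch of that normalization; the function name and sample path are illustrative, not part of the original file.

# Illustrative sketch (not part of the original file): strip everything up to
# and including the LayoutTests root and force forward slashes, as
# _GetPathRelativeToLayoutTestRoot() does above.
def layout_test_name(test_path, root="LayoutTests"):
    index = test_path.find(root)
    if index == -1:
        relative = test_path          # already a relative path
    else:
        relative = test_path[index + len(root) + 1:]
    return relative.replace('\\', '/')

# layout_test_name("src/third_party/WebKit/LayoutTests/fast/forms/foo.html")
# -> "fast/forms/foo.html"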
- -import logging -import os -import subprocess -import sys -import time -import urllib2 -import xml.dom.minidom - -from layout_package import path_utils -from layout_package import test_expectations - -sys.path.append(path_utils.PathFromBase('third_party')) -import simplejson - - -class JSONResultsGenerator(object): - - MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 - # Min time (seconds) that will be added to the JSON. - MIN_TIME = 1 - JSON_PREFIX = "ADD_RESULTS(" - JSON_SUFFIX = ");" - PASS_RESULT = "P" - SKIP_RESULT = "X" - NO_DATA_RESULT = "N" - VERSION = 3 - VERSION_KEY = "version" - RESULTS = "results" - TIMES = "times" - BUILD_NUMBERS = "buildNumbers" - WEBKIT_SVN = "webkitRevision" - CHROME_SVN = "chromeRevision" - TIME = "secondsSinceEpoch" - TESTS = "tests" - - FIXABLE_COUNT = "fixableCount" - FIXABLE = "fixableCounts" - ALL_FIXABLE_COUNT = "allFixableCount" - - # Note that we omit test_expectations.FAIL from this list because - # it should never show up (it's a legacy input expectation, never - # an output expectation). - FAILURE_TO_CHAR = {test_expectations.CRASH: "C", - test_expectations.TIMEOUT: "T", - test_expectations.IMAGE: "I", - test_expectations.TEXT: "F", - test_expectations.MISSING: "O", - test_expectations.IMAGE_PLUS_TEXT: "Z"} - FAILURE_CHARS = FAILURE_TO_CHAR.values() - - RESULTS_FILENAME = "results.json" - - def __init__(self, builder_name, build_name, build_number, - results_file_base_path, builder_base_url, - test_timings, failures, passed_tests, skipped_tests, all_tests): - """Modifies the results.json file. Grabs it off the archive directory - if it is not found locally. - - Args - builder_name: the builder name (e.g. Webkit). - build_name: the build name (e.g. webkit-rel). - build_number: the build number. - results_file_base_path: Absolute path to the directory containing the - results json file. - builder_base_url: the URL where we have the archived test results. - test_timings: Map of test name to a test_run-time. - failures: Map of test name to a failure type (of test_expectations). - passed_tests: A set containing all the passed tests. - skipped_tests: A set containing all the skipped tests. - all_tests: List of all the tests that were run. This should not - include skipped tests. - """ - self._builder_name = builder_name - self._build_name = build_name - self._build_number = build_number - self._builder_base_url = builder_base_url - self._results_file_path = os.path.join(results_file_base_path, - self.RESULTS_FILENAME) - self._test_timings = test_timings - self._failures = failures - self._passed_tests = passed_tests - self._skipped_tests = skipped_tests - self._all_tests = all_tests - - self._GenerateJSONOutput() - - def _GenerateJSONOutput(self): - """Generates the JSON output file.""" - json = self._GetJSON() - if json: - results_file = open(self._results_file_path, "w") - results_file.write(json) - results_file.close() - - def _GetSVNRevision(self, in_directory=None): - """Returns the svn revision for the given directory. - - Args: - in_directory: The directory where svn is to be run. - """ - output = subprocess.Popen(["svn", "info", "--xml"], - cwd=in_directory, - shell=(sys.platform == 'win32'), - stdout=subprocess.PIPE).communicate()[0] - try: - dom = xml.dom.minidom.parseString(output) - return dom.getElementsByTagName('entry')[0].getAttribute( - 'revision') - except xml.parsers.expat.ExpatError: - return "" - - def _GetArchivedJSONResults(self): - """Reads old results JSON file if it exists. 
- Returns (archived_results, error) tuple where error is None if results - were successfully read. - """ - results_json = {} - old_results = None - error = None - - if os.path.exists(self._results_file_path): - old_results_file = open(self._results_file_path, "r") - old_results = old_results_file.read() - elif self._builder_base_url: - # Check if we have the archived JSON file on the buildbot server. - results_file_url = (self._builder_base_url + - self._build_name + "/" + self.RESULTS_FILENAME) - logging.error("Local results.json file does not exist. Grabbing " - "it off the archive at " + results_file_url) - - try: - results_file = urllib2.urlopen(results_file_url) - info = results_file.info() - old_results = results_file.read() - except urllib2.HTTPError, http_error: - # A non-4xx status code means the bot is hosed for some reason - # and we can't grab the results.json file off of it. - if (http_error.code < 400 and http_error.code >= 500): - error = http_error - except urllib2.URLError, url_error: - error = url_error - - if old_results: - # Strip the prefix and suffix so we can get the actual JSON object. - old_results = old_results[len(self.JSON_PREFIX): - len(old_results) - len(self.JSON_SUFFIX)] - - try: - results_json = simplejson.loads(old_results) - except: - logging.debug("results.json was not valid JSON. Clobbering.") - # The JSON file is not valid JSON. Just clobber the results. - results_json = {} - else: - logging.debug('Old JSON results do not exist. Starting fresh.') - results_json = {} - - return results_json, error - - def _GetJSON(self): - """Gets the results for the results.json file.""" - results_json, error = self._GetArchivedJSONResults() - if error: - # If there was an error don't write a results.json - # file at all as it would lose all the information on the bot. - logging.error("Archive directory is inaccessible. Not modifying " - "or clobbering the results.json file: " + str(error)) - return None - - builder_name = self._builder_name - if results_json and builder_name not in results_json: - logging.debug("Builder name (%s) is not in the results.json file." - % builder_name) - - self._ConvertJSONToCurrentVersion(results_json) - - if builder_name not in results_json: - results_json[builder_name] = self._CreateResultsForBuilderJSON() - - results_for_builder = results_json[builder_name] - - self._InsertGenericMetadata(results_for_builder) - - self._InsertFailureSummaries(results_for_builder) - - # Update the all failing tests with result type and time. - tests = results_for_builder[self.TESTS] - all_failing_tests = set(self._failures.iterkeys()) - all_failing_tests.update(tests.iterkeys()) - for test in all_failing_tests: - self._InsertTestTimeAndResult(test, tests) - - # Specify separators in order to get compact encoding. - results_str = simplejson.dumps(results_json, separators=(',', ':')) - return self.JSON_PREFIX + results_str + self.JSON_SUFFIX - - def _InsertFailureSummaries(self, results_for_builder): - """Inserts aggregate pass/failure statistics into the JSON. - This method reads self._skipped_tests, self._passed_tests and - self._failures and inserts FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT - entries. - - Args: - results_for_builder: Dictionary containing the test results for a - single builder. - """ - # Insert the number of tests that failed. - self._InsertItemIntoRawList(results_for_builder, - len(set(self._failures.keys()) | self._skipped_tests), - self.FIXABLE_COUNT) - - # Create a pass/skip/failure summary dictionary. 
- entry = {} - entry[self.SKIP_RESULT] = len(self._skipped_tests) - entry[self.PASS_RESULT] = len(self._passed_tests) - get = entry.get - for failure_type in self._failures.values(): - failure_char = self.FAILURE_TO_CHAR[failure_type] - entry[failure_char] = get(failure_char, 0) + 1 - - # Insert the pass/skip/failure summary dictionary. - self._InsertItemIntoRawList(results_for_builder, entry, self.FIXABLE) - - # Insert the number of all the tests that are supposed to pass. - self._InsertItemIntoRawList(results_for_builder, - len(self._skipped_tests | self._all_tests), - self.ALL_FIXABLE_COUNT) - - def _InsertItemIntoRawList(self, results_for_builder, item, key): - """Inserts the item into the list with the given key in the results for - this builder. Creates the list if no such list exists. - - Args: - results_for_builder: Dictionary containing the test results for a - single builder. - item: Number or string to insert into the list. - key: Key in results_for_builder for the list to insert into. - """ - if key in results_for_builder: - raw_list = results_for_builder[key] - else: - raw_list = [] - - raw_list.insert(0, item) - raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] - results_for_builder[key] = raw_list - - def _InsertItemRunLengthEncoded(self, item, encoded_results): - """Inserts the item into the run-length encoded results. - - Args: - item: String or number to insert. - encoded_results: run-length encoded results. An array of arrays, e.g. - [[3,'A'],[1,'Q']] encodes AAAQ. - """ - if len(encoded_results) and item == encoded_results[0][1]: - num_results = encoded_results[0][0] - if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: - encoded_results[0][0] = num_results + 1 - else: - # Use a list instead of a class for the run-length encoding since - # we want the serialized form to be concise. - encoded_results.insert(0, [1, item]) - - def _InsertGenericMetadata(self, results_for_builder): - """ Inserts generic metadata (such as version number, current time etc) - into the JSON. - - Args: - results_for_builder: Dictionary containing the test results for - a single builder. - """ - self._InsertItemIntoRawList(results_for_builder, - self._build_number, self.BUILD_NUMBERS) - - path_to_webkit = path_utils.PathFromBase('third_party', 'WebKit', - 'WebCore') - self._InsertItemIntoRawList(results_for_builder, - self._GetSVNRevision(path_to_webkit), - self.WEBKIT_SVN) - - path_to_chrome_base = path_utils.PathFromBase() - self._InsertItemIntoRawList(results_for_builder, - self._GetSVNRevision(path_to_chrome_base), - self.CHROME_SVN) - - self._InsertItemIntoRawList(results_for_builder, - int(time.time()), - self.TIME) - - def _InsertTestTimeAndResult(self, test_name, tests): - """ Insert a test item with its results to the given tests dictionary. - - Args: - tests: Dictionary containing test result entries. - """ - - result = JSONResultsGenerator.PASS_RESULT - time = 0 - - if test_name not in self._all_tests: - result = JSONResultsGenerator.NO_DATA_RESULT - - if test_name in self._failures: - result = self.FAILURE_TO_CHAR[self._failures[test_name]] - - if test_name in self._test_timings: - # Floor for now to get time in seconds. 
- time = int(self._test_timings[test_name]) - - if test_name not in tests: - tests[test_name] = self._CreateResultsAndTimesJSON() - - thisTest = tests[test_name] - self._InsertItemRunLengthEncoded(result, thisTest[self.RESULTS]) - self._InsertItemRunLengthEncoded(time, thisTest[self.TIMES]) - self._NormalizeResultsJSON(thisTest, test_name, tests) - - def _ConvertJSONToCurrentVersion(self, results_json): - """If the JSON does not match the current version, converts it to the - current version and adds in the new version number. - """ - if (self.VERSION_KEY in results_json and - results_json[self.VERSION_KEY] == self.VERSION): - return - - results_json[self.VERSION_KEY] = self.VERSION - - def _CreateResultsAndTimesJSON(self): - results_and_times = {} - results_and_times[self.RESULTS] = [] - results_and_times[self.TIMES] = [] - return results_and_times - - def _CreateResultsForBuilderJSON(self): - results_for_builder = {} - results_for_builder[self.TESTS] = {} - return results_for_builder - - def _RemoveItemsOverMaxNumberOfBuilds(self, encoded_list): - """Removes items from the run-length encoded list after the final - item that exceeds the max number of builds to track. - - Args: - encoded_results: run-length encoded results. An array of arrays, e.g. - [[3,'A'],[1,'Q']] encodes AAAQ. - """ - num_builds = 0 - index = 0 - for result in encoded_list: - num_builds = num_builds + result[0] - index = index + 1 - if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: - return encoded_list[:index] - return encoded_list - - def _NormalizeResultsJSON(self, test, test_name, tests): - """ Prune tests where all runs pass or tests that no longer exist and - truncate all results to maxNumberOfBuilds. - - Args: - test: ResultsAndTimes object for this test. - test_name: Name of the test. - tests: The JSON object with all the test results for this builder. - """ - test[self.RESULTS] = self._RemoveItemsOverMaxNumberOfBuilds( - test[self.RESULTS]) - test[self.TIMES] = self._RemoveItemsOverMaxNumberOfBuilds( - test[self.TIMES]) - - is_all_pass = self._IsResultsAllOfType(test[self.RESULTS], - self.PASS_RESULT) - is_all_no_data = self._IsResultsAllOfType(test[self.RESULTS], - self.NO_DATA_RESULT) - max_time = max([time[1] for time in test[self.TIMES]]) - - # Remove all passes/no-data from the results to reduce noise and - # filesize. If a test passes every run, but takes > MIN_TIME to run, - # don't throw away the data. - if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): - del tests[test_name] - - def _IsResultsAllOfType(self, results, type): - """Returns whether all the results are of the given type - (e.g. all passes).""" - return len(results) == 1 and results[0][1] == type diff --git a/webkit/tools/layout_tests/layout_package/lighttpd.conf b/webkit/tools/layout_tests/layout_package/lighttpd.conf deleted file mode 100644 index d3150dd..0000000 --- a/webkit/tools/layout_tests/layout_package/lighttpd.conf +++ /dev/null @@ -1,89 +0,0 @@ -server.tag = "LightTPD/1.4.19 (Win32)" -server.modules = ( "mod_accesslog", - "mod_alias", - "mod_cgi", - "mod_rewrite" ) - -# default document root required -server.document-root = "." 
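The per-test "results" and "times" arrays written by json_results_generator.py above are run-length encoded, with the newest build at the front ([[3,'A'],[1,'Q']] encodes AAAQ). A tiny decode helper, given purely as an illustration of that format and not part of any file in this change:

# Illustrative sketch (not part of the original file): expand the
# run-length-encoded arrays stored in results.json,
# e.g. [[3, 'A'], [1, 'Q']] -> ['A', 'A', 'A', 'Q'].
def decode_run_length(encoded_results):
    expanded = []
    for count, value in encoded_results:
        expanded.extend([value] * count)
    return expanded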
- -# files to check for if .../ is requested -index-file.names = ( "index.php", "index.pl", "index.cgi", - "index.html", "index.htm", "default.htm" ) -# mimetype mapping -mimetype.assign = ( - ".gif" => "image/gif", - ".jpg" => "image/jpeg", - ".jpeg" => "image/jpeg", - ".png" => "image/png", - ".svg" => "image/svg+xml", - ".css" => "text/css", - ".html" => "text/html", - ".htm" => "text/html", - ".xhtml" => "application/xhtml+xml", - ".js" => "text/javascript", - ".log" => "text/plain", - ".conf" => "text/plain", - ".text" => "text/plain", - ".txt" => "text/plain", - ".dtd" => "text/xml", - ".xml" => "text/xml", - ".manifest" => "text/cache-manifest", - ) - -# Use the "Content-Type" extended attribute to obtain mime type if possible -mimetype.use-xattr = "enable" - -## -# which extensions should not be handle via static-file transfer -# -# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi -static-file.exclude-extensions = ( ".php", ".pl", ".cgi" ) - -server.bind = "localhost" -server.port = 8001 - -## virtual directory listings -dir-listing.activate = "enable" -#dir-listing.encoding = "iso-8859-2" -#dir-listing.external-css = "style/oldstyle.css" - -## enable debugging -#debug.log-request-header = "enable" -#debug.log-response-header = "enable" -#debug.log-request-handling = "enable" -#debug.log-file-not-found = "enable" - -#### SSL engine -#ssl.engine = "enable" -#ssl.pemfile = "server.pem" - -# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html) -# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess -# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html. -# See the apache rewrite rule at -# LayoutTests/http/tests/appcache/resources/intercept/.htaccess -url.rewrite-once = ( - "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php", - "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php" -) - -# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess -# to override charset for reply2.txt, reply2.xml, and reply4.txt. -$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" { - mimetype.assign = ( - ".txt" => "text/plain; charset=windows-1251", - ".xml" => "text/xml; charset=windows-1251" - ) -} -$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" { - mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" ) -} - -# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess -# to override mime type for wrong-content-type.manifest. -$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" { - mimetype.assign = ( ".manifest" => "text/plain" ) -} - -# Autogenerated test-specific config follows. diff --git a/webkit/tools/layout_tests/layout_package/metered_stream.py b/webkit/tools/layout_tests/layout_package/metered_stream.py deleted file mode 100644 index 575209e..0000000 --- a/webkit/tools/layout_tests/layout_package/metered_stream.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -""" -Package that implements a stream wrapper that has 'meters' as well as -regular output. A 'meter' is a single line of text that can be erased -and rewritten repeatedly, without producing multiple lines of output. It -can be used to produce effects like progress bars. 
-""" - - -class MeteredStream: - """This class is a wrapper around a stream that allows you to implement - meters. - - It can be used like a stream, but calling update() will print - the string followed by only a carriage return (instead of a carriage - return and a line feed). This can be used to implement progress bars and - other sorts of meters. Note that anything written by update() will be - erased by a subsequent update(), write(), or flush().""" - - def __init__(self, verbose, stream): - """ - Args: - verbose: whether update is a no-op - stream: output stream to write to - """ - self._dirty = False - self._verbose = verbose - self._stream = stream - self._last_update = "" - - def write(self, txt): - """Write text directly to the stream, overwriting and resetting the - meter.""" - if self._dirty: - self.update("") - self._dirty = False - self._stream.write(txt) - - def flush(self): - """Flush any buffered output.""" - self._stream.flush() - - def update(self, str): - """Write an update to the stream that will get overwritten by the next - update() or by a write(). - - This is used for progress updates that don't need to be preserved in - the log. Note that verbose disables this routine; we have this in - case we are logging lots of output and the update()s will get lost - or won't work properly (typically because verbose streams are - redirected to files. - - TODO(dpranke): figure out if there is a way to detect if we're writing - to a stream that handles CRs correctly (e.g., terminals). That might - be a cleaner way of handling this. - """ - if self._verbose: - return - - # Print the necessary number of backspaces to erase the previous - # message. - self._stream.write("\b" * len(self._last_update)) - self._stream.write(str) - num_remaining = len(self._last_update) - len(str) - if num_remaining > 0: - self._stream.write(" " * num_remaining + "\b" * num_remaining) - self._last_update = str - self._dirty = True diff --git a/webkit/tools/layout_tests/layout_package/path_utils.py b/webkit/tools/layout_tests/layout_package/path_utils.py deleted file mode 100644 index 48321df..0000000 --- a/webkit/tools/layout_tests/layout_package/path_utils.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""This package contains utility methods for manipulating paths and -filenames for test results and baselines. It also contains wrappers -of a few routines in platform_utils.py so that platform_utils.py can -be considered a 'protected' package - i.e., this file should be -the only file that ever includes platform_utils. This leads to -us including a few things that don't really have anything to do - with paths, unfortunately.""" - -import errno -import os -import stat -import sys - -import platform_utils -import platform_utils_win -import platform_utils_mac -import platform_utils_linux - -# Cache some values so we don't have to recalculate them. _basedir is -# used by PathFromBase() and caches the full (native) path to the top -# of the source tree (/src). _baseline_search_path is used by -# ExpectedBaselines() and caches the list of native paths to search -# for baseline results. 
-_basedir = None -_baseline_search_path = None - - -class PathNotFound(Exception): - pass - - -def LayoutTestsDir(): - """Returns the fully-qualified path to the directory containing the input - data for the specified layout test.""" - return PathFromBase('third_party', 'WebKit', 'LayoutTests') - - -def ChromiumBaselinePath(platform=None): - """Returns the full path to the directory containing expected - baseline results from chromium ports. If |platform| is None, the - currently executing platform is used. - - Note: although directly referencing individual platform_utils_* files is - usually discouraged, we allow it here so that the rebaselining tool can - pull baselines for platforms other than the host platform.""" - - # Normalize the platform string. - platform = PlatformName(platform) - if platform.startswith('chromium-mac'): - return platform_utils_mac.BaselinePath(platform) - elif platform.startswith('chromium-win'): - return platform_utils_win.BaselinePath(platform) - elif platform.startswith('chromium-linux'): - return platform_utils_linux.BaselinePath(platform) - - return platform_utils.BaselinePath() - - -def WebKitBaselinePath(platform): - """Returns the full path to the directory containing expected - baseline results from WebKit ports.""" - return PathFromBase('third_party', 'WebKit', 'LayoutTests', - 'platform', platform) - - -def BaselineSearchPath(platform=None): - """Returns the list of directories to search for baselines/results for a - given platform, in order of preference. Paths are relative to the top of - the source tree. If parameter platform is None, returns the list for the - current platform that the script is running on. - - Note: although directly referencing individual platform_utils_* files is - usually discouraged, we allow it here so that the rebaselining tool can - pull baselines for platforms other than the host platform.""" - - # Normalize the platform name. - platform = PlatformName(platform) - if platform.startswith('chromium-mac'): - return platform_utils_mac.BaselineSearchPath(platform) - elif platform.startswith('chromium-win'): - return platform_utils_win.BaselineSearchPath(platform) - elif platform.startswith('chromium-linux'): - return platform_utils_linux.BaselineSearchPath(platform) - return platform_utils.BaselineSearchPath() - - -def ExpectedBaselines(filename, suffix, platform=None, all_baselines=False): - """Given a test name, finds where the baseline results are located. - - Args: - filename: absolute filename to test file - suffix: file suffix of the expected results, including dot; e.g. '.txt' - or '.png'. This should not be None, but may be an empty string. - platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to - the current platform. - all_baselines: If True, return an ordered list of all baseline paths - for the given platform. If False, return only the first - one. - Returns - a list of ( platform_dir, results_filename ), where - platform_dir - abs path to the top of the results tree (or test tree) - results_filename - relative path from top of tree to the results file - (os.path.join of the two gives you the full path to the file, - unless None was returned.) - Return values will be in the format appropriate for the current platform - (e.g., "\\" for path separators on Windows). If the results file is not - found, then None will be returned for the directory, but the expected - relative pathname will still be returned. 
- """ - global _baseline_search_path - global _search_path_platform - testname = os.path.splitext(RelativeTestFilename(filename))[0] - - baseline_filename = testname + '-expected' + suffix - - if (_baseline_search_path is None) or (_search_path_platform != platform): - _baseline_search_path = BaselineSearchPath(platform) - _search_path_platform = platform - - baselines = [] - for platform_dir in _baseline_search_path: - if os.path.exists(os.path.join(platform_dir, baseline_filename)): - baselines.append((platform_dir, baseline_filename)) - - if not all_baselines and baselines: - return baselines - - # If it wasn't found in a platform directory, return the expected result - # in the test directory, even if no such file actually exists. - platform_dir = LayoutTestsDir() - if os.path.exists(os.path.join(platform_dir, baseline_filename)): - baselines.append((platform_dir, baseline_filename)) - - if baselines: - return baselines - - return [(None, baseline_filename)] - - -def ExpectedFilename(filename, suffix): - """Given a test name, returns an absolute path to its expected results. - - If no expected results are found in any of the searched directories, the - directory in which the test itself is located will be returned. The return - value is in the format appropriate for the platform (e.g., "\\" for - path separators on windows). - - Args: - filename: absolute filename to test file - suffix: file suffix of the expected results, including dot; e.g. '.txt' - or '.png'. This should not be None, but may be an empty string. - platform: the most-specific directory name to use to build the - search list of directories, e.g., 'chromium-win', or - 'chromium-mac-leopard' (we follow the WebKit format) - """ - platform_dir, baseline_filename = ExpectedBaselines(filename, suffix)[0] - if platform_dir: - return os.path.join(platform_dir, baseline_filename) - return os.path.join(LayoutTestsDir(), baseline_filename) - - -def RelativeTestFilename(filename): - """Provide the filename of the test relative to the layout tests - directory as a unix style path (a/b/c).""" - return _WinPathToUnix(filename[len(LayoutTestsDir()) + 1:]) - - -def _WinPathToUnix(path): - """Convert a windows path to use unix-style path separators (a/b/c).""" - return path.replace('\\', '/') - -# -# Routines that are arguably platform-specific but have been made -# generic for now (they used to be in platform_utils_*) -# - - -def FilenameToUri(full_path): - """Convert a test file to a URI.""" - LAYOUTTEST_HTTP_DIR = "http/tests/" - LAYOUTTEST_WEBSOCKET_DIR = "websocket/tests/" - - relative_path = _WinPathToUnix(RelativeTestFilename(full_path)) - port = None - use_ssl = False - - if relative_path.startswith(LAYOUTTEST_HTTP_DIR): - # http/tests/ run off port 8000 and ssl/ off 8443 - relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] - port = 8000 - elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR): - # websocket/tests/ run off port 8880 and 9323 - # Note: the root is /, not websocket/tests/ - port = 8880 - - # Make http/tests/local run as local files. This is to mimic the - # logic in run-webkit-tests. - # TODO(jianli): Consider extending this to "media/". 
- if port and not relative_path.startswith("local/"): - if relative_path.startswith("ssl/"): - port += 443 - protocol = "https" - else: - protocol = "http" - return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) - - if sys.platform in ('cygwin', 'win32'): - return "file:///" + GetAbsolutePath(full_path) - return "file://" + GetAbsolutePath(full_path) - - -def GetAbsolutePath(path): - """Returns an absolute UNIX path.""" - return _WinPathToUnix(os.path.abspath(path)) - - -def MaybeMakeDirectory(*path): - """Creates the specified directory if it doesn't already exist.""" - # This is a reimplementation of google.path_utils.MaybeMakeDirectory(). - try: - os.makedirs(os.path.join(*path)) - except OSError, e: - if e.errno != errno.EEXIST: - raise - - -def PathFromBase(*comps): - """Returns an absolute filename from a set of components specified - relative to the top of the source tree. If the path does not exist, - the exception PathNotFound is raised.""" - # This is a reimplementation of google.path_utils.PathFromBase(). - global _basedir - if _basedir == None: - # We compute the top of the source tree by finding the absolute - # path of this source file, and then climbing up three directories - # as given in subpath. If we move this file, subpath needs to be - # updated. - path = os.path.abspath(__file__) - subpath = os.path.join('webkit', 'tools', 'layout_tests') - _basedir = path[:path.index(subpath)] - path = os.path.join(_basedir, *comps) - if not os.path.exists(path): - raise PathNotFound('could not find %s' % (path)) - return path - - -def RemoveDirectory(*path): - """Recursively removes a directory, even if it's marked read-only. - - Remove the directory located at *path, if it exists. - - shutil.rmtree() doesn't work on Windows if any of the files or directories - are read-only, which svn repositories and some .svn files are. We need to - be able to force the files to be writable (i.e., deletable) as we traverse - the tree. - - Even with all this, Windows still sometimes fails to delete a file, citing - a permission error (maybe something to do with antivirus scans or disk - indexing). The best suggestion any of the user forums had was to wait a - bit and try again, so we do that too. It's hand-waving, but sometimes it - works. :/ - """ - file_path = os.path.join(*path) - if not os.path.exists(file_path): - return - - win32 = False - if sys.platform == 'win32': - win32 = True - # Some people don't have the APIs installed. In that case we'll do - # without. - try: - win32api = __import__('win32api') - win32con = __import__('win32con') - except ImportError: - win32 = False - - def remove_with_retry(rmfunc, path): - os.chmod(path, stat.S_IWRITE) - if win32: - win32api.SetFileAttributes(path, - win32con.FILE_ATTRIBUTE_NORMAL) - try: - return rmfunc(path) - except EnvironmentError, e: - if e.errno != errno.EACCES: - raise - print 'Failed to delete %s: trying again' % repr(path) - time.sleep(0.1) - return rmfunc(path) - else: - - def remove_with_retry(rmfunc, path): - if os.path.islink(path): - return os.remove(path) - else: - return rmfunc(path) - - for root, dirs, files in os.walk(file_path, topdown=False): - # For POSIX: making the directory writable guarantees removability. - # Windows will ignore the non-read-only bits in the chmod value. 
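# A minimal, self-contained restatement of the FilenameToUri() mapping above,
# for reference; the helper name and file:// fallback string are illustrative
# only, the ports and path prefixes are the ones used by the code above.
def _layout_test_uri(relative_path, absolute_path):
    """Map a path relative to LayoutTests/ to the URI the test is run from."""
    port = None
    if relative_path.startswith("http/tests/"):
        relative_path = relative_path[len("http/tests/"):]
        port = 8000                       # plain http tests
    elif relative_path.startswith("websocket/tests/"):
        port = 8880                       # websocket tests are rooted at /
    if port and not relative_path.startswith("local/"):
        if relative_path.startswith("ssl/"):
            return "https://127.0.0.1:%u/%s" % (port + 443, relative_path)
        return "http://127.0.0.1:%u/%s" % (port, relative_path)
    return "file://" + absolute_path      # everything else is a local file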
- os.chmod(root, 0770) - for name in files: - remove_with_retry(os.remove, os.path.join(root, name)) - for name in dirs: - remove_with_retry(os.rmdir, os.path.join(root, name)) - - remove_with_retry(os.rmdir, file_path) - -# -# Wrappers around platform_utils -# - - -def PlatformName(platform=None): - """Returns the appropriate chromium platform name for |platform|. If - |platform| is None, returns the name of the chromium platform on the - currently running system. If |platform| is of the form 'chromium-*', - it is returned unchanged, otherwise 'chromium-' is prepended.""" - if platform == None: - return platform_utils.PlatformName() - if not platform.startswith('chromium-'): - platform = "chromium-" + platform - return platform - - -def PlatformVersion(): - return platform_utils.PlatformVersion() - - -def LigHTTPdExecutablePath(): - return platform_utils.LigHTTPdExecutablePath() - - -def LigHTTPdModulePath(): - return platform_utils.LigHTTPdModulePath() - - -def LigHTTPdPHPPath(): - return platform_utils.LigHTTPdPHPPath() - - -def WDiffPath(): - return platform_utils.WDiffPath() - - -def TestShellPath(target): - return platform_utils.TestShellPath(target) - - -def ImageDiffPath(target): - return platform_utils.ImageDiffPath(target) - - -def LayoutTestHelperPath(target): - return platform_utils.LayoutTestHelperPath(target) - - -def FuzzyMatchPath(): - return platform_utils.FuzzyMatchPath() - - -def ShutDownHTTPServer(server_pid): - return platform_utils.ShutDownHTTPServer(server_pid) - - -def KillAllTestShells(): - platform_utils.KillAllTestShells() diff --git a/webkit/tools/layout_tests/layout_package/platform_utils.py b/webkit/tools/layout_tests/layout_package/platform_utils.py deleted file mode 100644 index 03af83d..0000000 --- a/webkit/tools/layout_tests/layout_package/platform_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Platform-specific utilities and pseudo-constants - -Any functions whose implementations or values differ from one platform to -another should be defined in their respective platform_utils_.py -modules. The appropriate one of those will be imported into this module to -provide callers with a common, platform-independent interface. - -This file should only ever be imported by layout_package.path_utils. -""" - -import sys - -# We may not support the version of Python that a user has installed (Cygwin -# especially has had problems), but we'll allow the platform utils to be -# included in any case so we don't get an import error. -if sys.platform in ('cygwin', 'win32'): - from platform_utils_win import * -elif sys.platform == 'darwin': - from platform_utils_mac import * -elif sys.platform in ('linux', 'linux2', 'freebsd7', 'openbsd4'): - from platform_utils_linux import * diff --git a/webkit/tools/layout_tests/layout_package/platform_utils_linux.py b/webkit/tools/layout_tests/layout_package/platform_utils_linux.py deleted file mode 100644 index a0a6ba4..0000000 --- a/webkit/tools/layout_tests/layout_package/platform_utils_linux.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""This is the Linux implementation of the layout_package.platform_utils - package. 
This file should only be imported by that package.""" - -import os -import signal -import subprocess -import sys -import logging - -import path_utils -import platform_utils_win - - -def PlatformName(): - """Returns the name of the platform we're currently running on.""" - return 'chromium-linux' + PlatformVersion() - - -def PlatformVersion(): - """Returns the version string for the platform, e.g. '-vista' or - '-snowleopard'. If the platform does not distinguish between - minor versions, it returns ''.""" - return '' - - -def GetNumCores(): - """Returns the number of cores on the machine. For hyperthreaded machines, - this will be double the number of actual processors.""" - num_cores = os.sysconf("SC_NPROCESSORS_ONLN") - if isinstance(num_cores, int) and num_cores > 0: - return num_cores - return 1 - - -def BaselinePath(platform=None): - """Returns the path relative to the top of the source tree for the - baselines for the specified platform version. If |platform| is None, - then the version currently in use is used.""" - if platform is None: - platform = PlatformName() - return path_utils.PathFromBase('webkit', 'data', 'layout_tests', - 'platform', platform, 'LayoutTests') - - -def BaselineSearchPath(platform=None): - """Returns the list of directories to search for baselines/results, in - order of preference. Paths are relative to the top of the source tree.""" - return [BaselinePath(platform), - platform_utils_win.BaselinePath('chromium-win'), - path_utils.WebKitBaselinePath('win'), - path_utils.WebKitBaselinePath('mac')] - - -def ApacheExecutablePath(): - """Returns the executable path to start Apache""" - path = os.path.join("/usr", "sbin", "apache2") - if os.path.exists(path): - return path - print "Unable to fine Apache executable %s" % path - _MissingApache() - - -def ApacheConfigFilePath(): - """Returns the path to Apache config file""" - return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", - "http", "conf", "apache2-debian-httpd.conf") - - -def LigHTTPdExecutablePath(): - """Returns the executable path to start LigHTTPd""" - binpath = "/usr/sbin/lighttpd" - if os.path.exists(binpath): - return binpath - print "Unable to find LigHTTPd executable %s" % binpath - _MissingLigHTTPd() - - -def LigHTTPdModulePath(): - """Returns the library module path for LigHTTPd""" - modpath = "/usr/lib/lighttpd" - if os.path.exists(modpath): - return modpath - print "Unable to find LigHTTPd modules %s" % modpath - _MissingLigHTTPd() - - -def LigHTTPdPHPPath(): - """Returns the PHP executable path for LigHTTPd""" - binpath = "/usr/bin/php-cgi" - if os.path.exists(binpath): - return binpath - print "Unable to find PHP CGI executable %s" % binpath - _MissingLigHTTPd() - - -def WDiffPath(): - """Path to the WDiff executable, which we assume is already installed and - in the user's $PATH.""" - return 'wdiff' - - -def ImageDiffPath(target): - """Path to the image_diff binary. - - Args: - target: Build target mode (debug or release)""" - return _PathFromBuildResults(target, 'image_diff') - - -def LayoutTestHelperPath(target): - """Path to the layout_test helper binary, if needed, empty otherwise""" - return '' - - -def TestShellPath(target): - """Return the platform-specific binary path for our TestShell. 
- - Args: - target: Build target mode (debug or release) """ - if target in ('Debug', 'Release'): - try: - debug_path = _PathFromBuildResults('Debug', 'test_shell') - release_path = _PathFromBuildResults('Release', 'test_shell') - - debug_mtime = os.stat(debug_path).st_mtime - release_mtime = os.stat(release_path).st_mtime - - if debug_mtime > release_mtime and target == 'Release' or \ - release_mtime > debug_mtime and target == 'Debug': - logging.info('\x1b[31mWarning: you are not running the most ' - 'recent test_shell binary. You need to pass ' - '--debug or not to select between Debug and ' - 'Release.\x1b[0m') - # This will fail if we don't have both a debug and release binary. - # That's fine because, in this case, we must already be running the - # most up-to-date one. - except path_utils.PathNotFound: - pass - - return _PathFromBuildResults(target, 'test_shell') - - -def FuzzyMatchPath(): - """Return the path to the fuzzy matcher binary.""" - return path_utils.PathFromBase('third_party', 'fuzzymatch', 'fuzzymatch') - - -def ShutDownHTTPServer(server_pid): - """Shut down the lighttpd web server. Blocks until it's fully shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # This isn't ideal, since it could conflict with web server processes - # not started by http_server.py, but good enough for now. - KillAllProcess('lighttpd') - KillAllProcess('apache2') - else: - try: - os.kill(server_pid, signal.SIGTERM) - #TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), - # so if kill fails on the given PID, just try to 'killall' web - # servers. - ShutDownHTTPServer(None) - - -def KillProcess(pid): - """Forcefully kill the process. - - Args: - pid: The id of the process to be killed. - """ - os.kill(pid, signal.SIGKILL) - - -def KillAllProcess(process_name): - null = open(os.devnull) - subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), - process_name], stderr=null) - null.close() - - -def KillAllTestShells(): - """Kills all instances of the test_shell binary currently running.""" - KillAllProcess('test_shell') - -# -# Private helper functions -# - - -def _MissingLigHTTPd(): - print 'Please install using: "sudo apt-get install lighttpd php5-cgi"' - print 'For complete Linux build requirements, please see:' - print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' - sys.exit(1) - - -def _MissingApache(): - print ('Please install using: "sudo apt-get install apache2 ' - 'libapache2-mod-php5"') - print 'For complete Linux build requirements, please see:' - print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' - sys.exit(1) - - -def _PathFromBuildResults(*pathies): - # FIXME(dkegel): use latest or warn if more than one found? - for dir in ["sconsbuild", "out", "xcodebuild"]: - try: - return path_utils.PathFromBase(dir, *pathies) - except: - pass - raise path_utils.PathNotFound("Unable to find %s in build tree" % - (os.path.join(*pathies))) diff --git a/webkit/tools/layout_tests/layout_package/platform_utils_mac.py b/webkit/tools/layout_tests/layout_package/platform_utils_mac.py deleted file mode 100644 index a357ff4..0000000 --- a/webkit/tools/layout_tests/layout_package/platform_utils_mac.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""This is the Mac implementation of the layout_package.platform_utils - package. This file should only be imported by that package.""" - -import os -import platform -import signal -import subprocess - -import path_utils - - -def PlatformName(): - """Returns the name of the platform we're currently running on.""" - # At the moment all chromium mac results are version-independent. At some - # point we may need to return 'chromium-mac' + PlatformVersion() - return 'chromium-mac' - - -def PlatformVersion(): - """Returns the version string for the platform, e.g. '-vista' or - '-snowleopard'. If the platform does not distinguish between - minor versions, it returns ''.""" - os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" - if not os_version_string: - return '-leopard' - - release_version = int(os_version_string.split('.')[1]) - - # we don't support 'tiger' or earlier releases - if release_version == 5: - return '-leopard' - elif release_version == 6: - return '-snowleopard' - - return '' - - -def GetNumCores(): - """Returns the number of cores on the machine. For hyperthreaded machines, - this will be double the number of actual processors.""" - return int(os.popen2("sysctl -n hw.ncpu")[1].read()) - - -def BaselinePath(platform=None): - """Returns the path relative to the top of the source tree for the - baselines for the specified platform version. If |platform| is None, - then the version currently in use is used.""" - if platform is None: - platform = PlatformName() - return path_utils.PathFromBase('webkit', 'data', 'layout_tests', - 'platform', platform, 'LayoutTests') - -# TODO: We should add leopard and snowleopard to the list of paths to check -# once we start running the tests from snowleopard. - - -def BaselineSearchPath(platform=None): - """Returns the list of directories to search for baselines/results, in - order of preference. Paths are relative to the top of the source tree.""" - return [BaselinePath(platform), - path_utils.WebKitBaselinePath('mac' + PlatformVersion()), - path_utils.WebKitBaselinePath('mac')] - - -def WDiffPath(): - """Path to the WDiff executable, which we assume is already installed and - in the user's $PATH.""" - return 'wdiff' - - -def ImageDiffPath(target): - """Path to the image_diff executable - - Args: - target: build type - 'Debug','Release',etc.""" - return path_utils.PathFromBase('xcodebuild', target, 'image_diff') - - -def LayoutTestHelperPath(target): - """Path to the layout_test_helper executable, if needed, empty otherwise - - Args: - target: build type - 'Debug','Release',etc.""" - return path_utils.PathFromBase('xcodebuild', target, 'layout_test_helper') - - -def TestShellPath(target): - """Path to the test_shell executable. - - Args: - target: build type - 'Debug','Release',etc.""" - # TODO(pinkerton): make |target| happy with case-sensitive file systems. 
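# Sketch of how the list returned by BaselineSearchPath() above is consumed
# (see ExpectedBaselines() in path_utils.py): the first directory containing
# <test>-expected<suffix> wins. The helper name is illustrative only.
import os

def _first_baseline(search_path, testname, suffix):
    baseline_filename = testname + '-expected' + suffix
    for platform_dir in search_path:
        candidate = os.path.join(platform_dir, baseline_filename)
        if os.path.exists(candidate):
            return candidate
    return None  # callers then fall back to the LayoutTests directory itself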
- return path_utils.PathFromBase('xcodebuild', target, 'TestShell.app', - 'Contents', 'MacOS', 'TestShell') - - -def ApacheExecutablePath(): - """Returns the executable path to start Apache""" - return os.path.join("/usr", "sbin", "httpd") - - -def ApacheConfigFilePath(): - """Returns the path to Apache config file""" - return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", - "http", "conf", "apache2-httpd.conf") - - -def LigHTTPdExecutablePath(): - """Returns the executable path to start LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', - 'bin', 'lighttpd') - - -def LigHTTPdModulePath(): - """Returns the library module path for LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'lib') - - -def LigHTTPdPHPPath(): - """Returns the PHP executable path for LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'bin', - 'php-cgi') - - -def ShutDownHTTPServer(server_pid): - """Shut down the lighttpd web server. Blocks until it's fully shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # TODO(mmoss) This isn't ideal, since it could conflict with lighttpd - # processes not started by http_server.py, but good enough for now. - KillAllProcess('lighttpd') - KillAllProcess('httpd') - else: - try: - os.kill(server_pid, signal.SIGTERM) - # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), - # so if kill fails on the given PID, just try to 'killall' web - # servers. - ShutDownHTTPServer(None) - - -def KillProcess(pid): - """Forcefully kill the process. - - Args: - pid: The id of the process to be killed. - """ - os.kill(pid, signal.SIGKILL) - - -def KillAllProcess(process_name): - # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or - # -SIGNALNUMBER must come first. Example problem: - # $ killall -u $USER -TERM lighttpd - # killall: illegal option -- T - # Use of the earlier -TERM placement is just fine on 10.5. - null = open(os.devnull) - subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), - process_name], stderr=null) - null.close() - - -def KillAllTestShells(): - """Kills all instances of the test_shell binary currently running.""" - KillAllProcess('TestShell') diff --git a/webkit/tools/layout_tests/layout_package/platform_utils_win.py b/webkit/tools/layout_tests/layout_package/platform_utils_win.py deleted file mode 100644 index 1f699dc..0000000 --- a/webkit/tools/layout_tests/layout_package/platform_utils_win.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""This is the Linux implementation of the layout_package.platform_utils - package. This file should only be imported by that package.""" - -import os -import path_utils -import subprocess -import sys - - -def PlatformName(): - """Returns the name of the platform we're currently running on.""" - # We're not ready for version-specific results yet. When we uncomment - # this, we also need to add it to the BaselineSearchPath() - return 'chromium-win' + PlatformVersion() - - -def PlatformVersion(): - """Returns the version string for the platform, e.g. '-vista' or - '-snowleopard'. 
If the platform does not distinguish between - minor versions, it returns ''.""" - winver = sys.getwindowsversion() - if winver[0] == 6 and (winver[1] == 1): - return '-7' - if winver[0] == 6 and (winver[1] == 0): - return '-vista' - if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2): - return '-xp' - return '' - - -def GetNumCores(): - """Returns the number of cores on the machine. For hyperthreaded machines, - this will be double the number of actual processors.""" - return int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) - - -def BaselinePath(platform=None): - """Returns the path relative to the top of the source tree for the - baselines for the specified platform version. If |platform| is None, - then the version currently in use is used.""" - if platform is None: - platform = PlatformName() - return path_utils.PathFromBase('webkit', 'data', 'layout_tests', - 'platform', platform, 'LayoutTests') - - -def BaselineSearchPath(platform=None): - """Returns the list of directories to search for baselines/results, in - order of preference. Paths are relative to the top of the source tree.""" - dirs = [] - if platform is None: - platform = PlatformName() - - if platform == 'chromium-win-xp': - dirs.append(BaselinePath(platform)) - if platform in ('chromium-win-xp', 'chromium-win-vista'): - dirs.append(BaselinePath('chromium-win-vista')) - dirs.append(BaselinePath('chromium-win')) - dirs.append(path_utils.WebKitBaselinePath('win')) - dirs.append(path_utils.WebKitBaselinePath('mac')) - return dirs - - -def WDiffPath(): - """Path to the WDiff executable, whose binary is checked in on Win""" - return path_utils.PathFromBase('third_party', 'cygwin', 'bin', 'wdiff.exe') - - -def ImageDiffPath(target): - """Return the platform-specific binary path for the image compare util. - We use this if we can't find the binary in the default location - in path_utils. - - Args: - target: Build target mode (debug or release) - """ - return _FindBinary(target, 'image_diff.exe') - - -def LayoutTestHelperPath(target): - """Return the platform-specific binary path for the layout test helper. - We use this if we can't find the binary in the default location - in path_utils. - - Args: - target: Build target mode (debug or release) - """ - return _FindBinary(target, 'layout_test_helper.exe') - - -def TestShellPath(target): - """Return the platform-specific binary path for our TestShell. - We use this if we can't find the binary in the default location - in path_utils. - - Args: - target: Build target mode (debug or release) - """ - return _FindBinary(target, 'test_shell.exe') - - -def ApacheExecutablePath(): - """Returns the executable path to start Apache""" - path = path_utils.PathFromBase('third_party', 'cygwin', "usr", "sbin") - # Don't return httpd.exe since we want to use this from cygwin. 
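# For reference, BaselineSearchPath('chromium-win-xp') above expands to,
# roughly (shown relative to the top of the source tree):
#
#   webkit/data/layout_tests/platform/chromium-win-xp/LayoutTests
#   webkit/data/layout_tests/platform/chromium-win-vista/LayoutTests
#   webkit/data/layout_tests/platform/chromium-win/LayoutTests
#   third_party/WebKit/LayoutTests/platform/win
#   third_party/WebKit/LayoutTests/platform/mac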
- return os.path.join(path, "httpd") - - -def ApacheConfigFilePath(): - """Returns the path to Apache config file""" - return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", - "http", "conf", "cygwin-httpd.conf") - - -def LigHTTPdExecutablePath(): - """Returns the executable path to start LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'win', - 'LightTPD.exe') - - -def LigHTTPdModulePath(): - """Returns the library module path for LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'win', 'lib') - - -def LigHTTPdPHPPath(): - """Returns the PHP executable path for LigHTTPd""" - return path_utils.PathFromBase('third_party', 'lighttpd', 'win', 'php5', - 'php-cgi.exe') - - -def ShutDownHTTPServer(server_pid): - """Shut down the lighttpd web server. Blocks until it's fully shut down. - - Args: - server_pid: The process ID of the running server. - Unused in this implementation of the method. - """ - subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).wait() - subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).wait() - - -def KillProcess(pid): - """Forcefully kill the process. - - Args: - pid: The id of the process to be killed. - """ - subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - -def KillAllTestShells(self): - """Kills all instances of the test_shell binary currently running.""" - subprocess.Popen(('taskkill.exe', '/f', '/im', 'test_shell.exe'), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).wait() - -# -# Private helper functions. -# - - -def _FindBinary(target, binary): - """On Windows, we look for binaries that we compile in potentially - two places: src/webkit/$target (preferably, which we get if we - built using webkit_glue.gyp), or src/chrome/$target (if compiled some - other way).""" - try: - return path_utils.PathFromBase('webkit', target, binary) - except path_utils.PathNotFound: - try: - return path_utils.PathFromBase('chrome', target, binary) - except path_utils.PathNotFound: - return path_utils.PathFromBase('build', target, binary) diff --git a/webkit/tools/layout_tests/layout_package/test_expectations.py b/webkit/tools/layout_tests/layout_package/test_expectations.py deleted file mode 100644 index a273f93..0000000 --- a/webkit/tools/layout_tests/layout_package/test_expectations.py +++ /dev/null @@ -1,783 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A helper class for reading in and dealing with tests expectations -for layout tests. -""" - -import logging -import os -import re -import sys -import time -import path_utils - -sys.path.append(path_utils.PathFromBase('third_party')) -import simplejson - -# Test expectation and modifier constants. 
-(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, - DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16) - -# Test expectation file update action constants -(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) - - -class TestExpectations: - TEST_LIST = "test_expectations.txt" - - def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode, - tests_are_present=True): - """Reads the test expectations files from the given directory.""" - path = os.path.join(directory, self.TEST_LIST) - self._expected_failures = TestExpectationsFile(path, tests, platform, - is_debug_mode, is_lint_mode, tests_are_present=tests_are_present) - - # TODO(ojan): Allow for removing skipped tests when getting the list of - # tests to run, but not when getting metrics. - # TODO(ojan): Replace the Get* calls here with the more sane API exposed - # by TestExpectationsFile below. Maybe merge the two classes entirely? - - def GetExpectationsJsonForAllPlatforms(self): - return self._expected_failures.GetExpectationsJsonForAllPlatforms() - - def GetRebaseliningFailures(self): - return (self._expected_failures.GetTestSet(REBASELINE, FAIL) | - self._expected_failures.GetTestSet(REBASELINE, IMAGE) | - self._expected_failures.GetTestSet(REBASELINE, TEXT) | - self._expected_failures.GetTestSet(REBASELINE, - IMAGE_PLUS_TEXT)) - - def GetOptions(self, test): - return self._expected_failures.GetOptions(test) - - def GetExpectations(self, test): - return self._expected_failures.GetExpectations(test) - - def GetExpectationsString(self, test): - """Returns the expectatons for the given test as an uppercase string. - If there are no expectations for the test, then "PASS" is returned.""" - expectations = self.GetExpectations(test) - retval = [] - - for expectation in expectations: - for item in TestExpectationsFile.EXPECTATIONS.items(): - if item[1] == expectation: - retval.append(item[0]) - break - - return " ".join(retval).upper() - - def GetTimelineForTest(self, test): - return self._expected_failures.GetTimelineForTest(test) - - def GetTestsWithResultType(self, result_type): - return self._expected_failures.GetTestsWithResultType(result_type) - - def GetTestsWithTimeline(self, timeline): - return self._expected_failures.GetTestsWithTimeline(timeline) - - def MatchesAnExpectedResult(self, test, result): - """Returns whether we got one of the expected results for this test.""" - return (result in self._expected_failures.GetExpectations(test) or - (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and - FAIL in self._expected_failures.GetExpectations(test)) or - result == MISSING and self.IsRebaselining(test) or - result == SKIP and self._expected_failures.HasModifier(test, - SKIP)) - - def IsRebaselining(self, test): - return self._expected_failures.HasModifier(test, REBASELINE) - - def HasModifier(self, test, modifier): - return self._expected_failures.HasModifier(test, modifier) - - def RemovePlatformFromFile(self, tests, platform, backup=False): - return self._expected_failures.RemovePlatformFromFile(tests, - platform, - backup) - - -def StripComments(line): - """Strips comments from a line and return None if the line is empty - or else the contents of line with leading and trailing spaces removed - and all other whitespace collapsed""" - - commentIndex = line.find('//') - if commentIndex is -1: - commentIndex = len(line) - - line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) - if line == '': - return None - else: - return line - - -class 
ModifiersAndExpectations: - """A holder for modifiers and expectations on a test that serializes to - JSON.""" - - def __init__(self, modifiers, expectations): - self.modifiers = modifiers - self.expectations = expectations - - -class ExpectationsJsonEncoder(simplejson.JSONEncoder): - """JSON encoder that can handle ModifiersAndExpectations objects. - """ - - def default(self, obj): - if isinstance(obj, ModifiersAndExpectations): - return {"modifiers": obj.modifiers, - "expectations": obj.expectations} - else: - return JSONEncoder.default(self, obj) - - -class TestExpectationsFile: - """Test expectation files consist of lines with specifications of what - to expect from layout test cases. The test cases can be directories - in which case the expectations apply to all test cases in that - directory and any subdirectory. The format of the file is along the - lines of: - - LayoutTests/fast/js/fixme.js = FAIL - LayoutTests/fast/js/flaky.js = FAIL PASS - LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS - ... - - To add other options: - SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - - SKIP: Doesn't run the test. - SLOW: The test takes a long time to run, but does not timeout indefinitely. - WONTFIX: For tests that we never intend to pass on a given platform. - DEFER: Test does not count in our statistics for the current release. - DEBUG: Expectations apply only to the debug build. - RELEASE: Expectations apply only to release build. - LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these - platforms. - - Notes: - -A test cannot be both SLOW and TIMEOUT - -A test cannot be both DEFER and WONTFIX - -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is - a migratory state that currently means either IMAGE, TEXT, or - IMAGE+TEXT. Once we have finished migrating the expectations, we will - change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT - identifier. - -A test can be included twice, but not via the same path. - -If a test is included twice, then the more precise path wins. 
- -CRASH tests cannot be DEFER or WONTFIX - """ - - EXPECTATIONS = {'pass': PASS, - 'fail': FAIL, - 'text': TEXT, - 'image': IMAGE, - 'image+text': IMAGE_PLUS_TEXT, - 'timeout': TIMEOUT, - 'crash': CRASH, - 'missing': MISSING} - - EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), - PASS: ('pass', 'passes'), - FAIL: ('failure', 'failures'), - TEXT: ('text diff mismatch', - 'text diff mismatch'), - IMAGE: ('image mismatch', 'image mismatch'), - IMAGE_PLUS_TEXT: ('image and text mismatch', - 'image and text mismatch'), - CRASH: ('test shell crash', - 'test shell crashes'), - TIMEOUT: ('test timed out', 'tests timed out'), - MISSING: ('no expected result found', - 'no expected results found')} - - EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, - TEXT, IMAGE, FAIL, SKIP) - - BASE_PLATFORMS = ('linux', 'mac', 'win') - PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7') - - BUILD_TYPES = ('debug', 'release') - - MODIFIERS = {'skip': SKIP, - 'wontfix': WONTFIX, - 'defer': DEFER, - 'slow': SLOW, - 'rebaseline': REBASELINE, - 'none': NONE} - - TIMELINES = {'wontfix': WONTFIX, - 'now': NOW, - 'defer': DEFER} - - RESULT_TYPES = {'skip': SKIP, - 'pass': PASS, - 'fail': FAIL, - 'flaky': FLAKY} - - def __init__(self, path, full_test_list, platform, is_debug_mode, - is_lint_mode, expectations_as_str=None, suppress_errors=False, - tests_are_present=True): - """ - path: The path to the expectation file. An error is thrown if a test is - listed more than once. - full_test_list: The list of all tests to be run pending processing of - the expections for those tests. - platform: Which platform from self.PLATFORMS to filter tests for. - is_debug_mode: Whether we testing a test_shell built debug mode. - is_lint_mode: Whether this is just linting test_expecatations.txt. - expectations_as_str: Contents of the expectations file. Used instead of - the path. This makes unittesting sane. - suppress_errors: Whether to suppress lint errors. - tests_are_present: Whether the test files are present in the local - filesystem. The LTTF Dashboard uses False here to avoid having to - keep a local copy of the tree. - """ - - self._path = path - self._expectations_as_str = expectations_as_str - self._is_lint_mode = is_lint_mode - self._tests_are_present = tests_are_present - self._full_test_list = full_test_list - self._suppress_errors = suppress_errors - self._errors = [] - self._non_fatal_errors = [] - self._platform = self.ToTestPlatformName(platform) - if self._platform is None: - raise Exception("Unknown platform '%s'" % (platform)) - self._is_debug_mode = is_debug_mode - - # Maps relative test paths as listed in the expectations file to a - # list of maps containing modifiers and expectations for each time - # the test is listed in the expectations file. - self._all_expectations = {} - - # Maps a test to its list of expectations. - self._test_to_expectations = {} - - # Maps a test to its list of options (string values) - self._test_to_options = {} - - # Maps a test to its list of modifiers: the constants associated with - # the options minus any bug or platform strings - self._test_to_modifiers = {} - - # Maps a test to the base path that it was listed with in the list. 
- self._test_list_paths = {} - - self._modifier_to_tests = self._DictOfSets(self.MODIFIERS) - self._expectation_to_tests = self._DictOfSets(self.EXPECTATIONS) - self._timeline_to_tests = self._DictOfSets(self.TIMELINES) - self._result_type_to_tests = self._DictOfSets(self.RESULT_TYPES) - - self._Read(self._GetIterableExpectations()) - - def _DictOfSets(self, strings_to_constants): - """Takes a dict of strings->constants and returns a dict mapping - each constant to an empty set.""" - d = {} - for c in strings_to_constants.values(): - d[c] = set() - return d - - def _GetIterableExpectations(self): - """Returns an object that can be iterated over. Allows for not caring - about whether we're iterating over a file or a new-line separated - string.""" - if self._expectations_as_str: - iterable = [x + "\n" for x in - self._expectations_as_str.split("\n")] - # Strip final entry if it's empty to avoid added in an extra - # newline. - if iterable[len(iterable) - 1] == "\n": - return iterable[:len(iterable) - 1] - return iterable - else: - return open(self._path) - - def ToTestPlatformName(self, name): - """Returns the test expectation platform that will be used for a - given platform name, or None if there is no match.""" - chromium_prefix = 'chromium-' - name = name.lower() - if name.startswith(chromium_prefix): - name = name[len(chromium_prefix):] - if name in self.PLATFORMS: - return name - return None - - def GetTestSet(self, modifier, expectation=None, include_skips=True): - if expectation is None: - tests = self._modifier_to_tests[modifier] - else: - tests = (self._expectation_to_tests[expectation] & - self._modifier_to_tests[modifier]) - - if not include_skips: - tests = tests - self.GetTestSet(SKIP, expectation) - - return tests - - def GetTestsWithResultType(self, result_type): - return self._result_type_to_tests[result_type] - - def GetTestsWithTimeline(self, timeline): - return self._timeline_to_tests[timeline] - - def GetOptions(self, test): - """This returns the entire set of options for the given test - (the modifiers plus the BUGXXXX identifier). This is used by the - LTTF dashboard.""" - return self._test_to_options[test] - - def HasModifier(self, test, modifier): - return test in self._modifier_to_tests[modifier] - - def GetExpectations(self, test): - return self._test_to_expectations[test] - - def GetExpectationsJsonForAllPlatforms(self): - # Specify separators in order to get compact encoding. - return ExpectationsJsonEncoder(separators=(',', ':')).encode( - self._all_expectations) - - def Contains(self, test): - return test in self._test_to_expectations - - def RemovePlatformFromFile(self, tests, platform, backup=False): - """Remove the platform option from test expectations file. - - If a test is in the test list and has an option that matches the given - platform, remove the matching platform and save the updated test back - to the file. If no other platforms remaining after removal, delete the - test from the file. - - Args: - tests: list of tests that need to update.. - platform: which platform option to remove. 
- backup: if true, the original test expectations file is saved as - [self.TEST_LIST].orig.YYYYMMDDHHMMSS - - Returns: - no - """ - - new_file = self._path + '.new' - logging.debug('Original file: "%s"', self._path) - logging.debug('New file: "%s"', new_file) - f_orig = self._GetIterableExpectations() - f_new = open(new_file, 'w') - - tests_removed = 0 - tests_updated = 0 - lineno = 0 - for line in f_orig: - lineno += 1 - action = self._GetPlatformUpdateAction(line, lineno, tests, - platform) - if action == NO_CHANGE: - # Save the original line back to the file - logging.debug('No change to test: %s', line) - f_new.write(line) - elif action == REMOVE_TEST: - tests_removed += 1 - logging.info('Test removed: %s', line) - elif action == REMOVE_PLATFORM: - parts = line.split(':') - new_options = parts[0].replace(platform.upper() + ' ', '', 1) - new_line = ('%s:%s' % (new_options, parts[1])) - f_new.write(new_line) - tests_updated += 1 - logging.info('Test updated: ') - logging.info(' old: %s', line) - logging.info(' new: %s', new_line) - elif action == ADD_PLATFORMS_EXCEPT_THIS: - parts = line.split(':') - new_options = parts[0] - for p in self.PLATFORMS: - if not p == platform: - new_options += p.upper() + ' ' - new_line = ('%s:%s' % (new_options, parts[1])) - f_new.write(new_line) - tests_updated += 1 - logging.info('Test updated: ') - logging.info(' old: %s', line) - logging.info(' new: %s', new_line) - else: - logging.error('Unknown update action: %d; line: %s', - action, line) - - logging.info('Total tests removed: %d', tests_removed) - logging.info('Total tests updated: %d', tests_updated) - - f_orig.close() - f_new.close() - - if backup: - date_suffix = time.strftime('%Y%m%d%H%M%S', - time.localtime(time.time())) - backup_file = ('%s.orig.%s' % (self._path, date_suffix)) - if os.path.exists(backup_file): - os.remove(backup_file) - logging.info('Saving original file to "%s"', backup_file) - os.rename(self._path, backup_file) - else: - os.remove(self._path) - - logging.debug('Saving new file to "%s"', self._path) - os.rename(new_file, self._path) - return True - - def ParseExpectationsLine(self, line, lineno): - """Parses a line from test_expectations.txt and returns a tuple - with the test path, options as a list, expectations as a list.""" - line = StripComments(line) - if not line: - return (None, None, None) - - options = [] - if line.find(":") is -1: - test_and_expectation = line.split("=") - else: - parts = line.split(":") - options = self._GetOptionsList(parts[0]) - test_and_expectation = parts[1].split('=') - - test = test_and_expectation[0].strip() - if (len(test_and_expectation) is not 2): - self._AddError(lineno, "Missing expectations.", - test_and_expectation) - expectations = None - else: - expectations = self._GetOptionsList(test_and_expectation[1]) - - return (test, options, expectations) - - def _GetPlatformUpdateAction(self, line, lineno, tests, platform): - """Check the platform option and return the action needs to be taken. - - Args: - line: current line in test expectations file. - lineno: current line number of line - tests: list of tests that need to update.. - platform: which platform option to remove. - - Returns: - NO_CHANGE: no change to the line (comments, test not in the list etc) - REMOVE_TEST: remove the test from file. - REMOVE_PLATFORM: remove this platform option from the test. - ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. 
- """ - test, options, expectations = self.ParseExpectationsLine(line, lineno) - if not test or test not in tests: - return NO_CHANGE - - has_any_platform = False - for option in options: - if option in self.PLATFORMS: - has_any_platform = True - if not option == platform: - return REMOVE_PLATFORM - - # If there is no platform specified, then it means apply to all - # platforms. Return the action to add all the platforms except this - # one. - if not has_any_platform: - return ADD_PLATFORMS_EXCEPT_THIS - - return REMOVE_TEST - - def _HasValidModifiersForCurrentPlatform(self, options, lineno, - test_and_expectations, modifiers): - """Returns true if the current platform is in the options list or if - no platforms are listed and if there are no fatal errors in the - options list. - - Args: - options: List of lowercase options. - lineno: The line in the file where the test is listed. - test_and_expectations: The path and expectations for the test. - modifiers: The set to populate with modifiers. - """ - has_any_platform = False - has_bug_id = False - for option in options: - if option in self.MODIFIERS: - modifiers.add(option) - elif option in self.PLATFORMS: - has_any_platform = True - elif option.startswith('bug'): - has_bug_id = True - elif option not in self.BUILD_TYPES: - self._AddError(lineno, 'Invalid modifier for test: %s' % - option, test_and_expectations) - - if has_any_platform and not self._MatchPlatform(options): - return False - - if not has_bug_id and 'wontfix' not in options: - # TODO(ojan): Turn this into an AddError call once all the - # tests have BUG identifiers. - self._LogNonFatalError(lineno, 'Test lacks BUG modifier.', - test_and_expectations) - - if 'release' in options or 'debug' in options: - if self._is_debug_mode and 'debug' not in options: - return False - if not self._is_debug_mode and 'release' not in options: - return False - - if 'wontfix' in options and 'defer' in options: - self._AddError(lineno, 'Test cannot be both DEFER and WONTFIX.', - test_and_expectations) - - if self._is_lint_mode and 'rebaseline' in options: - self._AddError(lineno, 'REBASELINE should only be used for running' - 'rebaseline.py. Cannot be checked in.', test_and_expectations) - - return True - - def _MatchPlatform(self, options): - """Match the list of options against our specified platform. If any - of the options prefix-match self._platform, return True. This handles - the case where a test is marked WIN and the platform is WIN-VISTA. - - Args: - options: list of options - """ - for opt in options: - if self._platform.startswith(opt): - return True - return False - - def _AddToAllExpectations(self, test, options, expectations): - # Make all paths unix-style so the dashboard doesn't need to. 
- test = test.replace('\\', '/') - if not test in self._all_expectations: - self._all_expectations[test] = [] - self._all_expectations[test].append( - ModifiersAndExpectations(options, expectations)) - - def _Read(self, expectations): - """For each test in an expectations iterable, generate the - expectations for it.""" - lineno = 0 - for line in expectations: - lineno += 1 - - test_list_path, options, expectations = \ - self.ParseExpectationsLine(line, lineno) - if not expectations: - continue - - self._AddToAllExpectations(test_list_path, - " ".join(options).upper(), - " ".join(expectations).upper()) - - modifiers = set() - if options and not self._HasValidModifiersForCurrentPlatform( - options, lineno, test_list_path, modifiers): - continue - - expectations = self._ParseExpectations(expectations, lineno, - test_list_path) - - if 'slow' in options and TIMEOUT in expectations: - self._AddError(lineno, - 'A test can not be both slow and timeout. If it times out ' - 'indefinitely, then it should be just timeout.', - test_list_path) - - full_path = os.path.join(path_utils.LayoutTestsDir(), - test_list_path) - full_path = os.path.normpath(full_path) - # WebKit's way of skipping tests is to add a -disabled suffix. - # So we should consider the path existing if the path or the - # -disabled version exists. - if (self._tests_are_present and not os.path.exists(full_path) - and not os.path.exists(full_path + '-disabled')): - # Log a non fatal error here since you hit this case any - # time you update test_expectations.txt without syncing - # the LayoutTests directory - self._LogNonFatalError(lineno, 'Path does not exist.', - test_list_path) - continue - - if not self._full_test_list: - tests = [test_list_path] - else: - tests = self._ExpandTests(test_list_path) - - self._AddTests(tests, expectations, test_list_path, lineno, - modifiers, options) - - if not self._suppress_errors and ( - len(self._errors) or len(self._non_fatal_errors)): - if self._is_debug_mode: - build_type = 'DEBUG' - else: - build_type = 'RELEASE' - print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \ - % (self._platform.upper(), build_type) - - for error in self._non_fatal_errors: - logging.error(error) - if len(self._errors): - raise SyntaxError('\n'.join(map(str, self._errors))) - - # Now add in the tests that weren't present in the expectations file - expectations = set([PASS]) - options = [] - modifiers = [] - if self._full_test_list: - for test in self._full_test_list: - if not test in self._test_list_paths: - self._AddTest(test, modifiers, expectations, options) - - def _GetOptionsList(self, listString): - return [part.strip().lower() for part in listString.strip().split(' ')] - - def _ParseExpectations(self, expectations, lineno, test_list_path): - result = set() - for part in expectations: - if not part in self.EXPECTATIONS: - self._AddError(lineno, 'Unsupported expectation: %s' % part, - test_list_path) - continue - expectation = self.EXPECTATIONS[part] - result.add(expectation) - return result - - def _ExpandTests(self, test_list_path): - """Convert the test specification to an absolute, normalized - path and make sure directories end with the OS path separator.""" - path = os.path.join(path_utils.LayoutTestsDir(), test_list_path) - path = os.path.normpath(path) - path = self._FixDir(path) - - result = [] - for test in self._full_test_list: - if test.startswith(path): - result.append(test) - return result - - def _FixDir(self, path): - """Check to see if the path points to a directory, and if so, append - the 
directory separator if necessary.""" - if self._tests_are_present: - if os.path.isdir(path): - path = os.path.join(path, '') - else: - # If we can't check the filesystem to see if this is a directory, - # we assume that files w/o an extension are directories. - # TODO(dpranke): What happens w/ LayoutTests/css2.1 ? - if os.path.splitext(path)[1] == '': - path = os.path.join(path, '') - return path - - def _AddTests(self, tests, expectations, test_list_path, lineno, modifiers, - options): - for test in tests: - if self._AlreadySeenTest(test, test_list_path, lineno): - continue - - self._ClearExpectationsForTest(test, test_list_path) - self._AddTest(test, modifiers, expectations, options) - - def _AddTest(self, test, modifiers, expectations, options): - """Sets the expected state for a given test. - - This routine assumes the test has not been added before. If it has, - use _ClearExpectationsForTest() to reset the state prior to - calling this. - - Args: - test: test to add - modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) - expectations: sequence of expectations (PASS, IMAGE, etc.) - options: sequence of keywords and bug identifiers.""" - self._test_to_expectations[test] = expectations - for expectation in expectations: - self._expectation_to_tests[expectation].add(test) - - self._test_to_options[test] = options - self._test_to_modifiers[test] = set() - for modifier in modifiers: - mod_value = self.MODIFIERS[modifier] - self._modifier_to_tests[mod_value].add(test) - self._test_to_modifiers[test].add(mod_value) - - if 'wontfix' in modifiers: - self._timeline_to_tests[WONTFIX].add(test) - elif 'defer' in modifiers: - self._timeline_to_tests[DEFER].add(test) - else: - self._timeline_to_tests[NOW].add(test) - - if 'skip' in modifiers: - self._result_type_to_tests[SKIP].add(test) - elif expectations == set([PASS]): - self._result_type_to_tests[PASS].add(test) - elif len(expectations) > 1: - self._result_type_to_tests[FLAKY].add(test) - else: - self._result_type_to_tests[FAIL].add(test) - - def _ClearExpectationsForTest(self, test, test_list_path): - """Remove prexisting expectations for this test. - This happens if we are seeing a more precise path - than a previous listing. - """ - if test in self._test_list_paths: - self._test_to_expectations.pop(test, '') - self._RemoveFromSets(test, self._expectation_to_tests) - self._RemoveFromSets(test, self._modifier_to_tests) - self._RemoveFromSets(test, self._timeline_to_tests) - self._RemoveFromSets(test, self._result_type_to_tests) - - self._test_list_paths[test] = os.path.normpath(test_list_path) - - def _RemoveFromSets(self, test, dict): - """Removes the given test from the sets in the dictionary. - - Args: - test: test to look for - dict: dict of sets of files""" - for set_of_tests in dict.itervalues(): - if test in set_of_tests: - set_of_tests.remove(test) - - def _AlreadySeenTest(self, test, test_list_path, lineno): - """Returns true if we've already seen a more precise path for this test - than the test_list_path. - """ - if not test in self._test_list_paths: - return False - - prev_base_path = self._test_list_paths[test] - if (prev_base_path == os.path.normpath(test_list_path)): - self._AddError(lineno, 'Duplicate expectations.', test) - return True - - # Check if we've already seen a more precise path. - return prev_base_path.startswith(os.path.normpath(test_list_path)) - - def _AddError(self, lineno, msg, path): - """Reports an error that will prevent running the tests. 
Does not - immediately raise an exception because we'd like to aggregate all the - errors so they can all be printed out.""" - self._errors.append('\nLine:%s %s %s' % (lineno, msg, path)) - - def _LogNonFatalError(self, lineno, msg, path): - """Reports an error that will not prevent running the tests. These are - still errors, but not bad enough to warrant breaking test running.""" - self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) diff --git a/webkit/tools/layout_tests/layout_package/test_failures.py b/webkit/tools/layout_tests/layout_package/test_failures.py deleted file mode 100644 index 18d26e1..0000000 --- a/webkit/tools/layout_tests/layout_package/test_failures.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Classes for failures that occur during tests.""" - -import os -import test_expectations - - -def DetermineResultType(failure_list): - """Takes a set of test_failures and returns which result type best fits - the list of failures. "Best fits" means we use the worst type of failure. - - Returns: - one of the test_expectations result types - PASS, TEXT, CRASH, etc.""" - - if not failure_list or len(failure_list) == 0: - return test_expectations.PASS - - failure_types = [type(f) for f in failure_list] - if FailureCrash in failure_types: - return test_expectations.CRASH - elif FailureTimeout in failure_types: - return test_expectations.TIMEOUT - elif (FailureMissingResult in failure_types or - FailureMissingImage in failure_types or - FailureMissingImageHash in failure_types): - return test_expectations.MISSING - else: - is_text_failure = FailureTextMismatch in failure_types - is_image_failure = (FailureImageHashIncorrect in failure_types or - FailureImageHashMismatch in failure_types) - if is_text_failure and is_image_failure: - return test_expectations.IMAGE_PLUS_TEXT - elif is_text_failure: - return test_expectations.TEXT - elif is_image_failure: - return test_expectations.IMAGE - else: - raise ValueError("unclassifiable set of failures: " - + str(failure_types)) - - -class TestFailure(object): - """Abstract base class that defines the failure interface.""" - - @staticmethod - def Message(): - """Returns a string describing the failure in more detail.""" - raise NotImplemented - - def ResultHtmlOutput(self, filename): - """Returns an HTML string to be included on the results.html page.""" - raise NotImplemented - - def ShouldKillTestShell(self): - """Returns True if we should kill the test shell before the next - test.""" - return False - - def RelativeOutputFilename(self, filename, modifier): - """Returns a relative filename inside the output dir that contains - modifier. - - For example, if filename is fast\dom\foo.html and modifier is - "-expected.txt", the return value is fast\dom\foo-expected.txt - - Args: - filename: relative filename to test file - modifier: a string to replace the extension of filename with - - Return: - The relative windows path to the output filename - """ - return os.path.splitext(filename)[0] + modifier - - -class FailureWithType(TestFailure): - """Base class that produces standard HTML output based on the test type. - - Subclasses may commonly choose to override the ResultHtmlOutput, but still - use the standard OutputLinks. - """ - - def __init__(self, test_type): - TestFailure.__init__(self) - # TODO(ojan): This class no longer needs to know the test_type. 
- self._test_type = test_type - - # Filename suffixes used by ResultHtmlOutput. - OUT_FILENAMES = [] - - def OutputLinks(self, filename, out_names): - """Returns a string holding all applicable output file links. - - Args: - filename: the test filename, used to construct the result file names - out_names: list of filename suffixes for the files. If three or more - suffixes are in the list, they should be [actual, expected, diff, - wdiff]. Two suffixes should be [actual, expected], and a - single item is the [actual] filename suffix. - If out_names is empty, returns the empty string. - """ - links = [''] - uris = [self.RelativeOutputFilename(filename, fn) for fn in out_names] - if len(uris) > 1: - links.append("expected" % uris[1]) - if len(uris) > 0: - links.append("actual" % uris[0]) - if len(uris) > 2: - links.append("diff" % uris[2]) - if len(uris) > 3: - links.append("wdiff" % uris[3]) - return ' '.join(links) - - def ResultHtmlOutput(self, filename): - return self.Message() + self.OutputLinks(filename, self.OUT_FILENAMES) - - -class FailureTimeout(TestFailure): - """Test timed out. We also want to restart the test shell if this - happens.""" - - @staticmethod - def Message(): - return "Test timed out" - - def ResultHtmlOutput(self, filename): - return "%s" % self.Message() - - def ShouldKillTestShell(self): - return True - - -class FailureCrash(TestFailure): - """Test shell crashed.""" - - @staticmethod - def Message(): - return "Test shell crashed" - - def ResultHtmlOutput(self, filename): - # TODO(tc): create a link to the minidump file - stack = self.RelativeOutputFilename(filename, "-stack.txt") - return "%s stack" % (self.Message(), - stack) - - def ShouldKillTestShell(self): - return True - - -class FailureMissingResult(FailureWithType): - """Expected result was missing.""" - OUT_FILENAMES = ["-actual.txt"] - - @staticmethod - def Message(): - return "No expected results found" - - def ResultHtmlOutput(self, filename): - return ("%s" % self.Message() + - self.OutputLinks(filename, self.OUT_FILENAMES)) - - -class FailureTextMismatch(FailureWithType): - """Text diff output failed.""" - # Filename suffixes used by ResultHtmlOutput. - OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"] - OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt", - "-wdiff.html"] - - def __init__(self, test_type, has_wdiff): - FailureWithType.__init__(self, test_type) - if has_wdiff: - self.OUT_FILENAMES = self.OUT_FILENAMES_WDIFF - - @staticmethod - def Message(): - return "Text diff mismatch" - - -class FailureMissingImageHash(FailureWithType): - """Actual result hash was missing.""" - # Chrome doesn't know to display a .checksum file as text, so don't bother - # putting in a link to the actual result. 
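# Illustrative results of DetermineResultType() from earlier in this file,
# using the failure classes defined above ('t' stands for any test_type):
#
#   DetermineResultType([])                              -> PASS
#   DetermineResultType([FailureTimeout()])              -> TIMEOUT
#   DetermineResultType([FailureCrash(),
#                        FailureTextMismatch(t, False)]) -> CRASH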
- OUT_FILENAMES = [] - - @staticmethod - def Message(): - return "No expected image hash found" - - def ResultHtmlOutput(self, filename): - return "%s" % self.Message() - - -class FailureMissingImage(FailureWithType): - """Actual result image was missing.""" - OUT_FILENAMES = ["-actual.png"] - - @staticmethod - def Message(): - return "No expected image found" - - def ResultHtmlOutput(self, filename): - return ("%s" % self.Message() + - self.OutputLinks(filename, self.OUT_FILENAMES)) - - -class FailureImageHashMismatch(FailureWithType): - """Image hashes didn't match.""" - OUT_FILENAMES = ["-actual.png", "-expected.png", "-diff.png"] - - @staticmethod - def Message(): - # We call this a simple image mismatch to avoid confusion, since - # we link to the PNGs rather than the checksums. - return "Image mismatch" - - -class FailureFuzzyFailure(FailureWithType): - """Image hashes didn't match.""" - OUT_FILENAMES = ["-actual.png", "-expected.png"] - - @staticmethod - def Message(): - return "Fuzzy image match also failed" - - -class FailureImageHashIncorrect(FailureWithType): - """Actual result hash is incorrect.""" - # Chrome doesn't know to display a .checksum file as text, so don't bother - # putting in a link to the actual result. - OUT_FILENAMES = [] - - @staticmethod - def Message(): - return "Images match, expected image hash incorrect. " - - def ResultHtmlOutput(self, filename): - return "%s" % self.Message() diff --git a/webkit/tools/layout_tests/layout_package/test_files.py b/webkit/tools/layout_tests/layout_package/test_files.py deleted file mode 100644 index bc8eaad..0000000 --- a/webkit/tools/layout_tests/layout_package/test_files.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""This module is used to find all of the layout test files used by Chromium -(across all platforms). It exposes one public function - GatherTestFiles() - -which takes an optional list of paths. If a list is passed in, the returned -list of test files is constrained to those found under the paths passed in, -i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files -under that directory.""" - -import glob -import os -import path_utils - -# When collecting test cases, we include any file with these extensions. -_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', - '.php', '.svg']) -# When collecting test cases, skip these directories -_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) - - -def GatherTestFiles(paths): - """Generate a set of test files and return them. - - Args: - paths: a list of command line paths relative to the webkit/tests - directory. glob patterns are ok. - """ - paths_to_walk = set() - # if paths is empty, provide a pre-defined list. - if paths: - for path in paths: - # If there's an * in the name, assume it's a glob pattern. 
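# Illustrative note (hypothetical paths, not from this patch): a call such as
#
#   test_files.GatherTestFiles(['fast/dom', 'fast/css/a*.html'])
#
# walks the first path recursively (pruning the _skipped_directories above)
# and expands the second with glob.glob(), keeping only files whose extension
# appears in _supported_file_extensions.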
- path = os.path.join(path_utils.LayoutTestsDir(), path) - if path.find('*') > -1: - filenames = glob.glob(path) - paths_to_walk.update(filenames) - else: - paths_to_walk.add(path) - else: - paths_to_walk.add(path_utils.LayoutTestsDir()) - - # Now walk all the paths passed in on the command line and get filenames - test_files = set() - for path in paths_to_walk: - if os.path.isfile(path) and _HasSupportedExtension(path): - test_files.add(os.path.normpath(path)) - continue - - for root, dirs, files in os.walk(path): - # don't walk skipped directories and sub directories - if os.path.basename(root) in _skipped_directories: - del dirs[:] - continue - - for filename in files: - if _HasSupportedExtension(filename): - filename = os.path.join(root, filename) - filename = os.path.normpath(filename) - test_files.add(filename) - - return test_files - - -def _HasSupportedExtension(filename): - """Return true if filename is one of the file extensions we want to run a - test on.""" - extension = os.path.splitext(filename)[1] - return extension in _supported_file_extensions diff --git a/webkit/tools/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/layout_package/test_shell_thread.py deleted file mode 100644 index 1ae2ed8..0000000 --- a/webkit/tools/layout_tests/layout_package/test_shell_thread.py +++ /dev/null @@ -1,488 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A Thread object for running the test shell and processing URLs from a -shared queue. - -Each thread runs a separate instance of the test_shell binary and validates -the output. When there are no more URLs to process in the shared queue, the -thread exits. -""" - -import copy -import logging -import os -import Queue -import signal -import subprocess -import sys -import thread -import threading -import time - -import path_utils -import test_failures - - -def ProcessOutput(proc, test_info, test_types, test_args, target, output_dir): - """Receives the output from a test_shell process, subjects it to a number - of tests, and returns a list of failure types the test produced. - - Args: - proc: an active test_shell process - test_info: Object containing the test filename, uri and timeout - test_types: list of test types to subject the output to - test_args: arguments to be passed to each test - target: Debug or Release - output_dir: directory to put crash stack traces into - - Returns: a list of failure objects and times for the test being processed - """ - outlines = [] - extra_lines = [] - failures = [] - crash = False - - # Some test args, such as the image hash, may be added or changed on a - # test-by-test basis. - local_test_args = copy.copy(test_args) - - start_time = time.time() - - line = proc.stdout.readline() - - # Only start saving output lines once we've loaded the URL for the test. - url = None - test_string = test_info.uri.strip() - - while line.rstrip() != "#EOF": - # Make sure we haven't crashed. - if line == '' and proc.poll() is not None: - failures.append(test_failures.FailureCrash()) - - # This is hex code 0xc000001d, which is used for abrupt - # termination. This happens if we hit ctrl+c from the prompt and - # we happen to be waiting on the test_shell. - # sdoyon: Not sure for which OS and in what circumstances the - # above code is valid. What works for me under Linux to detect - # ctrl+c is for the subprocess returncode to be negative SIGINT. 
- # And that agrees with the subprocess documentation. - if (-1073741510 == proc.returncode or - - signal.SIGINT == proc.returncode): - raise KeyboardInterrupt - crash = True - break - - # Don't include #URL lines in our output - if line.startswith("#URL:"): - url = line.rstrip()[5:] - if url != test_string: - logging.fatal("Test got out of sync:\n|%s|\n|%s|" % - (url, test_string)) - raise AssertionError("test out of sync") - elif line.startswith("#MD5:"): - local_test_args.hash = line.rstrip()[5:] - elif line.startswith("#TEST_TIMED_OUT"): - # Test timed out, but we still need to read until #EOF. - failures.append(test_failures.FailureTimeout()) - elif url: - outlines.append(line) - else: - extra_lines.append(line) - - line = proc.stdout.readline() - - end_test_time = time.time() - - if len(extra_lines): - extra = "".join(extra_lines) - if crash: - logging.debug("Stacktrace for %s:\n%s" % (test_string, extra)) - # Strip off "file://" since RelativeTestFilename expects - # filesystem paths. - filename = os.path.join(output_dir, - path_utils.RelativeTestFilename(test_string[7:])) - filename = os.path.splitext(filename)[0] + "-stack.txt" - path_utils.MaybeMakeDirectory(os.path.split(filename)[0]) - open(filename, "wb").write(extra) - else: - logging.debug("Previous test output extra lines after dump:\n%s" % - extra) - - # Check the output and save the results. - time_for_diffs = {} - for test_type in test_types: - start_diff_time = time.time() - new_failures = test_type.CompareOutput(test_info.filename, - proc, - ''.join(outlines), - local_test_args, - target) - # Don't add any more failures if we already have a crash, so we don't - # double-report those tests. We do double-report for timeouts since - # we still want to see the text and image output. - if not crash: - failures.extend(new_failures) - time_for_diffs[test_type.__class__.__name__] = ( - time.time() - start_diff_time) - - total_time_for_all_diffs = time.time() - end_test_time - test_run_time = end_test_time - start_time - return TestStats(test_info.filename, failures, test_run_time, - total_time_for_all_diffs, time_for_diffs) - - -def StartTestShell(command, args): - """Returns the process for a new test_shell started in layout-tests mode. - """ - cmd = [] - # Hook for injecting valgrind or other runtime instrumentation, - # used by e.g. tools/valgrind/valgrind_tests.py. - wrapper = os.environ.get("BROWSER_WRAPPER", None) - if wrapper != None: - cmd += [wrapper] - cmd += command + ['--layout-tests'] + args - return subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - - -class TestStats: - - def __init__(self, filename, failures, test_run_time, - total_time_for_all_diffs, time_for_diffs): - self.filename = filename - self.failures = failures - self.test_run_time = test_run_time - self.total_time_for_all_diffs = total_time_for_all_diffs - self.time_for_diffs = time_for_diffs - - -class SingleTestThread(threading.Thread): - """Thread wrapper for running a single test file.""" - - def __init__(self, test_shell_command, shell_args, test_info, test_types, - test_args, target, output_dir): - """ - Args: - test_info: Object containing the test filename, uri and timeout - output_dir: Directory to put crash stacks into. - See TestShellThread for documentation of the remaining arguments. 
- """ - - threading.Thread.__init__(self) - self._command = test_shell_command - self._shell_args = shell_args - self._test_info = test_info - self._test_types = test_types - self._test_args = test_args - self._target = target - self._output_dir = output_dir - - def run(self): - proc = StartTestShell(self._command, self._shell_args + - ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri]) - self._test_stats = ProcessOutput(proc, self._test_info, - self._test_types, self._test_args, self._target, self._output_dir) - - def GetTestStats(self): - return self._test_stats - - -class TestShellThread(threading.Thread): - - def __init__(self, filename_list_queue, result_queue, test_shell_command, - test_types, test_args, shell_args, options): - """Initialize all the local state for this test shell thread. - - Args: - filename_list_queue: A thread safe Queue class that contains lists - of tuples of (filename, uri) pairs. - result_queue: A thread safe Queue class that will contain tuples of - (test, failure lists) for the test results. - test_shell_command: A list specifying the command+args for - test_shell - test_types: A list of TestType objects to run the test output - against. - test_args: A TestArguments object to pass to each TestType. - shell_args: Any extra arguments to be passed to test_shell.exe. - options: A property dictionary as produced by optparse. The - command-line options should match those expected by - run_webkit_tests; they are typically passed via the - run_webkit_tests.TestRunner class.""" - threading.Thread.__init__(self) - self._filename_list_queue = filename_list_queue - self._result_queue = result_queue - self._filename_list = [] - self._test_shell_command = test_shell_command - self._test_types = test_types - self._test_args = test_args - self._test_shell_proc = None - self._shell_args = shell_args - self._options = options - self._canceled = False - self._exception_info = None - self._directory_timing_stats = {} - self._test_stats = [] - self._num_tests = 0 - self._start_time = 0 - self._stop_time = 0 - - # Current directory of tests we're running. - self._current_dir = None - # Number of tests in self._current_dir. - self._num_tests_in_current_dir = None - # Time at which we started running tests from self._current_dir. - self._current_dir_start_time = None - - def GetDirectoryTimingStats(self): - """Returns a dictionary mapping test directory to a tuple of - (number of tests in that directory, time to run the tests)""" - return self._directory_timing_stats - - def GetIndividualTestStats(self): - """Returns a list of (test_filename, time_to_run_test, - total_time_for_all_diffs, time_for_diffs) tuples.""" - return self._test_stats - - def Cancel(self): - """Set a flag telling this thread to quit.""" - self._canceled = True - - def GetExceptionInfo(self): - """If run() terminated on an uncaught exception, return it here - ((type, value, traceback) tuple). - Returns None if run() terminated normally. 
Meant to be called after - joining this thread.""" - return self._exception_info - - def GetTotalTime(self): - return max(self._stop_time - self._start_time, 0.0) - - def GetNumTests(self): - return self._num_tests - - def run(self): - """Delegate main work to a helper method and watch for uncaught - exceptions.""" - self._start_time = time.time() - self._num_tests = 0 - try: - logging.debug('%s starting' % (self.getName())) - self._Run(test_runner=None, result_summary=None) - logging.debug('%s done (%d tests)' % (self.getName(), - self.GetNumTests())) - except: - # Save the exception for our caller to see. - self._exception_info = sys.exc_info() - self._stop_time = time.time() - # Re-raise it and die. - logging.error('%s dying: %s' % (self.getName(), - self._exception_info)) - raise - self._stop_time = time.time() - - def RunInMainThread(self, test_runner, result_summary): - """This hook allows us to run the tests from the main thread if - --num-test-shells==1, instead of having to always run two or more - threads. This allows us to debug the test harness without having to - do multi-threaded debugging.""" - self._Run(test_runner, result_summary) - - def _Run(self, test_runner, result_summary): - """Main work entry point of the thread. Basically we pull urls from the - filename queue and run the tests until we run out of urls. - - If test_runner is not None, then we call test_runner.UpdateSummary() - with the results of each test.""" - batch_size = 0 - batch_count = 0 - if self._options.batch_size: - try: - batch_size = int(self._options.batch_size) - except: - logging.info("Ignoring invalid batch size '%s'" % - self._options.batch_size) - - # Append tests we're running to the existing tests_run.txt file. - # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput. - tests_run_filename = os.path.join(self._options.results_directory, - "tests_run.txt") - tests_run_file = open(tests_run_filename, "a") - - while True: - if self._canceled: - logging.info('Testing canceled') - tests_run_file.close() - return - - if len(self._filename_list) is 0: - if self._current_dir is not None: - self._directory_timing_stats[self._current_dir] = \ - (self._num_tests_in_current_dir, - time.time() - self._current_dir_start_time) - - try: - self._current_dir, self._filename_list = \ - self._filename_list_queue.get_nowait() - except Queue.Empty: - self._KillTestShell() - tests_run_file.close() - return - - self._num_tests_in_current_dir = len(self._filename_list) - self._current_dir_start_time = time.time() - - test_info = self._filename_list.pop() - - # We have a url, run tests. - batch_count += 1 - self._num_tests += 1 - if self._options.run_singly: - failures = self._RunTestSingly(test_info) - else: - failures = self._RunTest(test_info) - - filename = test_info.filename - tests_run_file.write(filename + "\n") - if failures: - # Check and kill test shell if we need too. - if len([1 for f in failures if f.ShouldKillTestShell()]): - self._KillTestShell() - # Reset the batch count since the shell just bounced. - batch_count = 0 - # Print the error message(s). - error_str = '\n'.join([' ' + f.Message() for f in failures]) - logging.debug("%s %s failed:\n%s" % (self.getName(), - path_utils.RelativeTestFilename(filename), - error_str)) - else: - logging.debug("%s %s passed" % (self.getName(), - path_utils.RelativeTestFilename(filename))) - self._result_queue.put((filename, failures)) - - if batch_size > 0 and batch_count > batch_size: - # Bounce the shell and reset count. 
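# Illustrative note (names below are assumptions, not code from this patch):
# each queue entry is a (directory, list-of-tests) pair, and a thread is
# driven roughly like so:
#
#   queue.put(('LayoutTests/fast/dom', tests_in_that_dir))
#   thread = TestShellThread(queue, result_queue, test_shell_command,
#                            test_types, test_args, shell_args, options)
#   thread.start(); thread.join()
#   while not result_queue.empty():
#       filename, failures = result_queue.get_nowait()
#
# With a positive batch_size, the loop above also restarts test_shell once
# batch_count exceeds batch_size, in addition to the restarts forced by
# crashes and timeouts via ShouldKillTestShell().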
- self._KillTestShell() - batch_count = 0 - - if test_runner: - test_runner.UpdateSummary(result_summary) - - def _RunTestSingly(self, test_info): - """Run a test in a separate thread, enforcing a hard time limit. - - Since we can only detect the termination of a thread, not any internal - state or progress, we can only run per-test timeouts when running test - files singly. - - Args: - test_info: Object containing the test filename, uri and timeout - - Return: - A list of TestFailure objects describing the error. - """ - worker = SingleTestThread(self._test_shell_command, - self._shell_args, - test_info, - self._test_types, - self._test_args, - self._options.target, - self._options.results_directory) - - worker.start() - - # When we're running one test per test_shell process, we can enforce - # a hard timeout. the test_shell watchdog uses 2.5x the timeout - # We want to be larger than that. - worker.join(int(test_info.timeout) * 3.0 / 1000.0) - if worker.isAlive(): - # If join() returned with the thread still running, the - # test_shell.exe is completely hung and there's nothing - # more we can do with it. We have to kill all the - # test_shells to free it up. If we're running more than - # one test_shell thread, we'll end up killing the other - # test_shells too, introducing spurious crashes. We accept that - # tradeoff in order to avoid losing the rest of this thread's - # results. - logging.error('Test thread hung: killing all test_shells') - path_utils.KillAllTestShells() - - try: - stats = worker.GetTestStats() - self._test_stats.append(stats) - failures = stats.failures - except AttributeError, e: - failures = [] - logging.error('Cannot get results of test: %s' % - test_info.filename) - - return failures - - def _RunTest(self, test_info): - """Run a single test file using a shared test_shell process. - - Args: - test_info: Object containing the test filename, uri and timeout - - Return: - A list of TestFailure objects describing the error. - """ - self._EnsureTestShellIsRunning() - # Args to test_shell is a space-separated list of - # "uri timeout pixel_hash" - # The timeout and pixel_hash are optional. The timeout is used if this - # test has a custom timeout. The pixel_hash is used to avoid doing an - # image dump if the checksums match, so it should be set to a blank - # value if we are generating a new baseline. - # (Otherwise, an image from a previous run will be copied into - # the baseline.) - image_hash = test_info.image_hash - if image_hash and self._test_args.new_baseline: - image_hash = "" - self._test_shell_proc.stdin.write(("%s %s %s\n" % - (test_info.uri, test_info.timeout, image_hash))) - - # If the test shell is dead, the above may cause an IOError as we - # try to write onto the broken pipe. If this is the first test for - # this test shell process, than the test shell did not - # successfully start. If this is not the first test, then the - # previous tests have caused some kind of delayed crash. We don't - # try to recover here. - self._test_shell_proc.stdin.flush() - - stats = ProcessOutput(self._test_shell_proc, test_info, - self._test_types, self._test_args, - self._options.target, - self._options.results_directory) - - self._test_stats.append(stats) - return stats.failures - - def _EnsureTestShellIsRunning(self): - """Start the shared test shell, if it's not running. Not for use when - running tests singly, since those each start a separate test shell in - their own thread. 
- """ - if (not self._test_shell_proc or - self._test_shell_proc.poll() is not None): - self._test_shell_proc = StartTestShell(self._test_shell_command, - self._shell_args) - - def _KillTestShell(self): - """Kill the test shell process if it's running.""" - if self._test_shell_proc: - self._test_shell_proc.stdin.close() - self._test_shell_proc.stdout.close() - if self._test_shell_proc.stderr: - self._test_shell_proc.stderr.close() - if (sys.platform not in ('win32', 'cygwin') and - not self._test_shell_proc.poll()): - # Closing stdin/stdout/stderr hangs sometimes on OS X. - null = open(os.devnull, "w") - subprocess.Popen(["kill", "-9", - str(self._test_shell_proc.pid)], stderr=null) - null.close() - self._test_shell_proc = None diff --git a/webkit/tools/layout_tests/layout_package/websocket_server.py b/webkit/tools/layout_tests/layout_package/websocket_server.py deleted file mode 100644 index 090a5d2..0000000 --- a/webkit/tools/layout_tests/layout_package/websocket_server.py +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""A class to help start/stop the PyWebSocket server used by layout tests.""" - - -import logging -import optparse -import os -import subprocess -import sys -import tempfile -import time - -import path_utils -import platform_utils -import http_server - -# So we can import httpd_utils below to make ui_tests happy. -sys.path.append(path_utils.PathFromBase('tools', 'python')) -import google.httpd_utils - -_WS_LOG_PREFIX = 'pywebsocket.ws.log-' -_WSS_LOG_PREFIX = 'pywebsocket.wss.log-' - -_DEFAULT_WS_PORT = 8880 -_DEFAULT_WSS_PORT = 9323 - - -def RemoveLogFiles(folder, starts_with): - files = os.listdir(folder) - for file in files: - if file.startswith(starts_with): - full_path = os.path.join(folder, file) - os.remove(full_path) - - -class PyWebSocketNotStarted(Exception): - pass - - -class PyWebSocketNotFound(Exception): - pass - - -class PyWebSocket(http_server.Lighttpd): - - def __init__(self, output_dir, port=_DEFAULT_WS_PORT, - root=None, - use_tls=False, - private_key=http_server.Lighttpd._pem_file, - certificate=http_server.Lighttpd._pem_file, - register_cygwin=None, - pidfile=None): - """Args: - output_dir: the absolute path to the layout test result directory - """ - http_server.Lighttpd.__init__(self, output_dir, - port=port, - root=root, - register_cygwin=register_cygwin) - self._output_dir = output_dir - self._process = None - self._port = port - self._root = root - self._use_tls = use_tls - self._private_key = private_key - self._certificate = certificate - if self._port: - self._port = int(self._port) - if self._use_tls: - self._server_name = 'PyWebSocket(Secure)' - else: - self._server_name = 'PyWebSocket' - self._pidfile = pidfile - self._wsout = None - - # Webkit tests - if self._root: - self._layout_tests = os.path.abspath(self._root) - self._web_socket_tests = os.path.abspath( - os.path.join(self._root, 'websocket', 'tests')) - else: - try: - self._web_socket_tests = path_utils.PathFromBase( - 'third_party', 'WebKit', 'LayoutTests', 'websocket', - 'tests') - self._layout_tests = path_utils.PathFromBase( - 'third_party', 'WebKit', 'LayoutTests') - except path_utils.PathNotFound: - self._web_socket_tests = None - - def Start(self): - if not self._web_socket_tests: - logging.info('No need to start %s server.' 
% self._server_name) - return - if self.IsRunning(): - raise PyWebSocketNotStarted('%s is already running.' % - self._server_name) - - time_str = time.strftime('%d%b%Y-%H%M%S') - if self._use_tls: - log_prefix = _WSS_LOG_PREFIX - else: - log_prefix = _WS_LOG_PREFIX - log_file_name = log_prefix + time_str - - # Remove old log files. We only need to keep the last ones. - RemoveLogFiles(self._output_dir, log_prefix) - - error_log = os.path.join(self._output_dir, log_file_name + "-err.txt") - - output_log = os.path.join(self._output_dir, log_file_name + "-out.txt") - self._wsout = open(output_log, "w") - - python_interp = sys.executable - pywebsocket_base = path_utils.PathFromBase( - 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket') - pywebsocket_script = path_utils.PathFromBase( - 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket', - 'mod_pywebsocket', 'standalone.py') - start_cmd = [ - python_interp, pywebsocket_script, - '-p', str(self._port), - '-d', self._layout_tests, - '-s', self._web_socket_tests, - '-l', error_log, - ] - if self._use_tls: - start_cmd.extend(['-t', '-k', self._private_key, - '-c', self._certificate]) - - # Put the cygwin directory first in the path to find cygwin1.dll - env = os.environ - if sys.platform in ('cygwin', 'win32'): - env['PATH'] = '%s;%s' % ( - path_utils.PathFromBase('third_party', 'cygwin', 'bin'), - env['PATH']) - - if sys.platform == 'win32' and self._register_cygwin: - setup_mount = path_utils.PathFromBase('third_party', 'cygwin', - 'setup_mount.bat') - subprocess.Popen(setup_mount).wait() - - env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + - env.get('PYTHONPATH', '')) - - logging.debug('Starting %s server.' % self._server_name) - self._process = subprocess.Popen(start_cmd, stdout=self._wsout, - env=env) - - # Wait a bit before checking the liveness of the server. - time.sleep(0.5) - - if self._use_tls: - url = 'https' - else: - url = 'http' - url = url + '://127.0.0.1:%d/' % self._port - if not google.httpd_utils.UrlIsAlive(url): - raise PyWebSocketNotStarted( - 'Failed to start %s server on port %s.' % - (self._server_name, self._port)) - - # Our process terminated already - if self._process.returncode != None: - raise PyWebSocketNotStarted( - 'Failed to start %s server.' % self._server_name) - if self._pidfile: - f = open(self._pidfile, 'w') - f.write("%d" % self._process.pid) - f.close() - - def Stop(self, force=False): - if not force and not self.IsRunning(): - return - - if self._process: - pid = self._process.pid - elif self._pidfile: - f = open(self._pidfile) - pid = int(f.read().strip()) - f.close() - - if not pid: - raise PyWebSocketNotFound( - 'Failed to find %s server pid.' % self._server_name) - - logging.debug('Shutting down %s server %d.' % (self._server_name, pid)) - platform_utils.KillProcess(pid) - - if self._process: - self._process.wait() - self._process = None - - if self._wsout: - self._wsout.close() - self._wsout = None - - -if '__main__' == __name__: - # Provide some command line params for starting the PyWebSocket server - # manually. 
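# Illustrative note (not from this patch): the layout test runner drives this
# class programmatically rather than through these flags, e.g.
#
#   server = websocket_server.PyWebSocket(options.results_directory)
#   server.Start()
#   ...
#   server.Stop()
#
# The flags below are only a convenience for starting and stopping the server
# by hand.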
- option_parser = optparse.OptionParser() - option_parser.add_option('--server', type='choice', - choices=['start', 'stop'], default='start', - help='Server action (start|stop)') - option_parser.add_option('-p', '--port', dest='port', - default=None, help='Port to listen on') - option_parser.add_option('-r', '--root', - help='Absolute path to DocumentRoot ' - '(overrides layout test roots)') - option_parser.add_option('-t', '--tls', dest='use_tls', - action='store_true', - default=False, help='use TLS (wss://)') - option_parser.add_option('-k', '--private_key', dest='private_key', - default='', help='TLS private key file.') - option_parser.add_option('-c', '--certificate', dest='certificate', - default='', help='TLS certificate file.') - option_parser.add_option('--register_cygwin', action="store_true", - dest="register_cygwin", - help='Register Cygwin paths (on Win try bots)') - option_parser.add_option('--pidfile', help='path to pid file.') - options, args = option_parser.parse_args() - - if not options.port: - if options.use_tls: - options.port = _DEFAULT_WSS_PORT - else: - options.port = _DEFAULT_WS_PORT - - kwds = {'port': options.port, 'use_tls': options.use_tls} - if options.root: - kwds['root'] = options.root - if options.private_key: - kwds['private_key'] = options.private_key - if options.certificate: - kwds['certificate'] = options.certificate - kwds['register_cygwin'] = options.register_cygwin - if options.pidfile: - kwds['pidfile'] = options.pidfile - - pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds) - - if 'start' == options.server: - pywebsocket.Start() - else: - pywebsocket.Stop(force=True) diff --git a/webkit/tools/layout_tests/rebaseline.bat b/webkit/tools/layout_tests/rebaseline.bat index 341b66d..dc0ed2bd 100644 --- a/webkit/tools/layout_tests/rebaseline.bat +++ b/webkit/tools/layout_tests/rebaseline.bat @@ -1 +1 @@ -%~dp0..\..\..\third_party\python_24\python.exe %~dp0rebaseline.py %* +%~dp0..\..\..\third_party\python_24\python.exe %~dp0\webkitpy\rebaseline.py %* diff --git a/webkit/tools/layout_tests/rebaseline.py b/webkit/tools/layout_tests/rebaseline.py deleted file mode 100644 index 392180ae2..0000000 --- a/webkit/tools/layout_tests/rebaseline.py +++ /dev/null @@ -1,1011 +0,0 @@ -#!usr/bin/env python -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Rebaselining tool that automatically produces baselines for all platforms. - -The script does the following for each platform specified: - 1. Compile a list of tests that need rebaselining. - 2. Download test result archive from buildbot for the platform. - 3. Extract baselines from the archive file for all identified files. - 4. Add new baselines to SVN repository. - 5. For each test that has been rebaselined, remove this platform option from - the test in test_expectation.txt. If no other platforms remain after - removal, delete the rebaselined test from the file. - -At the end, the script generates a html that compares old and new baselines. -""" - -import logging -import optparse -import os -import re -import shutil -import subprocess -import sys -import tempfile -import time -import urllib -import webbrowser -import zipfile - -from layout_package import path_utils -from layout_package import test_expectations -from test_types import image_diff -from test_types import text_diff - -# Repository type constants. 
-REPO_SVN, REPO_UNKNOWN = range(2) - -BASELINE_SUFFIXES = ['.txt', '.png', '.checksum'] -REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux'] -ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel', - 'win-vista': 'webkit-dbg-vista', - 'win-xp': 'webkit-rel', - 'mac': 'webkit-rel-mac5', - 'linux': 'webkit-rel-linux', - 'win-canary': 'webkit-rel-webkit-org', - 'win-vista-canary': 'webkit-dbg-vista', - 'win-xp-canary': 'webkit-rel-webkit-org', - 'mac-canary': 'webkit-rel-mac-webkit-org', - 'linux-canary': 'webkit-rel-linux-webkit-org'} - - -def RunShellWithReturnCode(command, print_output=False): - """Executes a command and returns the output and process return code. - - Args: - command: program and arguments. - print_output: if true, print the command results to standard output. - - Returns: - command output, return code - """ - - # Use a shell for subcommands on Windows to get a PATH search. - use_shell = sys.platform.startswith('win') - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=use_shell) - if print_output: - output_array = [] - while True: - line = p.stdout.readline() - if not line: - break - if print_output: - print line.strip('\n') - output_array.append(line) - output = ''.join(output_array) - else: - output = p.stdout.read() - p.wait() - p.stdout.close() - - return output, p.returncode - - -def RunShell(command, print_output=False): - """Executes a command and returns the output. - - Args: - command: program and arguments. - print_output: if true, print the command results to standard output. - - Returns: - command output - """ - - output, return_code = RunShellWithReturnCode(command, print_output) - return output - - -def LogDashedString(text, platform, logging_level=logging.INFO): - """Log text message with dashes on both sides.""" - - msg = text - if platform: - msg += ': ' + platform - if len(msg) < 78: - dashes = '-' * ((78 - len(msg)) / 2) - msg = '%s %s %s' % (dashes, msg, dashes) - - if logging_level == logging.ERROR: - logging.error(msg) - elif logging_level == logging.WARNING: - logging.warn(msg) - else: - logging.info(msg) - - -def SetupHtmlDirectory(html_directory): - """Setup the directory to store html results. - - All html related files are stored in the "rebaseline_html" subdirectory. - - Args: - html_directory: parent directory that stores the rebaselining results. - If None, a temp directory is created. - - Returns: - the directory that stores the html related rebaselining results. - """ - - if not html_directory: - html_directory = tempfile.mkdtemp() - elif not os.path.exists(html_directory): - os.mkdir(html_directory) - - html_directory = os.path.join(html_directory, 'rebaseline_html') - logging.info('Html directory: "%s"', html_directory) - - if os.path.exists(html_directory): - shutil.rmtree(html_directory, True) - logging.info('Deleted file at html directory: "%s"', html_directory) - - if not os.path.exists(html_directory): - os.mkdir(html_directory) - return html_directory - - -def GetResultFileFullpath(html_directory, baseline_filename, platform, - result_type): - """Get full path of the baseline result file. - - Args: - html_directory: directory that stores the html related files. - baseline_filename: name of the baseline file. - platform: win, linux or mac - result_type: type of the baseline result: '.txt', '.png'. - - Returns: - Full path of the baseline file for rebaselining result comparison. 
- """ - - base, ext = os.path.splitext(baseline_filename) - result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) - fullpath = os.path.join(html_directory, result_filename) - logging.debug(' Result file full path: "%s".', fullpath) - return fullpath - - -class Rebaseliner(object): - """Class to produce new baselines for a given platform.""" - - REVISION_REGEX = r'' - - def __init__(self, platform, options): - self._file_dir = path_utils.GetAbsolutePath( - os.path.dirname(sys.argv[0])) - self._platform = platform - self._options = options - self._rebaselining_tests = [] - self._rebaselined_tests = [] - - # Create tests and expectations helper which is used to: - # -. compile list of tests that need rebaselining. - # -. update the tests in test_expectations file after rebaseline - # is done. - self._test_expectations = \ - test_expectations.TestExpectations(None, - self._file_dir, - platform, - False, - False) - - self._repo_type = self._GetRepoType() - - def Run(self, backup): - """Run rebaseline process.""" - - LogDashedString('Compiling rebaselining tests', self._platform) - if not self._CompileRebaseliningTests(): - return True - - LogDashedString('Downloading archive', self._platform) - archive_file = self._DownloadBuildBotArchive() - logging.info('') - if not archive_file: - logging.error('No archive found.') - return False - - LogDashedString('Extracting and adding new baselines', self._platform) - if not self._ExtractAndAddNewBaselines(archive_file): - return False - - LogDashedString('Updating rebaselined tests in file', self._platform) - self._UpdateRebaselinedTestsInFile(backup) - logging.info('') - - if len(self._rebaselining_tests) != len(self._rebaselined_tests): - logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' - 'REBASELINED.') - logging.warning(' Total tests needing rebaselining: %d', - len(self._rebaselining_tests)) - logging.warning(' Total tests rebaselined: %d', - len(self._rebaselined_tests)) - return False - - logging.warning('All tests needing rebaselining were successfully ' - 'rebaselined.') - - return True - - def GetRebaseliningTests(self): - return self._rebaselining_tests - - def _GetRepoType(self): - """Get the repository type that client is using.""" - - output, return_code = RunShellWithReturnCode(['svn', 'info'], False) - if return_code == 0: - return REPO_SVN - - return REPO_UNKNOWN - - def _CompileRebaseliningTests(self): - """Compile list of tests that need rebaselining for the platform. - - Returns: - List of tests that need rebaselining or - None if there is no such test. - """ - - self._rebaselining_tests = \ - self._test_expectations.GetRebaseliningFailures() - if not self._rebaselining_tests: - logging.warn('No tests found that need rebaselining.') - return None - - logging.info('Total number of tests needing rebaselining ' - 'for "%s": "%d"', self._platform, - len(self._rebaselining_tests)) - - test_no = 1 - for test in self._rebaselining_tests: - logging.info(' %d: %s', test_no, test) - test_no += 1 - - return self._rebaselining_tests - - def _GetLatestRevision(self, url): - """Get the latest layout test revision number from buildbot. - - Args: - url: Url to retrieve layout test revision numbers. - - Returns: - latest revision or - None on failure. 
- """ - - logging.debug('Url to retrieve revision: "%s"', url) - - f = urllib.urlopen(url) - content = f.read() - f.close() - - revisions = re.findall(self.REVISION_REGEX, content) - if not revisions: - logging.error('Failed to find revision, content: "%s"', content) - return None - - revisions.sort(key=int) - logging.info('Latest revision: "%s"', revisions[len(revisions) - 1]) - return revisions[len(revisions) - 1] - - def _GetArchiveDirName(self, platform, webkit_canary): - """Get name of the layout test archive directory. - - Returns: - Directory name or - None on failure - """ - - if webkit_canary: - platform += '-canary' - - if platform in ARCHIVE_DIR_NAME_DICT: - return ARCHIVE_DIR_NAME_DICT[platform] - else: - logging.error('Cannot find platform key %s in archive ' - 'directory name dictionary', platform) - return None - - def _GetArchiveUrl(self): - """Generate the url to download latest layout test archive. - - Returns: - Url to download archive or - None on failure - """ - - dir_name = self._GetArchiveDirName(self._platform, - self._options.webkit_canary) - if not dir_name: - return None - - logging.debug('Buildbot platform dir name: "%s"', dir_name) - - url_base = '%s/%s/' % (self._options.archive_url, dir_name) - latest_revision = self._GetLatestRevision(url_base) - if latest_revision is None or latest_revision <= 0: - return None - - archive_url = ('%s%s/layout-test-results.zip' % (url_base, - latest_revision)) - logging.info('Archive url: "%s"', archive_url) - return archive_url - - def _DownloadBuildBotArchive(self): - """Download layout test archive file from buildbot. - - Returns: - True if download succeeded or - False otherwise. - """ - - url = self._GetArchiveUrl() - if url is None: - return None - - fn = urllib.urlretrieve(url)[0] - logging.info('Archive downloaded and saved to file: "%s"', fn) - return fn - - def _ExtractAndAddNewBaselines(self, archive_file): - """Extract new baselines from archive and add them to SVN repository. - - Args: - archive_file: full path to the archive file. - - Returns: - List of tests that have been rebaselined or - None on failure. - """ - - zip_file = zipfile.ZipFile(archive_file, 'r') - zip_namelist = zip_file.namelist() - - logging.debug('zip file namelist:') - for name in zip_namelist: - logging.debug(' ' + name) - - platform = path_utils.PlatformName(self._platform) - logging.debug('Platform dir: "%s"', platform) - - test_no = 1 - self._rebaselined_tests = [] - for test in self._rebaselining_tests: - logging.info('Test %d: %s', test_no, test) - - found = False - svn_error = False - test_basename = os.path.splitext(test)[0] - for suffix in BASELINE_SUFFIXES: - archive_test_name = ('layout-test-results/%s-actual%s' % - (test_basename, suffix)) - logging.debug(' Archive test file name: "%s"', - archive_test_name) - if not archive_test_name in zip_namelist: - logging.info(' %s file not in archive.', suffix) - continue - - found = True - logging.info(' %s file found in archive.', suffix) - - # Extract new baseline from archive and save it to a temp file. 
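# Illustrative note (revision and test name are hypothetical): with the
# default archive_url, platform 'mac' and buildbot revision 12345,
# _GetArchiveUrl() above resolves to
#   http://build.chromium.org/buildbot/layout_test_results/webkit-rel-mac5/12345/layout-test-results.zip
# and a text baseline for a test fast/dom/foo.html is looked up in the zip as
#   layout-test-results/fast/dom/foo-actual.txt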
- data = zip_file.read(archive_test_name) - temp_fd, temp_name = tempfile.mkstemp(suffix) - f = os.fdopen(temp_fd, 'wb') - f.write(data) - f.close() - - expected_filename = '%s-expected%s' % (test_basename, suffix) - expected_fullpath = os.path.join( - path_utils.ChromiumBaselinePath(platform), - expected_filename) - expected_fullpath = os.path.normpath(expected_fullpath) - logging.debug(' Expected file full path: "%s"', - expected_fullpath) - - # TODO(victorw): for now, the rebaselining tool checks whether - # or not THIS baseline is duplicate and should be skipped. - # We could improve the tool to check all baselines in upper - # and lower - # levels and remove all duplicated baselines. - if self._IsDupBaseline(temp_name, - expected_fullpath, - test, - suffix, - self._platform): - os.remove(temp_name) - self._DeleteBaseline(expected_fullpath) - continue - - # Create the new baseline directory if it doesn't already - # exist. - path_utils.MaybeMakeDirectory( - os.path.dirname(expected_fullpath)) - - shutil.move(temp_name, expected_fullpath) - - if not self._SvnAdd(expected_fullpath): - svn_error = True - elif suffix != '.checksum': - self._CreateHtmlBaselineFiles(expected_fullpath) - - if not found: - logging.warn(' No new baselines found in archive.') - else: - if svn_error: - logging.warn(' Failed to add baselines to SVN.') - else: - logging.info(' Rebaseline succeeded.') - self._rebaselined_tests.append(test) - - test_no += 1 - - zip_file.close() - os.remove(archive_file) - - return self._rebaselined_tests - - def _IsDupBaseline(self, new_baseline, baseline_path, test, suffix, - platform): - """Check whether a baseline is duplicate and can fallback to same - baseline for another platform. For example, if a test has same - baseline on linux and windows, then we only store windows - baseline and linux baseline will fallback to the windows version. - - Args: - expected_filename: baseline expectation file name. - test: test name. - suffix: file suffix of the expected results, including dot; - e.g. '.txt' or '.png'. - platform: baseline platform 'mac', 'win' or 'linux'. - - Returns: - True if the baseline is unnecessary. - False otherwise. - """ - test_filepath = os.path.join(path_utils.LayoutTestsDir(), test) - all_baselines = path_utils.ExpectedBaselines(test_filepath, - suffix, - platform, - True) - for (fallback_dir, fallback_file) in all_baselines: - if fallback_dir and fallback_file: - fallback_fullpath = os.path.normpath( - os.path.join(fallback_dir, fallback_file)) - if fallback_fullpath.lower() != baseline_path.lower(): - if not self._DiffBaselines(new_baseline, - fallback_fullpath): - logging.info(' Found same baseline at %s', - fallback_fullpath) - return True - else: - return False - - return False - - def _DiffBaselines(self, file1, file2): - """Check whether two baselines are different. - - Args: - file1, file2: full paths of the baselines to compare. - - Returns: - True if two files are different or have different extensions. - False otherwise. - """ - - ext1 = os.path.splitext(file1)[1].upper() - ext2 = os.path.splitext(file2)[1].upper() - if ext1 != ext2: - logging.warn('Files to compare have different ext. ' - 'File1: %s; File2: %s', file1, file2) - return True - - if ext1 == '.PNG': - return image_diff.ImageDiff(self._platform, '').DiffFiles(file1, - file2) - else: - return text_diff.TestTextDiff(self._platform, '').DiffFiles(file1, - file2) - - def _DeleteBaseline(self, filename): - """Remove the file from repository and delete it from disk. 
- - Args: - filename: full path of the file to delete. - """ - - if not filename or not os.path.isfile(filename): - return - - if self._repo_type == REPO_SVN: - parent_dir, basename = os.path.split(filename) - original_dir = os.getcwd() - os.chdir(parent_dir) - RunShell(['svn', 'delete', '--force', basename], False) - os.chdir(original_dir) - else: - os.remove(filename) - - def _UpdateRebaselinedTestsInFile(self, backup): - """Update the rebaselined tests in test expectations file. - - Args: - backup: if True, backup the original test expectations file. - - Returns: - no - """ - - if self._rebaselined_tests: - self._test_expectations.RemovePlatformFromFile( - self._rebaselined_tests, self._platform, backup) - else: - logging.info('No test was rebaselined so nothing to remove.') - - def _SvnAdd(self, filename): - """Add the file to SVN repository. - - Args: - filename: full path of the file to add. - - Returns: - True if the file already exists in SVN or is sucessfully added - to SVN. - False otherwise. - """ - - if not filename: - return False - - parent_dir, basename = os.path.split(filename) - if self._repo_type != REPO_SVN or parent_dir == filename: - logging.info("No svn checkout found, skip svn add.") - return True - - original_dir = os.getcwd() - os.chdir(parent_dir) - status_output = RunShell(['svn', 'status', basename], False) - os.chdir(original_dir) - output = status_output.upper() - if output.startswith('A') or output.startswith('M'): - logging.info(' File already added to SVN: "%s"', filename) - return True - - if output.find('IS NOT A WORKING COPY') >= 0: - logging.info(' File is not a working copy, add its parent: "%s"', - parent_dir) - return self._SvnAdd(parent_dir) - - os.chdir(parent_dir) - add_output = RunShell(['svn', 'add', basename], True) - os.chdir(original_dir) - output = add_output.upper().rstrip() - if output.startswith('A') and output.find(basename.upper()) >= 0: - logging.info(' Added new file: "%s"', filename) - self._SvnPropSet(filename) - return True - - if (not status_output) and (add_output.upper().find( - 'ALREADY UNDER VERSION CONTROL') >= 0): - logging.info(' File already under SVN and has no change: "%s"', - filename) - return True - - logging.warn(' Failed to add file to SVN: "%s"', filename) - logging.warn(' Svn status output: "%s"', status_output) - logging.warn(' Svn add output: "%s"', add_output) - return False - - def _SvnPropSet(self, filename): - """Set the baseline property - - Args: - filename: full path of the file to add. - - Returns: - True if the file already exists in SVN or is sucessfully added - to SVN. - False otherwise. - """ - ext = os.path.splitext(filename)[1].upper() - if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM': - return - - parent_dir, basename = os.path.split(filename) - original_dir = os.getcwd() - os.chdir(parent_dir) - if ext == '.PNG': - cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename] - else: - cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename] - - logging.debug(' Set svn prop: %s', ' '.join(cmd)) - RunShell(cmd, False) - os.chdir(original_dir) - - def _CreateHtmlBaselineFiles(self, baseline_fullpath): - """Create baseline files (old, new and diff) in html directory. - - The files are used to compare the rebaselining results. - - Args: - baseline_fullpath: full path of the expected baseline file. - """ - - if not baseline_fullpath or not os.path.exists(baseline_fullpath): - return - - # Copy the new baseline to html directory for result comparison. 
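# Illustrative note (filenames hypothetical): for a baseline foo-expected.txt
# on platform 'mac', GetResultFileFullpath() places the comparison copies in
# the html directory as
#   foo-expected-mac-new.txt   (copy of the freshly extracted baseline)
#   foo-expected-mac-old.txt   ('svn cat -r BASE' of the previous baseline)
#   foo-expected-mac-diff.txt  ('svn diff' output, for .txt baselines only)
# which are the files HtmlGenerator links from rebaseline.html.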
- baseline_filename = os.path.basename(baseline_fullpath) - new_file = GetResultFileFullpath(self._options.html_directory, - baseline_filename, - self._platform, - 'new') - shutil.copyfile(baseline_fullpath, new_file) - logging.info(' Html: copied new baseline file from "%s" to "%s".', - baseline_fullpath, new_file) - - # Get the old baseline from SVN and save to the html directory. - output = RunShell(['svn', 'cat', '-r', 'BASE', baseline_fullpath]) - if (not output) or (output.upper().rstrip().endswith( - 'NO SUCH FILE OR DIRECTORY')): - logging.info(' No base file: "%s"', baseline_fullpath) - return - base_file = GetResultFileFullpath(self._options.html_directory, - baseline_filename, - self._platform, - 'old') - f = open(base_file, 'wb') - f.write(output) - f.close() - logging.info(' Html: created old baseline file: "%s".', - base_file) - - # Get the diff between old and new baselines and save to the html dir. - if baseline_filename.upper().endswith('.TXT'): - # If the user specified a custom diff command in their svn config - # file, then it'll be used when we do svn diff, which we don't want - # to happen since we want the unified diff. Using --diff-cmd=diff - # doesn't always work, since they can have another diff executable - # in their path that gives different line endings. So we use a - # bogus temp directory as the config directory, which gets - # around these problems. - if sys.platform.startswith("win"): - parent_dir = tempfile.gettempdir() - else: - parent_dir = sys.path[0] # tempdir is not secure. - bogus_dir = os.path.join(parent_dir, "temp_svn_config") - logging.debug(' Html: temp config dir: "%s".', bogus_dir) - if not os.path.exists(bogus_dir): - os.mkdir(bogus_dir) - delete_bogus_dir = True - else: - delete_bogus_dir = False - - output = RunShell(["svn", "diff", "--config-dir", bogus_dir, - baseline_fullpath]) - if output: - diff_file = GetResultFileFullpath(self._options.html_directory, - baseline_filename, - self._platform, - 'diff') - f = open(diff_file, 'wb') - f.write(output) - f.close() - logging.info(' Html: created baseline diff file: "%s".', - diff_file) - - if delete_bogus_dir: - shutil.rmtree(bogus_dir, True) - logging.debug(' Html: removed temp config dir: "%s".', - bogus_dir) - - -class HtmlGenerator(object): - """Class to generate rebaselining result comparison html.""" - - HTML_REBASELINE = ('' - '' - '' - 'Rebaselining Result Comparison (%(time)s)' - '' - '' - '' - '

[HTML template markup stripped during extraction: the constants HTML_REBASELINE, HTML_NO_REBASELINING_TESTS, HTML_TABLE_TEST, HTML_TR_TEST and HTML_TEST_DETAIL wrapped the page heading 'Rebaselining Result Comparison (%(time)s)', the '%(body)s' placeholder, the notice 'No tests found that need rebaselining.', per-test '%s' table rows, and a detail table with the column headers Baseline, Platform, Old, New and Difference; only these text fragments survive.]
') - HTML_TD_NOLINK = '%s' - HTML_TD_LINK = '%(name)s' - HTML_TD_LINK_IMG = ('' - '') - HTML_TR = '%s' - - def __init__(self, options, platforms, rebaselining_tests): - self._html_directory = options.html_directory - self._platforms = platforms - self._rebaselining_tests = rebaselining_tests - self._html_file = os.path.join(options.html_directory, - 'rebaseline.html') - - def GenerateHtml(self): - """Generate html file for rebaselining result comparison.""" - - logging.info('Generating html file') - - html_body = '' - if not self._rebaselining_tests: - html_body += self.HTML_NO_REBASELINING_TESTS - else: - tests = list(self._rebaselining_tests) - tests.sort() - - test_no = 1 - for test in tests: - logging.info('Test %d: %s', test_no, test) - html_body += self._GenerateHtmlForOneTest(test) - - html = self.HTML_REBASELINE % ({'time': time.asctime(), - 'body': html_body}) - logging.debug(html) - - f = open(self._html_file, 'w') - f.write(html) - f.close() - - logging.info('Baseline comparison html generated at "%s"', - self._html_file) - - def ShowHtml(self): - """Launch the rebaselining html in brwoser.""" - - logging.info('Launching html: "%s"', self._html_file) - - html_uri = path_utils.FilenameToUri(self._html_file) - webbrowser.open(html_uri, 1) - - logging.info('Html launched.') - - def _GenerateBaselineLinks(self, test_basename, suffix, platform): - """Generate links for baseline results (old, new and diff). - - Args: - test_basename: base filename of the test - suffix: baseline file suffixes: '.txt', '.png' - platform: win, linux or mac - - Returns: - html links for showing baseline results (old, new and diff) - """ - - baseline_filename = '%s-expected%s' % (test_basename, suffix) - logging.debug(' baseline filename: "%s"', baseline_filename) - - new_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'new') - logging.info(' New baseline file: "%s"', new_file) - if not os.path.exists(new_file): - logging.info(' No new baseline file: "%s"', new_file) - return '' - - old_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'old') - logging.info(' Old baseline file: "%s"', old_file) - if suffix == '.png': - html_td_link = self.HTML_TD_LINK_IMG - else: - html_td_link = self.HTML_TD_LINK - - links = '' - if os.path.exists(old_file): - links += html_td_link % {'uri': path_utils.FilenameToUri(old_file), - 'name': baseline_filename} - else: - logging.info(' No old baseline file: "%s"', old_file) - links += self.HTML_TD_NOLINK % '' - - links += html_td_link % {'uri': path_utils.FilenameToUri(new_file), - 'name': baseline_filename} - - diff_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'diff') - logging.info(' Baseline diff file: "%s"', diff_file) - if os.path.exists(diff_file): - links += html_td_link % {'uri': path_utils.FilenameToUri( - diff_file), 'name': 'Diff'} - else: - logging.info(' No baseline diff file: "%s"', diff_file) - links += self.HTML_TD_NOLINK % '' - - return links - - def _GenerateHtmlForOneTest(self, test): - """Generate html for one rebaselining test. - - Args: - test: layout test name - - Returns: - html that compares baseline results for the test. 
- """ - - test_basename = os.path.basename(os.path.splitext(test)[0]) - logging.info(' basename: "%s"', test_basename) - rows = [] - for suffix in BASELINE_SUFFIXES: - if suffix == '.checksum': - continue - - logging.info(' Checking %s files', suffix) - for platform in self._platforms: - links = self._GenerateBaselineLinks(test_basename, suffix, - platform) - if links: - row = self.HTML_TD_NOLINK % self._GetBaselineResultType( - suffix) - row += self.HTML_TD_NOLINK % platform - row += links - logging.debug(' html row: %s', row) - - rows.append(self.HTML_TR % row) - - if rows: - test_path = os.path.join(path_utils.LayoutTestsDir(), test) - html = self.HTML_TR_TEST % (path_utils.FilenameToUri(test_path), - test) - html += self.HTML_TEST_DETAIL % ' '.join(rows) - - logging.debug(' html for test: %s', html) - return self.HTML_TABLE_TEST % html - - return '' - - def _GetBaselineResultType(self, suffix): - """Name of the baseline result type.""" - - if suffix == '.png': - return 'Pixel' - elif suffix == '.txt': - return 'Render Tree' - else: - return 'Other' - - -def main(): - """Main function to produce new baselines.""" - - option_parser = optparse.OptionParser() - option_parser.add_option('-v', '--verbose', - action='store_true', - default=False, - help='include debug-level logging.') - - option_parser.add_option('-p', '--platforms', - default='mac,win,win-xp,win-vista,linux', - help=('Comma delimited list of platforms ' - 'that need rebaselining.')) - - option_parser.add_option('-u', '--archive_url', - default=('http://build.chromium.org/buildbot/' - 'layout_test_results'), - help=('Url to find the layout test result archive' - ' file.')) - - option_parser.add_option('-w', '--webkit_canary', - action='store_true', - default=False, - help=('If True, pull baselines from webkit.org ' - 'canary bot.')) - - option_parser.add_option('-b', '--backup', - action='store_true', - default=False, - help=('Whether or not to backup the original test' - ' expectations file after rebaseline.')) - - option_parser.add_option('-d', '--html_directory', - default='', - help=('The directory that stores the results for' - ' rebaselining comparison.')) - - options = option_parser.parse_args()[0] - - # Set up our logging format. - log_level = logging.INFO - if options.verbose: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, - format=('%(asctime)s %(filename)s:%(lineno)-3d ' - '%(levelname)s %(message)s'), - datefmt='%y%m%d %H:%M:%S') - - # Verify 'platforms' option is valid - if not options.platforms: - logging.error('Invalid "platforms" option. --platforms must be ' - 'specified in order to rebaseline.') - sys.exit(1) - platforms = [p.strip().lower() for p in options.platforms.split(',')] - for platform in platforms: - if not platform in REBASELINE_PLATFORM_ORDER: - logging.error('Invalid platform: "%s"' % (platform)) - sys.exit(1) - - # Adjust the platform order so rebaseline tool is running at the order of - # 'mac', 'win' and 'linux'. This is in same order with layout test baseline - # search paths. It simplifies how the rebaseline tool detects duplicate - # baselines. Check _IsDupBaseline method for details. 
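# Illustrative note (paths and platform list are examples only): a typical
# invocation using the options defined above might be
#
#   python rebaseline.py --platforms mac,win,linux --backup \
#       --html_directory /tmp/rebaseline_results
#
# Whatever order is given on the command line, the loop below always
# rebaselines platforms in REBASELINE_PLATFORM_ORDER.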
- rebaseline_platforms = [] - for platform in REBASELINE_PLATFORM_ORDER: - if platform in platforms: - rebaseline_platforms.append(platform) - - options.html_directory = SetupHtmlDirectory(options.html_directory) - - rebaselining_tests = set() - backup = options.backup - for platform in rebaseline_platforms: - rebaseliner = Rebaseliner(platform, options) - - logging.info('') - LogDashedString('Rebaseline started', platform) - if rebaseliner.Run(backup): - # Only need to backup one original copy of test expectation file. - backup = False - LogDashedString('Rebaseline done', platform) - else: - LogDashedString('Rebaseline failed', platform, logging.ERROR) - - rebaselining_tests |= set(rebaseliner.GetRebaseliningTests()) - - logging.info('') - LogDashedString('Rebaselining result comparison started', None) - html_generator = HtmlGenerator(options, - rebaseline_platforms, - rebaselining_tests) - html_generator.GenerateHtml() - html_generator.ShowHtml() - LogDashedString('Rebaselining result comparison done', None) - - sys.exit(0) - -if '__main__' == __name__: - main() diff --git a/webkit/tools/layout_tests/rebaseline.sh b/webkit/tools/layout_tests/rebaseline.sh index 8deb5bf..4afbc28 100755 --- a/webkit/tools/layout_tests/rebaseline.sh +++ b/webkit/tools/layout_tests/rebaseline.sh @@ -13,4 +13,4 @@ PYTHON_PROG=python PYTHONPATH="${exec_dir}/../../../tools/python:$PYTHONPATH" export PYTHONPATH -"$PYTHON_PROG" "$exec_dir/rebaseline.py" "$@" +"$PYTHON_PROG" "$exec_dir/webkitpy/rebaseline.py" "$@" diff --git a/webkit/tools/layout_tests/run_webkit_tests.bat b/webkit/tools/layout_tests/run_webkit_tests.bat index 91fee71..176f9da 100644 --- a/webkit/tools/layout_tests/run_webkit_tests.bat +++ b/webkit/tools/layout_tests/run_webkit_tests.bat @@ -1 +1 @@ -@%~dp0\..\..\..\third_party\python_24\python.exe %~dp0\run_webkit_tests.py %* +@%~dp0\..\..\..\third_party\python_24\python.exe %~dp0\webkitpy\run_chromium_webkit_tests.py %* diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py deleted file mode 100755 index cfca88b..0000000 --- a/webkit/tools/layout_tests/run_webkit_tests.py +++ /dev/null @@ -1,1657 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Run layout tests using the test_shell. - -This is a port of the existing webkit test script run-webkit-tests. - -The TestRunner class runs a series of tests (TestType interface) against a set -of test files. If a test file fails a TestType, it returns a list TestFailure -objects to the TestRunner. The TestRunner then aggregates the TestFailures to -create a final report. - -This script reads several files, if they exist in the test_lists subdirectory -next to this script itself. Each should contain a list of paths to individual -tests or entire subdirectories of tests, relative to the outermost test -directory. Entire lines starting with '//' (comments) will be ignored. - -For details of the files' contents and purposes, see test_lists/README. 
-""" - -import errno -import glob -import logging -import math -import optparse -import os -import Queue -import random -import re -import shutil -import subprocess -import sys -import time -import traceback - -from layout_package import apache_http_server -from layout_package import test_expectations -from layout_package import http_server -from layout_package import json_layout_results_generator -from layout_package import metered_stream -from layout_package import path_utils -from layout_package import platform_utils -from layout_package import test_failures -from layout_package import test_shell_thread -from layout_package import test_files -from layout_package import websocket_server -from test_types import fuzzy_image_diff -from test_types import image_diff -from test_types import test_type_base -from test_types import text_diff - -sys.path.append(path_utils.PathFromBase('third_party')) -import simplejson - -# Indicates that we want detailed progress updates in the output (prints -# directory-by-directory feedback). -LOG_DETAILED_PROGRESS = 'detailed-progress' - -# Log any unexpected results while running (instead of just at the end). -LOG_UNEXPECTED = 'unexpected' - -# Builder base URL where we have the archived test results. -BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" - -TestExpectationsFile = test_expectations.TestExpectationsFile - - -class TestInfo: - """Groups information about a test for easy passing of data.""" - - def __init__(self, filename, timeout): - """Generates the URI and stores the filename and timeout for this test. - Args: - filename: Full path to the test. - timeout: Timeout for running the test in TestShell. - """ - self.filename = filename - self.uri = path_utils.FilenameToUri(filename) - self.timeout = timeout - expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') - try: - self.image_hash = open(expected_hash_file, "r").read() - except IOError, e: - if errno.ENOENT != e.errno: - raise - self.image_hash = None - - -class ResultSummary(object): - """A class for partitioning the test results we get into buckets. - - This class is basically a glorified struct and it's private to this file - so we don't bother with any information hiding.""" - - def __init__(self, expectations, test_files): - self.total = len(test_files) - self.remaining = self.total - self.expectations = expectations - self.expected = 0 - self.unexpected = 0 - self.tests_by_expectation = {} - self.tests_by_timeline = {} - self.results = {} - self.unexpected_results = {} - self.failures = {} - self.tests_by_expectation[test_expectations.SKIP] = set() - for expectation in TestExpectationsFile.EXPECTATIONS.values(): - self.tests_by_expectation[expectation] = set() - for timeline in TestExpectationsFile.TIMELINES.values(): - self.tests_by_timeline[timeline] = ( - expectations.GetTestsWithTimeline(timeline)) - - def Add(self, test, failures, result, expected): - """Add a result into the appropriate bin. - - Args: - test: test file name - failures: list of failure objects from test execution - result: result of test (PASS, IMAGE, etc.). - expected: whether the result was what we expected it to be. 
- """ - - self.tests_by_expectation[result].add(test) - self.results[test] = result - self.remaining -= 1 - if len(failures): - self.failures[test] = failures - if expected: - self.expected += 1 - else: - self.unexpected_results[test] = result - self.unexpected += 1 - - -class TestRunner: - """A class for managing running a series of tests on a series of layout - test files.""" - - HTTP_SUBDIR = os.sep.join(['', 'http', '']) - WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) - - # The per-test timeout in milliseconds, if no --time-out-ms option was - # given to run_webkit_tests. This should correspond to the default timeout - # in test_shell.exe. - DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 - - NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 - - def __init__(self, options, meter): - """Initialize test runner data structures. - - Args: - options: a dictionary of command line options - meter: a MeteredStream object to record updates to. - """ - self._options = options - self._meter = meter - - if options.use_apache: - self._http_server = apache_http_server.LayoutTestApacheHttpd( - options.results_directory) - else: - self._http_server = http_server.Lighttpd(options.results_directory) - - self._websocket_server = websocket_server.PyWebSocket( - options.results_directory) - # disable wss server. need to install pyOpenSSL on buildbots. - # self._websocket_secure_server = websocket_server.PyWebSocket( - # options.results_directory, use_tls=True, port=9323) - - # a list of TestType objects - self._test_types = [] - - # a set of test files, and the same tests as a list - self._test_files = set() - self._test_files_list = None - self._file_dir = path_utils.GetAbsolutePath( - os.path.dirname(sys.argv[0])) - self._result_queue = Queue.Queue() - - # These are used for --log detailed-progress to track status by - # directory. - self._current_dir = None - self._current_progress_str = "" - self._current_test_number = 0 - - def __del__(self): - logging.debug("flushing stdout") - sys.stdout.flush() - logging.debug("flushing stderr") - sys.stderr.flush() - logging.debug("stopping http server") - # Stop the http server. - self._http_server.Stop() - # Stop the Web Socket / Web Socket Secure servers. - self._websocket_server.Stop() - # self._websocket_secure_server.Stop() - - def GatherFilePaths(self, paths): - """Find all the files to test. - - Args: - paths: a list of globs to use instead of the defaults.""" - self._test_files = test_files.GatherTestFiles(paths) - - def ParseExpectations(self, platform, is_debug_mode): - """Parse the expectations from the test_list files and return a data - structure holding them. Throws an error if the test_list files have - invalid syntax.""" - if self._options.lint_test_files: - test_files = None - else: - test_files = self._test_files - - try: - self._expectations = test_expectations.TestExpectations(test_files, - self._file_dir, platform, is_debug_mode, - self._options.lint_test_files) - return self._expectations - except Exception, err: - if self._options.lint_test_files: - print str(err) - else: - raise err - - def PrepareListsAndPrintOutput(self, write): - """Create appropriate subsets of test lists and returns a - ResultSummary object. Also prints expected test counts. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - """ - - # Remove skipped - both fixable and ignored - files from the - # top-level list of files to test. 
- num_all_test_files = len(self._test_files) - write("Found: %d tests" % (len(self._test_files))) - skipped = set() - if num_all_test_files > 1 and not self._options.force: - skipped = self._expectations.GetTestsWithResultType( - test_expectations.SKIP) - self._test_files -= skipped - - # Create a sorted list of test files so the subset chunk, - # if used, contains alphabetically consecutive tests. - self._test_files_list = list(self._test_files) - if self._options.randomize_order: - random.shuffle(self._test_files_list) - else: - self._test_files_list.sort() - - # If the user specifies they just want to run a subset of the tests, - # just grab a subset of the non-skipped tests. - if self._options.run_chunk or self._options.run_part: - chunk_value = self._options.run_chunk or self._options.run_part - test_files = self._test_files_list - try: - (chunk_num, chunk_len) = chunk_value.split(":") - chunk_num = int(chunk_num) - assert(chunk_num >= 0) - test_size = int(chunk_len) - assert(test_size > 0) - except: - logging.critical("invalid chunk '%s'" % chunk_value) - sys.exit(1) - - # Get the number of tests - num_tests = len(test_files) - - # Get the start offset of the slice. - if self._options.run_chunk: - chunk_len = test_size - # In this case chunk_num can be really large. We need - # to make the slave fit in the current number of tests. - slice_start = (chunk_num * chunk_len) % num_tests - else: - # Validate the data. - assert(test_size <= num_tests) - assert(chunk_num <= test_size) - - # To count the chunk_len, and make sure we don't skip - # some tests, we round to the next value that fits exactly - # all the parts. - rounded_tests = num_tests - if rounded_tests % test_size != 0: - rounded_tests = (num_tests + test_size - - (num_tests % test_size)) - - chunk_len = rounded_tests / test_size - slice_start = chunk_len * (chunk_num - 1) - # It does not mind if we go over test_size. - - # Get the end offset of the slice. - slice_end = min(num_tests, slice_start + chunk_len) - - files = test_files[slice_start:slice_end] - - tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( - (slice_end - slice_start), slice_start, slice_end, num_tests) - write(tests_run_msg) - - # If we reached the end and we don't have enough tests, we run some - # from the beginning. - if (self._options.run_chunk and - (slice_end - slice_start < chunk_len)): - extra = 1 + chunk_len - (slice_end - slice_start) - extra_msg = (' last chunk is partial, appending [0:%d]' % - extra) - write(extra_msg) - tests_run_msg += "\n" + extra_msg - files.extend(test_files[0:extra]) - tests_run_filename = os.path.join(self._options.results_directory, - "tests_run.txt") - tests_run_file = open(tests_run_filename, "w") - tests_run_file.write(tests_run_msg + "\n") - tests_run_file.close() - - len_skip_chunk = int(len(files) * len(skipped) / - float(len(self._test_files))) - skip_chunk_list = list(skipped)[0:len_skip_chunk] - skip_chunk = set(skip_chunk_list) - - # Update expectations so that the stats are calculated correctly. - # We need to pass a list that includes the right # of skipped files - # to ParseExpectations so that ResultSummary() will get the correct - # stats. So, we add in the subset of skipped files, and then - # subtract them back out. 
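The --run-chunk arithmetic above is easier to see with concrete numbers. A minimal sketch with made-up values (the real code operates on the sorted test list and the skipped set):

    # --run-chunk n:len picks a slice of len tests, wrapping modulo the
    # total so an arbitrarily large chunk number still maps onto the list.
    num_tests, chunk_num, chunk_len = 1000, 25, 50    # --run-chunk 25:50
    slice_start = (chunk_num * chunk_len) % num_tests    # 1250 % 1000 == 250
    slice_end = min(num_tests, slice_start + chunk_len)  # 300
    num_in_chunk = slice_end - slice_start               # 50 tests in this chunk
    # A share of the skipped tests proportional to the chunk size is added
    # back so the summary statistics stay consistent with the full set.
    num_skipped = 100
    len_skip_chunk = int(num_in_chunk * num_skipped / float(num_tests))
    assert (slice_start, slice_end, len_skip_chunk) == (250, 300, 5)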
- self._test_files_list = files + skip_chunk_list - self._test_files = set(self._test_files_list) - - self._expectations = self.ParseExpectations( - path_utils.PlatformName(), options.target == 'Debug') - - self._test_files = set(files) - self._test_files_list = files - else: - skip_chunk = skipped - - result_summary = ResultSummary(self._expectations, - self._test_files | skip_chunk) - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.PASS, "passes") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.FAIL, "failures") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.FLAKY, "flaky") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.SKIP, "skipped") - - - if self._options.force: - write('Running all tests, including skips (--force)') - else: - # Note that we don't actually run the skipped tests (they were - # subtracted out of self._test_files, above), but we stub out the - # results here so the statistics can remain accurate. - for test in skip_chunk: - result_summary.Add(test, [], test_expectations.SKIP, - expected=True) - write("") - - return result_summary - - def AddTestType(self, test_type): - """Add a TestType to the TestRunner.""" - self._test_types.append(test_type) - - def _GetDirForTestFile(self, test_file): - """Returns the highest-level directory by which to shard the given - test file.""" - index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) - - test_file = test_file[index + len('LayoutTests/'):] - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - test_file = test_file_parts[1] - - # The http tests are very stable on mac/linux. - # TODO(ojan): Make the http server on Windows be apache so we can - # turn shard the http tests there as well. Switching to apache is - # what made them stable on linux/mac. - return_value = directory - while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) - and test_file.find(os.sep) >= 0): - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - return_value = os.path.join(return_value, directory) - test_file = test_file_parts[1] - - return return_value - - def _GetTestInfoForFile(self, test_file): - """Returns the appropriate TestInfo object for the file. Mostly this - is used for looking up the timeout value (in ms) to use for the given - test.""" - if self._expectations.HasModifier(test_file, test_expectations.SLOW): - return TestInfo(test_file, self._options.slow_time_out_ms) - return TestInfo(test_file, self._options.time_out_ms) - - def _GetTestFileQueue(self, test_files): - """Create the thread safe queue of lists of (test filenames, test URIs) - tuples. Each TestShellThread pulls a list from this queue and runs - those tests in order before grabbing the next available list. - - Shard the lists by directory. This helps ensure that tests that depend - on each other (aka bad tests!) continue to run together as most - cross-tests dependencies tend to occur within the same directory. - - Return: - The Queue of lists of TestInfo objects. 
- """ - - if (self._options.experimental_fully_parallel or - self._IsSingleThreaded()): - filename_queue = Queue.Queue() - for test_file in test_files: - filename_queue.put('.', [self._GetTestInfoForFile(test_file)]) - return filename_queue - - tests_by_dir = {} - for test_file in test_files: - directory = self._GetDirForTestFile(test_file) - tests_by_dir.setdefault(directory, []) - tests_by_dir[directory].append(self._GetTestInfoForFile(test_file)) - - # Sort by the number of tests in the dir so that the ones with the - # most tests get run first in order to maximize parallelization. - # Number of tests is a good enough, but not perfect, approximation - # of how long that set of tests will take to run. We can't just use - # a PriorityQueue until we move # to Python 2.6. - test_lists = [] - http_tests = None - for directory in tests_by_dir: - test_list = tests_by_dir[directory] - # Keep the tests in alphabetical order. - # TODO: Remove once tests are fixed so they can be run in any - # order. - test_list.reverse() - test_list_tuple = (directory, test_list) - if directory == 'LayoutTests' + os.sep + 'http': - http_tests = test_list_tuple - else: - test_lists.append(test_list_tuple) - test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) - - # Put the http tests first. There are only a couple hundred of them, - # but each http test takes a very long time to run, so sorting by the - # number of tests doesn't accurately capture how long they take to run. - if http_tests: - test_lists.insert(0, http_tests) - - filename_queue = Queue.Queue() - for item in test_lists: - filename_queue.put(item) - return filename_queue - - def _GetTestShellArgs(self, index): - """Returns the tuple of arguments for tests and for test_shell.""" - shell_args = [] - test_args = test_type_base.TestArguments() - if not self._options.no_pixel_tests: - png_path = os.path.join(self._options.results_directory, - "png_result%s.png" % index) - shell_args.append("--pixel-tests=" + png_path) - test_args.png_path = png_path - - test_args.new_baseline = self._options.new_baseline - - test_args.show_sources = self._options.sources - - if self._options.startup_dialog: - shell_args.append('--testshell-startup-dialog') - - if self._options.gp_fault_error_box: - shell_args.append('--gp-fault-error-box') - - return (test_args, shell_args) - - def _ContainsTests(self, subdir): - for test_file in self._test_files_list: - if test_file.find(subdir) >= 0: - return True - return False - - def _InstantiateTestShellThreads(self, test_shell_binary, test_files, - result_summary): - """Instantitates and starts the TestShellThread(s). - - Return: - The list of threads. - """ - test_shell_command = [test_shell_binary] - - if self._options.wrapper: - # This split() isn't really what we want -- it incorrectly will - # split quoted strings within the wrapper argument -- but in - # practice it shouldn't come up and the --help output warns - # about it anyway. - test_shell_command = (self._options.wrapper.split() + - test_shell_command) - - filename_queue = self._GetTestFileQueue(test_files) - - # Instantiate TestShellThreads and start them. - threads = [] - for i in xrange(int(self._options.num_test_shells)): - # Create separate TestTypes instances for each thread. 
- test_types = [] - for t in self._test_types: - test_types.append(t(self._options.platform, - self._options.results_directory)) - - test_args, shell_args = self._GetTestShellArgs(i) - thread = test_shell_thread.TestShellThread(filename_queue, - self._result_queue, - test_shell_command, - test_types, - test_args, - shell_args, - self._options) - if self._IsSingleThreaded(): - thread.RunInMainThread(self, result_summary) - else: - thread.start() - threads.append(thread) - - return threads - - def _StopLayoutTestHelper(self, proc): - """Stop the layout test helper and closes it down.""" - if proc: - logging.debug("Stopping layout test helper") - proc.stdin.write("x\n") - proc.stdin.close() - proc.wait() - - def _IsSingleThreaded(self): - """Returns whether we should run all the tests in the main thread.""" - return int(self._options.num_test_shells) == 1 - - def _RunTests(self, test_shell_binary, file_list, result_summary): - """Runs the tests in the file_list. - - Return: A tuple (failures, thread_timings, test_timings, - individual_test_timings) - failures is a map from test to list of failure types - thread_timings is a list of dicts with the total runtime - of each thread with 'name', 'num_tests', 'total_time' properties - test_timings is a list of timings for each sharded subdirectory - of the form [time, directory_name, num_tests] - individual_test_timings is a list of run times for each test - in the form {filename:filename, test_run_time:test_run_time} - result_summary: summary object to populate with the results - """ - threads = self._InstantiateTestShellThreads(test_shell_binary, - file_list, - result_summary) - - # Wait for the threads to finish and collect test failures. - failures = {} - test_timings = {} - individual_test_timings = [] - thread_timings = [] - try: - for thread in threads: - while thread.isAlive(): - # Let it timeout occasionally so it can notice a - # KeyboardInterrupt. Actually, the timeout doesn't - # really matter: apparently it suffices to not use - # an indefinite blocking join for it to - # be interruptible by KeyboardInterrupt. - thread.join(0.1) - self.UpdateSummary(result_summary) - thread_timings.append({'name': thread.getName(), - 'num_tests': thread.GetNumTests(), - 'total_time': thread.GetTotalTime()}) - test_timings.update(thread.GetDirectoryTimingStats()) - individual_test_timings.extend(thread.GetIndividualTestStats()) - except KeyboardInterrupt: - for thread in threads: - thread.Cancel() - self._StopLayoutTestHelper(layout_test_helper_proc) - raise - for thread in threads: - # Check whether a TestShellThread died before normal completion. - exception_info = thread.GetExceptionInfo() - if exception_info is not None: - # Re-raise the thread's exception here to make it clear that - # testing was aborted. Otherwise, the tests that did not run - # would be assumed to have passed. - raise exception_info[0], exception_info[1], exception_info[2] - - # Make sure we pick up any remaining tests. - self.UpdateSummary(result_summary) - return (thread_timings, test_timings, individual_test_timings) - - def Run(self, result_summary): - """Run all our tests on all our test files. - - For each test file, we run each test type. If there are any failures, - we collect them for reporting. - - Args: - result_summary: a summary object tracking the test results. - - Return: - We return nonzero if there are regressions compared to the last run. 
- """ - if not self._test_files: - return 0 - start_time = time.time() - test_shell_binary = path_utils.TestShellPath(self._options.target) - - # Start up any helper needed - layout_test_helper_proc = None - if not options.no_pixel_tests: - helper_path = path_utils.LayoutTestHelperPath(self._options.target) - if len(helper_path): - logging.debug("Starting layout helper %s" % helper_path) - layout_test_helper_proc = subprocess.Popen( - [helper_path], stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=None) - is_ready = layout_test_helper_proc.stdout.readline() - if not is_ready.startswith('ready'): - logging.error("layout_test_helper failed to be ready") - - # Check that the system dependencies (themes, fonts, ...) are correct. - if not self._options.nocheck_sys_deps: - proc = subprocess.Popen([test_shell_binary, - "--check-layout-test-sys-deps"]) - if proc.wait() != 0: - logging.info("Aborting because system dependencies check " - "failed.\n To override, invoke with " - "--nocheck-sys-deps") - sys.exit(1) - - if self._ContainsTests(self.HTTP_SUBDIR): - self._http_server.Start() - - if self._ContainsTests(self.WEBSOCKET_SUBDIR): - self._websocket_server.Start() - # self._websocket_secure_server.Start() - - thread_timings, test_timings, individual_test_timings = ( - self._RunTests(test_shell_binary, self._test_files_list, - result_summary)) - - # We exclude the crashes from the list of results to retry, because - # we want to treat even a potentially flaky crash as an error. - failures = self._GetFailures(result_summary, include_crashes=False) - retries = 0 - retry_summary = result_summary - while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and - len(failures)): - logging.debug("Retrying %d unexpected failure(s)" % len(failures)) - retries += 1 - retry_summary = ResultSummary(self._expectations, failures.keys()) - self._RunTests(test_shell_binary, failures.keys(), retry_summary) - failures = self._GetFailures(retry_summary, include_crashes=True) - - self._StopLayoutTestHelper(layout_test_helper_proc) - end_time = time.time() - - write = CreateLoggingWriter(self._options, 'timing') - self._PrintTimingStatistics(write, end_time - start_time, - thread_timings, test_timings, - individual_test_timings, - result_summary) - - self._meter.update("") - - if self._options.verbose: - # We write this block to stdout for compatibility with the - # buildbot log parser, which only looks at stdout, not stderr :( - write = lambda s: sys.stdout.write("%s\n" % s) - else: - write = CreateLoggingWriter(self._options, 'actual') - - self._PrintResultSummary(write, result_summary) - - sys.stdout.flush() - sys.stderr.flush() - - if (LOG_DETAILED_PROGRESS in self._options.log or - (LOG_UNEXPECTED in self._options.log and - result_summary.total != result_summary.expected)): - print - - # This summary data gets written to stdout regardless of log level - self._PrintOneLineSummary(result_summary.total, - result_summary.expected) - - unexpected_results = self._SummarizeUnexpectedResults(result_summary, - retry_summary) - self._PrintUnexpectedResults(unexpected_results) - - # Write the same data to log files. - self._WriteJSONFiles(unexpected_results, result_summary, - individual_test_timings) - - # Write the summary to disk (results.html) and maybe open the - # test_shell to this file. 
- wrote_results = self._WriteResultsHtmlFile(result_summary) - if not self._options.noshow_results and wrote_results: - self._ShowResultsHtmlFile() - - # Ignore flaky failures and unexpected passes so we don't turn the - # bot red for those. - return unexpected_results['num_regressions'] - - def UpdateSummary(self, result_summary): - """Update the summary while running tests.""" - while True: - try: - (test, fail_list) = self._result_queue.get_nowait() - result = test_failures.DetermineResultType(fail_list) - expected = self._expectations.MatchesAnExpectedResult(test, - result) - result_summary.Add(test, fail_list, result, expected) - if (LOG_DETAILED_PROGRESS in self._options.log and - (self._options.experimental_fully_parallel or - self._IsSingleThreaded())): - self._DisplayDetailedProgress(result_summary) - else: - if not expected and LOG_UNEXPECTED in self._options.log: - self._PrintUnexpectedTestResult(test, result) - self._DisplayOneLineProgress(result_summary) - except Queue.Empty: - return - - def _DisplayOneLineProgress(self, result_summary): - """Displays the progress through the test run.""" - self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % - (result_summary.expected, result_summary.unexpected, - result_summary.remaining)) - - def _DisplayDetailedProgress(self, result_summary): - """Display detailed progress output where we print the directory name - and one dot for each completed test. This is triggered by - "--log detailed-progress".""" - if self._current_test_number == len(self._test_files_list): - return - - next_test = self._test_files_list[self._current_test_number] - next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) - if self._current_progress_str == "": - self._current_progress_str = "%s: " % (next_dir) - self._current_dir = next_dir - - while next_test in result_summary.results: - if next_dir != self._current_dir: - self._meter.write("%s\n" % (self._current_progress_str)) - self._current_progress_str = "%s: ." % (next_dir) - self._current_dir = next_dir - else: - self._current_progress_str += "." - - if (next_test in result_summary.unexpected_results and - LOG_UNEXPECTED in self._options.log): - result = result_summary.unexpected_results[next_test] - self._meter.write("%s\n" % self._current_progress_str) - self._PrintUnexpectedTestResult(next_test, result) - self._current_progress_str = "%s: " % self._current_dir - - self._current_test_number += 1 - if self._current_test_number == len(self._test_files_list): - break - - next_test = self._test_files_list[self._current_test_number] - next_dir = os.path.dirname( - path_utils.RelativeTestFilename(next_test)) - - if result_summary.remaining: - remain_str = " (%d)" % (result_summary.remaining) - self._meter.update("%s%s" % - (self._current_progress_str, remain_str)) - else: - self._meter.write("%s\n" % (self._current_progress_str)) - - def _GetFailures(self, result_summary, include_crashes): - """Filters a dict of results and returns only the failures. - - Args: - result_summary: the results of the test run - include_crashes: whether crashes are included in the output. - We use False when finding the list of failures to retry - to see if the results were flaky. Although the crashes may also be - flaky, we treat them as if they aren't so that they're not ignored. 
- Returns: - a dict of files -> results - """ - failed_results = {} - for test, result in result_summary.unexpected_results.iteritems(): - if (result == test_expectations.PASS or - result == test_expectations.CRASH and not include_crashes): - continue - failed_results[test] = result - - return failed_results - - def _SummarizeUnexpectedResults(self, result_summary, retry_summary): - """Summarize any unexpected results as a dict. - - TODO(dpranke): split this data structure into a separate class? - - Args: - result_summary: summary object from initial test runs - retry_summary: summary object from final test run of retried tests - Returns: - A dictionary containing a summary of the unexpected results from the - run, with the following fields: - 'version': a version indicator (1 in this version) - 'fixable': # of fixable tests (NOW - PASS) - 'skipped': # of skipped tests (NOW & SKIPPED) - 'num_regressions': # of non-flaky failures - 'num_flaky': # of flaky failures - 'num_passes': # of unexpected passes - 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} - """ - results = {} - results['version'] = 1 - - tbe = result_summary.tests_by_expectation - tbt = result_summary.tests_by_timeline - results['fixable'] = len(tbt[test_expectations.NOW] - - tbe[test_expectations.PASS]) - results['skipped'] = len(tbt[test_expectations.NOW] & - tbe[test_expectations.SKIP]) - - num_passes = 0 - num_flaky = 0 - num_regressions = 0 - keywords = {} - for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): - keywords[v] = k.upper() - - tests = {} - for filename, result in result_summary.unexpected_results.iteritems(): - # Note that if a test crashed in the original run, we ignore - # whether or not it crashed when we retried it (if we retried it), - # and always consider the result not flaky. - test = path_utils.RelativeTestFilename(filename) - expected = self._expectations.GetExpectationsString(filename) - actual = [keywords[result]] - - if result == test_expectations.PASS: - num_passes += 1 - elif result == test_expectations.CRASH: - num_regressions += 1 - else: - if filename not in retry_summary.unexpected_results: - actual.extend( - self._expectations.GetExpectationsString( - filename).split(" ")) - num_flaky += 1 - else: - retry_result = retry_summary.unexpected_results[filename] - if result != retry_result: - actual.append(keywords[retry_result]) - num_flaky += 1 - else: - num_regressions += 1 - - tests[test] = {} - tests[test]['expected'] = expected - tests[test]['actual'] = " ".join(actual) - - results['tests'] = tests - results['num_passes'] = num_passes - results['num_flaky'] = num_flaky - results['num_regressions'] = num_regressions - - return results - - def _WriteJSONFiles(self, unexpected_results, result_summary, - individual_test_timings): - """Writes the results of the test run as JSON files into the results - dir. - - There are three different files written into the results dir: - unexpected_results.json: A short list of any unexpected results. - This is used by the buildbots to display results. - expectations.json: This is used by the flakiness dashboard. - results.json: A full list of the results - used by the flakiness - dashboard and the aggregate results dashboard. - - Args: - unexpected_results: dict of unexpected results - result_summary: full summary object - individual_test_timings: list of test times (used by the flakiness - dashboard). - """ - logging.debug("Writing JSON files in %s." 
% - self._options.results_directory) - unexpected_file = open(os.path.join(self._options.results_directory, - "unexpected_results.json"), "w") - unexpected_file.write(simplejson.dumps(unexpected_results, - sort_keys=True, indent=2)) - unexpected_file.close() - - # Write a json file of the test_expectations.txt file for the layout - # tests dashboard. - expectations_file = open(os.path.join(self._options.results_directory, - "expectations.json"), "w") - expectations_json = \ - self._expectations.GetExpectationsJsonForAllPlatforms() - expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");") - expectations_file.close() - - json_layout_results_generator.JSONLayoutResultsGenerator( - self._options.builder_name, self._options.build_name, - self._options.build_number, self._options.results_directory, - BUILDER_BASE_URL, individual_test_timings, - self._expectations, result_summary, self._test_files_list) - - logging.debug("Finished writing JSON files.") - - def _PrintExpectedResultsOfType(self, write, result_summary, result_type, - result_type_str): - """Print the number of the tests in a given result class. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary - the object containing all the results to report on - result_type - the particular result type to report in the summary. - result_type_str - a string description of the result_type. - """ - tests = self._expectations.GetTestsWithResultType(result_type) - now = result_summary.tests_by_timeline[test_expectations.NOW] - wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] - defer = result_summary.tests_by_timeline[test_expectations.DEFER] - - # We use a fancy format string in order to print the data out in a - # nicely-aligned table. - fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" - % (self._NumDigits(now), self._NumDigits(defer), - self._NumDigits(wontfix))) - write(fmtstr % (len(tests), result_type_str, len(tests & now), - len(tests & defer), len(tests & wontfix))) - - def _NumDigits(self, num): - """Returns the number of digits needed to represent the length of a - sequence.""" - ndigits = 1 - if len(num): - ndigits = int(math.log10(len(num))) + 1 - return ndigits - - def _PrintTimingStatistics(self, write, total_time, thread_timings, - directory_test_timings, individual_test_timings, - result_summary): - """Record timing-specific information for the test run. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - total_time: total elapsed time (in seconds) for the test run - thread_timings: wall clock time each thread ran for - directory_test_timings: timing by directory - individual_test_timings: timing by file - result_summary: summary object for the test run - """ - write("Test timing:") - write(" %6.2f total testing time" % total_time) - write("") - write("Thread timing:") - cuml_time = 0 - for t in thread_timings: - write(" %10s: %5d tests, %6.2f secs" % - (t['name'], t['num_tests'], t['total_time'])) - cuml_time += t['total_time'] - write(" %6.2f cumulative, %6.2f optimal" % - (cuml_time, cuml_time / int(self._options.num_test_shells))) - write("") - - self._PrintAggregateTestStatistics(write, individual_test_timings) - self._PrintIndividualTestTimes(write, individual_test_timings, - result_summary) - self._PrintDirectoryTimings(write, directory_test_timings) - - def _PrintAggregateTestStatistics(self, write, individual_test_timings): - """Prints aggregate statistics (e.g. 
median, mean, etc.) for all tests. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - individual_test_timings: List of test_shell_thread.TestStats for all - tests. - """ - test_types = individual_test_timings[0].time_for_diffs.keys() - times_for_test_shell = [] - times_for_diff_processing = [] - times_per_test_type = {} - for test_type in test_types: - times_per_test_type[test_type] = [] - - for test_stats in individual_test_timings: - times_for_test_shell.append(test_stats.test_run_time) - times_for_diff_processing.append( - test_stats.total_time_for_all_diffs) - time_for_diffs = test_stats.time_for_diffs - for test_type in test_types: - times_per_test_type[test_type].append( - time_for_diffs[test_type]) - - self._PrintStatisticsForTestTimings(write, - "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) - self._PrintStatisticsForTestTimings(write, - "PER TEST DIFF PROCESSING TIMES (seconds):", - times_for_diff_processing) - for test_type in test_types: - self._PrintStatisticsForTestTimings(write, - "PER TEST TIMES BY TEST TYPE: %s" % test_type, - times_per_test_type[test_type]) - - def _PrintIndividualTestTimes(self, write, individual_test_timings, - result_summary): - """Prints the run times for slow, timeout and crash tests. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - individual_test_timings: List of test_shell_thread.TestStats for all - tests. - result_summary: summary object for test run - """ - # Reverse-sort by the time spent in test_shell. - individual_test_timings.sort(lambda a, b: - cmp(b.test_run_time, a.test_run_time)) - - num_printed = 0 - slow_tests = [] - timeout_or_crash_tests = [] - unexpected_slow_tests = [] - for test_tuple in individual_test_timings: - filename = test_tuple.filename - is_timeout_crash_or_slow = False - if self._expectations.HasModifier(filename, - test_expectations.SLOW): - is_timeout_crash_or_slow = True - slow_tests.append(test_tuple) - - if filename in result_summary.failures: - result = result_summary.results[filename] - if (result == test_expectations.TIMEOUT or - result == test_expectations.CRASH): - is_timeout_crash_or_slow = True - timeout_or_crash_tests.append(test_tuple) - - if (not is_timeout_crash_or_slow and - num_printed < self._options.num_slow_tests_to_log): - num_printed = num_printed + 1 - unexpected_slow_tests.append(test_tuple) - - write("") - self._PrintTestListTiming(write, "%s slowest tests that are not " - "marked as SLOW and did not timeout/crash:" % - self._options.num_slow_tests_to_log, unexpected_slow_tests) - write("") - self._PrintTestListTiming(write, "Tests marked as SLOW:", slow_tests) - write("") - self._PrintTestListTiming(write, "Tests that timed out or crashed:", - timeout_or_crash_tests) - write("") - - def _PrintTestListTiming(self, write, title, test_list): - """Print timing info for each test. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - title: section heading - test_list: tests that fall in this section - """ - write(title) - for test_tuple in test_list: - filename = test_tuple.filename[len( - path_utils.LayoutTestsDir()) + 1:] - filename = filename.replace('\\', '/') - test_run_time = round(test_tuple.test_run_time, 1) - write(" %s took %s seconds" % (filename, test_run_time)) - - def _PrintDirectoryTimings(self, write, directory_test_timings): - """Print timing info by directory for any directories that - take > 10 seconds to run. 
- - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - directory_test_timing: time info for each directory - """ - timings = [] - for directory in directory_test_timings: - num_tests, time_for_directory = directory_test_timings[directory] - timings.append((round(time_for_directory, 1), directory, - num_tests)) - timings.sort() - - write("Time to process slowest subdirectories:") - min_seconds_to_print = 10 - for timing in timings: - if timing[0] > min_seconds_to_print: - write(" %s took %s seconds to run %s tests." % (timing[1], - timing[0], timing[2])) - write("") - - def _PrintStatisticsForTestTimings(self, write, title, timings): - """Prints the median, mean and standard deviation of the values in - timings. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - title: Title for these timings. - timings: A list of floats representing times. - """ - write(title) - timings.sort() - - num_tests = len(timings) - percentile90 = timings[int(.9 * num_tests)] - percentile99 = timings[int(.99 * num_tests)] - - if num_tests % 2 == 1: - median = timings[((num_tests - 1) / 2) - 1] - else: - lower = timings[num_tests / 2 - 1] - upper = timings[num_tests / 2] - median = (float(lower + upper)) / 2 - - mean = sum(timings) / num_tests - - for time in timings: - sum_of_deviations = math.pow(time - mean, 2) - - std_deviation = math.sqrt(sum_of_deviations / num_tests) - write(" Median: %6.3f" % median) - write(" Mean: %6.3f" % mean) - write(" 90th percentile: %6.3f" % percentile90) - write(" 99th percentile: %6.3f" % percentile99) - write(" Standard dev: %6.3f" % std_deviation) - write("") - - def _PrintResultSummary(self, write, result_summary): - """Print a short summary about how many tests passed. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary: information to log - """ - failed = len(result_summary.failures) - skipped = len( - result_summary.tests_by_expectation[test_expectations.SKIP]) - total = result_summary.total - passed = total - failed - skipped - pct_passed = 0.0 - if total > 0: - pct_passed = float(passed) * 100 / total - - write("") - write("=> Results: %d/%d tests passed (%.1f%%)" % - (passed, total, pct_passed)) - write("") - self._PrintResultSummaryEntry(write, result_summary, - test_expectations.NOW, "Tests to be fixed for the current release") - - write("") - self._PrintResultSummaryEntry(write, result_summary, - test_expectations.DEFER, - "Tests we'll fix in the future if they fail (DEFER)") - - write("") - self._PrintResultSummaryEntry(write, result_summary, - test_expectations.WONTFIX, - "Tests that will only be fixed if they crash (WONTFIX)") - - def _PrintResultSummaryEntry(self, write, result_summary, timeline, - heading): - """Print a summary block of results for a particular timeline of test. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary: summary to print results for - timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
- heading: a textual description of the timeline - """ - total = len(result_summary.tests_by_timeline[timeline]) - not_passing = (total - - len(result_summary.tests_by_expectation[test_expectations.PASS] & - result_summary.tests_by_timeline[timeline])) - write("=> %s (%d):" % (heading, not_passing)) - - for result in TestExpectationsFile.EXPECTATION_ORDER: - if result == test_expectations.PASS: - continue - results = (result_summary.tests_by_expectation[result] & - result_summary.tests_by_timeline[timeline]) - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] - if not_passing and len(results): - pct = len(results) * 100.0 / not_passing - write(" %5d %-24s (%4.1f%%)" % (len(results), - desc[len(results) != 1], pct)) - - def _PrintOneLineSummary(self, total, expected): - """Print a one-line summary of the test run to stdout. - - Args: - total: total number of tests run - expected: number of expected results - """ - unexpected = total - expected - if unexpected == 0: - print "All %d tests ran as expected." % expected - elif expected == 1: - print "1 test ran as expected, %d didn't:" % unexpected - else: - print "%d tests ran as expected, %d didn't:" % (expected, - unexpected) - - def _PrintUnexpectedResults(self, unexpected_results): - """Prints any unexpected results in a human-readable form to stdout.""" - passes = {} - flaky = {} - regressions = {} - - if len(unexpected_results['tests']): - print "" - - for test, results in unexpected_results['tests'].iteritems(): - actual = results['actual'].split(" ") - expected = results['expected'].split(" ") - if actual == ['PASS']: - if 'CRASH' in expected: - _AddToDictOfLists(passes, 'Expected to crash, but passed', - test) - elif 'TIMEOUT' in expected: - _AddToDictOfLists(passes, - 'Expected to timeout, but passed', test) - else: - _AddToDictOfLists(passes, 'Expected to fail, but passed', - test) - elif len(actual) > 1: - # We group flaky tests by the first actual result we got. 
- _AddToDictOfLists(flaky, actual[0], test) - else: - _AddToDictOfLists(regressions, results['actual'], test) - - if len(passes): - for key, tests in passes.iteritems(): - print "%s: (%d)" % (key, len(tests)) - tests.sort() - for test in tests: - print " %s" % test - print - - if len(flaky): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in flaky.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - print "Unexpected flakiness: %s (%d)" % ( - descriptions[result][1], len(tests)) - tests.sort() - - for test in tests: - result = unexpected_results['tests'][test] - actual = result['actual'].split(" ") - expected = result['expected'].split(" ") - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - new_expectations_list = list(set(actual) | set(expected)) - print " %s = %s" % (test, " ".join(new_expectations_list)) - print - - if len(regressions): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in regressions.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - print "Regressions: Unexpected %s : (%d)" % ( - descriptions[result][1], len(tests)) - tests.sort() - for test in tests: - print " %s = %s" % (test, key) - print - - if len(unexpected_results['tests']) and self._options.verbose: - print "-" * 78 - - def _PrintUnexpectedTestResult(self, test, result): - """Prints one unexpected test result line.""" - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] - self._meter.write(" %s -> unexpected %s\n" % - (path_utils.RelativeTestFilename(test), desc)) - - def _WriteResultsHtmlFile(self, result_summary): - """Write results.html which is a summary of tests that failed. - - Args: - result_summary: a summary of the results :) - - Returns: - True if any results were written (since expected failures may be - omitted) - """ - # test failures - if self._options.full_results_html: - test_files = result_summary.failures.keys() - else: - unexpected_failures = self._GetFailures(result_summary, - include_crashes=True) - test_files = unexpected_failures.keys() - if not len(test_files): - return False - - out_filename = os.path.join(self._options.results_directory, - "results.html") - out_file = open(out_filename, 'w') - # header - if self._options.full_results_html: - h2 = "Test Failures" - else: - h2 = "Unexpected Test Failures" - out_file.write("Layout Test Results (%(time)s)" - "

 </title></head><body><h2>%(h2)s (%(time)s)</h2>\n" - % {'h2': h2, 'time': time.asctime()}) - - test_files.sort() - for test_file in test_files: - test_failures = result_summary.failures.get(test_file, []) - out_file.write("<p><a href='%s'>%s</a><br />\n" - % (path_utils.FilenameToUri(test_file), - path_utils.RelativeTestFilename(test_file))) - for failure in test_failures: - out_file.write("&nbsp;&nbsp;%s<br/>" - % failure.ResultHtmlOutput( - path_utils.RelativeTestFilename(test_file))) - out_file.write("</p>
\n") - - # footer - out_file.write("\n") - return True - - def _ShowResultsHtmlFile(self): - """Launches the test shell open to the results.html page.""" - results_filename = os.path.join(self._options.results_directory, - "results.html") - subprocess.Popen([path_utils.TestShellPath(self._options.target), - path_utils.FilenameToUri(results_filename)]) - - -def _AddToDictOfLists(dict, key, value): - dict.setdefault(key, []).append(value) - - -def ReadTestFiles(files): - tests = [] - for file in files: - for line in open(file): - line = test_expectations.StripComments(line) - if line: - tests.append(line) - return tests - - -def CreateLoggingWriter(options, log_option): - """Returns a write() function that will write the string to logging.info() - if comp was specified in --log or if --verbose is true. Otherwise the - message is dropped. - - Args: - options: list of command line options from optparse - log_option: option to match in options.log in order for the messages - to be logged (e.g., 'actual' or 'expected') - """ - if options.verbose or log_option in options.log.split(","): - return logging.info - return lambda str: 1 - - -def main(options, args): - """Run the tests. Will call sys.exit when complete. - - Args: - options: a dictionary of command line options - args: a list of sub directories or files to test - """ - - if options.sources: - options.verbose = True - - # Set up our logging format. - meter = metered_stream.MeteredStream(options.verbose, sys.stderr) - log_fmt = '%(message)s' - log_datefmt = '%y%m%d %H:%M:%S' - log_level = logging.INFO - if options.verbose: - log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' - '%(message)s') - log_level = logging.DEBUG - logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, - stream=meter) - - if not options.target: - if options.debug: - options.target = "Debug" - else: - options.target = "Release" - - if not options.use_apache: - options.use_apache = sys.platform in ('darwin', 'linux2') - - if options.results_directory.startswith("/"): - # Assume it's an absolute path and normalize. - options.results_directory = path_utils.GetAbsolutePath( - options.results_directory) - else: - # If it's a relative path, make the output directory relative to - # Debug or Release. - basedir = path_utils.PathFromBase('webkit') - options.results_directory = path_utils.GetAbsolutePath( - os.path.join(basedir, options.target, options.results_directory)) - - if options.clobber_old_results: - # Just clobber the actual test results directories since the other - # files in the results directory are explicitly used for cross-run - # tracking. - path = os.path.join(options.results_directory, 'LayoutTests') - if os.path.exists(path): - shutil.rmtree(path) - - # Ensure platform is valid and force it to the form 'chromium-'. - options.platform = path_utils.PlatformName(options.platform) - - if not options.num_test_shells: - # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. 
- options.num_test_shells = platform_utils.GetNumCores() - - write = CreateLoggingWriter(options, 'config') - write("Running %s test_shells in parallel" % options.num_test_shells) - - if not options.time_out_ms: - if options.target == "Debug": - options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) - else: - options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) - - options.slow_time_out_ms = str(5 * int(options.time_out_ms)) - write("Regular timeout: %s, slow test timeout: %s" % - (options.time_out_ms, options.slow_time_out_ms)) - - # Include all tests if none are specified. - new_args = [] - for arg in args: - if arg and arg != '': - new_args.append(arg) - - paths = new_args - if not paths: - paths = [] - if options.test_list: - paths += ReadTestFiles(options.test_list) - - # Create the output directory if it doesn't already exist. - path_utils.MaybeMakeDirectory(options.results_directory) - meter.update("Gathering files ...") - - test_runner = TestRunner(options, meter) - test_runner.GatherFilePaths(paths) - - if options.lint_test_files: - # Creating the expecations for each platform/target pair does all the - # test list parsing and ensures it's correct syntax (e.g. no dupes). - for platform in TestExpectationsFile.PLATFORMS: - test_runner.ParseExpectations(platform, is_debug_mode=True) - test_runner.ParseExpectations(platform, is_debug_mode=False) - print ("If there are no fail messages, errors or exceptions, then the " - "lint succeeded.") - sys.exit(0) - - try: - test_shell_binary_path = path_utils.TestShellPath(options.target) - except path_utils.PathNotFound: - print "\nERROR: test_shell is not found. Be sure that you have built" - print "it and that you are using the correct build. This script" - print "will run the Release one by default. Use --debug to use the" - print "Debug build.\n" - sys.exit(1) - - write = CreateLoggingWriter(options, "config") - write("Using platform '%s'" % options.platform) - write("Placing test results in %s" % options.results_directory) - if options.new_baseline: - write("Placing new baselines in %s" % - path_utils.ChromiumBaselinePath(options.platform)) - write("Using %s build at %s" % (options.target, test_shell_binary_path)) - if options.no_pixel_tests: - write("Not running pixel tests") - write("") - - meter.update("Parsing expectations ...") - test_runner.ParseExpectations(options.platform, options.target == 'Debug') - - meter.update("Preparing tests ...") - write = CreateLoggingWriter(options, "expected") - result_summary = test_runner.PrepareListsAndPrintOutput(write) - - if 'cygwin' == sys.platform: - logging.warn("#" * 40) - logging.warn("# UNEXPECTED PYTHON VERSION") - logging.warn("# This script should be run using the version of python") - logging.warn("# in third_party/python_24/") - logging.warn("#" * 40) - sys.exit(1) - - # Delete the disk cache if any to ensure a clean test run. 
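The timeout defaults set above follow a simple rule: Debug builds get twice the default per-test timeout, and tests marked SLOW get five times the regular timeout. A small sketch mirroring that logic (DEFAULT_TEST_TIMEOUT_MS comes from TestRunner; the target value is illustrative):

    DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
    target = 'Debug'                     # illustrative
    time_out_ms = DEFAULT_TEST_TIMEOUT_MS
    if target == 'Debug':
        time_out_ms = 2 * DEFAULT_TEST_TIMEOUT_MS
    slow_time_out_ms = 5 * time_out_ms
    assert (time_out_ms, slow_time_out_ms) == (12000, 60000)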
- cachedir = os.path.split(test_shell_binary_path)[0] - cachedir = os.path.join(cachedir, "cache") - if os.path.exists(cachedir): - shutil.rmtree(cachedir) - - test_runner.AddTestType(text_diff.TestTextDiff) - if not options.no_pixel_tests: - test_runner.AddTestType(image_diff.ImageDiff) - if options.fuzzy_pixel_tests: - test_runner.AddTestType(fuzzy_image_diff.FuzzyImageDiff) - - meter.update("Starting ...") - has_new_failures = test_runner.Run(result_summary) - - logging.debug("Exit status: %d" % has_new_failures) - sys.exit(has_new_failures) - -if '__main__' == __name__: - option_parser = optparse.OptionParser() - option_parser.add_option("", "--no-pixel-tests", action="store_true", - default=False, - help="disable pixel-to-pixel PNG comparisons") - option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", - default=False, - help="Also use fuzzy matching to compare pixel " - "test outputs.") - option_parser.add_option("", "--results-directory", - default="layout-test-results", - help="Output results directory source dir," - " relative to Debug or Release") - option_parser.add_option("", "--new-baseline", action="store_true", - default=False, - help="save all generated results as new baselines" - " into the platform directory, overwriting " - "whatever's already there.") - option_parser.add_option("", "--noshow-results", action="store_true", - default=False, help="don't launch the test_shell" - " with results after the tests are done") - option_parser.add_option("", "--full-results-html", action="store_true", - default=False, help="show all failures in " - "results.html, rather than only regressions") - option_parser.add_option("", "--clobber-old-results", action="store_true", - default=False, help="Clobbers test results from " - "previous runs.") - option_parser.add_option("", "--lint-test-files", action="store_true", - default=False, help="Makes sure the test files " - "parse for all configurations. Does not run any " - "tests.") - option_parser.add_option("", "--force", action="store_true", - default=False, - help="Run all tests, even those marked SKIP " - "in the test list") - option_parser.add_option("", "--num-test-shells", - help="Number of testshells to run in parallel.") - option_parser.add_option("", "--use-apache", action="store_true", - default=False, - help="Whether to use apache instead of lighttpd.") - option_parser.add_option("", "--time-out-ms", default=None, - help="Set the timeout for each test") - option_parser.add_option("", "--run-singly", action="store_true", - default=False, - help="run a separate test_shell for each test") - option_parser.add_option("", "--debug", action="store_true", default=False, - help="use the debug binary instead of the release" - " binary") - option_parser.add_option("", "--num-slow-tests-to-log", default=50, - help="Number of slow tests whose timings " - "to print.") - option_parser.add_option("", "--platform", - help="Override the platform for expected results") - option_parser.add_option("", "--target", default="", - help="Set the build target configuration " - "(overrides --debug)") - option_parser.add_option("", "--log", action="store", - default="detailed-progress,unexpected", - help="log various types of data. 
The param should" - " be a comma-separated list of values from: " - "actual,config," + LOG_DETAILED_PROGRESS + - ",expected,timing," + LOG_UNEXPECTED + " " - "(defaults to " + - "--log detailed-progress,unexpected)") - option_parser.add_option("-v", "--verbose", action="store_true", - default=False, help="include debug-level logging") - option_parser.add_option("", "--sources", action="store_true", - help="show expected result file path for each " - "test (implies --verbose)") - option_parser.add_option("", "--startup-dialog", action="store_true", - default=False, - help="create a dialog on test_shell.exe startup") - option_parser.add_option("", "--gp-fault-error-box", action="store_true", - default=False, - help="enable Windows GP fault error box") - option_parser.add_option("", "--wrapper", - help="wrapper command to insert before " - "invocations of test_shell; option is split " - "on whitespace before running. (Example: " - "--wrapper='valgrind --smc-check=all')") - option_parser.add_option("", "--test-list", action="append", - help="read list of tests to run from file", - metavar="FILE") - option_parser.add_option("", "--nocheck-sys-deps", action="store_true", - default=False, - help="Don't check the system dependencies " - "(themes)") - option_parser.add_option("", "--randomize-order", action="store_true", - default=False, - help=("Run tests in random order (useful for " - "tracking down corruption)")) - option_parser.add_option("", "--run-chunk", - default=None, - help=("Run a specified chunk (n:l), the " - "nth of len l, of the layout tests")) - option_parser.add_option("", "--run-part", - default=None, - help=("Run a specified part (n:m), the nth of m" - " parts, of the layout tests")) - option_parser.add_option("", "--batch-size", - default=None, - help=("Run a the tests in batches (n), after " - "every n tests, the test shell is " - "relaunched.")) - option_parser.add_option("", "--builder-name", - default="DUMMY_BUILDER_NAME", - help=("The name of the builder shown on the " - "waterfall running this script e.g. " - "WebKit.")) - option_parser.add_option("", "--build-name", - default="DUMMY_BUILD_NAME", - help=("The name of the builder used in its path, " - "e.g. 
webkit-rel.")) - option_parser.add_option("", "--build-number", - default="DUMMY_BUILD_NUMBER", - help=("The build number of the builder running" - "this script.")) - option_parser.add_option("", "--experimental-fully-parallel", - action="store_true", default=False, - help="run all tests in parallel") - - options, args = option_parser.parse_args() - main(options, args) diff --git a/webkit/tools/layout_tests/run_webkit_tests.sh b/webkit/tools/layout_tests/run_webkit_tests.sh index ba6b807..a61a2d9 100755 --- a/webkit/tools/layout_tests/run_webkit_tests.sh +++ b/webkit/tools/layout_tests/run_webkit_tests.sh @@ -20,4 +20,4 @@ else export PYTHONPATH fi -"$PYTHON_PROG" "$exec_dir/run_webkit_tests.py" "$@" +"$PYTHON_PROG" "$exec_dir/webkitpy/run_chromium_webkit_tests.py" "$@" diff --git a/webkit/tools/layout_tests/test_output_formatter.bat b/webkit/tools/layout_tests/test_output_formatter.bat index a992d09..87b3a8a 100755 --- a/webkit/tools/layout_tests/test_output_formatter.bat +++ b/webkit/tools/layout_tests/test_output_formatter.bat @@ -1 +1 @@ -@%~dp0\..\..\..\third_party\python_24\python.exe %~dp0\test_output_formatter.py -v %* \ No newline at end of file +@%~dp0\..\..\..\third_party\python_24\python.exe %~dp0\webkitpy\test_output_formatter.py -v %* diff --git a/webkit/tools/layout_tests/test_output_formatter.py b/webkit/tools/layout_tests/test_output_formatter.py deleted file mode 100755 index f60dad1..0000000 --- a/webkit/tools/layout_tests/test_output_formatter.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -""" -This is a script for generating easily-viewable comparisons of text and pixel -diffs. -""" -import optparse - -from layout_package import test_expectations -from layout_package import failure -from layout_package import failure_finder -from layout_package import failure_finder_test -from layout_package import html_generator - -DEFAULT_BUILDER = "Webkit" - - -def main(options, args): - - if options.run_tests: - fft = failure_finder_test.FailureFinderTest() - return fft.runTests() - - # TODO(gwilson): Add a check that verifies the given platform exists. - - finder = failure_finder.FailureFinder(options.build_number, - options.platform_builder, - (not options.include_expected), - options.test_regex, - options.output_dir, - int(options.max_failures), - options.verbose, - options.builder_log, - options.archive_log, - options.zip_file, - options.expectations_file) - finder.use_local_baselines = options.local - failure_list = finder.GetFailures() - - if not failure_list: - print "Did not find any failures." 
- return - - generator = html_generator.HTMLGenerator(failure_list, - options.output_dir, - finder.build, - options.platform_builder, - (not options.include_expected)) - filename = generator.GenerateHTML() - - if filename and options.verbose: - print "File created at %s" % filename - -if __name__ == "__main__": - option_parser = optparse.OptionParser() - option_parser.add_option("-v", "--verbose", action="store_true", - default=False, - help="Display lots of output.") - option_parser.add_option("-i", "--include-expected", action="store_true", - default=False, - help="Include expected failures in output") - option_parser.add_option("-p", "--platform-builder", - default=DEFAULT_BUILDER, - help="Use the given builder") - option_parser.add_option("-b", "--build-number", - default=None, - help="Use the given build number") - option_parser.add_option("-t", "--test-regex", - default=None, - help="Use the given regex to filter tests") - option_parser.add_option("-o", "--output-dir", - default=".", - help="Output files to given directory") - option_parser.add_option("-m", "--max-failures", - default=100, - help="Limit the maximum number of failures") - option_parser.add_option("-r", "--run-tests", action="store_true", - default=False, - help="Runs unit tests") - option_parser.add_option("-u", "--builder-log", - default=None, - help=("Use the local builder log file " - "instead of scraping the buildbots")) - option_parser.add_option("-a", "--archive-log", - default=None, - help=("Use the local archive log file " - "instead of scraping the buildbots")) - option_parser.add_option("-e", "--expectations-file", - default=None, - help=("Use the local test expectations file " - "instead of scraping the buildbots")) - option_parser.add_option("-z", "--zip-file", - default=None, - help=("Use the local test output zip file " - "instead of scraping the buildbots")) - option_parser.add_option("-l", "--local", action="store_true", - default=False, - help=("Use local baselines instead of scraping " - "baselines from source websites")) - - options, args = option_parser.parse_args() - main(options, args) diff --git a/webkit/tools/layout_tests/test_output_formatter.sh b/webkit/tools/layout_tests/test_output_formatter.sh index 2ea3e90..c63ef09 100755 --- a/webkit/tools/layout_tests/test_output_formatter.sh +++ b/webkit/tools/layout_tests/test_output_formatter.sh @@ -20,4 +20,4 @@ else export PYTHONPATH fi -"$PYTHON_PROG" "$exec_dir/test_output_formatter.py" "-v" "$@" +"$PYTHON_PROG" "$exec_dir/webkitpy/test_output_formatter.py" "-v" "$@" diff --git a/webkit/tools/layout_tests/test_output_xml_to_json.py b/webkit/tools/layout_tests/test_output_xml_to_json.py deleted file mode 100755 index bda1ff3..0000000 --- a/webkit/tools/layout_tests/test_output_xml_to_json.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2010 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -""" -This is a script for generating JSON from JUnit XML output (generated by -google tests with --gtest_output=xml option). -""" -import logging -import optparse -import os -import sys -import time - -from xml.dom import minidom - -from layout_package import json_results_generator -from layout_package import path_utils -from layout_package import test_expectations - -# Builder base URL where we have the archived test results. 
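test_output_xml_to_json.py, whose removal begins above, reads gtest-style XML (--gtest_output=xml) and buckets each testcase into passed, failed, or skipped before handing the sets to JSONResultsGenerator. A standalone sketch of that bucketing follows; the sample document is invented and only mirrors the attributes the parser reads, and the DISABLED_/notrun handling is simplified.

    from xml.dom import minidom

    # Sample shaped after the attributes _ParseTestResultsXML reads; not real output.
    SAMPLE_XML = """<testsuites>
      <testcase name="Passes" classname="MathTest" status="run" time="0.01"/>
      <testcase name="DISABLED_Skipped" classname="MathTest" status="notrun" time="0"/>
      <testcase name="Fails" classname="MathTest" status="run" time="0.20">
        <failure message="boom"/>
      </testcase>
    </testsuites>"""

    def summarize(xml_text):
        doc = minidom.parseString(xml_text)
        passed, failed, skipped = set(), set(), set()
        for case in doc.getElementsByTagName('testcase'):
            name = '%s.%s' % (case.getAttribute('classname'),
                              case.getAttribute('name'))
            if case.getAttribute('status') == 'notrun':
                skipped.add(name)
            elif case.getElementsByTagName('failure'):
                failed.add(name)
            else:
                passed.add(name)
        return passed, failed, skipped

    print(summarize(SAMPLE_XML))

The real script also records per-test timings and maps failures onto test_expectations categories; that bookkeeping is omitted here.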
-BUILDER_BASE_URL = "http://build.chromium.org/buildbot/gtest-results/" - - -class JSONGeneratorFromXML(object): - - def __init__(self, options): - self._options = options - - # Check the results directory - if not os.path.exists(self._options.results_directory): - os.mkdir(self._options.results_directory) - - results_xml_file = None - try: - results_xml_file = open(self._options.input_results_xml) - except IOError, e: - logging.fatal("Cannot open file %s", - self._options.input_results_xml) - sys.exit(1) - - summary = self._ParseTestResultsXML( - minidom.parse(results_xml_file).documentElement) - results_xml_file.close() - - json_results_generator.JSONResultsGenerator( - self._options.builder_name, self._options.build_name, - self._options.build_number, self._options.results_directory, - self._options.builder_base_url, - self._test_timings, - self._failures, self._passed_tests, self._skipped_tests, - self._tests_list) - - def _ParseTestResultsXML(self, node): - self._tests_list = set() - self._passed_tests = set() - self._skipped_tests = set() - self._test_timings = {} - self._failures = {} - - testcases = node.getElementsByTagName('testcase') - for testcase in testcases: - name = testcase.getAttribute('name') - classname = testcase.getAttribute('classname') - test_name = "%s.%s" % (classname, name) - - status = testcase.getAttribute('status') - if status == 'notrun': - if name.startswith('DISABLED_'): - self._skipped_tests.add(test_name) - continue - - failures = testcase.getElementsByTagName('failure') - if failures: - self._failures[test_name] = test_expectations.TEXT - else: - self._passed_tests.add(test_name) - - self._test_timings[test_name] = float( - testcase.getAttribute('time')) - self._tests_list.add(test_name) - - -def main(options, args): - """Parse the tests results and generate JSON files. - - Args: - options: a dictionary of command line options - args: a list of sub directories or files to test - """ - - if not options.test_type: - logging.error("--test-type needs to be specified.") - sys.exit(1) - - canon_test_type = options.test_type.replace("-", "_") - if not options.input_results_xml: - options.input_results_xml = "%s.xml" % (canon_test_type) - if not options.builder_base_url: - options.builder_base_url = "%s%s/" % (BUILDER_BASE_URL, - options.test_type) - - JSONGeneratorFromXML(options) - - return - -if '__main__' == __name__: - option_parser = optparse.OptionParser() - option_parser.add_option("", "--test-type", default="", - help="Test type that generated the results XML," - " e.g. unit-tests.") - option_parser.add_option("", "--results-directory", default="./", - help="Output results directory source dir.") - option_parser.add_option("", "--input-results-xml", default="", - help="Test results xml file (input for us)." - " default is TEST_TYPE.xml") - option_parser.add_option("", "--builder-base-url", default="", - help=("A URL where we have the archived test " - "results. (default=%sTEST_TYPE_results/)" - % BUILDER_BASE_URL)) - option_parser.add_option("", "--builder-name", - default="DUMMY_BUILDER_NAME", - help="The name of the builder shown on the " - "waterfall running this script e.g. WebKit.") - option_parser.add_option("", "--build-name", - default="DUMMY_BUILD_NAME", - help="The name of the builder used in its path, " - "e.g. 
webkit-rel.") - option_parser.add_option("", "--build-number", - default="DUMMY_BUILD_NUMBER", - help="The build number of the builder running" - "this script.") - options, args = option_parser.parse_args() - main(options, args) diff --git a/webkit/tools/layout_tests/test_types/__init__.py b/webkit/tools/layout_tests/test_types/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/webkit/tools/layout_tests/test_types/fuzzy_image_diff.py b/webkit/tools/layout_tests/test_types/fuzzy_image_diff.py deleted file mode 100644 index 3d503b6..0000000 --- a/webkit/tools/layout_tests/test_types/fuzzy_image_diff.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Compares the image output of a test to the expected image output using -fuzzy matching. -""" - -import errno -import logging -import os -import shutil -import subprocess - -from layout_package import path_utils -from layout_package import test_failures -from test_types import test_type_base - - -class FuzzyImageDiff(test_type_base.TestTypeBase): - - def CompareOutput(self, filename, proc, output, test_args, target): - """Implementation of CompareOutput that checks the output image and - checksum against the expected files from the LayoutTest directory. - """ - failures = [] - - # If we didn't produce a hash file, this test must be text-only. - if test_args.hash is None: - return failures - - expected_png_file = path_utils.ExpectedFilename(filename, '.png') - - if test_args.show_sources: - logging.debug('Using %s' % expected_png_file) - - # Also report a missing expected PNG file. - if not os.path.isfile(expected_png_file): - failures.append(test_failures.FailureMissingImage(self)) - - # Run the fuzzymatcher - r = subprocess.call([path_utils.FuzzyMatchPath(), - test_args.png_path, expected_png_file]) - if r != 0: - failures.append(test_failures.FailureFuzzyFailure(self)) - - return failures diff --git a/webkit/tools/layout_tests/test_types/image_diff.py b/webkit/tools/layout_tests/test_types/image_diff.py deleted file mode 100644 index 38abb6b..0000000 --- a/webkit/tools/layout_tests/test_types/image_diff.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Compares the image output of a test to the expected image output. - -Compares hashes for the generated and expected images. If the output doesn't -match, returns FailureImageHashMismatch and outputs both hashes into the layout -test results directory. -""" - -import errno -import logging -import os -import shutil -import subprocess - -from layout_package import path_utils -from layout_package import test_failures -from test_types import test_type_base - -# Cache whether we have the image_diff executable available. -_compare_available = True -_compare_msg_printed = False - - -class ImageDiff(test_type_base.TestTypeBase): - - def _CopyOutputPNG(self, test_filename, source_image, extension): - """Copies result files into the output directory with appropriate - names. 
- - Args: - test_filename: the test filename - source_file: path to the image file (either actual or expected) - extension: extension to indicate -actual.png or -expected.png - """ - self._MakeOutputDirectory(test_filename) - dest_image = self.OutputFilename(test_filename, extension) - - try: - shutil.copyfile(source_image, dest_image) - except IOError, e: - # A missing expected PNG has already been recorded as an error. - if errno.ENOENT != e.errno: - raise - - def _SaveBaselineFiles(self, filename, png_path, checksum): - """Saves new baselines for the PNG and checksum. - - Args: - filename: test filename - png_path: path to the actual PNG result file - checksum: value of the actual checksum result - """ - png_file = open(png_path, "rb") - png_data = png_file.read() - png_file.close() - self._SaveBaselineData(filename, png_data, ".png") - self._SaveBaselineData(filename, checksum, ".checksum") - - def _CreateImageDiff(self, filename, target): - """Creates the visual diff of the expected/actual PNGs. - - Args: - filename: the name of the test - target: Debug or Release - """ - diff_filename = self.OutputFilename(filename, - self.FILENAME_SUFFIX_COMPARE) - actual_filename = self.OutputFilename(filename, - self.FILENAME_SUFFIX_ACTUAL + '.png') - expected_filename = self.OutputFilename(filename, - self.FILENAME_SUFFIX_EXPECTED + '.png') - - global _compare_available - cmd = '' - - try: - executable = path_utils.ImageDiffPath(target) - cmd = [executable, '--diff', actual_filename, expected_filename, - diff_filename] - except Exception, e: - _compare_available = False - - result = 1 - if _compare_available: - try: - result = subprocess.call(cmd) - except OSError, e: - if e.errno == errno.ENOENT or e.errno == errno.EACCES: - _compare_available = False - else: - raise e - except ValueError: - # work around a race condition in Python 2.4's implementation - # of subprocess.Popen - pass - - global _compare_msg_printed - - if not _compare_available and not _compare_msg_printed: - _compare_msg_printed = True - print('image_diff not found. Make sure you have a ' + target + - ' build of the image_diff executable.') - - return result - - def CompareOutput(self, filename, proc, output, test_args, target): - """Implementation of CompareOutput that checks the output image and - checksum against the expected files from the LayoutTest directory. - """ - failures = [] - - # If we didn't produce a hash file, this test must be text-only. - if test_args.hash is None: - return failures - - # If we're generating a new baseline, we pass. - if test_args.new_baseline: - self._SaveBaselineFiles(filename, test_args.png_path, - test_args.hash) - return failures - - # Compare hashes. - expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') - - expected_png_file = path_utils.ExpectedFilename(filename, '.png') - - if test_args.show_sources: - logging.debug('Using %s' % expected_hash_file) - logging.debug('Using %s' % expected_png_file) - - try: - expected_hash = open(expected_hash_file, "r").read() - except IOError, e: - if errno.ENOENT != e.errno: - raise - expected_hash = '' - - - if not os.path.isfile(expected_png_file): - # Report a missing expected PNG file. - self.WriteOutputFiles(filename, '', '.checksum', test_args.hash, - expected_hash, diff=False, wdiff=False) - self._CopyOutputPNG(filename, test_args.png_path, '-actual.png') - failures.append(test_failures.FailureMissingImage(self)) - return failures - elif test_args.hash == expected_hash: - # Hash matched (no diff needed, okay to return). 
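The comparison above is checksum-first: the checksum test_shell produced for the actual image is compared against the stored -expected.checksum, and the pixel-level image_diff binary only runs when the hashes disagree or a baseline is missing. A compact sketch of that decision; hashlib.md5 here is purely illustrative, the harness trusts whatever checksum test_shell emitted.

    import hashlib

    def classify_pixel_result(actual_png_bytes, expected_checksum):
        """Checksum-first: only a mismatch justifies running the pixel diff."""
        if not expected_checksum:
            return 'missing-baseline'
        if hashlib.md5(actual_png_bytes).hexdigest() == expected_checksum:
            return 'pass'            # hashes agree, skip the expensive image_diff run
        return 'run-image-diff'      # now compare actual pixels and write artifacts

    png_bytes = b'\x89PNG fake image payload'
    baseline = hashlib.md5(png_bytes).hexdigest()
    print(classify_pixel_result(png_bytes, baseline))       # pass
    print(classify_pixel_result(b'other bytes', baseline))  # run-image-diff

Keeping the cheap hash check in front avoids spawning image_diff for the common passing case.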
- return failures - - - self.WriteOutputFiles(filename, '', '.checksum', test_args.hash, - expected_hash, diff=False, wdiff=False) - self._CopyOutputPNG(filename, test_args.png_path, '-actual.png') - self._CopyOutputPNG(filename, expected_png_file, '-expected.png') - - # Even though we only use result in one codepath below but we - # still need to call CreateImageDiff for other codepaths. - result = self._CreateImageDiff(filename, target) - if expected_hash == '': - failures.append(test_failures.FailureMissingImageHash(self)) - elif test_args.hash != expected_hash: - # Hashes don't match, so see if the images match. If they do, then - # the hash is wrong. - if result == 0: - failures.append(test_failures.FailureImageHashIncorrect(self)) - else: - failures.append(test_failures.FailureImageHashMismatch(self)) - - return failures - - def DiffFiles(self, file1, file2): - """Diff two image files. - - Args: - file1, file2: full paths of the files to compare. - - Returns: - True if two files are different. - False otherwise. - """ - - try: - executable = path_utils.ImageDiffPath('Debug') - except Exception, e: - logging.warn('Failed to find image diff executable.') - return True - - cmd = [executable, file1, file2] - result = 1 - try: - result = subprocess.call(cmd) - except OSError, e: - logging.warn('Failed to compare image diff: %s', e) - return True - - return result == 1 diff --git a/webkit/tools/layout_tests/test_types/test_type_base.py b/webkit/tools/layout_tests/test_types/test_type_base.py deleted file mode 100644 index f10c75f..0000000 --- a/webkit/tools/layout_tests/test_types/test_type_base.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Defines the interface TestTypeBase which other test types inherit from. - -Also defines the TestArguments "struct" to pass them additional arguments. -""" - -import cgi -import difflib -import errno -import logging -import os.path -import subprocess - -from layout_package import path_utils - - -class TestArguments(object): - """Struct-like wrapper for additional arguments needed by - specific tests.""" - # Whether to save new baseline results. - new_baseline = False - - # Path to the actual PNG file generated by pixel tests - png_path = None - - # Value of checksum generated by pixel tests. - hash = None - - # Whether to use wdiff to generate by-word diffs. - wdiff = False - - # Whether to report the locations of the expected result files used. - show_sources = False - -# Python bug workaround. See the wdiff code in WriteOutputFiles for an -# explanation. -_wdiff_available = True - - -class TestTypeBase(object): - - # Filename pieces when writing failures to the test results directory. - FILENAME_SUFFIX_ACTUAL = "-actual" - FILENAME_SUFFIX_EXPECTED = "-expected" - FILENAME_SUFFIX_DIFF = "-diff" - FILENAME_SUFFIX_WDIFF = "-wdiff.html" - FILENAME_SUFFIX_COMPARE = "-diff.png" - - def __init__(self, platform, root_output_dir): - """Initialize a TestTypeBase object. - - Args: - platform: the platform (e.g., 'chromium-mac-leopard') - identifying the platform-specific results to be used. - root_output_dir: The unix style path to the output dir. 
- """ - self._root_output_dir = root_output_dir - self._platform = platform - - def _MakeOutputDirectory(self, filename): - """Creates the output directory (if needed) for a given test - filename.""" - output_filename = os.path.join(self._root_output_dir, - path_utils.RelativeTestFilename(filename)) - path_utils.MaybeMakeDirectory(os.path.split(output_filename)[0]) - - def _SaveBaselineData(self, filename, data, modifier): - """Saves a new baseline file into the platform directory. - - The file will be named simply "-expected", suitable for - use as the expected results in a later run. - - Args: - filename: path to the test file - data: result to be saved as the new baseline - modifier: type of the result file, e.g. ".txt" or ".png" - """ - relative_dir = os.path.dirname( - path_utils.RelativeTestFilename(filename)) - output_dir = os.path.join( - path_utils.ChromiumBaselinePath(self._platform), relative_dir) - output_file = os.path.basename(os.path.splitext(filename)[0] + - self.FILENAME_SUFFIX_EXPECTED + modifier) - - path_utils.MaybeMakeDirectory(output_dir) - output_path = os.path.join(output_dir, output_file) - logging.debug('writing new baseline to "%s"' % (output_path)) - open(output_path, "wb").write(data) - - def OutputFilename(self, filename, modifier): - """Returns a filename inside the output dir that contains modifier. - - For example, if filename is c:/.../fast/dom/foo.html and modifier is - "-expected.txt", the return value is - c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt - - Args: - filename: absolute filename to test file - modifier: a string to replace the extension of filename with - - Return: - The absolute windows path to the output filename - """ - output_filename = os.path.join(self._root_output_dir, - path_utils.RelativeTestFilename(filename)) - return os.path.splitext(output_filename)[0] + modifier - - def CompareOutput(self, filename, proc, output, test_args, target): - """Method that compares the output from the test with the - expected value. - - This is an abstract method to be implemented by all sub classes. - - Args: - filename: absolute filename to test file - proc: a reference to the test_shell process - output: a string containing the output of the test - test_args: a TestArguments object holding optional additional - arguments - target: Debug or Release - - Return: - a list of TestFailure objects, empty if the test passes - """ - raise NotImplemented - - def WriteOutputFiles(self, filename, test_type, file_type, output, - expected, diff=True, wdiff=False): - """Writes the test output, the expected output and optionally the diff - between the two to files in the results directory. - - The full output filename of the actual, for example, will be - -actual - For instance, - my_test-simp-actual.txt - - Args: - filename: The test filename - test_type: A string describing the test type, e.g. "simp" - file_type: A string describing the test output file type, e.g. ".txt" - output: A string containing the test output - expected: A string containing the expected test output - diff: if True, write a file containing the diffs too. 
This should be - False for results that are not text - wdiff: if True, write an HTML file containing word-by-word diffs - """ - self._MakeOutputDirectory(filename) - actual_filename = self.OutputFilename(filename, - test_type + self.FILENAME_SUFFIX_ACTUAL + file_type) - expected_filename = self.OutputFilename(filename, - test_type + self.FILENAME_SUFFIX_EXPECTED + file_type) - if output: - open(actual_filename, "wb").write(output) - if expected: - open(expected_filename, "wb").write(expected) - - if not output or not expected: - return - - if diff: - diff = difflib.unified_diff(expected.splitlines(True), - output.splitlines(True), - expected_filename, - actual_filename) - - diff_filename = self.OutputFilename(filename, - test_type + self.FILENAME_SUFFIX_DIFF + file_type) - open(diff_filename, "wb").write(''.join(diff)) - - if wdiff: - # Shell out to wdiff to get colored inline diffs. - executable = path_utils.WDiffPath() - cmd = [executable, - '--start-delete=##WDIFF_DEL##', - '--end-delete=##WDIFF_END##', - '--start-insert=##WDIFF_ADD##', - '--end-insert=##WDIFF_END##', - expected_filename, - actual_filename] - filename = self.OutputFilename(filename, - test_type + self.FILENAME_SUFFIX_WDIFF) - - global _wdiff_available - - try: - # Python's Popen has a bug that causes any pipes opened to a - # process that can't be executed to be leaked. Since this - # code is specifically designed to tolerate exec failures - # to gracefully handle cases where wdiff is not installed, - # the bug results in a massive file descriptor leak. As a - # workaround, if an exec failure is ever experienced for - # wdiff, assume it's not available. This will leak one - # file descriptor but that's better than leaking each time - # wdiff would be run. - # - # http://mail.python.org/pipermail/python-list/ - # 2008-August/505753.html - # http://bugs.python.org/issue3210 - # - # It also has a threading bug, so we don't output wdiff if - # the Popen raises a ValueError. - # http://bugs.python.org/issue1236 - if _wdiff_available: - wdiff = subprocess.Popen( - cmd, stdout=subprocess.PIPE).communicate()[0] - wdiff_failed = False - - except OSError, e: - if (e.errno == errno.ENOENT or e.errno == errno.EACCES or - e.errno == errno.ECHILD): - _wdiff_available = False - else: - raise e - except ValueError, e: - wdiff_failed = True - - out = open(filename, 'wb') - - if not _wdiff_available: - out.write( - "wdiff not installed.
" - "If you're running OS X, you can install via macports." - "
" - "If running Ubuntu linux, you can run " - "'sudo apt-get install wdiff'.") - elif wdiff_failed: - out.write('wdiff failed due to running with multiple ' - 'test_shells in parallel.') - else: - wdiff = cgi.escape(wdiff) - wdiff = wdiff.replace('##WDIFF_DEL##', '') - wdiff = wdiff.replace('##WDIFF_ADD##', '') - wdiff = wdiff.replace('##WDIFF_END##', '') - out.write('') - out.write('
' + wdiff + '
') - - out.close() diff --git a/webkit/tools/layout_tests/test_types/text_diff.py b/webkit/tools/layout_tests/test_types/text_diff.py deleted file mode 100644 index ddbdc8b..0000000 --- a/webkit/tools/layout_tests/test_types/text_diff.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Compares the text output of a test to the expected text output. - -If the output doesn't match, returns FailureTextMismatch and outputs the diff -files into the layout test results directory. -""" - -import errno -import logging -import os.path - -from layout_package import path_utils -from layout_package import test_failures -from test_types import test_type_base - - -def isRenderTreeDump(data): - """Returns true if data appears to be a render tree dump as opposed to a - plain text dump.""" - return data.find("RenderView at (0,0)") != -1 - - -class TestTextDiff(test_type_base.TestTypeBase): - - def GetNormalizedOutputText(self, output): - # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin) - # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n". - norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace( - "\r\n", "\n") - return norm + "\n" - - def GetNormalizedExpectedText(self, filename, show_sources): - """Given the filename of the test, read the expected output from a file - and normalize the text. Returns a string with the expected text, or '' - if the expected output file was not found.""" - # Read the platform-specific expected text. - expected_filename = path_utils.ExpectedFilename(filename, '.txt') - if show_sources: - logging.debug('Using %s' % expected_filename) - - return self.GetNormalizedText(expected_filename) - - def GetNormalizedText(self, filename): - try: - text = open(filename).read() - except IOError, e: - if errno.ENOENT != e.errno: - raise - return '' - - # Normalize line endings - return text.strip("\r\n").replace("\r\n", "\n") + "\n" - - def CompareOutput(self, filename, proc, output, test_args, target): - """Implementation of CompareOutput that checks the output text against - the expected text from the LayoutTest directory.""" - failures = [] - - # If we're generating a new baseline, we pass. - if test_args.new_baseline: - self._SaveBaselineData(filename, output, ".txt") - return failures - - # Normalize text to diff - output = self.GetNormalizedOutputText(output) - expected = self.GetNormalizedExpectedText(filename, - test_args.show_sources) - - # Write output files for new tests, too. - if output != expected: - # Text doesn't match, write output files. - self.WriteOutputFiles(filename, "", ".txt", output, expected, - diff=True, wdiff=True) - - if expected == '': - failures.append(test_failures.FailureMissingResult(self)) - else: - failures.append(test_failures.FailureTextMismatch(self, True)) - - return failures - - def DiffFiles(self, file1, file2): - """Diff two text files. - - Args: - file1, file2: full paths of the files to compare. - - Returns: - True if two files are different. - False otherwise. 
- """ - - return self.GetNormalizedText(file1) != self.GetNormalizedText(file2) diff --git a/webkit/tools/layout_tests/update_expectations_from_dashboard.py b/webkit/tools/layout_tests/update_expectations_from_dashboard.py deleted file mode 100644 index b5774b6..0000000 --- a/webkit/tools/layout_tests/update_expectations_from_dashboard.py +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Script to read in updates in JSON form from the layout test dashboard -and apply them to test_expectations.txt. - -Usage: -1. Go to http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/ - layout_tests/flakiness_dashboard.html#expectationsUpdate=true -2. Copy-paste that JSON into a local file. -3. python update_expectations_from_dashboard.py path/to/local/file -""" - -import logging -import os -import sys - -from layout_package import path_utils -from layout_package import test_expectations - -sys.path.append(path_utils.PathFromBase('third_party')) -import simplejson - - -def UpdateExpectations(expectations, updates): - expectations = ExpectationsUpdater(None, None, - 'WIN', False, False, expectations, True) - return expectations.UpdateBasedOnJSON(updates) - - -class OptionsAndExpectationsHolder(object): - """Container for a list of options and a list of expectations for a given - test.""" - - def __init__(self, options, expectations): - self.options = options - self.expectations = expectations - - -class BuildInfo(OptionsAndExpectationsHolder): - """Container for a list of options and expectations for a given test as - well as a map from build_type (e.g. debug/release) to a list of platforms - (e.g. ["win", "linux"]). - """ - - def __init__(self, options, expectations, build_info): - OptionsAndExpectationsHolder.__init__(self, options, expectations) - self.build_info = build_info - - -class ExpectationsUpdater(test_expectations.TestExpectationsFile): - """Class to update test_expectations.txt based on updates in the following - form: - {"test1.html": { - "WIN RELEASE": {"missing": "FAIL TIMEOUT", "extra": "CRASH"}} - "WIN DEBUG": {"missing": "FAIL TIMEOUT"}} - "test2.html": ... - } - """ - - def _GetBuildTypesAndPlatforms(self, options): - """Splits up the options list into three lists: platforms, - build_types and other_options.""" - platforms = [] - build_types = [] - other_options = [] - for option in options: - if option in self.PLATFORMS: - platforms.append(option) - elif option in self.BUILD_TYPES: - build_types.append(option) - else: - other_options.append(option) - - if not len(build_types): - build_types = self.BUILD_TYPES - - if not len(platforms): - # If there are no platforms specified, use the most generic version - # of each platform name so we don't have to dedup them later. - platforms = self.BASE_PLATFORMS - - return (platforms, build_types, other_options) - - def _ApplyUpdatesToResults(self, test, results, update_json, expectations, - other_options): - """Applies the updates from the JSON to the existing results in - test_expectations. - Args: - test: The test to update. - results: The results object to update. - update_json: The parsed JSON object with the updates. - expectations: The existing expectatons for this test. - other_options: The existing modifiers for this test - excluding platforms and build_types. 
- """ - updates = update_json[test] - for build_info in updates: - platform, build_type = build_info.lower().split(' ') - - # If the platform/build_type is not currently listed for the test, - # skip it as this platform/build_type may be listed in another - # line. - if platform not in results or build_type not in results[platform]: - continue - - these_results = results[platform][build_type] - these_updates = updates[build_info] - these_expectations = these_results.expectations - these_options = these_results.options - - self._ApplyExtraUpdates(these_updates, these_options, - these_expectations) - self._ApplyMissingUpdates(test, these_updates, these_options, - these_expectations) - - def _ApplyExtraUpdates(self, updates, options, expectations): - """Remove extraneous expectations/options in the updates object to - the given options/expectations lists. - """ - if "extra" not in updates: - return - - items = updates["extra"].lower().split(' ') - for item in items: - if item in self.EXPECTATIONS: - if item in expectations: - expectations.remove(item) - else: - if item in options: - options.remove(item) - - def _ApplyMissingUpdates(self, test, updates, options, expectations): - """Apply an addition expectations/options in the updates object to - the given options/expectations lists. - """ - if "missing" not in updates: - return - - items = updates["missing"].lower().split(' ') - for item in items: - if item == 'other': - continue - - # Don't add TIMEOUT to SLOW tests. Automating that is too - # complicated instead, print out tests that need manual attention. - if ((item == "timeout" and - ("slow" in options or "slow" in items)) or - (item == "slow" and - ("timeout" in expectations or "timeout" in items))): - logging.info("NEEDS MANUAL ATTENTION: %s may need " - "to be marked TIMEOUT or SLOW." % test) - elif item in self.EXPECTATIONS: - if item not in expectations: - expectations.append(item) - if ("fail" in expectations and - (item == "image+text" or item == "image" or - item == "text")): - expectations.remove("fail") - else: - if item not in options: - options.append(item) - - def _AppendPlatform(self, item, build_type, platform): - """Appends the give build_type and platform to the BuildInfo item. - """ - build_info = item.build_info - if build_type not in build_info: - build_info[build_type] = [] - build_info[build_type].append(platform) - - def _GetUpdatesDedupedByMatchingOptionsAndExpectations(self, results): - """Converts the results, which is - results[platforms][build_type] = OptionsAndExpectationsHolder - to BuildInfo objects, which dedupes platform/build_types that - have the same expectations and options. - """ - updates = [] - for platform in results: - for build_type in results[platform]: - options = results[platform][build_type].options - expectations = results[platform][build_type].expectations - - found_match = False - for update in updates: - if (update.options == options and - update.expectations == expectations): - self._AppendPlatform(update, build_type, platform) - found_match = True - break - - if found_match: - continue - - update = BuildInfo(options, expectations, {}) - self._AppendPlatform(update, build_type, platform) - updates.append(update) - - return self._RoundUpFlakyUpdates(updates) - - def _HasMajorityBuildConfigurations(self, candidate, candidate2): - """Returns true if the candidate BuildInfo represents all build - configurations except the single one listed in candidate2. 
- For example, if a test is FAIL TIMEOUT on all bots except WIN-Release, - where it is just FAIL. Or if a test is FAIL TIMEOUT on MAC-Release, - Mac-Debug and Linux-Release, but only FAIL on Linux-Debug. - """ - build_types = self.BUILD_TYPES[:] - build_info = candidate.build_info - if "release" not in build_info or "debug" not in build_info: - return None - - release_set = set(build_info["release"]) - debug_set = set(build_info["debug"]) - if len(release_set - debug_set) is 1: - full_set = release_set - partial_set = debug_set - needed_build_type = "debug" - elif len(debug_set - release_set) is 1: - full_set = debug_set - partial_set = release_set - needed_build_type = "release" - else: - return None - - build_info2 = candidate2.build_info - if needed_build_type not in build_info2: - return None - - build_type = None - for this_build_type in build_info2: - # Can only work if this candidate has one build_type. - if build_type: - return None - build_type = this_build_type - - if set(build_info2[needed_build_type]) == full_set - partial_set: - return full_set - else: - return None - - def _RoundUpFlakyUpdates(self, updates): - """Consolidates the updates into one update if 5/6 results are - flaky and the is a subset of the flaky results 6th just not - happening to flake or 3/4 results are flaky and the 4th has a - subset of the flaky results. - """ - if len(updates) is not 2: - return updates - - item1, item2 = updates - candidate = None - candidate_platforms = self._HasMajorityBuildConfigurations(item1, - item2) - if candidate_platforms: - candidate = item1 - else: - candidate_platforms = self._HasMajorityBuildConfigurations(item1, - item2) - if candidate_platforms: - candidate = item2 - - if candidate: - options1 = set(item1.options) - options2 = set(item2.options) - expectations1 = set(item1.expectations) - if not len(expectations1): - expectations1.add("pass") - expectations2 = set(item2.expectations) - if not len(expectations2): - expectations2.add("pass") - - options_union = options1 | options2 - expectations_union = expectations1 | expectations2 - # If the options and expectations are equal to their respective - # unions then we can round up to include the 6th platform. - if (candidate == item1 and options1 == options_union and - expectations1 == expectations_union and len(expectations2) or - candidate == item2 and options2 == options_union and - expectations2 == expectations_union and len(expectations1)): - for build_type in self.BUILD_TYPES: - candidate.build_info[build_type] = list( - candidate_platforms) - updates = [candidate] - return updates - - def UpdateBasedOnJSON(self, update_json): - """Updates the expectations based on the update_json, which is of the - following form: - {"1.html": { - "WIN DEBUG": {"extra": "FAIL", "missing", "PASS"}, - "WIN RELEASE": {"extra": "FAIL"} - }} - """ - output = [] - - comment_lines = [] - removed_test_on_previous_line = False - lineno = 0 - for line in self._GetIterableExpectations(): - lineno += 1 - test, options, expectations = self.ParseExpectationsLine(line, - lineno) - - # If there are no updates for this test, then output the line - # unmodified. 
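UpdateBasedOnJSON, which continues below, funnels each per-configuration update through the 'extra'/'missing' merge implemented in _ApplyExtraUpdates and _ApplyMissingUpdates above. A toy version of that merge, ignoring the SLOW/TIMEOUT manual-attention rule and the FAIL versus IMAGE/TEXT interplay:

    EXPECTATIONS = {'pass', 'fail', 'timeout', 'crash', 'image', 'text', 'image+text'}

    def apply_update(options, expectations, update):
        # update looks like {"extra": "FAIL", "missing": "TIMEOUT SLOW"};
        # anything not a known expectation is treated as a modifier/option.
        for item in update.get('extra', '').lower().split():
            target = expectations if item in EXPECTATIONS else options
            if item in target:
                target.remove(item)
        for item in update.get('missing', '').lower().split():
            target = expectations if item in EXPECTATIONS else options
            if item not in target:
                target.append(item)
        return options, expectations

    print(apply_update(['bug1'], ['fail'],
                       {'extra': 'FAIL', 'missing': 'TIMEOUT SLOW'}))
    # (['bug1', 'slow'], ['timeout'])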
- if (test not in update_json): - if test: - self._WriteCompletedLines(output, comment_lines, line) - else: - if removed_test_on_previous_line: - removed_test_on_previous_line = False - comment_lines = [] - comment_lines.append(line) - continue - - platforms, build_types, other_options = \ - self._GetBuildTypesAndPlatforms(options) - - updates = update_json[test] - has_updates_for_this_line = False - for build_info in updates: - platform, build_type = build_info.lower().split(' ') - if platform in platforms and build_type in build_types: - has_updates_for_this_line = True - - # If the updates for this test don't apply for the platforms / - # build-types listed in this line, then output the line unmodified. - if not has_updates_for_this_line: - self._WriteCompletedLines(output, comment_lines, line) - continue - - results = {} - for platform in platforms: - results[platform] = {} - for build_type in build_types: - results[platform][build_type] = \ - OptionsAndExpectationsHolder(other_options[:], - expectations[:]) - - self._ApplyUpdatesToResults(test, results, update_json, - expectations, other_options) - - deduped_updates = \ - self._GetUpdatesDedupedByMatchingOptionsAndExpectations( - results) - removed_test_on_previous_line = not self._WriteUpdates(output, - comment_lines, test, deduped_updates) - # Append any comment/whitespace lines at the end of test_expectations. - output.extend(comment_lines) - return "".join(output) - - def _WriteUpdates(self, output, comment_lines, test, updates): - """Writes the updates to the output. - Args: - output: List to append updates to. - comment_lines: Comments that come before this test that should be - prepending iff any tests lines are written out. - test: The test being updating. - updates: List of BuildInfo instances that represent the final values - for this test line.. - """ - wrote_any_lines = False - for update in updates: - options = update.options - expectations = update.expectations - - has_meaningful_modifier = False - for option in options: - if option in self.MODIFIERS: - has_meaningful_modifier = True - break - - has_non_pass_expectation = False - for expectation in expectations: - if expectation != "pass": - has_non_pass_expectation = True - break - - # If this test is only left with platform, build_type, bug number - # and a PASS or no expectation, then we can exclude it from - # test_expectations. - if not has_meaningful_modifier and not has_non_pass_expectation: - continue - - if not has_non_pass_expectation: - expectations = ["pass"] - - missing_build_types = list(self.BUILD_TYPES) - sentinal = None - for build_type in update.build_info: - if not sentinal: - sentinal = update.build_info[build_type] - # Remove build_types where the list of platforms is equal. 
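_WriteLine, defined a little further on, serializes one result group back into test_expectations.txt syntax: upper-cased modifiers, platforms and an optional build type, then a colon, the test path, an equals sign, and the upper-cased expectations. A rough standalone rendering of that format, with invented function and argument names:

    def format_expectations_line(modifiers, platforms, build_type, test, expectations):
        # e.g. BUG1 SLOW WIN LINUX DEBUG : 1.html = FAIL TIMEOUT
        tokens = list(modifiers) + list(platforms)
        if build_type:
            tokens.append(build_type)
        tokens = [t.upper() for t in tokens]
        return ' '.join(tokens + [':', test, '='] +
                        [e.upper() for e in expectations])

    print(format_expectations_line(['bug1', 'slow'], ['win', 'linux'], 'debug',
                                   '1.html', ['fail', 'timeout']))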
- if sentinal == update.build_info[build_type]: - missing_build_types.remove(build_type) - - has_all_build_types = not len(missing_build_types) - if has_all_build_types: - self._WriteLine(output, comment_lines, update, options, - build_type, expectations, test, - has_all_build_types) - wrote_any_lines = True - else: - for build_type in update.build_info: - self._WriteLine(output, comment_lines, update, options, - build_type, expectations, test, - has_all_build_types) - wrote_any_lines = True - - return wrote_any_lines - - def _WriteCompletedLines(self, output, comment_lines, test_line=None): - """Writes the comment_lines and test_line to the output and empties - out the comment_lines.""" - output.extend(comment_lines) - del comment_lines[:] - if test_line: - output.append(test_line) - - def _GetPlatform(self, platforms): - """Returns the platform to use. If all platforms are listed, then - return the empty string as that's what we want to list in - test_expectations.txt. - - Args: - platforms: List of lower-case platform names. - """ - platforms.sort() - if platforms == list(self.BASE_PLATFORMS): - return "" - else: - return " ".join(platforms) - - def _WriteLine(self, output, comment_lines, update, options, build_type, - expectations, test, exclude_build_type): - """Writes a test_expectations.txt line. - Args: - output: List to append new lines to. - comment_lines: List of lines to prepend before the new line. - update: The update object. - """ - line = options[:] - - platforms = self._GetPlatform(update.build_info[build_type]) - if platforms: - line.append(platforms) - if not exclude_build_type: - line.append(build_type) - - line = [x.upper() for x in line] - expectations = [x.upper() for x in expectations] - - line = line + [":", test, "="] + expectations - self._WriteCompletedLines(output, comment_lines, " ".join(line) + "\n") - - -def main(): - logging.basicConfig(level=logging.INFO, - format='%(message)s') - - updates = simplejson.load(open(sys.argv[1])) - - path_to_expectations = path_utils.GetAbsolutePath( - os.path.dirname(sys.argv[0])) - path_to_expectations = os.path.join(path_to_expectations, - "test_expectations.txt") - - old_expectations = open(path_to_expectations).read() - new_expectations = UpdateExpectations(old_expectations, updates) - open(path_to_expectations, 'w').write(new_expectations) - -if '__main__' == __name__: - main() diff --git a/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py b/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py deleted file mode 100644 index 102054d..0000000 --- a/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Unittests to make sure we generate and update the expected-*.txt files -properly after running layout tests.""" - -import os -import sys -import unittest - -import update_expectations_from_dashboard - - -class UpdateExpectationsUnittest(unittest.TestCase): - ########################################################################### - # Tests - - def testKeepsUnmodifiedLines(self): - expectations = """// Ensure comments and newlines don't get stripped. - BUG1 SLOW : 1.html = PASS - - BUG2 : 2.html = FAIL TIMEOUT - """ - exp_results = """// Ensure comments and newlines don't get stripped. 
- BUG1 SLOW : 1.html = PASS - - BUG2 : 2.html = FAIL TIMEOUT - """ - - updates = [] - self.updateExpectations(expectations, updates, exp_results) - - def testRemoveFlakyExpectation(self): - expectations = "BUG1 : 1.html = TIMEOUT FAIL\n" - expected_results = "BUG1 : 1.html = TIMEOUT\n" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectationSlowTest(self): - expectations = "BUG1 SLOW : 1.html = FAIL\n" - expected_results = "BUG1 SLOW : 1.html = PASS\n" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectationFromOnePlatform(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = """BUG1 MAC WIN DEBUG : 1.html = FAIL - BUG1 RELEASE : 1.html = FAIL - """ - updates = {"1.html": {"LINUX DEBUG": {"extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveSlow(self): - expectations = "BUG1 SLOW : 1.html = PASS\n" - expected_results = "" - updates = {"1.html": { - "WIN RELEASE": {"extra": "SLOW"}, - "WIN DEBUG": {"extra": "SLOW"}, - "LINUX RELEASE": {"extra": "SLOW"}, - "LINUX DEBUG": {"extra": "SLOW"}, - "MAC RELEASE": {"extra": "SLOW"}, - "MAC DEBUG": {"extra": "SLOW"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddFlakyExpectation(self): - expectations = "BUG1 : 1.html = TIMEOUT\n" - expected_results = "BUG1 : 1.html = TIMEOUT FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExpectationSlowTest(self): - expectations = "BUG1 SLOW : 1.html = PASS\n" - expected_results = "BUG1 SLOW : 1.html = PASS FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExpectation(self): - # not yet implemented - return - - expectations = "" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - 
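The updates dictionaries used throughout these tests have the same shape as the JSON blob copied from the flakiness dashboard. A small sketch of loading and walking that shape; the stdlib json module stands in here for the checked-in simplejson that the Python 2.4 era script imports:

    import json

    update_blob = '''
    {
      "1.html": {
        "WIN DEBUG":   {"extra": "FAIL", "missing": "PASS"},
        "WIN RELEASE": {"extra": "FAIL"}
      }
    }
    '''
    updates = json.loads(update_blob)
    for test, per_config in updates.items():
        for config, change in sorted(per_config.items()):
            print('%s on %s: remove [%s] add [%s]' % (
                test, config, change.get('extra', ''), change.get('missing', '')))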
- def testAddExpectationForOnePlatform(self): - expectations = "BUG1 WIN : 1.html = TIMEOUT\n" - expected_results = "BUG1 WIN : 1.html = TIMEOUT\n" - # TODO(ojan): Once we add currently unlisted tests, this expect results - # for this test should be: - #expected_results = """BUG1 WIN : 1.html = TIMEOUT - #BUG_AUTO LINUX DEBUG : 1.html = TIMEOUT - #""" - updates = {"1.html": {"LINUX DEBUG": {"missing": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddSlow(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 SLOW : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "SLOW"}, - "WIN DEBUG": {"missing": "SLOW"}, - "LINUX RELEASE": {"missing": "SLOW"}, - "LINUX DEBUG": {"missing": "SLOW"}, - "MAC RELEASE": {"missing": "SLOW"}, - "MAC DEBUG": {"missing": "SLOW"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddRemoveMultipleExpectations(self): - expectations = """BUG1 WIN : 1.html = FAIL - BUG2 MAC : 1.html = FAIL""" - expected_results = """BUG1 SLOW WIN : 1.html = FAIL - BUG2 MAC : 1.html = TIMEOUT\n""" - # TODO(ojan): Once we add currently unlisted tests, this expect results - # for this test should be: - #expected_results = """BUG1 SLOW WIN : 1.html = FAIL - #BUG_AUTO LINUX SLOW : 1.html = PASS - #BUG2 MAC : 1.html = TIMEOUT - #""" - - updates = {"1.html": { - "WIN RELEASE": {"missing": "SLOW"}, - "WIN DEBUG": {"missing": "SLOW"}, - "LINUX RELEASE": {"missing": "SLOW"}, - "LINUX DEBUG": {"missing": "SLOW"}, - "MAC RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, - "MAC DEBUG": {"missing": "TIMEOUT", "extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExistingExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": {"WIN RELEASE": {"missing": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddImageOrTextToFailExpectation(self): - expectations = """BUG1 WIN RELEASE : 1.html = FAIL - BUG1 MAC RELEASE : 1.html = FAIL - BUG1 LINUX RELEASE : 1.html = FAIL - BUG1 LINUX DEBUG : 1.html = TIMEOUT - """ - expected_results = """BUG1 WIN RELEASE : 1.html = IMAGE+TEXT - BUG1 MAC RELEASE : 1.html = IMAGE - BUG1 LINUX RELEASE : 1.html = TEXT - BUG1 LINUX DEBUG : 1.html = TIMEOUT IMAGE+TEXT - """ - updates = {"1.html": { - "WIN RELEASE": {"missing": "IMAGE+TEXT"}, - "MAC RELEASE": {"missing": "IMAGE"}, - "LINUX RELEASE": {"missing": "TEXT"}, - "LINUX DEBUG": {"missing": "IMAGE+TEXT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddOther(self): - # Other is a catchall for more obscure expectations results. - # We should never add it to test_expectations. - expectations = "BUG1 WIN RELEASE : 1.html = FAIL\n" - expected_results = "BUG1 WIN RELEASE : 1.html = FAIL\n" - updates = {"1.html": {"WIN RELEASE": {"missing": "OTHER"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveNonExistantExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": {"WIN RELEASE": {"extra": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testUpdateSomePlatforms(self): - expectations = "BUG1 DEBUG : 1.html = TEXT PASS\n" - # TODO(ojan): Once we add currently unlisted tests, the expect results - # for this test should include the missing bits for RELEASE. 
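The per-platform splits asserted in tests like the ones above come from grouping configurations whose final options and expectations are identical, so that the six platform/build-type results collapse into as few expectation lines as possible. Roughly, and ignoring the BuildInfo bookkeeping and the flaky round-up rule:

    from collections import defaultdict

    def group_matching_configs(results):
        # results: (platform, build_type) -> (options tuple, expectations tuple).
        # Configurations with identical values collapse onto one output line.
        groups = defaultdict(list)
        for config, value in results.items():
            groups[value].append(config)
        return dict(groups)

    results = {
        ('win', 'release'):   (('bug1',), ('fail', 'timeout')),
        ('win', 'debug'):     (('bug1',), ('fail', 'timeout')),
        ('linux', 'release'): (('bug1',), ('fail',)),
    }
    for value, configs in group_matching_configs(results).items():
        print(sorted(configs), '->', value)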
- expected_results = "BUG1 LINUX DEBUG : 1.html = TEXT PASS\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "PASS TEXT"}, - "WIN DEBUG": {"extra": "MISSING TEXT"}, - "MAC RELEASE": {"missing": "PASS TEXT"}, - "MAC DEBUG": {"extra": "MISSING TEXT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddTimeoutToSlowTest(self): - # SLOW tests needing TIMEOUT need manual updating. Should just print - # a log and not modify the test. - expectations = "BUG1 SLOW : 1.html = TEXT\n" - expected_results = "BUG1 SLOW : 1.html = TEXT\n" - updates = {"1.html": {"WIN RELEASE": {"missing": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testAddSlowToTimeoutTest(self): - # SLOW tests needing TIMEOUT need manual updating. Should just print - # a log and not modify the test. - expectations = "BUG1 : 1.html = TIMEOUT\n" - expected_results = "BUG1 : 1.html = TIMEOUT\n" - updates = {"1.html": {"WIN RELEASE": {"missing": "SLOW"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testIncludeLastPlatformInFlakiness(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations are a - # subset of the other 5/6, then give them all the same expectations. - expectations = "BUG2 : 1.html = FAIL\n" - expected_results = "BUG2 : 1.html = FAIL TIMEOUT\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, - "WIN DEBUG": {"missing": "TIMEOUT"}, - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "LINUX DEBUG": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testIncludeLastPlatformInFlakinessThreeOutOfFour(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations are a - # subset of the other 5/6, then give them all the same expectations. - expectations = "BUG2 MAC LINUX : 1.html = FAIL\n" - expected_results = "BUG2 LINUX MAC : 1.html = FAIL TIMEOUT\n" - updates = {"1.html": { - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testExcludeLastPlatformFromFlakiness(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations - # are not a subset of the other 5/6, then don't give them - # all the same expectations. - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = """BUG1 DEBUG : 1.html = FAIL TIMEOUT - BUG1 LINUX MAC RELEASE : 1.html = FAIL TIMEOUT - BUG1 WIN RELEASE : 1.html = FAIL CRASH - """ - updates = {"1.html": { - "WIN RELEASE": {"missing": "CRASH"}, - "WIN DEBUG": {"missing": "TIMEOUT"}, - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "LINUX DEBUG": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testStripComments(self): - expectations = """BUG1 : 1.html = TIMEOUT - - // Comment/whitespace should be removed when the test is. - BUG2 WIN RELEASE : 2.html = TEXT - - // Comment/whitespace after test should remain. - - BUG2 MAC : 2.html = TEXT - - // Comment/whitespace at end of file should remain. - """ - expected_results = """BUG1 : 1.html = TIMEOUT - - // Comment/whitespace after test should remain. - - BUG2 MAC DEBUG : 2.html = TEXT - - // Comment/whitespace at end of file should remain. 
- """ - updates = {"2.html": { - "WIN RELEASE": {"extra": "TEXT"}, - "MAC RELEASE": {"extra": "TEXT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testLeaveComments(self): - expectations = """BUG1 : 1.html = TIMEOUT - - // Comment/whitespace should remain. - BUG2 : 2.html = FAIL PASS - """ - expected_results = """BUG1 : 1.html = TIMEOUT - - // Comment/whitespace should remain. - BUG2 MAC DEBUG : 2.html = FAIL PASS - BUG2 LINUX MAC RELEASE : 2.html = FAIL PASS - """ - updates = {"2.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testLeaveCommentsIfNoWhitespaceAfterTest(self): - expectations = """// Comment/whitespace should remain. - BUG2 WIN RELEASE : 2.html = TEXT - BUG2 : 1.html = IMAGE - """ - expected_results = """// Comment/whitespace should remain. - BUG2 : 1.html = IMAGE - """ - updates = {"2.html": {"WIN RELEASE": {"extra": "TEXT"}}} - self.updateExpectations(expectations, updates, expected_results) - - def testLeavesUnmodifiedExpectationsUntouched(self): - # Ensures tests that would just change sort order of a line are noops. - expectations = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" - expected_results = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" - updates = {"1.html": {"MAC RELEASE": {"missing": "SLOW"}}} - self.updateExpectations(expectations, updates, expected_results) - - ########################################################################### - # Helper functions - - def updateExpectations(self, expectations, updates, expected_results): - results = update_expectations_from_dashboard.UpdateExpectations( - expectations, updates) - self.assertEqual(expected_results, results) - -if '__main__' == __name__: - unittest.main() diff --git a/webkit/tools/layout_tests/webkitpy/dedup-tests.py b/webkit/tools/layout_tests/webkitpy/dedup-tests.py new file mode 100755 index 0000000..0165e40 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/dedup-tests.py @@ -0,0 +1,49 @@ +#!/usr/bin/python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""dedup-tests -- print test results duplicated between win and linux. + +Because the outputs are very similar, we fall back on Windows outputs +if there isn't an expected output for Linux layout tests. This means +that any file that is duplicated between the Linux and Windows directories +is redundant. + +This command dumps out all such files. You can use it like: + dedup-tests.py # print out the bad files + dedup-tests.py | xargs git rm # delete the bad files +""" + +import collections +import os.path +import subprocess +import sys + +# A map of file hash => set of all files with that hash. +hashes = collections.defaultdict(set) + +# Fill in the map. +cmd = ['git', 'ls-tree', '-r', 'HEAD', 'webkit/data/layout_tests/'] +try: + git = subprocess.Popen(cmd, stdout=subprocess.PIPE) +except OSError, e: + if e.errno == 2: # No such file or directory. + print >> sys.stderr, "Error: 'No such file' when running git." + print >> sys.stderr, "This script requires git." + sys.exit(1) + raise e + +for line in git.stdout: + attrs, file = line.strip().split('\t') + _, _, hash = attrs.split(' ') + hashes[hash].add(file) + +# Dump out duplicated files. 
+for cluster in hashes.values(): + if len(cluster) < 2: + continue + for file in cluster: + if '/chromium-linux/' in file: + if file.replace('/chromium-linux/', '/chromium-win/') in cluster: + print file diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/__init__.py b/webkit/tools/layout_tests/webkitpy/layout_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/apache_http_server.py b/webkit/tools/layout_tests/webkitpy/layout_package/apache_http_server.py new file mode 100644 index 0000000..d11906d --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/apache_http_server.py @@ -0,0 +1,203 @@ +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A class to start/stop the apache http server used by layout tests.""" + +import logging +import optparse +import os +import re +import subprocess +import sys + +import http_server_base +import path_utils +import platform_utils + + +class LayoutTestApacheHttpd(http_server_base.HttpServerBase): + + def __init__(self, output_dir): + """Args: + output_dir: the absolute path to the layout test result directory + """ + self._output_dir = output_dir + self._httpd_proc = None + path_utils.MaybeMakeDirectory(output_dir) + + self.mappings = [{'port': 8000}, + {'port': 8080}, + {'port': 8081}, + {'port': 8443, 'sslcert': True}] + + # The upstream .conf file assumed the existence of /tmp/WebKit for + # placing apache files like the lock file there. + self._runtime_path = os.path.join("/tmp", "WebKit") + path_utils.MaybeMakeDirectory(self._runtime_path) + + # The PID returned when Apache is started goes away (due to dropping + # privileges?). The proper controlling PID is written to a file in the + # apache runtime directory. + self._pid_file = os.path.join(self._runtime_path, 'httpd.pid') + + test_dir = path_utils.PathFromBase('third_party', 'WebKit', + 'LayoutTests') + js_test_resources_dir = self._CygwinSafeJoin(test_dir, "fast", "js", + "resources") + mime_types_path = self._CygwinSafeJoin(test_dir, "http", "conf", + "mime.types") + cert_file = self._CygwinSafeJoin(test_dir, "http", "conf", + "webkit-httpd.pem") + access_log = self._CygwinSafeJoin(output_dir, "access_log.txt") + error_log = self._CygwinSafeJoin(output_dir, "error_log.txt") + document_root = self._CygwinSafeJoin(test_dir, "http", "tests") + + executable = platform_utils.ApacheExecutablePath() + if self._IsCygwin(): + executable = self._GetCygwinPath(executable) + + cmd = [executable, + '-f', self._GetApacheConfigFilePath(test_dir, output_dir), + '-C', "\'DocumentRoot %s\'" % document_root, + '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir, + '-C', "\'Listen %s\'" % "127.0.0.1:8000", + '-C', "\'Listen %s\'" % "127.0.0.1:8081", + '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, + '-c', "\'CustomLog \"%s\" common\'" % access_log, + '-c', "\'ErrorLog \"%s\"\'" % error_log, + '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", + os.environ.get("USER", ""))] + + if self._IsCygwin(): + cygbin = path_utils.PathFromBase('third_party', 'cygwin', 'bin') + # Not entirely sure why, but from cygwin we need to run the + # httpd command through bash. 
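The Cygwin branch above relies on _GetCygwinPath, defined just below, which rewrites a Windows drive path by hand because cygpath would remap the checked-in Cygwin root to /usr. A standalone version of that rewrite for quick experimentation:

    import re

    def to_cygwin_path(path):
        # 'C:\\foo\\bar' -> '/cygdrive/c/foo/bar', done by hand because cygpath
        # remaps what it thinks are Cygwin root directories.
        drive = re.compile(r'([a-zA-Z]):[/\\]')
        path = drive.sub(lambda m: '/cygdrive/%s/' % m.group(1).lower(), path)
        return path.replace('\\', '/')

    print(to_cygwin_path(r'C:\b\slave\webkit-release\build\third_party\cygwin\bin'))
    # /cygdrive/c/b/slave/webkit-release/build/third_party/cygwin/bin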
+ self._start_cmd = [ + os.path.join(cygbin, 'bash.exe'), + '-c', + 'PATH=%s %s' % (self._GetCygwinPath(cygbin), " ".join(cmd)), + ] + else: + # TODO(ojan): When we get cygwin using Apache 2, use set the + # cert file for cygwin as well. + cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) + # Join the string here so that Cygwin/Windows and Mac/Linux + # can use the same code. Otherwise, we could remove the single + # quotes above and keep cmd as a sequence. + self._start_cmd = " ".join(cmd) + + def _IsCygwin(self): + return sys.platform in ("win32", "cygwin") + + def _CygwinSafeJoin(self, *parts): + """Returns a platform appropriate path.""" + path = os.path.join(*parts) + if self._IsCygwin(): + return self._GetCygwinPath(path) + return path + + def _GetCygwinPath(self, path): + """Convert a Windows path to a cygwin path. + + The cygpath utility insists on converting paths that it thinks are + Cygwin root paths to what it thinks the correct roots are. So paths + such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin" + are converted to plain "/usr/bin". To avoid this, we + do the conversion manually. + + The path is expected to be an absolute path, on any drive. + """ + drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE) + + def LowerDrive(matchobj): + return '/cygdrive/%s/' % matchobj.group(1).lower() + path = drive_regexp.sub(LowerDrive, path) + return path.replace('\\', '/') + + def _GetApacheConfigFilePath(self, test_dir, output_dir): + """Returns the path to the apache config file to use. + Args: + test_dir: absolute path to the LayoutTests directory. + output_dir: absolute path to the layout test results directory. + """ + httpd_config = platform_utils.ApacheConfigFilePath() + httpd_config_copy = os.path.join(output_dir, "httpd.conf") + httpd_conf = open(httpd_config).read() + if self._IsCygwin(): + # This is a gross hack, but it lets us use the upstream .conf file + # and our checked in cygwin. This tells the server the root + # directory to look in for .so modules. It will use this path + # plus the relative paths to the .so files listed in the .conf + # file. We have apache/cygwin checked into our tree so + # people don't have to install it into their cygwin. + cygusr = path_utils.PathFromBase('third_party', 'cygwin', 'usr') + httpd_conf = httpd_conf.replace('ServerRoot "/usr"', + 'ServerRoot "%s"' % self._GetCygwinPath(cygusr)) + + # TODO(ojan): Instead of writing an extra file, checkin a conf file + # upstream. Or, even better, upstream/delete all our chrome http + # tests so we don't need this special-cased DocumentRoot and then + # just use the upstream + # conf file. + chrome_document_root = path_utils.PathFromBase('webkit', 'data', + 'layout_tests') + if self._IsCygwin(): + chrome_document_root = self._GetCygwinPath(chrome_document_root) + httpd_conf = (httpd_conf + + self._GetVirtualHostConfig(chrome_document_root, 8081)) + + f = open(httpd_config_copy, 'wb') + f.write(httpd_conf) + f.close() + + if self._IsCygwin(): + return self._GetCygwinPath(httpd_config_copy) + return httpd_config_copy + + def _GetVirtualHostConfig(self, document_root, port, ssl=False): + """Returns a directive block for an httpd.conf file. + It will listen to 127.0.0.1 on each of the given port. 
+ """ + return '\n'.join(('' % port, + 'DocumentRoot %s' % document_root, + ssl and 'SSLEngine On' or '', + '', '')) + + def _StartHttpdProcess(self): + """Starts the httpd process and returns whether there were errors.""" + # Use shell=True because we join the arguments into a string for + # the sake of Window/Cygwin and it needs quoting that breaks + # shell=False. + self._httpd_proc = subprocess.Popen(self._start_cmd, + stderr=subprocess.PIPE, + shell=True) + err = self._httpd_proc.stderr.read() + if len(err): + logging.debug(err) + return False + return True + + def Start(self): + """Starts the apache http server.""" + # Stop any currently running servers. + self.Stop() + + logging.debug("Starting apache http server") + server_started = self.WaitForAction(self._StartHttpdProcess) + if server_started: + logging.debug("Apache started. Testing ports") + server_started = self.WaitForAction(self.IsServerRunningOnAllPorts) + + if server_started: + logging.debug("Server successfully started") + else: + raise Exception('Failed to start http server') + + def Stop(self): + """Stops the apache http server.""" + logging.debug("Shutting down any running http servers") + httpd_pid = None + if os.path.exists(self._pid_file): + httpd_pid = int(open(self._pid_file).readline()) + path_utils.ShutDownHTTPServer(httpd_pid) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/failure.py b/webkit/tools/layout_tests/webkitpy/layout_package/failure.py new file mode 100644 index 0000000..50ef743 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/failure.py @@ -0,0 +1,200 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +CHROMIUM_WIN = "chromium-win" +CHROMIUM_MAC = "chromium-mac" +CHROMIUM_LINUX = "chromium-linux" +WEBKIT_WIN_TITLE = "WebKit Win" +WEBKIT_MAC_TITLE = "WebKit Mac" +WEBKIT_TITLE = "WebKit" +UNKNOWN = "Unknown" + +EXPECTED_IMAGE_FILE_ENDING = "-expected.png" +ACTUAL_IMAGE_FILE_ENDING = "-actual.png" +UPSTREAM_IMAGE_FILE_ENDING = "-expected-upstream.png" +EXPECTED_TEXT_FILE_ENDING = "-expected.txt" +ACTUAL_TEXT_FILE_ENDING = "-actual.txt" +DIFF_IMAGE_FILE_ENDING = "-diff.png" +DIFF_TEXT_FILE_ENDING = "-diff.txt" + +CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/" +CHROMIUM_TRAC_HOME = CHROMIUM_SRC_HOME + "data/layout_tests/" +WEBKIT_TRAC_HOME = "http://trac.webkit.org/browser/trunk/LayoutTests/" +WEBKIT_SVN_HOSTNAME = "svn.webkit.org" +THIRD_PARTY = "third_party" + +WEBKIT_PLATFORM_URL_BASE = WEBKIT_TRAC_HOME + "platform" +WEBKIT_LAYOUT_TEST_BASE_URL = "http://svn.webkit.org/repository/webkit/trunk/" +WEBKIT_IMAGE_BASELINE_BASE_URL_WIN = (WEBKIT_LAYOUT_TEST_BASE_URL + + "LayoutTests/platform/win/") +WEBKIT_IMAGE_BASELINE_BASE_URL_MAC = (WEBKIT_LAYOUT_TEST_BASE_URL + + "LayoutTests/platform/mac/") +WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_MAC = WEBKIT_PLATFORM_URL_BASE + "/mac/" +WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_WIN = WEBKIT_PLATFORM_URL_BASE + "/win/" + +LAYOUT_TEST_RESULTS_DIR = "layout-test-results" + +FAIL = "FAIL" +TIMEOUT = "TIMEOUT" +CRASH = "CRASH" +PASS = "PASS" +WONTFIX = "WONTFIX" + + +class Failure(object): + """ + This class represents a failure in the test output, and is + intended as a data model object. 
+ """ + + def __init__(self): + self.platform = "" + self.test_path = "" + self.text_diff_mismatch = False + self.image_mismatch = False + self.timeout = False + self.crashed = False + self.text_baseline_url = "" + self.image_baseline_url = "" + self.text_baseline_age = "" + self.image_baseline_age = "" + self.test_age = "" + self.text_baseline_local = "" + self.image_baseline_local = "" + self.text_actual_local = "" + self.image_actual_local = "" + self.image_baseline_upstream_url = "" + self.image_baseline_upstream_local = "" + self.test_expectations_line = "" + self.flakiness = 0 + + def GetExpectedImageFilename(self): + return self._RenameEndOfTestPath(EXPECTED_IMAGE_FILE_ENDING) + + def GetActualImageFilename(self): + return self._RenameEndOfTestPath(ACTUAL_IMAGE_FILE_ENDING) + + def GetExpectedTextFilename(self): + return self._RenameEndOfTestPath(EXPECTED_TEXT_FILE_ENDING) + + def GetActualTextFilename(self): + return self._RenameEndOfTestPath(ACTUAL_TEXT_FILE_ENDING) + + def GetImageDiffFilename(self): + return self._RenameEndOfTestPath(DIFF_IMAGE_FILE_ENDING) + + def GetTextDiffFilename(self): + return self._RenameEndOfTestPath(DIFF_TEXT_FILE_ENDING) + + def GetImageUpstreamFilename(self): + return self._RenameEndOfTestPath(UPSTREAM_IMAGE_FILE_ENDING) + + def _RenameEndOfTestPath(self, suffix): + last_index = self.test_path.rfind(".") + if last_index == -1: + return self.test_path + return self.test_path[0:last_index] + suffix + + def GetTestHome(self): + if self.test_path.startswith("chrome"): + return CHROMIUM_TRAC_HOME + self.test_path + return WEBKIT_TRAC_HOME + self.test_path + + def GetImageBaselineTracHome(self): + if self.IsImageBaselineInWebkit(): + return self._GetTracHome(self.image_baseline_url) + return self.image_baseline_url + + def GetTextBaselineTracHome(self): + if self.text_baseline_url and self.IsTextBaselineInWebkit(): + return self._GetTracHome(self.text_baseline_url) + return self.text_baseline_url + + def _GetTracHome(self, file): + return WEBKIT_TRAC_HOME + file[file.find("LayoutTests"):] + + def GetTextBaselineLocation(self): + return self._GetFileLocation(self.text_baseline_url) + + def GetImageBaselineLocation(self): + return self._GetFileLocation(self.image_baseline_url) + + # TODO(gwilson): Refactor this logic so it can be used by multiple scripts. + # TODO(gwilson): Change this so that it respects the fallback order of + # different platforms. (If platform is mac, the fallback should be + # different.) + + def _GetFileLocation(self, file): + if not file: + return None + if file.find(CHROMIUM_WIN) > -1: + return CHROMIUM_WIN + if file.find(CHROMIUM_MAC) > -1: + return CHROMIUM_MAC + if file.find(CHROMIUM_LINUX) > -1: + return CHROMIUM_LINUX + if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_WIN): + return WEBKIT_WIN_TITLE + if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_MAC): + return WEBKIT_MAC_TITLE + # TODO(gwilson): Add mac-snowleopard, mac-leopard, mac-tiger here. 
+ if file.startswith(WEBKIT_LAYOUT_TEST_BASE_URL): + return WEBKIT_TITLE + return UNKNOWN + + def _IsFileInWebKit(self, file): + return file != None and (file.find(WEBKIT_SVN_HOSTNAME) > -1 or + file.find(THIRD_PARTY) > -1) + + def IsImageBaselineInChromium(self): + return not self.IsImageBaselineInWebkit() + + def IsImageBaselineInWebkit(self): + return self._IsFileInWebKit(self.image_baseline_url) + + def IsTextBaselineInChromium(self): + return not self.IsTextBaselineInWebkit() + + def IsTextBaselineInWebkit(self): + return self._IsFileInWebKit(self.text_baseline_url) + + def GetTextResultLocationInZipFile(self): + return self._GetFileLocationInZipFile(self.GetActualTextFilename()) + + def GetImageResultLocationInZipFile(self): + return self._GetFileLocationInZipFile(self.GetActualImageFilename()) + + def _GetFileLocationInZipFile(self, file): + return "%s/%s" % (LAYOUT_TEST_RESULTS_DIR, file) + + # TODO(gwilson): implement this method. + def GetAllBaselineLocations(self): + return None + + # This method determines whether the test is actually expected to fail, + # in order to know whether to retrieve expected test results for it. + # (test results dont exist for tests expected to fail/crash.) + + def IsExpectedToFail(self): + return self._FindKeywordInExpectations(FAIL) + + def IsExpectedToTimeout(self): + return self._FindKeywordInExpectations(TIMEOUT) + + def IsExpectedToCrash(self): + return self._FindKeywordInExpectations(CRASH) + + def IsExpectedToPass(self): + return self._FindKeywordInExpectations(PASS) + + def IsWontFix(self): + return self._FindKeywordInExpectations(WONTFIX) + + def _FindKeywordInExpectations(self, keyword): + if (not self.test_expectations_line or + len(self.test_expectations_line) == 0): + return False + if self.test_expectations_line.find(keyword) > -1: + return True + return False diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder.py b/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder.py new file mode 100644 index 0000000..d8aa34f --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder.py @@ -0,0 +1,892 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# TODO(gwilson): 1. Change text differs to use external utils. +# 2. Change text_expectations parsing to existing +# logic in layout_pagckage.test_expectations. + +import difflib +import errno +import os +import path_utils +import platform_utils +import re +import shutil +import subprocess +import sys +import urllib2 +import zipfile + +from failure import Failure + +WEBKIT_TRAC_HOSTNAME = "trac.webkit.org" +WEBKIT_LAYOUT_TEST_BASE_URL = ("http://svn.webkit.org/repository/" + "webkit/trunk/LayoutTests/") +WEBKIT_PLATFORM_BASELINE_URL = (WEBKIT_LAYOUT_TEST_BASE_URL + + "platform/%s/") + +BUILDBOT_BASE = "http://build.chromium.org/buildbot/" +WEBKIT_BUILDER_BASE = BUILDBOT_BASE + "waterfall/builders/%s" +FYI_BUILDER_BASE = BUILDBOT_BASE + "waterfall.fyi/builders/%s" +RESULTS_URL_BASE = "/builds/%s/steps/webkit_tests/logs/stdio" +ARCHIVE_URL_BASE = "/builds/%s/steps/archive_webkit_tests_results/logs/stdio" +ZIP_FILE_URL_BASE = (BUILDBOT_BASE + + "layout_test_results/%s/%s/layout-test-results.zip") +CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/" +LAYOUT_TEST_REPO_BASE_URL = CHROMIUM_SRC_HOME + "data/layout_tests/" + +# TODO(gwilson): Put flaky test dashboard URL here when ready. 
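# Illustrative sketch with hypothetical dashboard content: _GetFlakiness()
# below formats FLAKY_TEST_REGEX with a test path and reads the first captured
# number as the flakiness score, so content containing
# "LayoutTests/fast/dom/foo.html37" would yield "37". While FLAKY_TEST_URL is
# left empty, no score is looked up.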
+FLAKY_TEST_URL = "" +FLAKY_TEST_REGEX = "%s(\d+)" + +TEST_EXPECTATIONS_URL = (CHROMIUM_SRC_HOME + + "tools/layout_tests/test_expectations.txt") + +# Failure types as found in builder stdio. +TEXT_DIFF_MISMATCH = "Text diff mismatch" +IMAGE_MISMATCH = "Image mismatch" +TEST_TIMED_OUT = "Test timed out" +TEST_SHELL_CRASHED = "Test shell crashed" + +CHROMIUM_WIN = "chromium-win" +CHROMIUM_WIN_XP = "chromium-win-xp" +CHROMIUM_WIN_VISTA = "chromium-win-vista" +CHROMIUM_WIN_7 = "chromium-win-7" +CHROMIUM_MAC = "chromium-mac" +CHROMIUM_LINUX = "chromium-linux" +PLATFORM = "platform" +LAYOUTTESTS = "LayoutTests" + +# These platform dirs must be in order of their precedence. +# TODO(gwilson): This is not the same fallback order as test_shell. This list +# should be reversed, and we need to add detection for the type of OS that +# the given builder is running. +WEBKIT_MAC_PLATFORM_DIRS = ["mac-leopard", "mac-snowleopard", "mac"] +WEBKIT_WIN_PLATFORM_DIRS = ["win", "mac"] +CHROMIUM_MAC_PLATFORM_DIRS = [CHROMIUM_MAC] +CHROMIUM_WIN_PLATFORM_DIRS = [CHROMIUM_WIN_XP, CHROMIUM_WIN_VISTA, + CHROMIUM_WIN_7, CHROMIUM_WIN] +CHROMIUM_LINUX_PLATFORM_DIRS = [CHROMIUM_LINUX, CHROMIUM_WIN] + +ARCHIVE_URL_REGEX = "last.*change: (\d+)" +BUILD_NAME_REGEX = "build name: ([^\s]*)" +CHROMIUM_FILE_AGE_REGEX = '
\s*Modified\s*.* \((.*)\) by' +TEST_PATH_REGEX = "[^\s]+?" +FAILED_REGEX = ("DEBUG (" + TEST_PATH_REGEX + ") failed:\s*" + "(" + TEXT_DIFF_MISMATCH + ")?\s*" + "(" + IMAGE_MISMATCH + ")?\s*" + "(" + TEST_TIMED_OUT + ")?\s*" + "(" + TEST_SHELL_CRASHED + ")?") +FAILED_UNEXPECTED_REGEX = " [^\s]+(?: = .*?)?\n" +LAST_BUILD_REGEX = ("

Recent Builds:

" + "[\s\S]*?") +# Sometimes the lines of hyphens gets interrupted with multiple processes +# outputting to stdio, so don't rely on them being contiguous. +SUMMARY_REGEX = ("\d+ tests ran as expected, " + "\d+ didn't:(.*?)-{78}") # -{78} --> 78 dashes in a row. +SUMMARY_REGRESSIONS = "Regressions:.*?\n((?: [^\s]+(?: = .*?)?\n)+)" +TEST_EXPECTATIONS_PLATFORM_REGEX = "((WONTFIX |BUG.* )+.* %s.* : %s = [^\n]*)" +TEST_EXPECTATIONS_NO_PLATFORM_REGEX = ("((WONTFIX |BUG.* )+.*" + "(?!WIN)(?!LINUX)(?!MAC).* :" + " %s = [^\n]*)") + +WEBKIT_FILE_AGE_REGEX = ('.*?.' + '*?\s*' + '(.*?)') + +LOCAL_BASELINE_REGEXES = [ + ".*/third_party/Webkit/LayoutTests/platform/.*?(/.*)", + ".*/third_party/Webkit/LayoutTests(/.*)", + ".*/webkit/data/layout_tests/platform/.*?/LayoutTests(/.*)", + ".*/webkit/data/layout_tests/platform/.*?(/.*)", + ".*/webkit/data/layout_tests(/.*)", + "(/.*)"] + +UPSTREAM_IMAGE_FILE_ENDING = "-upstream.png" + +TEST_EXPECTATIONS_WONTFIX = "WONTFIX" + +TEMP_ZIP_DIR = "temp-zip-dir" + +TARGETS = ["Release", "Debug"] + + +def GetURLBase(use_fyi): + if use_fyi: + return FYI_BUILDER_BASE + return WEBKIT_BUILDER_BASE + + +def GetResultsURL(build, platform, use_fyi=False): + return (GetURLBase(use_fyi) + RESULTS_URL_BASE) % (platform, build) + + +def GetArchiveURL(build, platform, use_fyi=False): + return (GetURLBase(use_fyi) + ARCHIVE_URL_BASE) % (platform, build) + + +def GetZipFileURL(build, platform): + return ZIP_FILE_URL_BASE % (platform, build) + + +def GetBuilderURL(platform, use_fyi=False): + return GetURLBase(use_fyi) % platform + + +# TODO(gwilson): Once the new flakiness dashboard is done, connect it here. +def GetFlakyTestURL(platform): + return "" + + +# TODO(gwilson): can we refactor these into the resourcegatherer? +def IsLinuxPlatform(platform): + return (platform and platform.find("Linux") > -1) + + +def IsMacPlatform(platform): + return (platform and platform.find("Mac") > -1) + + +def CreateDirectory(dir): + """ + Method that creates the directory structure given. + This will create directories recursively until the given dir exists. + """ + if not os.path.exists(dir): + os.makedirs(dir, 0777) + + +def ExtractFirstValue(string, regex): + m = re.search(regex, string) + if m and m.group(1): + return m.group(1) + return None + + +def ExtractSingleRegexAtURL(url, regex): + content = ScrapeURL(url) + m = re.search(regex, content, re.DOTALL) + if m and m.group(1): + return m.group(1) + return None + + +def ScrapeURL(url): + return urllib2.urlopen(urllib2.Request(url)).read() + + +def GetImageDiffExecutable(): + for target in TARGETS: + try: + return path_utils.ImageDiffPath(target) + except Exception, e: + continue + # This build target did not exist, try the next one. + raise Exception("No image diff executable could be found. You may need " + "to build the image diff project under at least one build " + "target to create image diffs.") + + +def GeneratePNGDiff(file1, file2, output_file): + _compare_available = False + try: + executable = GetImageDiffExecutable() + cmd = [executable, '--diff', file1, file2, output_file] + _compare_available = True + except Exception, e: + print "No command line to compare %s and %s : %s" % (file1, file2, e) + + result = 1 + if _compare_available: + try: + result = subprocess.call(cmd) + except OSError, e: + if e.errno == errno.ENOENT or e.errno == errno.EACCES: + _compare_available = False + print "No possible comparison between %s and %s." % ( + file1, file2) + else: + raise e + if not result: + print "The given PNG images were the same!" 
+ return _compare_available + + +# TODO(gwilson): Change this to use the pretty print differs. +def GenerateTextDiff(file1, file2, output_file): + # Open up expected and actual text files and use difflib to compare them. + dataA = open(file1, 'r').read() + dataB = open(file2, 'r').read() + d = difflib.Differ() + diffs = list(d.compare(dataA.split("\n"), dataB.split("\n"))) + output = open(output_file, 'w') + output.write("\n".join(diffs)) + output.close() + + +class BaselineCandidate(object): + """Simple data object for holding the URL and local file path of a + possible baseline. The local file path is meant to refer to the locally- + cached version of the file at the URL.""" + + def __init__(self, local, url): + self.local_file = local + self.baseline_url = url + + def IsValid(self): + return self.local_file != None and self.baseline_url != None + + +class FailureFinder(object): + + def __init__(self, + build, + builder_name, + exclude_known_failures, + test_regex, + output_dir, + max_failures, + verbose, + builder_output_log_file=None, + archive_step_log_file=None, + zip_file=None, + test_expectations_file=None): + self.build = build + # TODO(gwilson): add full url-encoding for the platform. + self.SetPlatform(builder_name) + self.exclude_known_failures = exclude_known_failures + self.exclude_wontfix = True + self.test_regex = test_regex + self.output_dir = output_dir + self.max_failures = max_failures + self.verbose = verbose + self.fyi_builder = False + self._flaky_test_cache = {} + self._test_expectations_cache = None + # If true, scraping will still happen but no files will be downloaded. + self.dont_download = False + # Local caches of log files. If set, the finder will use these files + # rather than scraping them from the buildbot. + self.builder_output_log_file = builder_output_log_file + self.archive_step_log_file = archive_step_log_file + self.zip_file = zip_file + self.test_expectations_file = test_expectations_file + self.delete_zip_file = True + # Determines if the script should scrape the baselines from webkit.org + # and chromium.org, or if it should use local baselines in the current + # checkout. + self.use_local_baselines = False + + def SetPlatform(self, platform): + self.platform = platform.replace(" ", "%20") + + # TODO(gwilson): Change this to get the last build that finished + # successfully. + + def GetLastBuild(self): + """ + Returns the last build number for this platform. + If use_fyi is true, this only looks at the fyi builder. + """ + try: + return ExtractSingleRegexAtURL(GetBuilderURL(self.platform, + self.fyi_builder), + LAST_BUILD_REGEX) + except urllib2.HTTPError: + if not self.fyi_builder: + self.fyi_builder = True + return self.GetLastBuild() + + def GetFailures(self): + if not self.build: + self.build = self.GetLastBuild() + if self.verbose: + print "Using build number %s" % self.build + + if self.use_local_baselines: + self._BuildBaselineIndexes() + self.failures = self._GetFailuresFromBuilder() + if (self.failures and + (self._DownloadResultResources() or self.dont_download)): + return self.failures + return None + + def _GetFailuresFromBuilder(self): + """ + Returns a list of failures for the given build and platform by scraping + the buildbots and parsing their results. + The list returned contains Failure class objects. + """ + if self.verbose: + print "Fetching failures from buildbot..." 
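        # Illustrative sketch with a hypothetical log line: a builder stdio
        # line such as
        #   "... DEBUG LayoutTests/fast/dom/foo.html failed: Text diff mismatch"
        # is matched by FAILED_REGEX (see _FindMatchesInBuilderOutput below) as
        # the tuple
        #   ("LayoutTests/fast/dom/foo.html", "Text diff mismatch", "", "", "")
        # which _CreateFailureFromMatch turns into a Failure with
        # text_diff_mismatch=True and the other failure flags False.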
+ + content = self._ScrapeBuilderOutput() + if not content: + return None + matches = self._FindMatchesInBuilderOutput(content) + + if self.verbose: + print "%s failures found." % len(matches) + + failures = [] + matches.sort() + for match in matches: + if (len(failures) < self.max_failures and + (not self.test_regex or match[0].find(self.test_regex) > -1)): + failure = self._CreateFailureFromMatch(match) + if self.verbose: + print failure.test_path + failures.append(failure) + + return failures + + def _ScrapeBuilderOutput(self): + # If the build log file is specified, use that instead of scraping. + if self.builder_output_log_file: + log = open(self.builder_output_log_file, 'r') + return "".join(log.readlines()) + + # Scrape the failures from the buildbot for this revision. + try: + + return ScrapeURL(GetResultsURL(self.build, + self.platform, + self.fyi_builder)) + except: + # If we hit a problem, and we're not on the FYI builder, try it + # again on the FYI builder. + if not self.fyi_builder: + if self.verbose: + print ("Could not find builder on waterfall, trying fyi " + "waterfall...") + self.fyi_builder = True + return self._ScrapeBuilderOutput() + print "I could not find that builder, or build did not compile." + print "Check that the builder name matches exactly " + print "(case sensitive), and wrap quotes around builder names " + print "that have spaces." + return None + + # TODO(gwilson): The type of failure is now output in the summary, so no + # matching between the summary and the earlier output is necessary. + # Change this method and others to derive failure types from summary only. + + def _FindMatchesInBuilderOutput(self, output): + matches = [] + matches = re.findall(FAILED_REGEX, output, re.MULTILINE) + if self.exclude_known_failures: + summary = re.search(SUMMARY_REGEX, output, re.DOTALL) + regressions = [] + if summary: + regressions = self._FindRegressionsInSummary(summary.group(1)) + matches = self._MatchRegressionsToFailures(regressions, matches) + return matches + + def _CreateFailureFromMatch(self, match): + failure = Failure() + failure.text_diff_mismatch = match[1] != '' + failure.image_mismatch = match[2] != '' + failure.crashed = match[4] != '' + failure.timeout = match[3] != '' + failure.test_path = match[0] + failure.platform = self.platform + return failure + + def _FindRegressionsInSummary(self, summary): + regressions = [] + if not summary or not len(summary): + return regressions + matches = re.findall(SUMMARY_REGRESSIONS, summary, re.DOTALL) + for match in matches: + lines = re.findall(FAILED_UNEXPECTED_REGEX, match, re.DOTALL) + for line in lines: + clipped = line.strip() + if clipped.find("=") > -1: + clipped = clipped[:clipped.find("=") - 1] + regressions.append(clipped) + return regressions + + def _MatchRegressionsToFailures(self, regressions, failures): + matches = [] + for regression in regressions: + for failure in failures: + if failure[0].find(regression) > -1: + matches.append(failure) + break + return matches + + # TODO(gwilson): add support for multiple conflicting build numbers by + # renaming the zip file and naming the directory appropriately. + + def _DownloadResultResources(self): + """ + Finds and downloads/extracts all of the test results (pixel/text + output) for all of the given failures. 
+ """ + + target_zip = "%s/layout-test-results-%s.zip" % (self.output_dir, + self.build) + if self.zip_file: + filename = self.zip_file + self.delete_zip_file = False + else: + revision, build_name = self._GetRevisionAndBuildFromArchiveStep() + zip_url = GetZipFileURL(revision, build_name) + if self.verbose: + print "Downloading zip file from %s to %s" % (zip_url, + target_zip) + filename = self._DownloadFile(zip_url, target_zip, "b") + if not filename: + if self.verbose: + print ("Could not download zip file from %s. " + "Does it exist?" % zip_url) + return False + + if zipfile.is_zipfile(filename): + zip = zipfile.ZipFile(filename) + if self.verbose: + print 'Extracting files...' + directory = "%s/layout-test-results-%s" % (self.output_dir, + self.build) + CreateDirectory(directory) + self._UnzipZipfile(zip, TEMP_ZIP_DIR) + + for failure in self.failures: + failure.test_expectations_line = ( + self._GetTestExpectationsLine(failure.test_path)) + if self.exclude_wontfix and failure.IsWontFix(): + self.failures.remove(failure) + continue + if failure.text_diff_mismatch: + self._PopulateTextFailure(failure, directory, zip) + if failure.image_mismatch: + self._PopulateImageFailure(failure, directory, zip) + if not self.use_local_baselines: + failure.test_age = self._GetFileAge(failure.GetTestHome()) + failure.flakiness = self._GetFlakiness(failure.test_path, + self.platform) + zip.close() + if self.verbose: + print "Files extracted." + if self.delete_zip_file: + if self.verbose: + print "Cleaning up zip file..." + path_utils.RemoveDirectory(TEMP_ZIP_DIR) + os.remove(filename) + return True + else: + if self.verbose: + print ("Downloaded file '%s' doesn't look like a zip file." + % filename) + return False + + def _UnzipZipfile(self, zip, base_dir): + for i, name in enumerate(zip.namelist()): + if not name.endswith('/'): + extracted_file_path = os.path.join(base_dir, name) + try: + (path, filename) = os.path.split(extracted_file_path) + os.makedirs(path, 0777) + except: + pass + outfile = open(extracted_file_path, 'wb') + outfile.write(zip.read(name)) + outfile.flush() + outfile.close() + os.chmod(extracted_file_path, 0777) + + def _GetRevisionAndBuildFromArchiveStep(self): + if self.archive_step_log_file: + log = open(self.archive_step_log_file, 'r') + content = "".join(log.readlines()) + else: + content = ScrapeURL(GetArchiveURL(self.build, + self.platform, + self.fyi_builder)) + revision = ExtractFirstValue(content, ARCHIVE_URL_REGEX) + build_name = ExtractFirstValue(content, BUILD_NAME_REGEX) + return (revision, build_name) + + def _PopulateTextFailure(self, failure, directory, zip): + baseline = self._GetBaseline(failure.GetExpectedTextFilename(), + directory) + failure.text_baseline_local = baseline.local_file + failure.text_baseline_url = baseline.baseline_url + failure.text_baseline_age = ( + self._GetFileAge(failure.GetTextBaselineTracHome())) + failure.text_actual_local = "%s/%s" % (directory, + failure.GetActualTextFilename()) + if (baseline and baseline.IsValid() and not self.dont_download): + self._CopyFileFromZipDir(failure.GetTextResultLocationInZipFile(), + failure.text_actual_local) + GenerateTextDiff(failure.text_baseline_local, + failure.text_actual_local, + directory + "/" + failure.GetTextDiffFilename()) + + def _PopulateImageFailure(self, failure, directory, zip): + baseline = self._GetBaseline(failure.GetExpectedImageFilename(), + directory) + failure.image_baseline_local = baseline.local_file + failure.image_baseline_url = baseline.baseline_url + if baseline and 
baseline.IsValid(): + failure.image_baseline_age = ( + self._GetFileAge(failure.GetImageBaselineTracHome())) + failure.image_actual_local = "%s/%s" % (directory, + failure.GetActualImageFilename()) + self._CopyFileFromZipDir(failure.GetImageResultLocationInZipFile(), + failure.image_actual_local) + if (not GeneratePNGDiff(failure.image_baseline_local, + failure.image_actual_local, + "%s/%s" % + (directory, failure.GetImageDiffFilename())) + and self.verbose): + print "Could not generate PNG diff for %s" % failure.test_path + if failure.IsImageBaselineInChromium() or self.use_local_baselines: + upstream_baseline = ( + self._GetUpstreamBaseline(failure.GetExpectedImageFilename(), + directory)) + failure.image_baseline_upstream_local = \ + upstream_baseline.local_file + failure.image_baseline_upstream_url = \ + upstream_baseline.baseline_url + + def _GetBaseline(self, filename, directory, upstream_only=False): + """ Search and download the baseline for the given test (put it in the + directory given.)""" + + local_filename = os.path.join(directory, filename) + local_directory = local_filename[:local_filename.rfind("/")] + if upstream_only: + last_index = local_filename.rfind(".") + if last_index > -1: + local_filename = (local_filename[:last_index] + + UPSTREAM_IMAGE_FILE_ENDING) + + download_file_modifiers = "" + if local_filename.endswith(".png"): + download_file_modifiers = "b" # binary file + + if not self.dont_download: + CreateDirectory(local_directory) + + local_baseline = None + url_of_baseline = None + + if self.use_local_baselines: + test_path_key = self._NormalizeBaselineIdentifier(filename) + dict = self.baseline_dict + if upstream_only: + dict = self.webkit_baseline_dict + if test_path_key in dict: + local_baseline = dict[test_path_key] + url_of_baseline = local_baseline + shutil.copy(local_baseline, local_directory) + elif self.verbose: + print ("Baseline %s does not exist in the index." % + test_path_key) + else: + index = 0 + possible_files = self._GetPossibleFileList(filename, upstream_only) + # Download the baselines from the webkit.org site. + while local_baseline == None and index < len(possible_files): + local_baseline = self._DownloadFile(possible_files[index], + local_filename, + download_file_modifiers, + True) + if local_baseline: + url_of_baseline = possible_files[index] + index += 1 + + if not local_baseline: + if self.verbose: + print "Could not find any baseline for %s" % filename + else: + local_baseline = os.path.normpath(local_baseline) + if local_baseline and self.verbose: + print "Found baseline: %s" % url_of_baseline + + return BaselineCandidate(local_baseline, url_of_baseline) + + def _AddBaselinePaths(self, list, base_path, directories): + for dir in directories: + list.append(os.path.join(base_path, dir)) + + # TODO(gwilson): Refactor this method to use + # platform_utils_*.BaselineSearchPath instead of custom logic. + + def _BuildBaselineIndexes(self): + """ Builds an index of all the known local baselines in both chromium + and webkit. Two baselines are created, a webkit-specific (no chromium + baseline) dictionary and an overall (both) dictionary. Each one has a + structure like: "/fast/dom/one-expected.txt" -> + "C:\\path\\to\\fast\\dom\\one-expected.txt" + """ + if self.verbose: + print "Building index of all local baselines..." 
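        # Illustrative note, assuming a hypothetical Windows checkout: after
        # the walks below, webkit_baseline_dict holds only the webkit-side
        # baselines, while baseline_dict gets the chromium ones plus any
        # webkit entries whose keys chromium does not already define, e.g.
        #   self.baseline_dict["/fast/dom/one-expected.txt"]
        #       == "C:\\src\\third_party\\Webkit\\LayoutTests\\fast\\dom\\one-expected.txt"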
+ + self.baseline_dict = {} + self.webkit_baseline_dict = {} + + base = os.path.abspath(os.path.curdir) + webkit_base = path_utils.PathFromBase('third_party', 'Webkit', + 'LayoutTests') + chromium_base = path_utils.PathFromBase('webkit', 'data', + 'layout_tests') + chromium_base_platform = os.path.join(chromium_base, PLATFORM) + webkit_base_platform = os.path.join(webkit_base, PLATFORM) + + possible_chromium_files = [] + possible_webkit_files = [] + + if IsMacPlatform(self.platform): + self._AddBaselinePaths(possible_chromium_files, + chromium_base_platform, + CHROMIUM_MAC_PLATFORM_DIRS) + self._AddBaselinePaths(possible_chromium_files, + webkit_base_platform, + WEBKIT_MAC_PLATFORM_DIRS) + self._AddBaselinePaths(possible_webkit_files, + webkit_base_platform, + WEBKIT_MAC_PLATFORM_DIRS) + elif IsLinuxPlatform(self.platform): + self._AddBaselinePaths(possible_chromium_files, + chromium_base_platform, + CHROMIUM_LINUX_PLATFORM_DIRS) + else: + self._AddBaselinePaths(possible_chromium_files, + chromium_base_platform, + CHROMIUM_WIN_PLATFORM_DIRS) + + if not IsMacPlatform(self.platform): + self._AddBaselinePaths(possible_webkit_files, + webkit_base_platform, + WEBKIT_WIN_PLATFORM_DIRS) + + possible_webkit_files.append(webkit_base) + + self._PopulateBaselineDict(possible_webkit_files, + self.webkit_baseline_dict) + self._PopulateBaselineDict(possible_chromium_files, + self.baseline_dict) + for key in self.webkit_baseline_dict.keys(): + if not key in self.baseline_dict: + self.baseline_dict[key] = self.webkit_baseline_dict[key] + + return True + + def _PopulateBaselineDict(self, directories, dictionary): + for dir in directories: + os.path.walk(dir, self._VisitBaselineDir, dictionary) + + def _VisitBaselineDir(self, dict, dirname, names): + """ Method intended to be called by os.path.walk to build up an index + of where all the test baselines exist. """ + # Exclude .svn from the walk, since we don't care what is in these + # dirs. + if '.svn' in names: + names.remove('.svn') + for name in names: + if name.find("-expected.") > -1: + test_path_key = os.path.join(dirname, name) + # Fix path separators to match the separators used on + # the buildbots. + test_path_key = test_path_key.replace("\\", "/") + test_path_key = self._NormalizeBaselineIdentifier( + test_path_key) + if not test_path_key in dict: + dict[test_path_key] = os.path.join(dirname, name) + + # TODO(gwilson): Simplify identifier creation to not rely so heavily on + # directory and path names. + + def _NormalizeBaselineIdentifier(self, test_path): + """ Given either a baseline path (i.e. /LayoutTests/platform/mac/...) + or a test path (i.e. /LayoutTests/fast/dom/....) will normalize + to a unique identifier. This is basically a hashing function for + layout test paths.""" + + for regex in LOCAL_BASELINE_REGEXES: + value = ExtractFirstValue(test_path, regex) + if value: + return value + return test_path + + def _AddBaselineURLs(self, list, base_url, platforms): + # If the base URL doesn't contain any platform in its path, only add + # the base URL to the list. This happens with the chrome/ dir. + if base_url.find("%s") == -1: + list.append(base_url) + return + for platform in platforms: + list.append(base_url % platform) + + # TODO(gwilson): Refactor this method to use + # platform_utils_*.BaselineSearchPath instead of custom logic. This may + # require some kind of wrapper since this method looks for URLs instead + # of local paths. 
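    # Illustrative sketch with hypothetical checkout paths of how
    # _NormalizeBaselineIdentifier above collapses platform-specific and
    # generic copies of a baseline onto one key via LOCAL_BASELINE_REGEXES:
    #   _NormalizeBaselineIdentifier(
    #       "/src/third_party/Webkit/LayoutTests/platform/mac/fast/dom/one-expected.txt")
    #   # -> "/fast/dom/one-expected.txt"
    #   _NormalizeBaselineIdentifier(
    #       "/src/webkit/data/layout_tests/platform/chromium-win/fast/dom/one-expected.txt")
    #   # -> "/fast/dom/one-expected.txt"
    # so both index into the same slot of the baseline dictionaries.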
+ + def _GetPossibleFileList(self, filename, only_webkit): + """ Returns a list of possible filename locations for the given file. + Uses the platform of the class to determine the order. + """ + + possible_chromium_files = [] + possible_webkit_files = [] + + chromium_platform_url = LAYOUT_TEST_REPO_BASE_URL + if not filename.startswith("chrome"): + chromium_platform_url += "platform/%s/" + chromium_platform_url += filename + + webkit_platform_url = WEBKIT_PLATFORM_BASELINE_URL + filename + + if IsMacPlatform(self.platform): + self._AddBaselineURLs(possible_chromium_files, + chromium_platform_url, + CHROMIUM_MAC_PLATFORM_DIRS) + self._AddBaselineURLs(possible_webkit_files, + webkit_platform_url, + WEBKIT_MAC_PLATFORM_DIRS) + elif IsLinuxPlatform(self.platform): + self._AddBaselineURLs(possible_chromium_files, + chromium_platform_url, + CHROMIUM_LINUX_PLATFORM_DIRS) + else: + self._AddBaselineURLs(possible_chromium_files, + chromium_platform_url, + CHROMIUM_WIN_PLATFORM_DIRS) + + if not IsMacPlatform(self.platform): + self._AddBaselineURLs(possible_webkit_files, + webkit_platform_url, + WEBKIT_WIN_PLATFORM_DIRS) + possible_webkit_files.append(WEBKIT_LAYOUT_TEST_BASE_URL + filename) + + if only_webkit: + return possible_webkit_files + return possible_chromium_files + possible_webkit_files + + # Like _GetBaseline, but only retrieves the baseline from upstream (skip + # looking in chromium). + + def _GetUpstreamBaseline(self, filename, directory): + return self._GetBaseline(filename, directory, upstream_only=True) + + def _GetFileAge(self, url): + # Check if the given URL is really a local file path. + if not url or not url.startswith("http"): + return None + try: + if url.find(WEBKIT_TRAC_HOSTNAME) > -1: + return ExtractSingleRegexAtURL(url[:url.rfind("/")], + WEBKIT_FILE_AGE_REGEX % + url[url.find("/browser"):]) + else: + return ExtractSingleRegexAtURL(url + "?view=log", + CHROMIUM_FILE_AGE_REGEX) + except: + if self.verbose: + print "Could not find age for %s. Does the file exist?" % url + return None + + # Returns a flakiness on a scale of 1-50. + # TODO(gwilson): modify this to also return which of the last 10 + # builds failed for this test. + + def _GetFlakiness(self, test_path, target_platform): + url = GetFlakyTestURL(target_platform) + if url == "": + return None + + if url in self._flaky_test_cache: + content = self._flaky_test_cache[url] + else: + content = urllib2.urlopen(urllib2.Request(url)).read() + self._flaky_test_cache[url] = content + + flakiness = ExtractFirstValue(content, FLAKY_TEST_REGEX % test_path) + return flakiness + + def _GetTestExpectations(self): + if not self._test_expectations_cache: + try: + if self.test_expectations_file: + log = open(self.test_expectations_file, 'r') + self._test_expectations_cache = "\n".join(log.readlines()) + else: + self._test_expectations_cache = ScrapeURL( + TEST_EXPECTATIONS_URL) + except HTTPError: + print ("Could not find test_expectations.txt at %s" % + TEST_EXPECTATIONS_URL) + + return self._test_expectations_cache + + def _GetTestExpectationsLine(self, test_path): + content = self._GetTestExpectations() + + if not content: + return None + + for match in content.splitlines(): + line = re.search(".*? : (.*?) 
= .*", match) + if line and test_path.find(line.group(1)) > -1: + return match + + return None + + def _CopyFileFromZipDir(self, file_in_zip, file_to_create): + modifiers = "" + if file_to_create.endswith(".png"): + modifiers = "b" + dir = os.path.join(os.path.split(file_to_create)[0:-1])[0] + CreateDirectory(dir) + file = os.path.normpath(os.path.join(TEMP_ZIP_DIR, file_in_zip)) + shutil.copy(file, dir) + + def _ExtractFileFromZip(self, zip, file_in_zip, file_to_create): + modifiers = "" + if file_to_create.endswith(".png"): + modifiers = "b" + try: + CreateDirectory(file_to_create[0:file_to_create.rfind("/")]) + localFile = open(file_to_create, "w%s" % modifiers) + localFile.write(zip.read(file_in_zip)) + localFile.close() + os.chmod(file_to_create, 0777) + return True + except KeyError: + print "File %s does not exist in zip file." % (file_in_zip) + except AttributeError: + print "File %s does not exist in zip file." % (file_in_zip) + print "Is this zip file assembled correctly?" + return False + + def _DownloadFile(self, url, local_filename=None, modifiers="", + force=False): + """ + Copy the contents of a file from a given URL + to a local file. + """ + try: + if local_filename == None: + local_filename = url.split('/')[-1] + if os.path.isfile(local_filename) and not force: + if self.verbose: + print "File at %s already exists." % local_filename + return local_filename + if self.dont_download: + return local_filename + webFile = urllib2.urlopen(url) + localFile = open(local_filename, ("w%s" % modifiers)) + localFile.write(webFile.read()) + webFile.close() + localFile.close() + os.chmod(local_filename, 0777) + except urllib2.HTTPError: + return None + except urllib2.URLError: + print "The url %s is malformed." % url + return None + return localFile.name diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder_test.py b/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder_test.py new file mode 100644 index 0000000..97fbed5 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/failure_finder_test.py @@ -0,0 +1,374 @@ +#!/bin/env/python +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import os +import zipfile + +from failure_finder import FailureFinder + +TEST_BUILDER_OUTPUT = """090723 10:38:22 test_shell_thread.py:289 + ERROR chrome/fast/forms/textarea-metrics.html failed: + Text diff mismatch + 090723 10:38:21 test_shell_thread.py:289 + ERROR chrome/fast/dom/xss-DENIED-javascript-variations.html failed: + Text diff mismatch + 090723 10:37:58 test_shell_thread.py:289 + ERROR LayoutTests/plugins/bindings-test.html failed: + Text diff mismatch + +------------------------------------------------------------------------------ +Expected to crash, but passed (1): + chrome/fast/forms/textarea-metrics.html + +Regressions: Unexpected failures (2): + chrome/fast/dom/xss-DENIED-javascript-variations.html = FAIL + LayoutTests/plugins/bindings-test.html = FAIL +------------------------------------------------------------------------------ +""" + +TEST_FAILURE_1 = ("layout-test-results/chrome/fast/forms/" + "textarea-metrics-actual.txt") +TEST_FAILURE_2 = ("layout-test-results/chrome/fast/dom/" + "xss-DENIED-javascript-variations-actual.txt") +TEST_FAILURE_3 = ("layout-test-results/LayoutTests/plugins/" + "bindings-test-actual.txt") + +TEST_ARCHIVE_OUTPUT = """ +Adding layout-test-results\pending\fast\repaint\not-real-actual.checksum +Adding layout-test-results\pending\fast\repaint\not-real-actual.png +Adding layout-test-results\pending\fast\repaint\not-real-actual.txt +last change: 22057 +build name: webkit-rel +host name: codf138 +saving results to \\my\test\location\webkit-rel\22057 +program finished with exit code 0 +""" + +TEST_TEST_EXPECTATIONS = """ +BUG1234 chrome/fast/forms/textarea-metrics.html = CRASH +""" + +TEST_BUILDER_LOG_FILE = "TEST_builder.log" +TEST_ARCHIVE_LOG_FILE = "TEST_archive.log" +TEST_DUMMY_ZIP_FILE = "TEST_zipfile.zip" +TEST_EXPECTATIONS_FILE = "TEST_expectations.txt" + +WEBKIT_BUILDER_NUMBER = "9800" +WEBKIT_FAILURES = ( + ["LayoutTests/fast/backgrounds/animated-svg-as-mask.html", + "LayoutTests/fast/backgrounds/background-clip-text.html", + "LayoutTests/fast/backgrounds/mask-composite.html", + "LayoutTests/fast/backgrounds/repeat/mask-negative-offset-repeat.html", + "LayoutTests/fast/backgrounds/svg-as-background-3.html", + "LayoutTests/fast/backgrounds/svg-as-background-6.html", + "LayoutTests/fast/backgrounds/svg-as-mask.html", + "LayoutTests/fast/block/float/013.html", + "LayoutTests/fast/block/float/nested-clearance.html", + "LayoutTests/fast/block/positioning/047.html"]) + +CHROMIUM_BASELINE = "chrome/fast/forms/basic-buttons.html" +EXPECTED_CHROMIUM_LOCAL_BASELINE = "./chrome/fast/forms/basic-buttons.html" +EXPECTED_CHROMIUM_URL_BASELINE = ("http://src.chromium.org/viewvc/chrome/" + "trunk/src/webkit/data/layout_tests/chrome/" + "fast/forms/basic-buttons.html") + +WEBKIT_BASELINE = "LayoutTests/fast/forms/11423.html" +EXPECTED_WEBKIT_LOCAL_BASELINE = "./LayoutTests/fast/forms/11423.html" +EXPECTED_WEBKIT_URL_BASELINE = ( + "http://svn.webkit.org/repository/webkit/trunk/" + "LayoutTests/fast/forms/11423.html") + +TEST_ZIP_FILE = ("http://build.chromium.org/buildbot/layout_test_results/" + "webkit-rel/21432/layout-test-results.zip") + +EXPECTED_REVISION = "20861" +EXPECTED_BUILD_NAME = "webkit-rel" + +SVG_TEST_EXPECTATION = ( + "LayoutTests/svg/custom/foreign-object-skew-expected.png") +SVG_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/svg/custom/" + "foreign-object-skew-expected-upstream.png") +WEBARCHIVE_TEST_EXPECTATION = ("LayoutTests/webarchive/adopt-attribute-" + "styled-body-webarchive-expected.webarchive") +DOM_TEST_EXPECTATION = 
("LayoutTests/fast/dom/" + "attribute-downcast-right-expected.txt") +DOM_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/fast/dom/" + "attribute-downcast-right-" + "expected-upstream.png") + +TEST_EXPECTATIONS = """ +BUG1234 WONTFIX : LayoutTests/fast/backgrounds/svg-as-background-3.html = FAIL +BUG3456 WIN : LayoutTests/fast/backgrounds/svg-as-background-6.html = CRASH +BUG4567 : LayoutTests/fast/backgrounds/svg-as-mask.html = PASS +WONTFIX : LayoutTests/fast/block/ = FAIL +""" + +EXPECT_EXACT_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-6.html" +EXPECT_GENERAL_MATCH = "LayoutTests/fast/block/float/013.html" +EXPECT_NO_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-99.html" + +WEBKIT_ORG = "webkit.org" +CHROMIUM_ORG = "chromium.org" + + +class FailureFinderTest(object): + + def runTests(self): + all_tests_passed = True + + tests = ["testWhitespaceInBuilderName", + "testGetLastBuild", + "testFindMatchesInBuilderOutput", + "testScrapeBuilderOutput", + "testGetChromiumBaseline", + "testGetWebkitBaseline", + "testZipDownload", + "testUseLocalOutput", + "testTranslateBuildToZip", + "testGetBaseline", + "testFindTestExpectations", + "testFull"] + + for test in tests: + try: + result = eval(test + "()") + if result: + print "[ OK ] %s" % test + else: + all_tests_passed = False + print "[ FAIL ] %s" % test + except: + print "[ ERROR ] %s" % test + return all_tests_passed + + +def _getBasicFailureFinder(): + return FailureFinder(None, "Webkit", False, "", ".", 10, False) + + +def _testLastBuild(failure_finder): + try: + last_build = failure_finder.GetLastBuild() + # Verify that last_build is not empty and is a number. + build = int(last_build) + return (build > 0) + except: + return False + + +def testGetLastBuild(): + test = _getBasicFailureFinder() + return _testLastBuild(test) + + +def testWhitespaceInBuilderName(): + test = _getBasicFailureFinder() + test.SetPlatform("Webkit (webkit.org)") + return _testLastBuild(test) + + +def testScrapeBuilderOutput(): + + # Try on the default builder. + test = _getBasicFailureFinder() + test.build = "9800" + output = test._ScrapeBuilderOutput() + if not output: + return False + + # Try on a crazy builder on the FYI waterfall. + test = _getBasicFailureFinder() + test.build = "1766" + test.SetPlatform("Webkit Linux (webkit.org)") + output = test._ScrapeBuilderOutput() + if not output: + return False + + return True + + +def testFindMatchesInBuilderOutput(): + test = _getBasicFailureFinder() + test.exclude_known_failures = True + matches = test._FindMatchesInBuilderOutput(TEST_BUILDER_OUTPUT) + # Verify that we found x matches. + if len(matches) != 2: + print "Did not find all unexpected failures." + return False + + test.exclude_known_failures = False + matches = test._FindMatchesInBuilderOutput(TEST_BUILDER_OUTPUT) + if len(matches) != 3: + print "Did not find all failures." + return False + return True + + +def _testBaseline(test_name, expected_local, expected_url): + test = _getBasicFailureFinder() + # Test baseline that is obviously in Chromium's tree. 
+ baseline = test._GetBaseline(test_name, ".", False) + try: + os.remove(baseline.local_file) + if (baseline.local_file != expected_local or + baseline.baseline_url != expected_url): + return False + except: + return False + return True + + +def testGetChromiumBaseline(): + return _testBaseline(CHROMIUM_BASELINE, EXPECTED_CHROMIUM_LOCAL_BASELINE, + EXPECTED_CHROMIUM_URL_BASELINE) + + +def testGetWebkitBaseline(): + return _testBaseline(WEBKIT_BASELINE, EXPECTED_WEBKIT_LOCAL_BASELINE, + EXPECTED_WEBKIT_URL_BASELINE) + + +def testUseLocalOutput(): + test_result = True + try: + _writeFile(TEST_BUILDER_LOG_FILE, TEST_BUILDER_OUTPUT) + _writeFile(TEST_ARCHIVE_LOG_FILE, TEST_ARCHIVE_OUTPUT) + _writeFile(TEST_EXPECTATIONS_FILE, TEST_TEST_EXPECTATIONS) + zip = zipfile.ZipFile(TEST_DUMMY_ZIP_FILE, 'w') + zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_1) + zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_2) + zip.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_3) + zip.close() + test = _getBasicFailureFinder() + test.archive_step_log_file = TEST_ARCHIVE_LOG_FILE + test.builder_output_log_file = TEST_BUILDER_LOG_FILE + test.test_expectations_file = TEST_EXPECTATIONS_FILE + test.zip_file = TEST_DUMMY_ZIP_FILE + test.dont_download = True + test.exclude_known_failures = True + test.delete_zip_file = False + failures = test.GetFailures() + if not failures or len(failures) != 2: + print "Did not get expected number of failures :" + for failure in failures: + print failure.test_path + test_result = False + finally: + os.remove(TEST_BUILDER_LOG_FILE) + os.remove(TEST_ARCHIVE_LOG_FILE) + os.remove(TEST_EXPECTATIONS_FILE) + os.remove(TEST_DUMMY_ZIP_FILE) + return test_result + + +def _writeFile(filename, contents): + myfile = open(filename, 'w') + myfile.write(contents) + myfile.close() + + +def testZipDownload(): + test = _getBasicFailureFinder() + try: + test._DownloadFile(TEST_ZIP_FILE, "test.zip", "b") # "b" -> binary + os.remove("test.zip") + return True + except: + return False + + +def testTranslateBuildToZip(): + test = _getBasicFailureFinder() + test.build = WEBKIT_BUILDER_NUMBER + revision, build_name = test._GetRevisionAndBuildFromArchiveStep() + if revision != EXPECTED_REVISION or build_name != EXPECTED_BUILD_NAME: + return False + return True + + +def testGetBaseline(): + test = _getBasicFailureFinder() + result = True + test.platform = "chromium-mac" + baseline = test._GetBaseline(WEBARCHIVE_TEST_EXPECTATION, ".") + if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1: + result = False + print "Webarchive layout test not found at webkit.org: %s" % url + test.platform = "chromium-win" + baseline = test._GetBaseline(SVG_TEST_EXPECTATION, ".") + if (not baseline.local_file or + baseline.baseline_url.find(CHROMIUM_ORG) == -1): + result = False + print "SVG layout test found at %s, not chromium.org" % url + baseline = test._GetBaseline(SVG_TEST_EXPECTATION, ".", True) + if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1: + result = False + print "Upstream SVG layout test NOT found at webkit.org!" 
+ baseline = test._GetBaseline(DOM_TEST_EXPECTATION, ".", True) + if (not baseline.local_file or + baseline.baseline_url.find("/platform/") > -1): + result = False + print ("Upstream SVG layout test found in a " + "platform directory: %s" % url) + os.remove(WEBARCHIVE_TEST_EXPECTATION) + os.remove(SVG_TEST_EXPECTATION) + os.remove(SVG_TEST_EXPECTATION_UPSTREAM) + os.remove(DOM_TEST_EXPECTATION_UPSTREAM) + deleteDir("LayoutTests") + return result + + +def deleteDir(directory): + """ Recursively deletes empty directories given a root. + This method will throw an exception if they are not empty. """ + for root, dirs, files in os.walk(directory, topdown=False): + for d in dirs: + try: + os.rmdir(os.path.join(root, d)) + except: + pass + os.rmdir(directory) + + +def testFull(): + """ Verifies that the entire system works end-to-end. """ + test = _getBasicFailureFinder() + test.build = WEBKIT_BUILDER_NUMBER + test.dont_download = True # Dry run only, no downloading needed. + failures = test.GetFailures() + # Verify that the max failures parameter works. + if not failures or len(failures) > 10: + "Got no failures or too many failures." + return False + + # Verify the failures match the list of expected failures. + for failure in failures: + if not (failure.test_path in WEBKIT_FAILURES): + print "Found a failure I did not expect to see." + return False + + return True + + +def testFindTestExpectations(): + test = _getBasicFailureFinder() + test._test_expectations_cache = TEST_EXPECTATIONS + match = test._GetTestExpectationsLine(EXPECT_EXACT_MATCH) + if not match: + return False + match = test._GetTestExpectationsLine(EXPECT_GENERAL_MATCH) + if not match: + return False + match = test._GetTestExpectationsLine(EXPECT_NO_MATCH) + return not match + + +if __name__ == "__main__": + fft = FailureFinderTest() + result = fft.runTests() + if result: + print "All tests passed." + else: + print "Not all tests passed." diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/html_generator.py b/webkit/tools/layout_tests/webkitpy/layout_package/html_generator.py new file mode 100644 index 0000000..b93166b --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/html_generator.py @@ -0,0 +1,230 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import re + +from failure import Failure + +CHROMIUM_BUG_URL = "http://crbug.com/" + + +def ExtractFirstValue(string, regex): + m = re.search(regex, string) + if m and m.group(1): + return m.group(1) + return None + +# TODO(gwilson): Refactor HTML generation into a HTML templating system like +# Django templates. + + +class HTMLGenerator(object): + + def __init__(self, failures, output_dir, build, platform, + exclude_known_failures): + self.failures = failures + self.output_dir = output_dir + self.build = build + self.platform = platform + self.exclude_known_failures = exclude_known_failures + self.image_size = "200px" + + def GenerateHTML(self): + html = "" + html += """ + + + + + + """ + title = "All failures" + if self.exclude_known_failures: + title = "Regressions" + + html += """ +

%s for build %s (%s)

+ """ % (title, self.build, self.platform) + + test_number = 0 + + # TODO(gwilson): Refactor this to do a join() on an array of HTML, + # rather than appending strings in a loop. + for failure in self.failures: + test_number += 1 + html += """ + + + +
+ %s.  %s
  Last modified: %s + """ % (test_number, failure.test_path, failure.GetTestHome(), + failure.test_age) + html += "
" + html += "
%s
" % \ + (self._GenerateLinkifiedTextExpectations(failure)) + + html += self._GenerateFlakinessHTML(failure) + + if failure.crashed: + html += "
Test CRASHED
" + elif failure.timeout: + html += "
Test TIMED OUT
" + else: + html += """ + + + + + + + + + """ + + if failure.text_diff_mismatch: + html += self._GenerateTextFailureHTML(failure) + + if failure.image_mismatch: + html += self._GenerateImageFailureHTML(failure) + + html += "
 ExpectedActualDifferenceUpstream
" + html += "

" + html += """""" + + # TODO(gwilson): Change this filename to be passed in as an argument. + html_filename = "%s/index-%s.html" % (self.output_dir, self.build) + htmlFile = open(html_filename, 'w') + htmlFile.write(html) + htmlFile.close() + return html_filename + + def _GenerateLinkifiedTextExpectations(self, failure): + if not failure.test_expectations_line: + return "" + bug_number = ExtractFirstValue(failure.test_expectations_line, + "BUG(\d+)") + if not bug_number or bug_number == "": + return "" + return failure.test_expectations_line.replace( + "BUG" + bug_number, + "BUG%s" % (CHROMIUM_BUG_URL, bug_number, + bug_number)) + + # TODO(gwilson): Fix this so that it shows the last ten runs + # not just a "meter" of flakiness. + + def _GenerateFlakinessHTML(self, failure): + html = "" + if not failure.flakiness: + return html + html += """ + + + + """ % (failure.flakiness) + + flaky_red = int(round(int(failure.flakiness) / 5)) + flaky_green = 10 - flaky_red + for i in range(0, flaky_green): + html += """ + + """ + for i in range(0, flaky_red): + html += """ + + """ + html += """ + +
Flakiness: (%s)    

+ """ + return html + + def _GenerateTextFailureHTML(self, failure): + html = "" + if not failure.GetTextBaselineLocation(): + return """This test likely does not have any + TEXT baseline for this platform, or one could not + be found.""" + html += """ + + + Render Tree Dump
+ %s baseline
+ Age: %s
+ + """ % (failure.text_baseline_url, + failure.GetTextBaselineLocation(), + failure.text_baseline_age) + html += self._GenerateTextFailureTD(failure.GetExpectedTextFilename(), + "expected text") + html += self._GenerateTextFailureTD(failure.GetActualTextFilename(), + "actual text") + html += self._GenerateTextFailureTD(failure.GetTextDiffFilename(), + "text diff") + html += " " + html += "" + return html + + def _GenerateTextFailureTD(self, file_path, anchor_text): + return ("" + "%s") % ( + self.build, file_path, anchor_text) + + def _GenerateImageFailureHTML(self, failure): + if not failure.GetImageBaselineLocation(): + return """This test likely does not have any + IMAGE baseline for this platform, or one could not be + found.""" + html = """ + + Pixel Dump
+ %s baseline
Age: %s + """ % (failure.image_baseline_url, + failure.GetImageBaselineLocation(), + failure.image_baseline_age) + html += self._GenerateImageFailureTD( + failure.GetExpectedImageFilename()) + html += self._GenerateImageFailureTD( + failure.GetActualImageFilename()) + html += self._GenerateImageFailureTD( + failure.GetImageDiffFilename()) + if (failure.image_baseline_upstream_local and + failure.image_baseline_upstream_local != ""): + html += self._GenerateImageFailureTD( + failure.GetImageUpstreamFilename()) + else: + html += """ +   + """ + html += "" + return html + + def _GenerateImageFailureTD(self, filename): + return ("" + "" + "") % (self.build, filename, self.image_size, + self.build, filename) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/http_server.py b/webkit/tools/layout_tests/webkitpy/layout_package/http_server.py new file mode 100755 index 0000000..6c279d6 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/http_server.py @@ -0,0 +1,255 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A class to help start/stop the lighttpd server used by layout tests.""" + + +import logging +import optparse +import os +import shutil +import subprocess +import sys +import tempfile +import time +import urllib + +import http_server_base +import path_utils + +# So we can import httpd_utils below to make ui_tests happy. +sys.path.append(path_utils.PathFromBase('tools', 'python')) +import google.httpd_utils + + +def RemoveLogFiles(folder, starts_with): + files = os.listdir(folder) + for file in files: + if file.startswith(starts_with): + full_path = os.path.join(folder, file) + os.remove(full_path) + + +class Lighttpd(http_server_base.HttpServerBase): + # Webkit tests + try: + _webkit_tests = path_utils.PathFromBase('third_party', 'WebKit', + 'LayoutTests', 'http', 'tests') + _js_test_resource = path_utils.PathFromBase('third_party', 'WebKit', + 'LayoutTests', 'fast', + 'js', 'resources') + except path_utils.PathNotFound: + _webkit_tests = None + _js_test_resource = None + + # Path where we can access all of the tests + _all_tests = path_utils.PathFromBase('webkit', 'data', 'layout_tests') + # Self generated certificate for SSL server (for client cert get + # \chrome\test\data\ssl\certs\root_ca_cert.crt) + _pem_file = path_utils.PathFromBase('tools', 'python', 'google', + 'httpd_config', 'httpd2.pem') + # One mapping where we can get to everything + VIRTUALCONFIG = [{'port': 8081, 'docroot': _all_tests}] + + if _webkit_tests: + VIRTUALCONFIG.extend( + # Three mappings (one with SSL enabled) for LayoutTests http tests + [{'port': 8000, 'docroot': _webkit_tests}, + {'port': 8080, 'docroot': _webkit_tests}, + {'port': 8443, 'docroot': _webkit_tests, 'sslcert': _pem_file}]) + + def __init__(self, output_dir, background=False, port=None, + root=None, register_cygwin=None, run_background=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._register_cygwin = register_cygwin + self._run_background = run_background + if self._port: + self._port = int(self._port) + + def IsRunning(self): + return self._process != None + + def Start(self): + if self.IsRunning(): + raise 'Lighttpd already running' + + base_conf_file = path_utils.PathFromBase('webkit', + 'tools', 'layout_tests', 
'layout_package', 'lighttpd.conf') + out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf') + time_str = time.strftime("%d%b%Y-%H%M%S") + access_file_name = "access.log-" + time_str + ".txt" + access_log = os.path.join(self._output_dir, access_file_name) + log_file_name = "error.log-" + time_str + ".txt" + error_log = os.path.join(self._output_dir, log_file_name) + + # Remove old log files. We only need to keep the last ones. + RemoveLogFiles(self._output_dir, "access.log-") + RemoveLogFiles(self._output_dir, "error.log-") + + # Write out the config + f = file(base_conf_file, 'rb') + base_conf = f.read() + f.close() + + f = file(out_conf_file, 'wb') + f.write(base_conf) + + # Write out our cgi handlers. Run perl through env so that it + # processes the #! line and runs perl with the proper command + # line arguments. Emulate apache's mod_asis with a cat cgi handler. + f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n' + ' ".pl" => "/usr/bin/env",\n' + ' ".asis" => "/bin/cat",\n' + ' ".php" => "%s" )\n\n') % + path_utils.LigHTTPdPHPPath()) + + # Setup log files + f.write(('server.errorlog = "%s"\n' + 'accesslog.filename = "%s"\n\n') % (error_log, access_log)) + + # Setup upload folders. Upload folder is to hold temporary upload files + # and also POST data. This is used to support XHR layout tests that + # does POST. + f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir)) + + # Setup a link to where the js test templates are stored + f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') % + (self._js_test_resource)) + + # dump out of virtual host config at the bottom. + if self._root: + if self._port: + # Have both port and root dir. + mappings = [{'port': self._port, 'docroot': self._root}] + else: + # Have only a root dir - set the ports as for LayoutTests. + # This is used in ui_tests to run http tests against a browser. + + # default set of ports as for LayoutTests but with a + # specified root. + mappings = [{'port': 8000, 'docroot': self._root}, + {'port': 8080, 'docroot': self._root}, + {'port': 8443, 'docroot': self._root, + 'sslcert': Lighttpd._pem_file}] + else: + mappings = self.VIRTUALCONFIG + for mapping in mappings: + ssl_setup = '' + if 'sslcert' in mapping: + ssl_setup = (' ssl.engine = "enable"\n' + ' ssl.pemfile = "%s"\n' % mapping['sslcert']) + + f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n' + ' server.document-root = "%s"\n' + + ssl_setup + + '}\n\n') % (mapping['port'], mapping['docroot'])) + f.close() + + executable = path_utils.LigHTTPdExecutablePath() + module_path = path_utils.LigHTTPdModulePath() + start_cmd = [executable, + # Newly written config file + '-f', path_utils.PathFromBase(self._output_dir, + 'lighttpd.conf'), + # Where it can find its module dynamic libraries + '-m', module_path] + + if not self._run_background: + start_cmd.append(# Don't background + '-D') + + # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the + # bug that mod_alias.so loads it from the hard coded path. 
+ if sys.platform == 'darwin': + tmp_module_path = '/tmp/lighttpd/lib' + if not os.path.exists(tmp_module_path): + os.makedirs(tmp_module_path) + lib_file = 'liblightcomp.dylib' + shutil.copyfile(os.path.join(module_path, lib_file), + os.path.join(tmp_module_path, lib_file)) + + # Put the cygwin directory first in the path to find cygwin1.dll + env = os.environ + if sys.platform in ('cygwin', 'win32'): + env['PATH'] = '%s;%s' % ( + path_utils.PathFromBase('third_party', 'cygwin', 'bin'), + env['PATH']) + + if sys.platform == 'win32' and self._register_cygwin: + setup_mount = path_utils.PathFromBase('third_party', 'cygwin', + 'setup_mount.bat') + subprocess.Popen(setup_mount).wait() + + logging.debug('Starting http server') + self._process = subprocess.Popen(start_cmd, env=env) + + # Wait for server to start. + self.mappings = mappings + server_started = self.WaitForAction(self.IsServerRunningOnAllPorts) + + # Our process terminated already + if not server_started or self._process.returncode != None: + raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.') + + logging.debug("Server successfully started") + + # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are + # probably not being flushed, etc... why doesn't our python have os.kill ? + + def Stop(self, force=False): + if not force and not self.IsRunning(): + return + + httpd_pid = None + if self._process: + httpd_pid = self._process.pid + path_utils.ShutDownHTTPServer(httpd_pid) + + if self._process: + self._process.wait() + self._process = None + +if '__main__' == __name__: + # Provide some command line params for starting/stopping the http server + # manually. Also used in ui_tests to run http layout tests in a browser. + option_parser = optparse.OptionParser() + option_parser.add_option('-k', '--server', + help='Server action (start|stop)') + option_parser.add_option('-p', '--port', + help='Port to listen on (overrides layout test ports)') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot (overrides layout test roots)') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", help='Register Cygwin paths (on Win try bots)') + option_parser.add_option('--run_background', action="store_true", + dest="run_background", + help='Run on background (for running as UI test)') + options, args = option_parser.parse_args() + + if not options.server: + print ('Usage: %s --server {start|stop} [--root=root_dir]' + ' [--port=port_number]' % sys.argv[0]) + else: + if (options.root is None) and (options.port is not None): + # specifying root but not port means we want httpd on default + # set of ports that LayoutTest use, but pointing to a different + # source of tests. Specifying port but no root does not seem + # meaningful. + raise 'Specifying port requires also a root.' + httpd = Lighttpd(tempfile.gettempdir(), + port=options.port, + root=options.root, + register_cygwin=options.register_cygwin, + run_background=options.run_background) + if 'start' == options.server: + httpd.Start() + else: + httpd.Stop(force=True) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/http_server_base.py b/webkit/tools/layout_tests/webkitpy/layout_package/http_server_base.py new file mode 100644 index 0000000..daf0978 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/http_server_base.py @@ -0,0 +1,42 @@ +# Copyright (c) 2009 The Chromium Authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Base class with common routines between the Apache and Lighttpd servers.""" + +import logging +import time +import urllib + + +class HttpServerBase(object): + + def WaitForAction(self, action): + """Repeat the action for 20 seconds or until it succeeds. Returns + whether it succeeded.""" + start_time = time.time() + while time.time() - start_time < 20: + if action(): + return True + time.sleep(1) + + return False + + def IsServerRunningOnAllPorts(self): + """Returns whether the server is running on all the desired ports.""" + for mapping in self.mappings: + if 'sslcert' in mapping: + http_suffix = 's' + else: + http_suffix = '' + + url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port']) + + try: + response = urllib.urlopen(url) + logging.debug("Server running at %s" % url) + except IOError: + logging.debug("Server NOT running at %s" % url) + return False + + return True diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/json_layout_results_generator.py b/webkit/tools/layout_tests/webkitpy/layout_package/json_layout_results_generator.py new file mode 100644 index 0000000..f62075e --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/json_layout_results_generator.py @@ -0,0 +1,159 @@ +# Copyright (c) 2010 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import logging +import os + +from layout_package import json_results_generator +from layout_package import path_utils +from layout_package import test_expectations +from layout_package import test_failures + + +class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator): + """A JSON results generator for layout tests.""" + + LAYOUT_TESTS_PATH = "LayoutTests" + + # Additional JSON fields. + WONTFIX = "wontfixCounts" + DEFERRED = "deferredCounts" + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_timings, expectations, result_summary, all_tests): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args: + result_summary: ResultsSummary object storing the summary of the test + results. + (see the comment of JSONResultsGenerator.__init__ for other Args) + """ + + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_file_path = os.path.join(results_file_base_path, + self.RESULTS_FILENAME) + self._expectations = expectations + + # We don't use self._skipped_tests and self._passed_tests as we + # override _InsertFailureSummaries. + + # We want relative paths to LayoutTest root for JSON output. + path_to_name = self._GetPathRelativeToLayoutTestRoot + self._result_summary = result_summary + self._failures = dict( + (path_to_name(test), test_failures.DetermineResultType(failures)) + for (test, failures) in result_summary.failures.iteritems()) + self._all_tests = [path_to_name(test) for test in all_tests] + self._test_timings = dict( + (path_to_name(test_tuple.filename), test_tuple.test_run_time) + for test_tuple in test_timings) + + self._GenerateJSONOutput() + + def _GetPathRelativeToLayoutTestRoot(self, test): + """Returns the path of the test relative to the layout test root. 
+ For example, for: + src/third_party/WebKit/LayoutTests/fast/forms/foo.html + We would return + fast/forms/foo.html + """ + index = test.find(self.LAYOUT_TESTS_PATH) + if index is not -1: + index += len(self.LAYOUT_TESTS_PATH) + + if index is -1: + # Already a relative path. + relativePath = test + else: + relativePath = test[index + 1:] + + # Make sure all paths are unix-style. + return relativePath.replace('\\', '/') + + # override + def _ConvertJSONToCurrentVersion(self, results_json): + archive_version = None + if self.VERSION_KEY in results_json: + archive_version = results_json[self.VERSION_KEY] + + super(JSONLayoutResultsGenerator, self)._ConvertJSONToCurrentVersion( + results_json) + + # version 2->3 + if archive_version == 2: + for results_for_builder in results_json.itervalues(): + try: + test_results = results_for_builder[self.TESTS] + except: + continue + + for test in test_results: + # Make sure all paths are relative + test_path = self._GetPathRelativeToLayoutTestRoot(test) + if test_path != test: + test_results[test_path] = test_results[test] + del test_results[test] + + # override + def _InsertFailureSummaries(self, results_for_builder): + summary = self._result_summary + + self._InsertItemIntoRawList(results_for_builder, + len((set(summary.failures.keys()) | + summary.tests_by_expectation[test_expectations.SKIP]) & + summary.tests_by_timeline[test_expectations.NOW]), + self.FIXABLE_COUNT) + self._InsertItemIntoRawList(results_for_builder, + self._GetFailureSummaryEntry(test_expectations.NOW), + self.FIXABLE) + self._InsertItemIntoRawList(results_for_builder, + len(self._expectations.GetTestsWithTimeline( + test_expectations.NOW)), self.ALL_FIXABLE_COUNT) + self._InsertItemIntoRawList(results_for_builder, + self._GetFailureSummaryEntry(test_expectations.DEFER), + self.DEFERRED) + self._InsertItemIntoRawList(results_for_builder, + self._GetFailureSummaryEntry(test_expectations.WONTFIX), + self.WONTFIX) + + # override + def _NormalizeResultsJSON(self, test, test_name, tests): + super(JSONLayoutResultsGenerator, self)._NormalizeResultsJSON( + test, test_name, tests) + + # Remove tests that don't exist anymore. + full_path = os.path.join(path_utils.LayoutTestsDir(), test_name) + full_path = os.path.normpath(full_path) + if not os.path.exists(full_path): + del tests[test_name] + + def _GetFailureSummaryEntry(self, timeline): + """Creates a summary object to insert into the JSON. + + Args: + summary ResultSummary object with test results + timeline current test_expectations timeline to build entry for + (e.g., test_expectations.NOW, etc.) + """ + entry = {} + summary = self._result_summary + timeline_tests = summary.tests_by_timeline[timeline] + entry[self.SKIP_RESULT] = len( + summary.tests_by_expectation[test_expectations.SKIP] & + timeline_tests) + entry[self.PASS_RESULT] = len( + summary.tests_by_expectation[test_expectations.PASS] & + timeline_tests) + for failure_type in summary.tests_by_expectation.keys(): + if failure_type not in self.FAILURE_TO_CHAR: + continue + count = len(summary.tests_by_expectation[failure_type] & + timeline_tests) + entry[self.FAILURE_TO_CHAR[failure_type]] = count + return entry diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/json_results_generator.py b/webkit/tools/layout_tests/webkitpy/layout_package/json_results_generator.py new file mode 100644 index 0000000..9bd0ad3 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/json_results_generator.py @@ -0,0 +1,390 @@ +# Copyright (c) 2009 The Chromium Authors. 
All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import logging +import os +import subprocess +import sys +import time +import urllib2 +import xml.dom.minidom + +from layout_package import path_utils +from layout_package import test_expectations + +sys.path.append(path_utils.PathFromBase('third_party')) +import simplejson + + +class JSONResultsGenerator(object): + + MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 + # Min time (seconds) that will be added to the JSON. + MIN_TIME = 1 + JSON_PREFIX = "ADD_RESULTS(" + JSON_SUFFIX = ");" + PASS_RESULT = "P" + SKIP_RESULT = "X" + NO_DATA_RESULT = "N" + VERSION = 3 + VERSION_KEY = "version" + RESULTS = "results" + TIMES = "times" + BUILD_NUMBERS = "buildNumbers" + WEBKIT_SVN = "webkitRevision" + CHROME_SVN = "chromeRevision" + TIME = "secondsSinceEpoch" + TESTS = "tests" + + FIXABLE_COUNT = "fixableCount" + FIXABLE = "fixableCounts" + ALL_FIXABLE_COUNT = "allFixableCount" + + # Note that we omit test_expectations.FAIL from this list because + # it should never show up (it's a legacy input expectation, never + # an output expectation). + FAILURE_TO_CHAR = {test_expectations.CRASH: "C", + test_expectations.TIMEOUT: "T", + test_expectations.IMAGE: "I", + test_expectations.TEXT: "F", + test_expectations.MISSING: "O", + test_expectations.IMAGE_PLUS_TEXT: "Z"} + FAILURE_CHARS = FAILURE_TO_CHAR.values() + + RESULTS_FILENAME = "results.json" + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_timings, failures, passed_tests, skipped_tests, all_tests): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args + builder_name: the builder name (e.g. Webkit). + build_name: the build name (e.g. webkit-rel). + build_number: the build number. + results_file_base_path: Absolute path to the directory containing the + results json file. + builder_base_url: the URL where we have the archived test results. + test_timings: Map of test name to a test_run-time. + failures: Map of test name to a failure type (of test_expectations). + passed_tests: A set containing all the passed tests. + skipped_tests: A set containing all the skipped tests. + all_tests: List of all the tests that were run. This should not + include skipped tests. + """ + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_file_path = os.path.join(results_file_base_path, + self.RESULTS_FILENAME) + self._test_timings = test_timings + self._failures = failures + self._passed_tests = passed_tests + self._skipped_tests = skipped_tests + self._all_tests = all_tests + + self._GenerateJSONOutput() + + def _GenerateJSONOutput(self): + """Generates the JSON output file.""" + json = self._GetJSON() + if json: + results_file = open(self._results_file_path, "w") + results_file.write(json) + results_file.close() + + def _GetSVNRevision(self, in_directory=None): + """Returns the svn revision for the given directory. + + Args: + in_directory: The directory where svn is to be run. 
+ """ + output = subprocess.Popen(["svn", "info", "--xml"], + cwd=in_directory, + shell=(sys.platform == 'win32'), + stdout=subprocess.PIPE).communicate()[0] + try: + dom = xml.dom.minidom.parseString(output) + return dom.getElementsByTagName('entry')[0].getAttribute( + 'revision') + except xml.parsers.expat.ExpatError: + return "" + + def _GetArchivedJSONResults(self): + """Reads old results JSON file if it exists. + Returns (archived_results, error) tuple where error is None if results + were successfully read. + """ + results_json = {} + old_results = None + error = None + + if os.path.exists(self._results_file_path): + old_results_file = open(self._results_file_path, "r") + old_results = old_results_file.read() + elif self._builder_base_url: + # Check if we have the archived JSON file on the buildbot server. + results_file_url = (self._builder_base_url + + self._build_name + "/" + self.RESULTS_FILENAME) + logging.error("Local results.json file does not exist. Grabbing " + "it off the archive at " + results_file_url) + + try: + results_file = urllib2.urlopen(results_file_url) + info = results_file.info() + old_results = results_file.read() + except urllib2.HTTPError, http_error: + # A non-4xx status code means the bot is hosed for some reason + # and we can't grab the results.json file off of it. + if (http_error.code < 400 and http_error.code >= 500): + error = http_error + except urllib2.URLError, url_error: + error = url_error + + if old_results: + # Strip the prefix and suffix so we can get the actual JSON object. + old_results = old_results[len(self.JSON_PREFIX): + len(old_results) - len(self.JSON_SUFFIX)] + + try: + results_json = simplejson.loads(old_results) + except: + logging.debug("results.json was not valid JSON. Clobbering.") + # The JSON file is not valid JSON. Just clobber the results. + results_json = {} + else: + logging.debug('Old JSON results do not exist. Starting fresh.') + results_json = {} + + return results_json, error + + def _GetJSON(self): + """Gets the results for the results.json file.""" + results_json, error = self._GetArchivedJSONResults() + if error: + # If there was an error don't write a results.json + # file at all as it would lose all the information on the bot. + logging.error("Archive directory is inaccessible. Not modifying " + "or clobbering the results.json file: " + str(error)) + return None + + builder_name = self._builder_name + if results_json and builder_name not in results_json: + logging.debug("Builder name (%s) is not in the results.json file." + % builder_name) + + self._ConvertJSONToCurrentVersion(results_json) + + if builder_name not in results_json: + results_json[builder_name] = self._CreateResultsForBuilderJSON() + + results_for_builder = results_json[builder_name] + + self._InsertGenericMetadata(results_for_builder) + + self._InsertFailureSummaries(results_for_builder) + + # Update the all failing tests with result type and time. + tests = results_for_builder[self.TESTS] + all_failing_tests = set(self._failures.iterkeys()) + all_failing_tests.update(tests.iterkeys()) + for test in all_failing_tests: + self._InsertTestTimeAndResult(test, tests) + + # Specify separators in order to get compact encoding. + results_str = simplejson.dumps(results_json, separators=(',', ':')) + return self.JSON_PREFIX + results_str + self.JSON_SUFFIX + + def _InsertFailureSummaries(self, results_for_builder): + """Inserts aggregate pass/failure statistics into the JSON. 
+ This method reads self._skipped_tests, self._passed_tests and + self._failures and inserts FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT + entries. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + """ + # Insert the number of tests that failed. + self._InsertItemIntoRawList(results_for_builder, + len(set(self._failures.keys()) | self._skipped_tests), + self.FIXABLE_COUNT) + + # Create a pass/skip/failure summary dictionary. + entry = {} + entry[self.SKIP_RESULT] = len(self._skipped_tests) + entry[self.PASS_RESULT] = len(self._passed_tests) + get = entry.get + for failure_type in self._failures.values(): + failure_char = self.FAILURE_TO_CHAR[failure_type] + entry[failure_char] = get(failure_char, 0) + 1 + + # Insert the pass/skip/failure summary dictionary. + self._InsertItemIntoRawList(results_for_builder, entry, self.FIXABLE) + + # Insert the number of all the tests that are supposed to pass. + self._InsertItemIntoRawList(results_for_builder, + len(self._skipped_tests | self._all_tests), + self.ALL_FIXABLE_COUNT) + + def _InsertItemIntoRawList(self, results_for_builder, item, key): + """Inserts the item into the list with the given key in the results for + this builder. Creates the list if no such list exists. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + item: Number or string to insert into the list. + key: Key in results_for_builder for the list to insert into. + """ + if key in results_for_builder: + raw_list = results_for_builder[key] + else: + raw_list = [] + + raw_list.insert(0, item) + raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] + results_for_builder[key] = raw_list + + def _InsertItemRunLengthEncoded(self, item, encoded_results): + """Inserts the item into the run-length encoded results. + + Args: + item: String or number to insert. + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + if len(encoded_results) and item == encoded_results[0][1]: + num_results = encoded_results[0][0] + if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + encoded_results[0][0] = num_results + 1 + else: + # Use a list instead of a class for the run-length encoding since + # we want the serialized form to be concise. + encoded_results.insert(0, [1, item]) + + def _InsertGenericMetadata(self, results_for_builder): + """ Inserts generic metadata (such as version number, current time etc) + into the JSON. + + Args: + results_for_builder: Dictionary containing the test results for + a single builder. + """ + self._InsertItemIntoRawList(results_for_builder, + self._build_number, self.BUILD_NUMBERS) + + path_to_webkit = path_utils.PathFromBase('third_party', 'WebKit', + 'WebCore') + self._InsertItemIntoRawList(results_for_builder, + self._GetSVNRevision(path_to_webkit), + self.WEBKIT_SVN) + + path_to_chrome_base = path_utils.PathFromBase() + self._InsertItemIntoRawList(results_for_builder, + self._GetSVNRevision(path_to_chrome_base), + self.CHROME_SVN) + + self._InsertItemIntoRawList(results_for_builder, + int(time.time()), + self.TIME) + + def _InsertTestTimeAndResult(self, test_name, tests): + """ Insert a test item with its results to the given tests dictionary. + + Args: + tests: Dictionary containing test result entries. 
+ """ + + result = JSONResultsGenerator.PASS_RESULT + time = 0 + + if test_name not in self._all_tests: + result = JSONResultsGenerator.NO_DATA_RESULT + + if test_name in self._failures: + result = self.FAILURE_TO_CHAR[self._failures[test_name]] + + if test_name in self._test_timings: + # Floor for now to get time in seconds. + time = int(self._test_timings[test_name]) + + if test_name not in tests: + tests[test_name] = self._CreateResultsAndTimesJSON() + + thisTest = tests[test_name] + self._InsertItemRunLengthEncoded(result, thisTest[self.RESULTS]) + self._InsertItemRunLengthEncoded(time, thisTest[self.TIMES]) + self._NormalizeResultsJSON(thisTest, test_name, tests) + + def _ConvertJSONToCurrentVersion(self, results_json): + """If the JSON does not match the current version, converts it to the + current version and adds in the new version number. + """ + if (self.VERSION_KEY in results_json and + results_json[self.VERSION_KEY] == self.VERSION): + return + + results_json[self.VERSION_KEY] = self.VERSION + + def _CreateResultsAndTimesJSON(self): + results_and_times = {} + results_and_times[self.RESULTS] = [] + results_and_times[self.TIMES] = [] + return results_and_times + + def _CreateResultsForBuilderJSON(self): + results_for_builder = {} + results_for_builder[self.TESTS] = {} + return results_for_builder + + def _RemoveItemsOverMaxNumberOfBuilds(self, encoded_list): + """Removes items from the run-length encoded list after the final + item that exceeds the max number of builds to track. + + Args: + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + num_builds = 0 + index = 0 + for result in encoded_list: + num_builds = num_builds + result[0] + index = index + 1 + if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + return encoded_list[:index] + return encoded_list + + def _NormalizeResultsJSON(self, test, test_name, tests): + """ Prune tests where all runs pass or tests that no longer exist and + truncate all results to maxNumberOfBuilds. + + Args: + test: ResultsAndTimes object for this test. + test_name: Name of the test. + tests: The JSON object with all the test results for this builder. + """ + test[self.RESULTS] = self._RemoveItemsOverMaxNumberOfBuilds( + test[self.RESULTS]) + test[self.TIMES] = self._RemoveItemsOverMaxNumberOfBuilds( + test[self.TIMES]) + + is_all_pass = self._IsResultsAllOfType(test[self.RESULTS], + self.PASS_RESULT) + is_all_no_data = self._IsResultsAllOfType(test[self.RESULTS], + self.NO_DATA_RESULT) + max_time = max([time[1] for time in test[self.TIMES]]) + + # Remove all passes/no-data from the results to reduce noise and + # filesize. If a test passes every run, but takes > MIN_TIME to run, + # don't throw away the data. + if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): + del tests[test_name] + + def _IsResultsAllOfType(self, results, type): + """Returns whether all the results are of the given type + (e.g. all passes).""" + return len(results) == 1 and results[0][1] == type diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/metered_stream.py b/webkit/tools/layout_tests/webkitpy/layout_package/metered_stream.py new file mode 100644 index 0000000..575209e --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/metered_stream.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +""" +Package that implements a stream wrapper that has 'meters' as well as +regular output. A 'meter' is a single line of text that can be erased +and rewritten repeatedly, without producing multiple lines of output. It +can be used to produce effects like progress bars. +""" + + +class MeteredStream: + """This class is a wrapper around a stream that allows you to implement + meters. + + It can be used like a stream, but calling update() will print + the string followed by only a carriage return (instead of a carriage + return and a line feed). This can be used to implement progress bars and + other sorts of meters. Note that anything written by update() will be + erased by a subsequent update(), write(), or flush().""" + + def __init__(self, verbose, stream): + """ + Args: + verbose: whether update is a no-op + stream: output stream to write to + """ + self._dirty = False + self._verbose = verbose + self._stream = stream + self._last_update = "" + + def write(self, txt): + """Write text directly to the stream, overwriting and resetting the + meter.""" + if self._dirty: + self.update("") + self._dirty = False + self._stream.write(txt) + + def flush(self): + """Flush any buffered output.""" + self._stream.flush() + + def update(self, str): + """Write an update to the stream that will get overwritten by the next + update() or by a write(). + + This is used for progress updates that don't need to be preserved in + the log. Note that verbose disables this routine; we have this in + case we are logging lots of output and the update()s will get lost + or won't work properly (typically because verbose streams are + redirected to files. + + TODO(dpranke): figure out if there is a way to detect if we're writing + to a stream that handles CRs correctly (e.g., terminals). That might + be a cleaner way of handling this. + """ + if self._verbose: + return + + # Print the necessary number of backspaces to erase the previous + # message. + self._stream.write("\b" * len(self._last_update)) + self._stream.write(str) + num_remaining = len(self._last_update) - len(str) + if num_remaining > 0: + self._stream.write(" " * num_remaining + "\b" * num_remaining) + self._last_update = str + self._dirty = True diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/path_utils.py b/webkit/tools/layout_tests/webkitpy/layout_package/path_utils.py new file mode 100644 index 0000000..48321df --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/path_utils.py @@ -0,0 +1,372 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This package contains utility methods for manipulating paths and +filenames for test results and baselines. It also contains wrappers +of a few routines in platform_utils.py so that platform_utils.py can +be considered a 'protected' package - i.e., this file should be +the only file that ever includes platform_utils. This leads to +us including a few things that don't really have anything to do + with paths, unfortunately.""" + +import errno +import os +import stat +import sys + +import platform_utils +import platform_utils_win +import platform_utils_mac +import platform_utils_linux + +# Cache some values so we don't have to recalculate them. _basedir is +# used by PathFromBase() and caches the full (native) path to the top +# of the source tree (/src). 
_baseline_search_path is used by +# ExpectedBaselines() and caches the list of native paths to search +# for baseline results. +_basedir = None +_baseline_search_path = None + + +class PathNotFound(Exception): + pass + + +def LayoutTestsDir(): + """Returns the fully-qualified path to the directory containing the input + data for the specified layout test.""" + return PathFromBase('third_party', 'WebKit', 'LayoutTests') + + +def ChromiumBaselinePath(platform=None): + """Returns the full path to the directory containing expected + baseline results from chromium ports. If |platform| is None, the + currently executing platform is used. + + Note: although directly referencing individual platform_utils_* files is + usually discouraged, we allow it here so that the rebaselining tool can + pull baselines for platforms other than the host platform.""" + + # Normalize the platform string. + platform = PlatformName(platform) + if platform.startswith('chromium-mac'): + return platform_utils_mac.BaselinePath(platform) + elif platform.startswith('chromium-win'): + return platform_utils_win.BaselinePath(platform) + elif platform.startswith('chromium-linux'): + return platform_utils_linux.BaselinePath(platform) + + return platform_utils.BaselinePath() + + +def WebKitBaselinePath(platform): + """Returns the full path to the directory containing expected + baseline results from WebKit ports.""" + return PathFromBase('third_party', 'WebKit', 'LayoutTests', + 'platform', platform) + + +def BaselineSearchPath(platform=None): + """Returns the list of directories to search for baselines/results for a + given platform, in order of preference. Paths are relative to the top of + the source tree. If parameter platform is None, returns the list for the + current platform that the script is running on. + + Note: although directly referencing individual platform_utils_* files is + usually discouraged, we allow it here so that the rebaselining tool can + pull baselines for platforms other than the host platform.""" + + # Normalize the platform name. + platform = PlatformName(platform) + if platform.startswith('chromium-mac'): + return platform_utils_mac.BaselineSearchPath(platform) + elif platform.startswith('chromium-win'): + return platform_utils_win.BaselineSearchPath(platform) + elif platform.startswith('chromium-linux'): + return platform_utils_linux.BaselineSearchPath(platform) + return platform_utils.BaselineSearchPath() + + +def ExpectedBaselines(filename, suffix, platform=None, all_baselines=False): + """Given a test name, finds where the baseline results are located. + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. This should not be None, but may be an empty string. + platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to + the current platform. + all_baselines: If True, return an ordered list of all baseline paths + for the given platform. If False, return only the first + one. + Returns + a list of ( platform_dir, results_filename ), where + platform_dir - abs path to the top of the results tree (or test tree) + results_filename - relative path from top of tree to the results file + (os.path.join of the two gives you the full path to the file, + unless None was returned.) + Return values will be in the format appropriate for the current platform + (e.g., "\\" for path separators on Windows). 
If the results file is not + found, then None will be returned for the directory, but the expected + relative pathname will still be returned. + """ + global _baseline_search_path + global _search_path_platform + testname = os.path.splitext(RelativeTestFilename(filename))[0] + + baseline_filename = testname + '-expected' + suffix + + if (_baseline_search_path is None) or (_search_path_platform != platform): + _baseline_search_path = BaselineSearchPath(platform) + _search_path_platform = platform + + baselines = [] + for platform_dir in _baseline_search_path: + if os.path.exists(os.path.join(platform_dir, baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if not all_baselines and baselines: + return baselines + + # If it wasn't found in a platform directory, return the expected result + # in the test directory, even if no such file actually exists. + platform_dir = LayoutTestsDir() + if os.path.exists(os.path.join(platform_dir, baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if baselines: + return baselines + + return [(None, baseline_filename)] + + +def ExpectedFilename(filename, suffix): + """Given a test name, returns an absolute path to its expected results. + + If no expected results are found in any of the searched directories, the + directory in which the test itself is located will be returned. The return + value is in the format appropriate for the platform (e.g., "\\" for + path separators on windows). + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. This should not be None, but may be an empty string. + platform: the most-specific directory name to use to build the + search list of directories, e.g., 'chromium-win', or + 'chromium-mac-leopard' (we follow the WebKit format) + """ + platform_dir, baseline_filename = ExpectedBaselines(filename, suffix)[0] + if platform_dir: + return os.path.join(platform_dir, baseline_filename) + return os.path.join(LayoutTestsDir(), baseline_filename) + + +def RelativeTestFilename(filename): + """Provide the filename of the test relative to the layout tests + directory as a unix style path (a/b/c).""" + return _WinPathToUnix(filename[len(LayoutTestsDir()) + 1:]) + + +def _WinPathToUnix(path): + """Convert a windows path to use unix-style path separators (a/b/c).""" + return path.replace('\\', '/') + +# +# Routines that are arguably platform-specific but have been made +# generic for now (they used to be in platform_utils_*) +# + + +def FilenameToUri(full_path): + """Convert a test file to a URI.""" + LAYOUTTEST_HTTP_DIR = "http/tests/" + LAYOUTTEST_WEBSOCKET_DIR = "websocket/tests/" + + relative_path = _WinPathToUnix(RelativeTestFilename(full_path)) + port = None + use_ssl = False + + if relative_path.startswith(LAYOUTTEST_HTTP_DIR): + # http/tests/ run off port 8000 and ssl/ off 8443 + relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] + port = 8000 + elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR): + # websocket/tests/ run off port 8880 and 9323 + # Note: the root is /, not websocket/tests/ + port = 8880 + + # Make http/tests/local run as local files. This is to mimic the + # logic in run-webkit-tests. + # TODO(jianli): Consider extending this to "media/". 
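 +    # Comment-only sketch of the URI mappings produced below (added for
 +    # illustration, not part of the original patch; the test names are
 +    # hypothetical):
 +    #
 +    #   .../LayoutTests/http/tests/xmlhttprequest/foo.html
 +    #       -> http://127.0.0.1:8000/xmlhttprequest/foo.html
 +    #   .../LayoutTests/http/tests/ssl/foo.html
 +    #       -> https://127.0.0.1:8443/ssl/foo.html
 +    #   .../LayoutTests/fast/js/bar.html
 +    #       -> file:// URI built from GetAbsolutePath(full_path)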
+ if port and not relative_path.startswith("local/"): + if relative_path.startswith("ssl/"): + port += 443 + protocol = "https" + else: + protocol = "http" + return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) + + if sys.platform in ('cygwin', 'win32'): + return "file:///" + GetAbsolutePath(full_path) + return "file://" + GetAbsolutePath(full_path) + + +def GetAbsolutePath(path): + """Returns an absolute UNIX path.""" + return _WinPathToUnix(os.path.abspath(path)) + + +def MaybeMakeDirectory(*path): + """Creates the specified directory if it doesn't already exist.""" + # This is a reimplementation of google.path_utils.MaybeMakeDirectory(). + try: + os.makedirs(os.path.join(*path)) + except OSError, e: + if e.errno != errno.EEXIST: + raise + + +def PathFromBase(*comps): + """Returns an absolute filename from a set of components specified + relative to the top of the source tree. If the path does not exist, + the exception PathNotFound is raised.""" + # This is a reimplementation of google.path_utils.PathFromBase(). + global _basedir + if _basedir == None: + # We compute the top of the source tree by finding the absolute + # path of this source file, and then climbing up three directories + # as given in subpath. If we move this file, subpath needs to be + # updated. + path = os.path.abspath(__file__) + subpath = os.path.join('webkit', 'tools', 'layout_tests') + _basedir = path[:path.index(subpath)] + path = os.path.join(_basedir, *comps) + if not os.path.exists(path): + raise PathNotFound('could not find %s' % (path)) + return path + + +def RemoveDirectory(*path): + """Recursively removes a directory, even if it's marked read-only. + + Remove the directory located at *path, if it exists. + + shutil.rmtree() doesn't work on Windows if any of the files or directories + are read-only, which svn repositories and some .svn files are. We need to + be able to force the files to be writable (i.e., deletable) as we traverse + the tree. + + Even with all this, Windows still sometimes fails to delete a file, citing + a permission error (maybe something to do with antivirus scans or disk + indexing). The best suggestion any of the user forums had was to wait a + bit and try again, so we do that too. It's hand-waving, but sometimes it + works. :/ + """ + file_path = os.path.join(*path) + if not os.path.exists(file_path): + return + + win32 = False + if sys.platform == 'win32': + win32 = True + # Some people don't have the APIs installed. In that case we'll do + # without. + try: + win32api = __import__('win32api') + win32con = __import__('win32con') + except ImportError: + win32 = False + + def remove_with_retry(rmfunc, path): + os.chmod(path, stat.S_IWRITE) + if win32: + win32api.SetFileAttributes(path, + win32con.FILE_ATTRIBUTE_NORMAL) + try: + return rmfunc(path) + except EnvironmentError, e: + if e.errno != errno.EACCES: + raise + print 'Failed to delete %s: trying again' % repr(path) + time.sleep(0.1) + return rmfunc(path) + else: + + def remove_with_retry(rmfunc, path): + if os.path.islink(path): + return os.remove(path) + else: + return rmfunc(path) + + for root, dirs, files in os.walk(file_path, topdown=False): + # For POSIX: making the directory writable guarantees removability. + # Windows will ignore the non-read-only bits in the chmod value. 
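 +        # (Added note, not in the original patch: 0770 below is a Python 2
 +        # octal literal -- read/write/execute for owner and group, no access
 +        # for others.)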
+ os.chmod(root, 0770) + for name in files: + remove_with_retry(os.remove, os.path.join(root, name)) + for name in dirs: + remove_with_retry(os.rmdir, os.path.join(root, name)) + + remove_with_retry(os.rmdir, file_path) + +# +# Wrappers around platform_utils +# + + +def PlatformName(platform=None): + """Returns the appropriate chromium platform name for |platform|. If + |platform| is None, returns the name of the chromium platform on the + currently running system. If |platform| is of the form 'chromium-*', + it is returned unchanged, otherwise 'chromium-' is prepended.""" + if platform == None: + return platform_utils.PlatformName() + if not platform.startswith('chromium-'): + platform = "chromium-" + platform + return platform + + +def PlatformVersion(): + return platform_utils.PlatformVersion() + + +def LigHTTPdExecutablePath(): + return platform_utils.LigHTTPdExecutablePath() + + +def LigHTTPdModulePath(): + return platform_utils.LigHTTPdModulePath() + + +def LigHTTPdPHPPath(): + return platform_utils.LigHTTPdPHPPath() + + +def WDiffPath(): + return platform_utils.WDiffPath() + + +def TestShellPath(target): + return platform_utils.TestShellPath(target) + + +def ImageDiffPath(target): + return platform_utils.ImageDiffPath(target) + + +def LayoutTestHelperPath(target): + return platform_utils.LayoutTestHelperPath(target) + + +def FuzzyMatchPath(): + return platform_utils.FuzzyMatchPath() + + +def ShutDownHTTPServer(server_pid): + return platform_utils.ShutDownHTTPServer(server_pid) + + +def KillAllTestShells(): + platform_utils.KillAllTestShells() diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils.py b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils.py new file mode 100644 index 0000000..03af83d --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils.py @@ -0,0 +1,25 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Platform-specific utilities and pseudo-constants + +Any functions whose implementations or values differ from one platform to +another should be defined in their respective platform_utils_.py +modules. The appropriate one of those will be imported into this module to +provide callers with a common, platform-independent interface. + +This file should only ever be imported by layout_package.path_utils. +""" + +import sys + +# We may not support the version of Python that a user has installed (Cygwin +# especially has had problems), but we'll allow the platform utils to be +# included in any case so we don't get an import error. +if sys.platform in ('cygwin', 'win32'): + from platform_utils_win import * +elif sys.platform == 'darwin': + from platform_utils_mac import * +elif sys.platform in ('linux', 'linux2', 'freebsd7', 'openbsd4'): + from platform_utils_linux import * diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_linux.py b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_linux.py new file mode 100644 index 0000000..a0a6ba4 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_linux.py @@ -0,0 +1,223 @@ +# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This is the Linux implementation of the layout_package.platform_utils + package. 
This file should only be imported by that package.""" + +import os +import signal +import subprocess +import sys +import logging + +import path_utils +import platform_utils_win + + +def PlatformName(): + """Returns the name of the platform we're currently running on.""" + return 'chromium-linux' + PlatformVersion() + + +def PlatformVersion(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. If the platform does not distinguish between + minor versions, it returns ''.""" + return '' + + +def GetNumCores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + num_cores = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(num_cores, int) and num_cores > 0: + return num_cores + return 1 + + +def BaselinePath(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = PlatformName() + return path_utils.PathFromBase('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + + +def BaselineSearchPath(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. Paths are relative to the top of the source tree.""" + return [BaselinePath(platform), + platform_utils_win.BaselinePath('chromium-win'), + path_utils.WebKitBaselinePath('win'), + path_utils.WebKitBaselinePath('mac')] + + +def ApacheExecutablePath(): + """Returns the executable path to start Apache""" + path = os.path.join("/usr", "sbin", "apache2") + if os.path.exists(path): + return path + print "Unable to fine Apache executable %s" % path + _MissingApache() + + +def ApacheConfigFilePath(): + """Returns the path to Apache config file""" + return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", + "http", "conf", "apache2-debian-httpd.conf") + + +def LigHTTPdExecutablePath(): + """Returns the executable path to start LigHTTPd""" + binpath = "/usr/sbin/lighttpd" + if os.path.exists(binpath): + return binpath + print "Unable to find LigHTTPd executable %s" % binpath + _MissingLigHTTPd() + + +def LigHTTPdModulePath(): + """Returns the library module path for LigHTTPd""" + modpath = "/usr/lib/lighttpd" + if os.path.exists(modpath): + return modpath + print "Unable to find LigHTTPd modules %s" % modpath + _MissingLigHTTPd() + + +def LigHTTPdPHPPath(): + """Returns the PHP executable path for LigHTTPd""" + binpath = "/usr/bin/php-cgi" + if os.path.exists(binpath): + return binpath + print "Unable to find PHP CGI executable %s" % binpath + _MissingLigHTTPd() + + +def WDiffPath(): + """Path to the WDiff executable, which we assume is already installed and + in the user's $PATH.""" + return 'wdiff' + + +def ImageDiffPath(target): + """Path to the image_diff binary. + + Args: + target: Build target mode (debug or release)""" + return _PathFromBuildResults(target, 'image_diff') + + +def LayoutTestHelperPath(target): + """Path to the layout_test helper binary, if needed, empty otherwise""" + return '' + + +def TestShellPath(target): + """Return the platform-specific binary path for our TestShell. 
+ + Args: + target: Build target mode (debug or release) """ + if target in ('Debug', 'Release'): + try: + debug_path = _PathFromBuildResults('Debug', 'test_shell') + release_path = _PathFromBuildResults('Release', 'test_shell') + + debug_mtime = os.stat(debug_path).st_mtime + release_mtime = os.stat(release_path).st_mtime + + if debug_mtime > release_mtime and target == 'Release' or \ + release_mtime > debug_mtime and target == 'Debug': + logging.info('\x1b[31mWarning: you are not running the most ' + 'recent test_shell binary. You need to pass ' + '--debug or not to select between Debug and ' + 'Release.\x1b[0m') + # This will fail if we don't have both a debug and release binary. + # That's fine because, in this case, we must already be running the + # most up-to-date one. + except path_utils.PathNotFound: + pass + + return _PathFromBuildResults(target, 'test_shell') + + +def FuzzyMatchPath(): + """Return the path to the fuzzy matcher binary.""" + return path_utils.PathFromBase('third_party', 'fuzzymatch', 'fuzzymatch') + + +def ShutDownHTTPServer(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # This isn't ideal, since it could conflict with web server processes + # not started by http_server.py, but good enough for now. + KillAllProcess('lighttpd') + KillAllProcess('apache2') + else: + try: + os.kill(server_pid, signal.SIGTERM) + #TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), + # so if kill fails on the given PID, just try to 'killall' web + # servers. + ShutDownHTTPServer(None) + + +def KillProcess(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + os.kill(pid, signal.SIGKILL) + + +def KillAllProcess(process_name): + null = open(os.devnull) + subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), + process_name], stderr=null) + null.close() + + +def KillAllTestShells(): + """Kills all instances of the test_shell binary currently running.""" + KillAllProcess('test_shell') + +# +# Private helper functions +# + + +def _MissingLigHTTPd(): + print 'Please install using: "sudo apt-get install lighttpd php5-cgi"' + print 'For complete Linux build requirements, please see:' + print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' + sys.exit(1) + + +def _MissingApache(): + print ('Please install using: "sudo apt-get install apache2 ' + 'libapache2-mod-php5"') + print 'For complete Linux build requirements, please see:' + print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' + sys.exit(1) + + +def _PathFromBuildResults(*pathies): + # FIXME(dkegel): use latest or warn if more than one found? + for dir in ["sconsbuild", "out", "xcodebuild"]: + try: + return path_utils.PathFromBase(dir, *pathies) + except: + pass + raise path_utils.PathNotFound("Unable to find %s in build tree" % + (os.path.join(*pathies))) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_mac.py b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_mac.py new file mode 100644 index 0000000..a357ff4 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_mac.py @@ -0,0 +1,175 @@ +# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This is the Mac implementation of the layout_package.platform_utils + package. This file should only be imported by that package.""" + +import os +import platform +import signal +import subprocess + +import path_utils + + +def PlatformName(): + """Returns the name of the platform we're currently running on.""" + # At the moment all chromium mac results are version-independent. At some + # point we may need to return 'chromium-mac' + PlatformVersion() + return 'chromium-mac' + + +def PlatformVersion(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. If the platform does not distinguish between + minor versions, it returns ''.""" + os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" + if not os_version_string: + return '-leopard' + + release_version = int(os_version_string.split('.')[1]) + + # we don't support 'tiger' or earlier releases + if release_version == 5: + return '-leopard' + elif release_version == 6: + return '-snowleopard' + + return '' + + +def GetNumCores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + return int(os.popen2("sysctl -n hw.ncpu")[1].read()) + + +def BaselinePath(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = PlatformName() + return path_utils.PathFromBase('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + +# TODO: We should add leopard and snowleopard to the list of paths to check +# once we start running the tests from snowleopard. + + +def BaselineSearchPath(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. Paths are relative to the top of the source tree.""" + return [BaselinePath(platform), + path_utils.WebKitBaselinePath('mac' + PlatformVersion()), + path_utils.WebKitBaselinePath('mac')] + + +def WDiffPath(): + """Path to the WDiff executable, which we assume is already installed and + in the user's $PATH.""" + return 'wdiff' + + +def ImageDiffPath(target): + """Path to the image_diff executable + + Args: + target: build type - 'Debug','Release',etc.""" + return path_utils.PathFromBase('xcodebuild', target, 'image_diff') + + +def LayoutTestHelperPath(target): + """Path to the layout_test_helper executable, if needed, empty otherwise + + Args: + target: build type - 'Debug','Release',etc.""" + return path_utils.PathFromBase('xcodebuild', target, 'layout_test_helper') + + +def TestShellPath(target): + """Path to the test_shell executable. + + Args: + target: build type - 'Debug','Release',etc.""" + # TODO(pinkerton): make |target| happy with case-sensitive file systems. 
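 +    # For example (illustrative, not part of the original patch): with
 +    # target 'Release' this resolves to
 +    #   <src>/xcodebuild/Release/TestShell.app/Contents/MacOS/TestShell
 +    # where <src> is the top of the source tree located by
 +    # path_utils.PathFromBase().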
+ return path_utils.PathFromBase('xcodebuild', target, 'TestShell.app', + 'Contents', 'MacOS', 'TestShell') + + +def ApacheExecutablePath(): + """Returns the executable path to start Apache""" + return os.path.join("/usr", "sbin", "httpd") + + +def ApacheConfigFilePath(): + """Returns the path to Apache config file""" + return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", + "http", "conf", "apache2-httpd.conf") + + +def LigHTTPdExecutablePath(): + """Returns the executable path to start LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', + 'bin', 'lighttpd') + + +def LigHTTPdModulePath(): + """Returns the library module path for LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'lib') + + +def LigHTTPdPHPPath(): + """Returns the PHP executable path for LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'bin', + 'php-cgi') + + +def ShutDownHTTPServer(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # TODO(mmoss) This isn't ideal, since it could conflict with lighttpd + # processes not started by http_server.py, but good enough for now. + KillAllProcess('lighttpd') + KillAllProcess('httpd') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), + # so if kill fails on the given PID, just try to 'killall' web + # servers. + ShutDownHTTPServer(None) + + +def KillProcess(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + os.kill(pid, signal.SIGKILL) + + +def KillAllProcess(process_name): + # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or + # -SIGNALNUMBER must come first. Example problem: + # $ killall -u $USER -TERM lighttpd + # killall: illegal option -- T + # Use of the earlier -TERM placement is just fine on 10.5. + null = open(os.devnull) + subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), + process_name], stderr=null) + null.close() + + +def KillAllTestShells(): + """Kills all instances of the test_shell binary currently running.""" + KillAllProcess('TestShell') diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_win.py b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_win.py new file mode 100644 index 0000000..1f699dc --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/platform_utils_win.py @@ -0,0 +1,184 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This is the Linux implementation of the layout_package.platform_utils + package. This file should only be imported by that package.""" + +import os +import path_utils +import subprocess +import sys + + +def PlatformName(): + """Returns the name of the platform we're currently running on.""" + # We're not ready for version-specific results yet. When we uncomment + # this, we also need to add it to the BaselineSearchPath() + return 'chromium-win' + PlatformVersion() + + +def PlatformVersion(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. 
If the platform does not distinguish between + minor versions, it returns ''.""" + winver = sys.getwindowsversion() + if winver[0] == 6 and (winver[1] == 1): + return '-7' + if winver[0] == 6 and (winver[1] == 0): + return '-vista' + if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2): + return '-xp' + return '' + + +def GetNumCores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + return int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) + + +def BaselinePath(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = PlatformName() + return path_utils.PathFromBase('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + + +def BaselineSearchPath(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. Paths are relative to the top of the source tree.""" + dirs = [] + if platform is None: + platform = PlatformName() + + if platform == 'chromium-win-xp': + dirs.append(BaselinePath(platform)) + if platform in ('chromium-win-xp', 'chromium-win-vista'): + dirs.append(BaselinePath('chromium-win-vista')) + dirs.append(BaselinePath('chromium-win')) + dirs.append(path_utils.WebKitBaselinePath('win')) + dirs.append(path_utils.WebKitBaselinePath('mac')) + return dirs + + +def WDiffPath(): + """Path to the WDiff executable, whose binary is checked in on Win""" + return path_utils.PathFromBase('third_party', 'cygwin', 'bin', 'wdiff.exe') + + +def ImageDiffPath(target): + """Return the platform-specific binary path for the image compare util. + We use this if we can't find the binary in the default location + in path_utils. + + Args: + target: Build target mode (debug or release) + """ + return _FindBinary(target, 'image_diff.exe') + + +def LayoutTestHelperPath(target): + """Return the platform-specific binary path for the layout test helper. + We use this if we can't find the binary in the default location + in path_utils. + + Args: + target: Build target mode (debug or release) + """ + return _FindBinary(target, 'layout_test_helper.exe') + + +def TestShellPath(target): + """Return the platform-specific binary path for our TestShell. + We use this if we can't find the binary in the default location + in path_utils. + + Args: + target: Build target mode (debug or release) + """ + return _FindBinary(target, 'test_shell.exe') + + +def ApacheExecutablePath(): + """Returns the executable path to start Apache""" + path = path_utils.PathFromBase('third_party', 'cygwin', "usr", "sbin") + # Don't return httpd.exe since we want to use this from cygwin. 
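The Windows PlatformVersion() above keys off sys.getwindowsversion(), which only exists on Windows. A sketch of the same version table with the major and minor numbers passed in explicitly (the helper name is illustrative, not part of this patch):

    def WinVersionSuffix(major, minor):
        # Same mapping as PlatformVersion() above.
        if major == 6 and minor == 1:
            return '-7'
        if major == 6 and minor == 0:
            return '-vista'
        if major == 5 and minor in (1, 2):
            return '-xp'
        return ''

    assert WinVersionSuffix(5, 1) == '-xp'
    assert WinVersionSuffix(6, 0) == '-vista'
    assert WinVersionSuffix(6, 1) == '-7'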
+ return os.path.join(path, "httpd") + + +def ApacheConfigFilePath(): + """Returns the path to Apache config file""" + return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", + "http", "conf", "cygwin-httpd.conf") + + +def LigHTTPdExecutablePath(): + """Returns the executable path to start LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'win', + 'LightTPD.exe') + + +def LigHTTPdModulePath(): + """Returns the library module path for LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'win', 'lib') + + +def LigHTTPdPHPPath(): + """Returns the PHP executable path for LigHTTPd""" + return path_utils.PathFromBase('third_party', 'lighttpd', 'win', 'php5', + 'php-cgi.exe') + + +def ShutDownHTTPServer(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + Unused in this implementation of the method. + """ + subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + + +def KillProcess(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + +def KillAllTestShells(self): + """Kills all instances of the test_shell binary currently running.""" + subprocess.Popen(('taskkill.exe', '/f', '/im', 'test_shell.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + +# +# Private helper functions. +# + + +def _FindBinary(target, binary): + """On Windows, we look for binaries that we compile in potentially + two places: src/webkit/$target (preferably, which we get if we + built using webkit_glue.gyp), or src/chrome/$target (if compiled some + other way).""" + try: + return path_utils.PathFromBase('webkit', target, binary) + except path_utils.PathNotFound: + try: + return path_utils.PathFromBase('chrome', target, binary) + except path_utils.PathNotFound: + return path_utils.PathFromBase('build', target, binary) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/test_expectations.py b/webkit/tools/layout_tests/webkitpy/layout_package/test_expectations.py new file mode 100644 index 0000000..a273f93 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/test_expectations.py @@ -0,0 +1,783 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A helper class for reading in and dealing with tests expectations +for layout tests. +""" + +import logging +import os +import re +import sys +import time +import path_utils + +sys.path.append(path_utils.PathFromBase('third_party')) +import simplejson + +# Test expectation and modifier constants. 
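_FindBinary() in platform_utils_win.py above probes the webkit/, chrome/, and build/ output directories in turn, relying on path_utils.PathNotFound to fall through to the next candidate. A generic sketch of that first-match lookup, with os.path.exists standing in for the path_utils check (names are illustrative, not from this patch):

    import os

    def FirstExistingBinary(base_dirs, target, binary):
        # Return the first <base>/<target>/<binary> path that exists.
        for base in base_dirs:
            candidate = os.path.join(base, target, binary)
            if os.path.exists(candidate):
                return candidate
        raise IOError('unable to find %s under %s' % (binary, base_dirs))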
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, + DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16) + +# Test expectation file update action constants +(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) + + +class TestExpectations: + TEST_LIST = "test_expectations.txt" + + def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode, + tests_are_present=True): + """Reads the test expectations files from the given directory.""" + path = os.path.join(directory, self.TEST_LIST) + self._expected_failures = TestExpectationsFile(path, tests, platform, + is_debug_mode, is_lint_mode, tests_are_present=tests_are_present) + + # TODO(ojan): Allow for removing skipped tests when getting the list of + # tests to run, but not when getting metrics. + # TODO(ojan): Replace the Get* calls here with the more sane API exposed + # by TestExpectationsFile below. Maybe merge the two classes entirely? + + def GetExpectationsJsonForAllPlatforms(self): + return self._expected_failures.GetExpectationsJsonForAllPlatforms() + + def GetRebaseliningFailures(self): + return (self._expected_failures.GetTestSet(REBASELINE, FAIL) | + self._expected_failures.GetTestSet(REBASELINE, IMAGE) | + self._expected_failures.GetTestSet(REBASELINE, TEXT) | + self._expected_failures.GetTestSet(REBASELINE, + IMAGE_PLUS_TEXT)) + + def GetOptions(self, test): + return self._expected_failures.GetOptions(test) + + def GetExpectations(self, test): + return self._expected_failures.GetExpectations(test) + + def GetExpectationsString(self, test): + """Returns the expectatons for the given test as an uppercase string. + If there are no expectations for the test, then "PASS" is returned.""" + expectations = self.GetExpectations(test) + retval = [] + + for expectation in expectations: + for item in TestExpectationsFile.EXPECTATIONS.items(): + if item[1] == expectation: + retval.append(item[0]) + break + + return " ".join(retval).upper() + + def GetTimelineForTest(self, test): + return self._expected_failures.GetTimelineForTest(test) + + def GetTestsWithResultType(self, result_type): + return self._expected_failures.GetTestsWithResultType(result_type) + + def GetTestsWithTimeline(self, timeline): + return self._expected_failures.GetTestsWithTimeline(timeline) + + def MatchesAnExpectedResult(self, test, result): + """Returns whether we got one of the expected results for this test.""" + return (result in self._expected_failures.GetExpectations(test) or + (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and + FAIL in self._expected_failures.GetExpectations(test)) or + result == MISSING and self.IsRebaselining(test) or + result == SKIP and self._expected_failures.HasModifier(test, + SKIP)) + + def IsRebaselining(self, test): + return self._expected_failures.HasModifier(test, REBASELINE) + + def HasModifier(self, test, modifier): + return self._expected_failures.HasModifier(test, modifier) + + def RemovePlatformFromFile(self, tests, platform, backup=False): + return self._expected_failures.RemovePlatformFromFile(tests, + platform, + backup) + + +def StripComments(line): + """Strips comments from a line and return None if the line is empty + or else the contents of line with leading and trailing spaces removed + and all other whitespace collapsed""" + + commentIndex = line.find('//') + if commentIndex is -1: + commentIndex = len(line) + + line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) + if line == '': + return None + else: + return line + + +class 
ModifiersAndExpectations: + """A holder for modifiers and expectations on a test that serializes to + JSON.""" + + def __init__(self, modifiers, expectations): + self.modifiers = modifiers + self.expectations = expectations + + +class ExpectationsJsonEncoder(simplejson.JSONEncoder): + """JSON encoder that can handle ModifiersAndExpectations objects. + """ + + def default(self, obj): + if isinstance(obj, ModifiersAndExpectations): + return {"modifiers": obj.modifiers, + "expectations": obj.expectations} + else: + return JSONEncoder.default(self, obj) + + +class TestExpectationsFile: + """Test expectation files consist of lines with specifications of what + to expect from layout test cases. The test cases can be directories + in which case the expectations apply to all test cases in that + directory and any subdirectory. The format of the file is along the + lines of: + + LayoutTests/fast/js/fixme.js = FAIL + LayoutTests/fast/js/flaky.js = FAIL PASS + LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS + ... + + To add other options: + SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + + SKIP: Doesn't run the test. + SLOW: The test takes a long time to run, but does not timeout indefinitely. + WONTFIX: For tests that we never intend to pass on a given platform. + DEFER: Test does not count in our statistics for the current release. + DEBUG: Expectations apply only to the debug build. + RELEASE: Expectations apply only to release build. + LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these + platforms. + + Notes: + -A test cannot be both SLOW and TIMEOUT + -A test cannot be both DEFER and WONTFIX + -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is + a migratory state that currently means either IMAGE, TEXT, or + IMAGE+TEXT. Once we have finished migrating the expectations, we will + change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT + identifier. + -A test can be included twice, but not via the same path. + -If a test is included twice, then the more precise path wins. 
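The docstring above spells out the expectations grammar: an optional space-separated options prefix, a colon, a test path, '=', and one or more expectation keywords, with '//' starting a comment. The real parser is ParseExpectationsLine() later in this file; the following compact sketch of the same grammar is illustration only, not code from this patch:

    def ParseLine(line):
        # Drop trailing // comments and surrounding whitespace.
        line = line.split('//')[0].strip()
        if not line:
            return None, [], []
        options = []
        if ':' in line:
            option_part, line = line.split(':', 1)
            options = option_part.lower().split()
        test, expectations = line.split('=', 1)
        return test.strip(), options, expectations.lower().split()

    assert ParseLine('DEFER LINUX WIN : fast/js/no-good.js = TIMEOUT PASS') == (
        'fast/js/no-good.js', ['defer', 'linux', 'win'], ['timeout', 'pass'])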
+ -CRASH tests cannot be DEFER or WONTFIX + """ + + EXPECTATIONS = {'pass': PASS, + 'fail': FAIL, + 'text': TEXT, + 'image': IMAGE, + 'image+text': IMAGE_PLUS_TEXT, + 'timeout': TIMEOUT, + 'crash': CRASH, + 'missing': MISSING} + + EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), + PASS: ('pass', 'passes'), + FAIL: ('failure', 'failures'), + TEXT: ('text diff mismatch', + 'text diff mismatch'), + IMAGE: ('image mismatch', 'image mismatch'), + IMAGE_PLUS_TEXT: ('image and text mismatch', + 'image and text mismatch'), + CRASH: ('test shell crash', + 'test shell crashes'), + TIMEOUT: ('test timed out', 'tests timed out'), + MISSING: ('no expected result found', + 'no expected results found')} + + EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, + TEXT, IMAGE, FAIL, SKIP) + + BASE_PLATFORMS = ('linux', 'mac', 'win') + PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7') + + BUILD_TYPES = ('debug', 'release') + + MODIFIERS = {'skip': SKIP, + 'wontfix': WONTFIX, + 'defer': DEFER, + 'slow': SLOW, + 'rebaseline': REBASELINE, + 'none': NONE} + + TIMELINES = {'wontfix': WONTFIX, + 'now': NOW, + 'defer': DEFER} + + RESULT_TYPES = {'skip': SKIP, + 'pass': PASS, + 'fail': FAIL, + 'flaky': FLAKY} + + def __init__(self, path, full_test_list, platform, is_debug_mode, + is_lint_mode, expectations_as_str=None, suppress_errors=False, + tests_are_present=True): + """ + path: The path to the expectation file. An error is thrown if a test is + listed more than once. + full_test_list: The list of all tests to be run pending processing of + the expections for those tests. + platform: Which platform from self.PLATFORMS to filter tests for. + is_debug_mode: Whether we testing a test_shell built debug mode. + is_lint_mode: Whether this is just linting test_expecatations.txt. + expectations_as_str: Contents of the expectations file. Used instead of + the path. This makes unittesting sane. + suppress_errors: Whether to suppress lint errors. + tests_are_present: Whether the test files are present in the local + filesystem. The LTTF Dashboard uses False here to avoid having to + keep a local copy of the tree. + """ + + self._path = path + self._expectations_as_str = expectations_as_str + self._is_lint_mode = is_lint_mode + self._tests_are_present = tests_are_present + self._full_test_list = full_test_list + self._suppress_errors = suppress_errors + self._errors = [] + self._non_fatal_errors = [] + self._platform = self.ToTestPlatformName(platform) + if self._platform is None: + raise Exception("Unknown platform '%s'" % (platform)) + self._is_debug_mode = is_debug_mode + + # Maps relative test paths as listed in the expectations file to a + # list of maps containing modifiers and expectations for each time + # the test is listed in the expectations file. + self._all_expectations = {} + + # Maps a test to its list of expectations. + self._test_to_expectations = {} + + # Maps a test to its list of options (string values) + self._test_to_options = {} + + # Maps a test to its list of modifiers: the constants associated with + # the options minus any bug or platform strings + self._test_to_modifiers = {} + + # Maps a test to the base path that it was listed with in the list. 
+ self._test_list_paths = {} + + self._modifier_to_tests = self._DictOfSets(self.MODIFIERS) + self._expectation_to_tests = self._DictOfSets(self.EXPECTATIONS) + self._timeline_to_tests = self._DictOfSets(self.TIMELINES) + self._result_type_to_tests = self._DictOfSets(self.RESULT_TYPES) + + self._Read(self._GetIterableExpectations()) + + def _DictOfSets(self, strings_to_constants): + """Takes a dict of strings->constants and returns a dict mapping + each constant to an empty set.""" + d = {} + for c in strings_to_constants.values(): + d[c] = set() + return d + + def _GetIterableExpectations(self): + """Returns an object that can be iterated over. Allows for not caring + about whether we're iterating over a file or a new-line separated + string.""" + if self._expectations_as_str: + iterable = [x + "\n" for x in + self._expectations_as_str.split("\n")] + # Strip final entry if it's empty to avoid added in an extra + # newline. + if iterable[len(iterable) - 1] == "\n": + return iterable[:len(iterable) - 1] + return iterable + else: + return open(self._path) + + def ToTestPlatformName(self, name): + """Returns the test expectation platform that will be used for a + given platform name, or None if there is no match.""" + chromium_prefix = 'chromium-' + name = name.lower() + if name.startswith(chromium_prefix): + name = name[len(chromium_prefix):] + if name in self.PLATFORMS: + return name + return None + + def GetTestSet(self, modifier, expectation=None, include_skips=True): + if expectation is None: + tests = self._modifier_to_tests[modifier] + else: + tests = (self._expectation_to_tests[expectation] & + self._modifier_to_tests[modifier]) + + if not include_skips: + tests = tests - self.GetTestSet(SKIP, expectation) + + return tests + + def GetTestsWithResultType(self, result_type): + return self._result_type_to_tests[result_type] + + def GetTestsWithTimeline(self, timeline): + return self._timeline_to_tests[timeline] + + def GetOptions(self, test): + """This returns the entire set of options for the given test + (the modifiers plus the BUGXXXX identifier). This is used by the + LTTF dashboard.""" + return self._test_to_options[test] + + def HasModifier(self, test, modifier): + return test in self._modifier_to_tests[modifier] + + def GetExpectations(self, test): + return self._test_to_expectations[test] + + def GetExpectationsJsonForAllPlatforms(self): + # Specify separators in order to get compact encoding. + return ExpectationsJsonEncoder(separators=(',', ':')).encode( + self._all_expectations) + + def Contains(self, test): + return test in self._test_to_expectations + + def RemovePlatformFromFile(self, tests, platform, backup=False): + """Remove the platform option from test expectations file. + + If a test is in the test list and has an option that matches the given + platform, remove the matching platform and save the updated test back + to the file. If no other platforms remaining after removal, delete the + test from the file. + + Args: + tests: list of tests that need to update.. + platform: which platform option to remove. 
+ backup: if true, the original test expectations file is saved as + [self.TEST_LIST].orig.YYYYMMDDHHMMSS + + Returns: + no + """ + + new_file = self._path + '.new' + logging.debug('Original file: "%s"', self._path) + logging.debug('New file: "%s"', new_file) + f_orig = self._GetIterableExpectations() + f_new = open(new_file, 'w') + + tests_removed = 0 + tests_updated = 0 + lineno = 0 + for line in f_orig: + lineno += 1 + action = self._GetPlatformUpdateAction(line, lineno, tests, + platform) + if action == NO_CHANGE: + # Save the original line back to the file + logging.debug('No change to test: %s', line) + f_new.write(line) + elif action == REMOVE_TEST: + tests_removed += 1 + logging.info('Test removed: %s', line) + elif action == REMOVE_PLATFORM: + parts = line.split(':') + new_options = parts[0].replace(platform.upper() + ' ', '', 1) + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.write(new_line) + tests_updated += 1 + logging.info('Test updated: ') + logging.info(' old: %s', line) + logging.info(' new: %s', new_line) + elif action == ADD_PLATFORMS_EXCEPT_THIS: + parts = line.split(':') + new_options = parts[0] + for p in self.PLATFORMS: + if not p == platform: + new_options += p.upper() + ' ' + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.write(new_line) + tests_updated += 1 + logging.info('Test updated: ') + logging.info(' old: %s', line) + logging.info(' new: %s', new_line) + else: + logging.error('Unknown update action: %d; line: %s', + action, line) + + logging.info('Total tests removed: %d', tests_removed) + logging.info('Total tests updated: %d', tests_updated) + + f_orig.close() + f_new.close() + + if backup: + date_suffix = time.strftime('%Y%m%d%H%M%S', + time.localtime(time.time())) + backup_file = ('%s.orig.%s' % (self._path, date_suffix)) + if os.path.exists(backup_file): + os.remove(backup_file) + logging.info('Saving original file to "%s"', backup_file) + os.rename(self._path, backup_file) + else: + os.remove(self._path) + + logging.debug('Saving new file to "%s"', self._path) + os.rename(new_file, self._path) + return True + + def ParseExpectationsLine(self, line, lineno): + """Parses a line from test_expectations.txt and returns a tuple + with the test path, options as a list, expectations as a list.""" + line = StripComments(line) + if not line: + return (None, None, None) + + options = [] + if line.find(":") is -1: + test_and_expectation = line.split("=") + else: + parts = line.split(":") + options = self._GetOptionsList(parts[0]) + test_and_expectation = parts[1].split('=') + + test = test_and_expectation[0].strip() + if (len(test_and_expectation) is not 2): + self._AddError(lineno, "Missing expectations.", + test_and_expectation) + expectations = None + else: + expectations = self._GetOptionsList(test_and_expectation[1]) + + return (test, options, expectations) + + def _GetPlatformUpdateAction(self, line, lineno, tests, platform): + """Check the platform option and return the action needs to be taken. + + Args: + line: current line in test expectations file. + lineno: current line number of line + tests: list of tests that need to update.. + platform: which platform option to remove. + + Returns: + NO_CHANGE: no change to the line (comments, test not in the list etc) + REMOVE_TEST: remove the test from file. + REMOVE_PLATFORM: remove this platform option from the test. + ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. 
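RemovePlatformFromFile() above rewrites each matching line by either dropping it, stripping one platform keyword, or expanding an implicit all-platforms entry. A sketch of just the REMOVE_PLATFORM rewrite, operating on a single line (helper name is illustrative, not part of the patch):

    def RemovePlatformFromLine(line, platform):
        # Strip one platform keyword from the options prefix, leaving the
        # "test = expectations" part untouched.
        options, rest = line.split(':', 1)
        options = options.replace(platform.upper() + ' ', '', 1)
        return '%s:%s' % (options, rest)

    assert RemovePlatformFromLine(
        'LINUX WIN : fast/js/a.html = FAIL', 'win') == (
        'LINUX : fast/js/a.html = FAIL')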
+ """ + test, options, expectations = self.ParseExpectationsLine(line, lineno) + if not test or test not in tests: + return NO_CHANGE + + has_any_platform = False + for option in options: + if option in self.PLATFORMS: + has_any_platform = True + if not option == platform: + return REMOVE_PLATFORM + + # If there is no platform specified, then it means apply to all + # platforms. Return the action to add all the platforms except this + # one. + if not has_any_platform: + return ADD_PLATFORMS_EXCEPT_THIS + + return REMOVE_TEST + + def _HasValidModifiersForCurrentPlatform(self, options, lineno, + test_and_expectations, modifiers): + """Returns true if the current platform is in the options list or if + no platforms are listed and if there are no fatal errors in the + options list. + + Args: + options: List of lowercase options. + lineno: The line in the file where the test is listed. + test_and_expectations: The path and expectations for the test. + modifiers: The set to populate with modifiers. + """ + has_any_platform = False + has_bug_id = False + for option in options: + if option in self.MODIFIERS: + modifiers.add(option) + elif option in self.PLATFORMS: + has_any_platform = True + elif option.startswith('bug'): + has_bug_id = True + elif option not in self.BUILD_TYPES: + self._AddError(lineno, 'Invalid modifier for test: %s' % + option, test_and_expectations) + + if has_any_platform and not self._MatchPlatform(options): + return False + + if not has_bug_id and 'wontfix' not in options: + # TODO(ojan): Turn this into an AddError call once all the + # tests have BUG identifiers. + self._LogNonFatalError(lineno, 'Test lacks BUG modifier.', + test_and_expectations) + + if 'release' in options or 'debug' in options: + if self._is_debug_mode and 'debug' not in options: + return False + if not self._is_debug_mode and 'release' not in options: + return False + + if 'wontfix' in options and 'defer' in options: + self._AddError(lineno, 'Test cannot be both DEFER and WONTFIX.', + test_and_expectations) + + if self._is_lint_mode and 'rebaseline' in options: + self._AddError(lineno, 'REBASELINE should only be used for running' + 'rebaseline.py. Cannot be checked in.', test_and_expectations) + + return True + + def _MatchPlatform(self, options): + """Match the list of options against our specified platform. If any + of the options prefix-match self._platform, return True. This handles + the case where a test is marked WIN and the platform is WIN-VISTA. + + Args: + options: list of options + """ + for opt in options: + if self._platform.startswith(opt): + return True + return False + + def _AddToAllExpectations(self, test, options, expectations): + # Make all paths unix-style so the dashboard doesn't need to. 
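_MatchPlatform() above prefix-matches each option against the configured platform so that a bare WIN entry also applies to win-vista or win-7. A one-line sketch of that check (illustrative helper, not part of the patch):

    def MatchesPlatform(options, platform):
        # True if any lowercase option is a prefix of the platform name.
        return any(platform.startswith(opt) for opt in options)

    assert MatchesPlatform(['win', 'linux'], 'win-vista')
    assert not MatchesPlatform(['mac'], 'win-vista')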
+ test = test.replace('\\', '/') + if not test in self._all_expectations: + self._all_expectations[test] = [] + self._all_expectations[test].append( + ModifiersAndExpectations(options, expectations)) + + def _Read(self, expectations): + """For each test in an expectations iterable, generate the + expectations for it.""" + lineno = 0 + for line in expectations: + lineno += 1 + + test_list_path, options, expectations = \ + self.ParseExpectationsLine(line, lineno) + if not expectations: + continue + + self._AddToAllExpectations(test_list_path, + " ".join(options).upper(), + " ".join(expectations).upper()) + + modifiers = set() + if options and not self._HasValidModifiersForCurrentPlatform( + options, lineno, test_list_path, modifiers): + continue + + expectations = self._ParseExpectations(expectations, lineno, + test_list_path) + + if 'slow' in options and TIMEOUT in expectations: + self._AddError(lineno, + 'A test can not be both slow and timeout. If it times out ' + 'indefinitely, then it should be just timeout.', + test_list_path) + + full_path = os.path.join(path_utils.LayoutTestsDir(), + test_list_path) + full_path = os.path.normpath(full_path) + # WebKit's way of skipping tests is to add a -disabled suffix. + # So we should consider the path existing if the path or the + # -disabled version exists. + if (self._tests_are_present and not os.path.exists(full_path) + and not os.path.exists(full_path + '-disabled')): + # Log a non fatal error here since you hit this case any + # time you update test_expectations.txt without syncing + # the LayoutTests directory + self._LogNonFatalError(lineno, 'Path does not exist.', + test_list_path) + continue + + if not self._full_test_list: + tests = [test_list_path] + else: + tests = self._ExpandTests(test_list_path) + + self._AddTests(tests, expectations, test_list_path, lineno, + modifiers, options) + + if not self._suppress_errors and ( + len(self._errors) or len(self._non_fatal_errors)): + if self._is_debug_mode: + build_type = 'DEBUG' + else: + build_type = 'RELEASE' + print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \ + % (self._platform.upper(), build_type) + + for error in self._non_fatal_errors: + logging.error(error) + if len(self._errors): + raise SyntaxError('\n'.join(map(str, self._errors))) + + # Now add in the tests that weren't present in the expectations file + expectations = set([PASS]) + options = [] + modifiers = [] + if self._full_test_list: + for test in self._full_test_list: + if not test in self._test_list_paths: + self._AddTest(test, modifiers, expectations, options) + + def _GetOptionsList(self, listString): + return [part.strip().lower() for part in listString.strip().split(' ')] + + def _ParseExpectations(self, expectations, lineno, test_list_path): + result = set() + for part in expectations: + if not part in self.EXPECTATIONS: + self._AddError(lineno, 'Unsupported expectation: %s' % part, + test_list_path) + continue + expectation = self.EXPECTATIONS[part] + result.add(expectation) + return result + + def _ExpandTests(self, test_list_path): + """Convert the test specification to an absolute, normalized + path and make sure directories end with the OS path separator.""" + path = os.path.join(path_utils.LayoutTestsDir(), test_list_path) + path = os.path.normpath(path) + path = self._FixDir(path) + + result = [] + for test in self._full_test_list: + if test.startswith(path): + result.append(test) + return result + + def _FixDir(self, path): + """Check to see if the path points to a directory, and if so, append + the 
directory separator if necessary.""" + if self._tests_are_present: + if os.path.isdir(path): + path = os.path.join(path, '') + else: + # If we can't check the filesystem to see if this is a directory, + # we assume that files w/o an extension are directories. + # TODO(dpranke): What happens w/ LayoutTests/css2.1 ? + if os.path.splitext(path)[1] == '': + path = os.path.join(path, '') + return path + + def _AddTests(self, tests, expectations, test_list_path, lineno, modifiers, + options): + for test in tests: + if self._AlreadySeenTest(test, test_list_path, lineno): + continue + + self._ClearExpectationsForTest(test, test_list_path) + self._AddTest(test, modifiers, expectations, options) + + def _AddTest(self, test, modifiers, expectations, options): + """Sets the expected state for a given test. + + This routine assumes the test has not been added before. If it has, + use _ClearExpectationsForTest() to reset the state prior to + calling this. + + Args: + test: test to add + modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) + expectations: sequence of expectations (PASS, IMAGE, etc.) + options: sequence of keywords and bug identifiers.""" + self._test_to_expectations[test] = expectations + for expectation in expectations: + self._expectation_to_tests[expectation].add(test) + + self._test_to_options[test] = options + self._test_to_modifiers[test] = set() + for modifier in modifiers: + mod_value = self.MODIFIERS[modifier] + self._modifier_to_tests[mod_value].add(test) + self._test_to_modifiers[test].add(mod_value) + + if 'wontfix' in modifiers: + self._timeline_to_tests[WONTFIX].add(test) + elif 'defer' in modifiers: + self._timeline_to_tests[DEFER].add(test) + else: + self._timeline_to_tests[NOW].add(test) + + if 'skip' in modifiers: + self._result_type_to_tests[SKIP].add(test) + elif expectations == set([PASS]): + self._result_type_to_tests[PASS].add(test) + elif len(expectations) > 1: + self._result_type_to_tests[FLAKY].add(test) + else: + self._result_type_to_tests[FAIL].add(test) + + def _ClearExpectationsForTest(self, test, test_list_path): + """Remove prexisting expectations for this test. + This happens if we are seeing a more precise path + than a previous listing. + """ + if test in self._test_list_paths: + self._test_to_expectations.pop(test, '') + self._RemoveFromSets(test, self._expectation_to_tests) + self._RemoveFromSets(test, self._modifier_to_tests) + self._RemoveFromSets(test, self._timeline_to_tests) + self._RemoveFromSets(test, self._result_type_to_tests) + + self._test_list_paths[test] = os.path.normpath(test_list_path) + + def _RemoveFromSets(self, test, dict): + """Removes the given test from the sets in the dictionary. + + Args: + test: test to look for + dict: dict of sets of files""" + for set_of_tests in dict.itervalues(): + if test in set_of_tests: + set_of_tests.remove(test) + + def _AlreadySeenTest(self, test, test_list_path, lineno): + """Returns true if we've already seen a more precise path for this test + than the test_list_path. + """ + if not test in self._test_list_paths: + return False + + prev_base_path = self._test_list_paths[test] + if (prev_base_path == os.path.normpath(test_list_path)): + self._AddError(lineno, 'Duplicate expectations.', test) + return True + + # Check if we've already seen a more precise path. + return prev_base_path.startswith(os.path.normpath(test_list_path)) + + def _AddError(self, lineno, msg, path): + """Reports an error that will prevent running the tests. 
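_AlreadySeenTest() and _ClearExpectationsForTest() above implement the precedence rule from the file-format docstring: the most specific path that lists a test wins, and an exact duplicate is an error. A simplified sketch of that decision, assuming both paths are already normalized (names are illustrative):

    def ListingPrecedence(prev_base_path, new_list_path):
        if prev_base_path == new_list_path:
            return 'duplicate'        # reported as an error in the real code
        if prev_base_path.startswith(new_list_path):
            return 'keep-existing'    # earlier listing is more precise
        return 'replace'              # new listing is more precise

    assert ListingPrecedence('fast/js/a.html', 'fast/js') == 'keep-existing'
    assert ListingPrecedence('fast/js', 'fast/js/a.html') == 'replace'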
Does not + immediately raise an exception because we'd like to aggregate all the + errors so they can all be printed out.""" + self._errors.append('\nLine:%s %s %s' % (lineno, msg, path)) + + def _LogNonFatalError(self, lineno, msg, path): + """Reports an error that will not prevent running the tests. These are + still errors, but not bad enough to warrant breaking test running.""" + self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/test_failures.py b/webkit/tools/layout_tests/webkitpy/layout_package/test_failures.py new file mode 100644 index 0000000..18d26e1 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/test_failures.py @@ -0,0 +1,241 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Classes for failures that occur during tests.""" + +import os +import test_expectations + + +def DetermineResultType(failure_list): + """Takes a set of test_failures and returns which result type best fits + the list of failures. "Best fits" means we use the worst type of failure. + + Returns: + one of the test_expectations result types - PASS, TEXT, CRASH, etc.""" + + if not failure_list or len(failure_list) == 0: + return test_expectations.PASS + + failure_types = [type(f) for f in failure_list] + if FailureCrash in failure_types: + return test_expectations.CRASH + elif FailureTimeout in failure_types: + return test_expectations.TIMEOUT + elif (FailureMissingResult in failure_types or + FailureMissingImage in failure_types or + FailureMissingImageHash in failure_types): + return test_expectations.MISSING + else: + is_text_failure = FailureTextMismatch in failure_types + is_image_failure = (FailureImageHashIncorrect in failure_types or + FailureImageHashMismatch in failure_types) + if is_text_failure and is_image_failure: + return test_expectations.IMAGE_PLUS_TEXT + elif is_text_failure: + return test_expectations.TEXT + elif is_image_failure: + return test_expectations.IMAGE + else: + raise ValueError("unclassifiable set of failures: " + + str(failure_types)) + + +class TestFailure(object): + """Abstract base class that defines the failure interface.""" + + @staticmethod + def Message(): + """Returns a string describing the failure in more detail.""" + raise NotImplemented + + def ResultHtmlOutput(self, filename): + """Returns an HTML string to be included on the results.html page.""" + raise NotImplemented + + def ShouldKillTestShell(self): + """Returns True if we should kill the test shell before the next + test.""" + return False + + def RelativeOutputFilename(self, filename, modifier): + """Returns a relative filename inside the output dir that contains + modifier. + + For example, if filename is fast\dom\foo.html and modifier is + "-expected.txt", the return value is fast\dom\foo-expected.txt + + Args: + filename: relative filename to test file + modifier: a string to replace the extension of filename with + + Return: + The relative windows path to the output filename + """ + return os.path.splitext(filename)[0] + modifier + + +class FailureWithType(TestFailure): + """Base class that produces standard HTML output based on the test type. + + Subclasses may commonly choose to override the ResultHtmlOutput, but still + use the standard OutputLinks. 
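DetermineResultType() above picks the single worst failure from a list, ranking crashes ahead of timeouts, missing results, and the various mismatches. A simplified ranking sketch using strings instead of the test_expectations constants, and omitting the special text-plus-image combination the real code handles (illustration only):

    _SEVERITY = ['CRASH', 'TIMEOUT', 'MISSING', 'IMAGE_PLUS_TEXT',
                 'TEXT', 'IMAGE', 'PASS']

    def WorstResult(result_types):
        if not result_types:
            return 'PASS'
        return min(result_types, key=_SEVERITY.index)

    assert WorstResult(['TEXT', 'CRASH']) == 'CRASH'
    assert WorstResult([]) == 'PASS'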
+ """ + + def __init__(self, test_type): + TestFailure.__init__(self) + # TODO(ojan): This class no longer needs to know the test_type. + self._test_type = test_type + + # Filename suffixes used by ResultHtmlOutput. + OUT_FILENAMES = [] + + def OutputLinks(self, filename, out_names): + """Returns a string holding all applicable output file links. + + Args: + filename: the test filename, used to construct the result file names + out_names: list of filename suffixes for the files. If three or more + suffixes are in the list, they should be [actual, expected, diff, + wdiff]. Two suffixes should be [actual, expected], and a + single item is the [actual] filename suffix. + If out_names is empty, returns the empty string. + """ + links = [''] + uris = [self.RelativeOutputFilename(filename, fn) for fn in out_names] + if len(uris) > 1: + links.append("expected" % uris[1]) + if len(uris) > 0: + links.append("actual" % uris[0]) + if len(uris) > 2: + links.append("diff" % uris[2]) + if len(uris) > 3: + links.append("wdiff" % uris[3]) + return ' '.join(links) + + def ResultHtmlOutput(self, filename): + return self.Message() + self.OutputLinks(filename, self.OUT_FILENAMES) + + +class FailureTimeout(TestFailure): + """Test timed out. We also want to restart the test shell if this + happens.""" + + @staticmethod + def Message(): + return "Test timed out" + + def ResultHtmlOutput(self, filename): + return "%s" % self.Message() + + def ShouldKillTestShell(self): + return True + + +class FailureCrash(TestFailure): + """Test shell crashed.""" + + @staticmethod + def Message(): + return "Test shell crashed" + + def ResultHtmlOutput(self, filename): + # TODO(tc): create a link to the minidump file + stack = self.RelativeOutputFilename(filename, "-stack.txt") + return "%s stack" % (self.Message(), + stack) + + def ShouldKillTestShell(self): + return True + + +class FailureMissingResult(FailureWithType): + """Expected result was missing.""" + OUT_FILENAMES = ["-actual.txt"] + + @staticmethod + def Message(): + return "No expected results found" + + def ResultHtmlOutput(self, filename): + return ("%s" % self.Message() + + self.OutputLinks(filename, self.OUT_FILENAMES)) + + +class FailureTextMismatch(FailureWithType): + """Text diff output failed.""" + # Filename suffixes used by ResultHtmlOutput. + OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"] + OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt", + "-wdiff.html"] + + def __init__(self, test_type, has_wdiff): + FailureWithType.__init__(self, test_type) + if has_wdiff: + self.OUT_FILENAMES = self.OUT_FILENAMES_WDIFF + + @staticmethod + def Message(): + return "Text diff mismatch" + + +class FailureMissingImageHash(FailureWithType): + """Actual result hash was missing.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. 
+ OUT_FILENAMES = [] + + @staticmethod + def Message(): + return "No expected image hash found" + + def ResultHtmlOutput(self, filename): + return "%s" % self.Message() + + +class FailureMissingImage(FailureWithType): + """Actual result image was missing.""" + OUT_FILENAMES = ["-actual.png"] + + @staticmethod + def Message(): + return "No expected image found" + + def ResultHtmlOutput(self, filename): + return ("%s" % self.Message() + + self.OutputLinks(filename, self.OUT_FILENAMES)) + + +class FailureImageHashMismatch(FailureWithType): + """Image hashes didn't match.""" + OUT_FILENAMES = ["-actual.png", "-expected.png", "-diff.png"] + + @staticmethod + def Message(): + # We call this a simple image mismatch to avoid confusion, since + # we link to the PNGs rather than the checksums. + return "Image mismatch" + + +class FailureFuzzyFailure(FailureWithType): + """Image hashes didn't match.""" + OUT_FILENAMES = ["-actual.png", "-expected.png"] + + @staticmethod + def Message(): + return "Fuzzy image match also failed" + + +class FailureImageHashIncorrect(FailureWithType): + """Actual result hash is incorrect.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. + OUT_FILENAMES = [] + + @staticmethod + def Message(): + return "Images match, expected image hash incorrect. " + + def ResultHtmlOutput(self, filename): + return "%s" % self.Message() diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/test_files.py b/webkit/tools/layout_tests/webkitpy/layout_package/test_files.py new file mode 100644 index 0000000..bc8eaad --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/test_files.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""This module is used to find all of the layout test files used by Chromium +(across all platforms). It exposes one public function - GatherTestFiles() - +which takes an optional list of paths. If a list is passed in, the returned +list of test files is constrained to those found under the paths passed in, +i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files +under that directory.""" + +import glob +import os +import path_utils + +# When collecting test cases, we include any file with these extensions. +_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', + '.php', '.svg']) +# When collecting test cases, skip these directories +_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) + + +def GatherTestFiles(paths): + """Generate a set of test files and return them. + + Args: + paths: a list of command line paths relative to the webkit/tests + directory. glob patterns are ok. + """ + paths_to_walk = set() + # if paths is empty, provide a pre-defined list. + if paths: + for path in paths: + # If there's an * in the name, assume it's a glob pattern. 
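GatherTestFiles() below walks the requested paths, keeps files whose extensions are in _supported_file_extensions, and prunes _skipped_directories so the walk never descends into them. A condensed, self-contained sketch of that traversal (it ignores the glob and explicit-file handling in the real function):

    import os

    _SUPPORTED = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', '.php', '.svg'])
    _SKIPPED = set(['.svn', '_svn', 'resources', 'script-tests'])

    def CollectTests(root_dir):
        tests = set()
        for root, dirs, files in os.walk(root_dir):
            # Prune in place so skipped directories are never visited.
            dirs[:] = [d for d in dirs if d not in _SKIPPED]
            for name in files:
                if os.path.splitext(name)[1] in _SUPPORTED:
                    tests.add(os.path.normpath(os.path.join(root, name)))
        return tests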
+ path = os.path.join(path_utils.LayoutTestsDir(), path) + if path.find('*') > -1: + filenames = glob.glob(path) + paths_to_walk.update(filenames) + else: + paths_to_walk.add(path) + else: + paths_to_walk.add(path_utils.LayoutTestsDir()) + + # Now walk all the paths passed in on the command line and get filenames + test_files = set() + for path in paths_to_walk: + if os.path.isfile(path) and _HasSupportedExtension(path): + test_files.add(os.path.normpath(path)) + continue + + for root, dirs, files in os.walk(path): + # don't walk skipped directories and sub directories + if os.path.basename(root) in _skipped_directories: + del dirs[:] + continue + + for filename in files: + if _HasSupportedExtension(filename): + filename = os.path.join(root, filename) + filename = os.path.normpath(filename) + test_files.add(filename) + + return test_files + + +def _HasSupportedExtension(filename): + """Return true if filename is one of the file extensions we want to run a + test on.""" + extension = os.path.splitext(filename)[1] + return extension in _supported_file_extensions diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/webkitpy/layout_package/test_shell_thread.py new file mode 100644 index 0000000..1ae2ed8 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/test_shell_thread.py @@ -0,0 +1,488 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A Thread object for running the test shell and processing URLs from a +shared queue. + +Each thread runs a separate instance of the test_shell binary and validates +the output. When there are no more URLs to process in the shared queue, the +thread exits. +""" + +import copy +import logging +import os +import Queue +import signal +import subprocess +import sys +import thread +import threading +import time + +import path_utils +import test_failures + + +def ProcessOutput(proc, test_info, test_types, test_args, target, output_dir): + """Receives the output from a test_shell process, subjects it to a number + of tests, and returns a list of failure types the test produced. + + Args: + proc: an active test_shell process + test_info: Object containing the test filename, uri and timeout + test_types: list of test types to subject the output to + test_args: arguments to be passed to each test + target: Debug or Release + output_dir: directory to put crash stack traces into + + Returns: a list of failure objects and times for the test being processed + """ + outlines = [] + extra_lines = [] + failures = [] + crash = False + + # Some test args, such as the image hash, may be added or changed on a + # test-by-test basis. + local_test_args = copy.copy(test_args) + + start_time = time.time() + + line = proc.stdout.readline() + + # Only start saving output lines once we've loaded the URL for the test. + url = None + test_string = test_info.uri.strip() + + while line.rstrip() != "#EOF": + # Make sure we haven't crashed. + if line == '' and proc.poll() is not None: + failures.append(test_failures.FailureCrash()) + + # This is hex code 0xc000001d, which is used for abrupt + # termination. This happens if we hit ctrl+c from the prompt and + # we happen to be waiting on the test_shell. + # sdoyon: Not sure for which OS and in what circumstances the + # above code is valid. 
What works for me under Linux to detect + # ctrl+c is for the subprocess returncode to be negative SIGINT. + # And that agrees with the subprocess documentation. + if (-1073741510 == proc.returncode or + - signal.SIGINT == proc.returncode): + raise KeyboardInterrupt + crash = True + break + + # Don't include #URL lines in our output + if line.startswith("#URL:"): + url = line.rstrip()[5:] + if url != test_string: + logging.fatal("Test got out of sync:\n|%s|\n|%s|" % + (url, test_string)) + raise AssertionError("test out of sync") + elif line.startswith("#MD5:"): + local_test_args.hash = line.rstrip()[5:] + elif line.startswith("#TEST_TIMED_OUT"): + # Test timed out, but we still need to read until #EOF. + failures.append(test_failures.FailureTimeout()) + elif url: + outlines.append(line) + else: + extra_lines.append(line) + + line = proc.stdout.readline() + + end_test_time = time.time() + + if len(extra_lines): + extra = "".join(extra_lines) + if crash: + logging.debug("Stacktrace for %s:\n%s" % (test_string, extra)) + # Strip off "file://" since RelativeTestFilename expects + # filesystem paths. + filename = os.path.join(output_dir, + path_utils.RelativeTestFilename(test_string[7:])) + filename = os.path.splitext(filename)[0] + "-stack.txt" + path_utils.MaybeMakeDirectory(os.path.split(filename)[0]) + open(filename, "wb").write(extra) + else: + logging.debug("Previous test output extra lines after dump:\n%s" % + extra) + + # Check the output and save the results. + time_for_diffs = {} + for test_type in test_types: + start_diff_time = time.time() + new_failures = test_type.CompareOutput(test_info.filename, + proc, + ''.join(outlines), + local_test_args, + target) + # Don't add any more failures if we already have a crash, so we don't + # double-report those tests. We do double-report for timeouts since + # we still want to see the text and image output. + if not crash: + failures.extend(new_failures) + time_for_diffs[test_type.__class__.__name__] = ( + time.time() - start_diff_time) + + total_time_for_all_diffs = time.time() - end_test_time + test_run_time = end_test_time - start_time + return TestStats(test_info.filename, failures, test_run_time, + total_time_for_all_diffs, time_for_diffs) + + +def StartTestShell(command, args): + """Returns the process for a new test_shell started in layout-tests mode. + """ + cmd = [] + # Hook for injecting valgrind or other runtime instrumentation, + # used by e.g. tools/valgrind/valgrind_tests.py. + wrapper = os.environ.get("BROWSER_WRAPPER", None) + if wrapper != None: + cmd += [wrapper] + cmd += command + ['--layout-tests'] + args + return subprocess.Popen(cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + +class TestStats: + + def __init__(self, filename, failures, test_run_time, + total_time_for_all_diffs, time_for_diffs): + self.filename = filename + self.failures = failures + self.test_run_time = test_run_time + self.total_time_for_all_diffs = total_time_for_all_diffs + self.time_for_diffs = time_for_diffs + + +class SingleTestThread(threading.Thread): + """Thread wrapper for running a single test file.""" + + def __init__(self, test_shell_command, shell_args, test_info, test_types, + test_args, target, output_dir): + """ + Args: + test_info: Object containing the test filename, uri and timeout + output_dir: Directory to put crash stacks into. + See TestShellThread for documentation of the remaining arguments. 
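StartTestShell() above prepends an optional BROWSER_WRAPPER command from the environment (used, for example, to run test_shell under valgrind) ahead of the --layout-tests arguments. A small sketch of just that command assembly (helper name is illustrative, not part of the patch):

    import os

    def BuildCommand(command, args):
        cmd = []
        wrapper = os.environ.get('BROWSER_WRAPPER')
        if wrapper:
            cmd.append(wrapper)
        return cmd + command + ['--layout-tests'] + args

    assert BuildCommand(['test_shell'], ['foo.html'])[-2:] == [
        '--layout-tests', 'foo.html']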
+ """ + + threading.Thread.__init__(self) + self._command = test_shell_command + self._shell_args = shell_args + self._test_info = test_info + self._test_types = test_types + self._test_args = test_args + self._target = target + self._output_dir = output_dir + + def run(self): + proc = StartTestShell(self._command, self._shell_args + + ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri]) + self._test_stats = ProcessOutput(proc, self._test_info, + self._test_types, self._test_args, self._target, self._output_dir) + + def GetTestStats(self): + return self._test_stats + + +class TestShellThread(threading.Thread): + + def __init__(self, filename_list_queue, result_queue, test_shell_command, + test_types, test_args, shell_args, options): + """Initialize all the local state for this test shell thread. + + Args: + filename_list_queue: A thread safe Queue class that contains lists + of tuples of (filename, uri) pairs. + result_queue: A thread safe Queue class that will contain tuples of + (test, failure lists) for the test results. + test_shell_command: A list specifying the command+args for + test_shell + test_types: A list of TestType objects to run the test output + against. + test_args: A TestArguments object to pass to each TestType. + shell_args: Any extra arguments to be passed to test_shell.exe. + options: A property dictionary as produced by optparse. The + command-line options should match those expected by + run_webkit_tests; they are typically passed via the + run_webkit_tests.TestRunner class.""" + threading.Thread.__init__(self) + self._filename_list_queue = filename_list_queue + self._result_queue = result_queue + self._filename_list = [] + self._test_shell_command = test_shell_command + self._test_types = test_types + self._test_args = test_args + self._test_shell_proc = None + self._shell_args = shell_args + self._options = options + self._canceled = False + self._exception_info = None + self._directory_timing_stats = {} + self._test_stats = [] + self._num_tests = 0 + self._start_time = 0 + self._stop_time = 0 + + # Current directory of tests we're running. + self._current_dir = None + # Number of tests in self._current_dir. + self._num_tests_in_current_dir = None + # Time at which we started running tests from self._current_dir. + self._current_dir_start_time = None + + def GetDirectoryTimingStats(self): + """Returns a dictionary mapping test directory to a tuple of + (number of tests in that directory, time to run the tests)""" + return self._directory_timing_stats + + def GetIndividualTestStats(self): + """Returns a list of (test_filename, time_to_run_test, + total_time_for_all_diffs, time_for_diffs) tuples.""" + return self._test_stats + + def Cancel(self): + """Set a flag telling this thread to quit.""" + self._canceled = True + + def GetExceptionInfo(self): + """If run() terminated on an uncaught exception, return it here + ((type, value, traceback) tuple). + Returns None if run() terminated normally. 
Meant to be called after + joining this thread.""" + return self._exception_info + + def GetTotalTime(self): + return max(self._stop_time - self._start_time, 0.0) + + def GetNumTests(self): + return self._num_tests + + def run(self): + """Delegate main work to a helper method and watch for uncaught + exceptions.""" + self._start_time = time.time() + self._num_tests = 0 + try: + logging.debug('%s starting' % (self.getName())) + self._Run(test_runner=None, result_summary=None) + logging.debug('%s done (%d tests)' % (self.getName(), + self.GetNumTests())) + except: + # Save the exception for our caller to see. + self._exception_info = sys.exc_info() + self._stop_time = time.time() + # Re-raise it and die. + logging.error('%s dying: %s' % (self.getName(), + self._exception_info)) + raise + self._stop_time = time.time() + + def RunInMainThread(self, test_runner, result_summary): + """This hook allows us to run the tests from the main thread if + --num-test-shells==1, instead of having to always run two or more + threads. This allows us to debug the test harness without having to + do multi-threaded debugging.""" + self._Run(test_runner, result_summary) + + def _Run(self, test_runner, result_summary): + """Main work entry point of the thread. Basically we pull urls from the + filename queue and run the tests until we run out of urls. + + If test_runner is not None, then we call test_runner.UpdateSummary() + with the results of each test.""" + batch_size = 0 + batch_count = 0 + if self._options.batch_size: + try: + batch_size = int(self._options.batch_size) + except: + logging.info("Ignoring invalid batch size '%s'" % + self._options.batch_size) + + # Append tests we're running to the existing tests_run.txt file. + # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput. + tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = open(tests_run_filename, "a") + + while True: + if self._canceled: + logging.info('Testing canceled') + tests_run_file.close() + return + + if len(self._filename_list) is 0: + if self._current_dir is not None: + self._directory_timing_stats[self._current_dir] = \ + (self._num_tests_in_current_dir, + time.time() - self._current_dir_start_time) + + try: + self._current_dir, self._filename_list = \ + self._filename_list_queue.get_nowait() + except Queue.Empty: + self._KillTestShell() + tests_run_file.close() + return + + self._num_tests_in_current_dir = len(self._filename_list) + self._current_dir_start_time = time.time() + + test_info = self._filename_list.pop() + + # We have a url, run tests. + batch_count += 1 + self._num_tests += 1 + if self._options.run_singly: + failures = self._RunTestSingly(test_info) + else: + failures = self._RunTest(test_info) + + filename = test_info.filename + tests_run_file.write(filename + "\n") + if failures: + # Check and kill test shell if we need too. + if len([1 for f in failures if f.ShouldKillTestShell()]): + self._KillTestShell() + # Reset the batch count since the shell just bounced. + batch_count = 0 + # Print the error message(s). + error_str = '\n'.join([' ' + f.Message() for f in failures]) + logging.debug("%s %s failed:\n%s" % (self.getName(), + path_utils.RelativeTestFilename(filename), + error_str)) + else: + logging.debug("%s %s passed" % (self.getName(), + path_utils.RelativeTestFilename(filename))) + self._result_queue.put((filename, failures)) + + if batch_size > 0 and batch_count > batch_size: + # Bounce the shell and reset count. 
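The batching logic in _Run() above restarts the shared test_shell after --batch-size tests, limiting how much leaked state any one process can accumulate. A toy sketch of that bookkeeping (class name is illustrative, not part of the patch):

    class BatchCounter(object):
        def __init__(self, batch_size):
            self.batch_size = batch_size
            self.count = 0

        def RecordTest(self):
            # Returns True when the caller should bounce test_shell.
            self.count += 1
            if self.batch_size > 0 and self.count > self.batch_size:
                self.count = 0
                return True
            return False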
+ self._KillTestShell() + batch_count = 0 + + if test_runner: + test_runner.UpdateSummary(result_summary) + + def _RunTestSingly(self, test_info): + """Run a test in a separate thread, enforcing a hard time limit. + + Since we can only detect the termination of a thread, not any internal + state or progress, we can only run per-test timeouts when running test + files singly. + + Args: + test_info: Object containing the test filename, uri and timeout + + Return: + A list of TestFailure objects describing the error. + """ + worker = SingleTestThread(self._test_shell_command, + self._shell_args, + test_info, + self._test_types, + self._test_args, + self._options.target, + self._options.results_directory) + + worker.start() + + # When we're running one test per test_shell process, we can enforce + # a hard timeout. the test_shell watchdog uses 2.5x the timeout + # We want to be larger than that. + worker.join(int(test_info.timeout) * 3.0 / 1000.0) + if worker.isAlive(): + # If join() returned with the thread still running, the + # test_shell.exe is completely hung and there's nothing + # more we can do with it. We have to kill all the + # test_shells to free it up. If we're running more than + # one test_shell thread, we'll end up killing the other + # test_shells too, introducing spurious crashes. We accept that + # tradeoff in order to avoid losing the rest of this thread's + # results. + logging.error('Test thread hung: killing all test_shells') + path_utils.KillAllTestShells() + + try: + stats = worker.GetTestStats() + self._test_stats.append(stats) + failures = stats.failures + except AttributeError, e: + failures = [] + logging.error('Cannot get results of test: %s' % + test_info.filename) + + return failures + + def _RunTest(self, test_info): + """Run a single test file using a shared test_shell process. + + Args: + test_info: Object containing the test filename, uri and timeout + + Return: + A list of TestFailure objects describing the error. + """ + self._EnsureTestShellIsRunning() + # Args to test_shell is a space-separated list of + # "uri timeout pixel_hash" + # The timeout and pixel_hash are optional. The timeout is used if this + # test has a custom timeout. The pixel_hash is used to avoid doing an + # image dump if the checksums match, so it should be set to a blank + # value if we are generating a new baseline. + # (Otherwise, an image from a previous run will be copied into + # the baseline.) + image_hash = test_info.image_hash + if image_hash and self._test_args.new_baseline: + image_hash = "" + self._test_shell_proc.stdin.write(("%s %s %s\n" % + (test_info.uri, test_info.timeout, image_hash))) + + # If the test shell is dead, the above may cause an IOError as we + # try to write onto the broken pipe. If this is the first test for + # this test shell process, than the test shell did not + # successfully start. If this is not the first test, then the + # previous tests have caused some kind of delayed crash. We don't + # try to recover here. + self._test_shell_proc.stdin.flush() + + stats = ProcessOutput(self._test_shell_proc, test_info, + self._test_types, self._test_args, + self._options.target, + self._options.results_directory) + + self._test_stats.append(stats) + return stats.failures + + def _EnsureTestShellIsRunning(self): + """Start the shared test shell, if it's not running. Not for use when + running tests singly, since those each start a separate test shell in + their own thread. 
+ """ + if (not self._test_shell_proc or + self._test_shell_proc.poll() is not None): + self._test_shell_proc = StartTestShell(self._test_shell_command, + self._shell_args) + + def _KillTestShell(self): + """Kill the test shell process if it's running.""" + if self._test_shell_proc: + self._test_shell_proc.stdin.close() + self._test_shell_proc.stdout.close() + if self._test_shell_proc.stderr: + self._test_shell_proc.stderr.close() + if (sys.platform not in ('win32', 'cygwin') and + not self._test_shell_proc.poll()): + # Closing stdin/stdout/stderr hangs sometimes on OS X. + null = open(os.devnull, "w") + subprocess.Popen(["kill", "-9", + str(self._test_shell_proc.pid)], stderr=null) + null.close() + self._test_shell_proc = None diff --git a/webkit/tools/layout_tests/webkitpy/layout_package/websocket_server.py b/webkit/tools/layout_tests/webkitpy/layout_package/websocket_server.py new file mode 100644 index 0000000..090a5d2 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/layout_package/websocket_server.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""A class to help start/stop the PyWebSocket server used by layout tests.""" + + +import logging +import optparse +import os +import subprocess +import sys +import tempfile +import time + +import path_utils +import platform_utils +import http_server + +# So we can import httpd_utils below to make ui_tests happy. +sys.path.append(path_utils.PathFromBase('tools', 'python')) +import google.httpd_utils + +_WS_LOG_PREFIX = 'pywebsocket.ws.log-' +_WSS_LOG_PREFIX = 'pywebsocket.wss.log-' + +_DEFAULT_WS_PORT = 8880 +_DEFAULT_WSS_PORT = 9323 + + +def RemoveLogFiles(folder, starts_with): + files = os.listdir(folder) + for file in files: + if file.startswith(starts_with): + full_path = os.path.join(folder, file) + os.remove(full_path) + + +class PyWebSocketNotStarted(Exception): + pass + + +class PyWebSocketNotFound(Exception): + pass + + +class PyWebSocket(http_server.Lighttpd): + + def __init__(self, output_dir, port=_DEFAULT_WS_PORT, + root=None, + use_tls=False, + private_key=http_server.Lighttpd._pem_file, + certificate=http_server.Lighttpd._pem_file, + register_cygwin=None, + pidfile=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + http_server.Lighttpd.__init__(self, output_dir, + port=port, + root=root, + register_cygwin=register_cygwin) + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._use_tls = use_tls + self._private_key = private_key + self._certificate = certificate + if self._port: + self._port = int(self._port) + if self._use_tls: + self._server_name = 'PyWebSocket(Secure)' + else: + self._server_name = 'PyWebSocket' + self._pidfile = pidfile + self._wsout = None + + # Webkit tests + if self._root: + self._layout_tests = os.path.abspath(self._root) + self._web_socket_tests = os.path.abspath( + os.path.join(self._root, 'websocket', 'tests')) + else: + try: + self._web_socket_tests = path_utils.PathFromBase( + 'third_party', 'WebKit', 'LayoutTests', 'websocket', + 'tests') + self._layout_tests = path_utils.PathFromBase( + 'third_party', 'WebKit', 'LayoutTests') + except path_utils.PathNotFound: + self._web_socket_tests = None + + def Start(self): + if not self._web_socket_tests: + logging.info('No need to start %s server.' 
% self._server_name) + return + if self.IsRunning(): + raise PyWebSocketNotStarted('%s is already running.' % + self._server_name) + + time_str = time.strftime('%d%b%Y-%H%M%S') + if self._use_tls: + log_prefix = _WSS_LOG_PREFIX + else: + log_prefix = _WS_LOG_PREFIX + log_file_name = log_prefix + time_str + + # Remove old log files. We only need to keep the last ones. + RemoveLogFiles(self._output_dir, log_prefix) + + error_log = os.path.join(self._output_dir, log_file_name + "-err.txt") + + output_log = os.path.join(self._output_dir, log_file_name + "-out.txt") + self._wsout = open(output_log, "w") + + python_interp = sys.executable + pywebsocket_base = path_utils.PathFromBase( + 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket') + pywebsocket_script = path_utils.PathFromBase( + 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket', + 'mod_pywebsocket', 'standalone.py') + start_cmd = [ + python_interp, pywebsocket_script, + '-p', str(self._port), + '-d', self._layout_tests, + '-s', self._web_socket_tests, + '-l', error_log, + ] + if self._use_tls: + start_cmd.extend(['-t', '-k', self._private_key, + '-c', self._certificate]) + + # Put the cygwin directory first in the path to find cygwin1.dll + env = os.environ + if sys.platform in ('cygwin', 'win32'): + env['PATH'] = '%s;%s' % ( + path_utils.PathFromBase('third_party', 'cygwin', 'bin'), + env['PATH']) + + if sys.platform == 'win32' and self._register_cygwin: + setup_mount = path_utils.PathFromBase('third_party', 'cygwin', + 'setup_mount.bat') + subprocess.Popen(setup_mount).wait() + + env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + + env.get('PYTHONPATH', '')) + + logging.debug('Starting %s server.' % self._server_name) + self._process = subprocess.Popen(start_cmd, stdout=self._wsout, + env=env) + + # Wait a bit before checking the liveness of the server. + time.sleep(0.5) + + if self._use_tls: + url = 'https' + else: + url = 'http' + url = url + '://127.0.0.1:%d/' % self._port + if not google.httpd_utils.UrlIsAlive(url): + raise PyWebSocketNotStarted( + 'Failed to start %s server on port %s.' % + (self._server_name, self._port)) + + # Our process terminated already + if self._process.returncode != None: + raise PyWebSocketNotStarted( + 'Failed to start %s server.' % self._server_name) + if self._pidfile: + f = open(self._pidfile, 'w') + f.write("%d" % self._process.pid) + f.close() + + def Stop(self, force=False): + if not force and not self.IsRunning(): + return + + if self._process: + pid = self._process.pid + elif self._pidfile: + f = open(self._pidfile) + pid = int(f.read().strip()) + f.close() + + if not pid: + raise PyWebSocketNotFound( + 'Failed to find %s server pid.' % self._server_name) + + logging.debug('Shutting down %s server %d.' % (self._server_name, pid)) + platform_utils.KillProcess(pid) + + if self._process: + self._process.wait() + self._process = None + + if self._wsout: + self._wsout.close() + self._wsout = None + + +if '__main__' == __name__: + # Provide some command line params for starting the PyWebSocket server + # manually. 
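+ # Illustrative manual invocations (example command lines only; the pidfile path is arbitrary):
+ #   python websocket_server.py --server start -p 8880 --pidfile /tmp/pywebsocket.pid
+ #   python websocket_server.py --server stop --pidfile /tmp/pywebsocket.pid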
+ option_parser = optparse.OptionParser() + option_parser.add_option('--server', type='choice', + choices=['start', 'stop'], default='start', + help='Server action (start|stop)') + option_parser.add_option('-p', '--port', dest='port', + default=None, help='Port to listen on') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot ' + '(overrides layout test roots)') + option_parser.add_option('-t', '--tls', dest='use_tls', + action='store_true', + default=False, help='use TLS (wss://)') + option_parser.add_option('-k', '--private_key', dest='private_key', + default='', help='TLS private key file.') + option_parser.add_option('-c', '--certificate', dest='certificate', + default='', help='TLS certificate file.') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", + help='Register Cygwin paths (on Win try bots)') + option_parser.add_option('--pidfile', help='path to pid file.') + options, args = option_parser.parse_args() + + if not options.port: + if options.use_tls: + options.port = _DEFAULT_WSS_PORT + else: + options.port = _DEFAULT_WS_PORT + + kwds = {'port': options.port, 'use_tls': options.use_tls} + if options.root: + kwds['root'] = options.root + if options.private_key: + kwds['private_key'] = options.private_key + if options.certificate: + kwds['certificate'] = options.certificate + kwds['register_cygwin'] = options.register_cygwin + if options.pidfile: + kwds['pidfile'] = options.pidfile + + pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds) + + if 'start' == options.server: + pywebsocket.Start() + else: + pywebsocket.Stop(force=True) diff --git a/webkit/tools/layout_tests/webkitpy/rebaseline.py b/webkit/tools/layout_tests/webkitpy/rebaseline.py new file mode 100644 index 0000000..1c995c1 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/rebaseline.py @@ -0,0 +1,983 @@ +#!usr/bin/env python +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Rebaselining tool that automatically produces baselines for all platforms. + +The script does the following for each platform specified: + 1. Compile a list of tests that need rebaselining. + 2. Download test result archive from buildbot for the platform. + 3. Extract baselines from the archive file for all identified files. + 4. Add new baselines to SVN repository. + 5. For each test that has been rebaselined, remove this platform option from + the test in test_expectation.txt. If no other platforms remain after + removal, delete the rebaselined test from the file. + +At the end, the script generates a html that compares old and new baselines. +""" + +import logging +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import urllib +import webbrowser +import zipfile + +from layout_package import path_utils +from layout_package import test_expectations +from test_types import image_diff +from test_types import text_diff + +# Repository type constants. 
+REPO_SVN, REPO_UNKNOWN = range(2) + +BASELINE_SUFFIXES = ['.txt', '.png', '.checksum'] +REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux'] +ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel', + 'win-vista': 'webkit-dbg-vista', + 'win-xp': 'webkit-rel', + 'mac': 'webkit-rel-mac5', + 'linux': 'webkit-rel-linux', + 'win-canary': 'webkit-rel-webkit-org', + 'win-vista-canary': 'webkit-dbg-vista', + 'win-xp-canary': 'webkit-rel-webkit-org', + 'mac-canary': 'webkit-rel-mac-webkit-org', + 'linux-canary': 'webkit-rel-linux-webkit-org'} + +def RunShellWithReturnCode(command, print_output=False): + """Executes a command and returns the output and process return code. + + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. + + Returns: + command output, return code + """ + + # Use a shell for subcommands on Windows to get a PATH search. + use_shell = sys.platform.startswith('win') + p = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=use_shell) + if print_output: + output_array = [] + while True: + line = p.stdout.readline() + if not line: + break + if print_output: + print line.strip('\n') + output_array.append(line) + output = ''.join(output_array) + else: + output = p.stdout.read() + p.wait() + p.stdout.close() + + return output, p.returncode + +def RunShell(command, print_output=False): + """Executes a command and returns the output. + + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. + + Returns: + command output + """ + + output, return_code = RunShellWithReturnCode(command, print_output) + return output + +def LogDashedString(text, platform, logging_level=logging.INFO): + """Log text message with dashes on both sides.""" + + msg = text + if platform: + msg += ': ' + platform + if len(msg) < 78: + dashes = '-' * ((78 - len(msg)) / 2) + msg = '%s %s %s' % (dashes, msg, dashes) + + if logging_level == logging.ERROR: + logging.error(msg) + elif logging_level == logging.WARNING: + logging.warn(msg) + else: + logging.info(msg) + + +def SetupHtmlDirectory(html_directory): + """Setup the directory to store html results. + + All html related files are stored in the "rebaseline_html" subdirectory. + + Args: + html_directory: parent directory that stores the rebaselining results. + If None, a temp directory is created. + + Returns: + the directory that stores the html related rebaselining results. + """ + + if not html_directory: + html_directory = tempfile.mkdtemp() + elif not os.path.exists(html_directory): + os.mkdir(html_directory) + + html_directory = os.path.join(html_directory, 'rebaseline_html') + logging.info('Html directory: "%s"', html_directory) + + if os.path.exists(html_directory): + shutil.rmtree(html_directory, True) + logging.info('Deleted file at html directory: "%s"', html_directory) + + if not os.path.exists(html_directory): + os.mkdir(html_directory) + return html_directory + + +def GetResultFileFullpath(html_directory, baseline_filename, platform, + result_type): + """Get full path of the baseline result file. + + Args: + html_directory: directory that stores the html related files. + baseline_filename: name of the baseline file. + platform: win, linux or mac + result_type: type of the baseline result: '.txt', '.png'. + + Returns: + Full path of the baseline file for rebaselining result comparison. 
+ """ + + base, ext = os.path.splitext(baseline_filename) + result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) + fullpath = os.path.join(html_directory, result_filename) + logging.debug(' Result file full path: "%s".', fullpath) + return fullpath + + +class Rebaseliner(object): + """Class to produce new baselines for a given platform.""" + + REVISION_REGEX = r'' + + def __init__(self, platform, options): + self._file_dir = path_utils.GetAbsolutePath( + os.path.dirname(os.path.dirname(sys.argv[0]))) + self._platform = platform + self._options = options + self._rebaselining_tests = [] + self._rebaselined_tests = [] + + # Create tests and expectations helper which is used to: + # -. compile list of tests that need rebaselining. + # -. update the tests in test_expectations file after rebaseline is done. + self._test_expectations = test_expectations.TestExpectations(None, + self._file_dir, + platform, + False, + False) + + self._repo_type = self._GetRepoType() + + def Run(self, backup): + """Run rebaseline process.""" + + LogDashedString('Compiling rebaselining tests', self._platform) + if not self._CompileRebaseliningTests(): + return True + + LogDashedString('Downloading archive', self._platform) + archive_file = self._DownloadBuildBotArchive() + logging.info('') + if not archive_file: + logging.error('No archive found.') + return False + + LogDashedString('Extracting and adding new baselines', self._platform) + if not self._ExtractAndAddNewBaselines(archive_file): + return False + + LogDashedString('Updating rebaselined tests in file', self._platform) + self._UpdateRebaselinedTestsInFile(backup) + logging.info('') + + if len(self._rebaselining_tests) != len(self._rebaselined_tests): + logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' + 'REBASELINED.') + logging.warning(' Total tests needing rebaselining: %d', + len(self._rebaselining_tests)) + logging.warning(' Total tests rebaselined: %d', + len(self._rebaselined_tests)) + return False + + logging.warning('All tests needing rebaselining were successfully ' + 'rebaselined.') + + return True + + def GetRebaseliningTests(self): + return self._rebaselining_tests + + def _GetRepoType(self): + """Get the repository type that client is using.""" + + output, return_code = RunShellWithReturnCode(['svn', 'info'], False) + if return_code == 0: + return REPO_SVN + + return REPO_UNKNOWN + + def _CompileRebaseliningTests(self): + """Compile list of tests that need rebaselining for the platform. + + Returns: + List of tests that need rebaselining or + None if there is no such test. + """ + + self._rebaselining_tests = self._test_expectations.GetRebaseliningFailures() + if not self._rebaselining_tests: + logging.warn('No tests found that need rebaselining.') + return None + + logging.info('Total number of tests needing rebaselining for "%s": "%d"', + self._platform, len(self._rebaselining_tests)) + + test_no = 1 + for test in self._rebaselining_tests: + logging.info(' %d: %s', test_no, test) + test_no += 1 + + return self._rebaselining_tests + + def _GetLatestRevision(self, url): + """Get the latest layout test revision number from buildbot. + + Args: + url: Url to retrieve layout test revision numbers. + + Returns: + latest revision or + None on failure. 
+ """ + + logging.debug('Url to retrieve revision: "%s"', url) + + f = urllib.urlopen(url) + content = f.read() + f.close() + + revisions = re.findall(self.REVISION_REGEX, content) + if not revisions: + logging.error('Failed to find revision, content: "%s"', content) + return None + + revisions.sort(key=int) + logging.info('Latest revision: "%s"', revisions[len(revisions) - 1]) + return revisions[len(revisions) - 1] + + def _GetArchiveDirName(self, platform, webkit_canary): + """Get name of the layout test archive directory. + + Returns: + Directory name or + None on failure + """ + + if webkit_canary: + platform += '-canary' + + if platform in ARCHIVE_DIR_NAME_DICT: + return ARCHIVE_DIR_NAME_DICT[platform] + else: + logging.error('Cannot find platform key %s in archive directory name ' + 'dictionary', platform) + return None + + def _GetArchiveUrl(self): + """Generate the url to download latest layout test archive. + + Returns: + Url to download archive or + None on failure + """ + + dir_name = self._GetArchiveDirName(self._platform, + self._options.webkit_canary) + if not dir_name: + return None + + logging.debug('Buildbot platform dir name: "%s"', dir_name) + + url_base = '%s/%s/' % (self._options.archive_url, dir_name) + latest_revision = self._GetLatestRevision(url_base) + if latest_revision is None or latest_revision <= 0: + return None + + archive_url = ('%s%s/layout-test-results.zip' % (url_base, + latest_revision)) + logging.info('Archive url: "%s"', archive_url) + return archive_url + + def _DownloadBuildBotArchive(self): + """Download layout test archive file from buildbot. + + Returns: + True if download succeeded or + False otherwise. + """ + + url = self._GetArchiveUrl() + if url is None: + return None + + fn = urllib.urlretrieve(url)[0] + logging.info('Archive downloaded and saved to file: "%s"', fn) + return fn + + def _ExtractAndAddNewBaselines(self, archive_file): + """Extract new baselines from archive and add them to SVN repository. + + Args: + archive_file: full path to the archive file. + + Returns: + List of tests that have been rebaselined or + None on failure. + """ + + zip_file = zipfile.ZipFile(archive_file, 'r') + zip_namelist = zip_file.namelist() + + logging.debug('zip file namelist:') + for name in zip_namelist: + logging.debug(' ' + name) + + platform = path_utils.PlatformName(self._platform) + logging.debug('Platform dir: "%s"', platform) + + test_no = 1 + self._rebaselined_tests = [] + for test in self._rebaselining_tests: + logging.info('Test %d: %s', test_no, test) + + found = False + svn_error = False + test_basename = os.path.splitext(test)[0] + for suffix in BASELINE_SUFFIXES: + archive_test_name = 'layout-test-results/%s-actual%s' % (test_basename, + suffix) + logging.debug(' Archive test file name: "%s"', archive_test_name) + if not archive_test_name in zip_namelist: + logging.info(' %s file not in archive.', suffix) + continue + + found = True + logging.info(' %s file found in archive.', suffix) + + # Extract new baseline from archive and save it to a temp file. 
+ data = zip_file.read(archive_test_name) + temp_fd, temp_name = tempfile.mkstemp(suffix) + f = os.fdopen(temp_fd, 'wb') + f.write(data) + f.close() + + expected_filename = '%s-expected%s' % (test_basename, suffix) + expected_fullpath = os.path.join( + path_utils.ChromiumBaselinePath(platform), expected_filename) + expected_fullpath = os.path.normpath(expected_fullpath) + logging.debug(' Expected file full path: "%s"', expected_fullpath) + + # TODO(victorw): for now, the rebaselining tool checks whether + # or not THIS baseline is duplicate and should be skipped. + # We could improve the tool to check all baselines in upper and lower + # levels and remove all duplicated baselines. + if self._IsDupBaseline(temp_name, + expected_fullpath, + test, + suffix, + self._platform): + os.remove(temp_name) + self._DeleteBaseline(expected_fullpath) + continue + + # Create the new baseline directory if it doesn't already exist. + path_utils.MaybeMakeDirectory(os.path.dirname(expected_fullpath)) + + shutil.move(temp_name, expected_fullpath) + + if not self._SvnAdd(expected_fullpath): + svn_error = True + elif suffix != '.checksum': + self._CreateHtmlBaselineFiles(expected_fullpath) + + if not found: + logging.warn(' No new baselines found in archive.') + else: + if svn_error: + logging.warn(' Failed to add baselines to SVN.') + else: + logging.info(' Rebaseline succeeded.') + self._rebaselined_tests.append(test) + + test_no += 1 + + zip_file.close() + os.remove(archive_file) + + return self._rebaselined_tests + + def _IsDupBaseline(self, new_baseline, baseline_path, test, suffix, platform): + """Check whether a baseline is duplicate and can fallback to same + baseline for another platform. For example, if a test has same baseline + on linux and windows, then we only store windows baseline and linux + baseline will fallback to the windows version. + + Args: + expected_filename: baseline expectation file name. + test: test name. + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. + platform: baseline platform 'mac', 'win' or 'linux'. + + Returns: + True if the baseline is unnecessary. + False otherwise. + """ + test_filepath = os.path.join(path_utils.LayoutTestsDir(), test) + all_baselines = path_utils.ExpectedBaselines(test_filepath, + suffix, + platform, + True) + for (fallback_dir, fallback_file) in all_baselines: + if fallback_dir and fallback_file: + fallback_fullpath = os.path.normpath( + os.path.join(fallback_dir, fallback_file)) + if fallback_fullpath.lower() != baseline_path.lower(): + if not self._DiffBaselines(new_baseline, fallback_fullpath): + logging.info(' Found same baseline at %s', fallback_fullpath) + return True + else: + return False + + return False + + def _DiffBaselines(self, file1, file2): + """Check whether two baselines are different. + + Args: + file1, file2: full paths of the baselines to compare. + + Returns: + True if two files are different or have different extensions. + False otherwise. + """ + + ext1 = os.path.splitext(file1)[1].upper() + ext2 = os.path.splitext(file2)[1].upper() + if ext1 != ext2: + logging.warn('Files to compare have different ext. File1: %s; File2: %s', + file1, file2) + return True + + if ext1 == '.PNG': + return image_diff.ImageDiff(self._platform, '').DiffFiles(file1, + file2) + else: + return text_diff.TestTextDiff(self._platform, '').DiffFiles(file1, + file2) + + def _DeleteBaseline(self, filename): + """Remove the file from repository and delete it from disk. 
+ + Args: + filename: full path of the file to delete. + """ + + if not filename or not os.path.isfile(filename): + return + + if self._repo_type == REPO_SVN: + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + RunShell(['svn', 'delete', '--force', basename], False) + os.chdir(original_dir) + else: + os.remove(filename) + + def _UpdateRebaselinedTestsInFile(self, backup): + """Update the rebaselined tests in test expectations file. + + Args: + backup: if True, backup the original test expectations file. + + Returns: + no + """ + + if self._rebaselined_tests: + self._test_expectations.RemovePlatformFromFile(self._rebaselined_tests, + self._platform, + backup) + else: + logging.info('No test was rebaselined so nothing to remove.') + + def _SvnAdd(self, filename): + """Add the file to SVN repository. + + Args: + filename: full path of the file to add. + + Returns: + True if the file already exists in SVN or is sucessfully added to SVN. + False otherwise. + """ + + if not filename: + return False + + parent_dir, basename = os.path.split(filename) + if self._repo_type != REPO_SVN or parent_dir == filename: + logging.info("No svn checkout found, skip svn add.") + return True + + original_dir = os.getcwd() + os.chdir(parent_dir) + status_output = RunShell(['svn', 'status', basename], False) + os.chdir(original_dir) + output = status_output.upper() + if output.startswith('A') or output.startswith('M'): + logging.info(' File already added to SVN: "%s"', filename) + return True + + if output.find('IS NOT A WORKING COPY') >= 0: + logging.info(' File is not a working copy, add its parent: "%s"', + parent_dir) + return self._SvnAdd(parent_dir) + + os.chdir(parent_dir) + add_output = RunShell(['svn', 'add', basename], True) + os.chdir(original_dir) + output = add_output.upper().rstrip() + if output.startswith('A') and output.find(basename.upper()) >= 0: + logging.info(' Added new file: "%s"', filename) + self._SvnPropSet(filename) + return True + + if (not status_output) and (add_output.upper().find( + 'ALREADY UNDER VERSION CONTROL') >= 0): + logging.info(' File already under SVN and has no change: "%s"', filename) + return True + + logging.warn(' Failed to add file to SVN: "%s"', filename) + logging.warn(' Svn status output: "%s"', status_output) + logging.warn(' Svn add output: "%s"', add_output) + return False + + def _SvnPropSet(self, filename): + """Set the baseline property + + Args: + filename: full path of the file to add. + + Returns: + True if the file already exists in SVN or is sucessfully added to SVN. + False otherwise. + """ + ext = os.path.splitext(filename)[1].upper() + if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM': + return + + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + if ext == '.PNG': + cmd = [ 'svn', 'pset', 'svn:mime-type', 'image/png', basename ] + else: + cmd = [ 'svn', 'pset', 'svn:eol-style', 'LF', basename ] + + logging.debug(' Set svn prop: %s', ' '.join(cmd)) + RunShell(cmd, False) + os.chdir(original_dir) + + def _CreateHtmlBaselineFiles(self, baseline_fullpath): + """Create baseline files (old, new and diff) in html directory. + + The files are used to compare the rebaselining results. + + Args: + baseline_fullpath: full path of the expected baseline file. + """ + + if not baseline_fullpath or not os.path.exists(baseline_fullpath): + return + + # Copy the new baseline to html directory for result comparison. 
+ baseline_filename = os.path.basename(baseline_fullpath) + new_file = GetResultFileFullpath(self._options.html_directory, + baseline_filename, + self._platform, + 'new') + shutil.copyfile(baseline_fullpath, new_file) + logging.info(' Html: copied new baseline file from "%s" to "%s".', + baseline_fullpath, new_file) + + # Get the old baseline from SVN and save to the html directory. + output = RunShell(['svn', 'cat', '-r', 'BASE', baseline_fullpath]) + if (not output) or (output.upper().rstrip().endswith( + 'NO SUCH FILE OR DIRECTORY')): + logging.info(' No base file: "%s"', baseline_fullpath) + return + base_file = GetResultFileFullpath(self._options.html_directory, + baseline_filename, + self._platform, + 'old') + f = open(base_file, 'wb') + f.write(output) + f.close() + logging.info(' Html: created old baseline file: "%s".', + base_file) + + # Get the diff between old and new baselines and save to the html directory. + if baseline_filename.upper().endswith('.TXT'): + # If the user specified a custom diff command in their svn config file, + # then it'll be used when we do svn diff, which we don't want to happen + # since we want the unified diff. Using --diff-cmd=diff doesn't always + # work, since they can have another diff executable in their path that + # gives different line endings. So we use a bogus temp directory as the + # config directory, which gets around these problems. + if sys.platform.startswith("win"): + parent_dir = tempfile.gettempdir() + else: + parent_dir = sys.path[0] # tempdir is not secure. + bogus_dir = os.path.join(parent_dir, "temp_svn_config") + logging.debug(' Html: temp config dir: "%s".', bogus_dir) + if not os.path.exists(bogus_dir): + os.mkdir(bogus_dir) + delete_bogus_dir = True + else: + delete_bogus_dir = False + + output = RunShell(["svn", "diff", "--config-dir", bogus_dir, + baseline_fullpath]) + if output: + diff_file = GetResultFileFullpath(self._options.html_directory, + baseline_filename, + self._platform, + 'diff') + f = open(diff_file, 'wb') + f.write(output) + f.close() + logging.info(' Html: created baseline diff file: "%s".', + diff_file) + + if delete_bogus_dir: + shutil.rmtree(bogus_dir, True) + logging.debug(' Html: removed temp config dir: "%s".', bogus_dir) + +class HtmlGenerator(object): + """Class to generate rebaselining result comparison html.""" + + HTML_REBASELINE = ('' + '' + '' + 'Rebaselining Result Comparison (%(time)s)' + '' + '' + '
Rebaselining Result Comparison (%(time)s) ' + '%(body)s' + '' + '') + HTML_NO_REBASELINING_TESTS = ' No tests found that need rebaselining. ' + HTML_TABLE_TEST = ('' + '%s ') + HTML_TR_TEST = ('' + '' + ' %s' + '' + '') + HTML_TEST_DETAIL = (' ' + '' + 'Baseline' + 'Platform' + 'Old' + 'New' + 'Difference' + '' + '%s' + '
') + HTML_TD_NOLINK = '%s' + HTML_TD_LINK = '%(name)s' + HTML_TD_LINK_IMG = ('' + '') + HTML_TR = '%s' + + def __init__(self, options, platforms, rebaselining_tests): + self._html_directory = options.html_directory + self._platforms = platforms + self._rebaselining_tests = rebaselining_tests + self._html_file = os.path.join(options.html_directory, 'rebaseline.html') + + def GenerateHtml(self): + """Generate html file for rebaselining result comparison.""" + + logging.info('Generating html file') + + html_body = '' + if not self._rebaselining_tests: + html_body += self.HTML_NO_REBASELINING_TESTS + else: + tests = list(self._rebaselining_tests) + tests.sort() + + test_no = 1 + for test in tests: + logging.info('Test %d: %s', test_no, test) + html_body += self._GenerateHtmlForOneTest(test) + + html = self.HTML_REBASELINE % ({'time': time.asctime(), 'body': html_body}) + logging.debug(html) + + f = open(self._html_file, 'w') + f.write(html) + f.close() + + logging.info('Baseline comparison html generated at "%s"', + self._html_file) + + def ShowHtml(self): + """Launch the rebaselining html in brwoser.""" + + logging.info('Launching html: "%s"', self._html_file) + + html_uri = path_utils.FilenameToUri(self._html_file) + webbrowser.open(html_uri, 1) + + logging.info('Html launched.') + + def _GenerateBaselineLinks(self, test_basename, suffix, platform): + """Generate links for baseline results (old, new and diff). + + Args: + test_basename: base filename of the test + suffix: baseline file suffixes: '.txt', '.png' + platform: win, linux or mac + + Returns: + html links for showing baseline results (old, new and diff) + """ + + baseline_filename = '%s-expected%s' % (test_basename, suffix) + logging.debug(' baseline filename: "%s"', baseline_filename) + + new_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'new') + logging.info(' New baseline file: "%s"', new_file) + if not os.path.exists(new_file): + logging.info(' No new baseline file: "%s"', new_file) + return '' + + old_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'old') + logging.info(' Old baseline file: "%s"', old_file) + if suffix == '.png': + html_td_link = self.HTML_TD_LINK_IMG + else: + html_td_link = self.HTML_TD_LINK + + links = '' + if os.path.exists(old_file): + links += html_td_link % {'uri': path_utils.FilenameToUri(old_file), + 'name': baseline_filename} + else: + logging.info(' No old baseline file: "%s"', old_file) + links += self.HTML_TD_NOLINK % '' + + links += html_td_link % {'uri': path_utils.FilenameToUri(new_file), + 'name': baseline_filename} + + diff_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'diff') + logging.info(' Baseline diff file: "%s"', diff_file) + if os.path.exists(diff_file): + links += html_td_link % {'uri': path_utils.FilenameToUri(diff_file), + 'name': 'Diff'} + else: + logging.info(' No baseline diff file: "%s"', diff_file) + links += self.HTML_TD_NOLINK % '' + + return links + + def _GenerateHtmlForOneTest(self, test): + """Generate html for one rebaselining test. + + Args: + test: layout test name + + Returns: + html that compares baseline results for the test. 
+ """ + + test_basename = os.path.basename(os.path.splitext(test)[0]) + logging.info(' basename: "%s"', test_basename) + rows = [] + for suffix in BASELINE_SUFFIXES: + if suffix == '.checksum': + continue + + logging.info(' Checking %s files', suffix) + for platform in self._platforms: + links = self._GenerateBaselineLinks(test_basename, suffix, platform) + if links: + row = self.HTML_TD_NOLINK % self._GetBaselineResultType(suffix) + row += self.HTML_TD_NOLINK % platform + row += links + logging.debug(' html row: %s', row) + + rows.append(self.HTML_TR % row) + + if rows: + test_path = os.path.join(path_utils.LayoutTestsDir(), test) + html = self.HTML_TR_TEST % (path_utils.FilenameToUri(test_path), test) + html += self.HTML_TEST_DETAIL % ' '.join(rows) + + logging.debug(' html for test: %s', html) + return self.HTML_TABLE_TEST % html + + return '' + + def _GetBaselineResultType(self, suffix): + """Name of the baseline result type.""" + + if suffix == '.png': + return 'Pixel' + elif suffix == '.txt': + return 'Render Tree' + else: + return 'Other' + + +def main(): + """Main function to produce new baselines.""" + + option_parser = optparse.OptionParser() + option_parser.add_option('-v', '--verbose', + action='store_true', + default=False, + help='include debug-level logging.') + + option_parser.add_option('-p', '--platforms', + default='mac,win,win-xp,win-vista,linux', + help=('Comma delimited list of platforms that need ' + 'rebaselining.')) + + option_parser.add_option('-u', '--archive_url', + default=('http://build.chromium.org/buildbot/' + 'layout_test_results'), + help=('Url to find the layout test result archive ' + 'file.')) + + option_parser.add_option('-w', '--webkit_canary', + action='store_true', + default=False, + help=('If True, pull baselines from webkit.org ' + 'canary bot.')) + + option_parser.add_option('-b', '--backup', + action='store_true', + default=False, + help=('Whether or not to backup the original test ' + 'expectations file after rebaseline.')) + + option_parser.add_option('-d', '--html_directory', + default='', + help=('The directory that stores the results for ' + 'rebaselining comparison.')) + + options = option_parser.parse_args()[0] + + # Set up our logging format. + log_level = logging.INFO + if options.verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, + format=('%(asctime)s %(filename)s:%(lineno)-3d ' + '%(levelname)s %(message)s'), + datefmt='%y%m%d %H:%M:%S') + + # Verify 'platforms' option is valid + if not options.platforms: + logging.error('Invalid "platforms" option. --platforms must be specified ' + 'in order to rebaseline.') + sys.exit(1) + platforms = [p.strip().lower() for p in options.platforms.split(',')] + for platform in platforms: + if not platform in REBASELINE_PLATFORM_ORDER: + logging.error('Invalid platform: "%s"' % (platform)) + sys.exit(1) + + # Adjust the platform order so rebaseline tool is running at the order of + # 'mac', 'win' and 'linux'. This is in same order with layout test baseline + # search paths. It simplifies how the rebaseline tool detects duplicate + # baselines. Check _IsDupBaseline method for details. 
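+ # For example, --platforms=linux,mac is reordered to ['mac', 'linux'] here.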
+ rebaseline_platforms = [] + for platform in REBASELINE_PLATFORM_ORDER: + if platform in platforms: + rebaseline_platforms.append(platform) + + options.html_directory = SetupHtmlDirectory(options.html_directory) + + rebaselining_tests = set() + backup = options.backup + for platform in rebaseline_platforms: + rebaseliner = Rebaseliner(platform, options) + + logging.info('') + LogDashedString('Rebaseline started', platform) + if rebaseliner.Run(backup): + # Only need to backup one original copy of test expectation file. + backup = False + LogDashedString('Rebaseline done', platform) + else: + LogDashedString('Rebaseline failed', platform, logging.ERROR) + + rebaselining_tests |= set(rebaseliner.GetRebaseliningTests()) + + logging.info('') + LogDashedString('Rebaselining result comparison started', None) + html_generator = HtmlGenerator(options, + rebaseline_platforms, + rebaselining_tests) + html_generator.GenerateHtml() + html_generator.ShowHtml() + LogDashedString('Rebaselining result comparison done', None) + + sys.exit(0) + +if '__main__' == __name__: + main() diff --git a/webkit/tools/layout_tests/webkitpy/run_chromium_webkit_tests.py b/webkit/tools/layout_tests/webkitpy/run_chromium_webkit_tests.py new file mode 100755 index 0000000..5f931e5 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/run_chromium_webkit_tests.py @@ -0,0 +1,1608 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Run layout tests using the test_shell. + +This is a port of the existing webkit test script run-webkit-tests. + +The TestRunner class runs a series of tests (TestType interface) against a set +of test files. If a test file fails a TestType, it returns a list TestFailure +objects to the TestRunner. The TestRunner then aggregates the TestFailures to +create a final report. + +This script reads several files, if they exist in the test_lists subdirectory +next to this script itself. Each should contain a list of paths to individual +tests or entire subdirectories of tests, relative to the outermost test +directory. Entire lines starting with '//' (comments) will be ignored. + +For details of the files' contents and purposes, see test_lists/README. +""" + +import errno +import glob +import logging +import math +import optparse +import os +import Queue +import random +import re +import shutil +import subprocess +import sys +import time +import traceback + +from layout_package import apache_http_server +from layout_package import test_expectations +from layout_package import http_server +from layout_package import json_layout_results_generator +from layout_package import metered_stream +from layout_package import path_utils +from layout_package import platform_utils +from layout_package import test_failures +from layout_package import test_shell_thread +from layout_package import test_files +from layout_package import websocket_server +from test_types import fuzzy_image_diff +from test_types import image_diff +from test_types import test_type_base +from test_types import text_diff + +sys.path.append(path_utils.PathFromBase('third_party')) +import simplejson + +# Indicates that we want detailed progress updates in the output (prints +# directory-by-directory feedback). +LOG_DETAILED_PROGRESS = 'detailed-progress' + +# Log any unexpected results while running (instead of just at the end). 
+LOG_UNEXPECTED = 'unexpected' + +# Builder base URL where we have the archived test results. +BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" + +TestExpectationsFile = test_expectations.TestExpectationsFile + +class TestInfo: + """Groups information about a test for easy passing of data.""" + def __init__(self, filename, timeout): + """Generates the URI and stores the filename and timeout for this test. + Args: + filename: Full path to the test. + timeout: Timeout for running the test in TestShell. + """ + self.filename = filename + self.uri = path_utils.FilenameToUri(filename) + self.timeout = timeout + expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') + try: + self.image_hash = open(expected_hash_file, "r").read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + self.image_hash = None + + +class ResultSummary(object): + """A class for partitioning the test results we get into buckets. + + This class is basically a glorified struct and it's private to this file + so we don't bother with any information hiding.""" + def __init__(self, expectations, test_files): + self.total = len(test_files) + self.remaining = self.total + self.expectations = expectations + self.expected = 0 + self.unexpected = 0 + self.tests_by_expectation = {} + self.tests_by_timeline = {} + self.results = {} + self.unexpected_results = {} + self.failures = {} + self.tests_by_expectation[test_expectations.SKIP] = set() + for expectation in TestExpectationsFile.EXPECTATIONS.values(): + self.tests_by_expectation[expectation] = set() + for timeline in TestExpectationsFile.TIMELINES.values(): + self.tests_by_timeline[timeline] = expectations.GetTestsWithTimeline( + timeline) + + def Add(self, test, failures, result, expected): + """Add a result into the appropriate bin. + + Args: + test: test file name + failures: list of failure objects from test execution + result: result of test (PASS, IMAGE, etc.). + expected: whether the result was what we expected it to be. + """ + + self.tests_by_expectation[result].add(test) + self.results[test] = result + self.remaining -= 1 + if len(failures): + self.failures[test] = failures + if expected: + self.expected += 1 + else: + self.unexpected_results[test] = result + self.unexpected += 1 + + +class TestRunner: + """A class for managing running a series of tests on a series of layout test + files.""" + + HTTP_SUBDIR = os.sep.join(['', 'http', '']) + WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) + + # The per-test timeout in milliseconds, if no --time-out-ms option was given + # to run_webkit_tests. This should correspond to the default timeout in + # test_shell.exe. + DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 + + NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 + + def __init__(self, options, meter): + """Initialize test runner data structures. + + Args: + options: a dictionary of command line options + meter: a MeteredStream object to record updates to. + """ + self._options = options + self._meter = meter + + if options.use_apache: + self._http_server = apache_http_server.LayoutTestApacheHttpd( + options.results_directory) + else: + self._http_server = http_server.Lighttpd(options.results_directory) + + self._websocket_server = websocket_server.PyWebSocket( + options.results_directory) + # disable wss server. need to install pyOpenSSL on buildbots. 
+ # self._websocket_secure_server = websocket_server.PyWebSocket( + # options.results_directory, use_tls=True, port=9323) + + # a list of TestType objects + self._test_types = [] + + # a set of test files, and the same tests as a list + self._test_files = set() + self._test_files_list = None + self._file_dir = path_utils.GetAbsolutePath( + os.path.dirname(os.path.dirname(sys.argv[0]))) + self._result_queue = Queue.Queue() + + # These are used for --log detailed-progress to track status by directory. + self._current_dir = None + self._current_progress_str = "" + self._current_test_number = 0 + + def __del__(self): + logging.debug("flushing stdout") + sys.stdout.flush() + logging.debug("flushing stderr") + sys.stderr.flush() + logging.debug("stopping http server") + # Stop the http server. + self._http_server.Stop() + # Stop the Web Socket / Web Socket Secure servers. + self._websocket_server.Stop() + # self._websocket_secure_server.Stop() + + def GatherFilePaths(self, paths): + """Find all the files to test. + + Args: + paths: a list of globs to use instead of the defaults.""" + self._test_files = test_files.GatherTestFiles(paths) + + def ParseExpectations(self, platform, is_debug_mode): + """Parse the expectations from the test_list files and return a data + structure holding them. Throws an error if the test_list files have invalid + syntax. + """ + if self._options.lint_test_files: + test_files = None + else: + test_files = self._test_files + + try: + self._expectations = test_expectations.TestExpectations(test_files, + self._file_dir, platform, is_debug_mode, + self._options.lint_test_files) + return self._expectations + except Exception, err: + if self._options.lint_test_files: + print str(err) + else: + raise err + + def PrepareListsAndPrintOutput(self, write): + """Create appropriate subsets of test lists and returns a ResultSummary + object. Also prints expected test counts. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + """ + + # Remove skipped - both fixable and ignored - files from the + # top-level list of files to test. + num_all_test_files = len(self._test_files) + write("Found: %d tests" % (len(self._test_files))) + skipped = set() + if num_all_test_files > 1 and not self._options.force: + skipped = self._expectations.GetTestsWithResultType( + test_expectations.SKIP) + self._test_files -= skipped + + # Create a sorted list of test files so the subset chunk, if used, contains + # alphabetically consecutive tests. + self._test_files_list = list(self._test_files) + if self._options.randomize_order: + random.shuffle(self._test_files_list) + else: + self._test_files_list.sort() + + # If the user specifies they just want to run a subset of the tests, + # just grab a subset of the non-skipped tests. + if self._options.run_chunk or self._options.run_part: + chunk_value = self._options.run_chunk or self._options.run_part + test_files = self._test_files_list + try: + (chunk_num, chunk_len) = chunk_value.split(":") + chunk_num = int(chunk_num) + assert(chunk_num >= 0) + test_size = int(chunk_len) + assert(test_size > 0) + except: + logging.critical("invalid chunk '%s'" % chunk_value) + sys.exit(1) + + # Get the number of tests + num_tests = len(test_files) + + # Get the start offset of the slice. + if self._options.run_chunk: + chunk_len = test_size + # In this case chunk_num can be really large. We need to make the + # slave fit in the current number of tests. 
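+ # For example, --run-chunk=7:10 with 25 tests gives
+ # slice_start = (7 * 10) % 25 = 20, so this run covers tests[20:25] and
+ # then wraps around to append a few tests from the start of the list
+ # (see below).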
+ slice_start = (chunk_num * chunk_len) % num_tests + else: + # Validate the data. + assert(test_size <= num_tests) + assert(chunk_num <= test_size) + + # To count the chunk_len, and make sure we don't skip some tests, we + # round to the next value that fits exacly all the parts. + rounded_tests = num_tests + if rounded_tests % test_size != 0: + rounded_tests = num_tests + test_size - (num_tests % test_size) + + chunk_len = rounded_tests / test_size + slice_start = chunk_len * (chunk_num - 1) + # It does not mind if we go over test_size. + + # Get the end offset of the slice. + slice_end = min(num_tests, slice_start + chunk_len) + + files = test_files[slice_start:slice_end] + + tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( + (slice_end - slice_start), slice_start, slice_end, num_tests) + write(tests_run_msg) + + # If we reached the end and we don't have enough tests, we run some + # from the beginning. + if self._options.run_chunk and (slice_end - slice_start < chunk_len): + extra = 1 + chunk_len - (slice_end - slice_start) + extra_msg = ' last chunk is partial, appending [0:%d]' % extra + write(extra_msg) + tests_run_msg += "\n" + extra_msg + files.extend(test_files[0:extra]) + tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = open(tests_run_filename, "w") + tests_run_file.write(tests_run_msg + "\n") + tests_run_file.close() + + len_skip_chunk = int(len(files) * len(skipped) / + float(len(self._test_files))) + skip_chunk_list = list(skipped)[0:len_skip_chunk] + skip_chunk = set(skip_chunk_list) + + # Update expectations so that the stats are calculated correctly. + # We need to pass a list that includes the right # of skipped files + # to ParseExpectations so that ResultSummary() will get the correct + # stats. So, we add in the subset of skipped files, and then subtract + # them back out. + self._test_files_list = files + skip_chunk_list + self._test_files = set(self._test_files_list) + + self._expectations = self.ParseExpectations( + path_utils.PlatformName(), options.target == 'Debug') + + self._test_files = set(files) + self._test_files_list = files + else: + skip_chunk = skipped + + result_summary = ResultSummary(self._expectations, + self._test_files | skip_chunk) + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.PASS, "passes") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.FAIL, "failures") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.FLAKY, "flaky") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.SKIP, "skipped") + + + if self._options.force: + write('Running all tests, including skips (--force)') + else: + # Note that we don't actually run the skipped tests (they were + # subtracted out of self._test_files, above), but we stub out the + # results here so the statistics can remain accurate. 
+ for test in skip_chunk: + result_summary.Add(test, [], test_expectations.SKIP, expected=True) + write("") + + return result_summary + + def AddTestType(self, test_type): + """Add a TestType to the TestRunner.""" + self._test_types.append(test_type) + + def _GetDirForTestFile(self, test_file): + """Returns the highest-level directory by which to shard the given test + file.""" + index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) + + test_file = test_file[index + len('LayoutTests/'):] + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + test_file = test_file_parts[1] + + # The http tests are very stable on mac/linux. + # TODO(ojan): Make the http server on Windows be apache so we can turn + # shard the http tests there as well. Switching to apache is what made them + # stable on linux/mac. + return_value = directory + while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) and + test_file.find(os.sep) >= 0): + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + return_value = os.path.join(return_value, directory) + test_file = test_file_parts[1] + + return return_value + + def _GetTestInfoForFile(self, test_file): + """Returns the appropriate TestInfo object for the file. Mostly this is used + for looking up the timeout value (in ms) to use for the given test.""" + if self._expectations.HasModifier(test_file, test_expectations.SLOW): + return TestInfo(test_file, self._options.slow_time_out_ms) + return TestInfo(test_file, self._options.time_out_ms) + + def _GetTestFileQueue(self, test_files): + """Create the thread safe queue of lists of (test filenames, test URIs) + tuples. Each TestShellThread pulls a list from this queue and runs those + tests in order before grabbing the next available list. + + Shard the lists by directory. This helps ensure that tests that depend + on each other (aka bad tests!) continue to run together as most + cross-tests dependencies tend to occur within the same directory. + + Return: + The Queue of lists of TestInfo objects. + """ + + if (self._options.experimental_fully_parallel or + self._IsSingleThreaded()): + filename_queue = Queue.Queue() + for test_file in test_files: + filename_queue.put(('.', [self._GetTestInfoForFile(test_file)])) + return filename_queue + + tests_by_dir = {} + for test_file in test_files: + directory = self._GetDirForTestFile(test_file) + tests_by_dir.setdefault(directory, []) + tests_by_dir[directory].append(self._GetTestInfoForFile(test_file)) + + # Sort by the number of tests in the dir so that the ones with the most + # tests get run first in order to maximize parallelization. Number of tests + # is a good enough, but not perfect, approximation of how long that set of + # tests will take to run. We can't just use a PriorityQueue until we move + # to Python 2.6. + test_lists = [] + http_tests = None + for directory in tests_by_dir: + test_list = tests_by_dir[directory] + # Keep the tests in alphabetical order. + # TODO: Remove once tests are fixed so they can be run in any order. + test_list.reverse() + test_list_tuple = (directory, test_list) + if directory == 'LayoutTests' + os.sep + 'http': + http_tests = test_list_tuple + else: + test_lists.append(test_list_tuple) + test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) + + # Put the http tests first. There are only a couple hundred of them, but + # each http test takes a very long time to run, so sorting by the number + # of tests doesn't accurately capture how long they take to run. 
+ if http_tests: + test_lists.insert(0, http_tests) + + filename_queue = Queue.Queue() + for item in test_lists: + filename_queue.put(item) + return filename_queue + + def _GetTestShellArgs(self, index): + """Returns the tuple of arguments for tests and for test_shell.""" + shell_args = [] + test_args = test_type_base.TestArguments() + if not self._options.no_pixel_tests: + png_path = os.path.join(self._options.results_directory, + "png_result%s.png" % index) + shell_args.append("--pixel-tests=" + png_path) + test_args.png_path = png_path + + test_args.new_baseline = self._options.new_baseline + + test_args.show_sources = self._options.sources + + if self._options.startup_dialog: + shell_args.append('--testshell-startup-dialog') + + if self._options.gp_fault_error_box: + shell_args.append('--gp-fault-error-box') + + return (test_args, shell_args) + + def _ContainsTests(self, subdir): + for test_file in self._test_files_list: + if test_file.find(subdir) >= 0: + return True + return False + + def _InstantiateTestShellThreads(self, test_shell_binary, test_files, + result_summary): + """Instantitates and starts the TestShellThread(s). + + Return: + The list of threads. + """ + test_shell_command = [test_shell_binary] + + if self._options.wrapper: + # This split() isn't really what we want -- it incorrectly will + # split quoted strings within the wrapper argument -- but in + # practice it shouldn't come up and the --help output warns + # about it anyway. + test_shell_command = self._options.wrapper.split() + test_shell_command + + filename_queue = self._GetTestFileQueue(test_files) + + # Instantiate TestShellThreads and start them. + threads = [] + for i in xrange(int(self._options.num_test_shells)): + # Create separate TestTypes instances for each thread. + test_types = [] + for t in self._test_types: + test_types.append(t(self._options.platform, + self._options.results_directory)) + + test_args, shell_args = self._GetTestShellArgs(i) + thread = test_shell_thread.TestShellThread(filename_queue, + self._result_queue, + test_shell_command, + test_types, + test_args, + shell_args, + self._options) + if self._IsSingleThreaded(): + thread.RunInMainThread(self, result_summary) + else: + thread.start() + threads.append(thread) + + return threads + + def _StopLayoutTestHelper(self, proc): + """Stop the layout test helper and closes it down.""" + if proc: + logging.debug("Stopping layout test helper") + proc.stdin.write("x\n") + proc.stdin.close() + proc.wait() + + def _IsSingleThreaded(self): + """Returns whether we should run all the tests in the main thread.""" + return int(self._options.num_test_shells) == 1 + + def _RunTests(self, test_shell_binary, file_list, result_summary): + """Runs the tests in the file_list. + + Return: A tuple (failures, thread_timings, test_timings, + individual_test_timings) + failures is a map from test to list of failure types + thread_timings is a list of dicts with the total runtime of each thread + with 'name', 'num_tests', 'total_time' properties + test_timings is a list of timings for each sharded subdirectory of the + form [time, directory_name, num_tests] + individual_test_timings is a list of run times for each test in the form + {filename:filename, test_run_time:test_run_time} + result_summary: summary object to populate with the results + """ + threads = self._InstantiateTestShellThreads(test_shell_binary, file_list, + result_summary) + + # Wait for the threads to finish and collect test failures. 
+ failures = {} + test_timings = {} + individual_test_timings = [] + thread_timings = [] + try: + for thread in threads: + while thread.isAlive(): + # Let it timeout occasionally so it can notice a KeyboardInterrupt + # Actually, the timeout doesn't really matter: apparently it + # suffices to not use an indefinite blocking join for it to + # be interruptible by KeyboardInterrupt. + thread.join(0.1) + self.UpdateSummary(result_summary) + thread_timings.append({ 'name': thread.getName(), + 'num_tests': thread.GetNumTests(), + 'total_time': thread.GetTotalTime()}); + test_timings.update(thread.GetDirectoryTimingStats()) + individual_test_timings.extend(thread.GetIndividualTestStats()) + except KeyboardInterrupt: + for thread in threads: + thread.Cancel() + self._StopLayoutTestHelper(layout_test_helper_proc) + raise + for thread in threads: + # Check whether a TestShellThread died before normal completion. + exception_info = thread.GetExceptionInfo() + if exception_info is not None: + # Re-raise the thread's exception here to make it clear that + # testing was aborted. Otherwise, the tests that did not run + # would be assumed to have passed. + raise exception_info[0], exception_info[1], exception_info[2] + + # Make sure we pick up any remaining tests. + self.UpdateSummary(result_summary) + return (thread_timings, test_timings, individual_test_timings) + + def Run(self, result_summary): + """Run all our tests on all our test files. + + For each test file, we run each test type. If there are any failures, we + collect them for reporting. + + Args: + result_summary: a summary object tracking the test results. + + Return: + We return nonzero if there are regressions compared to the last run. + """ + if not self._test_files: + return 0 + start_time = time.time() + test_shell_binary = path_utils.TestShellPath(self._options.target) + + # Start up any helper needed + layout_test_helper_proc = None + if not options.no_pixel_tests: + helper_path = path_utils.LayoutTestHelperPath(self._options.target) + if len(helper_path): + logging.debug("Starting layout helper %s" % helper_path) + layout_test_helper_proc = subprocess.Popen([helper_path], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=None) + is_ready = layout_test_helper_proc.stdout.readline() + if not is_ready.startswith('ready'): + logging.error("layout_test_helper failed to be ready") + + # Check that the system dependencies (themes, fonts, ...) are correct. + if not self._options.nocheck_sys_deps: + proc = subprocess.Popen([test_shell_binary, + "--check-layout-test-sys-deps"]) + if proc.wait() != 0: + logging.info("Aborting because system dependencies check failed.\n" + "To override, invoke with --nocheck-sys-deps") + sys.exit(1) + + if self._ContainsTests(self.HTTP_SUBDIR): + self._http_server.Start() + + if self._ContainsTests(self.WEBSOCKET_SUBDIR): + self._websocket_server.Start() + # self._websocket_secure_server.Start() + + thread_timings, test_timings, individual_test_timings = ( + self._RunTests(test_shell_binary, self._test_files_list, + result_summary)) + + # We exclude the crashes from the list of results to retry, because + # we want to treat even a potentially flaky crash as an error. 
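# (Sketch only: the retry policy implemented just below, as a standalone
# function. run_tests/get_failures stand in for TestRunner._RunTests and
# TestRunner._GetFailures; they are not real APIs.)
def retry_unexpected(run_tests, get_failures, first_summary, max_retries):
    # Crashes are excluded from the retry list, so even a potentially flaky
    # crash is still reported as a regression.
    failures = get_failures(first_summary, include_crashes=False)
    retries = 0
    retry_summary = first_summary
    while retries < max_retries and failures:
        retries += 1
        retry_summary = run_tests(failures.keys())
        failures = get_failures(retry_summary, include_crashes=True)
    return failures, retry_summary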
+ failures = self._GetFailures(result_summary, include_crashes=False) + retries = 0 + retry_summary = result_summary + while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and len(failures)): + logging.debug("Retrying %d unexpected failure(s)" % len(failures)) + retries += 1 + retry_summary = ResultSummary(self._expectations, failures.keys()) + self._RunTests(test_shell_binary, failures.keys(), retry_summary) + failures = self._GetFailures(retry_summary, include_crashes=True) + + self._StopLayoutTestHelper(layout_test_helper_proc) + end_time = time.time() + + write = CreateLoggingWriter(self._options, 'timing') + self._PrintTimingStatistics(write, end_time - start_time, + thread_timings, test_timings, + individual_test_timings, + result_summary) + + self._meter.update("") + + if self._options.verbose: + # We write this block to stdout for compatibility with the buildbot + # log parser, which only looks at stdout, not stderr :( + write = lambda s: sys.stdout.write("%s\n" % s) + else: + write = CreateLoggingWriter(self._options, 'actual') + + self._PrintResultSummary(write, result_summary) + + sys.stdout.flush() + sys.stderr.flush() + + if (LOG_DETAILED_PROGRESS in self._options.log or + (LOG_UNEXPECTED in self._options.log and + result_summary.total != result_summary.expected)): + print + + # This summary data gets written to stdout regardless of log level + self._PrintOneLineSummary(result_summary.total, result_summary.expected) + + unexpected_results = self._SummarizeUnexpectedResults(result_summary, + retry_summary) + self._PrintUnexpectedResults(unexpected_results) + + # Write the same data to log files. + self._WriteJSONFiles(unexpected_results, result_summary, + individual_test_timings) + + # Write the summary to disk (results.html) and maybe open the test_shell + # to this file. + wrote_results = self._WriteResultsHtmlFile(result_summary) + if not self._options.noshow_results and wrote_results: + self._ShowResultsHtmlFile() + + # Ignore flaky failures and unexpected passes so we don't turn the + # bot red for those. + return unexpected_results['num_regressions'] + + def UpdateSummary(self, result_summary): + """Update the summary while running tests.""" + while True: + try: + (test, fail_list) = self._result_queue.get_nowait() + result = test_failures.DetermineResultType(fail_list) + expected = self._expectations.MatchesAnExpectedResult(test, result) + result_summary.Add(test, fail_list, result, expected) + if (LOG_DETAILED_PROGRESS in self._options.log and + (self._options.experimental_fully_parallel or + self._IsSingleThreaded())): + self._DisplayDetailedProgress(result_summary) + else: + if not expected and LOG_UNEXPECTED in self._options.log: + self._PrintUnexpectedTestResult(test, result) + self._DisplayOneLineProgress(result_summary) + except Queue.Empty: + return + + def _DisplayOneLineProgress(self, result_summary): + """Displays the progress through the test run.""" + self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % + (result_summary.expected, result_summary.unexpected, + result_summary.remaining)) + + def _DisplayDetailedProgress(self, result_summary): + """Display detailed progress output where we print the directory name + and one dot for each completed test. 
This is triggered by + "--log detailed-progress".""" + if self._current_test_number == len(self._test_files_list): + return + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) + if self._current_progress_str == "": + self._current_progress_str = "%s: " % (next_dir) + self._current_dir = next_dir + + while next_test in result_summary.results: + if next_dir != self._current_dir: + self._meter.write("%s\n" % (self._current_progress_str)) + self._current_progress_str = "%s: ." % (next_dir) + self._current_dir = next_dir + else: + self._current_progress_str += "." + + if (next_test in result_summary.unexpected_results and + LOG_UNEXPECTED in self._options.log): + result = result_summary.unexpected_results[next_test] + self._meter.write("%s\n" % self._current_progress_str) + self._PrintUnexpectedTestResult(next_test, result) + self._current_progress_str = "%s: " % self._current_dir + + self._current_test_number += 1 + if self._current_test_number == len(self._test_files_list): + break + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) + + if result_summary.remaining: + remain_str = " (%d)" % (result_summary.remaining) + self._meter.update("%s%s" % (self._current_progress_str, remain_str)) + else: + self._meter.write("%s\n" % (self._current_progress_str)) + + def _GetFailures(self, result_summary, include_crashes): + """Filters a dict of results and returns only the failures. + + Args: + result_summary: the results of the test run + include_crashes: whether crashes are included in the output. + We use False when finding the list of failures to retry + to see if the results were flaky. Although the crashes may also be + flaky, we treat them as if they aren't so that they're not ignored. + Returns: + a dict of files -> results + """ + failed_results = {} + for test, result in result_summary.unexpected_results.iteritems(): + if (result == test_expectations.PASS or + result == test_expectations.CRASH and not include_crashes): + continue + failed_results[test] = result + + return failed_results + + def _SummarizeUnexpectedResults(self, result_summary, retry_summary): + """Summarize any unexpected results as a dict. + + TODO(dpranke): split this data structure into a separate class? + + Args: + result_summary: summary object from initial test runs + retry_summary: summary object from final test run of retried tests + Returns: + A dictionary containing a summary of the unexpected results from the + run, with the following fields: + 'version': a version indicator (1 in this version) + 'fixable': # of fixable tests (NOW - PASS) + 'skipped': # of skipped tests (NOW & SKIPPED) + 'num_regressions': # of non-flaky failures + 'num_flaky': # of flaky failures + 'num_passes': # of unexpected passes + 'tests': a dict of tests -> { 'expected' : '...', 'actual' : '...' 
} + """ + results = {} + results['version'] = 1 + + tbe = result_summary.tests_by_expectation + tbt = result_summary.tests_by_timeline + results['fixable'] = len(tbt[test_expectations.NOW] - + tbe[test_expectations.PASS]) + results['skipped'] = len(tbt[test_expectations.NOW] & + tbe[test_expectations.SKIP]) + + num_passes = 0 + num_flaky = 0 + num_regressions = 0 + keywords = {} + for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): + keywords[v] = k.upper() + + tests = {} + for filename, result in result_summary.unexpected_results.iteritems(): + # Note that if a test crashed in the original run, we ignore whether or + # not it crashed when we retried it (if we retried it), and always + # consider the result not flaky. + test = path_utils.RelativeTestFilename(filename) + expected = self._expectations.GetExpectationsString(filename) + actual = [keywords[result]] + + if result == test_expectations.PASS: + num_passes += 1 + elif result == test_expectations.CRASH: + num_regressions += 1 + else: + if filename not in retry_summary.unexpected_results: + actual.extend( + self._expectations.GetExpectationsString(filename).split(" ")) + num_flaky += 1 + else: + retry_result = retry_summary.unexpected_results[filename] + if result != retry_result: + actual.append(keywords[retry_result]) + num_flaky += 1 + else: + num_regressions += 1 + + tests[test] = {} + tests[test]['expected'] = expected + tests[test]['actual'] = " ".join(actual) + + results['tests'] = tests + results['num_passes'] = num_passes + results['num_flaky'] = num_flaky + results['num_regressions'] = num_regressions + + return results + + def _WriteJSONFiles(self, unexpected_results, result_summary, + individual_test_timings): + """Writes the results of the test run as JSON files into the results dir. + + There are three different files written into the results dir: + unexpected_results.json: A short list of any unexpected results. This + is used by the buildbots to display results. + expectations.json: This is used by the flakiness dashboard. + results.json: A full list of the results - used by the flakiness + dashboard and the aggregate results dashboard. + + Args: + unexpected_results: dict of unexpected results + result_summary: full summary object + individual_test_timings: list of test times (used by the flakiness + dashboard). + """ + logging.debug("Writing JSON files in %s." % self._options.results_directory) + unexpected_file = open(os.path.join(self._options.results_directory, + "unexpected_results.json"), "w") + unexpected_file.write(simplejson.dumps(unexpected_results, sort_keys=True, + indent=2)) + unexpected_file.close() + + # Write a json file of the test_expectations.txt file for the layout tests + # dashboard. + expectations_file = open(os.path.join(self._options.results_directory, + "expectations.json"), "w") + expectations_json = self._expectations.GetExpectationsJsonForAllPlatforms() + expectations_file.write(("ADD_EXPECTATIONS(" + expectations_json + ");")) + expectations_file.close() + + json_layout_results_generator.JSONLayoutResultsGenerator( + self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + BUILDER_BASE_URL, individual_test_timings, + self._expectations, result_summary, self._test_files_list) + + logging.debug("Finished writing JSON files.") + + def _PrintExpectedResultsOfType(self, write, result_summary, result_type, + result_type_str): + """Print the number of the tests in a given result class. 
+ + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary - the object containing all the results to report on + result_type - the particular result type to report in the summary. + result_type_str - a string description of the result_type. + """ + tests = self._expectations.GetTestsWithResultType(result_type) + now = result_summary.tests_by_timeline[test_expectations.NOW] + wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] + defer = result_summary.tests_by_timeline[test_expectations.DEFER] + + # We use a fancy format string in order to print the data out in a + # nicely-aligned table. + fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" % + (self._NumDigits(now), self._NumDigits(defer), + self._NumDigits(wontfix))) + write(fmtstr % + (len(tests), result_type_str, len(tests & now), len(tests & defer), + len(tests & wontfix))) + + def _NumDigits(self, num): + """Returns the number of digits needed to represent the length of a + sequence.""" + ndigits = 1 + if len(num): + ndigits = int(math.log10(len(num))) + 1 + return ndigits + + def _PrintTimingStatistics(self, write, total_time, thread_timings, + directory_test_timings, individual_test_timings, + result_summary): + """Record timing-specific information for the test run. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + total_time: total elapsed time (in seconds) for the test run + thread_timings: wall clock time each thread ran for + directory_test_timings: timing by directory + individual_test_timings: timing by file + result_summary: summary object for the test run + """ + write("Test timing:") + write(" %6.2f total testing time" % total_time) + write("") + write("Thread timing:") + cuml_time = 0 + for t in thread_timings: + write(" %10s: %5d tests, %6.2f secs" % + (t['name'], t['num_tests'], t['total_time'])) + cuml_time += t['total_time'] + write(" %6.2f cumulative, %6.2f optimal" % + (cuml_time, cuml_time / int(self._options.num_test_shells))) + write("") + + self._PrintAggregateTestStatistics(write, individual_test_timings) + self._PrintIndividualTestTimes(write, individual_test_timings, + result_summary) + self._PrintDirectoryTimings(write, directory_test_timings) + + def _PrintAggregateTestStatistics(self, write, individual_test_timings): + """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. 
+ """ + test_types = individual_test_timings[0].time_for_diffs.keys() + times_for_test_shell = [] + times_for_diff_processing = [] + times_per_test_type = {} + for test_type in test_types: + times_per_test_type[test_type] = [] + + for test_stats in individual_test_timings: + times_for_test_shell.append(test_stats.test_run_time) + times_for_diff_processing.append(test_stats.total_time_for_all_diffs) + time_for_diffs = test_stats.time_for_diffs + for test_type in test_types: + times_per_test_type[test_type].append(time_for_diffs[test_type]) + + self._PrintStatisticsForTestTimings(write, + "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) + self._PrintStatisticsForTestTimings(write, + "PER TEST DIFF PROCESSING TIMES (seconds):", times_for_diff_processing) + for test_type in test_types: + self._PrintStatisticsForTestTimings(write, + "PER TEST TIMES BY TEST TYPE: %s" % test_type, + times_per_test_type[test_type]) + + def _PrintIndividualTestTimes(self, write, individual_test_timings, + result_summary): + """Prints the run times for slow, timeout and crash tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. + result_summary: summary object for test run + """ + # Reverse-sort by the time spent in test_shell. + individual_test_timings.sort(lambda a, b: + cmp(b.test_run_time, a.test_run_time)) + + num_printed = 0 + slow_tests = [] + timeout_or_crash_tests = [] + unexpected_slow_tests = [] + for test_tuple in individual_test_timings: + filename = test_tuple.filename + is_timeout_crash_or_slow = False + if self._expectations.HasModifier(filename, test_expectations.SLOW): + is_timeout_crash_or_slow = True + slow_tests.append(test_tuple) + + if filename in result_summary.failures: + result = result_summary.results[filename] + if (result == test_expectations.TIMEOUT or + result == test_expectations.CRASH): + is_timeout_crash_or_slow = True + timeout_or_crash_tests.append(test_tuple) + + if (not is_timeout_crash_or_slow and + num_printed < self._options.num_slow_tests_to_log): + num_printed = num_printed + 1 + unexpected_slow_tests.append(test_tuple) + + write("") + self._PrintTestListTiming(write, "%s slowest tests that are not marked " + "as SLOW and did not timeout/crash:" % + self._options.num_slow_tests_to_log, unexpected_slow_tests) + write("") + self._PrintTestListTiming(write, "Tests marked as SLOW:", slow_tests) + write("") + self._PrintTestListTiming(write, "Tests that timed out or crashed:", + timeout_or_crash_tests) + write("") + + def _PrintTestListTiming(self, write, title, test_list): + """Print timing info for each test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: section heading + test_list: tests that fall in this section + """ + write(title) + for test_tuple in test_list: + filename = test_tuple.filename[len(path_utils.LayoutTestsDir()) + 1:] + filename = filename.replace('\\', '/') + test_run_time = round(test_tuple.test_run_time, 1) + write(" %s took %s seconds" % (filename, test_run_time)) + + def _PrintDirectoryTimings(self, write, directory_test_timings): + """Print timing info by directory for any directories that take > 10 seconds + to run. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. 
+ directory_test_timing: time info for each directory + """ + timings = [] + for directory in directory_test_timings: + num_tests, time_for_directory = directory_test_timings[directory] + timings.append((round(time_for_directory, 1), directory, num_tests)) + timings.sort() + + write("Time to process slowest subdirectories:") + min_seconds_to_print = 10 + for timing in timings: + if timing[0] > min_seconds_to_print: + write(" %s took %s seconds to run %s tests." % (timing[1], timing[0], + timing[2])) + write("") + + def _PrintStatisticsForTestTimings(self, write, title, timings): + """Prints the median, mean and standard deviation of the values in timings. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: Title for these timings. + timings: A list of floats representing times. + """ + write(title) + timings.sort() + + num_tests = len(timings) + percentile90 = timings[int(.9 * num_tests)] + percentile99 = timings[int(.99 * num_tests)] + + if num_tests % 2 == 1: + median = timings[((num_tests - 1) / 2) - 1] + else: + lower = timings[num_tests / 2 - 1] + upper = timings[num_tests / 2] + median = (float(lower + upper)) / 2 + + mean = sum(timings) / num_tests + + for time in timings: + sum_of_deviations = math.pow(time - mean, 2) + + std_deviation = math.sqrt(sum_of_deviations / num_tests) + write(" Median: %6.3f" % median) + write(" Mean: %6.3f" % mean) + write(" 90th percentile: %6.3f" % percentile90) + write(" 99th percentile: %6.3f" % percentile99) + write(" Standard dev: %6.3f" % std_deviation) + write("") + + def _PrintResultSummary(self, write, result_summary): + """Print a short summary to the output file about how many tests passed. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: information to log + """ + failed = len(result_summary.failures) + skipped = len(result_summary.tests_by_expectation[test_expectations.SKIP]) + total = result_summary.total + passed = total - failed - skipped + pct_passed = 0.0 + if total > 0: + pct_passed = float(passed) * 100 / total + + write(""); + write("=> Results: %d/%d tests passed (%.1f%%)" % + (passed, total, pct_passed)) + write(""); + self._PrintResultSummaryEntry(write, result_summary, test_expectations.NOW, + "Tests to be fixed for the current release") + + write(""); + self._PrintResultSummaryEntry(write, result_summary, + test_expectations.DEFER, + "Tests we'll fix in the future if they fail (DEFER)") + + write(""); + self._PrintResultSummaryEntry(write, result_summary, + test_expectations.WONTFIX, + "Tests that will only be fixed if they crash (WONTFIX)") + + def _PrintResultSummaryEntry(self, write, result_summary, timeline, heading): + """Print a summary block of results for a particular timeline of test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: summary to print results for + timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
+ heading: a textual description of the timeline + """ + total = len(result_summary.tests_by_timeline[timeline]) + not_passing = (total - + len(result_summary.tests_by_expectation[test_expectations.PASS] & + result_summary.tests_by_timeline[timeline])) + write("=> %s (%d):" % (heading, not_passing)) + + for result in TestExpectationsFile.EXPECTATION_ORDER: + if result == test_expectations.PASS: + continue + results = (result_summary.tests_by_expectation[result] & + result_summary.tests_by_timeline[timeline]) + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] + if not_passing and len(results): + pct = len(results) * 100.0 / not_passing + write(" %5d %-24s (%4.1f%%)" % (len(results), + desc[len(results) != 1], pct)) + + def _PrintOneLineSummary(self, total, expected): + """Print a one-line summary of the test run to stdout. + + Args: + total: total number of tests run + expected: number of expected results + """ + unexpected = total - expected + if unexpected == 0: + print "All %d tests ran as expected." % expected + elif expected == 1: + print "1 test ran as expected, %d didn't:" % unexpected + else: + print "%d tests ran as expected, %d didn't:" % (expected, unexpected) + + def _PrintUnexpectedResults(self, unexpected_results): + """Prints any unexpected results in a human-readable form to stdout.""" + passes = {} + flaky = {} + regressions = {} + + if len(unexpected_results['tests']): + print "" + + for test, results in unexpected_results['tests'].iteritems(): + actual = results['actual'].split(" ") + expected = results['expected'].split(" ") + if actual == ['PASS']: + if 'CRASH' in expected: + _AddToDictOfLists(passes, 'Expected to crash, but passed', test) + elif 'TIMEOUT' in expected: + _AddToDictOfLists(passes, 'Expected to timeout, but passed', test) + else: + _AddToDictOfLists(passes, 'Expected to fail, but passed', test) + elif len(actual) > 1: + # We group flaky tests by the first actual result we got. 
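# (Sketch only: the grouping performed by _PrintUnexpectedResults, condensed.
# A single actual result of PASS is an unexpected pass, more than one actual
# result means the test was flaky, anything else is a regression; setdefault
# plays the role of the _AddToDictOfLists helper defined later in this file.)
def classify_unexpected(tests):
    passes, flaky, regressions = {}, {}, {}
    for test, results in tests.iteritems():
        actual = results['actual'].split(" ")
        if actual == ['PASS']:
            passes.setdefault('Expected to fail, but passed', []).append(test)
        elif len(actual) > 1:
            flaky.setdefault(actual[0], []).append(test)
        else:
            regressions.setdefault(results['actual'], []).append(test)
    return passes, flaky, regressions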
+ _AddToDictOfLists(flaky, actual[0], test) + else: + _AddToDictOfLists(regressions, results['actual'], test) + + if len(passes): + for key, tests in passes.iteritems(): + print "%s: (%d)" % (key, len(tests)) + tests.sort() + for test in tests: + print " %s" % test + print + + if len(flaky): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in flaky.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Unexpected flakiness: %s (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + + for test in tests: + actual = unexpected_results['tests'][test]['actual'].split(" ") + expected = unexpected_results['tests'][test]['expected'].split(" ") + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + new_expectations_list = list(set(actual) | set(expected)) + print " %s = %s" % (test, " ".join(new_expectations_list)) + print + + if len(regressions): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in regressions.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Regressions: Unexpected %s : (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + for test in tests: + print " %s = %s" % (test, key) + print + + if len(unexpected_results['tests']) and self._options.verbose: + print "-" * 78 + + def _PrintUnexpectedTestResult(self, test, result): + """Prints one unexpected test result line.""" + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] + self._meter.write(" %s -> unexpected %s\n" % + (path_utils.RelativeTestFilename(test), desc)) + + def _WriteResultsHtmlFile(self, result_summary): + """Write results.html which is a summary of tests that failed. + + Args: + result_summary: a summary of the results :) + + Returns: + True if any results were written (since expected failures may be omitted) + """ + # test failures + if self._options.full_results_html: + test_files = result_summary.failures.keys() + else: + unexpected_failures = self._GetFailures(result_summary, + include_crashes=True) + test_files = unexpected_failures.keys() + if not len(test_files): + return False + + out_filename = os.path.join(self._options.results_directory, + "results.html") + out_file = open(out_filename, 'w') + # header + if self._options.full_results_html: + h2 = "Test Failures" + else: + h2 = "Unexpected Test Failures" + out_file.write("Layout Test Results (%(time)s)" + "

</head><body><h2>%(h2)s (%(time)s)</h2>\n"
+                   % {'h2': h2, 'time': time.asctime()})
+
+    test_files.sort()
+    for test_file in test_files:
+      test_failures = result_summary.failures.get(test_file, [])
+      out_file.write("<p><a href='%s'>%s</a><br />\n"
+                     % (path_utils.FilenameToUri(test_file),
+                        path_utils.RelativeTestFilename(test_file)))
+      for failure in test_failures:
+        out_file.write("&nbsp;&nbsp;%s<br />"
+                       % failure.ResultHtmlOutput(
+                           path_utils.RelativeTestFilename(test_file)))
+      out_file.write("</p>
\n") + + # footer + out_file.write("\n") + return True + + def _ShowResultsHtmlFile(self): + """Launches the test shell open to the results.html page.""" + results_filename = os.path.join(self._options.results_directory, + "results.html") + subprocess.Popen([path_utils.TestShellPath(self._options.target), + path_utils.FilenameToUri(results_filename)]) + + +def _AddToDictOfLists(dict, key, value): + dict.setdefault(key, []).append(value) + +def ReadTestFiles(files): + tests = [] + for file in files: + for line in open(file): + line = test_expectations.StripComments(line) + if line: tests.append(line) + return tests + +def CreateLoggingWriter(options, log_option): + """Returns a write() function that will write the string to logging.info() + if comp was specified in --log or if --verbose is true. Otherwise the + message is dropped. + + Args: + options: list of command line options from optparse + log_option: option to match in options.log in order for the messages to be + logged (e.g., 'actual' or 'expected') + """ + if options.verbose or log_option in options.log.split(","): + return logging.info + return lambda str: 1 + +def main(options, args): + """Run the tests. Will call sys.exit when complete. + + Args: + options: a dictionary of command line options + args: a list of sub directories or files to test + """ + + if options.sources: + options.verbose = True + + # Set up our logging format. + meter = metered_stream.MeteredStream(options.verbose, sys.stderr) + log_fmt = '%(message)s' + log_datefmt = '%y%m%d %H:%M:%S' + log_level = logging.INFO + if options.verbose: + log_fmt = '%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s %(message)s' + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, + stream=meter) + + if not options.target: + if options.debug: + options.target = "Debug" + else: + options.target = "Release" + + if not options.use_apache: + options.use_apache = sys.platform in ('darwin', 'linux2') + + if options.results_directory.startswith("/"): + # Assume it's an absolute path and normalize. + options.results_directory = path_utils.GetAbsolutePath( + options.results_directory) + else: + # If it's a relative path, make the output directory relative to Debug or + # Release. + basedir = path_utils.PathFromBase('webkit') + options.results_directory = path_utils.GetAbsolutePath( + os.path.join(basedir, options.target, options.results_directory)) + + if options.clobber_old_results: + # Just clobber the actual test results directories since the other files + # in the results directory are explicitly used for cross-run tracking. + path = os.path.join(options.results_directory, 'LayoutTests') + if os.path.exists(path): + shutil.rmtree(path) + + # Ensure platform is valid and force it to the form 'chromium-'. + options.platform = path_utils.PlatformName(options.platform) + + if not options.num_test_shells: + # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. 
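# (Sketch only: platform_utils.GetNumCores() is the helper this patch uses
# for the default shell count; a rough standalone approximation for the
# Pythons this script targets might look like the following. The fallback
# value of 1 is an assumption, not taken from the patch.)
import os


def default_num_test_shells():
    if hasattr(os, 'sysconf') and 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
        return int(os.sysconf('SC_NPROCESSORS_ONLN'))      # POSIX
    return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))  # Windows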
+ options.num_test_shells = platform_utils.GetNumCores() + + write = CreateLoggingWriter(options, 'config') + write("Running %s test_shells in parallel" % options.num_test_shells) + + if not options.time_out_ms: + if options.target == "Debug": + options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) + else: + options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) + + options.slow_time_out_ms = str(5 * int(options.time_out_ms)) + write("Regular timeout: %s, slow test timeout: %s" % + (options.time_out_ms, options.slow_time_out_ms)) + + # Include all tests if none are specified. + new_args = [] + for arg in args: + if arg and arg != '': + new_args.append(arg) + + paths = new_args + if not paths: + paths = [] + if options.test_list: + paths += ReadTestFiles(options.test_list) + + # Create the output directory if it doesn't already exist. + path_utils.MaybeMakeDirectory(options.results_directory) + meter.update("Gathering files ...") + + test_runner = TestRunner(options, meter) + test_runner.GatherFilePaths(paths) + + if options.lint_test_files: + # Creating the expecations for each platform/target pair does all the + # test list parsing and ensures it's correct syntax (e.g. no dupes). + for platform in TestExpectationsFile.PLATFORMS: + test_runner.ParseExpectations(platform, is_debug_mode=True) + test_runner.ParseExpectations(platform, is_debug_mode=False) + print ("If there are no fail messages, errors or exceptions, then the " + "lint succeeded.") + sys.exit(0) + + try: + test_shell_binary_path = path_utils.TestShellPath(options.target) + except path_utils.PathNotFound: + print "\nERROR: test_shell is not found. Be sure that you have built it" + print "and that you are using the correct build. This script will run the" + print "Release one by default. Use --debug to use the Debug build.\n" + sys.exit(1) + + write = CreateLoggingWriter(options, "config") + write("Using platform '%s'" % options.platform) + write("Placing test results in %s" % options.results_directory) + if options.new_baseline: + write("Placing new baselines in %s" % + path_utils.ChromiumBaselinePath(options.platform)) + write("Using %s build at %s" % (options.target, test_shell_binary_path)) + if options.no_pixel_tests: + write("Not running pixel tests") + write("") + + meter.update("Parsing expectations ...") + test_runner.ParseExpectations(options.platform, options.target == 'Debug') + + meter.update("Preparing tests ...") + write = CreateLoggingWriter(options, "expected") + result_summary = test_runner.PrepareListsAndPrintOutput(write) + + if 'cygwin' == sys.platform: + logging.warn("#" * 40) + logging.warn("# UNEXPECTED PYTHON VERSION") + logging.warn("# This script should be run using the version of python") + logging.warn("# in third_party/python_24/") + logging.warn("#" * 40) + sys.exit(1) + + # Delete the disk cache if any to ensure a clean test run. 
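# (Sketch only: the clean-run housekeeping around this point, collapsed into
# one helper: wipe any test_shell disk cache next to the binary and make sure
# the results directory exists. reset_run_state is a hypothetical name;
# os.makedirs stands in for path_utils.MaybeMakeDirectory.)
import os
import shutil


def reset_run_state(test_shell_binary_path, results_directory):
    cachedir = os.path.join(os.path.dirname(test_shell_binary_path), "cache")
    if os.path.exists(cachedir):
        shutil.rmtree(cachedir)          # a stale cache could skew results
    if not os.path.exists(results_directory):
        os.makedirs(results_directory)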
+ cachedir = os.path.split(test_shell_binary_path)[0] + cachedir = os.path.join(cachedir, "cache") + if os.path.exists(cachedir): + shutil.rmtree(cachedir) + + test_runner.AddTestType(text_diff.TestTextDiff) + if not options.no_pixel_tests: + test_runner.AddTestType(image_diff.ImageDiff) + if options.fuzzy_pixel_tests: + test_runner.AddTestType(fuzzy_image_diff.FuzzyImageDiff) + + meter.update("Starting ...") + has_new_failures = test_runner.Run(result_summary) + + logging.debug("Exit status: %d" % has_new_failures) + sys.exit(has_new_failures) + +if '__main__' == __name__: + option_parser = optparse.OptionParser() + option_parser.add_option("", "--no-pixel-tests", action="store_true", + default=False, + help="disable pixel-to-pixel PNG comparisons") + option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", + default=False, + help="Also use fuzzy matching to compare pixel test" + " outputs.") + option_parser.add_option("", "--results-directory", + default="layout-test-results", + help="Output results directory source dir," + " relative to Debug or Release") + option_parser.add_option("", "--new-baseline", action="store_true", + default=False, + help="save all generated results as new baselines " + "into the platform directory, overwriting " + "whatever's already there.") + option_parser.add_option("", "--noshow-results", action="store_true", + default=False, help="don't launch the test_shell" + " with results after the tests are done") + option_parser.add_option("", "--full-results-html", action="store_true", + default=False, help="show all failures in " + "results.html, rather than only regressions") + option_parser.add_option("", "--clobber-old-results", action="store_true", + default=False, help="Clobbers test results from " + "previous runs.") + option_parser.add_option("", "--lint-test-files", action="store_true", + default=False, help="Makes sure the test files " + "parse for all configurations. Does not run any " + "tests.") + option_parser.add_option("", "--force", action="store_true", + default=False, + help="Run all tests, even those marked SKIP in the " + "test list") + option_parser.add_option("", "--num-test-shells", + help="Number of testshells to run in parallel.") + option_parser.add_option("", "--use-apache", action="store_true", + default=False, + help="Whether to use apache instead of lighttpd.") + option_parser.add_option("", "--time-out-ms", default=None, + help="Set the timeout for each test") + option_parser.add_option("", "--run-singly", action="store_true", + default=False, + help="run a separate test_shell for each test") + option_parser.add_option("", "--debug", action="store_true", default=False, + help="use the debug binary instead of the release " + "binary") + option_parser.add_option("", "--num-slow-tests-to-log", default=50, + help="Number of slow tests whose timings to print.") + option_parser.add_option("", "--platform", + help="Override the platform for expected results") + option_parser.add_option("", "--target", default="", + help="Set the build target configuration (overrides" + " --debug)") + option_parser.add_option("", "--log", action="store", + default="detailed-progress,unexpected", + help="log various types of data. 
The param should " + "be a comma-separated list of values from: " + "actual,config," + LOG_DETAILED_PROGRESS + + ",expected,timing," + LOG_UNEXPECTED + + " (defaults to --log detailed-progress,unexpected)") + option_parser.add_option("-v", "--verbose", action="store_true", + default=False, help="include debug-level logging") + option_parser.add_option("", "--sources", action="store_true", + help="show expected result file path for each test " + "(implies --verbose)") + option_parser.add_option("", "--startup-dialog", action="store_true", + default=False, + help="create a dialog on test_shell.exe startup") + option_parser.add_option("", "--gp-fault-error-box", action="store_true", + default=False, + help="enable Windows GP fault error box") + option_parser.add_option("", "--wrapper", + help="wrapper command to insert before invocations " + "of test_shell; option is split on whitespace " + "before running. (example: " + "--wrapper='valgrind --smc-check=all')") + option_parser.add_option("", "--test-list", action="append", + help="read list of tests to run from file", + metavar="FILE") + option_parser.add_option("", "--nocheck-sys-deps", action="store_true", + default=False, + help="Don't check the system dependencies (themes)") + option_parser.add_option("", "--randomize-order", action="store_true", + default=False, + help=("Run tests in random order (useful for " + "tracking down corruption)")) + option_parser.add_option("", "--run-chunk", + default=None, + help=("Run a specified chunk (n:l), the nth of len " + "l, of the layout tests")) + option_parser.add_option("", "--run-part", + default=None, + help=("Run a specified part (n:m), the nth of m" + " parts, of the layout tests")) + option_parser.add_option("", "--batch-size", + default=None, + help=("Run a the tests in batches (n), after every " + "n tests, the test shell is relaunched.")) + option_parser.add_option("", "--builder-name", + default="DUMMY_BUILDER_NAME", + help=("The name of the builder shown on the " + "waterfall running this script e.g. WebKit.")) + option_parser.add_option("", "--build-name", + default="DUMMY_BUILD_NAME", + help=("The name of the builder used in its path, " + "e.g. webkit-rel.")) + option_parser.add_option("", "--build-number", + default="DUMMY_BUILD_NUMBER", + help=("The build number of the builder running" + "this script.")) + option_parser.add_option("", "--experimental-fully-parallel", + action="store_true", default=False, + help="run all tests in parallel") + + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/webkitpy/test_output_formatter.py b/webkit/tools/layout_tests/webkitpy/test_output_formatter.py new file mode 100755 index 0000000..f60dad1 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_output_formatter.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +This is a script for generating easily-viewable comparisons of text and pixel +diffs. 
+""" +import optparse + +from layout_package import test_expectations +from layout_package import failure +from layout_package import failure_finder +from layout_package import failure_finder_test +from layout_package import html_generator + +DEFAULT_BUILDER = "Webkit" + + +def main(options, args): + + if options.run_tests: + fft = failure_finder_test.FailureFinderTest() + return fft.runTests() + + # TODO(gwilson): Add a check that verifies the given platform exists. + + finder = failure_finder.FailureFinder(options.build_number, + options.platform_builder, + (not options.include_expected), + options.test_regex, + options.output_dir, + int(options.max_failures), + options.verbose, + options.builder_log, + options.archive_log, + options.zip_file, + options.expectations_file) + finder.use_local_baselines = options.local + failure_list = finder.GetFailures() + + if not failure_list: + print "Did not find any failures." + return + + generator = html_generator.HTMLGenerator(failure_list, + options.output_dir, + finder.build, + options.platform_builder, + (not options.include_expected)) + filename = generator.GenerateHTML() + + if filename and options.verbose: + print "File created at %s" % filename + +if __name__ == "__main__": + option_parser = optparse.OptionParser() + option_parser.add_option("-v", "--verbose", action="store_true", + default=False, + help="Display lots of output.") + option_parser.add_option("-i", "--include-expected", action="store_true", + default=False, + help="Include expected failures in output") + option_parser.add_option("-p", "--platform-builder", + default=DEFAULT_BUILDER, + help="Use the given builder") + option_parser.add_option("-b", "--build-number", + default=None, + help="Use the given build number") + option_parser.add_option("-t", "--test-regex", + default=None, + help="Use the given regex to filter tests") + option_parser.add_option("-o", "--output-dir", + default=".", + help="Output files to given directory") + option_parser.add_option("-m", "--max-failures", + default=100, + help="Limit the maximum number of failures") + option_parser.add_option("-r", "--run-tests", action="store_true", + default=False, + help="Runs unit tests") + option_parser.add_option("-u", "--builder-log", + default=None, + help=("Use the local builder log file " + "instead of scraping the buildbots")) + option_parser.add_option("-a", "--archive-log", + default=None, + help=("Use the local archive log file " + "instead of scraping the buildbots")) + option_parser.add_option("-e", "--expectations-file", + default=None, + help=("Use the local test expectations file " + "instead of scraping the buildbots")) + option_parser.add_option("-z", "--zip-file", + default=None, + help=("Use the local test output zip file " + "instead of scraping the buildbots")) + option_parser.add_option("-l", "--local", action="store_true", + default=False, + help=("Use local baselines instead of scraping " + "baselines from source websites")) + + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/webkitpy/test_output_xml_to_json.py b/webkit/tools/layout_tests/webkitpy/test_output_xml_to_json.py new file mode 100755 index 0000000..bda1ff3 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_output_xml_to_json.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright (c) 2010 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +""" +This is a script for generating JSON from JUnit XML output (generated by +google tests with --gtest_output=xml option). +""" +import logging +import optparse +import os +import sys +import time + +from xml.dom import minidom + +from layout_package import json_results_generator +from layout_package import path_utils +from layout_package import test_expectations + +# Builder base URL where we have the archived test results. +BUILDER_BASE_URL = "http://build.chromium.org/buildbot/gtest-results/" + + +class JSONGeneratorFromXML(object): + + def __init__(self, options): + self._options = options + + # Check the results directory + if not os.path.exists(self._options.results_directory): + os.mkdir(self._options.results_directory) + + results_xml_file = None + try: + results_xml_file = open(self._options.input_results_xml) + except IOError, e: + logging.fatal("Cannot open file %s", + self._options.input_results_xml) + sys.exit(1) + + summary = self._ParseTestResultsXML( + minidom.parse(results_xml_file).documentElement) + results_xml_file.close() + + json_results_generator.JSONResultsGenerator( + self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + self._options.builder_base_url, + self._test_timings, + self._failures, self._passed_tests, self._skipped_tests, + self._tests_list) + + def _ParseTestResultsXML(self, node): + self._tests_list = set() + self._passed_tests = set() + self._skipped_tests = set() + self._test_timings = {} + self._failures = {} + + testcases = node.getElementsByTagName('testcase') + for testcase in testcases: + name = testcase.getAttribute('name') + classname = testcase.getAttribute('classname') + test_name = "%s.%s" % (classname, name) + + status = testcase.getAttribute('status') + if status == 'notrun': + if name.startswith('DISABLED_'): + self._skipped_tests.add(test_name) + continue + + failures = testcase.getElementsByTagName('failure') + if failures: + self._failures[test_name] = test_expectations.TEXT + else: + self._passed_tests.add(test_name) + + self._test_timings[test_name] = float( + testcase.getAttribute('time')) + self._tests_list.add(test_name) + + +def main(options, args): + """Parse the tests results and generate JSON files. + + Args: + options: a dictionary of command line options + args: a list of sub directories or files to test + """ + + if not options.test_type: + logging.error("--test-type needs to be specified.") + sys.exit(1) + + canon_test_type = options.test_type.replace("-", "_") + if not options.input_results_xml: + options.input_results_xml = "%s.xml" % (canon_test_type) + if not options.builder_base_url: + options.builder_base_url = "%s%s/" % (BUILDER_BASE_URL, + options.test_type) + + JSONGeneratorFromXML(options) + + return + +if '__main__' == __name__: + option_parser = optparse.OptionParser() + option_parser.add_option("", "--test-type", default="", + help="Test type that generated the results XML," + " e.g. unit-tests.") + option_parser.add_option("", "--results-directory", default="./", + help="Output results directory source dir.") + option_parser.add_option("", "--input-results-xml", default="", + help="Test results xml file (input for us)." + " default is TEST_TYPE.xml") + option_parser.add_option("", "--builder-base-url", default="", + help=("A URL where we have the archived test " + "results. 
(default=%sTEST_TYPE_results/)" + % BUILDER_BASE_URL)) + option_parser.add_option("", "--builder-name", + default="DUMMY_BUILDER_NAME", + help="The name of the builder shown on the " + "waterfall running this script e.g. WebKit.") + option_parser.add_option("", "--build-name", + default="DUMMY_BUILD_NAME", + help="The name of the builder used in its path, " + "e.g. webkit-rel.") + option_parser.add_option("", "--build-number", + default="DUMMY_BUILD_NUMBER", + help="The build number of the builder running" + "this script.") + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/webkitpy/test_types/__init__.py b/webkit/tools/layout_tests/webkitpy/test_types/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/webkit/tools/layout_tests/webkitpy/test_types/fuzzy_image_diff.py b/webkit/tools/layout_tests/webkitpy/test_types/fuzzy_image_diff.py new file mode 100644 index 0000000..3d503b6 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_types/fuzzy_image_diff.py @@ -0,0 +1,47 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Compares the image output of a test to the expected image output using +fuzzy matching. +""" + +import errno +import logging +import os +import shutil +import subprocess + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + + +class FuzzyImageDiff(test_type_base.TestTypeBase): + + def CompareOutput(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output image and + checksum against the expected files from the LayoutTest directory. + """ + failures = [] + + # If we didn't produce a hash file, this test must be text-only. + if test_args.hash is None: + return failures + + expected_png_file = path_utils.ExpectedFilename(filename, '.png') + + if test_args.show_sources: + logging.debug('Using %s' % expected_png_file) + + # Also report a missing expected PNG file. + if not os.path.isfile(expected_png_file): + failures.append(test_failures.FailureMissingImage(self)) + + # Run the fuzzymatcher + r = subprocess.call([path_utils.FuzzyMatchPath(), + test_args.png_path, expected_png_file]) + if r != 0: + failures.append(test_failures.FailureFuzzyFailure(self)) + + return failures diff --git a/webkit/tools/layout_tests/webkitpy/test_types/image_diff.py b/webkit/tools/layout_tests/webkitpy/test_types/image_diff.py new file mode 100644 index 0000000..38abb6b --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_types/image_diff.py @@ -0,0 +1,199 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Compares the image output of a test to the expected image output. + +Compares hashes for the generated and expected images. If the output doesn't +match, returns FailureImageHashMismatch and outputs both hashes into the layout +test results directory. +""" + +import errno +import logging +import os +import shutil +import subprocess + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + +# Cache whether we have the image_diff executable available. 
+_compare_available = True +_compare_msg_printed = False + + +class ImageDiff(test_type_base.TestTypeBase): + + def _CopyOutputPNG(self, test_filename, source_image, extension): + """Copies result files into the output directory with appropriate + names. + + Args: + test_filename: the test filename + source_file: path to the image file (either actual or expected) + extension: extension to indicate -actual.png or -expected.png + """ + self._MakeOutputDirectory(test_filename) + dest_image = self.OutputFilename(test_filename, extension) + + try: + shutil.copyfile(source_image, dest_image) + except IOError, e: + # A missing expected PNG has already been recorded as an error. + if errno.ENOENT != e.errno: + raise + + def _SaveBaselineFiles(self, filename, png_path, checksum): + """Saves new baselines for the PNG and checksum. + + Args: + filename: test filename + png_path: path to the actual PNG result file + checksum: value of the actual checksum result + """ + png_file = open(png_path, "rb") + png_data = png_file.read() + png_file.close() + self._SaveBaselineData(filename, png_data, ".png") + self._SaveBaselineData(filename, checksum, ".checksum") + + def _CreateImageDiff(self, filename, target): + """Creates the visual diff of the expected/actual PNGs. + + Args: + filename: the name of the test + target: Debug or Release + """ + diff_filename = self.OutputFilename(filename, + self.FILENAME_SUFFIX_COMPARE) + actual_filename = self.OutputFilename(filename, + self.FILENAME_SUFFIX_ACTUAL + '.png') + expected_filename = self.OutputFilename(filename, + self.FILENAME_SUFFIX_EXPECTED + '.png') + + global _compare_available + cmd = '' + + try: + executable = path_utils.ImageDiffPath(target) + cmd = [executable, '--diff', actual_filename, expected_filename, + diff_filename] + except Exception, e: + _compare_available = False + + result = 1 + if _compare_available: + try: + result = subprocess.call(cmd) + except OSError, e: + if e.errno == errno.ENOENT or e.errno == errno.EACCES: + _compare_available = False + else: + raise e + except ValueError: + # work around a race condition in Python 2.4's implementation + # of subprocess.Popen + pass + + global _compare_msg_printed + + if not _compare_available and not _compare_msg_printed: + _compare_msg_printed = True + print('image_diff not found. Make sure you have a ' + target + + ' build of the image_diff executable.') + + return result + + def CompareOutput(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output image and + checksum against the expected files from the LayoutTest directory. + """ + failures = [] + + # If we didn't produce a hash file, this test must be text-only. + if test_args.hash is None: + return failures + + # If we're generating a new baseline, we pass. + if test_args.new_baseline: + self._SaveBaselineFiles(filename, test_args.png_path, + test_args.hash) + return failures + + # Compare hashes. + expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') + + expected_png_file = path_utils.ExpectedFilename(filename, '.png') + + if test_args.show_sources: + logging.debug('Using %s' % expected_hash_file) + logging.debug('Using %s' % expected_png_file) + + try: + expected_hash = open(expected_hash_file, "r").read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + expected_hash = '' + + + if not os.path.isfile(expected_png_file): + # Report a missing expected PNG file. 
+ self.WriteOutputFiles(filename, '', '.checksum', test_args.hash, + expected_hash, diff=False, wdiff=False) + self._CopyOutputPNG(filename, test_args.png_path, '-actual.png') + failures.append(test_failures.FailureMissingImage(self)) + return failures + elif test_args.hash == expected_hash: + # Hash matched (no diff needed, okay to return). + return failures + + + self.WriteOutputFiles(filename, '', '.checksum', test_args.hash, + expected_hash, diff=False, wdiff=False) + self._CopyOutputPNG(filename, test_args.png_path, '-actual.png') + self._CopyOutputPNG(filename, expected_png_file, '-expected.png') + + # Even though we only use result in one codepath below but we + # still need to call CreateImageDiff for other codepaths. + result = self._CreateImageDiff(filename, target) + if expected_hash == '': + failures.append(test_failures.FailureMissingImageHash(self)) + elif test_args.hash != expected_hash: + # Hashes don't match, so see if the images match. If they do, then + # the hash is wrong. + if result == 0: + failures.append(test_failures.FailureImageHashIncorrect(self)) + else: + failures.append(test_failures.FailureImageHashMismatch(self)) + + return failures + + def DiffFiles(self, file1, file2): + """Diff two image files. + + Args: + file1, file2: full paths of the files to compare. + + Returns: + True if two files are different. + False otherwise. + """ + + try: + executable = path_utils.ImageDiffPath('Debug') + except Exception, e: + logging.warn('Failed to find image diff executable.') + return True + + cmd = [executable, file1, file2] + result = 1 + try: + result = subprocess.call(cmd) + except OSError, e: + logging.warn('Failed to compare image diff: %s', e) + return True + + return result == 1 diff --git a/webkit/tools/layout_tests/webkitpy/test_types/test_type_base.py b/webkit/tools/layout_tests/webkitpy/test_types/test_type_base.py new file mode 100644 index 0000000..f10c75f --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_types/test_type_base.py @@ -0,0 +1,241 @@ +# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Defines the interface TestTypeBase which other test types inherit from. + +Also defines the TestArguments "struct" to pass them additional arguments. +""" + +import cgi +import difflib +import errno +import logging +import os.path +import subprocess + +from layout_package import path_utils + + +class TestArguments(object): + """Struct-like wrapper for additional arguments needed by + specific tests.""" + # Whether to save new baseline results. + new_baseline = False + + # Path to the actual PNG file generated by pixel tests + png_path = None + + # Value of checksum generated by pixel tests. + hash = None + + # Whether to use wdiff to generate by-word diffs. + wdiff = False + + # Whether to report the locations of the expected result files used. + show_sources = False + +# Python bug workaround. See the wdiff code in WriteOutputFiles for an +# explanation. +_wdiff_available = True + + +class TestTypeBase(object): + + # Filename pieces when writing failures to the test results directory. + FILENAME_SUFFIX_ACTUAL = "-actual" + FILENAME_SUFFIX_EXPECTED = "-expected" + FILENAME_SUFFIX_DIFF = "-diff" + FILENAME_SUFFIX_WDIFF = "-wdiff.html" + FILENAME_SUFFIX_COMPARE = "-diff.png" + + def __init__(self, platform, root_output_dir): + """Initialize a TestTypeBase object. 
+ + Args: + platform: the platform (e.g., 'chromium-mac-leopard') + identifying the platform-specific results to be used. + root_output_dir: The unix style path to the output dir. + """ + self._root_output_dir = root_output_dir + self._platform = platform + + def _MakeOutputDirectory(self, filename): + """Creates the output directory (if needed) for a given test + filename.""" + output_filename = os.path.join(self._root_output_dir, + path_utils.RelativeTestFilename(filename)) + path_utils.MaybeMakeDirectory(os.path.split(output_filename)[0]) + + def _SaveBaselineData(self, filename, data, modifier): + """Saves a new baseline file into the platform directory. + + The file will be named simply "-expected", suitable for + use as the expected results in a later run. + + Args: + filename: path to the test file + data: result to be saved as the new baseline + modifier: type of the result file, e.g. ".txt" or ".png" + """ + relative_dir = os.path.dirname( + path_utils.RelativeTestFilename(filename)) + output_dir = os.path.join( + path_utils.ChromiumBaselinePath(self._platform), relative_dir) + output_file = os.path.basename(os.path.splitext(filename)[0] + + self.FILENAME_SUFFIX_EXPECTED + modifier) + + path_utils.MaybeMakeDirectory(output_dir) + output_path = os.path.join(output_dir, output_file) + logging.debug('writing new baseline to "%s"' % (output_path)) + open(output_path, "wb").write(data) + + def OutputFilename(self, filename, modifier): + """Returns a filename inside the output dir that contains modifier. + + For example, if filename is c:/.../fast/dom/foo.html and modifier is + "-expected.txt", the return value is + c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt + + Args: + filename: absolute filename to test file + modifier: a string to replace the extension of filename with + + Return: + The absolute windows path to the output filename + """ + output_filename = os.path.join(self._root_output_dir, + path_utils.RelativeTestFilename(filename)) + return os.path.splitext(output_filename)[0] + modifier + + def CompareOutput(self, filename, proc, output, test_args, target): + """Method that compares the output from the test with the + expected value. + + This is an abstract method to be implemented by all sub classes. + + Args: + filename: absolute filename to test file + proc: a reference to the test_shell process + output: a string containing the output of the test + test_args: a TestArguments object holding optional additional + arguments + target: Debug or Release + + Return: + a list of TestFailure objects, empty if the test passes + """ + raise NotImplemented + + def WriteOutputFiles(self, filename, test_type, file_type, output, + expected, diff=True, wdiff=False): + """Writes the test output, the expected output and optionally the diff + between the two to files in the results directory. + + The full output filename of the actual, for example, will be + -actual + For instance, + my_test-simp-actual.txt + + Args: + filename: The test filename + test_type: A string describing the test type, e.g. "simp" + file_type: A string describing the test output file type, e.g. ".txt" + output: A string containing the test output + expected: A string containing the expected test output + diff: if True, write a file containing the diffs too. 
This should be + False for results that are not text + wdiff: if True, write an HTML file containing word-by-word diffs + """ + self._MakeOutputDirectory(filename) + actual_filename = self.OutputFilename(filename, + test_type + self.FILENAME_SUFFIX_ACTUAL + file_type) + expected_filename = self.OutputFilename(filename, + test_type + self.FILENAME_SUFFIX_EXPECTED + file_type) + if output: + open(actual_filename, "wb").write(output) + if expected: + open(expected_filename, "wb").write(expected) + + if not output or not expected: + return + + if diff: + diff = difflib.unified_diff(expected.splitlines(True), + output.splitlines(True), + expected_filename, + actual_filename) + + diff_filename = self.OutputFilename(filename, + test_type + self.FILENAME_SUFFIX_DIFF + file_type) + open(diff_filename, "wb").write(''.join(diff)) + + if wdiff: + # Shell out to wdiff to get colored inline diffs. + executable = path_utils.WDiffPath() + cmd = [executable, + '--start-delete=##WDIFF_DEL##', + '--end-delete=##WDIFF_END##', + '--start-insert=##WDIFF_ADD##', + '--end-insert=##WDIFF_END##', + expected_filename, + actual_filename] + filename = self.OutputFilename(filename, + test_type + self.FILENAME_SUFFIX_WDIFF) + + global _wdiff_available + + try: + # Python's Popen has a bug that causes any pipes opened to a + # process that can't be executed to be leaked. Since this + # code is specifically designed to tolerate exec failures + # to gracefully handle cases where wdiff is not installed, + # the bug results in a massive file descriptor leak. As a + # workaround, if an exec failure is ever experienced for + # wdiff, assume it's not available. This will leak one + # file descriptor but that's better than leaking each time + # wdiff would be run. + # + # http://mail.python.org/pipermail/python-list/ + # 2008-August/505753.html + # http://bugs.python.org/issue3210 + # + # It also has a threading bug, so we don't output wdiff if + # the Popen raises a ValueError. + # http://bugs.python.org/issue1236 + if _wdiff_available: + wdiff = subprocess.Popen( + cmd, stdout=subprocess.PIPE).communicate()[0] + wdiff_failed = False + + except OSError, e: + if (e.errno == errno.ENOENT or e.errno == errno.EACCES or + e.errno == errno.ECHILD): + _wdiff_available = False + else: + raise e + except ValueError, e: + wdiff_failed = True + + out = open(filename, 'wb') + + if not _wdiff_available: + out.write( + "wdiff not installed.
" + "If you're running OS X, you can install via macports." + "
" + "If running Ubuntu linux, you can run " + "'sudo apt-get install wdiff'.") + elif wdiff_failed: + out.write('wdiff failed due to running with multiple ' + 'test_shells in parallel.') + else: + wdiff = cgi.escape(wdiff) + wdiff = wdiff.replace('##WDIFF_DEL##', '') + wdiff = wdiff.replace('##WDIFF_ADD##', '') + wdiff = wdiff.replace('##WDIFF_END##', '') + out.write('') + out.write('
' + wdiff + '
') + + out.close() diff --git a/webkit/tools/layout_tests/webkitpy/test_types/text_diff.py b/webkit/tools/layout_tests/webkitpy/test_types/text_diff.py new file mode 100644 index 0000000..ddbdc8b --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/test_types/text_diff.py @@ -0,0 +1,96 @@ +# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Compares the text output of a test to the expected text output. + +If the output doesn't match, returns FailureTextMismatch and outputs the diff +files into the layout test results directory. +""" + +import errno +import logging +import os.path + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + + +def isRenderTreeDump(data): + """Returns true if data appears to be a render tree dump as opposed to a + plain text dump.""" + return data.find("RenderView at (0,0)") != -1 + + +class TestTextDiff(test_type_base.TestTypeBase): + + def GetNormalizedOutputText(self, output): + # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin) + # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n". + norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace( + "\r\n", "\n") + return norm + "\n" + + def GetNormalizedExpectedText(self, filename, show_sources): + """Given the filename of the test, read the expected output from a file + and normalize the text. Returns a string with the expected text, or '' + if the expected output file was not found.""" + # Read the platform-specific expected text. + expected_filename = path_utils.ExpectedFilename(filename, '.txt') + if show_sources: + logging.debug('Using %s' % expected_filename) + + return self.GetNormalizedText(expected_filename) + + def GetNormalizedText(self, filename): + try: + text = open(filename).read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + return '' + + # Normalize line endings + return text.strip("\r\n").replace("\r\n", "\n") + "\n" + + def CompareOutput(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output text against + the expected text from the LayoutTest directory.""" + failures = [] + + # If we're generating a new baseline, we pass. + if test_args.new_baseline: + self._SaveBaselineData(filename, output, ".txt") + return failures + + # Normalize text to diff + output = self.GetNormalizedOutputText(output) + expected = self.GetNormalizedExpectedText(filename, + test_args.show_sources) + + # Write output files for new tests, too. + if output != expected: + # Text doesn't match, write output files. + self.WriteOutputFiles(filename, "", ".txt", output, expected, + diff=True, wdiff=True) + + if expected == '': + failures.append(test_failures.FailureMissingResult(self)) + else: + failures.append(test_failures.FailureTextMismatch(self, True)) + + return failures + + def DiffFiles(self, file1, file2): + """Diff two text files. + + Args: + file1, file2: full paths of the files to compare. + + Returns: + True if two files are different. + False otherwise. 
+ """ + + return self.GetNormalizedText(file1) != self.GetNormalizedText(file2) diff --git a/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard.py b/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard.py new file mode 100644 index 0000000..b5774b6 --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python +# Copyright (c) 2009 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Script to read in updates in JSON form from the layout test dashboard +and apply them to test_expectations.txt. + +Usage: +1. Go to http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/ + layout_tests/flakiness_dashboard.html#expectationsUpdate=true +2. Copy-paste that JSON into a local file. +3. python update_expectations_from_dashboard.py path/to/local/file +""" + +import logging +import os +import sys + +from layout_package import path_utils +from layout_package import test_expectations + +sys.path.append(path_utils.PathFromBase('third_party')) +import simplejson + + +def UpdateExpectations(expectations, updates): + expectations = ExpectationsUpdater(None, None, + 'WIN', False, False, expectations, True) + return expectations.UpdateBasedOnJSON(updates) + + +class OptionsAndExpectationsHolder(object): + """Container for a list of options and a list of expectations for a given + test.""" + + def __init__(self, options, expectations): + self.options = options + self.expectations = expectations + + +class BuildInfo(OptionsAndExpectationsHolder): + """Container for a list of options and expectations for a given test as + well as a map from build_type (e.g. debug/release) to a list of platforms + (e.g. ["win", "linux"]). + """ + + def __init__(self, options, expectations, build_info): + OptionsAndExpectationsHolder.__init__(self, options, expectations) + self.build_info = build_info + + +class ExpectationsUpdater(test_expectations.TestExpectationsFile): + """Class to update test_expectations.txt based on updates in the following + form: + {"test1.html": { + "WIN RELEASE": {"missing": "FAIL TIMEOUT", "extra": "CRASH"}} + "WIN DEBUG": {"missing": "FAIL TIMEOUT"}} + "test2.html": ... + } + """ + + def _GetBuildTypesAndPlatforms(self, options): + """Splits up the options list into three lists: platforms, + build_types and other_options.""" + platforms = [] + build_types = [] + other_options = [] + for option in options: + if option in self.PLATFORMS: + platforms.append(option) + elif option in self.BUILD_TYPES: + build_types.append(option) + else: + other_options.append(option) + + if not len(build_types): + build_types = self.BUILD_TYPES + + if not len(platforms): + # If there are no platforms specified, use the most generic version + # of each platform name so we don't have to dedup them later. + platforms = self.BASE_PLATFORMS + + return (platforms, build_types, other_options) + + def _ApplyUpdatesToResults(self, test, results, update_json, expectations, + other_options): + """Applies the updates from the JSON to the existing results in + test_expectations. + Args: + test: The test to update. + results: The results object to update. + update_json: The parsed JSON object with the updates. + expectations: The existing expectatons for this test. + other_options: The existing modifiers for this test + excluding platforms and build_types. 
+ """ + updates = update_json[test] + for build_info in updates: + platform, build_type = build_info.lower().split(' ') + + # If the platform/build_type is not currently listed for the test, + # skip it as this platform/build_type may be listed in another + # line. + if platform not in results or build_type not in results[platform]: + continue + + these_results = results[platform][build_type] + these_updates = updates[build_info] + these_expectations = these_results.expectations + these_options = these_results.options + + self._ApplyExtraUpdates(these_updates, these_options, + these_expectations) + self._ApplyMissingUpdates(test, these_updates, these_options, + these_expectations) + + def _ApplyExtraUpdates(self, updates, options, expectations): + """Remove extraneous expectations/options in the updates object to + the given options/expectations lists. + """ + if "extra" not in updates: + return + + items = updates["extra"].lower().split(' ') + for item in items: + if item in self.EXPECTATIONS: + if item in expectations: + expectations.remove(item) + else: + if item in options: + options.remove(item) + + def _ApplyMissingUpdates(self, test, updates, options, expectations): + """Apply an addition expectations/options in the updates object to + the given options/expectations lists. + """ + if "missing" not in updates: + return + + items = updates["missing"].lower().split(' ') + for item in items: + if item == 'other': + continue + + # Don't add TIMEOUT to SLOW tests. Automating that is too + # complicated instead, print out tests that need manual attention. + if ((item == "timeout" and + ("slow" in options or "slow" in items)) or + (item == "slow" and + ("timeout" in expectations or "timeout" in items))): + logging.info("NEEDS MANUAL ATTENTION: %s may need " + "to be marked TIMEOUT or SLOW." % test) + elif item in self.EXPECTATIONS: + if item not in expectations: + expectations.append(item) + if ("fail" in expectations and + (item == "image+text" or item == "image" or + item == "text")): + expectations.remove("fail") + else: + if item not in options: + options.append(item) + + def _AppendPlatform(self, item, build_type, platform): + """Appends the give build_type and platform to the BuildInfo item. + """ + build_info = item.build_info + if build_type not in build_info: + build_info[build_type] = [] + build_info[build_type].append(platform) + + def _GetUpdatesDedupedByMatchingOptionsAndExpectations(self, results): + """Converts the results, which is + results[platforms][build_type] = OptionsAndExpectationsHolder + to BuildInfo objects, which dedupes platform/build_types that + have the same expectations and options. + """ + updates = [] + for platform in results: + for build_type in results[platform]: + options = results[platform][build_type].options + expectations = results[platform][build_type].expectations + + found_match = False + for update in updates: + if (update.options == options and + update.expectations == expectations): + self._AppendPlatform(update, build_type, platform) + found_match = True + break + + if found_match: + continue + + update = BuildInfo(options, expectations, {}) + self._AppendPlatform(update, build_type, platform) + updates.append(update) + + return self._RoundUpFlakyUpdates(updates) + + def _HasMajorityBuildConfigurations(self, candidate, candidate2): + """Returns true if the candidate BuildInfo represents all build + configurations except the single one listed in candidate2. 
+ For example, if a test is FAIL TIMEOUT on all bots except WIN-Release, + where it is just FAIL. Or if a test is FAIL TIMEOUT on MAC-Release, + Mac-Debug and Linux-Release, but only FAIL on Linux-Debug. + """ + build_types = self.BUILD_TYPES[:] + build_info = candidate.build_info + if "release" not in build_info or "debug" not in build_info: + return None + + release_set = set(build_info["release"]) + debug_set = set(build_info["debug"]) + if len(release_set - debug_set) is 1: + full_set = release_set + partial_set = debug_set + needed_build_type = "debug" + elif len(debug_set - release_set) is 1: + full_set = debug_set + partial_set = release_set + needed_build_type = "release" + else: + return None + + build_info2 = candidate2.build_info + if needed_build_type not in build_info2: + return None + + build_type = None + for this_build_type in build_info2: + # Can only work if this candidate has one build_type. + if build_type: + return None + build_type = this_build_type + + if set(build_info2[needed_build_type]) == full_set - partial_set: + return full_set + else: + return None + + def _RoundUpFlakyUpdates(self, updates): + """Consolidates the updates into one update if 5/6 results are + flaky and the is a subset of the flaky results 6th just not + happening to flake or 3/4 results are flaky and the 4th has a + subset of the flaky results. + """ + if len(updates) is not 2: + return updates + + item1, item2 = updates + candidate = None + candidate_platforms = self._HasMajorityBuildConfigurations(item1, + item2) + if candidate_platforms: + candidate = item1 + else: + candidate_platforms = self._HasMajorityBuildConfigurations(item1, + item2) + if candidate_platforms: + candidate = item2 + + if candidate: + options1 = set(item1.options) + options2 = set(item2.options) + expectations1 = set(item1.expectations) + if not len(expectations1): + expectations1.add("pass") + expectations2 = set(item2.expectations) + if not len(expectations2): + expectations2.add("pass") + + options_union = options1 | options2 + expectations_union = expectations1 | expectations2 + # If the options and expectations are equal to their respective + # unions then we can round up to include the 6th platform. + if (candidate == item1 and options1 == options_union and + expectations1 == expectations_union and len(expectations2) or + candidate == item2 and options2 == options_union and + expectations2 == expectations_union and len(expectations1)): + for build_type in self.BUILD_TYPES: + candidate.build_info[build_type] = list( + candidate_platforms) + updates = [candidate] + return updates + + def UpdateBasedOnJSON(self, update_json): + """Updates the expectations based on the update_json, which is of the + following form: + {"1.html": { + "WIN DEBUG": {"extra": "FAIL", "missing", "PASS"}, + "WIN RELEASE": {"extra": "FAIL"} + }} + """ + output = [] + + comment_lines = [] + removed_test_on_previous_line = False + lineno = 0 + for line in self._GetIterableExpectations(): + lineno += 1 + test, options, expectations = self.ParseExpectationsLine(line, + lineno) + + # If there are no updates for this test, then output the line + # unmodified. 
+ if (test not in update_json): + if test: + self._WriteCompletedLines(output, comment_lines, line) + else: + if removed_test_on_previous_line: + removed_test_on_previous_line = False + comment_lines = [] + comment_lines.append(line) + continue + + platforms, build_types, other_options = \ + self._GetBuildTypesAndPlatforms(options) + + updates = update_json[test] + has_updates_for_this_line = False + for build_info in updates: + platform, build_type = build_info.lower().split(' ') + if platform in platforms and build_type in build_types: + has_updates_for_this_line = True + + # If the updates for this test don't apply for the platforms / + # build-types listed in this line, then output the line unmodified. + if not has_updates_for_this_line: + self._WriteCompletedLines(output, comment_lines, line) + continue + + results = {} + for platform in platforms: + results[platform] = {} + for build_type in build_types: + results[platform][build_type] = \ + OptionsAndExpectationsHolder(other_options[:], + expectations[:]) + + self._ApplyUpdatesToResults(test, results, update_json, + expectations, other_options) + + deduped_updates = \ + self._GetUpdatesDedupedByMatchingOptionsAndExpectations( + results) + removed_test_on_previous_line = not self._WriteUpdates(output, + comment_lines, test, deduped_updates) + # Append any comment/whitespace lines at the end of test_expectations. + output.extend(comment_lines) + return "".join(output) + + def _WriteUpdates(self, output, comment_lines, test, updates): + """Writes the updates to the output. + Args: + output: List to append updates to. + comment_lines: Comments that come before this test that should be + prepending iff any tests lines are written out. + test: The test being updating. + updates: List of BuildInfo instances that represent the final values + for this test line.. + """ + wrote_any_lines = False + for update in updates: + options = update.options + expectations = update.expectations + + has_meaningful_modifier = False + for option in options: + if option in self.MODIFIERS: + has_meaningful_modifier = True + break + + has_non_pass_expectation = False + for expectation in expectations: + if expectation != "pass": + has_non_pass_expectation = True + break + + # If this test is only left with platform, build_type, bug number + # and a PASS or no expectation, then we can exclude it from + # test_expectations. + if not has_meaningful_modifier and not has_non_pass_expectation: + continue + + if not has_non_pass_expectation: + expectations = ["pass"] + + missing_build_types = list(self.BUILD_TYPES) + sentinal = None + for build_type in update.build_info: + if not sentinal: + sentinal = update.build_info[build_type] + # Remove build_types where the list of platforms is equal. 
+ if sentinal == update.build_info[build_type]: + missing_build_types.remove(build_type) + + has_all_build_types = not len(missing_build_types) + if has_all_build_types: + self._WriteLine(output, comment_lines, update, options, + build_type, expectations, test, + has_all_build_types) + wrote_any_lines = True + else: + for build_type in update.build_info: + self._WriteLine(output, comment_lines, update, options, + build_type, expectations, test, + has_all_build_types) + wrote_any_lines = True + + return wrote_any_lines + + def _WriteCompletedLines(self, output, comment_lines, test_line=None): + """Writes the comment_lines and test_line to the output and empties + out the comment_lines.""" + output.extend(comment_lines) + del comment_lines[:] + if test_line: + output.append(test_line) + + def _GetPlatform(self, platforms): + """Returns the platform to use. If all platforms are listed, then + return the empty string as that's what we want to list in + test_expectations.txt. + + Args: + platforms: List of lower-case platform names. + """ + platforms.sort() + if platforms == list(self.BASE_PLATFORMS): + return "" + else: + return " ".join(platforms) + + def _WriteLine(self, output, comment_lines, update, options, build_type, + expectations, test, exclude_build_type): + """Writes a test_expectations.txt line. + Args: + output: List to append new lines to. + comment_lines: List of lines to prepend before the new line. + update: The update object. + """ + line = options[:] + + platforms = self._GetPlatform(update.build_info[build_type]) + if platforms: + line.append(platforms) + if not exclude_build_type: + line.append(build_type) + + line = [x.upper() for x in line] + expectations = [x.upper() for x in expectations] + + line = line + [":", test, "="] + expectations + self._WriteCompletedLines(output, comment_lines, " ".join(line) + "\n") + + +def main(): + logging.basicConfig(level=logging.INFO, + format='%(message)s') + + updates = simplejson.load(open(sys.argv[1])) + + path_to_expectations = path_utils.GetAbsolutePath( + os.path.dirname(sys.argv[0])) + path_to_expectations = os.path.join(path_to_expectations, + "test_expectations.txt") + + old_expectations = open(path_to_expectations).read() + new_expectations = UpdateExpectations(old_expectations, updates) + open(path_to_expectations, 'w').write(new_expectations) + +if '__main__' == __name__: + main() diff --git a/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard_unittest.py b/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard_unittest.py new file mode 100644 index 0000000..102054d --- /dev/null +++ b/webkit/tools/layout_tests/webkitpy/update_expectations_from_dashboard_unittest.py @@ -0,0 +1,353 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Unittests to make sure we generate and update the expected-*.txt files +properly after running layout tests.""" + +import os +import sys +import unittest + +import update_expectations_from_dashboard + + +class UpdateExpectationsUnittest(unittest.TestCase): + ########################################################################### + # Tests + + def testKeepsUnmodifiedLines(self): + expectations = """// Ensure comments and newlines don't get stripped. + BUG1 SLOW : 1.html = PASS + + BUG2 : 2.html = FAIL TIMEOUT + """ + exp_results = """// Ensure comments and newlines don't get stripped. 
+ BUG1 SLOW : 1.html = PASS + + BUG2 : 2.html = FAIL TIMEOUT + """ + + updates = [] + self.updateExpectations(expectations, updates, exp_results) + + def testRemoveFlakyExpectation(self): + expectations = "BUG1 : 1.html = TIMEOUT FAIL\n" + expected_results = "BUG1 : 1.html = TIMEOUT\n" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectationSlowTest(self): + expectations = "BUG1 SLOW : 1.html = FAIL\n" + expected_results = "BUG1 SLOW : 1.html = PASS\n" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectationFromOnePlatform(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = """BUG1 MAC WIN DEBUG : 1.html = FAIL + BUG1 RELEASE : 1.html = FAIL + """ + updates = {"1.html": {"LINUX DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveSlow(self): + expectations = "BUG1 SLOW : 1.html = PASS\n" + expected_results = "" + updates = {"1.html": { + "WIN RELEASE": {"extra": "SLOW"}, + "WIN DEBUG": {"extra": "SLOW"}, + "LINUX RELEASE": {"extra": "SLOW"}, + "LINUX DEBUG": {"extra": "SLOW"}, + "MAC RELEASE": {"extra": "SLOW"}, + "MAC DEBUG": {"extra": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddFlakyExpectation(self): + expectations = "BUG1 : 1.html = TIMEOUT\n" + expected_results = "BUG1 : 1.html = TIMEOUT FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExpectationSlowTest(self): + expectations = "BUG1 SLOW : 1.html = PASS\n" + expected_results = "BUG1 SLOW : 1.html = PASS FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExpectation(self): + # not yet implemented + return + + expectations = "" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + 
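# Aside (illustrative, not part of this patch): the tests in this file drive
# UpdateExpectations() with an update dict keyed by "<PLATFORM> <BUILD_TYPE>",
# where tokens under "missing" get added to a test's expectation line and
# tokens under "extra" get removed from it. A rough, hypothetical sketch of
# that core rule follows; apply_update() is an invented name, not part of
# update_expectations_from_dashboard's API.
def apply_update(expectation_tokens, update):
    """Applies one platform's update to a list of expectation tokens."""
    tokens = set(token.lower() for token in expectation_tokens)
    tokens |= set(update.get("missing", "").lower().split())
    tokens -= set(update.get("extra", "").lower().split())
    return sorted(token.upper() for token in tokens)
# For example, apply_update(["FAIL"], {"missing": "TIMEOUT", "extra": "FAIL"})
# returns ["TIMEOUT"], mirroring testRemoveFlakyExpectation and
# testAddFlakyExpectation in spirit; the real updater additionally handles
# SLOW, platform grouping, and comment preservation.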
+ def testAddExpectationForOnePlatform(self): + expectations = "BUG1 WIN : 1.html = TIMEOUT\n" + expected_results = "BUG1 WIN : 1.html = TIMEOUT\n" + # TODO(ojan): Once we add currently unlisted tests, this expect results + # for this test should be: + #expected_results = """BUG1 WIN : 1.html = TIMEOUT + #BUG_AUTO LINUX DEBUG : 1.html = TIMEOUT + #""" + updates = {"1.html": {"LINUX DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddSlow(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 SLOW : 1.html = FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "SLOW"}, + "WIN DEBUG": {"missing": "SLOW"}, + "LINUX RELEASE": {"missing": "SLOW"}, + "LINUX DEBUG": {"missing": "SLOW"}, + "MAC RELEASE": {"missing": "SLOW"}, + "MAC DEBUG": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddRemoveMultipleExpectations(self): + expectations = """BUG1 WIN : 1.html = FAIL + BUG2 MAC : 1.html = FAIL""" + expected_results = """BUG1 SLOW WIN : 1.html = FAIL + BUG2 MAC : 1.html = TIMEOUT\n""" + # TODO(ojan): Once we add currently unlisted tests, this expect results + # for this test should be: + #expected_results = """BUG1 SLOW WIN : 1.html = FAIL + #BUG_AUTO LINUX SLOW : 1.html = PASS + #BUG2 MAC : 1.html = TIMEOUT + #""" + + updates = {"1.html": { + "WIN RELEASE": {"missing": "SLOW"}, + "WIN DEBUG": {"missing": "SLOW"}, + "LINUX RELEASE": {"missing": "SLOW"}, + "LINUX DEBUG": {"missing": "SLOW"}, + "MAC RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, + "MAC DEBUG": {"missing": "TIMEOUT", "extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExistingExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddImageOrTextToFailExpectation(self): + expectations = """BUG1 WIN RELEASE : 1.html = FAIL + BUG1 MAC RELEASE : 1.html = FAIL + BUG1 LINUX RELEASE : 1.html = FAIL + BUG1 LINUX DEBUG : 1.html = TIMEOUT + """ + expected_results = """BUG1 WIN RELEASE : 1.html = IMAGE+TEXT + BUG1 MAC RELEASE : 1.html = IMAGE + BUG1 LINUX RELEASE : 1.html = TEXT + BUG1 LINUX DEBUG : 1.html = TIMEOUT IMAGE+TEXT + """ + updates = {"1.html": { + "WIN RELEASE": {"missing": "IMAGE+TEXT"}, + "MAC RELEASE": {"missing": "IMAGE"}, + "LINUX RELEASE": {"missing": "TEXT"}, + "LINUX DEBUG": {"missing": "IMAGE+TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddOther(self): + # Other is a catchall for more obscure expectations results. + # We should never add it to test_expectations. + expectations = "BUG1 WIN RELEASE : 1.html = FAIL\n" + expected_results = "BUG1 WIN RELEASE : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "OTHER"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveNonExistantExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"extra": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testUpdateSomePlatforms(self): + expectations = "BUG1 DEBUG : 1.html = TEXT PASS\n" + # TODO(ojan): Once we add currently unlisted tests, the expect results + # for this test should include the missing bits for RELEASE. 
+ expected_results = "BUG1 LINUX DEBUG : 1.html = TEXT PASS\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "PASS TEXT"}, + "WIN DEBUG": {"extra": "MISSING TEXT"}, + "MAC RELEASE": {"missing": "PASS TEXT"}, + "MAC DEBUG": {"extra": "MISSING TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddTimeoutToSlowTest(self): + # SLOW tests needing TIMEOUT need manual updating. Should just print + # a log and not modify the test. + expectations = "BUG1 SLOW : 1.html = TEXT\n" + expected_results = "BUG1 SLOW : 1.html = TEXT\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddSlowToTimeoutTest(self): + # SLOW tests needing TIMEOUT need manual updating. Should just print + # a log and not modify the test. + expectations = "BUG1 : 1.html = TIMEOUT\n" + expected_results = "BUG1 : 1.html = TIMEOUT\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testIncludeLastPlatformInFlakiness(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations are a + # subset of the other 5/6, then give them all the same expectations. + expectations = "BUG2 : 1.html = FAIL\n" + expected_results = "BUG2 : 1.html = FAIL TIMEOUT\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, + "WIN DEBUG": {"missing": "TIMEOUT"}, + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "LINUX DEBUG": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testIncludeLastPlatformInFlakinessThreeOutOfFour(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations are a + # subset of the other 5/6, then give them all the same expectations. + expectations = "BUG2 MAC LINUX : 1.html = FAIL\n" + expected_results = "BUG2 LINUX MAC : 1.html = FAIL TIMEOUT\n" + updates = {"1.html": { + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testExcludeLastPlatformFromFlakiness(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations + # are not a subset of the other 5/6, then don't give them + # all the same expectations. + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = """BUG1 DEBUG : 1.html = FAIL TIMEOUT + BUG1 LINUX MAC RELEASE : 1.html = FAIL TIMEOUT + BUG1 WIN RELEASE : 1.html = FAIL CRASH + """ + updates = {"1.html": { + "WIN RELEASE": {"missing": "CRASH"}, + "WIN DEBUG": {"missing": "TIMEOUT"}, + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "LINUX DEBUG": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testStripComments(self): + expectations = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should be removed when the test is. + BUG2 WIN RELEASE : 2.html = TEXT + + // Comment/whitespace after test should remain. + + BUG2 MAC : 2.html = TEXT + + // Comment/whitespace at end of file should remain. + """ + expected_results = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace after test should remain. + + BUG2 MAC DEBUG : 2.html = TEXT + + // Comment/whitespace at end of file should remain. 
+ """ + updates = {"2.html": { + "WIN RELEASE": {"extra": "TEXT"}, + "MAC RELEASE": {"extra": "TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeaveComments(self): + expectations = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should remain. + BUG2 : 2.html = FAIL PASS + """ + expected_results = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should remain. + BUG2 MAC DEBUG : 2.html = FAIL PASS + BUG2 LINUX MAC RELEASE : 2.html = FAIL PASS + """ + updates = {"2.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeaveCommentsIfNoWhitespaceAfterTest(self): + expectations = """// Comment/whitespace should remain. + BUG2 WIN RELEASE : 2.html = TEXT + BUG2 : 1.html = IMAGE + """ + expected_results = """// Comment/whitespace should remain. + BUG2 : 1.html = IMAGE + """ + updates = {"2.html": {"WIN RELEASE": {"extra": "TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeavesUnmodifiedExpectationsUntouched(self): + # Ensures tests that would just change sort order of a line are noops. + expectations = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" + expected_results = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" + updates = {"1.html": {"MAC RELEASE": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + ########################################################################### + # Helper functions + + def updateExpectations(self, expectations, updates, expected_results): + results = update_expectations_from_dashboard.UpdateExpectations( + expectations, updates) + self.assertEqual(expected_results, results) + +if '__main__' == __name__: + unittest.main() -- cgit v1.1