author    dpranke@chromium.org <dpranke@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-09 06:10:10 +0000
committer dpranke@chromium.org <dpranke@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-09 06:10:10 +0000
commit    4e69d7c098a825159a5bef945a96592b8cb732a5 (patch)
tree      70dca6c5b3d81dbdb84b6d72d1cdd9b87271ef38
parent    ed23561932b7be119b92366ce9aa6defd9b0ae02 (diff)
remove old layout_tests code
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@41005 0039d316-1c4b-4281-b951-d872f2087c98
-rwxr-xr-x  webkit/tools/layout_tests/old_run_webkit_tests.py  16
-rwxr-xr-x  webkit/tools/layout_tests/test_output_formatter.bat  1
-rwxr-xr-x  webkit/tools/layout_tests/test_output_formatter.sh  23
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/__init__.py  0
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/apache_http_server.py  204
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure.py  202
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder.py  897
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder_test.py  374
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/html_generator.py  229
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.bat  1
-rwxr-xr-x  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.py  258
-rwxr-xr-x  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.sh  23
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server_base.py  42
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_layout_results_generator.py  159
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_results_generator.py  392
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/lighttpd.conf  89
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/metered_stream.py  72
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/path_utils.py  372
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils.py  25
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_linux.py  223
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_mac.py  177
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_win.py  185
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_expectations.py  792
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_failures.py  242
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_files.py  71
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_shell_thread.py  486
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/websocket_server.py  272
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py  1004
-rwxr-xr-x  webkit/tools/layout_tests/webkitpy/layout_tests/run_chromium_webkit_tests.py  1679
-rwxr-xr-x  webkit/tools/layout_tests/webkitpy/layout_tests/test_output_formatter.py  105
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/test_types/__init__.py  0
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/test_types/fuzzy_image_diff.py  47
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/test_types/image_diff.py  199
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/test_types/test_type_base.py  241
-rw-r--r--  webkit/tools/layout_tests/webkitpy/layout_tests/test_types/text_diff.py  97
35 files changed, 0 insertions, 9199 deletions
diff --git a/webkit/tools/layout_tests/old_run_webkit_tests.py b/webkit/tools/layout_tests/old_run_webkit_tests.py
deleted file mode 100755
index 8449764..0000000
--- a/webkit/tools/layout_tests/old_run_webkit_tests.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Wrapper around webkitpy/layout_tests/run-chromium-webkit-tests.py"""
-import os
-import sys
-
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
- "webkitpy", "layout_tests"))
-import run_chromium_webkit_tests
-
-if __name__ == '__main__':
- options, args = run_chromium_webkit_tests.parse_args()
- run_chromium_webkit_tests.main(options, args)
diff --git a/webkit/tools/layout_tests/test_output_formatter.bat b/webkit/tools/layout_tests/test_output_formatter.bat
deleted file mode 100755
index f2e66b0..0000000
--- a/webkit/tools/layout_tests/test_output_formatter.bat
+++ /dev/null
@@ -1 +0,0 @@
-@%~dp0\..\..\..\third_party\python_24\python.exe %~dp0\webkitpy\layout_tests\test_output_formatter.py -v %*
diff --git a/webkit/tools/layout_tests/test_output_formatter.sh b/webkit/tools/layout_tests/test_output_formatter.sh
deleted file mode 100755
index 7635319..0000000
--- a/webkit/tools/layout_tests/test_output_formatter.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-exec_dir=$(dirname $0)
-
-if [ "$OSTYPE" = "cygwin" ]; then
- system_root=`cygpath "$SYSTEMROOT"`
- PATH="/usr/bin:$system_root/system32:$system_root:$system_root/system32/WBEM"
- export PATH
- PYTHON_PROG="$exec_dir/../../../third_party/python_24/python.exe"
-else
- PYTHON_PROG=python
- # When not using the included python, we don't get automatic site.py paths.
- # Specifically, run_webkit_tests needs the paths in:
- # third_party/python_24/Lib/site-packages/google.pth
- PYTHONPATH="${exec_dir}/../../../tools/python:$PYTHONPATH"
- export PYTHONPATH
-fi
-
-"$PYTHON_PROG" "$exec_dir/webkitpy/layout_tests/test_output_formatter.py" "-v" "$@"
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/__init__.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/__init__.py
+++ /dev/null
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/apache_http_server.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/apache_http_server.py
deleted file mode 100644
index 214613d..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/apache_http_server.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A class to start/stop the apache http server used by layout tests."""
-
-import logging
-import optparse
-import os
-import re
-import subprocess
-import sys
-
-import http_server_base
-import path_utils
-import platform_utils
-
-
-class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
-
- def __init__(self, output_dir):
- """Args:
- output_dir: the absolute path to the layout test result directory
- """
- self._output_dir = output_dir
- self._httpd_proc = None
- path_utils.maybe_make_directory(output_dir)
-
- self.mappings = [{'port': 8000},
- {'port': 8080},
- {'port': 8081},
- {'port': 8443, 'sslcert': True}]
-
-        # The upstream .conf file assumes the existence of /tmp/WebKit as the
-        # place for apache files such as the lock file.
- self._runtime_path = os.path.join("/tmp", "WebKit")
- path_utils.maybe_make_directory(self._runtime_path)
-
- # The PID returned when Apache is started goes away (due to dropping
- # privileges?). The proper controlling PID is written to a file in the
- # apache runtime directory.
- self._pid_file = os.path.join(self._runtime_path, 'httpd.pid')
-
- test_dir = path_utils.path_from_base('third_party', 'WebKit',
- 'LayoutTests')
- js_test_resources_dir = self._cygwin_safe_join(test_dir, "fast", "js",
- "resources")
- mime_types_path = self._cygwin_safe_join(test_dir, "http", "conf",
- "mime.types")
- cert_file = self._cygwin_safe_join(test_dir, "http", "conf",
- "webkit-httpd.pem")
- access_log = self._cygwin_safe_join(output_dir, "access_log.txt")
- error_log = self._cygwin_safe_join(output_dir, "error_log.txt")
- document_root = self._cygwin_safe_join(test_dir, "http", "tests")
-
- executable = platform_utils.apache_executable_path()
- if self._is_cygwin():
- executable = self._get_cygwin_path(executable)
-
- cmd = [executable,
- '-f', self._get_apache_config_file_path(test_dir, output_dir),
- '-C', "\'DocumentRoot %s\'" % document_root,
- '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir,
- '-C', "\'Listen %s\'" % "127.0.0.1:8000",
- '-C', "\'Listen %s\'" % "127.0.0.1:8081",
- '-c', "\'TypesConfig \"%s\"\'" % mime_types_path,
- '-c', "\'CustomLog \"%s\" common\'" % access_log,
- '-c', "\'ErrorLog \"%s\"\'" % error_log,
- '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME",
- os.environ.get("USER", ""))]
-
- if self._is_cygwin():
- cygbin = path_utils.path_from_base('third_party', 'cygwin', 'bin')
- # Not entirely sure why, but from cygwin we need to run the
- # httpd command through bash.
- self._start_cmd = [
- os.path.join(cygbin, 'bash.exe'),
- '-c',
- 'PATH=%s %s' % (self._get_cygwin_path(cygbin), " ".join(cmd)),
- ]
- else:
-            # TODO(ojan): When we get cygwin using Apache 2, set the
-            # cert file for cygwin as well.
- cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file])
- # Join the string here so that Cygwin/Windows and Mac/Linux
- # can use the same code. Otherwise, we could remove the single
- # quotes above and keep cmd as a sequence.
- self._start_cmd = " ".join(cmd)
-
- def _is_cygwin(self):
- return sys.platform in ("win32", "cygwin")
-
- def _cygwin_safe_join(self, *parts):
- """Returns a platform appropriate path."""
- path = os.path.join(*parts)
- if self._is_cygwin():
- return self._get_cygwin_path(path)
- return path
-
- def _get_cygwin_path(self, path):
- """Convert a Windows path to a cygwin path.
-
- The cygpath utility insists on converting paths that it thinks are
- Cygwin root paths to what it thinks the correct roots are. So paths
- such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin"
- are converted to plain "/usr/bin". To avoid this, we
- do the conversion manually.
-
- The path is expected to be an absolute path, on any drive.
- """
- drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE)
-
- def lower_drive(matchobj):
- return '/cygdrive/%s/' % matchobj.group(1).lower()
- path = drive_regexp.sub(lower_drive, path)
- return path.replace('\\', '/')
-
- def _get_apache_config_file_path(self, test_dir, output_dir):
- """Returns the path to the apache config file to use.
- Args:
- test_dir: absolute path to the LayoutTests directory.
- output_dir: absolute path to the layout test results directory.
- """
- httpd_config = platform_utils.apache_config_file_path()
- httpd_config_copy = os.path.join(output_dir, "httpd.conf")
- httpd_conf = open(httpd_config).read()
- if self._is_cygwin():
- # This is a gross hack, but it lets us use the upstream .conf file
- # and our checked in cygwin. This tells the server the root
- # directory to look in for .so modules. It will use this path
- # plus the relative paths to the .so files listed in the .conf
- # file. We have apache/cygwin checked into our tree so
- # people don't have to install it into their cygwin.
- cygusr = path_utils.path_from_base('third_party', 'cygwin', 'usr')
- httpd_conf = httpd_conf.replace('ServerRoot "/usr"',
- 'ServerRoot "%s"' % self._get_cygwin_path(cygusr))
-
- # TODO(ojan): Instead of writing an extra file, checkin a conf file
- # upstream. Or, even better, upstream/delete all our chrome http
- # tests so we don't need this special-cased DocumentRoot and then
- # just use the upstream
- # conf file.
- chrome_document_root = path_utils.path_from_base('webkit', 'data',
- 'layout_tests')
- if self._is_cygwin():
- chrome_document_root = self._get_cygwin_path(chrome_document_root)
- httpd_conf = (httpd_conf +
- self._get_virtual_host_config(chrome_document_root, 8081))
-
- f = open(httpd_config_copy, 'wb')
- f.write(httpd_conf)
- f.close()
-
- if self._is_cygwin():
- return self._get_cygwin_path(httpd_config_copy)
- return httpd_config_copy
-
- def _get_virtual_host_config(self, document_root, port, ssl=False):
- """Returns a <VirtualHost> directive block for an httpd.conf file.
-        It will listen on 127.0.0.1 on the given port.
- """
- return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port,
- 'DocumentRoot %s' % document_root,
- ssl and 'SSLEngine On' or '',
- '</VirtualHost>', ''))
-
- def _start_httpd_process(self):
- """Starts the httpd process and returns whether there were errors."""
- # Use shell=True because we join the arguments into a string for
-        # the sake of Windows/Cygwin, and it needs quoting that breaks
- # shell=False.
- self._httpd_proc = subprocess.Popen(self._start_cmd,
- stderr=subprocess.PIPE,
- shell=True)
- err = self._httpd_proc.stderr.read()
- if len(err):
- logging.debug(err)
- return False
- return True
-
- def start(self):
- """Starts the apache http server."""
- # Stop any currently running servers.
- self.stop()
-
- logging.debug("Starting apache http server")
- server_started = self.wait_for_action(self._start_httpd_process)
- if server_started:
- logging.debug("Apache started. Testing ports")
- server_started = self.wait_for_action(
- self.is_server_running_on_all_ports)
-
- if server_started:
- logging.debug("Server successfully started")
- else:
- raise Exception('Failed to start http server')
-
- def stop(self):
- """Stops the apache http server."""
- logging.debug("Shutting down any running http servers")
- httpd_pid = None
- if os.path.exists(self._pid_file):
- httpd_pid = int(open(self._pid_file).readline())
- path_utils.shut_down_http_server(httpd_pid)
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure.py
deleted file mode 100644
index 8a4fb48..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-CHROMIUM_WIN = "chromium-win"
-CHROMIUM_MAC = "chromium-mac"
-CHROMIUM_LINUX = "chromium-linux"
-WEBKIT_WIN_TITLE = "WebKit Win"
-WEBKIT_MAC_TITLE = "WebKit Mac"
-WEBKIT_TITLE = "WebKit"
-UNKNOWN = "Unknown"
-
-EXPECTED_IMAGE_FILE_ENDING = "-expected.png"
-ACTUAL_IMAGE_FILE_ENDING = "-actual.png"
-UPSTREAM_IMAGE_FILE_ENDING = "-expected-upstream.png"
-EXPECTED_TEXT_FILE_ENDING = "-expected.txt"
-ACTUAL_TEXT_FILE_ENDING = "-actual.txt"
-DIFF_IMAGE_FILE_ENDING = "-diff.png"
-DIFF_TEXT_FILE_ENDING = "-diff.txt"
-
-CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/"
-CHROMIUM_TRAC_HOME = CHROMIUM_SRC_HOME + "data/layout_tests/"
-WEBKIT_TRAC_HOME = "http://trac.webkit.org/browser/trunk/LayoutTests/"
-WEBKIT_SVN_HOSTNAME = "svn.webkit.org"
-THIRD_PARTY = "third_party"
-
-WEBKIT_PLATFORM_URL_BASE = WEBKIT_TRAC_HOME + "platform"
-WEBKIT_LAYOUT_TEST_BASE_URL = "http://svn.webkit.org/repository/webkit/trunk/"
-WEBKIT_IMAGE_BASELINE_BASE_URL_WIN = (WEBKIT_LAYOUT_TEST_BASE_URL +
- "LayoutTests/platform/win/")
-WEBKIT_IMAGE_BASELINE_BASE_URL_MAC = (WEBKIT_LAYOUT_TEST_BASE_URL +
- "LayoutTests/platform/mac/")
-WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_MAC = WEBKIT_PLATFORM_URL_BASE + "/mac/"
-WEBKIT_TRAC_IMAGE_BASELINE_BASE_URL_WIN = WEBKIT_PLATFORM_URL_BASE + "/win/"
-
-LAYOUT_TEST_RESULTS_DIR = "layout-test-results"
-
-FAIL = "FAIL"
-TIMEOUT = "TIMEOUT"
-CRASH = "CRASH"
-PASS = "PASS"
-WONTFIX = "WONTFIX"
-
-
-class Failure(object):
- """
- This class represents a failure in the test output, and is
- intended as a data model object.
- """
-
- def __init__(self):
- self.platform = ""
- self.test_path = ""
- self.text_diff_mismatch = False
- self.image_mismatch = False
- self.timeout = False
- self.crashed = False
- self.text_baseline_url = ""
- self.image_baseline_url = ""
- self.text_baseline_age = ""
- self.image_baseline_age = ""
- self.test_age = ""
- self.text_baseline_local = ""
- self.image_baseline_local = ""
- self.text_actual_local = ""
- self.image_actual_local = ""
- self.image_baseline_upstream_url = ""
- self.image_baseline_upstream_local = ""
- self.test_expectations_line = ""
- self.flakiness = 0
-
- def get_expected_image_filename(self):
- return self._rename_end_of_test_path(EXPECTED_IMAGE_FILE_ENDING)
-
- def get_actual_image_filename(self):
- return self._rename_end_of_test_path(ACTUAL_IMAGE_FILE_ENDING)
-
- def get_expected_text_filename(self):
- return self._rename_end_of_test_path(EXPECTED_TEXT_FILE_ENDING)
-
- def get_actual_text_filename(self):
- return self._rename_end_of_test_path(ACTUAL_TEXT_FILE_ENDING)
-
- def get_image_diff_filename(self):
- return self._rename_end_of_test_path(DIFF_IMAGE_FILE_ENDING)
-
- def get_text_diff_filename(self):
- return self._rename_end_of_test_path(DIFF_TEXT_FILE_ENDING)
-
- def get_image_upstream_filename(self):
- return self._rename_end_of_test_path(UPSTREAM_IMAGE_FILE_ENDING)
-
- def _rename_end_of_test_path(self, suffix):
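-        # e.g. "fast/dom/one.html" with suffix "-expected.png" becomes
-        # "fast/dom/one-expected.png".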
- last_index = self.test_path.rfind(".")
- if last_index == -1:
- return self.test_path
- return self.test_path[0:last_index] + suffix
-
- def get_test_home(self):
- if self.test_path.startswith("chrome"):
- return CHROMIUM_TRAC_HOME + self.test_path
- return WEBKIT_TRAC_HOME + self.test_path
-
- def get_image_baseline_trac_home(self):
- if self.is_image_baseline_in_webkit():
- return self._get_trac_home(self.image_baseline_url)
- return self.image_baseline_url
-
- def get_text_baseline_trac_home(self):
- if self.text_baseline_url and self.is_text_baseline_in_webkit():
- return self._get_trac_home(self.text_baseline_url)
- return self.text_baseline_url
-
- def _get_trac_home(self, file):
- return WEBKIT_TRAC_HOME + file[file.find("LayoutTests"):]
-
- def get_text_baseline_location(self):
- return self._get_file_location(self.text_baseline_url)
-
- def get_image_baseline_location(self):
- return self._get_file_location(self.image_baseline_url)
-
- # TODO(gwilson): Refactor this logic so it can be used by multiple scripts.
- # TODO(gwilson): Change this so that it respects the fallback order of
- # different platforms. (If platform is mac, the fallback should be
- # different.)
-
- def _get_file_location(self, file):
- if not file:
- return None
- if file.find(CHROMIUM_WIN) > -1:
- return CHROMIUM_WIN
- if file.find(CHROMIUM_MAC) > -1:
- return CHROMIUM_MAC
- if file.find(CHROMIUM_LINUX) > -1:
- return CHROMIUM_LINUX
- if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_WIN):
- return WEBKIT_WIN_TITLE
- if file.startswith(WEBKIT_IMAGE_BASELINE_BASE_URL_MAC):
- return WEBKIT_MAC_TITLE
- # TODO(gwilson): Add mac-snowleopard, mac-leopard, mac-tiger here.
- if file.startswith(WEBKIT_LAYOUT_TEST_BASE_URL):
- return WEBKIT_TITLE
- return UNKNOWN
-
- def _is_file_in_webkit(self, file):
- return file != None and (file.find(WEBKIT_SVN_HOSTNAME) > -1 or
- file.find(THIRD_PARTY) > -1)
-
- def is_image_baseline_in_chromium(self):
- return not self.is_image_baseline_in_webkit()
-
- def is_image_baseline_in_webkit(self):
- return self._is_file_in_webkit(self.image_baseline_url)
-
- def is_text_baseline_in_chromium(self):
- return not self.is_text_baseline_in_webkit()
-
- def is_text_baseline_in_webkit(self):
- return self._is_file_in_webkit(self.text_baseline_url)
-
- def get_text_result_location_in_zip_file(self):
- return self._get_file_location_in_zip_file(
- self.get_actual_text_filename())
-
- def get_image_result_location_in_zip_file(self):
- return self._get_file_location_in_zip_file(
- self.get_actual_image_filename())
-
- def _get_file_location_in_zip_file(self, file):
- return "%s/%s" % (LAYOUT_TEST_RESULTS_DIR, file)
-
- # TODO(gwilson): implement this method.
- def get_all_baseline_locations(self):
- return None
-
- # This method determines whether the test is actually expected to fail,
- # in order to know whether to retrieve expected test results for it.
-    # (test results don't exist for tests expected to fail/crash.)
-
- def is_expected_to_fail(self):
- return self._find_keyword_in_expectations(FAIL)
-
- def is_expected_to_timeout(self):
- return self._find_keyword_in_expectations(TIMEOUT)
-
- def is_expected_to_crash(self):
- return self._find_keyword_in_expectations(CRASH)
-
- def is_expected_to_pass(self):
- return self._find_keyword_in_expectations(PASS)
-
- def is_wont_fix(self):
- return self._find_keyword_in_expectations(WONTFIX)
-
- def _find_keyword_in_expectations(self, keyword):
- if (not self.test_expectations_line or
- len(self.test_expectations_line) == 0):
- return False
- if self.test_expectations_line.find(keyword) > -1:
- return True
- return False
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder.py
deleted file mode 100644
index 072c9cb..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder.py
+++ /dev/null
@@ -1,897 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# TODO(gwilson): 1. Change the text differ to use external utils.
-#                2. Change test_expectations parsing to use the existing
-#                   logic in layout_package.test_expectations.
-
-import difflib
-import errno
-import os
-import path_utils
-import platform_utils
-import re
-import shutil
-import subprocess
-import sys
-import urllib2
-import zipfile
-
-from failure import Failure
-
-WEBKIT_TRAC_HOSTNAME = "trac.webkit.org"
-WEBKIT_LAYOUT_TEST_BASE_URL = ("http://svn.webkit.org/repository/"
- "webkit/trunk/LayoutTests/")
-WEBKIT_PLATFORM_BASELINE_URL = (WEBKIT_LAYOUT_TEST_BASE_URL +
- "platform/%s/")
-
-BUILDBOT_BASE = "http://build.chromium.org/buildbot/"
-WEBKIT_BUILDER_BASE = BUILDBOT_BASE + "waterfall/builders/%s"
-FYI_BUILDER_BASE = BUILDBOT_BASE + "waterfall.fyi/builders/%s"
-RESULTS_URL_BASE = "/builds/%s/steps/webkit_tests/logs/stdio"
-ARCHIVE_URL_BASE = "/builds/%s/steps/archive_webkit_tests_results/logs/stdio"
-ZIP_FILE_URL_BASE = (BUILDBOT_BASE +
- "layout_test_results/%s/%s/layout-test-results.zip")
-CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/"
-LAYOUT_TEST_REPO_BASE_URL = CHROMIUM_SRC_HOME + "data/layout_tests/"
-
-# TODO(gwilson): Put flaky test dashboard URL here when ready.
-FLAKY_TEST_URL = ""
-FLAKY_TEST_REGEX = "%s</a></td><td align=right>(\d+)</td>"
-
-TEST_EXPECTATIONS_URL = (CHROMIUM_SRC_HOME +
- "tools/layout_tests/test_expectations.txt")
-
-# Failure types as found in builder stdio.
-TEXT_DIFF_MISMATCH = "Text diff mismatch"
-IMAGE_MISMATCH = "Image mismatch"
-TEST_TIMED_OUT = "Test timed out"
-TEST_SHELL_CRASHED = "Test shell crashed"
-
-CHROMIUM_WIN = "chromium-win"
-CHROMIUM_WIN_XP = "chromium-win-xp"
-CHROMIUM_WIN_VISTA = "chromium-win-vista"
-CHROMIUM_WIN_7 = "chromium-win-7"
-CHROMIUM_MAC = "chromium-mac"
-CHROMIUM_LINUX = "chromium-linux"
-PLATFORM = "platform"
-LAYOUTTESTS = "LayoutTests"
-
-# These platform dirs must be in order of their precedence.
-# TODO(gwilson): This is not the same fallback order as test_shell. This list
-# should be reversed, and we need to add detection for the type of OS that
-# the given builder is running.
-WEBKIT_MAC_PLATFORM_DIRS = ["mac-leopard", "mac-snowleopard", "mac"]
-WEBKIT_WIN_PLATFORM_DIRS = ["win", "mac"]
-CHROMIUM_MAC_PLATFORM_DIRS = [CHROMIUM_MAC]
-CHROMIUM_WIN_PLATFORM_DIRS = [CHROMIUM_WIN_XP, CHROMIUM_WIN_VISTA,
- CHROMIUM_WIN_7, CHROMIUM_WIN]
-CHROMIUM_LINUX_PLATFORM_DIRS = [CHROMIUM_LINUX, CHROMIUM_WIN]
-
-ARCHIVE_URL_REGEX = "last.*change: (\d+)"
-BUILD_NAME_REGEX = "build name: ([^\s]*)"
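-# These match archive-step stdio lines such as "last change: 22057" and
-# "build name: webkit-rel".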
-CHROMIUM_FILE_AGE_REGEX = '<br />\s*Modified\s*<em>.*</em> \((.*)\) by'
-THREAD_NAME_REGEX = "[\S]+?"
-TEST_PATH_REGEX = "[\S]+?"
-FAILED_REGEX = ("DEBUG " + THREAD_NAME_REGEX + " " +
- "(" + TEST_PATH_REGEX + ") failed:\s*"
- "(" + TEXT_DIFF_MISMATCH + ")?\s*"
- "(" + IMAGE_MISMATCH + ")?\s*"
- "(" + TEST_TIMED_OUT + ")?\s*"
- "(" + TEST_SHELL_CRASHED + ")?")
-FAILED_UNEXPECTED_REGEX = " [^\s]+(?: = .*?)?\n"
-LAST_BUILD_REGEX = ("<h2>Recent Builds:</h2>"
- "[\s\S]*?<a href=\"../builders/.*?/builds/(\d+)\">")
-# Sometimes the lines of hyphens get interrupted by multiple processes
-# outputting to stdio, so don't rely on them being contiguous.
-SUMMARY_REGEX = ("\d+ tests ran as expected, "
- "\d+ didn't:(.*?)-{78}") # -{78} --> 78 dashes in a row.
-SUMMARY_REGRESSIONS = "Regressions:.*?\n((?: [^\s]+(?: = .*?)?\n)+)"
-TEST_EXPECTATIONS_PLATFORM_REGEX = "((WONTFIX |BUG.* )+.* %s.* : %s = [^\n]*)"
-TEST_EXPECTATIONS_NO_PLATFORM_REGEX = ("((WONTFIX |BUG.* )+.*"
- "(?!WIN)(?!LINUX)(?!MAC).* :"
- " %s = [^\n]*)")
-
-WEBKIT_FILE_AGE_REGEX = ('<a class="file" title="View File" href="%s">.*?</a>.'
- '*?<td class="age" .*?>\s*'
- '<a class="timeline" href=".*?" title=".*?">(.*?)</a>')
-
-LOCAL_BASELINE_REGEXES = [
- ".*/third_party/Webkit/LayoutTests/platform/.*?(/.*)",
- ".*/third_party/Webkit/LayoutTests(/.*)",
- ".*/webkit/data/layout_tests/platform/.*?/LayoutTests(/.*)",
- ".*/webkit/data/layout_tests/platform/.*?(/.*)",
- ".*/webkit/data/layout_tests(/.*)",
- "(/.*)"]
-
-UPSTREAM_IMAGE_FILE_ENDING = "-upstream.png"
-
-TEST_EXPECTATIONS_WONTFIX = "WONTFIX"
-
-TEMP_ZIP_DIR = "temp-zip-dir"
-
-TARGETS = ["Release", "Debug"]
-
-
-def get_url_base(use_fyi):
- if use_fyi:
- return FYI_BUILDER_BASE
- return WEBKIT_BUILDER_BASE
-
-
-def get_results_url(build, platform, use_fyi=False):
- return (get_url_base(use_fyi) + RESULTS_URL_BASE) % (platform, build)
-
-
-def get_archive_url(build, platform, use_fyi=False):
- return (get_url_base(use_fyi) + ARCHIVE_URL_BASE) % (platform, build)
-
-
-def get_zip_file_url(build, platform):
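-    # e.g. get_zip_file_url("22057", "webkit-rel") ->
-    # ".../layout_test_results/webkit-rel/22057/layout-test-results.zip"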
- return ZIP_FILE_URL_BASE % (platform, build)
-
-
-def get_builder_url(platform, use_fyi=False):
- return get_url_base(use_fyi) % platform
-
-
-# TODO(gwilson): Once the new flakiness dashboard is done, connect it here.
-def get_flaky_test_url(platform):
- return ""
-
-
-# TODO(gwilson): can we refactor these into the resourcegatherer?
-def is_linux_platform(platform):
- return (platform and platform.find("Linux") > -1)
-
-
-def is_mac_platform(platform):
- return (platform and platform.find("Mac") > -1)
-
-
-def create_directory(dir):
- """
- Method that creates the directory structure given.
- This will create directories recursively until the given dir exists.
- """
- if not os.path.exists(dir):
- os.makedirs(dir, 0777)
-
-
-def extract_first_value(string, regex):
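-    # e.g. extract_first_value("build name: webkit-rel", BUILD_NAME_REGEX)
-    # returns "webkit-rel".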
- m = re.search(regex, string)
- if m and m.group(1):
- return m.group(1)
- return None
-
-
-def extract_single_regex_at_url(url, regex):
- content = scrape_url(url)
- m = re.search(regex, content, re.DOTALL)
- if m and m.group(1):
- return m.group(1)
- return None
-
-
-def scrape_url(url):
- return urllib2.urlopen(urllib2.Request(url)).read()
-
-
-def get_image_diff_executable():
- for target in TARGETS:
- try:
- return path_utils.image_diff_path(target)
-        except Exception, e:
-            # This build target did not exist; try the next one.
-            continue
- raise Exception("No image diff executable could be found. You may need "
- "to build the image diff project under at least one build "
- "target to create image diffs.")
-
-
-def generate_png_diff(file1, file2, output_file):
- _compare_available = False
- try:
- executable = get_image_diff_executable()
- cmd = [executable, '--diff', file1, file2, output_file]
- _compare_available = True
- except Exception, e:
- print "No command line to compare %s and %s : %s" % (file1, file2, e)
-
- result = 1
- if _compare_available:
- try:
- result = subprocess.call(cmd)
- except OSError, e:
- if e.errno == errno.ENOENT or e.errno == errno.EACCES:
- _compare_available = False
- print "No possible comparison between %s and %s." % (
- file1, file2)
- else:
- raise e
- if not result:
- print "The given PNG images were the same!"
- return _compare_available
-
-
-# TODO(gwilson): Change this to use the pretty print differs.
-def generate_text_diff(file1, file2, output_file):
- # Open up expected and actual text files and use difflib to compare them.
- dataA = open(file1, 'r').read()
- dataB = open(file2, 'r').read()
- d = difflib.Differ()
- diffs = list(d.compare(dataA.split("\n"), dataB.split("\n")))
- output = open(output_file, 'w')
- output.write("\n".join(diffs))
- output.close()
-
-
-class BaselineCandidate(object):
- """Simple data object for holding the URL and local file path of a
- possible baseline. The local file path is meant to refer to the locally-
- cached version of the file at the URL."""
-
- def __init__(self, local, url):
- self.local_file = local
- self.baseline_url = url
-
- def is_valid(self):
- return self.local_file != None and self.baseline_url != None
-
-
-class FailureFinder(object):
-
- def __init__(self,
- build,
- builder_name,
- exclude_known_failures,
- test_regex,
- output_dir,
- max_failures,
- verbose,
- builder_output_log_file=None,
- archive_step_log_file=None,
- zip_file=None,
- test_expectations_file=None):
- self.build = build
- # TODO(gwilson): add full url-encoding for the platform.
- self.set_platform(builder_name)
- self.exclude_known_failures = exclude_known_failures
- self.exclude_wontfix = True
- self.test_regex = test_regex
- self.output_dir = output_dir
- self.max_failures = max_failures
- self.verbose = verbose
- self.fyi_builder = False
- self._flaky_test_cache = {}
- self._test_expectations_cache = None
- # If true, scraping will still happen but no files will be downloaded.
- self.dont_download = False
- # Local caches of log files. If set, the finder will use these files
- # rather than scraping them from the buildbot.
- self.builder_output_log_file = builder_output_log_file
- self.archive_step_log_file = archive_step_log_file
- self.zip_file = zip_file
- self.test_expectations_file = test_expectations_file
- self.delete_zip_file = True
- # Determines if the script should scrape the baselines from webkit.org
- # and chromium.org, or if it should use local baselines in the current
- # checkout.
- self.use_local_baselines = False
-
- def set_platform(self, platform):
- self.platform = platform.replace(" ", "%20")
-
- # TODO(gwilson): Change this to get the last build that finished
- # successfully.
-
- def get_last_build(self):
- """
- Returns the last build number for this platform.
-        If self.fyi_builder is true, this only looks at the fyi builder.
- """
- try:
- return extract_single_regex_at_url(get_builder_url(self.platform,
- self.fyi_builder), LAST_BUILD_REGEX)
- except urllib2.HTTPError:
- if not self.fyi_builder:
- self.fyi_builder = True
- return self.get_last_build()
-
- def get_failures(self):
- if not self.build:
- self.build = self.get_last_build()
- if self.verbose:
- print "Using build number %s" % self.build
-
- if self.use_local_baselines:
- self._build_baseline_indexes()
- self.failures = self._get_failures_from_builder()
- if (self.failures and
- (self._download_result_resources() or self.dont_download)):
- return self.failures
- return None
-
- def _get_failures_from_builder(self):
- """
- Returns a list of failures for the given build and platform by scraping
- the buildbots and parsing their results.
- The list returned contains Failure class objects.
- """
- if self.verbose:
- print "Fetching failures from buildbot..."
-
- content = self._scrape_builder_output()
- if not content:
- return None
- matches = self._find_matches_in_builder_output(content)
-
- if self.verbose:
- print "%s failures found." % len(matches)
-
- failures = []
- matches.sort()
- for match in matches:
- if (len(failures) < self.max_failures and
- (not self.test_regex or match[0].find(self.test_regex) > -1)):
- failure = self._create_failure_from_match(match)
- if self.verbose:
- print failure.test_path
- failures.append(failure)
-
- return failures
-
- def _scrape_builder_output(self):
- # If the build log file is specified, use that instead of scraping.
- if self.builder_output_log_file:
- log = open(self.builder_output_log_file, 'r')
- return "".join(log.readlines())
-
- # Scrape the failures from the buildbot for this revision.
- try:
-
- return scrape_url(get_results_url(self.build, self.platform,
- self.fyi_builder))
- except:
- # If we hit a problem, and we're not on the FYI builder, try it
- # again on the FYI builder.
- if not self.fyi_builder:
- if self.verbose:
- print ("Could not find builder on waterfall, trying fyi "
- "waterfall...")
- self.fyi_builder = True
- return self._scrape_builder_output()
- print "I could not find that builder, or build did not compile."
- print "Check that the builder name matches exactly "
- print "(case sensitive), and wrap quotes around builder names "
- print "that have spaces."
- return None
-
- # TODO(gwilson): The type of failure is now output in the summary, so no
- # matching between the summary and the earlier output is necessary.
- # Change this method and others to derive failure types from summary only.
-
- def _find_matches_in_builder_output(self, output):
- matches = []
- matches = re.findall(FAILED_REGEX, output, re.MULTILINE)
- if self.exclude_known_failures:
- summary = re.search(SUMMARY_REGEX, output, re.DOTALL)
- regressions = []
- if summary:
- regressions = self._find_regressions_in_summary(
- summary.group(1))
- matches = self._match_regressions_to_failures(regressions, matches)
- return matches
-
- def _create_failure_from_match(self, match):
- failure = Failure()
- failure.text_diff_mismatch = match[1] != ''
- failure.image_mismatch = match[2] != ''
- failure.crashed = match[4] != ''
- failure.timeout = match[3] != ''
- failure.test_path = match[0]
- failure.platform = self.platform
- return failure
-
- def _find_regressions_in_summary(self, summary):
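-        # e.g. a summary line "  fast/dom/x.html = FAIL" yields
-        # "fast/dom/x.html".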
- regressions = []
- if not summary or not len(summary):
- return regressions
- matches = re.findall(SUMMARY_REGRESSIONS, summary, re.DOTALL)
- for match in matches:
- lines = re.findall(FAILED_UNEXPECTED_REGEX, match, re.DOTALL)
- for line in lines:
- clipped = line.strip()
- if clipped.find("=") > -1:
- clipped = clipped[:clipped.find("=") - 1]
- regressions.append(clipped)
- return regressions
-
- def _match_regressions_to_failures(self, regressions, failures):
- matches = []
- for regression in regressions:
- for failure in failures:
- if failure[0].find(regression) > -1:
- matches.append(failure)
- break
- return matches
-
- # TODO(gwilson): add support for multiple conflicting build numbers by
- # renaming the zip file and naming the directory appropriately.
-
- def _download_result_resources(self):
- """
- Finds and downloads/extracts all of the test results (pixel/text
- output) for all of the given failures.
- """
-
- target_zip = "%s/layout-test-results-%s.zip" % (self.output_dir,
- self.build)
- if self.zip_file:
- filename = self.zip_file
- self.delete_zip_file = False
- else:
- revision, build_name = \
- self._get_revision_and_build_from_archive_step()
- zip_url = get_zip_file_url(revision, build_name)
- if self.verbose:
- print "Downloading zip file from %s to %s" % (zip_url,
- target_zip)
- filename = self._download_file(zip_url, target_zip, "b")
- if not filename:
- if self.verbose:
- print ("Could not download zip file from %s. "
- "Does it exist?" % zip_url)
- return False
-
- if zipfile.is_zipfile(filename):
- zip = zipfile.ZipFile(filename)
- if self.verbose:
- print 'Extracting files...'
- directory = "%s/layout-test-results-%s" % (self.output_dir,
- self.build)
- create_directory(directory)
- self._unzip_zipfile(zip, TEMP_ZIP_DIR)
-
-        # Iterate over a copy, since failures may be removed from the list.
-        for failure in self.failures[:]:
- failure.test_expectations_line = (
- self._get_test_expectations_line(failure.test_path))
- if self.exclude_wontfix and failure.is_wont_fix():
- self.failures.remove(failure)
- continue
- if failure.text_diff_mismatch:
- self._populate_text_failure(failure, directory, zip)
- if failure.image_mismatch:
- self._populate_image_failure(failure, directory, zip)
- if not self.use_local_baselines:
- failure.test_age = self._get_file_age(
- failure.get_test_home())
- failure.flakiness = self._get_flakiness(failure.test_path,
- self.platform)
- zip.close()
- if self.verbose:
- print "Files extracted."
- if self.delete_zip_file:
- if self.verbose:
- print "Cleaning up zip file..."
- path_utils.remove_directory(TEMP_ZIP_DIR)
- os.remove(filename)
- return True
- else:
- if self.verbose:
- print ("Downloaded file '%s' doesn't look like a zip file."
- % filename)
- return False
-
- def _unzip_zipfile(self, zip, base_dir):
- for i, name in enumerate(zip.namelist()):
- if not name.endswith('/'):
- extracted_file_path = os.path.join(base_dir, name)
- try:
- (path, filename) = os.path.split(extracted_file_path)
- os.makedirs(path, 0777)
- except:
- pass
- outfile = open(extracted_file_path, 'wb')
- outfile.write(zip.read(name))
- outfile.flush()
- outfile.close()
- os.chmod(extracted_file_path, 0777)
-
- def _get_revision_and_build_from_archive_step(self):
- if self.archive_step_log_file:
- log = open(self.archive_step_log_file, 'r')
- content = "".join(log.readlines())
- else:
- content = scrape_url(get_archive_url(self.build, self.platform,
- self.fyi_builder))
- revision = extract_first_value(content, ARCHIVE_URL_REGEX)
- build_name = extract_first_value(content, BUILD_NAME_REGEX)
- return (revision, build_name)
-
- def _populate_text_failure(self, failure, directory, zip):
- baseline = self._get_baseline(failure.get_expected_text_filename(),
- directory)
- failure.text_baseline_local = baseline.local_file
- failure.text_baseline_url = baseline.baseline_url
- failure.text_baseline_age = (
- self._get_file_age(failure.get_text_baseline_trac_home()))
- failure.text_actual_local = "%s/%s" % (
- directory, failure.get_actual_text_filename())
- if (baseline and baseline.is_valid() and not self.dont_download):
- self._copy_file_from_zip_dir(
- failure.get_text_result_location_in_zip_file(),
- failure.text_actual_local)
- generate_text_diff(failure.text_baseline_local,
- failure.text_actual_local,
- directory + "/" + failure.get_text_diff_filename())
-
- def _populate_image_failure(self, failure, directory, zip):
- baseline = self._get_baseline(failure.get_expected_image_filename(),
- directory)
- failure.image_baseline_local = baseline.local_file
- failure.image_baseline_url = baseline.baseline_url
- if baseline and baseline.is_valid():
- failure.image_baseline_age = (
- self._get_file_age(failure.get_image_baseline_trac_home()))
- failure.image_actual_local = "%s/%s" % (directory,
- failure.get_actual_image_filename())
- self._copy_file_from_zip_dir(
- failure.get_image_result_location_in_zip_file(),
- failure.image_actual_local)
- if (not generate_png_diff(failure.image_baseline_local,
- failure.image_actual_local,
- "%s/%s" % (directory,
- failure.get_image_diff_filename()))
- and self.verbose):
- print "Could not generate PNG diff for %s" % failure.test_path
- if (failure.is_image_baseline_in_chromium() or
- self.use_local_baselines):
- upstream_baseline = (self._get_upstream_baseline(
- failure.get_expected_image_filename(), directory))
- failure.image_baseline_upstream_local = \
- upstream_baseline.local_file
- failure.image_baseline_upstream_url = \
- upstream_baseline.baseline_url
-
- def _get_baseline(self, filename, directory, upstream_only=False):
- """ Search and download the baseline for the given test (put it in the
- directory given.)"""
-
- local_filename = os.path.join(directory, filename)
- local_directory = local_filename[:local_filename.rfind("/")]
- if upstream_only:
- last_index = local_filename.rfind(".")
- if last_index > -1:
- local_filename = (local_filename[:last_index] +
- UPSTREAM_IMAGE_FILE_ENDING)
-
- download_file_modifiers = ""
- if local_filename.endswith(".png"):
- download_file_modifiers = "b" # binary file
-
- if not self.dont_download:
- create_directory(local_directory)
-
- local_baseline = None
- url_of_baseline = None
-
- if self.use_local_baselines:
- test_path_key = self._normalize_baseline_identifier(filename)
- dict = self.baseline_dict
- if upstream_only:
- dict = self.webkit_baseline_dict
- if test_path_key in dict:
- local_baseline = dict[test_path_key]
- url_of_baseline = local_baseline
- shutil.copy(local_baseline, local_directory)
- elif self.verbose:
- print ("Baseline %s does not exist in the index." %
- test_path_key)
- else:
- index = 0
- possible_files = self._get_possible_file_list(filename,
- upstream_only)
- # Download the baselines from the webkit.org site.
- while local_baseline == None and index < len(possible_files):
- local_baseline = self._download_file(possible_files[index],
- local_filename,
- download_file_modifiers,
- True)
- if local_baseline:
- url_of_baseline = possible_files[index]
- index += 1
-
- if not local_baseline:
- if self.verbose:
- print "Could not find any baseline for %s" % filename
- else:
- local_baseline = os.path.normpath(local_baseline)
- if local_baseline and self.verbose:
- print "Found baseline: %s" % url_of_baseline
-
- return BaselineCandidate(local_baseline, url_of_baseline)
-
- def _add_baseline_paths(self, list, base_path, directories):
- for dir in directories:
- list.append(os.path.join(base_path, dir))
-
- # TODO(gwilson): Refactor this method to use
- # platform_utils_*.BaselineSearchPath instead of custom logic.
-
- def _build_baseline_indexes(self):
- """ Builds an index of all the known local baselines in both chromium
-        and webkit. Two indexes are created: a webkit-specific dictionary (no
-        chromium baselines) and an overall dictionary (both). Each one has a
- structure like: "/fast/dom/one-expected.txt" ->
- "C:\\path\\to\\fast\\dom\\one-expected.txt"
- """
- if self.verbose:
- print "Building index of all local baselines..."
-
- self.baseline_dict = {}
- self.webkit_baseline_dict = {}
-
- base = os.path.abspath(os.path.curdir)
- webkit_base = path_utils.path_from_base('third_party', 'Webkit',
- 'LayoutTests')
- chromium_base = path_utils.path_from_base('webkit', 'data',
- 'layout_tests')
- chromium_base_platform = os.path.join(chromium_base, PLATFORM)
- webkit_base_platform = os.path.join(webkit_base, PLATFORM)
-
- possible_chromium_files = []
- possible_webkit_files = []
-
- if is_mac_platform(self.platform):
- self._add_baseline_paths(possible_chromium_files,
- chromium_base_platform,
- CHROMIUM_MAC_PLATFORM_DIRS)
- self._add_baseline_paths(possible_chromium_files,
- webkit_base_platform,
- WEBKIT_MAC_PLATFORM_DIRS)
- self._add_baseline_paths(possible_webkit_files,
- webkit_base_platform,
- WEBKIT_MAC_PLATFORM_DIRS)
- elif is_linux_platform(self.platform):
- self._add_baseline_paths(possible_chromium_files,
- chromium_base_platform,
- CHROMIUM_LINUX_PLATFORM_DIRS)
- else:
- self._add_baseline_paths(possible_chromium_files,
- chromium_base_platform,
- CHROMIUM_WIN_PLATFORM_DIRS)
-
- if not is_mac_platform(self.platform):
- self._add_baseline_paths(possible_webkit_files,
- webkit_base_platform,
- WEBKIT_WIN_PLATFORM_DIRS)
-
- possible_webkit_files.append(webkit_base)
-
- self._populate_baseline_dict(possible_webkit_files,
- self.webkit_baseline_dict)
- self._populate_baseline_dict(possible_chromium_files,
- self.baseline_dict)
- for key in self.webkit_baseline_dict.keys():
- if not key in self.baseline_dict:
- self.baseline_dict[key] = self.webkit_baseline_dict[key]
-
- return True
-
- def _populate_baseline_dict(self, directories, dictionary):
- for dir in directories:
- os.path.walk(dir, self._visit_baseline_dir, dictionary)
-
- def _visit_baseline_dir(self, dict, dirname, names):
- """ Method intended to be called by os.path.walk to build up an index
- of where all the test baselines exist. """
- # Exclude .svn from the walk, since we don't care what is in these
- # dirs.
- if '.svn' in names:
- names.remove('.svn')
- for name in names:
- if name.find("-expected.") > -1:
- test_path_key = os.path.join(dirname, name)
- # Fix path separators to match the separators used on
- # the buildbots.
- test_path_key = test_path_key.replace("\\", "/")
- test_path_key = self._normalize_baseline_identifier(
- test_path_key)
- if not test_path_key in dict:
- dict[test_path_key] = os.path.join(dirname, name)
-
- # TODO(gwilson): Simplify identifier creation to not rely so heavily on
- # directory and path names.
-
- def _normalize_baseline_identifier(self, test_path):
- """ Given either a baseline path (i.e. /LayoutTests/platform/mac/...)
- or a test path (i.e. /LayoutTests/fast/dom/....) will normalize
- to a unique identifier. This is basically a hashing function for
- layout test paths."""
-
- for regex in LOCAL_BASELINE_REGEXES:
- value = extract_first_value(test_path, regex)
- if value:
- return value
- return test_path
-
- def _add_baseline_ur_ls(self, list, base_url, platforms):
- # If the base URL doesn't contain any platform in its path, only add
- # the base URL to the list. This happens with the chrome/ dir.
- if base_url.find("%s") == -1:
- list.append(base_url)
- return
- for platform in platforms:
- list.append(base_url % platform)
-
- # TODO(gwilson): Refactor this method to use
- # platform_utils_*.BaselineSearchPath instead of custom logic. This may
- # require some kind of wrapper since this method looks for URLs instead
- # of local paths.
-
- def _get_possible_file_list(self, filename, only_webkit):
- """ Returns a list of possible filename locations for the given file.
- Uses the platform of the class to determine the order.
- """
-
- possible_chromium_files = []
- possible_webkit_files = []
-
- chromium_platform_url = LAYOUT_TEST_REPO_BASE_URL
- if not filename.startswith("chrome"):
- chromium_platform_url += "platform/%s/"
- chromium_platform_url += filename
-
- webkit_platform_url = WEBKIT_PLATFORM_BASELINE_URL + filename
-
- if is_mac_platform(self.platform):
- self._add_baseline_ur_ls(possible_chromium_files,
- chromium_platform_url,
- CHROMIUM_MAC_PLATFORM_DIRS)
- self._add_baseline_ur_ls(possible_webkit_files,
- webkit_platform_url,
- WEBKIT_MAC_PLATFORM_DIRS)
- elif is_linux_platform(self.platform):
- self._add_baseline_ur_ls(possible_chromium_files,
- chromium_platform_url,
- CHROMIUM_LINUX_PLATFORM_DIRS)
- else:
- self._add_baseline_ur_ls(possible_chromium_files,
- chromium_platform_url,
- CHROMIUM_WIN_PLATFORM_DIRS)
-
- if not is_mac_platform(self.platform):
- self._add_baseline_ur_ls(possible_webkit_files,
- webkit_platform_url,
- WEBKIT_WIN_PLATFORM_DIRS)
- possible_webkit_files.append(WEBKIT_LAYOUT_TEST_BASE_URL + filename)
-
- if only_webkit:
- return possible_webkit_files
- return possible_chromium_files + possible_webkit_files
-
-    # Like _get_baseline, but only retrieves the baseline from upstream
-    # (skips looking in chromium).
-
- def _get_upstream_baseline(self, filename, directory):
- return self._get_baseline(filename, directory, upstream_only=True)
-
- def _get_file_age(self, url):
- # Check if the given URL is really a local file path.
- if not url or not url.startswith("http"):
- return None
- try:
- if url.find(WEBKIT_TRAC_HOSTNAME) > -1:
- return extract_single_regex_at_url(url[:url.rfind("/")],
- WEBKIT_FILE_AGE_REGEX %
- url[url.find("/browser"):])
- else:
- return extract_single_regex_at_url(url + "?view=log",
- CHROMIUM_FILE_AGE_REGEX)
- except:
- if self.verbose:
- print "Could not find age for %s. Does the file exist?" % url
- return None
-
- # Returns a flakiness on a scale of 1-50.
- # TODO(gwilson): modify this to also return which of the last 10
- # builds failed for this test.
-
- def _get_flakiness(self, test_path, target_platform):
- url = get_flaky_test_url(target_platform)
- if url == "":
- return None
-
- if url in self._flaky_test_cache:
- content = self._flaky_test_cache[url]
- else:
- content = urllib2.urlopen(urllib2.Request(url)).read()
- self._flaky_test_cache[url] = content
-
- flakiness = extract_first_value(content, FLAKY_TEST_REGEX % test_path)
- return flakiness
-
- def _get_test_expectations(self):
- if not self._test_expectations_cache:
- try:
- if self.test_expectations_file:
- log = open(self.test_expectations_file, 'r')
- self._test_expectations_cache = "\n".join(log.readlines())
- else:
- self._test_expectations_cache = scrape_url(
- TEST_EXPECTATIONS_URL)
-            except urllib2.HTTPError:
- print ("Could not find test_expectations.txt at %s" %
- TEST_EXPECTATIONS_URL)
-
- return self._test_expectations_cache
-
- def _get_test_expectations_line(self, test_path):
- content = self._get_test_expectations()
-
- if not content:
- return None
-
- for match in content.splitlines():
- line = re.search(".*? : (.*?) = .*", match)
- if line and test_path.find(line.group(1)) > -1:
- return match
-
- return None
-
- def _copy_file_from_zip_dir(self, file_in_zip, file_to_create):
- modifiers = ""
- if file_to_create.endswith(".png"):
- modifiers = "b"
-        dir = os.path.split(file_to_create)[0]
- create_directory(dir)
- file = os.path.normpath(os.path.join(TEMP_ZIP_DIR, file_in_zip))
- shutil.copy(file, dir)
-
- def _extract_file_from_zip(self, zip, file_in_zip, file_to_create):
- modifiers = ""
- if file_to_create.endswith(".png"):
- modifiers = "b"
- try:
- create_directory(file_to_create[0:file_to_create.rfind("/")])
- localFile = open(file_to_create, "w%s" % modifiers)
- localFile.write(zip.read(file_in_zip))
- localFile.close()
- os.chmod(file_to_create, 0777)
- return True
- except KeyError:
- print "File %s does not exist in zip file." % (file_in_zip)
- except AttributeError:
- print "File %s does not exist in zip file." % (file_in_zip)
- print "Is this zip file assembled correctly?"
- return False
-
- def _download_file(self, url, local_filename=None, modifiers="",
- force=False):
- """
- Copy the contents of a file from a given URL
- to a local file.
- """
- try:
- if local_filename == None:
- local_filename = url.split('/')[-1]
- if os.path.isfile(local_filename) and not force:
- if self.verbose:
- print "File at %s already exists." % local_filename
- return local_filename
- if self.dont_download:
- return local_filename
- webFile = urllib2.urlopen(url)
- localFile = open(local_filename, ("w%s" % modifiers))
- localFile.write(webFile.read())
- webFile.close()
- localFile.close()
- os.chmod(local_filename, 0777)
- except urllib2.HTTPError:
- return None
- except urllib2.URLError:
- print "The url %s is malformed." % url
- return None
- return localFile.name
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder_test.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder_test.py
deleted file mode 100644
index 79da0f4..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/failure_finder_test.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import zipfile
-
-from failure_finder import FailureFinder
-
-TEST_BUILDER_OUTPUT = """090723 10:38:22 test_shell_thread.py:289
- ERROR chrome/fast/forms/textarea-metrics.html failed:
- Text diff mismatch
- 090723 10:38:21 test_shell_thread.py:289
- ERROR chrome/fast/dom/xss-DENIED-javascript-variations.html failed:
- Text diff mismatch
- 090723 10:37:58 test_shell_thread.py:289
- ERROR LayoutTests/plugins/bindings-test.html failed:
- Text diff mismatch
-
-------------------------------------------------------------------------------
-Expected to crash, but passed (1):
- chrome/fast/forms/textarea-metrics.html
-
-Regressions: Unexpected failures (2):
- chrome/fast/dom/xss-DENIED-javascript-variations.html = FAIL
- LayoutTests/plugins/bindings-test.html = FAIL
-------------------------------------------------------------------------------
-"""
-
-TEST_FAILURE_1 = ("layout-test-results/chrome/fast/forms/"
- "textarea-metrics-actual.txt")
-TEST_FAILURE_2 = ("layout-test-results/chrome/fast/dom/"
- "xss-DENIED-javascript-variations-actual.txt")
-TEST_FAILURE_3 = ("layout-test-results/LayoutTests/plugins/"
- "bindings-test-actual.txt")
-
-TEST_ARCHIVE_OUTPUT = """
-Adding layout-test-results\pending\fast\repaint\not-real-actual.checksum
-Adding layout-test-results\pending\fast\repaint\not-real-actual.png
-Adding layout-test-results\pending\fast\repaint\not-real-actual.txt
-last change: 22057
-build name: webkit-rel
-host name: codf138
-saving results to \\my\test\location\webkit-rel\22057
-program finished with exit code 0
-"""
-
-TEST_TEST_EXPECTATIONS = """
-BUG1234 chrome/fast/forms/textarea-metrics.html = CRASH
-"""
-
-TEST_BUILDER_LOG_FILE = "TEST_builder.log"
-TEST_ARCHIVE_LOG_FILE = "TEST_archive.log"
-TEST_DUMMY_ZIP_FILE = "TEST_zipfile.zip"
-TEST_EXPECTATIONS_FILE = "TEST_expectations.txt"
-
-WEBKIT_BUILDER_NUMBER = "9800"
-WEBKIT_FAILURES = (
- ["LayoutTests/fast/backgrounds/animated-svg-as-mask.html",
- "LayoutTests/fast/backgrounds/background-clip-text.html",
- "LayoutTests/fast/backgrounds/mask-composite.html",
- "LayoutTests/fast/backgrounds/repeat/mask-negative-offset-repeat.html",
- "LayoutTests/fast/backgrounds/svg-as-background-3.html",
- "LayoutTests/fast/backgrounds/svg-as-background-6.html",
- "LayoutTests/fast/backgrounds/svg-as-mask.html",
- "LayoutTests/fast/block/float/013.html",
- "LayoutTests/fast/block/float/nested-clearance.html",
- "LayoutTests/fast/block/positioning/047.html"])
-
-CHROMIUM_BASELINE = "chrome/fast/forms/basic-buttons.html"
-EXPECTED_CHROMIUM_LOCAL_BASELINE = "./chrome/fast/forms/basic-buttons.html"
-EXPECTED_CHROMIUM_URL_BASELINE = ("http://src.chromium.org/viewvc/chrome/"
- "trunk/src/webkit/data/layout_tests/chrome/"
- "fast/forms/basic-buttons.html")
-
-WEBKIT_BASELINE = "LayoutTests/fast/forms/11423.html"
-EXPECTED_WEBKIT_LOCAL_BASELINE = "./LayoutTests/fast/forms/11423.html"
-EXPECTED_WEBKIT_URL_BASELINE = (
- "http://svn.webkit.org/repository/webkit/trunk/"
- "LayoutTests/fast/forms/11423.html")
-
-TEST_ZIP_FILE = ("http://build.chromium.org/buildbot/layout_test_results/"
- "webkit-rel/21432/layout-test-results.zip")
-
-EXPECTED_REVISION = "20861"
-EXPECTED_BUILD_NAME = "webkit-rel"
-
-SVG_TEST_EXPECTATION = (
- "LayoutTests/svg/custom/foreign-object-skew-expected.png")
-SVG_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/svg/custom/"
- "foreign-object-skew-expected-upstream.png")
-WEBARCHIVE_TEST_EXPECTATION = ("LayoutTests/webarchive/adopt-attribute-"
- "styled-body-webarchive-expected.webarchive")
-DOM_TEST_EXPECTATION = ("LayoutTests/fast/dom/"
- "attribute-downcast-right-expected.txt")
-DOM_TEST_EXPECTATION_UPSTREAM = ("LayoutTests/fast/dom/"
- "attribute-downcast-right-"
- "expected-upstream.png")
-
-TEST_EXPECTATIONS = """
-BUG1234 WONTFIX : LayoutTests/fast/backgrounds/svg-as-background-3.html = FAIL
-BUG3456 WIN : LayoutTests/fast/backgrounds/svg-as-background-6.html = CRASH
-BUG4567 : LayoutTests/fast/backgrounds/svg-as-mask.html = PASS
-WONTFIX : LayoutTests/fast/block/ = FAIL
-"""
-
-EXPECT_EXACT_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-6.html"
-EXPECT_GENERAL_MATCH = "LayoutTests/fast/block/float/013.html"
-EXPECT_NO_MATCH = "LayoutTests/fast/backgrounds/svg-as-background-99.html"
-
-WEBKIT_ORG = "webkit.org"
-CHROMIUM_ORG = "chromium.org"
-
-
-class FailureFinderTest(object):
-
- def runTests(self):
- all_tests_passed = True
-
- tests = ["testWhitespaceInBuilderName",
- "testGetLastBuild",
- "testFindMatchesInBuilderOutput",
- "testScrapeBuilderOutput",
- "testGetChromiumBaseline",
- "testGetWebkitBaseline",
- "testZipDownload",
- "testUseLocalOutput",
- "testTranslateBuildToZip",
- "testGetBaseline",
- "testFindTestExpectations",
- "testFull"]
-
- for test in tests:
- try:
- result = eval(test + "()")
- if result:
- print "[ OK ] %s" % test
- else:
- all_tests_passed = False
- print "[ FAIL ] %s" % test
- except:
- print "[ ERROR ] %s" % test
- return all_tests_passed
-
-
-def _get_basic_failure_finder():
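-    # build=None, builder_name="Webkit", exclude_known_failures=False,
-    # test_regex="", output_dir=".", max_failures=10, verbose=False.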
- return FailureFinder(None, "Webkit", False, "", ".", 10, False)
-
-
-def _test_last_build(failure_finder):
- try:
- last_build = failure_finder.get_last_build()
- # Verify that last_build is not empty and is a number.
- build = int(last_build)
- return (build > 0)
- except:
- return False
-
-
-def test_get_last_build():
- test = _get_basic_failure_finder()
- return _test_last_build(test)
-
-
-def test_whitespace_in_builder_name():
- test = _get_basic_failure_finder()
- test.set_platform("Webkit (webkit.org)")
- return _test_last_build(test)
-
-
-def test_scrape_builder_output():
-
- # Try on the default builder.
- test = _get_basic_failure_finder()
- test.build = "9800"
- output = test._scrape_builder_output()
- if not output:
- return False
-
- # Try on a crazy builder on the FYI waterfall.
- test = _get_basic_failure_finder()
- test.build = "1766"
- test.set_platform("Webkit Linux (webkit.org)")
- output = test._scrape_builder_output()
- if not output:
- return False
-
- return True
-
-
-def test_find_matches_in_builder_output():
- test = _get_basic_failure_finder()
- test.exclude_known_failures = True
- matches = test._find_matches_in_builder_output(TEST_BUILDER_OUTPUT)
-    # Verify that we found exactly the two unexpected failures.
- if len(matches) != 2:
- print "Did not find all unexpected failures."
- return False
-
- test.exclude_known_failures = False
- matches = test._find_matches_in_builder_output(TEST_BUILDER_OUTPUT)
- if len(matches) != 3:
- print "Did not find all failures."
- return False
- return True
-
-
-def _test_baseline(test_name, expected_local, expected_url):
- test = _get_basic_failure_finder()
- # Test baseline that is obviously in Chromium's tree.
- baseline = test._get_baseline(test_name, ".", False)
- try:
- os.remove(baseline.local_file)
- if (baseline.local_file != expected_local or
- baseline.baseline_url != expected_url):
- return False
- except:
- return False
- return True
-
-
-def test_get_chromium_baseline():
- return _test_baseline(CHROMIUM_BASELINE, EXPECTED_CHROMIUM_LOCAL_BASELINE,
- EXPECTED_CHROMIUM_URL_BASELINE)
-
-
-def test_get_webkit_baseline():
- return _test_baseline(WEBKIT_BASELINE, EXPECTED_WEBKIT_LOCAL_BASELINE,
- EXPECTED_WEBKIT_URL_BASELINE)
-
-
-def test_use_local_output():
- test_result = True
- try:
- _write_file(TEST_BUILDER_LOG_FILE, TEST_BUILDER_OUTPUT)
- _write_file(TEST_ARCHIVE_LOG_FILE, TEST_ARCHIVE_OUTPUT)
- _write_file(TEST_EXPECTATIONS_FILE, TEST_TEST_EXPECTATIONS)
-        # Named zip_file to avoid shadowing the zip() builtin.
-        zip_file = zipfile.ZipFile(TEST_DUMMY_ZIP_FILE, 'w')
-        zip_file.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_1)
-        zip_file.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_2)
-        zip_file.write(TEST_BUILDER_LOG_FILE, TEST_FAILURE_3)
-        zip_file.close()
- test = _get_basic_failure_finder()
- test.archive_step_log_file = TEST_ARCHIVE_LOG_FILE
- test.builder_output_log_file = TEST_BUILDER_LOG_FILE
- test.test_expectations_file = TEST_EXPECTATIONS_FILE
- test.zip_file = TEST_DUMMY_ZIP_FILE
- test.dont_download = True
- test.exclude_known_failures = True
- test.delete_zip_file = False
- failures = test.get_failures()
-        if not failures or len(failures) != 2:
-            print "Did not get the expected number of failures:"
-            for failure in failures or []:
-                print failure.test_path
-            test_result = False
- finally:
- os.remove(TEST_BUILDER_LOG_FILE)
- os.remove(TEST_ARCHIVE_LOG_FILE)
- os.remove(TEST_EXPECTATIONS_FILE)
- os.remove(TEST_DUMMY_ZIP_FILE)
- return test_result
-
-
-def _write_file(filename, contents):
- myfile = open(filename, 'w')
- myfile.write(contents)
- myfile.close()
-
-
-def test_zip_download():
- test = _get_basic_failure_finder()
- try:
- test._download_file(TEST_ZIP_FILE, "test.zip", "b") # "b" -> binary
- os.remove("test.zip")
- return True
- except:
- return False
-
-
-def test_translate_build_to_zip():
- test = _get_basic_failure_finder()
- test.build = WEBKIT_BUILDER_NUMBER
- revision, build_name = test._get_revision_and_build_from_archive_step()
- if revision != EXPECTED_REVISION or build_name != EXPECTED_BUILD_NAME:
- return False
- return True
-
-
-def test_get_baseline():
- test = _get_basic_failure_finder()
- result = True
- test.platform = "chromium-mac"
- baseline = test._get_baseline(WEBARCHIVE_TEST_EXPECTATION, ".")
- if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1:
- result = False
- print "Webarchive layout test not found at webkit.org: %s" % url
- test.platform = "chromium-win"
- baseline = test._get_baseline(SVG_TEST_EXPECTATION, ".")
- if (not baseline.local_file or
- baseline.baseline_url.find(CHROMIUM_ORG) == -1):
- result = False
- print "SVG layout test found at %s, not chromium.org" % url
- baseline = test._get_baseline(SVG_TEST_EXPECTATION, ".", True)
- if not baseline.local_file or baseline.baseline_url.find(WEBKIT_ORG) == -1:
- result = False
- print "Upstream SVG layout test NOT found at webkit.org!"
- baseline = test._get_baseline(DOM_TEST_EXPECTATION, ".", True)
- if (not baseline.local_file or
- baseline.baseline_url.find("/platform/") > -1):
- result = False
- print ("Upstream SVG layout test found in a "
- "platform directory: %s" % url)
- os.remove(WEBARCHIVE_TEST_EXPECTATION)
- os.remove(SVG_TEST_EXPECTATION)
- os.remove(SVG_TEST_EXPECTATION_UPSTREAM)
- os.remove(DOM_TEST_EXPECTATION_UPSTREAM)
- delete_dir("LayoutTests")
- return result
-
-
-def delete_dir(directory):
- """ Recursively deletes empty directories given a root.
- This method will throw an exception if they are not empty. """
- for root, dirs, files in os.walk(directory, topdown=False):
- for d in dirs:
- try:
- os.rmdir(os.path.join(root, d))
- except:
- pass
- os.rmdir(directory)
-
-
-def test_full():
- """ Verifies that the entire system works end-to-end. """
- test = _get_basic_failure_finder()
- test.build = WEBKIT_BUILDER_NUMBER
- test.dont_download = True # Dry run only, no downloading needed.
- failures = test.get_failures()
- # Verify that the max failures parameter works.
-    if not failures or len(failures) > 10:
-        print "Got no failures or too many failures."
- return False
-
- # Verify the failures match the list of expected failures.
- for failure in failures:
-        if failure.test_path not in WEBKIT_FAILURES:
- print "Found a failure I did not expect to see."
- return False
-
- return True
-
-
-def test_find_test_expectations():
- test = _get_basic_failure_finder()
- test._test_expectations_cache = TEST_EXPECTATIONS
- match = test._get_test_expectations_line(EXPECT_EXACT_MATCH)
- if not match:
- return False
- match = test._get_test_expectations_line(EXPECT_GENERAL_MATCH)
- if not match:
- return False
- match = test._get_test_expectations_line(EXPECT_NO_MATCH)
- return not match
-
-
-if __name__ == "__main__":
- fft = FailureFinderTest()
- result = fft.runTests()
- if result:
- print "All tests passed."
- else:
- print "Not all tests passed."
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/html_generator.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/html_generator.py
deleted file mode 100644
index 5642b37..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/html_generator.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import re
-
-from failure import Failure
-
-CHROMIUM_BUG_URL = "http://crbug.com/"
-
-
-def extract_first_value(string, regex):
- m = re.search(regex, string)
- if m and m.group(1):
- return m.group(1)
- return None
-
-# TODO(gwilson): Refactor HTML generation into a HTML templating system like
-# Django templates.
-
-
-class HTMLGenerator(object):
-
- def __init__(self, failures, output_dir, build, platform,
- exclude_known_failures):
- self.failures = failures
- self.output_dir = output_dir
- self.build = build
- self.platform = platform
- self.exclude_known_failures = exclude_known_failures
- self.image_size = "200px"
-
- def generate_html(self):
- html = ""
- html += """
- <html>
- <head>
- <style>
- body {
- font-family: sans-serif;
- }
- h2 {
- }
- .mainTable {
- background: #666666;
- }
- .mainTable td , .mainTable th {
- background: white;
- }
- .titlelink {
- font-size: 18pt;
- font-weight: bold;
- }
- .detail {
- margin-left: 10px;
- margin-top: 3px;
- }
- </style>
- </head>
- <body>
- """
- title = "All failures"
- if self.exclude_known_failures:
- title = "Regressions"
-
- html += """
- <h1>%s for build %s (%s)</h1>
- """ % (title, self.build, self.platform)
-
- test_number = 0
-
- # TODO(gwilson): Refactor this to do a join() on an array of HTML,
- # rather than appending strings in a loop.
- for failure in self.failures:
- test_number += 1
- html += """
- <table style="border: 1px solid black; width: 1200px;
- -webkit-border-radius: 5px;" cellspacing="0" cellpadding="4">
- <tr>
- <td style="background-color: #CDECDE;
- border-bottom: 1px solid black;">
- <span class="titlelink">%s.&nbsp;&nbsp;%s</span></td></tr>
- <tr><td>&nbsp;&nbsp;Last modified: <a href="%s">%s</a>
- """ % (test_number, failure.test_path, failure.get_test_home(),
- failure.test_age)
- html += "<div class='detail'>"
- html += "<pre>%s</pre>" % \
- (self._generate_linkified_text_expectations(failure))
-
- html += self._generate_flakiness_html(failure)
-
- if failure.crashed:
- html += "<div>Test <b>CRASHED</b></div>"
- elif failure.timeout:
- html += "<div>Test <b>TIMED OUT</b></div>"
- else:
- html += """
- <table class="mainTable" cellspacing=1 cellpadding=5>
- <tr>
- <th width='250'>&nbsp;</th>
- <th width='200'>Expected</th>
- <th width='200'>Actual</th>
- <th width='200'>Difference</th>
- <th width='200'>Upstream</th>
- </tr>
- """
-
- if failure.text_diff_mismatch:
- html += self._generate_text_failure_html(failure)
-
- if failure.image_mismatch:
- html += self._generate_image_failure_html(failure)
-
- html += "</table>"
- html += "</div></td></tr></table><br>"
- html += """</body></html>"""
-
- # TODO(gwilson): Change this filename to be passed in as an argument.
- html_filename = "%s/index-%s.html" % (self.output_dir, self.build)
-        html_file = open(html_filename, 'w')
-        html_file.write(html)
-        html_file.close()
- return html_filename
-
- def _generate_linkified_text_expectations(self, failure):
- if not failure.test_expectations_line:
- return ""
-        bug_number = extract_first_value(failure.test_expectations_line,
-                                         r"BUG(\d+)")
-        if not bug_number:
- return ""
- return failure.test_expectations_line.replace("BUG" + bug_number,
- "<a href='%s%s'>BUG%s</a>" % (CHROMIUM_BUG_URL, bug_number,
- bug_number))
-
- # TODO(gwilson): Fix this so that it shows the last ten runs
- # not just a "meter" of flakiness.
-
- def _generate_flakiness_html(self, failure):
- html = ""
- if not failure.flakiness:
- return html
- html += """
- <table>
- <tr>
- <td>Flakiness: (%s)</td>
- """ % (failure.flakiness)
-
- flaky_red = int(round(int(failure.flakiness) / 5))
- flaky_green = 10 - flaky_red
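-        # Worked example (illustrative): a flakiness value of 25 gives
-        # flaky_red = int(round(25 / 5)) = 5, so the 10-cell meter renders
-        # 5 green cells followed by 5 red; 50 or more renders all 10 red.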
- for i in range(0, flaky_green):
- html += """
- <td style="background: green">&nbsp;&nbsp;</td>
- """
- for i in range(0, flaky_red):
- html += """
- <td style="background: red">&nbsp;&nbsp;</td>
- """
- html += """
- </tr>
- </table><br>
- """
- return html
-
- def _generate_text_failure_html(self, failure):
- html = ""
- if not failure.get_text_baseline_location():
- return """<tr><td colspan='5'>This test likely does not have any
- TEXT baseline for this platform, or one could not
- be found.</td></tr>"""
- html += """
- <tr>
- <td>
- <b><a href="%s">Render Tree Dump</a></b><br>
- <b>%s</b> baseline<br>
- Age: %s<br>
- </td>
- """ % (failure.text_baseline_url,
- failure.get_text_baseline_location(),
- failure.text_baseline_age)
- html += self._generate_text_failure_td(
- failure.get_expected_text_filename(), "expected text")
- html += self._generate_text_failure_td(
- failure.get_actual_text_filename(), "actual text")
- html += self._generate_text_failure_td(
- failure.get_text_diff_filename(), "text diff")
- html += "<td>&nbsp;</td>"
- html += "</tr>"
- return html
-
- def _generate_text_failure_td(self, file_path, anchor_text):
- return ("<td align=center>"
- "<a href='./layout-test-results-%s/%s'>%s</a></td>") % (
- self.build, file_path, anchor_text)
-
- def _generate_image_failure_html(self, failure):
- if not failure.get_image_baseline_location():
- return """<tr><td colspan='5'>This test likely does not have any
- IMAGE baseline for this platform, or one could not be
- found.</td></tr>"""
- html = """
- <tr>
- <td><b><a href="%s">Pixel Dump</a></b><br>
- <b>%s</b> baseline<br>Age: %s</td>
- """ % (failure.image_baseline_url,
- failure.get_image_baseline_location(),
- failure.image_baseline_age)
- html += self._generate_image_failure_td(
- failure.get_expected_image_filename())
- html += self._generate_image_failure_td(
- failure.get_actual_image_filename())
- html += self._generate_image_failure_td(
- failure.get_image_diff_filename())
- if (failure.image_baseline_upstream_local and
- failure.image_baseline_upstream_local != ""):
- html += self._generate_image_failure_td(
- failure.get_image_upstream_filename())
- else:
- html += """
- <td>&nbsp;</td>
- """
- html += "</tr>"
- return html
-
- def _generate_image_failure_td(self, filename):
- return ("<td><a href='./layout-test-results-%s/%s'>"
- "<img style='width: %s' src='./layout-test-results-%s/%s' />"
- "</a></td>") % (self.build, filename, self.image_size,
- self.build, filename)
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.bat b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.bat
deleted file mode 100644
index 8aab484..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.bat
+++ /dev/null
@@ -1 +0,0 @@
-%~dp0..\..\..\..\..\..\third_party\python_24\python.exe %~dp0http_server.py %*
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.py
deleted file mode 100755
index 747702c..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A class to help start/stop the lighttpd server used by layout tests."""
-
-
-import logging
-import optparse
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import urllib
-
-import http_server_base
-import path_utils
-
-# So we can import httpd_utils below to make ui_tests happy.
-sys.path.insert(0, path_utils.path_from_base('tools', 'python'))
-import google.httpd_utils
-
-
-def remove_log_files(folder, starts_with):
- files = os.listdir(folder)
- for file in files:
- if file.startswith(starts_with):
- full_path = os.path.join(folder, file)
- os.remove(full_path)
-
-
-class Lighttpd(http_server_base.HttpServerBase):
- # Webkit tests
- try:
- _webkit_tests = path_utils.path_from_base('third_party', 'WebKit',
- 'LayoutTests', 'http',
- 'tests')
- _js_test_resource = path_utils.path_from_base('third_party', 'WebKit',
- 'LayoutTests', 'fast',
- 'js', 'resources')
- except path_utils.PathNotFound:
- _webkit_tests = None
- _js_test_resource = None
-
- # Path where we can access all of the tests
- _all_tests = path_utils.path_from_base('webkit', 'data', 'layout_tests')
- # Self generated certificate for SSL server (for client cert get
- # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt)
- _pem_file = path_utils.path_from_base('tools', 'python', 'google',
- 'httpd_config', 'httpd2.pem')
- # One mapping where we can get to everything
- VIRTUALCONFIG = [{'port': 8081, 'docroot': _all_tests}]
-
- if _webkit_tests:
- VIRTUALCONFIG.extend(
- # Three mappings (one with SSL enabled) for LayoutTests http tests
- [{'port': 8000, 'docroot': _webkit_tests},
- {'port': 8080, 'docroot': _webkit_tests},
- {'port': 8443, 'docroot': _webkit_tests, 'sslcert': _pem_file}])
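-    # Summary of the mappings above, for reference:
-    #   8081 -> webkit/data/layout_tests (everything)
-    #   8000 -> LayoutTests/http/tests
-    #   8080 -> LayoutTests/http/tests
-    #   8443 -> LayoutTests/http/tests over SSL (using httpd2.pem)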
-
- def __init__(self, output_dir, background=False, port=None,
- root=None, register_cygwin=None, run_background=None):
- """Args:
- output_dir: the absolute path to the layout test result directory
- """
- self._output_dir = output_dir
- self._process = None
- self._port = port
- self._root = root
- self._register_cygwin = register_cygwin
- self._run_background = run_background
- if self._port:
- self._port = int(self._port)
-
- def is_running(self):
-        return self._process is not None
-
- def start(self):
-        if self.is_running():
-            # String exceptions are deprecated; raise a real exception.
-            raise RuntimeError('Lighttpd already running')
-
- base_conf_file = path_utils.path_from_base('webkit',
- 'tools', 'layout_tests', 'webkitpy', 'layout_tests',
- 'layout_package', 'lighttpd.conf')
- out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf')
- time_str = time.strftime("%d%b%Y-%H%M%S")
- access_file_name = "access.log-" + time_str + ".txt"
- access_log = os.path.join(self._output_dir, access_file_name)
- log_file_name = "error.log-" + time_str + ".txt"
- error_log = os.path.join(self._output_dir, log_file_name)
-
- # Remove old log files. We only need to keep the last ones.
- remove_log_files(self._output_dir, "access.log-")
- remove_log_files(self._output_dir, "error.log-")
-
- # Write out the config
- f = file(base_conf_file, 'rb')
- base_conf = f.read()
- f.close()
-
- f = file(out_conf_file, 'wb')
- f.write(base_conf)
-
- # Write out our cgi handlers. Run perl through env so that it
- # processes the #! line and runs perl with the proper command
- # line arguments. Emulate apache's mod_asis with a cat cgi handler.
- f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n'
- ' ".pl" => "/usr/bin/env",\n'
- ' ".asis" => "/bin/cat",\n'
- ' ".php" => "%s" )\n\n') %
- path_utils.lighttpd_php_path())
-
-        # Set up log files.
- f.write(('server.errorlog = "%s"\n'
- 'accesslog.filename = "%s"\n\n') % (error_log, access_log))
-
-        # Set up upload folders. The upload folder holds temporary upload
-        # files and POST data. This is used to support XHR layout tests
-        # that do POSTs.
- f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir))
-
-        # Set up a link to where the js test templates are stored.
- f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') %
- (self._js_test_resource))
-
-        # Dump the virtual host config at the bottom.
- if self._root:
- if self._port:
- # Have both port and root dir.
- mappings = [{'port': self._port, 'docroot': self._root}]
- else:
- # Have only a root dir - set the ports as for LayoutTests.
- # This is used in ui_tests to run http tests against a browser.
-
- # default set of ports as for LayoutTests but with a
- # specified root.
- mappings = [{'port': 8000, 'docroot': self._root},
- {'port': 8080, 'docroot': self._root},
- {'port': 8443, 'docroot': self._root,
- 'sslcert': Lighttpd._pem_file}]
- else:
- mappings = self.VIRTUALCONFIG
- for mapping in mappings:
- ssl_setup = ''
- if 'sslcert' in mapping:
- ssl_setup = (' ssl.engine = "enable"\n'
- ' ssl.pemfile = "%s"\n' % mapping['sslcert'])
-
- f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n'
- ' server.document-root = "%s"\n' +
- ssl_setup +
- '}\n\n') % (mapping['port'], mapping['docroot']))
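-        # Illustrative output (hypothetical docroot): each mapping above
-        # emits a block like
-        #
-        #   $SERVER["socket"] == "127.0.0.1:8443" {
-        #       server.document-root = "/path/to/LayoutTests/http/tests"
-        #       ssl.engine = "enable"
-        #       ssl.pemfile = "/path/to/httpd2.pem"
-        #   }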
- f.close()
-
- executable = path_utils.lighttpd_executable_path()
- module_path = path_utils.lighttpd_module_path()
- start_cmd = [executable,
- # Newly written config file
- '-f', path_utils.path_from_base(self._output_dir,
- 'lighttpd.conf'),
- # Where it can find its module dynamic libraries
- '-m', module_path]
-
-        if not self._run_background:
-            # '-D': don't detach; run lighttpd in the foreground.
-            start_cmd.append('-D')
-
- # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
- # bug that mod_alias.so loads it from the hard coded path.
- if sys.platform == 'darwin':
- tmp_module_path = '/tmp/lighttpd/lib'
- if not os.path.exists(tmp_module_path):
- os.makedirs(tmp_module_path)
- lib_file = 'liblightcomp.dylib'
- shutil.copyfile(os.path.join(module_path, lib_file),
- os.path.join(tmp_module_path, lib_file))
-
- # Put the cygwin directory first in the path to find cygwin1.dll
- env = os.environ
- if sys.platform in ('cygwin', 'win32'):
- env['PATH'] = '%s;%s' % (
- path_utils.path_from_base('third_party', 'cygwin', 'bin'),
- env['PATH'])
-
- if sys.platform == 'win32' and self._register_cygwin:
- setup_mount = path_utils.path_from_base('third_party', 'cygwin',
- 'setup_mount.bat')
- subprocess.Popen(setup_mount).wait()
-
- logging.debug('Starting http server')
- self._process = subprocess.Popen(start_cmd, env=env)
-
- # Wait for server to start.
- self.mappings = mappings
- server_started = self.wait_for_action(
- self.is_server_running_on_all_ports)
-
- # Our process terminated already
-        if not server_started or self._process.returncode is not None:
- raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.')
-
- logging.debug("Server successfully started")
-
-    # TODO(deanm): Find a nicer way to shut down cleanly. Our log files
-    # are probably not being flushed, etc.; why doesn't our Python have
-    # os.kill?
-
- def stop(self, force=False):
- if not force and not self.is_running():
- return
-
- httpd_pid = None
- if self._process:
- httpd_pid = self._process.pid
- path_utils.shut_down_http_server(httpd_pid)
-
- if self._process:
- self._process.wait()
- self._process = None
-
-if '__main__' == __name__:
- # Provide some command line params for starting/stopping the http server
- # manually. Also used in ui_tests to run http layout tests in a browser.
- option_parser = optparse.OptionParser()
- option_parser.add_option('-k', '--server',
- help='Server action (start|stop)')
- option_parser.add_option('-p', '--port',
- help='Port to listen on (overrides layout test ports)')
- option_parser.add_option('-r', '--root',
- help='Absolute path to DocumentRoot (overrides layout test roots)')
- option_parser.add_option('--register_cygwin', action="store_true",
- dest="register_cygwin", help='Register Cygwin paths (on Win try bots)')
- option_parser.add_option('--run_background', action="store_true",
- dest="run_background",
- help='Run on background (for running as UI test)')
- options, args = option_parser.parse_args()
-
- if not options.server:
- print ('Usage: %s --server {start|stop} [--root=root_dir]'
- ' [--port=port_number]' % sys.argv[0])
- else:
-        if (options.root is None) and (options.port is not None):
-            # Specifying a root but no port means we want httpd on the
-            # default set of ports that LayoutTests use, but pointing at
-            # a different source of tests. Specifying a port but no root
-            # does not seem meaningful.
-            raise ValueError('Specifying a port also requires a root.')
- httpd = Lighttpd(tempfile.gettempdir(),
- port=options.port,
- root=options.root,
- register_cygwin=options.register_cygwin,
- run_background=options.run_background)
- if 'start' == options.server:
- httpd.start()
- else:
- httpd.stop(force=True)
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.sh b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.sh
deleted file mode 100755
index b3f4b4b..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-exec_dir=$(dirname $0)
-
-if [ "$OSTYPE" = "cygwin" ]; then
- system_root=`cygpath "$SYSTEMROOT"`
- PATH="/usr/bin:$system_root/system32:$system_root:$system_root/system32/WBEM"
- export PATH
- python_prog="$exec_dir/../../../../third_party/python_24/python.exe"
-else
- python_prog=python
- # When not using the included python, we don't get automatic site.py paths.
- # Specifically, run_webkit_tests needs the paths in:
- # third_party/python_24/Lib/site-packages/google.pth
- PYTHONPATH="${exec_dir}/../../../../tools/python:$PYTHONPATH"
- export PYTHONPATH
-fi
-
-"$python_prog" "$exec_dir/http_server.py" "$@"
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server_base.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server_base.py
deleted file mode 100644
index af38ab8..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/http_server_base.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Base class with common routines between the Apache and Lighttpd servers."""
-
-import logging
-import time
-import urllib
-
-
-class HttpServerBase(object):
-
- def wait_for_action(self, action):
- """Repeat the action for 20 seconds or until it succeeds. Returns
- whether it succeeded."""
- start_time = time.time()
- while time.time() - start_time < 20:
- if action():
- return True
- time.sleep(1)
-
- return False
-
- def is_server_running_on_all_ports(self):
- """Returns whether the server is running on all the desired ports."""
- for mapping in self.mappings:
- if 'sslcert' in mapping:
- http_suffix = 's'
- else:
- http_suffix = ''
-
- url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port'])
-
- try:
- response = urllib.urlopen(url)
- logging.debug("Server running at %s" % url)
- except IOError:
- logging.debug("Server NOT running at %s" % url)
- return False
-
- return True
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
deleted file mode 100644
index 1618fe2..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import os
-
-from layout_package import json_results_generator
-from layout_package import path_utils
-from layout_package import test_expectations
-from layout_package import test_failures
-
-
-class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
- """A JSON results generator for layout tests."""
-
- LAYOUT_TESTS_PATH = "LayoutTests"
-
- # Additional JSON fields.
- WONTFIX = "wontfixCounts"
- DEFERRED = "deferredCounts"
-
- def __init__(self, builder_name, build_name, build_number,
- results_file_base_path, builder_base_url,
- test_timings, expectations, result_summary, all_tests):
- """Modifies the results.json file. Grabs it off the archive directory
- if it is not found locally.
-
- Args:
- result_summary: ResultsSummary object storing the summary of the test
- results.
- (see the comment of JSONResultsGenerator.__init__ for other Args)
- """
-
- self._builder_name = builder_name
- self._build_name = build_name
- self._build_number = build_number
- self._builder_base_url = builder_base_url
- self._results_file_path = os.path.join(results_file_base_path,
- self.RESULTS_FILENAME)
- self._expectations = expectations
-
- # We don't use self._skipped_tests and self._passed_tests as we
- # override _InsertFailureSummaries.
-
- # We want relative paths to LayoutTest root for JSON output.
- path_to_name = self._get_path_relative_to_layout_test_root
- self._result_summary = result_summary
- self._failures = dict(
- (path_to_name(test), test_failures.determine_result_type(failures))
- for (test, failures) in result_summary.failures.iteritems())
- self._all_tests = [path_to_name(test) for test in all_tests]
- self._test_timings = dict(
- (path_to_name(test_tuple.filename), test_tuple.test_run_time)
- for test_tuple in test_timings)
-
- self._generate_json_output()
-
- def _get_path_relative_to_layout_test_root(self, test):
- """Returns the path of the test relative to the layout test root.
- For example, for:
- src/third_party/WebKit/LayoutTests/fast/forms/foo.html
- We would return
- fast/forms/foo.html
- """
-        index = test.find(self.LAYOUT_TESTS_PATH)
-        if index != -1:
-            index += len(self.LAYOUT_TESTS_PATH)
-
-        if index == -1:
-            # Already a relative path.
-            relative_path = test
-        else:
-            relative_path = test[index + 1:]
-
-        # Make sure all paths are unix-style.
-        return relative_path.replace('\\', '/')
-
- # override
- def _convert_json_to_current_version(self, results_json):
- archive_version = None
- if self.VERSION_KEY in results_json:
- archive_version = results_json[self.VERSION_KEY]
-
- super(JSONLayoutResultsGenerator,
- self)._convert_json_to_current_version(results_json)
-
- # version 2->3
- if archive_version == 2:
- for results_for_builder in results_json.itervalues():
- try:
- test_results = results_for_builder[self.TESTS]
- except:
- continue
-
- for test in test_results:
- # Make sure all paths are relative
- test_path = self._get_path_relative_to_layout_test_root(test)
- if test_path != test:
- test_results[test_path] = test_results[test]
- del test_results[test]
-
- # override
- def _insert_failure_summaries(self, results_for_builder):
- summary = self._result_summary
-
- self._insert_item_into_raw_list(results_for_builder,
- len((set(summary.failures.keys()) |
- summary.tests_by_expectation[test_expectations.SKIP]) &
- summary.tests_by_timeline[test_expectations.NOW]),
- self.FIXABLE_COUNT)
- self._insert_item_into_raw_list(results_for_builder,
- self._get_failure_summary_entry(test_expectations.NOW),
- self.FIXABLE)
- self._insert_item_into_raw_list(results_for_builder,
- len(self._expectations.get_tests_with_timeline(
- test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
- self._insert_item_into_raw_list(results_for_builder,
- self._get_failure_summary_entry(test_expectations.DEFER),
- self.DEFERRED)
- self._insert_item_into_raw_list(results_for_builder,
- self._get_failure_summary_entry(test_expectations.WONTFIX),
- self.WONTFIX)
-
- # override
- def _normalize_results_json(self, test, test_name, tests):
- super(JSONLayoutResultsGenerator, self)._normalize_results_json(
- test, test_name, tests)
-
- # Remove tests that don't exist anymore.
- full_path = os.path.join(path_utils.layout_tests_dir(), test_name)
- full_path = os.path.normpath(full_path)
- if not os.path.exists(full_path):
- del tests[test_name]
-
-    def _get_failure_summary_entry(self, timeline):
-        """Creates a summary object to insert into the JSON.
-
-        Args:
-          timeline: the test_expectations timeline to build the entry for
-              (e.g., test_expectations.NOW).
-        """
- entry = {}
- summary = self._result_summary
- timeline_tests = summary.tests_by_timeline[timeline]
- entry[self.SKIP_RESULT] = len(
- summary.tests_by_expectation[test_expectations.SKIP] &
- timeline_tests)
- entry[self.PASS_RESULT] = len(
- summary.tests_by_expectation[test_expectations.PASS] &
- timeline_tests)
- for failure_type in summary.tests_by_expectation.keys():
- if failure_type not in self.FAILURE_TO_CHAR:
- continue
- count = len(summary.tests_by_expectation[failure_type] &
- timeline_tests)
- entry[self.FAILURE_TO_CHAR[failure_type]] = count
- return entry
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_results_generator.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_results_generator.py
deleted file mode 100644
index 220059b..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import os
-import subprocess
-import sys
-import time
-import urllib2
-import xml.dom.minidom
-
-from layout_package import path_utils
-from layout_package import test_expectations
-
-sys.path.append(path_utils.path_from_base('third_party'))
-import simplejson
-
-
-class JSONResultsGenerator(object):
-
- MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
- # Min time (seconds) that will be added to the JSON.
- MIN_TIME = 1
- JSON_PREFIX = "ADD_RESULTS("
- JSON_SUFFIX = ");"
- PASS_RESULT = "P"
- SKIP_RESULT = "X"
- NO_DATA_RESULT = "N"
- VERSION = 3
- VERSION_KEY = "version"
- RESULTS = "results"
- TIMES = "times"
- BUILD_NUMBERS = "buildNumbers"
- WEBKIT_SVN = "webkitRevision"
- CHROME_SVN = "chromeRevision"
- TIME = "secondsSinceEpoch"
- TESTS = "tests"
-
- FIXABLE_COUNT = "fixableCount"
- FIXABLE = "fixableCounts"
- ALL_FIXABLE_COUNT = "allFixableCount"
-
- # Note that we omit test_expectations.FAIL from this list because
- # it should never show up (it's a legacy input expectation, never
- # an output expectation).
- FAILURE_TO_CHAR = {test_expectations.CRASH: "C",
- test_expectations.TIMEOUT: "T",
- test_expectations.IMAGE: "I",
- test_expectations.TEXT: "F",
- test_expectations.MISSING: "O",
- test_expectations.IMAGE_PLUS_TEXT: "Z"}
- FAILURE_CHARS = FAILURE_TO_CHAR.values()
-
- RESULTS_FILENAME = "results.json"
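-    # Rough shape of the emitted results.json (a sketch assembled from the
-    # constants above; builder and test names are hypothetical):
-    #
-    #   ADD_RESULTS({
-    #     "version": 3,
-    #     "Webkit": {
-    #       "buildNumbers": [...], "webkitRevision": [...],
-    #       "chromeRevision": [...], "secondsSinceEpoch": [...],
-    #       "fixableCount": [...], "fixableCounts": [...],
-    #       "allFixableCount": [...],
-    #       "tests": {
-    #         "fast/forms/foo.html": {
-    #           "results": [[3, "F"], [1, "P"]],   # run-length encoded
-    #           "times": [[4, 2]]
-    #         }
-    #       }
-    #     }
-    #   });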
-
- def __init__(self, builder_name, build_name, build_number,
- results_file_base_path, builder_base_url,
- test_timings, failures, passed_tests, skipped_tests, all_tests):
- """Modifies the results.json file. Grabs it off the archive directory
- if it is not found locally.
-
-        Args:
- builder_name: the builder name (e.g. Webkit).
- build_name: the build name (e.g. webkit-rel).
- build_number: the build number.
- results_file_base_path: Absolute path to the directory containing the
- results json file.
- builder_base_url: the URL where we have the archived test results.
-          test_timings: Map of test name to the test's run time.
- failures: Map of test name to a failure type (of test_expectations).
- passed_tests: A set containing all the passed tests.
- skipped_tests: A set containing all the skipped tests.
- all_tests: List of all the tests that were run. This should not
- include skipped tests.
- """
- self._builder_name = builder_name
- self._build_name = build_name
- self._build_number = build_number
- self._builder_base_url = builder_base_url
- self._results_file_path = os.path.join(results_file_base_path,
- self.RESULTS_FILENAME)
- self._test_timings = test_timings
- self._failures = failures
- self._passed_tests = passed_tests
- self._skipped_tests = skipped_tests
- self._all_tests = all_tests
-
- self._generate_json_output()
-
- def _generate_json_output(self):
- """Generates the JSON output file."""
- json = self._get_json()
- if json:
- results_file = open(self._results_file_path, "w")
- results_file.write(json)
- results_file.close()
-
- def _get_svn_revision(self, in_directory=None):
- """Returns the svn revision for the given directory.
-
- Args:
- in_directory: The directory where svn is to be run.
- """
- output = subprocess.Popen(["svn", "info", "--xml"],
- cwd=in_directory,
- shell=(sys.platform == 'win32'),
- stdout=subprocess.PIPE).communicate()[0]
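-        # 'svn info --xml' output is shaped roughly like
-        #   <info><entry revision="41005" ...>...</entry></info>
-        # (revision shown is illustrative); below we read the revision
-        # attribute off the first <entry> element.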
- try:
- dom = xml.dom.minidom.parseString(output)
- return dom.getElementsByTagName('entry')[0].getAttribute(
- 'revision')
- except xml.parsers.expat.ExpatError:
- return ""
-
- def _get_archived_json_results(self):
- """Reads old results JSON file if it exists.
- Returns (archived_results, error) tuple where error is None if results
- were successfully read.
- """
- results_json = {}
- old_results = None
- error = None
-
- if os.path.exists(self._results_file_path):
- old_results_file = open(self._results_file_path, "r")
- old_results = old_results_file.read()
- elif self._builder_base_url:
- # Check if we have the archived JSON file on the buildbot server.
- results_file_url = (self._builder_base_url +
- self._build_name + "/" + self.RESULTS_FILENAME)
- logging.error("Local results.json file does not exist. Grabbing "
- "it off the archive at " + results_file_url)
-
- try:
- results_file = urllib2.urlopen(results_file_url)
- info = results_file.info()
- old_results = results_file.read()
-            except urllib2.HTTPError, http_error:
-                # A non-4xx status code means the bot is hosed for some
-                # reason and we can't grab the results.json file off of it
-                # (4xx just means the archived copy isn't there).
-                if http_error.code < 400 or http_error.code >= 500:
-                    error = http_error
- except urllib2.URLError, url_error:
- error = url_error
-
- if old_results:
- # Strip the prefix and suffix so we can get the actual JSON object.
- old_results = old_results[len(self.JSON_PREFIX):
- len(old_results) - len(self.JSON_SUFFIX)]
-
- try:
- results_json = simplejson.loads(old_results)
-            except ValueError:
- logging.debug("results.json was not valid JSON. Clobbering.")
- # The JSON file is not valid JSON. Just clobber the results.
- results_json = {}
- else:
- logging.debug('Old JSON results do not exist. Starting fresh.')
- results_json = {}
-
- return results_json, error
-
- def _get_json(self):
- """Gets the results for the results.json file."""
- results_json, error = self._get_archived_json_results()
- if error:
- # If there was an error don't write a results.json
- # file at all as it would lose all the information on the bot.
- logging.error("Archive directory is inaccessible. Not modifying "
- "or clobbering the results.json file: " + str(error))
- return None
-
- builder_name = self._builder_name
- if results_json and builder_name not in results_json:
- logging.debug("Builder name (%s) is not in the results.json file."
- % builder_name)
-
- self._convert_json_to_current_version(results_json)
-
- if builder_name not in results_json:
- results_json[builder_name] = (
- self._create_results_for_builder_json())
-
- results_for_builder = results_json[builder_name]
-
- self._insert_generic_metadata(results_for_builder)
-
- self._insert_failure_summaries(results_for_builder)
-
-        # Update all the failing tests with result type and time.
- tests = results_for_builder[self.TESTS]
- all_failing_tests = set(self._failures.iterkeys())
- all_failing_tests.update(tests.iterkeys())
- for test in all_failing_tests:
- self._insert_test_time_and_result(test, tests)
-
- # Specify separators in order to get compact encoding.
- results_str = simplejson.dumps(results_json, separators=(',', ':'))
- return self.JSON_PREFIX + results_str + self.JSON_SUFFIX
-
- def _insert_failure_summaries(self, results_for_builder):
- """Inserts aggregate pass/failure statistics into the JSON.
- This method reads self._skipped_tests, self._passed_tests and
- self._failures and inserts FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT
- entries.
-
- Args:
- results_for_builder: Dictionary containing the test results for a
- single builder.
- """
- # Insert the number of tests that failed.
- self._insert_item_into_raw_list(results_for_builder,
- len(set(self._failures.keys()) | self._skipped_tests),
- self.FIXABLE_COUNT)
-
- # Create a pass/skip/failure summary dictionary.
- entry = {}
- entry[self.SKIP_RESULT] = len(self._skipped_tests)
- entry[self.PASS_RESULT] = len(self._passed_tests)
- get = entry.get
- for failure_type in self._failures.values():
- failure_char = self.FAILURE_TO_CHAR[failure_type]
- entry[failure_char] = get(failure_char, 0) + 1
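-        # Illustrative result (hypothetical counts): 100 passes, 2 skips,
-        # 3 text failures and 1 crash would produce
-        #   entry == {"P": 100, "X": 2, "F": 3, "C": 1}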
-
- # Insert the pass/skip/failure summary dictionary.
- self._insert_item_into_raw_list(results_for_builder, entry,
- self.FIXABLE)
-
- # Insert the number of all the tests that are supposed to pass.
- self._insert_item_into_raw_list(results_for_builder,
- len(self._skipped_tests | self._all_tests),
- self.ALL_FIXABLE_COUNT)
-
- def _insert_item_into_raw_list(self, results_for_builder, item, key):
- """Inserts the item into the list with the given key in the results for
- this builder. Creates the list if no such list exists.
-
- Args:
- results_for_builder: Dictionary containing the test results for a
- single builder.
- item: Number or string to insert into the list.
- key: Key in results_for_builder for the list to insert into.
- """
- if key in results_for_builder:
- raw_list = results_for_builder[key]
- else:
- raw_list = []
-
- raw_list.insert(0, item)
- raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
- results_for_builder[key] = raw_list
-
- def _insert_item_run_length_encoded(self, item, encoded_results):
- """Inserts the item into the run-length encoded results.
-
- Args:
- item: String or number to insert.
- encoded_results: run-length encoded results. An array of arrays, e.g.
- [[3,'A'],[1,'Q']] encodes AAAQ.
- """
- if len(encoded_results) and item == encoded_results[0][1]:
- num_results = encoded_results[0][0]
- if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
- encoded_results[0][0] = num_results + 1
- else:
- # Use a list instead of a class for the run-length encoding since
- # we want the serialized form to be concise.
- encoded_results.insert(0, [1, item])
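-        # Worked example (illustrative): encoded_results == [[3, 'A'], [1, 'Q']]
-        # encodes AAAQ, newest first. Inserting another 'A' bumps the head
-        # run to [[4, 'A'], [1, 'Q']]; inserting 'Q' instead prepends a new
-        # run, giving [[1, 'Q'], [3, 'A'], [1, 'Q']].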
-
- def _insert_generic_metadata(self, results_for_builder):
- """ Inserts generic metadata (such as version number, current time etc)
- into the JSON.
-
- Args:
- results_for_builder: Dictionary containing the test results for
- a single builder.
- """
- self._insert_item_into_raw_list(results_for_builder,
- self._build_number, self.BUILD_NUMBERS)
-
- path_to_webkit = path_utils.path_from_base('third_party', 'WebKit',
- 'WebCore')
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path_to_webkit),
- self.WEBKIT_SVN)
-
- path_to_chrome_base = path_utils.path_from_base()
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path_to_chrome_base),
- self.CHROME_SVN)
-
- self._insert_item_into_raw_list(results_for_builder,
- int(time.time()),
- self.TIME)
-
- def _insert_test_time_and_result(self, test_name, tests):
- """ Insert a test item with its results to the given tests dictionary.
-
- Args:
- tests: Dictionary containing test result entries.
- """
-
-        result = JSONResultsGenerator.PASS_RESULT
-        # Named test_time rather than time to avoid shadowing the time
-        # module imported above.
-        test_time = 0
-
-        if test_name not in self._all_tests:
-            result = JSONResultsGenerator.NO_DATA_RESULT
-
-        if test_name in self._failures:
-            result = self.FAILURE_TO_CHAR[self._failures[test_name]]
-
-        if test_name in self._test_timings:
-            # Floor for now to get the time in seconds.
-            test_time = int(self._test_timings[test_name])
-
-        if test_name not in tests:
-            tests[test_name] = self._create_results_and_times_json()
-
-        this_test = tests[test_name]
-        self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
-        self._insert_item_run_length_encoded(test_time, this_test[self.TIMES])
-        self._normalize_results_json(this_test, test_name, tests)
-
- def _convert_json_to_current_version(self, results_json):
- """If the JSON does not match the current version, converts it to the
- current version and adds in the new version number.
- """
- if (self.VERSION_KEY in results_json and
- results_json[self.VERSION_KEY] == self.VERSION):
- return
-
- results_json[self.VERSION_KEY] = self.VERSION
-
- def _create_results_and_times_json(self):
- results_and_times = {}
- results_and_times[self.RESULTS] = []
- results_and_times[self.TIMES] = []
- return results_and_times
-
- def _create_results_for_builder_json(self):
- results_for_builder = {}
- results_for_builder[self.TESTS] = {}
- return results_for_builder
-
- def _remove_items_over_max_number_of_builds(self, encoded_list):
- """Removes items from the run-length encoded list after the final
- item that exceeds the max number of builds to track.
-
- Args:
-          encoded_list: run-length encoded results. An array of arrays, e.g.
- [[3,'A'],[1,'Q']] encodes AAAQ.
- """
- num_builds = 0
- index = 0
- for result in encoded_list:
- num_builds = num_builds + result[0]
- index = index + 1
- if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
- return encoded_list[:index]
- return encoded_list
-
- def _normalize_results_json(self, test, test_name, tests):
- """ Prune tests where all runs pass or tests that no longer exist and
- truncate all results to maxNumberOfBuilds.
-
- Args:
- test: ResultsAndTimes object for this test.
- test_name: Name of the test.
- tests: The JSON object with all the test results for this builder.
- """
- test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
- test[self.RESULTS])
- test[self.TIMES] = self._remove_items_over_max_number_of_builds(
- test[self.TIMES])
-
- is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
- self.PASS_RESULT)
- is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
- self.NO_DATA_RESULT)
-        # Use 't', not 'time', so we don't shadow the time module.
-        max_time = max([t[1] for t in test[self.TIMES]])
-
- # Remove all passes/no-data from the results to reduce noise and
- # filesize. If a test passes every run, but takes > MIN_TIME to run,
- # don't throw away the data.
- if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
- del tests[test_name]
-
-    def _is_results_all_of_type(self, results, result_type):
-        """Returns whether all the results are of the given type
-        (e.g. all passes)."""
-        return len(results) == 1 and results[0][1] == result_type
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/lighttpd.conf b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/lighttpd.conf
deleted file mode 100644
index d3150dd..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/lighttpd.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-server.tag = "LightTPD/1.4.19 (Win32)"
-server.modules = ( "mod_accesslog",
- "mod_alias",
- "mod_cgi",
- "mod_rewrite" )
-
-# default document root required
-server.document-root = "."
-
-# files to check for if .../ is requested
-index-file.names = ( "index.php", "index.pl", "index.cgi",
- "index.html", "index.htm", "default.htm" )
-# mimetype mapping
-mimetype.assign = (
- ".gif" => "image/gif",
- ".jpg" => "image/jpeg",
- ".jpeg" => "image/jpeg",
- ".png" => "image/png",
- ".svg" => "image/svg+xml",
- ".css" => "text/css",
- ".html" => "text/html",
- ".htm" => "text/html",
- ".xhtml" => "application/xhtml+xml",
- ".js" => "text/javascript",
- ".log" => "text/plain",
- ".conf" => "text/plain",
- ".text" => "text/plain",
- ".txt" => "text/plain",
- ".dtd" => "text/xml",
- ".xml" => "text/xml",
- ".manifest" => "text/cache-manifest",
- )
-
-# Use the "Content-Type" extended attribute to obtain mime type if possible
-mimetype.use-xattr = "enable"
-
-##
-# which extensions should not be handled via static-file transfer
-#
-# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
-static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
-
-server.bind = "localhost"
-server.port = 8001
-
-## virtual directory listings
-dir-listing.activate = "enable"
-#dir-listing.encoding = "iso-8859-2"
-#dir-listing.external-css = "style/oldstyle.css"
-
-## enable debugging
-#debug.log-request-header = "enable"
-#debug.log-response-header = "enable"
-#debug.log-request-handling = "enable"
-#debug.log-file-not-found = "enable"
-
-#### SSL engine
-#ssl.engine = "enable"
-#ssl.pemfile = "server.pem"
-
-# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html)
-# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess
-# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html.
-# See the apache rewrite rule at
-# LayoutTests/http/tests/appcache/resources/intercept/.htaccess
-url.rewrite-once = (
- "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php",
- "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php"
-)
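-# For example (illustrative request), fetching
-#   http://127.0.0.1:8000/uri/intercept/anything
-# is rewritten by the first rule above to /uri/resources/print-uri.php.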
-
-# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess
-# to override charset for reply2.txt, reply2.xml, and reply4.txt.
-$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" {
- mimetype.assign = (
- ".txt" => "text/plain; charset=windows-1251",
- ".xml" => "text/xml; charset=windows-1251"
- )
-}
-$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" {
- mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" )
-}
-
-# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess
-# to override mime type for wrong-content-type.manifest.
-$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" {
- mimetype.assign = ( ".manifest" => "text/plain" )
-}
-
-# Autogenerated test-specific config follows.
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/metered_stream.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/metered_stream.py
deleted file mode 100644
index 575209e..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/metered_stream.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Package that implements a stream wrapper that has 'meters' as well as
-regular output. A 'meter' is a single line of text that can be erased
-and rewritten repeatedly, without producing multiple lines of output. It
-can be used to produce effects like progress bars.
-"""
-
-
-class MeteredStream(object):
- """This class is a wrapper around a stream that allows you to implement
- meters.
-
- It can be used like a stream, but calling update() will print
- the string followed by only a carriage return (instead of a carriage
- return and a line feed). This can be used to implement progress bars and
- other sorts of meters. Note that anything written by update() will be
- erased by a subsequent update(), write(), or flush()."""
-
- def __init__(self, verbose, stream):
- """
- Args:
- verbose: whether update is a no-op
- stream: output stream to write to
- """
- self._dirty = False
- self._verbose = verbose
- self._stream = stream
- self._last_update = ""
-
- def write(self, txt):
- """Write text directly to the stream, overwriting and resetting the
- meter."""
- if self._dirty:
- self.update("")
- self._dirty = False
- self._stream.write(txt)
-
- def flush(self):
- """Flush any buffered output."""
- self._stream.flush()
-
-    def update(self, txt):
-        """Write an update to the stream that will get overwritten by the
-        next update() or by a write().
-
-        This is used for progress updates that don't need to be preserved
-        in the log. Note that verbose disables this routine; we have this
-        in case we are logging lots of output and the update()s would get
-        lost or wouldn't work properly (typically because verbose streams
-        are redirected to files).
-
-        TODO(dpranke): figure out if there is a way to detect if we're
-        writing to a stream that handles CRs correctly (e.g., terminals).
-        That might be a cleaner way of handling this.
-        """
-        if self._verbose:
-            return
-
-        # Print the necessary number of backspaces to erase the previous
-        # message.
-        self._stream.write("\b" * len(self._last_update))
-        self._stream.write(txt)
-        num_remaining = len(self._last_update) - len(txt)
-        if num_remaining > 0:
-            self._stream.write(" " * num_remaining + "\b" * num_remaining)
-        self._last_update = txt
-        self._dirty = True
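-
-# Illustrative usage (a sketch, not part of the original module):
-#
-#   import sys
-#   meter = MeteredStream(verbose=False, stream=sys.stdout)
-#   meter.update("Running test 1 of 100...")
-#   meter.update("Running test 2 of 100...")  # erases the previous meter
-#   meter.write("done\n")  # clears any meter text, then writes normally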
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/path_utils.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/path_utils.py
deleted file mode 100644
index 9cf7378..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/path_utils.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This package contains utility methods for manipulating paths and
-filenames for test results and baselines. It also contains wrappers
-of a few routines in platform_utils.py so that platform_utils.py can
-be considered a 'protected' package - i.e., this file should be
-the only file that ever includes platform_utils. This leads to
-us including a few things that don't really have anything to do
-with paths, unfortunately."""
-
-import errno
-import os
-import stat
-import sys
-
-import platform_utils
-import platform_utils_win
-import platform_utils_mac
-import platform_utils_linux
-
-# Cache some values so we don't have to recalculate them. _basedir is
-# used by path_from_base() and caches the full (native) path to the top
-# of the source tree (/src). _baseline_search_path is used by
-# expected_baselines() and caches the list of native paths to search
-# for baseline results; _search_path_platform remembers which platform
-# that list was computed for.
-_basedir = None
-_baseline_search_path = None
-_search_path_platform = None
-
-
-class PathNotFound(Exception):
- pass
-
-
-def layout_tests_dir():
-    """Returns the fully-qualified path to the directory containing the
-    layout tests."""
-    return path_from_base('third_party', 'WebKit', 'LayoutTests')
-
-
-def chromium_baseline_path(platform=None):
- """Returns the full path to the directory containing expected
- baseline results from chromium ports. If |platform| is None, the
- currently executing platform is used.
-
- Note: although directly referencing individual platform_utils_* files is
- usually discouraged, we allow it here so that the rebaselining tool can
- pull baselines for platforms other than the host platform."""
-
- # Normalize the platform string.
- platform = platform_name(platform)
- if platform.startswith('chromium-mac'):
- return platform_utils_mac.baseline_path(platform)
- elif platform.startswith('chromium-win'):
- return platform_utils_win.baseline_path(platform)
- elif platform.startswith('chromium-linux'):
- return platform_utils_linux.baseline_path(platform)
-
- return platform_utils.baseline_path()
-
-
-def webkit_baseline_path(platform):
- """Returns the full path to the directory containing expected
- baseline results from WebKit ports."""
- return path_from_base('third_party', 'WebKit', 'LayoutTests',
- 'platform', platform)
-
-
-def baseline_search_path(platform=None):
- """Returns the list of directories to search for baselines/results for a
- given platform, in order of preference. Paths are relative to the top of
- the source tree. If parameter platform is None, returns the list for the
- current platform that the script is running on.
-
- Note: although directly referencing individual platform_utils_* files is
- usually discouraged, we allow it here so that the rebaselining tool can
- pull baselines for platforms other than the host platform."""
-
- # Normalize the platform name.
- platform = platform_name(platform)
- if platform.startswith('chromium-mac'):
- return platform_utils_mac.baseline_search_path(platform)
- elif platform.startswith('chromium-win'):
- return platform_utils_win.baseline_search_path(platform)
- elif platform.startswith('chromium-linux'):
- return platform_utils_linux.baseline_search_path(platform)
- return platform_utils.baseline_search_path()
-
-
-def expected_baselines(filename, suffix, platform=None, all_baselines=False):
- """Given a test name, finds where the baseline results are located.
-
- Args:
- filename: absolute filename to test file
- suffix: file suffix of the expected results, including dot; e.g. '.txt'
- or '.png'. This should not be None, but may be an empty string.
- platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to
- the current platform.
- all_baselines: If True, return an ordered list of all baseline paths
- for the given platform. If False, return only the first
- one.
-    Returns:
- a list of ( platform_dir, results_filename ), where
- platform_dir - abs path to the top of the results tree (or test tree)
- results_filename - relative path from top of tree to the results file
- (os.path.join of the two gives you the full path to the file,
- unless None was returned.)
- Return values will be in the format appropriate for the current platform
- (e.g., "\\" for path separators on Windows). If the results file is not
- found, then None will be returned for the directory, but the expected
- relative pathname will still be returned.
- """
- global _baseline_search_path
- global _search_path_platform
- testname = os.path.splitext(relative_test_filename(filename))[0]
-
- baseline_filename = testname + '-expected' + suffix
-
- if (_baseline_search_path is None) or (_search_path_platform != platform):
- _baseline_search_path = baseline_search_path(platform)
- _search_path_platform = platform
-
-    baselines = []
-    for platform_dir in _baseline_search_path:
-        if os.path.exists(os.path.join(platform_dir, baseline_filename)):
-            baselines.append((platform_dir, baseline_filename))
-            if not all_baselines:
-                # Only the first baseline found was requested.
-                return baselines
-
- # If it wasn't found in a platform directory, return the expected result
- # in the test directory, even if no such file actually exists.
- platform_dir = layout_tests_dir()
- if os.path.exists(os.path.join(platform_dir, baseline_filename)):
- baselines.append((platform_dir, baseline_filename))
-
- if baselines:
- return baselines
-
- return [(None, baseline_filename)]
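-
-# Illustrative call (hypothetical paths): for the test
-#   <src>/third_party/WebKit/LayoutTests/fast/forms/foo.html
-# and suffix '.txt', expected_baselines() returns
-#   [(<platform baseline dir>, 'fast/forms/foo-expected.txt')]
-# when a baseline exists somewhere on the search path, and
-#   [(None, 'fast/forms/foo-expected.txt')]
-# when none is found.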
-
-
-def expected_filename(filename, suffix):
- """Given a test name, returns an absolute path to its expected results.
-
- If no expected results are found in any of the searched directories, the
- directory in which the test itself is located will be returned. The return
- value is in the format appropriate for the platform (e.g., "\\" for
- path separators on windows).
-
-    Args:
-       filename: absolute filename to test file
-       suffix: file suffix of the expected results, including dot; e.g. '.txt'
-           or '.png'. This should not be None, but may be an empty string.
-    """
- platform_dir, baseline_filename = expected_baselines(filename, suffix)[0]
- if platform_dir:
- return os.path.join(platform_dir, baseline_filename)
- return os.path.join(layout_tests_dir(), baseline_filename)
-
-
-def relative_test_filename(filename):
- """Provide the filename of the test relative to the layout tests
- directory as a unix style path (a/b/c)."""
- return _win_path_to_unix(filename[len(layout_tests_dir()) + 1:])
-
-
-def _win_path_to_unix(path):
- """Convert a windows path to use unix-style path separators (a/b/c)."""
- return path.replace('\\', '/')
-
-#
-# Routines that are arguably platform-specific but have been made
-# generic for now (they used to be in platform_utils_*)
-#
-
-
-def filename_to_uri(full_path):
- """Convert a test file to a URI."""
- LAYOUTTEST_HTTP_DIR = "http/tests/"
- LAYOUTTEST_WEBSOCKET_DIR = "websocket/tests/"
-
- relative_path = _win_path_to_unix(relative_test_filename(full_path))
- port = None
- use_ssl = False
-
- if relative_path.startswith(LAYOUTTEST_HTTP_DIR):
- # http/tests/ run off port 8000 and ssl/ off 8443
- relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):]
- port = 8000
- elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR):
- # websocket/tests/ run off port 8880 and 9323
- # Note: the root is /, not websocket/tests/
- port = 8880
-
- # Make http/tests/local run as local files. This is to mimic the
- # logic in run-webkit-tests.
- # TODO(jianli): Consider extending this to "media/".
- if port and not relative_path.startswith("local/"):
- if relative_path.startswith("ssl/"):
- port += 443
- protocol = "https"
- else:
- protocol = "http"
- return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path)
-
- if sys.platform in ('cygwin', 'win32'):
- return "file:///" + get_absolute_path(full_path)
- return "file://" + get_absolute_path(full_path)
-
-
-def get_absolute_path(path):
- """Returns an absolute UNIX path."""
- return _win_path_to_unix(os.path.abspath(path))
-
-
-def maybe_make_directory(*path):
- """Creates the specified directory if it doesn't already exist."""
- # This is a reimplementation of google.path_utils.MaybeMakeDirectory().
- try:
- os.makedirs(os.path.join(*path))
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
-
-
-def path_from_base(*comps):
- """Returns an absolute filename from a set of components specified
- relative to the top of the source tree. If the path does not exist,
- the exception PathNotFound is raised."""
- # This is a reimplementation of google.path_utils.PathFromBase().
- global _basedir
- if _basedir is None:
- # We compute the top of the source tree by finding the absolute
- # path of this source file, and then climbing up three directories
- # as given in subpath. If we move this file, subpath needs to be
- # updated.
- path = os.path.abspath(__file__)
- subpath = os.path.join('webkit', 'tools', 'layout_tests')
- _basedir = path[:path.index(subpath)]
- path = os.path.join(_basedir, *comps)
- if not os.path.exists(path):
- raise PathNotFound('could not find %s' % (path))
- return path
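
A worked example under an assumed checkout root: if this file lives at
/home/me/src/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/path_utils.py,
then _basedir is computed as '/home/me/src/', and:

    # Hypothetical paths, for illustration only.
    path_from_base('third_party', 'simplejson')
    # -> '/home/me/src/third_party/simplejson' (PathNotFound if absent)
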
-
-
-def remove_directory(*path):
- """Recursively removes a directory, even if it's marked read-only.
-
- Remove the directory located at *path, if it exists.
-
- shutil.rmtree() doesn't work on Windows if any of the files or directories
- are read-only, which svn repositories and some .svn files are. We need to
- be able to force the files to be writable (i.e., deletable) as we traverse
- the tree.
-
- Even with all this, Windows still sometimes fails to delete a file, citing
- a permission error (maybe something to do with antivirus scans or disk
- indexing). The best suggestion any of the user forums had was to wait a
- bit and try again, so we do that too. It's hand-waving, but sometimes it
- works. :/
- """
- file_path = os.path.join(*path)
- if not os.path.exists(file_path):
- return
-
- win32 = False
- if sys.platform == 'win32':
- win32 = True
- # Some people don't have the APIs installed. In that case we'll do
- # without.
- try:
- win32api = __import__('win32api')
- win32con = __import__('win32con')
- except ImportError:
- win32 = False
-
- def remove_with_retry(rmfunc, path):
- os.chmod(path, stat.S_IWRITE)
- if win32:
- win32api.SetFileAttributes(path,
- win32con.FILE_ATTRIBUTE_NORMAL)
- try:
- return rmfunc(path)
- except EnvironmentError, e:
- if e.errno != errno.EACCES:
- raise
- print 'Failed to delete %s: trying again' % repr(path)
- time.sleep(0.1)
- return rmfunc(path)
- else:
-
- def remove_with_retry(rmfunc, path):
- if os.path.islink(path):
- return os.remove(path)
- else:
- return rmfunc(path)
-
- for root, dirs, files in os.walk(file_path, topdown=False):
- # For POSIX: making the directory writable guarantees removability.
- # Windows will ignore the non-read-only bits in the chmod value.
- os.chmod(root, 0770)
- for name in files:
- remove_with_retry(os.remove, os.path.join(root, name))
- for name in dirs:
- remove_with_retry(os.rmdir, os.path.join(root, name))
-
- remove_with_retry(os.rmdir, file_path)
-
-#
-# Wrappers around platform_utils
-#
-
-
-def platform_name(platform=None):
- """Returns the appropriate chromium platform name for |platform|. If
- |platform| is None, returns the name of the chromium platform on the
- currently running system. If |platform| is of the form 'chromium-*',
- it is returned unchanged, otherwise 'chromium-' is prepended."""
- if platform is None:
- return platform_utils.platform_name()
- if not platform.startswith('chromium-'):
- platform = "chromium-" + platform
- return platform
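
Doctest-style examples of the normalization (the no-argument result depends
on the host system):

    # platform_name('win')          -> 'chromium-win'
    # platform_name('chromium-mac') -> 'chromium-mac'  (already prefixed)
    # platform_name()               -> platform_utils.platform_name()
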
-
-
-def platform_version():
- return platform_utils.platform_version()
-
-
-def lighttpd_executable_path():
- return platform_utils.lighttpd_executable_path()
-
-
-def lighttpd_module_path():
- return platform_utils.lighttpd_module_path()
-
-
-def lighttpd_php_path():
- return platform_utils.lighttpd_php_path()
-
-
-def wdiff_path():
- return platform_utils.wdiff_path()
-
-
-def test_shell_path(target):
- return platform_utils.test_shell_path(target)
-
-
-def image_diff_path(target):
- return platform_utils.image_diff_path(target)
-
-
-def layout_test_helper_path(target):
- return platform_utils.layout_test_helper_path(target)
-
-
-def fuzzy_match_path():
- return platform_utils.fuzzy_match_path()
-
-
-def shut_down_http_server(server_pid):
- return platform_utils.shut_down_http_server(server_pid)
-
-
-def kill_all_test_shells():
- platform_utils.kill_all_test_shells()
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils.py
deleted file mode 100644
index c89bac8..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Platform-specific utilities and pseudo-constants
-
-Any functions whose implementations or values differ from one platform to
-another should be defined in their respective platform_utils_<platform>.py
-modules. The appropriate one of those will be imported into this module to
-provide callers with a common, platform-independent interface.
-
-This file should only ever be imported by layout_package.path_utils.
-"""
-
-import sys
-
-# We may not support the version of Python that a user has installed (Cygwin
-# especially has had problems), but we'll allow the platform utils to be
-# included in any case so we don't get an import error.
-if sys.platform in ('cygwin', 'win32'):
- from platform_utils_win import *
-elif sys.platform == 'darwin':
- from platform_utils_mac import *
-elif sys.platform in ('linux', 'linux2', 'freebsd7', 'openbsd4', 'sunos5'):
- from platform_utils_linux import *
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_linux.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_linux.py
deleted file mode 100644
index 2aad0a3..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_linux.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This is the Linux implementation of the layout_package.platform_utils
- package. This file should only be imported by that package."""
-
-import logging
-import os
-import signal
-import subprocess
-import sys
-
-import path_utils
-import platform_utils_win
-
-
-def platform_name():
- """Returns the name of the platform we're currently running on."""
- return 'chromium-linux' + platform_version()
-
-
-def platform_version():
- """Returns the version string for the platform, e.g. '-vista' or
- '-snowleopard'. If the platform does not distinguish between
- minor versions, it returns ''."""
- return ''
-
-
-def get_num_cores():
- """Returns the number of cores on the machine. For hyperthreaded machines,
- this will be double the number of actual processors."""
- num_cores = os.sysconf("SC_NPROCESSORS_ONLN")
- if isinstance(num_cores, int) and num_cores > 0:
- return num_cores
- return 1
-
-
-def baseline_path(platform=None):
- """Returns the path relative to the top of the source tree for the
- baselines for the specified platform version. If |platform| is None,
- then the version currently in use is used."""
- if platform is None:
- platform = platform_name()
- return path_utils.path_from_base('webkit', 'data', 'layout_tests',
- 'platform', platform, 'LayoutTests')
-
-
-def baseline_search_path(platform=None):
- """Returns the list of directories to search for baselines/results, in
- order of preference. Paths are relative to the top of the source tree."""
- return [baseline_path(platform),
- platform_utils_win.baseline_path('chromium-win'),
- path_utils.webkit_baseline_path('win'),
- path_utils.webkit_baseline_path('mac')]
-
-
-def apache_executable_path():
- """Returns the executable path to start Apache"""
- path = os.path.join("/usr", "sbin", "apache2")
- if os.path.exists(path):
- return path
- print "Unable to fine Apache executable %s" % path
- _missing_apache()
-
-
-def apache_config_file_path():
- """Returns the path to Apache config file"""
- return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
- "http", "conf", "apache2-debian-httpd.conf")
-
-
-def lighttpd_executable_path():
- """Returns the executable path to start LigHTTPd"""
- binpath = "/usr/sbin/lighttpd"
- if os.path.exists(binpath):
- return binpath
- print "Unable to find LigHTTPd executable %s" % binpath
- _missing_lighttpd()
-
-
-def lighttpd_module_path():
- """Returns the library module path for LigHTTPd"""
- modpath = "/usr/lib/lighttpd"
- if os.path.exists(modpath):
- return modpath
- print "Unable to find LigHTTPd modules %s" % modpath
- _missing_lighttpd()
-
-
-def lighttpd_php_path():
- """Returns the PHP executable path for LigHTTPd"""
- binpath = "/usr/bin/php-cgi"
- if os.path.exists(binpath):
- return binpath
- print "Unable to find PHP CGI executable %s" % binpath
- _missing_lighttpd()
-
-
-def wdiff_path():
- """Path to the WDiff executable, which we assume is already installed and
- in the user's $PATH."""
- return 'wdiff'
-
-
-def image_diff_path(target):
- """Path to the image_diff binary.
-
- Args:
- target: Build target mode (debug or release)"""
- return _path_from_build_results(target, 'image_diff')
-
-
-def layout_test_helper_path(target):
- """Path to the layout_test helper binary, if needed, empty otherwise"""
- return ''
-
-
-def test_shell_path(target):
- """Return the platform-specific binary path for our TestShell.
-
- Args:
- target: Build target mode (debug or release) """
- if target in ('Debug', 'Release'):
- try:
- debug_path = _path_from_build_results('Debug', 'test_shell')
- release_path = _path_from_build_results('Release', 'test_shell')
-
- debug_mtime = os.stat(debug_path).st_mtime
- release_mtime = os.stat(release_path).st_mtime
-
- if (debug_mtime > release_mtime and target == 'Release' or
- release_mtime > debug_mtime and target == 'Debug'):
- logging.info('\x1b[31mWarning: you are not running the most '
- 'recent test_shell binary. Pass --debug to select '
- 'the Debug build, or omit it for Release.\x1b[0m')
- # This will fail if we don't have both a debug and release binary.
- # That's fine because, in this case, we must already be running the
- # most up-to-date one.
- except path_utils.PathNotFound:
- pass
-
- return _path_from_build_results(target, 'test_shell')
-
-
-def fuzzy_match_path():
- """Return the path to the fuzzy matcher binary."""
- return path_utils.path_from_base('third_party', 'fuzzymatch', 'fuzzymatch')
-
-
-def shut_down_http_server(server_pid):
- """Shut down the lighttpd web server. Blocks until it's fully shut down.
-
- Args:
- server_pid: The process ID of the running server.
- """
- # server_pid is not set when "http_server.py stop" is run manually.
- if server_pid is None:
- # This isn't ideal, since it could conflict with web server processes
- # not started by http_server.py, but good enough for now.
- kill_all_process('lighttpd')
- kill_all_process('apache2')
- else:
- try:
- os.kill(server_pid, signal.SIGTERM)
- # TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
- except OSError:
- # Sometimes we get a bad PID (e.g. from a stale httpd.pid file),
- # so if kill fails on the given PID, just try to 'killall' web
- # servers.
- shut_down_http_server(None)
-
-
-def kill_process(pid):
- """Forcefully kill the process.
-
- Args:
- pid: The id of the process to be killed.
- """
- os.kill(pid, signal.SIGKILL)
-
-
-def kill_all_process(process_name):
- null = open(os.devnull, 'w')
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
-
-def kill_all_test_shells():
- """Kills all instances of the test_shell binary currently running."""
- kill_all_process('test_shell')
-
-#
-# Private helper functions
-#
-
-
-def _missing_lighttpd():
- print 'Please install using: "sudo apt-get install lighttpd php5-cgi"'
- print 'For complete Linux build requirements, please see:'
- print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
- sys.exit(1)
-
-
-def _missing_apache():
- print ('Please install using: "sudo apt-get install apache2 '
- 'libapache2-mod-php5"')
- print 'For complete Linux build requirements, please see:'
- print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
- sys.exit(1)
-
-
-def _path_from_build_results(*pathies):
- # FIXME(dkegel): use latest or warn if more than one found?
- for dir in ["sconsbuild", "out", "xcodebuild"]:
- try:
- return path_utils.path_from_base(dir, *pathies)
- except:
- pass
- raise path_utils.PathNotFound("Unable to find %s in build tree" %
- (os.path.join(*pathies)))
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_mac.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_mac.py
deleted file mode 100644
index 5cec147..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_mac.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright (c) 2008-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This is the Mac implementation of the layout_package.platform_utils
- package. This file should only be imported by that package."""
-
-import os
-import platform
-import signal
-import subprocess
-
-import path_utils
-
-
-def platform_name():
- """Returns the name of the platform we're currently running on."""
- # At the moment all chromium mac results are version-independent. At some
- # point we may need to return 'chromium-mac' + PlatformVersion()
- return 'chromium-mac'
-
-
-def platform_version():
- """Returns the version string for the platform, e.g. '-vista' or
- '-snowleopard'. If the platform does not distinguish between
- minor versions, it returns ''."""
- os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
- if not os_version_string:
- return '-leopard'
-
- release_version = int(os_version_string.split('.')[1])
-
- # we don't support 'tiger' or earlier releases
- if release_version == 5:
- return '-leopard'
- elif release_version == 6:
- return '-snowleopard'
-
- return ''
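
The mapping, spelled out per the branches above ('x' is any minor version):

    # platform.mac_ver()[0] -> suffix
    # '10.5.x'              -> '-leopard'
    # '10.6.x'              -> '-snowleopard'
    # ''  (version unknown) -> '-leopard'
    # anything else         -> ''
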
-
-
-def get_num_cores():
- """Returns the number of cores on the machine. For hyperthreaded machines,
- this will be double the number of actual processors."""
- cmd = ['/usr/sbin/sysctl', '-n', 'hw.ncpu']
- return int(subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout.read())
-
-
-def baseline_path(platform=None):
- """Returns the path relative to the top of the source tree for the
- baselines for the specified platform version. If |platform| is None,
- then the version currently in use is used."""
- if platform is None:
- platform = platform_name()
- return path_utils.path_from_base('webkit', 'data', 'layout_tests',
- 'platform', platform, 'LayoutTests')
-
-# TODO: We should add leopard and snowleopard to the list of paths to check
-# once we start running the tests from snowleopard.
-
-
-def baseline_search_path(platform=None):
- """Returns the list of directories to search for baselines/results, in
- order of preference. Paths are relative to the top of the source tree."""
- return [baseline_path(platform),
- path_utils.webkit_baseline_path('mac' + platform_version()),
- path_utils.webkit_baseline_path('mac')]
-
-
-def wdiff_path():
- """Path to the WDiff executable, which we assume is already installed and
- in the user's $PATH."""
- return 'wdiff'
-
-
-def image_diff_path(target):
- """Path to the image_diff executable
-
- Args:
- target: build type - 'Debug','Release',etc."""
- return path_utils.path_from_base('xcodebuild', target, 'image_diff')
-
-
-def layout_test_helper_path(target):
- """Path to the layout_test_helper executable, if needed, empty otherwise
-
- Args:
- target: build type - 'Debug','Release',etc."""
- return path_utils.path_from_base('xcodebuild', target,
- 'layout_test_helper')
-
-
-def test_shell_path(target):
- """Path to the test_shell executable.
-
- Args:
- target: build type - 'Debug','Release',etc."""
- # TODO(pinkerton): make |target| happy with case-sensitive file systems.
- return path_utils.path_from_base('xcodebuild', target, 'TestShell.app',
- 'Contents', 'MacOS', 'TestShell')
-
-
-def apache_executable_path():
- """Returns the executable path to start Apache"""
- return os.path.join("/usr", "sbin", "httpd")
-
-
-def apache_config_file_path():
- """Returns the path to Apache config file"""
- return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
- "http", "conf", "apache2-httpd.conf")
-
-
-def lighttpd_executable_path():
- """Returns the executable path to start LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'mac',
- 'bin', 'lighttpd')
-
-
-def lighttpd_module_path():
- """Returns the library module path for LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'lib')
-
-
-def lighttpd_php_path():
- """Returns the PHP executable path for LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'bin',
- 'php-cgi')
-
-
-def shut_down_http_server(server_pid):
- """Shut down the lighttpd web server. Blocks until it's fully shut down.
-
- Args:
- server_pid: The process ID of the running server.
- """
- # server_pid is not set when "http_server.py stop" is run manually.
- if server_pid is None:
- # TODO(mmoss) This isn't ideal, since it could conflict with lighttpd
- # processes not started by http_server.py, but good enough for now.
- kill_all_process('lighttpd')
- kill_all_process('httpd')
- else:
- try:
- os.kill(server_pid, signal.SIGTERM)
- # TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
- except OSError:
- # Sometimes we get a bad PID (e.g. from a stale httpd.pid file),
- # so if kill fails on the given PID, just try to 'killall' web
- # servers.
- shut_down_http_server(None)
-
-
-def kill_process(pid):
- """Forcefully kill the process.
-
- Args:
- pid: The id of the process to be killed.
- """
- os.kill(pid, signal.SIGKILL)
-
-
-def kill_all_process(process_name):
- # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
- # -SIGNALNUMBER must come first. Example problem:
- # $ killall -u $USER -TERM lighttpd
- # killall: illegal option -- T
- # Use of the earlier -TERM placement is just fine on 10.5.
- null = open(os.devnull, 'w')
- subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
- process_name], stderr=null)
- null.close()
-
-
-def kill_all_test_shells():
- """Kills all instances of the test_shell binary currently running."""
- kill_all_process('TestShell')
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_win.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_win.py
deleted file mode 100644
index bc87b8f..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/platform_utils_win.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This is the Linux implementation of the layout_package.platform_utils
- package. This file should only be imported by that package."""
-
-import os
-import path_utils
-import subprocess
-import sys
-
-
-def platform_name():
- """Returns the name of the platform we're currently running on."""
- # Returns a version-specific name such as 'chromium-win-xp'; each
- # version also needs an entry in baseline_search_path() below.
- return 'chromium-win' + platform_version()
-
-
-def platform_version():
- """Returns the version string for the platform, e.g. '-vista' or
- '-snowleopard'. If the platform does not distinguish between
- minor versions, it returns ''."""
- winver = sys.getwindowsversion()
- if winver[0] == 6 and winver[1] == 1:
- return '-7'
- if winver[0] == 6 and winver[1] == 0:
- return '-vista'
- if winver[0] == 5 and winver[1] in (1, 2):
- return '-xp'
- return ''
-
-
-def get_num_cores():
- """Returns the number of cores on the machine. For hyperthreaded machines,
- this will be double the number of actual processors."""
- return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
-
-
-def baseline_path(platform=None):
- """Returns the path relative to the top of the source tree for the
- baselines for the specified platform version. If |platform| is None,
- then the version currently in use is used."""
- if platform is None:
- platform = platform_name()
- return path_utils.path_from_base('webkit', 'data', 'layout_tests',
- 'platform', platform, 'LayoutTests')
-
-
-def baseline_search_path(platform=None):
- """Returns the list of directories to search for baselines/results, in
- order of preference. Paths are relative to the top of the source tree."""
- dirs = []
- if platform is None:
- platform = platform_name()
-
- if platform == 'chromium-win-xp':
- dirs.append(baseline_path(platform))
- if platform in ('chromium-win-xp', 'chromium-win-vista'):
- dirs.append(baseline_path('chromium-win-vista'))
- dirs.append(baseline_path('chromium-win'))
- dirs.append(path_utils.webkit_baseline_path('win'))
- dirs.append(path_utils.webkit_baseline_path('mac'))
- return dirs
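
For example, on 'chromium-win-xp' the cascade works out to the list below
(assuming webkit_baseline_path(), defined elsewhere in this file, resolves to
the upstream LayoutTests platform directories):

    # webkit/data/layout_tests/platform/chromium-win-xp/LayoutTests
    # webkit/data/layout_tests/platform/chromium-win-vista/LayoutTests
    # webkit/data/layout_tests/platform/chromium-win/LayoutTests
    # third_party/WebKit/LayoutTests/platform/win
    # third_party/WebKit/LayoutTests/platform/mac
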
-
-
-def wdiff_path():
- """Path to the WDiff executable, whose binary is checked in on Win"""
- return path_utils.path_from_base('third_party', 'cygwin', 'bin',
- 'wdiff.exe')
-
-
-def image_diff_path(target):
- """Return the platform-specific binary path for the image compare util.
- We use this if we can't find the binary in the default location
- in path_utils.
-
- Args:
- target: Build target mode (debug or release)
- """
- return _find_binary(target, 'image_diff.exe')
-
-
-def layout_test_helper_path(target):
- """Return the platform-specific binary path for the layout test helper.
- We use this if we can't find the binary in the default location
- in path_utils.
-
- Args:
- target: Build target mode (debug or release)
- """
- return _find_binary(target, 'layout_test_helper.exe')
-
-
-def test_shell_path(target):
- """Return the platform-specific binary path for our TestShell.
- We use this if we can't find the binary in the default location
- in path_utils.
-
- Args:
- target: Build target mode (debug or release)
- """
- return _find_binary(target, 'test_shell.exe')
-
-
-def apache_executable_path():
- """Returns the executable path to start Apache"""
- path = path_utils.path_from_base('third_party', 'cygwin', "usr", "sbin")
- # Don't return httpd.exe since we want to use this from cygwin.
- return os.path.join(path, "httpd")
-
-
-def apache_config_file_path():
- """Returns the path to Apache config file"""
- return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
- "http", "conf", "cygwin-httpd.conf")
-
-
-def lighttpd_executable_path():
- """Returns the executable path to start LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'win',
- 'LightTPD.exe')
-
-
-def lighttpd_module_path():
- """Returns the library module path for LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'lib')
-
-
-def lighttpd_php_path():
- """Returns the PHP executable path for LigHTTPd"""
- return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'php5',
- 'php-cgi.exe')
-
-
-def shut_down_http_server(server_pid):
- """Shut down the lighttpd web server. Blocks until it's fully shut down.
-
- Args:
- server_pid: The process ID of the running server.
- Unused in this implementation of the method.
- """
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
-
-
-def kill_process(pid):
- """Forcefully kill the process.
-
- Args:
- pid: The id of the process to be killed.
- """
- subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
-
-def kill_all_test_shells():
- """Kills all instances of the test_shell binary currently running."""
- subprocess.Popen(('taskkill.exe', '/f', '/im', 'test_shell.exe'),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).wait()
-
-#
-# Private helper functions.
-#
-
-
-def _find_binary(target, binary):
- """On Windows, we look for binaries that we compile in potentially
- two places: src/webkit/$target (preferably, which we get if we
- built using webkit_glue.gyp), or src/chrome/$target (if compiled some
- other way)."""
- try:
- return path_utils.path_from_base('webkit', target, binary)
- except path_utils.PathNotFound:
- try:
- return path_utils.path_from_base('chrome', target, binary)
- except path_utils.PathNotFound:
- return path_utils.path_from_base('build', target, binary)
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_expectations.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_expectations.py
deleted file mode 100644
index 90b6105..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_expectations.py
+++ /dev/null
@@ -1,792 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A helper class for reading in and dealing with tests expectations
-for layout tests.
-"""
-
-import logging
-import os
-import re
-import sys
-import time
-import path_utils
-
-sys.path.append(path_utils.path_from_base('third_party'))
-import simplejson
-
-# Test expectation and modifier constants.
-(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
- DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)
-
-# Test expectation file update action constants
-(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
-
-
-class TestExpectations:
- TEST_LIST = "test_expectations.txt"
-
- def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode,
- tests_are_present=True):
- """Reads the test expectations files from the given directory."""
- path = os.path.join(directory, self.TEST_LIST)
- self._expected_failures = TestExpectationsFile(path, tests, platform,
- is_debug_mode, is_lint_mode, tests_are_present=tests_are_present)
-
- # TODO(ojan): Allow for removing skipped tests when getting the list of
- # tests to run, but not when getting metrics.
- # TODO(ojan): Replace the Get* calls here with the more sane API exposed
- # by TestExpectationsFile below. Maybe merge the two classes entirely?
-
- def get_expectations_json_for_all_platforms(self):
- return (
- self._expected_failures.get_expectations_json_for_all_platforms())
-
- def get_rebaselining_failures(self):
- return (self._expected_failures.get_test_set(REBASELINE, FAIL) |
- self._expected_failures.get_test_set(REBASELINE, IMAGE) |
- self._expected_failures.get_test_set(REBASELINE, TEXT) |
- self._expected_failures.get_test_set(REBASELINE,
- IMAGE_PLUS_TEXT))
-
- def get_options(self, test):
- return self._expected_failures.get_options(test)
-
- def get_expectations(self, test):
- return self._expected_failures.get_expectations(test)
-
- def get_expectations_string(self, test):
- """Returns the expectatons for the given test as an uppercase string.
- If there are no expectations for the test, then "PASS" is returned."""
- expectations = self.get_expectations(test)
- retval = []
-
- for expectation in expectations:
- for item in TestExpectationsFile.EXPECTATIONS.items():
- if item[1] == expectation:
- retval.append(item[0])
- break
-
- return " ".join(retval).upper()
-
- def get_timeline_for_test(self, test):
- return self._expected_failures.get_timeline_for_test(test)
-
- def get_tests_with_result_type(self, result_type):
- return self._expected_failures.get_tests_with_result_type(result_type)
-
- def get_tests_with_timeline(self, timeline):
- return self._expected_failures.get_tests_with_timeline(timeline)
-
- def matches_an_expected_result(self, test, result):
- """Returns whether we got one of the expected results for this test."""
- return (result in self._expected_failures.get_expectations(test) or
- (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and
- FAIL in self._expected_failures.get_expectations(test)) or
- result == MISSING and self.is_rebaselining(test) or
- result == SKIP and self._expected_failures.has_modifier(test,
- SKIP))
-
- def is_rebaselining(self, test):
- return self._expected_failures.has_modifier(test, REBASELINE)
-
- def has_modifier(self, test, modifier):
- return self._expected_failures.has_modifier(test, modifier)
-
- def remove_platform_from_file(self, tests, platform, backup=False):
- return self._expected_failures.remove_platform_from_file(tests,
- platform,
- backup)
-
-
-def strip_comments(line):
- """Strips comments from a line and return None if the line is empty
- or else the contents of line with leading and trailing spaces removed
- and all other whitespace collapsed"""
-
- comment_index = line.find('//')
- if comment_index == -1:
- comment_index = len(line)
-
- line = re.sub(r'\s+', ' ', line[:comment_index].strip())
- if line == '':
- return None
- else:
- return line
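
Doctest-style examples of the contract:

    # strip_comments('foo   bar // baz')  -> 'foo bar'
    # strip_comments('// only a comment') -> None
    # strip_comments('   ')               -> None
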
-
-
-class ModifiersAndExpectations:
- """A holder for modifiers and expectations on a test that serializes to
- JSON."""
-
- def __init__(self, modifiers, expectations):
- self.modifiers = modifiers
- self.expectations = expectations
-
-
-class ExpectationsJsonEncoder(simplejson.JSONEncoder):
- """JSON encoder that can handle ModifiersAndExpectations objects.
- """
-
- def default(self, obj):
- if isinstance(obj, ModifiersAndExpectations):
- return {"modifiers": obj.modifiers,
- "expectations": obj.expectations}
- else:
- return simplejson.JSONEncoder.default(self, obj)
-
-
-class TestExpectationsFile:
- """Test expectation files consist of lines with specifications of what
- to expect from layout test cases. The test cases can be directories
- in which case the expectations apply to all test cases in that
- directory and any subdirectory. The format of the file is along the
- lines of:
-
- LayoutTests/fast/js/fixme.js = FAIL
- LayoutTests/fast/js/flaky.js = FAIL PASS
- LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
- ...
-
- To add other options:
- SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
- DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
-
- SKIP: Doesn't run the test.
- SLOW: The test takes a long time to run, but does not timeout indefinitely.
- WONTFIX: For tests that we never intend to pass on a given platform.
- DEFER: Test does not count in our statistics for the current release.
- DEBUG: Expectations apply only to the debug build.
- RELEASE: Expectations apply only to release build.
- LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
- platforms.
-
- Notes:
- -A test cannot be both SLOW and TIMEOUT
- -A test cannot be both DEFER and WONTFIX
- -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
- a migratory state that currently means either IMAGE, TEXT, or
- IMAGE+TEXT. Once we have finished migrating the expectations, we will
- change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
- identifier.
- -A test can be included twice, but not via the same path.
- -If a test is included twice, then the more precise path wins.
- -CRASH tests cannot be DEFER or WONTFIX
- """
-
- EXPECTATIONS = {'pass': PASS,
- 'fail': FAIL,
- 'text': TEXT,
- 'image': IMAGE,
- 'image+text': IMAGE_PLUS_TEXT,
- 'timeout': TIMEOUT,
- 'crash': CRASH,
- 'missing': MISSING}
-
- EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
- PASS: ('pass', 'passes'),
- FAIL: ('failure', 'failures'),
- TEXT: ('text diff mismatch',
- 'text diff mismatch'),
- IMAGE: ('image mismatch', 'image mismatch'),
- IMAGE_PLUS_TEXT: ('image and text mismatch',
- 'image and text mismatch'),
- CRASH: ('test shell crash',
- 'test shell crashes'),
- TIMEOUT: ('test timed out', 'tests timed out'),
- MISSING: ('no expected result found',
- 'no expected results found')}
-
- EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
- TEXT, IMAGE, FAIL, SKIP)
-
- BASE_PLATFORMS = ('linux', 'mac', 'win')
- PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7')
-
- BUILD_TYPES = ('debug', 'release')
-
- MODIFIERS = {'skip': SKIP,
- 'wontfix': WONTFIX,
- 'defer': DEFER,
- 'slow': SLOW,
- 'rebaseline': REBASELINE,
- 'none': NONE}
-
- TIMELINES = {'wontfix': WONTFIX,
- 'now': NOW,
- 'defer': DEFER}
-
- RESULT_TYPES = {'skip': SKIP,
- 'pass': PASS,
- 'fail': FAIL,
- 'flaky': FLAKY}
-
- def __init__(self, path, full_test_list, platform, is_debug_mode,
- is_lint_mode, expectations_as_str=None, suppress_errors=False,
- tests_are_present=True):
- """
- path: The path to the expectation file. An error is thrown if a test is
- listed more than once.
- full_test_list: The list of all tests to be run pending processing of
- the expectations for those tests.
- platform: Which platform from self.PLATFORMS to filter tests for.
- is_debug_mode: Whether we are testing a test_shell built in debug mode.
- is_lint_mode: Whether this is just linting test_expectations.txt.
- expectations_as_str: Contents of the expectations file. Used instead of
- the path. This makes unittesting sane.
- suppress_errors: Whether to suppress lint errors.
- tests_are_present: Whether the test files are present in the local
- filesystem. The LTTF Dashboard uses False here to avoid having to
- keep a local copy of the tree.
- """
-
- self._path = path
- self._expectations_as_str = expectations_as_str
- self._is_lint_mode = is_lint_mode
- self._tests_are_present = tests_are_present
- self._full_test_list = full_test_list
- self._suppress_errors = suppress_errors
- self._errors = []
- self._non_fatal_errors = []
- self._platform = self.to_test_platform_name(platform)
- if self._platform is None:
- raise Exception("Unknown platform '%s'" % (platform))
- self._is_debug_mode = is_debug_mode
-
- # Maps relative test paths as listed in the expectations file to a
- # list of maps containing modifiers and expectations for each time
- # the test is listed in the expectations file.
- self._all_expectations = {}
-
- # Maps a test to its list of expectations.
- self._test_to_expectations = {}
-
- # Maps a test to its list of options (string values)
- self._test_to_options = {}
-
- # Maps a test to its list of modifiers: the constants associated with
- # the options minus any bug or platform strings
- self._test_to_modifiers = {}
-
- # Maps a test to the base path that it was listed with in the list.
- self._test_list_paths = {}
-
- self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
- self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS)
- self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
- self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)
-
- self._read(self._get_iterable_expectations())
-
- def _dict_of_sets(self, strings_to_constants):
- """Takes a dict of strings->constants and returns a dict mapping
- each constant to an empty set."""
- d = {}
- for c in strings_to_constants.values():
- d[c] = set()
- return d
-
- def _get_iterable_expectations(self):
- """Returns an object that can be iterated over. Allows for not caring
- about whether we're iterating over a file or a new-line separated
- string."""
- if self._expectations_as_str:
- iterable = [x + "\n" for x in
- self._expectations_as_str.split("\n")]
- # Strip the final entry if it's empty to avoid adding an extra
- # newline.
- if iterable[-1] == "\n":
- return iterable[:-1]
- return iterable
- else:
- return open(self._path)
-
- def to_test_platform_name(self, name):
- """Returns the test expectation platform that will be used for a
- given platform name, or None if there is no match."""
- chromium_prefix = 'chromium-'
- name = name.lower()
- if name.startswith(chromium_prefix):
- name = name[len(chromium_prefix):]
- if name in self.PLATFORMS:
- return name
- return None
-
- def get_test_set(self, modifier, expectation=None, include_skips=True):
- if expectation is None:
- tests = self._modifier_to_tests[modifier]
- else:
- tests = (self._expectation_to_tests[expectation] &
- self._modifier_to_tests[modifier])
-
- if not include_skips:
- tests = tests - self.get_test_set(SKIP, expectation)
-
- return tests
-
- def get_tests_with_result_type(self, result_type):
- return self._result_type_to_tests[result_type]
-
- def get_tests_with_timeline(self, timeline):
- return self._timeline_to_tests[timeline]
-
- def get_options(self, test):
- """This returns the entire set of options for the given test
- (the modifiers plus the BUGXXXX identifier). This is used by the
- LTTF dashboard."""
- return self._test_to_options[test]
-
- def has_modifier(self, test, modifier):
- return test in self._modifier_to_tests[modifier]
-
- def get_expectations(self, test):
- return self._test_to_expectations[test]
-
- def get_expectations_json_for_all_platforms(self):
- # Specify separators in order to get compact encoding.
- return ExpectationsJsonEncoder(separators=(',', ':')).encode(
- self._all_expectations)
-
- def contains(self, test):
- return test in self._test_to_expectations
-
- def remove_platform_from_file(self, tests, platform, backup=False):
- """Remove the platform option from test expectations file.
-
- If a test is in the test list and has an option that matches the given
- platform, remove the matching platform and save the updated test back
- to the file. If no other platforms remain after removal, delete the
- test from the file.
-
- Args:
- tests: list of tests that need to be updated.
- platform: which platform option to remove.
- backup: if true, the original test expectations file is saved as
- [self.TEST_LIST].orig.YYYYMMDDHHMMSS
-
- Returns:
- True if the file was updated successfully.
- """
-
- new_file = self._path + '.new'
- logging.debug('Original file: "%s"', self._path)
- logging.debug('New file: "%s"', new_file)
- f_orig = self._get_iterable_expectations()
- f_new = open(new_file, 'w')
-
- tests_removed = 0
- tests_updated = 0
- lineno = 0
- for line in f_orig:
- lineno += 1
- action = self._get_platform_update_action(line, lineno, tests,
- platform)
- if action == NO_CHANGE:
- # Save the original line back to the file
- logging.debug('No change to test: %s', line)
- f_new.write(line)
- elif action == REMOVE_TEST:
- tests_removed += 1
- logging.info('Test removed: %s', line)
- elif action == REMOVE_PLATFORM:
- parts = line.split(':')
- new_options = parts[0].replace(platform.upper() + ' ', '', 1)
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.write(new_line)
- tests_updated += 1
- logging.info('Test updated: ')
- logging.info(' old: %s', line)
- logging.info(' new: %s', new_line)
- elif action == ADD_PLATFORMS_EXCEPT_THIS:
- parts = line.split(':')
- new_options = parts[0]
- for p in self.PLATFORMS:
- p = p.upper()
- # This is a temporary solution for the rebaselining tool.
- # Do not add tags WIN-7 and WIN-VISTA to test expectations
- # if the original line does not specify the platform option.
- # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
- # reliable Win 7 and Win Vista buildbots setup.
- if p not in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
- new_options += p + ' '
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.write(new_line)
- tests_updated += 1
- logging.info('Test updated: ')
- logging.info(' old: %s', line)
- logging.info(' new: %s', new_line)
- else:
- logging.error('Unknown update action: %d; line: %s',
- action, line)
-
- logging.info('Total tests removed: %d', tests_removed)
- logging.info('Total tests updated: %d', tests_updated)
-
- f_orig.close()
- f_new.close()
-
- if backup:
- date_suffix = time.strftime('%Y%m%d%H%M%S',
- time.localtime(time.time()))
- backup_file = ('%s.orig.%s' % (self._path, date_suffix))
- if os.path.exists(backup_file):
- os.remove(backup_file)
- logging.info('Saving original file to "%s"', backup_file)
- os.rename(self._path, backup_file)
- else:
- os.remove(self._path)
-
- logging.debug('Saving new file to "%s"', self._path)
- os.rename(new_file, self._path)
- return True
-
- def parse_expectations_line(self, line, lineno):
- """Parses a line from test_expectations.txt and returns a tuple
- with the test path, options as a list, expectations as a list."""
- line = strip_comments(line)
- if not line:
- return (None, None, None)
-
- options = []
- if line.find(":") is -1:
- test_and_expectation = line.split("=")
- else:
- parts = line.split(":")
- options = self._get_options_list(parts[0])
- test_and_expectation = parts[1].split('=')
-
- test = test_and_expectation[0].strip()
- if len(test_and_expectation) != 2:
- self._add_error(lineno, "Missing expectations.",
- test_and_expectation)
- expectations = None
- else:
- expectations = self._get_options_list(test_and_expectation[1])
-
- return (test, options, expectations)
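
A worked example of the resulting three-tuple (lineno feeds only the error
reporting):

    # parse_expectations_line(
    #     'BUG1234 LINUX DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS', 7)
    # -> ('LayoutTests/fast/js/no-good.js',
    #     ['bug1234', 'linux', 'debug'],
    #     ['timeout', 'pass'])
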
-
- def _get_platform_update_action(self, line, lineno, tests, platform):
- """Check the platform option and return the action needs to be taken.
-
- Args:
- line: current line in test expectations file.
- lineno: current line number of line
- tests: list of tests that need to be updated.
- platform: which platform option to remove.
-
- Returns:
- NO_CHANGE: no change to the line (comments, test not in the list etc)
- REMOVE_TEST: remove the test from file.
- REMOVE_PLATFORM: remove this platform option from the test.
- ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
- """
- test, options, expectations = self.parse_expectations_line(line,
- lineno)
- if not test or test not in tests:
- return NO_CHANGE
-
- has_any_platform = False
- for option in options:
- if option in self.PLATFORMS:
- has_any_platform = True
- if option != platform:
- return REMOVE_PLATFORM
-
- # If there is no platform specified, then it means apply to all
- # platforms. Return the action to add all the platforms except this
- # one.
- if not has_any_platform:
- return ADD_PLATFORMS_EXCEPT_THIS
-
- return REMOVE_TEST
-
- def _has_valid_modifiers_for_current_platform(self, options, lineno,
- test_and_expectations, modifiers):
- """Returns true if the current platform is in the options list or if
- no platforms are listed and if there are no fatal errors in the
- options list.
-
- Args:
- options: List of lowercase options.
- lineno: The line in the file where the test is listed.
- test_and_expectations: The path and expectations for the test.
- modifiers: The set to populate with modifiers.
- """
- has_any_platform = False
- has_bug_id = False
- for option in options:
- if option in self.MODIFIERS:
- modifiers.add(option)
- elif option in self.PLATFORMS:
- has_any_platform = True
- elif option.startswith('bug'):
- has_bug_id = True
- elif option not in self.BUILD_TYPES:
- self._add_error(lineno, 'Invalid modifier for test: %s' %
- option, test_and_expectations)
-
- if has_any_platform and not self._match_platform(options):
- return False
-
- if not has_bug_id and 'wontfix' not in options:
- # TODO(ojan): Turn this into an AddError call once all the
- # tests have BUG identifiers.
- self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
- test_and_expectations)
-
- if 'release' in options or 'debug' in options:
- if self._is_debug_mode and 'debug' not in options:
- return False
- if not self._is_debug_mode and 'release' not in options:
- return False
-
- if 'wontfix' in options and 'defer' in options:
- self._add_error(lineno, 'Test cannot be both DEFER and WONTFIX.',
- test_and_expectations)
-
- if self._is_lint_mode and 'rebaseline' in options:
- self._add_error(lineno,
- 'REBASELINE should only be used for running rebaseline.py. '
- 'Cannot be checked in.', test_and_expectations)
-
- return True
-
- def _match_platform(self, options):
- """Match the list of options against our specified platform. If any
- of the options prefix-match self._platform, return True. This handles
- the case where a test is marked WIN and the platform is WIN-VISTA.
-
- Args:
- options: list of options
- """
- for opt in options:
- if self._platform.startswith(opt):
- return True
- return False
-
- def _add_to_all_expectations(self, test, options, expectations):
- # Make all paths unix-style so the dashboard doesn't need to convert them.
- test = test.replace('\\', '/')
- if test not in self._all_expectations:
- self._all_expectations[test] = []
- self._all_expectations[test].append(
- ModifiersAndExpectations(options, expectations))
-
- def _read(self, expectations):
- """For each test in an expectations iterable, generate the
- expectations for it."""
- lineno = 0
- for line in expectations:
- lineno += 1
-
- test_list_path, options, expectations = \
- self.parse_expectations_line(line, lineno)
- if not expectations:
- continue
-
- self._add_to_all_expectations(test_list_path,
- " ".join(options).upper(),
- " ".join(expectations).upper())
-
- modifiers = set()
- if options and not self._has_valid_modifiers_for_current_platform(
- options, lineno, test_list_path, modifiers):
- continue
-
- expectations = self._parse_expectations(expectations, lineno,
- test_list_path)
-
- if 'slow' in options and TIMEOUT in expectations:
- self._add_error(lineno,
- 'A test cannot be both slow and timeout. If it times out '
- 'indefinitely, then it should be just timeout.',
- test_list_path)
-
- full_path = os.path.join(path_utils.layout_tests_dir(),
- test_list_path)
- full_path = os.path.normpath(full_path)
- # WebKit's way of skipping tests is to add a -disabled suffix.
- # So we should consider the path existing if the path or the
- # -disabled version exists.
- if (self._tests_are_present and not os.path.exists(full_path)
- and not os.path.exists(full_path + '-disabled')):
- # Log a non-fatal error here since you hit this case any
- # time you update test_expectations.txt without syncing
- # the LayoutTests directory.
- self._log_non_fatal_error(lineno, 'Path does not exist.',
- test_list_path)
- continue
-
- if not self._full_test_list:
- tests = [test_list_path]
- else:
- tests = self._expand_tests(test_list_path)
-
- self._add_tests(tests, expectations, test_list_path, lineno,
- modifiers, options)
-
- if not self._suppress_errors and (
- len(self._errors) or len(self._non_fatal_errors)):
- if self._is_debug_mode:
- build_type = 'DEBUG'
- else:
- build_type = 'RELEASE'
- print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
- % (self._platform.upper(), build_type)
-
- for error in self._non_fatal_errors:
- logging.error(error)
- if len(self._errors):
- raise SyntaxError('\n'.join(map(str, self._errors)))
-
- # Now add in the tests that weren't present in the expectations file
- expectations = set([PASS])
- options = []
- modifiers = []
- if self._full_test_list:
- for test in self._full_test_list:
- if test not in self._test_list_paths:
- self._add_test(test, modifiers, expectations, options)
-
- def _get_options_list(self, list_string):
- return [part.strip().lower() for part in list_string.strip().split(' ')]
-
- def _parse_expectations(self, expectations, lineno, test_list_path):
- result = set()
- for part in expectations:
- if part not in self.EXPECTATIONS:
- self._add_error(lineno, 'Unsupported expectation: %s' % part,
- test_list_path)
- continue
- expectation = self.EXPECTATIONS[part]
- result.add(expectation)
- return result
-
- def _expand_tests(self, test_list_path):
- """Convert the test specification to an absolute, normalized
- path and make sure directories end with the OS path separator."""
- path = os.path.join(path_utils.layout_tests_dir(), test_list_path)
- path = os.path.normpath(path)
- path = self._fix_dir(path)
-
- result = []
- for test in self._full_test_list:
- if test.startswith(path):
- result.append(test)
- return result
-
- def _fix_dir(self, path):
- """Check to see if the path points to a directory, and if so, append
- the directory separator if necessary."""
- if self._tests_are_present:
- if os.path.isdir(path):
- path = os.path.join(path, '')
- else:
- # If we can't check the filesystem to see if this is a directory,
- # we assume that files w/o an extension are directories.
- # TODO(dpranke): What happens w/ LayoutTests/css2.1 ?
- if os.path.splitext(path)[1] == '':
- path = os.path.join(path, '')
- return path
-
- def _add_tests(self, tests, expectations, test_list_path, lineno,
- modifiers, options):
- for test in tests:
- if self._already_seen_test(test, test_list_path, lineno):
- continue
-
- self._clear_expectations_for_test(test, test_list_path)
- self._add_test(test, modifiers, expectations, options)
-
- def _add_test(self, test, modifiers, expectations, options):
- """Sets the expected state for a given test.
-
- This routine assumes the test has not been added before. If it has,
- use _clear_expectations_for_test() to reset the state prior to
- calling this.
-
- Args:
- test: test to add
- modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
- expectations: sequence of expectations (PASS, IMAGE, etc.)
- options: sequence of keywords and bug identifiers."""
- self._test_to_expectations[test] = expectations
- for expectation in expectations:
- self._expectation_to_tests[expectation].add(test)
-
- self._test_to_options[test] = options
- self._test_to_modifiers[test] = set()
- for modifier in modifiers:
- mod_value = self.MODIFIERS[modifier]
- self._modifier_to_tests[mod_value].add(test)
- self._test_to_modifiers[test].add(mod_value)
-
- if 'wontfix' in modifiers:
- self._timeline_to_tests[WONTFIX].add(test)
- elif 'defer' in modifiers:
- self._timeline_to_tests[DEFER].add(test)
- else:
- self._timeline_to_tests[NOW].add(test)
-
- if 'skip' in modifiers:
- self._result_type_to_tests[SKIP].add(test)
- elif expectations == set([PASS]):
- self._result_type_to_tests[PASS].add(test)
- elif len(expectations) > 1:
- self._result_type_to_tests[FLAKY].add(test)
- else:
- self._result_type_to_tests[FAIL].add(test)
-
- def _clear_expectations_for_test(self, test, test_list_path):
- """Remove prexisting expectations for this test.
- This happens if we are seeing a more precise path
- than a previous listing.
- """
- if test in self._test_list_paths:
- self._test_to_expectations.pop(test, '')
- self._remove_from_sets(test, self._expectation_to_tests)
- self._remove_from_sets(test, self._modifier_to_tests)
- self._remove_from_sets(test, self._timeline_to_tests)
- self._remove_from_sets(test, self._result_type_to_tests)
-
- self._test_list_paths[test] = os.path.normpath(test_list_path)
-
- def _remove_from_sets(self, test, sets):
- """Removes the given test from the sets in the dictionary.
-
- Args:
- test: test to look for
- sets: dict of sets of files"""
- for set_of_tests in sets.itervalues():
- if test in set_of_tests:
- set_of_tests.remove(test)
-
- def _already_seen_test(self, test, test_list_path, lineno):
- """Returns true if we've already seen a more precise path for this test
- than the test_list_path.
- """
- if test not in self._test_list_paths:
- return False
-
- prev_base_path = self._test_list_paths[test]
- if (prev_base_path == os.path.normpath(test_list_path)):
- self._add_error(lineno, 'Duplicate expectations.', test)
- return True
-
- # Check if we've already seen a more precise path.
- return prev_base_path.startswith(os.path.normpath(test_list_path))
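
Concretely, for two hypothetical lines covering the same test:

    # line 3: LayoutTests/fast/js = FAIL
    # line 9: LayoutTests/fast/js/foo.js = PASS
    #
    # foo.js ends up PASS: the more precise path wins regardless of the
    # order of the two lines. Repeating the identical path is reported
    # as a 'Duplicate expectations.' error.
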
-
- def _add_error(self, lineno, msg, path):
- """Reports an error that will prevent running the tests. Does not
- immediately raise an exception because we'd like to aggregate all the
- errors so they can all be printed out."""
- self._errors.append('\nLine:%s %s %s' % (lineno, msg, path))
-
- def _log_non_fatal_error(self, lineno, msg, path):
- """Reports an error that will not prevent running the tests. These are
- still errors, but not bad enough to warrant breaking test running."""
- self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_failures.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_failures.py
deleted file mode 100644
index 782bd34..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_failures.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Classes for failures that occur during tests."""
-
-import os
-import test_expectations
-
-
-def determine_result_type(failure_list):
- """Takes a set of test_failures and returns which result type best fits
- the list of failures. "Best fits" means we use the worst type of failure.
-
- Returns:
- one of the test_expectations result types - PASS, TEXT, CRASH, etc."""
-
- if not failure_list:
- return test_expectations.PASS
-
- failure_types = [type(f) for f in failure_list]
- if FailureCrash in failure_types:
- return test_expectations.CRASH
- elif FailureTimeout in failure_types:
- return test_expectations.TIMEOUT
- elif (FailureMissingResult in failure_types or
- FailureMissingImage in failure_types or
- FailureMissingImageHash in failure_types):
- return test_expectations.MISSING
- else:
- is_text_failure = FailureTextMismatch in failure_types
- is_image_failure = (FailureImageHashIncorrect in failure_types or
- FailureImageHashMismatch in failure_types)
- if is_text_failure and is_image_failure:
- return test_expectations.IMAGE_PLUS_TEXT
- elif is_text_failure:
- return test_expectations.TEXT
- elif is_image_failure:
- return test_expectations.IMAGE
- else:
- raise ValueError("unclassifiable set of failures: "
- + str(failure_types))
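
Illustrative precedence, with the failure classes referenced above (instances
elided; the 'worst' failure dominates):

    # []                                              -> PASS
    # [FailureCrash, FailureTextMismatch]             -> CRASH
    # [FailureTextMismatch, FailureImageHashMismatch] -> IMAGE_PLUS_TEXT
    # [FailureTimeout]                                -> TIMEOUT
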
-
-
-class TestFailure(object):
- """Abstract base class that defines the failure interface."""
-
- @staticmethod
- def message():
- """Returns a string describing the failure in more detail."""
- raise NotImplementedError
-
- def result_html_output(self, filename):
- """Returns an HTML string to be included on the results.html page."""
- raise NotImplementedError
-
- def should_kill_test_shell(self):
- """Returns True if we should kill the test shell before the next
- test."""
- return False
-
- def relative_output_filename(self, filename, modifier):
- """Returns a relative filename inside the output dir that contains
- modifier.
-
- For example, if filename is fast\dom\foo.html and modifier is
- "-expected.txt", the return value is fast\dom\foo-expected.txt
-
- Args:
- filename: relative filename to test file
- modifier: a string to replace the extension of filename with
-
-        Returns:
- The relative windows path to the output filename
- """
- return os.path.splitext(filename)[0] + modifier
-
-
-class FailureWithType(TestFailure):
- """Base class that produces standard HTML output based on the test type.
-
-    Subclasses may commonly choose to override result_html_output(), but
-    still use the standard output_links().
- """
-
- def __init__(self, test_type):
- TestFailure.__init__(self)
- # TODO(ojan): This class no longer needs to know the test_type.
- self._test_type = test_type
-
-    # Filename suffixes used by result_html_output().
- OUT_FILENAMES = []
-
- def output_links(self, filename, out_names):
- """Returns a string holding all applicable output file links.
-
- Args:
- filename: the test filename, used to construct the result file names
- out_names: list of filename suffixes for the files. If three or more
- suffixes are in the list, they should be [actual, expected, diff,
- wdiff]. Two suffixes should be [actual, expected], and a
- single item is the [actual] filename suffix.
- If out_names is empty, returns the empty string.
- """
- links = ['']
- uris = [self.relative_output_filename(filename, fn) for
- fn in out_names]
- if len(uris) > 1:
- links.append("<a href='%s'>expected</a>" % uris[1])
- if len(uris) > 0:
- links.append("<a href='%s'>actual</a>" % uris[0])
- if len(uris) > 2:
- links.append("<a href='%s'>diff</a>" % uris[2])
- if len(uris) > 3:
- links.append("<a href='%s'>wdiff</a>" % uris[3])
- return ' '.join(links)
-
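-    # Illustrative example (the suffix list is assumed, not from a specific
-    # subclass): with out_names = ['-actual.txt', '-expected.txt',
-    # '-diff.txt'], a test fast/foo.html yields links to
-    # fast/foo-expected.txt (expected), fast/foo-actual.txt (actual) and
-    # fast/foo-diff.txt (diff), in that order.
-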
- def result_html_output(self, filename):
- return self.message() + self.output_links(filename, self.OUT_FILENAMES)
-
-
-class FailureTimeout(TestFailure):
- """Test timed out. We also want to restart the test shell if this
- happens."""
-
- @staticmethod
- def message():
- return "Test timed out"
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
- def should_kill_test_shell(self):
- return True
-
-
-class FailureCrash(TestFailure):
- """Test shell crashed."""
-
- @staticmethod
- def message():
- return "Test shell crashed"
-
- def result_html_output(self, filename):
- # TODO(tc): create a link to the minidump file
- stack = self.relative_output_filename(filename, "-stack.txt")
- return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
- stack)
-
- def should_kill_test_shell(self):
- return True
-
-
-class FailureMissingResult(FailureWithType):
- """Expected result was missing."""
- OUT_FILENAMES = ["-actual.txt"]
-
- @staticmethod
- def message():
- return "No expected results found"
-
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-
-class FailureTextMismatch(FailureWithType):
- """Text diff output failed."""
-    # Filename suffixes used by result_html_output().
- OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"]
- OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt",
- "-wdiff.html"]
-
- def __init__(self, test_type, has_wdiff):
- FailureWithType.__init__(self, test_type)
- if has_wdiff:
- self.OUT_FILENAMES = self.OUT_FILENAMES_WDIFF
-
- @staticmethod
- def message():
- return "Text diff mismatch"
-
-
-class FailureMissingImageHash(FailureWithType):
- """Actual result hash was missing."""
- # Chrome doesn't know to display a .checksum file as text, so don't bother
- # putting in a link to the actual result.
- OUT_FILENAMES = []
-
- @staticmethod
- def message():
- return "No expected image hash found"
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
-
-
-class FailureMissingImage(FailureWithType):
- """Actual result image was missing."""
- OUT_FILENAMES = ["-actual.png"]
-
- @staticmethod
- def message():
- return "No expected image found"
-
- def result_html_output(self, filename):
- return ("<strong>%s</strong>" % self.message() +
- self.output_links(filename, self.OUT_FILENAMES))
-
-
-class FailureImageHashMismatch(FailureWithType):
- """Image hashes didn't match."""
- OUT_FILENAMES = ["-actual.png", "-expected.png", "-diff.png"]
-
- @staticmethod
- def message():
- # We call this a simple image mismatch to avoid confusion, since
- # we link to the PNGs rather than the checksums.
- return "Image mismatch"
-
-
-class FailureFuzzyFailure(FailureWithType):
- """Image hashes didn't match."""
- OUT_FILENAMES = ["-actual.png", "-expected.png"]
-
- @staticmethod
- def message():
- return "Fuzzy image match also failed"
-
-
-class FailureImageHashIncorrect(FailureWithType):
- """Actual result hash is incorrect."""
- # Chrome doesn't know to display a .checksum file as text, so don't bother
- # putting in a link to the actual result.
- OUT_FILENAMES = []
-
- @staticmethod
- def message():
- return "Images match, expected image hash incorrect. "
-
- def result_html_output(self, filename):
- return "<strong>%s</strong>" % self.message()
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_files.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_files.py
deleted file mode 100644
index 99ffc86..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_files.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This module is used to find all of the layout test files used by Chromium
-(across all platforms). It exposes one public function - GatherTestFiles() -
-which takes an optional list of paths. If a list is passed in, the returned
-list of test files is constrained to those found under the paths passed in,
-i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files
-under that directory."""
-
-import glob
-import os
-import path_utils
-
-# When collecting test cases, we include any file with these extensions.
-_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
- '.php', '.svg'])
-# When collecting test cases, skip these directories
-_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])
-
-
-def gather_test_files(paths):
- """Generate a set of test files and return them.
-
- Args:
- paths: a list of command line paths relative to the webkit/tests
- directory. glob patterns are ok.
- """
- paths_to_walk = set()
-    # If paths is empty, default to walking the whole layout tests dir.
- if paths:
- for path in paths:
- # If there's an * in the name, assume it's a glob pattern.
- path = os.path.join(path_utils.layout_tests_dir(), path)
-            if '*' in path:
- filenames = glob.glob(path)
- paths_to_walk.update(filenames)
- else:
- paths_to_walk.add(path)
- else:
- paths_to_walk.add(path_utils.layout_tests_dir())
-
- # Now walk all the paths passed in on the command line and get filenames
- test_files = set()
- for path in paths_to_walk:
- if os.path.isfile(path) and _has_supported_extension(path):
- test_files.add(os.path.normpath(path))
- continue
-
- for root, dirs, files in os.walk(path):
- # don't walk skipped directories and sub directories
- if os.path.basename(root) in _skipped_directories:
- del dirs[:]
- continue
-
- for filename in files:
- if _has_supported_extension(filename):
- filename = os.path.join(root, filename)
- filename = os.path.normpath(filename)
- test_files.add(filename)
-
- return test_files
-
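-# Hedged usage sketch (paths are illustrative): plain directories and glob
-# patterns, both relative to the layout tests dir, can be mixed freely.
-#
-#   files = gather_test_files(['fast/js', 'fast/dom/*.html'])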
-
-def _has_supported_extension(filename):
- """Return true if filename is one of the file extensions we want to run a
- test on."""
- extension = os.path.splitext(filename)[1]
- return extension in _supported_file_extensions
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_shell_thread.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_shell_thread.py
deleted file mode 100644
index fbbba84..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/test_shell_thread.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A Thread object for running the test shell and processing URLs from a
-shared queue.
-
-Each thread runs a separate instance of the test_shell binary and validates
-the output. When there are no more URLs to process in the shared queue, the
-thread exits.
-"""
-
-import copy
-import logging
-import os
-import Queue
-import signal
-import subprocess
-import sys
-import thread
-import threading
-import time
-
-import path_utils
-import test_failures
-
-
-def process_output(proc, test_info, test_types, test_args, target, output_dir):
- """Receives the output from a test_shell process, subjects it to a number
- of tests, and returns a list of failure types the test produced.
-
- Args:
- proc: an active test_shell process
- test_info: Object containing the test filename, uri and timeout
- test_types: list of test types to subject the output to
- test_args: arguments to be passed to each test
- target: Debug or Release
- output_dir: directory to put crash stack traces into
-
-    Returns: a TestStats object with the failures and times for the test.
- """
- outlines = []
- extra_lines = []
- failures = []
- crash = False
-
- # Some test args, such as the image hash, may be added or changed on a
- # test-by-test basis.
- local_test_args = copy.copy(test_args)
-
- start_time = time.time()
-
- line = proc.stdout.readline()
-
- # Only start saving output lines once we've loaded the URL for the test.
- url = None
- test_string = test_info.uri.strip()
-
- while line.rstrip() != "#EOF":
- # Make sure we haven't crashed.
- if line == '' and proc.poll() is not None:
- failures.append(test_failures.FailureCrash())
-
-            # -1073741510 is hex code 0xc000013a (STATUS_CONTROL_C_EXIT),
-            # which is used for abrupt termination. This happens if we hit
-            # ctrl+c from the prompt and we happen to be waiting on the
-            # test_shell.
- # sdoyon: Not sure for which OS and in what circumstances the
- # above code is valid. What works for me under Linux to detect
- # ctrl+c is for the subprocess returncode to be negative SIGINT.
- # And that agrees with the subprocess documentation.
-            if (-1073741510 == proc.returncode or
-                -signal.SIGINT == proc.returncode):
- raise KeyboardInterrupt
- crash = True
- break
-
- # Don't include #URL lines in our output
- if line.startswith("#URL:"):
- url = line.rstrip()[5:]
- if url != test_string:
- logging.fatal("Test got out of sync:\n|%s|\n|%s|" %
- (url, test_string))
- raise AssertionError("test out of sync")
- elif line.startswith("#MD5:"):
- local_test_args.hash = line.rstrip()[5:]
- elif line.startswith("#TEST_TIMED_OUT"):
- # Test timed out, but we still need to read until #EOF.
- failures.append(test_failures.FailureTimeout())
- elif url:
- outlines.append(line)
- else:
- extra_lines.append(line)
-
- line = proc.stdout.readline()
-
- end_test_time = time.time()
-
-    if extra_lines:
- extra = "".join(extra_lines)
- if crash:
- logging.debug("Stacktrace for %s:\n%s" % (test_string, extra))
- # Strip off "file://" since RelativeTestFilename expects
- # filesystem paths.
- filename = os.path.join(output_dir,
- path_utils.relative_test_filename(test_string[7:]))
- filename = os.path.splitext(filename)[0] + "-stack.txt"
- path_utils.maybe_make_directory(os.path.split(filename)[0])
- open(filename, "wb").write(extra)
- else:
- logging.debug("Previous test output extra lines after dump:\n%s" %
- extra)
-
- # Check the output and save the results.
- time_for_diffs = {}
- for test_type in test_types:
- start_diff_time = time.time()
- new_failures = test_type.compare_output(test_info.filename,
- proc, ''.join(outlines),
- local_test_args, target)
- # Don't add any more failures if we already have a crash, so we don't
- # double-report those tests. We do double-report for timeouts since
- # we still want to see the text and image output.
- if not crash:
- failures.extend(new_failures)
- time_for_diffs[test_type.__class__.__name__] = (
- time.time() - start_diff_time)
-
- total_time_for_all_diffs = time.time() - end_test_time
- test_run_time = end_test_time - start_time
- return TestStats(test_info.filename, failures, test_run_time,
- total_time_for_all_diffs, time_for_diffs)
-
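-# For reference, the per-test protocol that process_output() parses,
-# reconstructed from the code above (not from test_shell documentation):
-#
-#   stdin:  "<uri> <timeout> <pixel_hash>\n"  (written by _run_test below)
-#   stdout: "#URL:<uri>"       echo of the test being run
-#           "#MD5:<hash>"      actual image hash, if any
-#           "#TEST_TIMED_OUT"  optional timeout marker
-#           ...test output lines...
-#           "#EOF"             end of this test's output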
-
-def start_test_shell(command, args):
- """Returns the process for a new test_shell started in layout-tests mode.
- """
- cmd = []
- # Hook for injecting valgrind or other runtime instrumentation,
- # used by e.g. tools/valgrind/valgrind_tests.py.
- wrapper = os.environ.get("BROWSER_WRAPPER", None)
-    if wrapper is not None:
- cmd += [wrapper]
- cmd += command + ['--layout-tests'] + args
- return subprocess.Popen(cmd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
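-# Hedged example (binary path is illustrative): running every test_shell
-# under valgrind by exporting BROWSER_WRAPPER before starting the harness.
-#
-#   os.environ['BROWSER_WRAPPER'] = '/usr/bin/valgrind'
-#   proc = start_test_shell(['out/Debug/test_shell'], [])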
-
-class TestStats(object):
-
- def __init__(self, filename, failures, test_run_time,
- total_time_for_all_diffs, time_for_diffs):
- self.filename = filename
- self.failures = failures
- self.test_run_time = test_run_time
- self.total_time_for_all_diffs = total_time_for_all_diffs
- self.time_for_diffs = time_for_diffs
-
-
-class SingleTestThread(threading.Thread):
- """Thread wrapper for running a single test file."""
-
- def __init__(self, test_shell_command, shell_args, test_info, test_types,
- test_args, target, output_dir):
- """
- Args:
- test_info: Object containing the test filename, uri and timeout
- output_dir: Directory to put crash stacks into.
- See TestShellThread for documentation of the remaining arguments.
- """
-
- threading.Thread.__init__(self)
- self._command = test_shell_command
- self._shell_args = shell_args
- self._test_info = test_info
- self._test_types = test_types
- self._test_args = test_args
- self._target = target
- self._output_dir = output_dir
-
- def run(self):
- proc = start_test_shell(self._command, self._shell_args +
- ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri])
- self._test_stats = process_output(proc, self._test_info,
- self._test_types, self._test_args, self._target, self._output_dir)
-
- def get_test_stats(self):
- return self._test_stats
-
-
-class TestShellThread(threading.Thread):
-
- def __init__(self, filename_list_queue, result_queue, test_shell_command,
- test_types, test_args, shell_args, options):
- """Initialize all the local state for this test shell thread.
-
- Args:
- filename_list_queue: A thread safe Queue class that contains lists
- of tuples of (filename, uri) pairs.
- result_queue: A thread safe Queue class that will contain tuples of
- (test, failure lists) for the test results.
- test_shell_command: A list specifying the command+args for
- test_shell
- test_types: A list of TestType objects to run the test output
- against.
- test_args: A TestArguments object to pass to each TestType.
- shell_args: Any extra arguments to be passed to test_shell.exe.
- options: A property dictionary as produced by optparse. The
- command-line options should match those expected by
- run_webkit_tests; they are typically passed via the
- run_webkit_tests.TestRunner class."""
- threading.Thread.__init__(self)
- self._filename_list_queue = filename_list_queue
- self._result_queue = result_queue
- self._filename_list = []
- self._test_shell_command = test_shell_command
- self._test_types = test_types
- self._test_args = test_args
- self._test_shell_proc = None
- self._shell_args = shell_args
- self._options = options
- self._canceled = False
- self._exception_info = None
- self._directory_timing_stats = {}
- self._test_stats = []
- self._num_tests = 0
- self._start_time = 0
- self._stop_time = 0
-
- # Current directory of tests we're running.
- self._current_dir = None
- # Number of tests in self._current_dir.
- self._num_tests_in_current_dir = None
- # Time at which we started running tests from self._current_dir.
- self._current_dir_start_time = None
-
- def get_directory_timing_stats(self):
- """Returns a dictionary mapping test directory to a tuple of
- (number of tests in that directory, time to run the tests)"""
- return self._directory_timing_stats
-
- def get_individual_test_stats(self):
- """Returns a list of (test_filename, time_to_run_test,
- total_time_for_all_diffs, time_for_diffs) tuples."""
- return self._test_stats
-
- def cancel(self):
- """Set a flag telling this thread to quit."""
- self._canceled = True
-
- def get_exception_info(self):
- """If run() terminated on an uncaught exception, return it here
- ((type, value, traceback) tuple).
- Returns None if run() terminated normally. Meant to be called after
- joining this thread."""
- return self._exception_info
-
- def get_total_time(self):
- return max(self._stop_time - self._start_time, 0.0)
-
- def get_num_tests(self):
- return self._num_tests
-
- def run(self):
- """Delegate main work to a helper method and watch for uncaught
- exceptions."""
- self._start_time = time.time()
- self._num_tests = 0
- try:
- logging.debug('%s starting' % (self.getName()))
- self._run(test_runner=None, result_summary=None)
- logging.debug('%s done (%d tests)' % (self.getName(),
- self.get_num_tests()))
- except:
- # Save the exception for our caller to see.
- self._exception_info = sys.exc_info()
- self._stop_time = time.time()
- # Re-raise it and die.
- logging.error('%s dying: %s' % (self.getName(),
- self._exception_info))
- raise
- self._stop_time = time.time()
-
- def run_in_main_thread(self, test_runner, result_summary):
- """This hook allows us to run the tests from the main thread if
-        --num-test-shells=1, instead of having to always run two or more
- threads. This allows us to debug the test harness without having to
- do multi-threaded debugging."""
- self._run(test_runner, result_summary)
-
- def _run(self, test_runner, result_summary):
- """Main work entry point of the thread. Basically we pull urls from the
- filename queue and run the tests until we run out of urls.
-
-        If test_runner is not None, then we call test_runner.update_summary()
- with the results of each test."""
- batch_size = 0
- batch_count = 0
- if self._options.batch_size:
- try:
- batch_size = int(self._options.batch_size)
- except:
- logging.info("Ignoring invalid batch size '%s'" %
- self._options.batch_size)
-
- # Append tests we're running to the existing tests_run.txt file.
- # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
- tests_run_filename = os.path.join(self._options.results_directory,
- "tests_run.txt")
- tests_run_file = open(tests_run_filename, "a")
-
- while True:
- if self._canceled:
- logging.info('Testing canceled')
- tests_run_file.close()
- return
-
-            if not self._filename_list:
- if self._current_dir is not None:
- self._directory_timing_stats[self._current_dir] = \
- (self._num_tests_in_current_dir,
- time.time() - self._current_dir_start_time)
-
- try:
- self._current_dir, self._filename_list = \
- self._filename_list_queue.get_nowait()
- except Queue.Empty:
- self._kill_test_shell()
- tests_run_file.close()
- return
-
- self._num_tests_in_current_dir = len(self._filename_list)
- self._current_dir_start_time = time.time()
-
- test_info = self._filename_list.pop()
-
- # We have a url, run tests.
- batch_count += 1
- self._num_tests += 1
- if self._options.run_singly:
- failures = self._run_test_singly(test_info)
- else:
- failures = self._run_test(test_info)
-
- filename = test_info.filename
- tests_run_file.write(filename + "\n")
- if failures:
-                # Check and kill the test shell if we need to.
-                if any(f.should_kill_test_shell() for f in failures):
- self._kill_test_shell()
- # Reset the batch count since the shell just bounced.
- batch_count = 0
- # Print the error message(s).
- error_str = '\n'.join([' ' + f.message() for f in failures])
- logging.debug("%s %s failed:\n%s" % (self.getName(),
- path_utils.relative_test_filename(filename),
- error_str))
- else:
- logging.debug("%s %s passed" % (self.getName(),
- path_utils.relative_test_filename(filename)))
- self._result_queue.put((filename, failures))
-
- if batch_size > 0 and batch_count > batch_size:
- # Bounce the shell and reset count.
- self._kill_test_shell()
- batch_count = 0
-
- if test_runner:
- test_runner.update_summary(result_summary)
-
- def _run_test_singly(self, test_info):
- """Run a test in a separate thread, enforcing a hard time limit.
-
- Since we can only detect the termination of a thread, not any internal
- state or progress, we can only run per-test timeouts when running test
- files singly.
-
- Args:
- test_info: Object containing the test filename, uri and timeout
-
-        Returns:
- A list of TestFailure objects describing the error.
- """
- worker = SingleTestThread(self._test_shell_command,
- self._shell_args,
- test_info,
- self._test_types,
- self._test_args,
- self._options.target,
- self._options.results_directory)
-
- worker.start()
-
-        # When we're running one test per test_shell process, we can enforce
-        # a hard timeout. The test_shell watchdog uses 2.5x the timeout, so
-        # we want to be larger than that.
-        worker.join(int(test_info.timeout) * 3.0 / 1000.0)
- if worker.isAlive():
- # If join() returned with the thread still running, the
- # test_shell.exe is completely hung and there's nothing
- # more we can do with it. We have to kill all the
- # test_shells to free it up. If we're running more than
- # one test_shell thread, we'll end up killing the other
- # test_shells too, introducing spurious crashes. We accept that
- # tradeoff in order to avoid losing the rest of this thread's
- # results.
- logging.error('Test thread hung: killing all test_shells')
- path_utils.kill_all_test_shells()
-
- try:
- stats = worker.get_test_stats()
- self._test_stats.append(stats)
- failures = stats.failures
- except AttributeError, e:
- failures = []
- logging.error('Cannot get results of test: %s' %
- test_info.filename)
-
- return failures
-
- def _run_test(self, test_info):
- """Run a single test file using a shared test_shell process.
-
- Args:
- test_info: Object containing the test filename, uri and timeout
-
-        Returns:
- A list of TestFailure objects describing the error.
- """
- self._ensure_test_shell_is_running()
-        # The args written to test_shell are a space-separated list of
-        # "uri timeout pixel_hash".
- # The timeout and pixel_hash are optional. The timeout is used if this
- # test has a custom timeout. The pixel_hash is used to avoid doing an
- # image dump if the checksums match, so it should be set to a blank
- # value if we are generating a new baseline.
- # (Otherwise, an image from a previous run will be copied into
- # the baseline.)
- image_hash = test_info.image_hash
- if image_hash and self._test_args.new_baseline:
- image_hash = ""
- self._test_shell_proc.stdin.write(("%s %s %s\n" %
- (test_info.uri, test_info.timeout, image_hash)))
-
- # If the test shell is dead, the above may cause an IOError as we
- # try to write onto the broken pipe. If this is the first test for
-        # this test shell process, then the test shell did not
- # successfully start. If this is not the first test, then the
- # previous tests have caused some kind of delayed crash. We don't
- # try to recover here.
- self._test_shell_proc.stdin.flush()
-
- stats = process_output(self._test_shell_proc, test_info,
- self._test_types, self._test_args,
- self._options.target,
- self._options.results_directory)
-
- self._test_stats.append(stats)
- return stats.failures
-
- def _ensure_test_shell_is_running(self):
- """Start the shared test shell, if it's not running. Not for use when
- running tests singly, since those each start a separate test shell in
- their own thread.
- """
- if (not self._test_shell_proc or
- self._test_shell_proc.poll() is not None):
- self._test_shell_proc = start_test_shell(self._test_shell_command,
- self._shell_args)
-
- def _kill_test_shell(self):
- """Kill the test shell process if it's running."""
- if self._test_shell_proc:
- self._test_shell_proc.stdin.close()
- self._test_shell_proc.stdout.close()
- if self._test_shell_proc.stderr:
- self._test_shell_proc.stderr.close()
- if (sys.platform not in ('win32', 'cygwin') and
- not self._test_shell_proc.poll()):
- # Closing stdin/stdout/stderr hangs sometimes on OS X.
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._test_shell_proc.pid)], stderr=null)
- null.close()
- self._test_shell_proc = None
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/websocket_server.py b/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/websocket_server.py
deleted file mode 100644
index 8237c3c..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/layout_package/websocket_server.py
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A class to help start/stop the PyWebSocket server used by layout tests."""
-
-
-import logging
-import optparse
-import os
-import subprocess
-import sys
-import tempfile
-import time
-
-import path_utils
-import platform_utils
-import http_server
-
-# So we can import httpd_utils below to make ui_tests happy.
-sys.path.insert(0, path_utils.path_from_base('tools', 'python'))
-import google.httpd_utils
-
-_WS_LOG_PREFIX = 'pywebsocket.ws.log-'
-_WSS_LOG_PREFIX = 'pywebsocket.wss.log-'
-
-_DEFAULT_WS_PORT = 8880
-_DEFAULT_WSS_PORT = 9323
-
-
-def remove_log_files(folder, starts_with):
- files = os.listdir(folder)
-    for filename in files:
-        if filename.startswith(starts_with):
-            full_path = os.path.join(folder, filename)
-            os.remove(full_path)
-
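-# For example (the output directory is illustrative): clear stale
-# secure-socket logs before a new run.
-#
-#   remove_log_files('/tmp/layout-test-results', _WSS_LOG_PREFIX)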
-
-class PyWebSocketNotStarted(Exception):
- pass
-
-
-class PyWebSocketNotFound(Exception):
- pass
-
-
-class PyWebSocket(http_server.Lighttpd):
-
- def __init__(self, output_dir, port=_DEFAULT_WS_PORT,
- root=None,
- use_tls=False,
- private_key=http_server.Lighttpd._pem_file,
- certificate=http_server.Lighttpd._pem_file,
- register_cygwin=None,
- pidfile=None):
- """Args:
- output_dir: the absolute path to the layout test result directory
- """
- http_server.Lighttpd.__init__(self, output_dir,
- port=port,
- root=root,
- register_cygwin=register_cygwin)
- self._output_dir = output_dir
- self._process = None
- self._port = port
- self._root = root
- self._use_tls = use_tls
- self._private_key = private_key
- self._certificate = certificate
- if self._port:
- self._port = int(self._port)
- if self._use_tls:
- self._server_name = 'PyWebSocket(Secure)'
- else:
- self._server_name = 'PyWebSocket'
- self._pidfile = pidfile
- self._wsout = None
-
- # Webkit tests
- if self._root:
- self._layout_tests = os.path.abspath(self._root)
- self._web_socket_tests = os.path.abspath(
- os.path.join(self._root, 'websocket', 'tests'))
- else:
- try:
- self._web_socket_tests = path_utils.path_from_base(
- 'third_party', 'WebKit', 'LayoutTests', 'websocket',
- 'tests')
- self._layout_tests = path_utils.path_from_base(
- 'third_party', 'WebKit', 'LayoutTests')
- except path_utils.PathNotFound:
- self._web_socket_tests = None
-
- def start(self):
- if not self._web_socket_tests:
- logging.info('No need to start %s server.' % self._server_name)
- return
- if self.is_running():
- raise PyWebSocketNotStarted('%s is already running.' %
- self._server_name)
-
- time_str = time.strftime('%d%b%Y-%H%M%S')
- if self._use_tls:
- log_prefix = _WSS_LOG_PREFIX
- else:
- log_prefix = _WS_LOG_PREFIX
- log_file_name = log_prefix + time_str
-
- # Remove old log files. We only need to keep the last ones.
- remove_log_files(self._output_dir, log_prefix)
-
- error_log = os.path.join(self._output_dir, log_file_name + "-err.txt")
-
- output_log = os.path.join(self._output_dir, log_file_name + "-out.txt")
- self._wsout = open(output_log, "w")
-
- python_interp = sys.executable
- pywebsocket_base = path_utils.path_from_base(
- 'third_party', 'WebKit', 'WebKitTools', 'Scripts', 'webkitpy',
- 'thirdparty', 'pywebsocket')
- pywebsocket_script = path_utils.path_from_base(
- 'third_party', 'WebKit', 'WebKitTools', 'Scripts',
- 'webkitpy', 'thirdparty', 'pywebsocket', 'mod_pywebsocket',
- 'standalone.py')
- start_cmd = [
- python_interp, pywebsocket_script,
- '-p', str(self._port),
- '-d', self._layout_tests,
- '-s', self._web_socket_tests,
- '-x', '/websocket/tests/cookies',
- '-l', error_log,
- ]
-
- handler_map_file = os.path.join(self._web_socket_tests,
- 'handler_map.txt')
- if os.path.exists(handler_map_file):
- logging.debug('Using handler_map_file: %s' % handler_map_file)
- start_cmd.append('-m')
- start_cmd.append(handler_map_file)
- else:
- logging.warning('No handler_map_file found')
-
- if self._use_tls:
- start_cmd.extend(['-t', '-k', self._private_key,
- '-c', self._certificate])
-
-        # Put the cygwin directory first in the path to find cygwin1.dll.
-        # Use a copy so we don't mutate this process's own environment.
-        env = os.environ.copy()
- if sys.platform in ('cygwin', 'win32'):
- env['PATH'] = '%s;%s' % (
- path_utils.path_from_base('third_party', 'cygwin', 'bin'),
- env['PATH'])
-
- if sys.platform == 'win32' and self._register_cygwin:
- setup_mount = path_utils.path_from_base('third_party', 'cygwin',
- 'setup_mount.bat')
- subprocess.Popen(setup_mount).wait()
-
- env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep +
- env.get('PYTHONPATH', ''))
-
- logging.debug('Starting %s server on %d.' % (
- self._server_name, self._port))
- logging.debug('cmdline: %s' % ' '.join(start_cmd))
- self._process = subprocess.Popen(start_cmd, stdout=self._wsout,
- stderr=subprocess.STDOUT,
- env=env)
-
- # Wait a bit before checking the liveness of the server.
- time.sleep(0.5)
-
- if self._use_tls:
- url = 'https'
- else:
- url = 'http'
- url = url + '://127.0.0.1:%d/' % self._port
- if not google.httpd_utils.UrlIsAlive(url):
- fp = open(output_log)
- try:
- for line in fp:
- logging.error(line)
- finally:
- fp.close()
- raise PyWebSocketNotStarted(
- 'Failed to start %s server on port %s.' %
- (self._server_name, self._port))
-
-        # The process has already terminated.
-        if self._process.returncode is not None:
- raise PyWebSocketNotStarted(
- 'Failed to start %s server.' % self._server_name)
- if self._pidfile:
- f = open(self._pidfile, 'w')
- f.write("%d" % self._process.pid)
- f.close()
-
- def stop(self, force=False):
- if not force and not self.is_running():
- return
-
-        pid = None
-        if self._process:
-            pid = self._process.pid
-        elif self._pidfile:
-            f = open(self._pidfile)
-            pid = int(f.read().strip())
-            f.close()
-
- if not pid:
- raise PyWebSocketNotFound(
- 'Failed to find %s server pid.' % self._server_name)
-
- logging.debug('Shutting down %s server %d.' % (self._server_name, pid))
- platform_utils.kill_process(pid)
-
- if self._process:
- self._process.wait()
- self._process = None
-
- if self._wsout:
- self._wsout.close()
- self._wsout = None
-
-
-if '__main__' == __name__:
- # Provide some command line params for starting the PyWebSocket server
- # manually.
- option_parser = optparse.OptionParser()
- option_parser.add_option('--server', type='choice',
- choices=['start', 'stop'], default='start',
- help='Server action (start|stop)')
- option_parser.add_option('-p', '--port', dest='port',
- default=None, help='Port to listen on')
- option_parser.add_option('-r', '--root',
- help='Absolute path to DocumentRoot '
- '(overrides layout test roots)')
- option_parser.add_option('-t', '--tls', dest='use_tls',
- action='store_true',
- default=False, help='use TLS (wss://)')
- option_parser.add_option('-k', '--private_key', dest='private_key',
- default='', help='TLS private key file.')
- option_parser.add_option('-c', '--certificate', dest='certificate',
- default='', help='TLS certificate file.')
- option_parser.add_option('--register_cygwin', action="store_true",
- dest="register_cygwin",
- help='Register Cygwin paths (on Win try bots)')
- option_parser.add_option('--pidfile', help='path to pid file.')
- options, args = option_parser.parse_args()
-
- if not options.port:
- if options.use_tls:
- options.port = _DEFAULT_WSS_PORT
- else:
- options.port = _DEFAULT_WS_PORT
-
- kwds = {'port': options.port, 'use_tls': options.use_tls}
- if options.root:
- kwds['root'] = options.root
- if options.private_key:
- kwds['private_key'] = options.private_key
- if options.certificate:
- kwds['certificate'] = options.certificate
- kwds['register_cygwin'] = options.register_cygwin
- if options.pidfile:
- kwds['pidfile'] = options.pidfile
-
- pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds)
-
- if 'start' == options.server:
- pywebsocket.start()
- else:
- pywebsocket.stop(force=True)
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/webkit/tools/layout_tests/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
deleted file mode 100644
index ba7d5d1..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
+++ /dev/null
@@ -1,1004 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Rebaselining tool that automatically produces baselines for all platforms.
-
-The script does the following for each platform specified:
- 1. Compile a list of tests that need rebaselining.
- 2. Download test result archive from buildbot for the platform.
- 3. Extract baselines from the archive file for all identified files.
- 4. Add new baselines to SVN repository.
- 5. For each test that has been rebaselined, remove this platform option from
- the test in test_expectation.txt. If no other platforms remain after
- removal, delete the rebaselined test from the file.
-
-At the end, the script generates an HTML page comparing old and new baselines.
-"""
-
-import logging
-import optparse
-import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import urllib
-import webbrowser
-import zipfile
-
-from layout_package import path_utils
-from layout_package import test_expectations
-from test_types import image_diff
-from test_types import text_diff
-
-# Repository type constants.
-REPO_SVN, REPO_UNKNOWN = range(2)
-
-BASELINE_SUFFIXES = ['.txt', '.png', '.checksum']
-REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux']
-ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel',
- 'win-vista': 'webkit-dbg-vista',
- 'win-xp': 'webkit-rel',
- 'mac': 'webkit-rel-mac5',
- 'linux': 'webkit-rel-linux',
- 'win-canary': 'webkit-rel-webkit-org',
- 'win-vista-canary': 'webkit-dbg-vista',
- 'win-xp-canary': 'webkit-rel-webkit-org',
- 'mac-canary': 'webkit-rel-mac-webkit-org',
- 'linux-canary': 'webkit-rel-linux-webkit-org'}
-
-
-def run_shell_with_return_code(command, print_output=False):
- """Executes a command and returns the output and process return code.
-
- Args:
- command: program and arguments.
- print_output: if true, print the command results to standard output.
-
- Returns:
- command output, return code
- """
-
- # Use a shell for subcommands on Windows to get a PATH search.
- use_shell = sys.platform.startswith('win')
- p = subprocess.Popen(command, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=use_shell)
-    if print_output:
-        output_array = []
-        while True:
-            line = p.stdout.readline()
-            if not line:
-                break
-            print line.strip('\n')
-            output_array.append(line)
-        output = ''.join(output_array)
-    else:
-        output = p.stdout.read()
- p.wait()
- p.stdout.close()
-
- return output, p.returncode
-
-
-def run_shell(command, print_output=False):
- """Executes a command and returns the output.
-
- Args:
- command: program and arguments.
- print_output: if true, print the command results to standard output.
-
- Returns:
- command output
- """
-
- output, return_code = run_shell_with_return_code(command, print_output)
- return output
-
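-# Hedged usage sketch (commands are illustrative):
-#
-#   output, code = run_shell_with_return_code(['svn', 'info'])
-#   output = run_shell(['svn', 'status'], print_output=True)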
-
-def log_dashed_string(text, platform, logging_level=logging.INFO):
- """Log text message with dashes on both sides."""
-
- msg = text
- if platform:
- msg += ': ' + platform
- if len(msg) < 78:
- dashes = '-' * ((78 - len(msg)) / 2)
- msg = '%s %s %s' % (dashes, msg, dashes)
-
- if logging_level == logging.ERROR:
- logging.error(msg)
- elif logging_level == logging.WARNING:
- logging.warn(msg)
- else:
- logging.info(msg)
-
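-# For example, log_dashed_string('Downloading archive', 'mac') logs the
-# message centered between dashes, padded to roughly 78 columns
-# (shortened here):
-#
-#   ---------------- Downloading archive: mac ----------------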
-
-def setup_html_directory(html_directory):
- """Setup the directory to store html results.
-
- All html related files are stored in the "rebaseline_html" subdirectory.
-
- Args:
- html_directory: parent directory that stores the rebaselining results.
- If None, a temp directory is created.
-
- Returns:
- the directory that stores the html related rebaselining results.
- """
-
- if not html_directory:
- html_directory = tempfile.mkdtemp()
- elif not os.path.exists(html_directory):
- os.mkdir(html_directory)
-
- html_directory = os.path.join(html_directory, 'rebaseline_html')
- logging.info('Html directory: "%s"', html_directory)
-
- if os.path.exists(html_directory):
- shutil.rmtree(html_directory, True)
-        logging.info('Deleted existing html directory: "%s"', html_directory)
-
- if not os.path.exists(html_directory):
- os.mkdir(html_directory)
- return html_directory
-
-
-def get_result_file_fullpath(html_directory, baseline_filename, platform,
- result_type):
- """Get full path of the baseline result file.
-
- Args:
- html_directory: directory that stores the html related files.
- baseline_filename: name of the baseline file.
- platform: win, linux or mac
-        result_type: type of the result file: 'old', 'new' or 'diff'.
-
- Returns:
- Full path of the baseline file for rebaselining result comparison.
- """
-
- base, ext = os.path.splitext(baseline_filename)
- result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext)
- fullpath = os.path.join(html_directory, result_filename)
- logging.debug(' Result file full path: "%s".', fullpath)
- return fullpath
-
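-# For example (the directory is illustrative): the new '.txt' result for a
-# Windows baseline is stored next to its old and diff counterparts.
-#
-#   get_result_file_fullpath('/tmp/rebaseline_html', 'foo-expected.txt',
-#                            'win', 'new')
-#   # -> '/tmp/rebaseline_html/foo-expected-win-new.txt'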
-
-class Rebaseliner(object):
- """Class to produce new baselines for a given platform."""
-
- REVISION_REGEX = r'<a href=\"(\d+)/\">'
-
- def __init__(self, platform, options):
- self._file_dir = path_utils.path_from_base('webkit', 'tools',
- 'layout_tests')
- self._platform = platform
- self._options = options
- self._rebaselining_tests = []
- self._rebaselined_tests = []
-
-        # Create a tests-and-expectations helper, which is used to:
-        #   - compile the list of tests that need rebaselining.
-        #   - update the tests in the test_expectations file after the
-        #     rebaseline is done.
- self._test_expectations = \
- test_expectations.TestExpectations(None,
- self._file_dir,
- platform,
- False,
- False)
-
- self._repo_type = self._get_repo_type()
-
- def run(self, backup):
- """Run rebaseline process."""
-
- log_dashed_string('Compiling rebaselining tests', self._platform)
- if not self._compile_rebaselining_tests():
- return True
-
- log_dashed_string('Downloading archive', self._platform)
- archive_file = self._download_buildbot_archive()
- logging.info('')
- if not archive_file:
- logging.error('No archive found.')
- return False
-
- log_dashed_string('Extracting and adding new baselines',
- self._platform)
- if not self._extract_and_add_new_baselines(archive_file):
- return False
-
- log_dashed_string('Updating rebaselined tests in file',
- self._platform)
- self._update_rebaselined_tests_in_file(backup)
- logging.info('')
-
- if len(self._rebaselining_tests) != len(self._rebaselined_tests):
- logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN '
- 'REBASELINED.')
- logging.warning(' Total tests needing rebaselining: %d',
- len(self._rebaselining_tests))
- logging.warning(' Total tests rebaselined: %d',
- len(self._rebaselined_tests))
- return False
-
- logging.warning('All tests needing rebaselining were successfully '
- 'rebaselined.')
-
- return True
-
- def get_rebaselining_tests(self):
- return self._rebaselining_tests
-
- def _get_repo_type(self):
- """Get the repository type that client is using."""
- output, return_code = run_shell_with_return_code(['svn', 'info'],
- False)
- if return_code == 0:
- return REPO_SVN
-
- return REPO_UNKNOWN
-
- def _compile_rebaselining_tests(self):
- """Compile list of tests that need rebaselining for the platform.
-
- Returns:
- List of tests that need rebaselining or
- None if there is no such test.
- """
-
- self._rebaselining_tests = \
- self._test_expectations.get_rebaselining_failures()
- if not self._rebaselining_tests:
- logging.warn('No tests found that need rebaselining.')
- return None
-
- logging.info('Total number of tests needing rebaselining '
- 'for "%s": "%d"', self._platform,
- len(self._rebaselining_tests))
-
- test_no = 1
- for test in self._rebaselining_tests:
- logging.info(' %d: %s', test_no, test)
- test_no += 1
-
- return self._rebaselining_tests
-
- def _get_latest_revision(self, url):
- """Get the latest layout test revision number from buildbot.
-
- Args:
- url: Url to retrieve layout test revision numbers.
-
- Returns:
- latest revision or
- None on failure.
- """
-
- logging.debug('Url to retrieve revision: "%s"', url)
-
- f = urllib.urlopen(url)
- content = f.read()
- f.close()
-
- revisions = re.findall(self.REVISION_REGEX, content)
- if not revisions:
- logging.error('Failed to find revision, content: "%s"', content)
- return None
-
- revisions.sort(key=int)
-        logging.info('Latest revision: "%s"', revisions[-1])
-        return revisions[-1]
-
- def _get_archive_dir_name(self, platform, webkit_canary):
- """Get name of the layout test archive directory.
-
- Returns:
- Directory name or
- None on failure
- """
-
- if webkit_canary:
- platform += '-canary'
-
- if platform in ARCHIVE_DIR_NAME_DICT:
- return ARCHIVE_DIR_NAME_DICT[platform]
- else:
- logging.error('Cannot find platform key %s in archive '
- 'directory name dictionary', platform)
- return None
-
- def _get_archive_url(self):
- """Generate the url to download latest layout test archive.
-
- Returns:
- Url to download archive or
- None on failure
- """
-
- dir_name = self._get_archive_dir_name(self._platform,
- self._options.webkit_canary)
- if not dir_name:
- return None
-
- logging.debug('Buildbot platform dir name: "%s"', dir_name)
-
- url_base = '%s/%s/' % (self._options.archive_url, dir_name)
- latest_revision = self._get_latest_revision(url_base)
- if latest_revision is None or latest_revision <= 0:
- return None
-
- archive_url = ('%s%s/layout-test-results.zip' % (url_base,
- latest_revision))
- logging.info('Archive url: "%s"', archive_url)
- return archive_url
-
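-    # Illustrative result (the revision number is hypothetical): for 'mac'
-    # this yields something like
-    # http://build.chromium.org/buildbot/layout_test_results/
-    #     webkit-rel-mac5/12345/layout-test-results.zip
-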
- def _download_buildbot_archive(self):
- """Download layout test archive file from buildbot.
-
-        Returns:
-            Full path of the downloaded archive file, or
-            None on failure.
- """
-
- url = self._get_archive_url()
- if url is None:
- return None
-
- fn = urllib.urlretrieve(url)[0]
- logging.info('Archive downloaded and saved to file: "%s"', fn)
- return fn
-
- def _extract_and_add_new_baselines(self, archive_file):
- """Extract new baselines from archive and add them to SVN repository.
-
- Args:
- archive_file: full path to the archive file.
-
- Returns:
- List of tests that have been rebaselined or
- None on failure.
- """
-
- zip_file = zipfile.ZipFile(archive_file, 'r')
- zip_namelist = zip_file.namelist()
-
- logging.debug('zip file namelist:')
- for name in zip_namelist:
- logging.debug(' ' + name)
-
- platform = path_utils.platform_name(self._platform)
- logging.debug('Platform dir: "%s"', platform)
-
- test_no = 1
- self._rebaselined_tests = []
- for test in self._rebaselining_tests:
- logging.info('Test %d: %s', test_no, test)
-
- found = False
- svn_error = False
- test_basename = os.path.splitext(test)[0]
- for suffix in BASELINE_SUFFIXES:
- archive_test_name = ('layout-test-results/%s-actual%s' %
- (test_basename, suffix))
- logging.debug(' Archive test file name: "%s"',
- archive_test_name)
- if not archive_test_name in zip_namelist:
- logging.info(' %s file not in archive.', suffix)
- continue
-
- found = True
- logging.info(' %s file found in archive.', suffix)
-
- # Extract new baseline from archive and save it to a temp file.
- data = zip_file.read(archive_test_name)
- temp_fd, temp_name = tempfile.mkstemp(suffix)
- f = os.fdopen(temp_fd, 'wb')
- f.write(data)
- f.close()
-
- expected_filename = '%s-expected%s' % (test_basename, suffix)
- expected_fullpath = os.path.join(
- path_utils.chromium_baseline_path(platform),
- expected_filename)
- expected_fullpath = os.path.normpath(expected_fullpath)
- logging.debug(' Expected file full path: "%s"',
- expected_fullpath)
-
-                # TODO(victorw): for now, the rebaselining tool checks
-                # whether or not THIS baseline is a duplicate and should be
-                # skipped. We could improve the tool to check all baselines
-                # in upper and lower levels and remove all duplicated
-                # baselines.
- if self._is_dup_baseline(temp_name,
- expected_fullpath,
- test,
- suffix,
- self._platform):
- os.remove(temp_name)
- self._delete_baseline(expected_fullpath)
- continue
-
- # Create the new baseline directory if it doesn't already
- # exist.
- path_utils.maybe_make_directory(
- os.path.dirname(expected_fullpath))
-
- shutil.move(temp_name, expected_fullpath)
-
- if not self._svn_add(expected_fullpath):
- svn_error = True
- elif suffix != '.checksum':
- self._create_html_baseline_files(expected_fullpath)
-
- if not found:
- logging.warn(' No new baselines found in archive.')
- else:
- if svn_error:
- logging.warn(' Failed to add baselines to SVN.')
- else:
- logging.info(' Rebaseline succeeded.')
- self._rebaselined_tests.append(test)
-
- test_no += 1
-
- zip_file.close()
- os.remove(archive_file)
-
- return self._rebaselined_tests
-
- def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix,
- platform):
- """Check whether a baseline is duplicate and can fallback to same
- baseline for another platform. For example, if a test has same
- baseline on linux and windows, then we only store windows
- baseline and linux baseline will fallback to the windows version.
-
- Args:
- expected_filename: baseline expectation file name.
- test: test name.
- suffix: file suffix of the expected results, including dot;
- e.g. '.txt' or '.png'.
- platform: baseline platform 'mac', 'win' or 'linux'.
-
- Returns:
- True if the baseline is unnecessary.
- False otherwise.
- """
- test_filepath = os.path.join(path_utils.layout_tests_dir(), test)
- all_baselines = path_utils.expected_baselines(test_filepath,
- suffix, platform, True)
- for (fallback_dir, fallback_file) in all_baselines:
- if fallback_dir and fallback_file:
- fallback_fullpath = os.path.normpath(
- os.path.join(fallback_dir, fallback_file))
- if fallback_fullpath.lower() != baseline_path.lower():
- if not self._diff_baselines(new_baseline,
- fallback_fullpath):
- logging.info(' Found same baseline at %s',
- fallback_fullpath)
- return True
- else:
- return False
-
- return False
-
- def _diff_baselines(self, file1, file2):
- """Check whether two baselines are different.
-
- Args:
- file1, file2: full paths of the baselines to compare.
-
- Returns:
- True if two files are different or have different extensions.
- False otherwise.
- """
-
- ext1 = os.path.splitext(file1)[1].upper()
- ext2 = os.path.splitext(file2)[1].upper()
- if ext1 != ext2:
- logging.warn('Files to compare have different ext. '
- 'File1: %s; File2: %s', file1, file2)
- return True
-
- if ext1 == '.PNG':
- return image_diff.ImageDiff(self._platform, '').diff_files(file1,
- file2)
- else:
- return text_diff.TestTextDiff(self._platform, '').diff_files(file1,
- file2)
-
- def _delete_baseline(self, filename):
- """Remove the file from repository and delete it from disk.
-
- Args:
- filename: full path of the file to delete.
- """
-
- if not filename or not os.path.isfile(filename):
- return
-
- if self._repo_type == REPO_SVN:
- parent_dir, basename = os.path.split(filename)
- original_dir = os.getcwd()
- os.chdir(parent_dir)
- run_shell(['svn', 'delete', '--force', basename], False)
- os.chdir(original_dir)
- else:
- os.remove(filename)
-
- def _update_rebaselined_tests_in_file(self, backup):
- """Update the rebaselined tests in test expectations file.
-
- Args:
-            backup: if True, back up the original test expectations file.
- """
-
- if self._rebaselined_tests:
- self._test_expectations.remove_platform_from_file(
- self._rebaselined_tests, self._platform, backup)
- else:
- logging.info('No test was rebaselined so nothing to remove.')
-
- def _svn_add(self, filename):
- """Add the file to SVN repository.
-
- Args:
- filename: full path of the file to add.
-
- Returns:
-            True if the file already exists in SVN or is successfully added
- to SVN.
- False otherwise.
- """
-
- if not filename:
- return False
-
- parent_dir, basename = os.path.split(filename)
- if self._repo_type != REPO_SVN or parent_dir == filename:
- logging.info("No svn checkout found, skip svn add.")
- return True
-
- original_dir = os.getcwd()
- os.chdir(parent_dir)
- status_output = run_shell(['svn', 'status', basename], False)
- os.chdir(original_dir)
- output = status_output.upper()
- if output.startswith('A') or output.startswith('M'):
- logging.info(' File already added to SVN: "%s"', filename)
- return True
-
- if output.find('IS NOT A WORKING COPY') >= 0:
- logging.info(' File is not a working copy, add its parent: "%s"',
- parent_dir)
- return self._svn_add(parent_dir)
-
- os.chdir(parent_dir)
- add_output = run_shell(['svn', 'add', basename], True)
- os.chdir(original_dir)
- output = add_output.upper().rstrip()
- if output.startswith('A') and output.find(basename.upper()) >= 0:
- logging.info(' Added new file: "%s"', filename)
- self._svn_prop_set(filename)
- return True
-
- if (not status_output) and (add_output.upper().find(
- 'ALREADY UNDER VERSION CONTROL') >= 0):
- logging.info(' File already under SVN and has no change: "%s"',
- filename)
- return True
-
- logging.warn(' Failed to add file to SVN: "%s"', filename)
- logging.warn(' Svn status output: "%s"', status_output)
- logging.warn(' Svn add output: "%s"', add_output)
- return False
-
- def _svn_prop_set(self, filename):
- """Set the baseline property
-
- Args:
- filename: full path of the file to add.
-
- Returns:
- True if the file already exists in SVN or is sucessfully added
- to SVN.
- False otherwise.
- """
- ext = os.path.splitext(filename)[1].upper()
- if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM':
- return
-
- parent_dir, basename = os.path.split(filename)
- original_dir = os.getcwd()
- os.chdir(parent_dir)
- if ext == '.PNG':
- cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename]
- else:
- cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename]
-
- logging.debug(' Set svn prop: %s', ' '.join(cmd))
- run_shell(cmd, False)
- os.chdir(original_dir)
-
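-    # The equivalent hand-run commands (file names illustrative) would be:
-    #
-    #   svn pset svn:mime-type image/png foo-expected.png
-    #   svn pset svn:eol-style LF foo-expected.txt
-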
- def _create_html_baseline_files(self, baseline_fullpath):
- """Create baseline files (old, new and diff) in html directory.
-
- The files are used to compare the rebaselining results.
-
- Args:
- baseline_fullpath: full path of the expected baseline file.
- """
-
- if not baseline_fullpath or not os.path.exists(baseline_fullpath):
- return
-
- # Copy the new baseline to html directory for result comparison.
- baseline_filename = os.path.basename(baseline_fullpath)
- new_file = get_result_file_fullpath(self._options.html_directory,
- baseline_filename, self._platform,
- 'new')
- shutil.copyfile(baseline_fullpath, new_file)
- logging.info(' Html: copied new baseline file from "%s" to "%s".',
- baseline_fullpath, new_file)
-
- # Get the old baseline from SVN and save to the html directory.
- output = run_shell(['svn', 'cat', '-r', 'BASE', baseline_fullpath])
- if (not output) or (output.upper().rstrip().endswith(
- 'NO SUCH FILE OR DIRECTORY')):
- logging.info(' No base file: "%s"', baseline_fullpath)
- return
- base_file = get_result_file_fullpath(self._options.html_directory,
- baseline_filename, self._platform,
- 'old')
- f = open(base_file, 'wb')
- f.write(output)
- f.close()
- logging.info(' Html: created old baseline file: "%s".',
- base_file)
-
- # Get the diff between old and new baselines and save to the html dir.
- if baseline_filename.upper().endswith('.TXT'):
- # If the user specified a custom diff command in their svn config
- # file, then it'll be used when we do svn diff, which we don't want
- # to happen since we want the unified diff. Using --diff-cmd=diff
- # doesn't always work, since they can have another diff executable
- # in their path that gives different line endings. So we use a
- # bogus temp directory as the config directory, which gets
- # around these problems.
- if sys.platform.startswith("win"):
- parent_dir = tempfile.gettempdir()
- else:
- parent_dir = sys.path[0] # tempdir is not secure.
- bogus_dir = os.path.join(parent_dir, "temp_svn_config")
- logging.debug(' Html: temp config dir: "%s".', bogus_dir)
- if not os.path.exists(bogus_dir):
- os.mkdir(bogus_dir)
- delete_bogus_dir = True
- else:
- delete_bogus_dir = False
-
- output = run_shell(["svn", "diff", "--config-dir", bogus_dir,
- baseline_fullpath])
- if output:
- diff_file = get_result_file_fullpath(
- self._options.html_directory, baseline_filename,
- self._platform, 'diff')
- f = open(diff_file, 'wb')
- f.write(output)
- f.close()
- logging.info(' Html: created baseline diff file: "%s".',
- diff_file)
-
- if delete_bogus_dir:
- shutil.rmtree(bogus_dir, True)
- logging.debug(' Html: removed temp config dir: "%s".',
- bogus_dir)
-
-
-class HtmlGenerator(object):
- """Class to generate rebaselining result comparison html."""
-
- HTML_REBASELINE = ('<html>'
- '<head>'
- '<style>'
- 'body {font-family: sans-serif;}'
- '.mainTable {background: #666666;}'
- '.mainTable td , .mainTable th {background: white;}'
- '.detail {margin-left: 10px; margin-top: 3px;}'
- '</style>'
- '<title>Rebaselining Result Comparison (%(time)s)'
- '</title>'
- '</head>'
- '<body>'
- '<h2>Rebaselining Result Comparison (%(time)s)</h2>'
- '%(body)s'
- '</body>'
- '</html>')
- HTML_NO_REBASELINING_TESTS = (
- '<p>No tests found that need rebaselining.</p>')
- HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>'
- '%s</table><br>')
- HTML_TR_TEST = ('<tr>'
- '<th style="background-color: #CDECDE; border-bottom: '
- '1px solid black; font-size: 18pt; font-weight: bold" '
- 'colspan="5">'
- '<a href="%s">%s</a>'
- '</th>'
- '</tr>')
- HTML_TEST_DETAIL = ('<div class="detail">'
- '<tr>'
- '<th width="100">Baseline</th>'
- '<th width="100">Platform</th>'
- '<th width="200">Old</th>'
- '<th width="200">New</th>'
- '<th width="150">Difference</th>'
- '</tr>'
- '%s'
- '</div>')
- HTML_TD_NOLINK = '<td align=center><a>%s</a></td>'
- HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>'
- HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">'
- '<img style="width: 200" src="%(uri)s" /></a></td>')
- HTML_TR = '<tr>%s</tr>'
-
- def __init__(self, options, platforms, rebaselining_tests):
- self._html_directory = options.html_directory
- self._platforms = platforms
- self._rebaselining_tests = rebaselining_tests
- self._html_file = os.path.join(options.html_directory,
- 'rebaseline.html')
-
- def generate_html(self):
- """Generate html file for rebaselining result comparison."""
-
- logging.info('Generating html file')
-
- html_body = ''
- if not self._rebaselining_tests:
- html_body += self.HTML_NO_REBASELINING_TESTS
- else:
- tests = list(self._rebaselining_tests)
- tests.sort()
-
-            test_no = 1
-            for test in tests:
-                logging.info('Test %d: %s', test_no, test)
-                html_body += self._generate_html_for_one_test(test)
-                test_no += 1
-
- html = self.HTML_REBASELINE % ({'time': time.asctime(),
- 'body': html_body})
- logging.debug(html)
-
- f = open(self._html_file, 'w')
- f.write(html)
- f.close()
-
- logging.info('Baseline comparison html generated at "%s"',
- self._html_file)
-
- def show_html(self):
- """Launch the rebaselining html in brwoser."""
-
- logging.info('Launching html: "%s"', self._html_file)
-
- html_uri = path_utils.filename_to_uri(self._html_file)
- webbrowser.open(html_uri, 1)
-
- logging.info('Html launched.')
-
- def _generate_baseline_links(self, test_basename, suffix, platform):
- """Generate links for baseline results (old, new and diff).
-
- Args:
- test_basename: base filename of the test
- suffix: baseline file suffixes: '.txt', '.png'
- platform: win, linux or mac
-
- Returns:
- html links for showing baseline results (old, new and diff)
- """
-
- baseline_filename = '%s-expected%s' % (test_basename, suffix)
- logging.debug(' baseline filename: "%s"', baseline_filename)
-
- new_file = get_result_file_fullpath(self._html_directory,
- baseline_filename, platform, 'new')
- logging.info(' New baseline file: "%s"', new_file)
- if not os.path.exists(new_file):
- logging.info(' No new baseline file: "%s"', new_file)
- return ''
-
- old_file = get_result_file_fullpath(self._html_directory,
- baseline_filename, platform, 'old')
- logging.info(' Old baseline file: "%s"', old_file)
- if suffix == '.png':
- html_td_link = self.HTML_TD_LINK_IMG
- else:
- html_td_link = self.HTML_TD_LINK
-
- links = ''
- if os.path.exists(old_file):
- links += html_td_link % {
- 'uri': path_utils.filename_to_uri(old_file),
- 'name': baseline_filename}
- else:
- logging.info(' No old baseline file: "%s"', old_file)
- links += self.HTML_TD_NOLINK % ''
-
- links += html_td_link % {'uri': path_utils.filename_to_uri(new_file),
- 'name': baseline_filename}
-
- diff_file = get_result_file_fullpath(self._html_directory,
- baseline_filename, platform,
- 'diff')
- logging.info(' Baseline diff file: "%s"', diff_file)
- if os.path.exists(diff_file):
- links += html_td_link % {'uri': path_utils.filename_to_uri(
- diff_file), 'name': 'Diff'}
- else:
- logging.info(' No baseline diff file: "%s"', diff_file)
- links += self.HTML_TD_NOLINK % ''
-
- return links
-
- def _generate_html_for_one_test(self, test):
- """Generate html for one rebaselining test.
-
- Args:
- test: layout test name
-
- Returns:
- html that compares baseline results for the test.
- """
-
- test_basename = os.path.basename(os.path.splitext(test)[0])
- logging.info(' basename: "%s"', test_basename)
- rows = []
- for suffix in BASELINE_SUFFIXES:
- if suffix == '.checksum':
- continue
-
- logging.info(' Checking %s files', suffix)
- for platform in self._platforms:
- links = self._generate_baseline_links(test_basename, suffix,
- platform)
- if links:
- row = self.HTML_TD_NOLINK % self._get_baseline_result_type(
- suffix)
- row += self.HTML_TD_NOLINK % platform
- row += links
- logging.debug(' html row: %s', row)
-
- rows.append(self.HTML_TR % row)
-
- if rows:
- test_path = os.path.join(path_utils.layout_tests_dir(), test)
- html = self.HTML_TR_TEST % (path_utils.filename_to_uri(test_path),
- test)
- html += self.HTML_TEST_DETAIL % ' '.join(rows)
-
- logging.debug(' html for test: %s', html)
- return self.HTML_TABLE_TEST % html
-
- return ''
-
- def _get_baseline_result_type(self, suffix):
- """Name of the baseline result type."""
-
- if suffix == '.png':
- return 'Pixel'
- elif suffix == '.txt':
- return 'Render Tree'
- else:
- return 'Other'
-
-
-def main():
- """Main function to produce new baselines."""
-
- option_parser = optparse.OptionParser()
- option_parser.add_option('-v', '--verbose',
- action='store_true',
- default=False,
- help='include debug-level logging.')
-
- option_parser.add_option('-p', '--platforms',
- default='mac,win,win-xp,win-vista,linux',
- help=('Comma delimited list of platforms '
- 'that need rebaselining.'))
-
- option_parser.add_option('-u', '--archive_url',
- default=('http://build.chromium.org/buildbot/'
- 'layout_test_results'),
-                             help=('URL to find the layout test result archive'
- ' file.'))
-
- option_parser.add_option('-w', '--webkit_canary',
- action='store_true',
- default=False,
- help=('If True, pull baselines from webkit.org '
- 'canary bot.'))
-
- option_parser.add_option('-b', '--backup',
- action='store_true',
- default=False,
-                             help=('Whether or not to back up the original test'
- ' expectations file after rebaseline.'))
-
- option_parser.add_option('-d', '--html_directory',
- default='',
- help=('The directory that stores the results for'
- ' rebaselining comparison.'))
-
- options = option_parser.parse_args()[0]
-
- # Set up our logging format.
- log_level = logging.INFO
- if options.verbose:
- log_level = logging.DEBUG
- logging.basicConfig(level=log_level,
- format=('%(asctime)s %(filename)s:%(lineno)-3d '
- '%(levelname)s %(message)s'),
- datefmt='%y%m%d %H:%M:%S')
-
- # Verify 'platforms' option is valid
- if not options.platforms:
- logging.error('Invalid "platforms" option. --platforms must be '
- 'specified in order to rebaseline.')
- sys.exit(1)
- platforms = [p.strip().lower() for p in options.platforms.split(',')]
- for platform in platforms:
- if not platform in REBASELINE_PLATFORM_ORDER:
- logging.error('Invalid platform: "%s"' % (platform))
- sys.exit(1)
-
-    # Adjust the platform order so the rebaseline tool runs in the order
-    # 'mac', 'win', 'linux'. This is the same order as the layout test
-    # baseline search paths, and it simplifies how the rebaseline tool
-    # detects duplicate baselines. See the _IsDupBaseline method for details.
- rebaseline_platforms = []
- for platform in REBASELINE_PLATFORM_ORDER:
- if platform in platforms:
- rebaseline_platforms.append(platform)
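-    # e.g. '--platforms linux,mac' is reordered to ['mac', 'linux'].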
-
- options.html_directory = setup_html_directory(options.html_directory)
-
- rebaselining_tests = set()
- backup = options.backup
- for platform in rebaseline_platforms:
- rebaseliner = Rebaseliner(platform, options)
-
- logging.info('')
- log_dashed_string('Rebaseline started', platform)
- if rebaseliner.run(backup):
- # Only need to backup one original copy of test expectation file.
- backup = False
- log_dashed_string('Rebaseline done', platform)
- else:
- log_dashed_string('Rebaseline failed', platform, logging.ERROR)
-
- rebaselining_tests |= set(rebaseliner.get_rebaselining_tests())
-
- logging.info('')
- log_dashed_string('Rebaselining result comparison started', None)
- html_generator = HtmlGenerator(options,
- rebaseline_platforms,
- rebaselining_tests)
- html_generator.generate_html()
- html_generator.show_html()
- log_dashed_string('Rebaselining result comparison done', None)
-
- sys.exit(0)
-
-if '__main__' == __name__:
- main()
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/run_chromium_webkit_tests.py b/webkit/tools/layout_tests/webkitpy/layout_tests/run_chromium_webkit_tests.py
deleted file mode 100755
index e42bd8e..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/run_chromium_webkit_tests.py
+++ /dev/null
@@ -1,1679 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Run layout tests using the test_shell.
-
-This is a port of the existing webkit test script run-webkit-tests.
-
-The TestRunner class runs a series of tests (TestType interface) against a set
-of test files. If a test file fails a TestType, it returns a list TestFailure
-objects to the TestRunner. The TestRunner then aggregates the TestFailures to
-create a final report.
-
-This script reads several files, if they exist in the test_lists subdirectory
-next to this script itself. Each should contain a list of paths to individual
-tests or entire subdirectories of tests, relative to the outermost test
-directory. Entire lines starting with '//' (comments) will be ignored.
-
-For details of the files' contents and purposes, see test_lists/README.
-"""
-
-import errno
-import glob
-import logging
-import math
-import optparse
-import os
-import Queue
-import random
-import re
-import shutil
-import subprocess
-import sys
-import time
-import traceback
-
-from layout_package import apache_http_server
-from layout_package import test_expectations
-from layout_package import http_server
-from layout_package import json_layout_results_generator
-from layout_package import metered_stream
-from layout_package import path_utils
-from layout_package import platform_utils
-from layout_package import test_failures
-from layout_package import test_shell_thread
-from layout_package import test_files
-from layout_package import websocket_server
-from test_types import fuzzy_image_diff
-from test_types import image_diff
-from test_types import test_type_base
-from test_types import text_diff
-
-sys.path.append(path_utils.path_from_base('third_party'))
-import simplejson
-
-# Indicates that we want detailed progress updates in the output (prints
-# directory-by-directory feedback).
-LOG_DETAILED_PROGRESS = 'detailed-progress'
-
-# Log any unexpected results while running (instead of just at the end).
-LOG_UNEXPECTED = 'unexpected'
-
-# Builder base URL where we have the archived test results.
-BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
-
-TestExpectationsFile = test_expectations.TestExpectationsFile
-
-
-class TestInfo:
- """Groups information about a test for easy passing of data."""
-
- def __init__(self, filename, timeout):
- """Generates the URI and stores the filename and timeout for this test.
- Args:
- filename: Full path to the test.
- timeout: Timeout for running the test in TestShell.
- """
- self.filename = filename
- self.uri = path_utils.filename_to_uri(filename)
- self.timeout = timeout
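-        # The pixel-test baseline hash lives next to the test, e.g.
-        # 'fast/dom/foo.html' -> 'fast/dom/foo-expected.checksum'
-        # (illustrative path).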
- expected_hash_file = path_utils.expected_filename(filename,
- '.checksum')
- try:
- self.image_hash = open(expected_hash_file, "r").read()
- except IOError, e:
- if errno.ENOENT != e.errno:
- raise
- self.image_hash = None
-
-
-class ResultSummary(object):
- """A class for partitioning the test results we get into buckets.
-
- This class is basically a glorified struct and it's private to this file
- so we don't bother with any information hiding."""
-
- def __init__(self, expectations, test_files):
- self.total = len(test_files)
- self.remaining = self.total
- self.expectations = expectations
- self.expected = 0
- self.unexpected = 0
- self.tests_by_expectation = {}
- self.tests_by_timeline = {}
- self.results = {}
- self.unexpected_results = {}
- self.failures = {}
- self.tests_by_expectation[test_expectations.SKIP] = set()
- for expectation in TestExpectationsFile.EXPECTATIONS.values():
- self.tests_by_expectation[expectation] = set()
- for timeline in TestExpectationsFile.TIMELINES.values():
- self.tests_by_timeline[timeline] = (
- expectations.get_tests_with_timeline(timeline))
-
- def add(self, test, failures, result, expected):
- """Add a result into the appropriate bin.
-
- Args:
- test: test file name
- failures: list of failure objects from test execution
- result: result of test (PASS, IMAGE, etc.).
- expected: whether the result was what we expected it to be.
- """
-
- self.tests_by_expectation[result].add(test)
- self.results[test] = result
- self.remaining -= 1
- if len(failures):
- self.failures[test] = failures
- if expected:
- self.expected += 1
- else:
- self.unexpected_results[test] = result
- self.unexpected += 1
-
-
-class TestRunner:
- """A class for managing running a series of tests on a series of layout
- test files."""
-
- HTTP_SUBDIR = os.sep.join(['', 'http', ''])
- WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
-
- # The per-test timeout in milliseconds, if no --time-out-ms option was
- # given to run_webkit_tests. This should correspond to the default timeout
- # in test_shell.exe.
- DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
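-    # main() doubles this for Debug targets, and tests marked SLOW get 5x
-    # the regular timeout (see the --time-out-ms handling in main() below).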
-
- NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
-
- def __init__(self, options, meter):
- """Initialize test runner data structures.
-
- Args:
- options: a dictionary of command line options
- meter: a MeteredStream object to record updates to.
- """
- self._options = options
- self._meter = meter
-
- if options.use_apache:
- self._http_server = apache_http_server.LayoutTestApacheHttpd(
- options.results_directory)
- else:
- self._http_server = http_server.Lighttpd(options.results_directory)
-
- self._websocket_server = websocket_server.PyWebSocket(
- options.results_directory)
- # disable wss server. need to install pyOpenSSL on buildbots.
- # self._websocket_secure_server = websocket_server.PyWebSocket(
- # options.results_directory, use_tls=True, port=9323)
-
- # a list of TestType objects
- self._test_types = []
-
- # a set of test files, and the same tests as a list
- self._test_files = set()
- self._test_files_list = None
- self._file_dir = path_utils.path_from_base('webkit', 'tools',
- 'layout_tests')
- self._result_queue = Queue.Queue()
-
- # These are used for --log detailed-progress to track status by
- # directory.
- self._current_dir = None
- self._current_progress_str = ""
- self._current_test_number = 0
-
- def __del__(self):
- logging.debug("flushing stdout")
- sys.stdout.flush()
- logging.debug("flushing stderr")
- sys.stderr.flush()
- logging.debug("stopping http server")
- # Stop the http server.
- self._http_server.stop()
- # Stop the Web Socket / Web Socket Secure servers.
- self._websocket_server.stop()
- # self._websocket_secure_server.Stop()
-
- def gather_file_paths(self, paths):
- """Find all the files to test.
-
- Args:
- paths: a list of globs to use instead of the defaults."""
- self._test_files = test_files.gather_test_files(paths)
-
- def parse_expectations(self, platform, is_debug_mode):
- """Parse the expectations from the test_list files and return a data
- structure holding them. Throws an error if the test_list files have
- invalid syntax."""
- if self._options.lint_test_files:
- test_files = None
- else:
- test_files = self._test_files
-
- try:
- self._expectations = test_expectations.TestExpectations(test_files,
- self._file_dir, platform, is_debug_mode,
- self._options.lint_test_files)
- return self._expectations
- except Exception, err:
- if self._options.lint_test_files:
- print str(err)
- else:
- raise err
-
- def prepare_lists_and_print_output(self, write):
- """Create appropriate subsets of test lists and returns a
- ResultSummary object. Also prints expected test counts.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- """
-
- # Remove skipped - both fixable and ignored - files from the
- # top-level list of files to test.
- num_all_test_files = len(self._test_files)
- write("Found: %d tests" % (len(self._test_files)))
- skipped = set()
- if num_all_test_files > 1 and not self._options.force:
- skipped = self._expectations.get_tests_with_result_type(
- test_expectations.SKIP)
- self._test_files -= skipped
-
- # Create a sorted list of test files so the subset chunk,
- # if used, contains alphabetically consecutive tests.
- self._test_files_list = list(self._test_files)
- if self._options.randomize_order:
- random.shuffle(self._test_files_list)
- else:
- self._test_files_list.sort()
-
- # If the user specifies they just want to run a subset of the tests,
- # just grab a subset of the non-skipped tests.
- if self._options.run_chunk or self._options.run_part:
- chunk_value = self._options.run_chunk or self._options.run_part
- test_files = self._test_files_list
- try:
- (chunk_num, chunk_len) = chunk_value.split(":")
- chunk_num = int(chunk_num)
- assert(chunk_num >= 0)
- test_size = int(chunk_len)
- assert(test_size > 0)
-            except (ValueError, AssertionError):
- logging.critical("invalid chunk '%s'" % chunk_value)
- sys.exit(1)
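-
-            # Worked example (hypothetical values): with 450 tests,
-            # '--run-chunk 3:100' wraps to slice_start = (3 * 100) % 450 = 300
-            # and runs tests [300:400], while '--run-part 2:3' rounds 450 up
-            # to a multiple of 3, giving chunk_len = 150 and tests [150:300].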
-
- # Get the number of tests
- num_tests = len(test_files)
-
- # Get the start offset of the slice.
- if self._options.run_chunk:
- chunk_len = test_size
-                # In this case chunk_num can be really large, so wrap it
-                # around to fit within the current number of tests.
- slice_start = (chunk_num * chunk_len) % num_tests
- else:
- # Validate the data.
- assert(test_size <= num_tests)
- assert(chunk_num <= test_size)
-
- # To count the chunk_len, and make sure we don't skip
- # some tests, we round to the next value that fits exactly
- # all the parts.
- rounded_tests = num_tests
- if rounded_tests % test_size != 0:
- rounded_tests = (num_tests + test_size -
- (num_tests % test_size))
-
- chunk_len = rounded_tests / test_size
- slice_start = chunk_len * (chunk_num - 1)
-                # It doesn't matter if we go over test_size.
-
- # Get the end offset of the slice.
- slice_end = min(num_tests, slice_start + chunk_len)
-
- files = test_files[slice_start:slice_end]
-
- tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
- (slice_end - slice_start), slice_start, slice_end, num_tests)
- write(tests_run_msg)
-
- # If we reached the end and we don't have enough tests, we run some
- # from the beginning.
- if (self._options.run_chunk and
- (slice_end - slice_start < chunk_len)):
- extra = 1 + chunk_len - (slice_end - slice_start)
- extra_msg = (' last chunk is partial, appending [0:%d]' %
- extra)
- write(extra_msg)
- tests_run_msg += "\n" + extra_msg
- files.extend(test_files[0:extra])
- tests_run_filename = os.path.join(self._options.results_directory,
- "tests_run.txt")
- tests_run_file = open(tests_run_filename, "w")
- tests_run_file.write(tests_run_msg + "\n")
- tests_run_file.close()
-
- len_skip_chunk = int(len(files) * len(skipped) /
- float(len(self._test_files)))
- skip_chunk_list = list(skipped)[0:len_skip_chunk]
- skip_chunk = set(skip_chunk_list)
-
- # Update expectations so that the stats are calculated correctly.
- # We need to pass a list that includes the right # of skipped files
-            # to parse_expectations so that ResultSummary() will get the correct
- # stats. So, we add in the subset of skipped files, and then
- # subtract them back out.
- self._test_files_list = files + skip_chunk_list
- self._test_files = set(self._test_files_list)
-
- self._expectations = self.parse_expectations(
- path_utils.platform_name(), self._options.target == 'Debug')
-
- self._test_files = set(files)
- self._test_files_list = files
- else:
- skip_chunk = skipped
-
- result_summary = ResultSummary(self._expectations,
- self._test_files | skip_chunk)
- self._print_expected_results_of_type(write, result_summary,
- test_expectations.PASS, "passes")
- self._print_expected_results_of_type(write, result_summary,
- test_expectations.FAIL, "failures")
- self._print_expected_results_of_type(write, result_summary,
- test_expectations.FLAKY, "flaky")
- self._print_expected_results_of_type(write, result_summary,
- test_expectations.SKIP, "skipped")
-
- if self._options.force:
- write('Running all tests, including skips (--force)')
- else:
- # Note that we don't actually run the skipped tests (they were
- # subtracted out of self._test_files, above), but we stub out the
- # results here so the statistics can remain accurate.
- for test in skip_chunk:
- result_summary.add(test, [], test_expectations.SKIP,
- expected=True)
- write("")
-
- return result_summary
-
- def add_test_type(self, test_type):
- """Add a TestType to the TestRunner."""
- self._test_types.append(test_type)
-
- def _get_dir_for_test_file(self, test_file):
- """Returns the highest-level directory by which to shard the given
- test file."""
- index = test_file.rfind(os.sep + 'LayoutTests' + os.sep)
-
- test_file = test_file[index + len('LayoutTests/'):]
- test_file_parts = test_file.split(os.sep, 1)
- directory = test_file_parts[0]
- test_file = test_file_parts[1]
-
- # The http tests are very stable on mac/linux.
-        # TODO(ojan): Make the http server on Windows be apache so we can
-        # shard the http tests there as well. Switching to apache is
-        # what made them stable on linux/mac.
- return_value = directory
- while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
- and test_file.find(os.sep) >= 0):
- test_file_parts = test_file.split(os.sep, 1)
- directory = test_file_parts[0]
- return_value = os.path.join(return_value, directory)
- test_file = test_file_parts[1]
-
- return return_value
-
- def _get_test_info_for_file(self, test_file):
- """Returns the appropriate TestInfo object for the file. Mostly this
- is used for looking up the timeout value (in ms) to use for the given
- test."""
- if self._expectations.has_modifier(test_file, test_expectations.SLOW):
- return TestInfo(test_file, self._options.slow_time_out_ms)
- return TestInfo(test_file, self._options.time_out_ms)
-
- def _get_test_file_queue(self, test_files):
- """Create the thread safe queue of lists of (test filenames, test URIs)
- tuples. Each TestShellThread pulls a list from this queue and runs
- those tests in order before grabbing the next available list.
-
- Shard the lists by directory. This helps ensure that tests that depend
- on each other (aka bad tests!) continue to run together as most
-        cross-test dependencies tend to occur within the same directory.
-
- Return:
- The Queue of lists of TestInfo objects.
- """
-
- if (self._options.experimental_fully_parallel or
- self._is_single_threaded()):
- filename_queue = Queue.Queue()
- for test_file in test_files:
- filename_queue.put(
- ('.', [self._get_test_info_for_file(test_file)]))
- return filename_queue
-
- tests_by_dir = {}
- for test_file in test_files:
- directory = self._get_dir_for_test_file(test_file)
- tests_by_dir.setdefault(directory, [])
- tests_by_dir[directory].append(
- self._get_test_info_for_file(test_file))
-
- # Sort by the number of tests in the dir so that the ones with the
- # most tests get run first in order to maximize parallelization.
- # Number of tests is a good enough, but not perfect, approximation
- # of how long that set of tests will take to run. We can't just use
-        # a PriorityQueue until we move to Python 2.6.
- test_lists = []
- http_tests = None
- for directory in tests_by_dir:
- test_list = tests_by_dir[directory]
- # Keep the tests in alphabetical order.
- # TODO: Remove once tests are fixed so they can be run in any
- # order.
- test_list.reverse()
- test_list_tuple = (directory, test_list)
- if directory == 'LayoutTests' + os.sep + 'http':
- http_tests = test_list_tuple
- else:
- test_lists.append(test_list_tuple)
- test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
-
- # Put the http tests first. There are only a couple hundred of them,
- # but each http test takes a very long time to run, so sorting by the
- # number of tests doesn't accurately capture how long they take to run.
- if http_tests:
- test_lists.insert(0, http_tests)
-
- filename_queue = Queue.Queue()
- for item in test_lists:
- filename_queue.put(item)
- return filename_queue
-
- def _get_test_shell_args(self, index):
- """Returns the tuple of arguments for tests and for test_shell."""
- shell_args = []
- test_args = test_type_base.TestArguments()
- if not self._options.no_pixel_tests:
- png_path = os.path.join(self._options.results_directory,
- "png_result%s.png" % index)
- shell_args.append("--pixel-tests=" + png_path)
- test_args.png_path = png_path
-
- test_args.new_baseline = self._options.new_baseline
-
- test_args.show_sources = self._options.sources
-
- if self._options.startup_dialog:
- shell_args.append('--testshell-startup-dialog')
-
- if self._options.gp_fault_error_box:
- shell_args.append('--gp-fault-error-box')
-
- return (test_args, shell_args)
-
- def _contains_tests(self, subdir):
- for test_file in self._test_files_list:
- if test_file.find(subdir) >= 0:
- return True
- return False
-
- def _instantiate_test_shell_threads(self, test_shell_binary, test_files,
- result_summary):
- """Instantitates and starts the TestShellThread(s).
-
- Return:
- The list of threads.
- """
- test_shell_command = [test_shell_binary]
-
- if self._options.wrapper:
- # This split() isn't really what we want -- it incorrectly will
- # split quoted strings within the wrapper argument -- but in
- # practice it shouldn't come up and the --help output warns
- # about it anyway.
- test_shell_command = (self._options.wrapper.split() +
- test_shell_command)
-
- filename_queue = self._get_test_file_queue(test_files)
-
- # Instantiate TestShellThreads and start them.
- threads = []
- for i in xrange(int(self._options.num_test_shells)):
- # Create separate TestTypes instances for each thread.
- test_types = []
- for t in self._test_types:
- test_types.append(t(self._options.platform,
- self._options.results_directory))
-
- test_args, shell_args = self._get_test_shell_args(i)
- thread = test_shell_thread.TestShellThread(filename_queue,
- self._result_queue,
- test_shell_command,
- test_types,
- test_args,
- shell_args,
- self._options)
- if self._is_single_threaded():
- thread.run_in_main_thread(self, result_summary)
- else:
- thread.start()
- threads.append(thread)
-
- return threads
-
- def _stop_layout_test_helper(self, proc):
- """Stop the layout test helper and closes it down."""
- if proc:
- logging.debug("Stopping layout test helper")
- proc.stdin.write("x\n")
- proc.stdin.close()
- proc.wait()
-
- def _is_single_threaded(self):
- """Returns whether we should run all the tests in the main thread."""
- return int(self._options.num_test_shells) == 1
-
- def _run_tests(self, test_shell_binary, file_list, result_summary):
- """Runs the tests in the file_list.
-
-        Return: A tuple (thread_timings, test_timings,
-            individual_test_timings)
- thread_timings is a list of dicts with the total runtime
- of each thread with 'name', 'num_tests', 'total_time' properties
- test_timings is a list of timings for each sharded subdirectory
- of the form [time, directory_name, num_tests]
- individual_test_timings is a list of run times for each test
- in the form {filename:filename, test_run_time:test_run_time}
- result_summary: summary object to populate with the results
- """
- threads = self._instantiate_test_shell_threads(test_shell_binary,
- file_list,
- result_summary)
-
-        # Wait for the threads to finish and collect the timing stats.
- test_timings = {}
- individual_test_timings = []
- thread_timings = []
- try:
- for thread in threads:
- while thread.isAlive():
- # Let it timeout occasionally so it can notice a
- # KeyboardInterrupt. Actually, the timeout doesn't
- # really matter: apparently it suffices to not use
- # an indefinite blocking join for it to
- # be interruptible by KeyboardInterrupt.
- thread.join(0.1)
- self.update_summary(result_summary)
- thread_timings.append({'name': thread.getName(),
- 'num_tests': thread.get_num_tests(),
- 'total_time': thread.get_total_time()})
- test_timings.update(thread.get_directory_timing_stats())
- individual_test_timings.extend(
- thread.get_individual_test_stats())
- except KeyboardInterrupt:
- for thread in threads:
- thread.cancel()
-            self._stop_layout_test_helper(self._layout_test_helper_proc)
- raise
- for thread in threads:
- # Check whether a TestShellThread died before normal completion.
- exception_info = thread.get_exception_info()
- if exception_info is not None:
- # Re-raise the thread's exception here to make it clear that
- # testing was aborted. Otherwise, the tests that did not run
- # would be assumed to have passed.
- raise exception_info[0], exception_info[1], exception_info[2]
-
- # Make sure we pick up any remaining tests.
- self.update_summary(result_summary)
- return (thread_timings, test_timings, individual_test_timings)
-
- def run(self, result_summary):
- """Run all our tests on all our test files.
-
- For each test file, we run each test type. If there are any failures,
- we collect them for reporting.
-
- Args:
- result_summary: a summary object tracking the test results.
-
- Return:
- We return nonzero if there are regressions compared to the last run.
- """
- if not self._test_files:
- return 0
- start_time = time.time()
- test_shell_binary = path_utils.test_shell_path(self._options.target)
-
-        # Start up any helper needed. Keep it on self so the interrupt
-        # handler in _run_tests can stop it.
-        self._layout_test_helper_proc = None
- if not self._options.no_pixel_tests:
- helper_path = path_utils.layout_test_helper_path(
- self._options.target)
- if len(helper_path):
- logging.debug("Starting layout helper %s" % helper_path)
-                self._layout_test_helper_proc = subprocess.Popen(
- [helper_path], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=None)
-                is_ready = self._layout_test_helper_proc.stdout.readline()
- if not is_ready.startswith('ready'):
- logging.error("layout_test_helper failed to be ready")
-
- # Check that the system dependencies (themes, fonts, ...) are correct.
- if not self._options.nocheck_sys_deps:
- proc = subprocess.Popen([test_shell_binary,
- "--check-layout-test-sys-deps"])
- if proc.wait() != 0:
- logging.info("Aborting because system dependencies check "
- "failed.\n To override, invoke with "
- "--nocheck-sys-deps")
- sys.exit(1)
-
- if self._contains_tests(self.HTTP_SUBDIR):
- self._http_server.start()
-
- if self._contains_tests(self.WEBSOCKET_SUBDIR):
- self._websocket_server.start()
- # self._websocket_secure_server.Start()
-
- thread_timings, test_timings, individual_test_timings = (
- self._run_tests(test_shell_binary, self._test_files_list,
- result_summary))
-
- # We exclude the crashes from the list of results to retry, because
- # we want to treat even a potentially flaky crash as an error.
- failures = self._get_failures(result_summary, include_crashes=False)
- retries = 0
- retry_summary = result_summary
- while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
- len(failures)):
- logging.debug("Retrying %d unexpected failure(s)" % len(failures))
- retries += 1
- retry_summary = ResultSummary(self._expectations, failures.keys())
- self._run_tests(test_shell_binary, failures.keys(), retry_summary)
- failures = self._get_failures(retry_summary, include_crashes=True)
-
-        self._stop_layout_test_helper(self._layout_test_helper_proc)
- end_time = time.time()
-
- write = create_logging_writer(self._options, 'timing')
- self._print_timing_statistics(write, end_time - start_time,
- thread_timings, test_timings,
- individual_test_timings,
- result_summary)
-
- self._meter.update("")
-
- if self._options.verbose:
- # We write this block to stdout for compatibility with the
- # buildbot log parser, which only looks at stdout, not stderr :(
- write = lambda s: sys.stdout.write("%s\n" % s)
- else:
- write = create_logging_writer(self._options, 'actual')
-
- self._print_result_summary(write, result_summary)
-
- sys.stdout.flush()
- sys.stderr.flush()
-
- if (LOG_DETAILED_PROGRESS in self._options.log or
- (LOG_UNEXPECTED in self._options.log and
- result_summary.total != result_summary.expected)):
- print
-
- # This summary data gets written to stdout regardless of log level
- self._print_one_line_summary(result_summary.total,
- result_summary.expected)
-
- unexpected_results = self._summarize_unexpected_results(result_summary,
- retry_summary)
- self._print_unexpected_results(unexpected_results)
-
- # Write the same data to log files.
- self._write_json_files(unexpected_results, result_summary,
- individual_test_timings)
-
- # Write the summary to disk (results.html) and maybe open the
- # test_shell to this file.
- wrote_results = self._write_results_html_file(result_summary)
- if not self._options.noshow_results and wrote_results:
- self._show_results_html_file()
-
- # Ignore flaky failures and unexpected passes so we don't turn the
- # bot red for those.
- return unexpected_results['num_regressions']
-
- def update_summary(self, result_summary):
- """Update the summary while running tests."""
- while True:
- try:
- (test, fail_list) = self._result_queue.get_nowait()
- result = test_failures.determine_result_type(fail_list)
- expected = self._expectations.matches_an_expected_result(test,
- result)
- result_summary.add(test, fail_list, result, expected)
- if (LOG_DETAILED_PROGRESS in self._options.log and
- (self._options.experimental_fully_parallel or
- self._is_single_threaded())):
- self._display_detailed_progress(result_summary)
- else:
- if not expected and LOG_UNEXPECTED in self._options.log:
- self._print_unexpected_test_result(test, result)
- self._display_one_line_progress(result_summary)
- except Queue.Empty:
- return
-
- def _display_one_line_progress(self, result_summary):
- """Displays the progress through the test run."""
- self._meter.update("Testing: %d ran as expected, %d didn't, %d left" %
- (result_summary.expected, result_summary.unexpected,
- result_summary.remaining))
-
- def _display_detailed_progress(self, result_summary):
- """Display detailed progress output where we print the directory name
- and one dot for each completed test. This is triggered by
- "--log detailed-progress"."""
- if self._current_test_number == len(self._test_files_list):
- return
-
- next_test = self._test_files_list[self._current_test_number]
- next_dir = os.path.dirname(
- path_utils.relative_test_filename(next_test))
- if self._current_progress_str == "":
- self._current_progress_str = "%s: " % (next_dir)
- self._current_dir = next_dir
-
- while next_test in result_summary.results:
- if next_dir != self._current_dir:
- self._meter.write("%s\n" % (self._current_progress_str))
- self._current_progress_str = "%s: ." % (next_dir)
- self._current_dir = next_dir
- else:
- self._current_progress_str += "."
-
- if (next_test in result_summary.unexpected_results and
- LOG_UNEXPECTED in self._options.log):
- result = result_summary.unexpected_results[next_test]
- self._meter.write("%s\n" % self._current_progress_str)
- self._print_unexpected_test_result(next_test, result)
- self._current_progress_str = "%s: " % self._current_dir
-
- self._current_test_number += 1
- if self._current_test_number == len(self._test_files_list):
- break
-
- next_test = self._test_files_list[self._current_test_number]
- next_dir = os.path.dirname(
- path_utils.relative_test_filename(next_test))
-
- if result_summary.remaining:
- remain_str = " (%d)" % (result_summary.remaining)
- self._meter.update("%s%s" %
- (self._current_progress_str, remain_str))
- else:
- self._meter.write("%s\n" % (self._current_progress_str))
-
- def _get_failures(self, result_summary, include_crashes):
- """Filters a dict of results and returns only the failures.
-
- Args:
- result_summary: the results of the test run
- include_crashes: whether crashes are included in the output.
- We use False when finding the list of failures to retry
- to see if the results were flaky. Although the crashes may also be
- flaky, we treat them as if they aren't so that they're not ignored.
- Returns:
- a dict of files -> results
- """
- failed_results = {}
- for test, result in result_summary.unexpected_results.iteritems():
-            if (result == test_expectations.PASS or
-                (result == test_expectations.CRASH and not include_crashes)):
- continue
- failed_results[test] = result
-
- return failed_results
-
- def _summarize_unexpected_results(self, result_summary, retry_summary):
- """Summarize any unexpected results as a dict.
-
- TODO(dpranke): split this data structure into a separate class?
-
- Args:
- result_summary: summary object from initial test runs
- retry_summary: summary object from final test run of retried tests
- Returns:
- A dictionary containing a summary of the unexpected results from the
- run, with the following fields:
- 'version': a version indicator (1 in this version)
- 'fixable': # of fixable tests (NOW - PASS)
- 'skipped': # of skipped tests (NOW & SKIPPED)
- 'num_regressions': # of non-flaky failures
- 'num_flaky': # of flaky failures
- 'num_passes': # of unexpected passes
- 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
- """
- results = {}
- results['version'] = 1
-
- tbe = result_summary.tests_by_expectation
- tbt = result_summary.tests_by_timeline
- results['fixable'] = len(tbt[test_expectations.NOW] -
- tbe[test_expectations.PASS])
- results['skipped'] = len(tbt[test_expectations.NOW] &
- tbe[test_expectations.SKIP])
-
- num_passes = 0
- num_flaky = 0
- num_regressions = 0
- keywords = {}
- for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
- keywords[v] = k.upper()
-
- tests = {}
- for filename, result in result_summary.unexpected_results.iteritems():
- # Note that if a test crashed in the original run, we ignore
- # whether or not it crashed when we retried it (if we retried it),
- # and always consider the result not flaky.
- test = path_utils.relative_test_filename(filename)
- expected = self._expectations.get_expectations_string(filename)
- actual = [keywords[result]]
-
- if result == test_expectations.PASS:
- num_passes += 1
- elif result == test_expectations.CRASH:
- num_regressions += 1
- else:
- if filename not in retry_summary.unexpected_results:
- actual.extend(
- self._expectations.get_expectations_string(
- filename).split(" "))
- num_flaky += 1
- else:
- retry_result = retry_summary.unexpected_results[filename]
- if result != retry_result:
- actual.append(keywords[retry_result])
- num_flaky += 1
- else:
- num_regressions += 1
-
- tests[test] = {}
- tests[test]['expected'] = expected
- tests[test]['actual'] = " ".join(actual)
-
- results['tests'] = tests
- results['num_passes'] = num_passes
- results['num_flaky'] = num_flaky
- results['num_regressions'] = num_regressions
-
- return results
-
- def _write_json_files(self, unexpected_results, result_summary,
- individual_test_timings):
- """Writes the results of the test run as JSON files into the results
- dir.
-
- There are three different files written into the results dir:
- unexpected_results.json: A short list of any unexpected results.
- This is used by the buildbots to display results.
- expectations.json: This is used by the flakiness dashboard.
- results.json: A full list of the results - used by the flakiness
- dashboard and the aggregate results dashboard.
-
- Args:
- unexpected_results: dict of unexpected results
- result_summary: full summary object
- individual_test_timings: list of test times (used by the flakiness
- dashboard).
- """
- logging.debug("Writing JSON files in %s." %
- self._options.results_directory)
- unexpected_file = open(os.path.join(self._options.results_directory,
- "unexpected_results.json"), "w")
- unexpected_file.write(simplejson.dumps(unexpected_results,
- sort_keys=True, indent=2))
- unexpected_file.close()
-
- # Write a json file of the test_expectations.txt file for the layout
- # tests dashboard.
- expectations_file = open(os.path.join(self._options.results_directory,
- "expectations.json"), "w")
- expectations_json = \
- self._expectations.get_expectations_json_for_all_platforms()
- expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");")
- expectations_file.close()
-
- json_layout_results_generator.JSONLayoutResultsGenerator(
- self._options.builder_name, self._options.build_name,
- self._options.build_number, self._options.results_directory,
- BUILDER_BASE_URL, individual_test_timings,
- self._expectations, result_summary, self._test_files_list)
-
- logging.debug("Finished writing JSON files.")
-
- def _print_expected_results_of_type(self, write, result_summary,
- result_type, result_type_str):
- """Print the number of the tests in a given result class.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- result_summary - the object containing all the results to report on
- result_type - the particular result type to report in the summary.
- result_type_str - a string description of the result_type.
- """
- tests = self._expectations.get_tests_with_result_type(result_type)
- now = result_summary.tests_by_timeline[test_expectations.NOW]
- wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
- defer = result_summary.tests_by_timeline[test_expectations.DEFER]
-
- # We use a fancy format string in order to print the data out in a
- # nicely-aligned table.
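-        # e.g. with three-digit counts fmtstr expands to
-        # "Expect: %5d %-8s (%3d now, %3d defer, %3d wontfix)".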
- fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
- % (self._num_digits(now), self._num_digits(defer),
- self._num_digits(wontfix)))
- write(fmtstr % (len(tests), result_type_str, len(tests & now),
- len(tests & defer), len(tests & wontfix)))
-
- def _num_digits(self, num):
- """Returns the number of digits needed to represent the length of a
- sequence."""
- ndigits = 1
- if len(num):
- ndigits = int(math.log10(len(num))) + 1
- return ndigits
-
- def _print_timing_statistics(self, write, total_time, thread_timings,
- directory_test_timings, individual_test_timings,
- result_summary):
- """Record timing-specific information for the test run.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- total_time: total elapsed time (in seconds) for the test run
- thread_timings: wall clock time each thread ran for
- directory_test_timings: timing by directory
- individual_test_timings: timing by file
- result_summary: summary object for the test run
- """
- write("Test timing:")
- write(" %6.2f total testing time" % total_time)
- write("")
- write("Thread timing:")
- cuml_time = 0
- for t in thread_timings:
- write(" %10s: %5d tests, %6.2f secs" %
- (t['name'], t['num_tests'], t['total_time']))
- cuml_time += t['total_time']
- write(" %6.2f cumulative, %6.2f optimal" %
- (cuml_time, cuml_time / int(self._options.num_test_shells)))
- write("")
-
- self._print_aggregate_test_statistics(write, individual_test_timings)
- self._print_individual_test_times(write, individual_test_timings,
- result_summary)
- self._print_directory_timings(write, directory_test_timings)
-
- def _print_aggregate_test_statistics(self, write, individual_test_timings):
- """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- individual_test_timings: List of test_shell_thread.TestStats for all
- tests.
- """
- test_types = individual_test_timings[0].time_for_diffs.keys()
- times_for_test_shell = []
- times_for_diff_processing = []
- times_per_test_type = {}
- for test_type in test_types:
- times_per_test_type[test_type] = []
-
- for test_stats in individual_test_timings:
- times_for_test_shell.append(test_stats.test_run_time)
- times_for_diff_processing.append(
- test_stats.total_time_for_all_diffs)
- time_for_diffs = test_stats.time_for_diffs
- for test_type in test_types:
- times_per_test_type[test_type].append(
- time_for_diffs[test_type])
-
- self._print_statistics_for_test_timings(write,
- "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell)
- self._print_statistics_for_test_timings(write,
- "PER TEST DIFF PROCESSING TIMES (seconds):",
- times_for_diff_processing)
- for test_type in test_types:
- self._print_statistics_for_test_timings(write,
- "PER TEST TIMES BY TEST TYPE: %s" % test_type,
- times_per_test_type[test_type])
-
- def _print_individual_test_times(self, write, individual_test_timings,
- result_summary):
- """Prints the run times for slow, timeout and crash tests.
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- individual_test_timings: List of test_shell_thread.TestStats for all
- tests.
- result_summary: summary object for test run
- """
- # Reverse-sort by the time spent in test_shell.
- individual_test_timings.sort(lambda a, b:
- cmp(b.test_run_time, a.test_run_time))
-
- num_printed = 0
- slow_tests = []
- timeout_or_crash_tests = []
- unexpected_slow_tests = []
- for test_tuple in individual_test_timings:
- filename = test_tuple.filename
- is_timeout_crash_or_slow = False
- if self._expectations.has_modifier(filename,
- test_expectations.SLOW):
- is_timeout_crash_or_slow = True
- slow_tests.append(test_tuple)
-
- if filename in result_summary.failures:
- result = result_summary.results[filename]
- if (result == test_expectations.TIMEOUT or
- result == test_expectations.CRASH):
- is_timeout_crash_or_slow = True
- timeout_or_crash_tests.append(test_tuple)
-
- if (not is_timeout_crash_or_slow and
- num_printed < self._options.num_slow_tests_to_log):
- num_printed = num_printed + 1
- unexpected_slow_tests.append(test_tuple)
-
- write("")
- self._print_test_list_timing(write, "%s slowest tests that are not "
- "marked as SLOW and did not timeout/crash:" %
- self._options.num_slow_tests_to_log, unexpected_slow_tests)
- write("")
- self._print_test_list_timing(write, "Tests marked as SLOW:",
- slow_tests)
- write("")
- self._print_test_list_timing(write, "Tests that timed out or crashed:",
- timeout_or_crash_tests)
- write("")
-
- def _print_test_list_timing(self, write, title, test_list):
- """Print timing info for each test.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- title: section heading
- test_list: tests that fall in this section
- """
- write(title)
- for test_tuple in test_list:
- filename = test_tuple.filename[len(
- path_utils.layout_tests_dir()) + 1:]
- filename = filename.replace('\\', '/')
- test_run_time = round(test_tuple.test_run_time, 1)
- write(" %s took %s seconds" % (filename, test_run_time))
-
- def _print_directory_timings(self, write, directory_test_timings):
- """Print timing info by directory for any directories that
- take > 10 seconds to run.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- directory_test_timing: time info for each directory
- """
- timings = []
- for directory in directory_test_timings:
- num_tests, time_for_directory = directory_test_timings[directory]
- timings.append((round(time_for_directory, 1), directory,
- num_tests))
- timings.sort()
-
- write("Time to process slowest subdirectories:")
- min_seconds_to_print = 10
- for timing in timings:
- if timing[0] > min_seconds_to_print:
- write(" %s took %s seconds to run %s tests." % (timing[1],
- timing[0], timing[2]))
- write("")
-
- def _print_statistics_for_test_timings(self, write, title, timings):
- """Prints the median, mean and standard deviation of the values in
- timings.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- title: Title for these timings.
- timings: A list of floats representing times.
- """
- write(title)
- timings.sort()
-
- num_tests = len(timings)
- percentile90 = timings[int(.9 * num_tests)]
- percentile99 = timings[int(.99 * num_tests)]
-
- if num_tests % 2 == 1:
-            median = timings[(num_tests - 1) / 2]
- else:
- lower = timings[num_tests / 2 - 1]
- upper = timings[num_tests / 2]
- median = (float(lower + upper)) / 2
-
- mean = sum(timings) / num_tests
-
-        sum_of_deviations = 0
-        for t in timings:
-            sum_of_deviations += math.pow(t - mean, 2)
-
- std_deviation = math.sqrt(sum_of_deviations / num_tests)
- write(" Median: %6.3f" % median)
- write(" Mean: %6.3f" % mean)
- write(" 90th percentile: %6.3f" % percentile90)
- write(" 99th percentile: %6.3f" % percentile99)
- write(" Standard dev: %6.3f" % std_deviation)
- write("")
-
- def _print_result_summary(self, write, result_summary):
- """Print a short summary about how many tests passed.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- result_summary: information to log
- """
- failed = len(result_summary.failures)
- skipped = len(
- result_summary.tests_by_expectation[test_expectations.SKIP])
- total = result_summary.total
- passed = total - failed - skipped
- pct_passed = 0.0
- if total > 0:
- pct_passed = float(passed) * 100 / total
-
- write("")
- write("=> Results: %d/%d tests passed (%.1f%%)" %
- (passed, total, pct_passed))
- write("")
- self._print_result_summary_entry(write, result_summary,
- test_expectations.NOW, "Tests to be fixed for the current release")
-
- write("")
- self._print_result_summary_entry(write, result_summary,
- test_expectations.DEFER,
- "Tests we'll fix in the future if they fail (DEFER)")
-
- write("")
- self._print_result_summary_entry(write, result_summary,
- test_expectations.WONTFIX,
- "Tests that will only be fixed if they crash (WONTFIX)")
-
- def _print_result_summary_entry(self, write, result_summary, timeline,
- heading):
- """Print a summary block of results for a particular timeline of test.
-
- Args:
- write: A callback to write info to (e.g., a LoggingWriter) or
- sys.stdout.write.
- result_summary: summary to print results for
- timeline: the timeline to print results for (NOT, WONTFIX, etc.)
- heading: a textual description of the timeline
- """
- total = len(result_summary.tests_by_timeline[timeline])
- not_passing = (total -
- len(result_summary.tests_by_expectation[test_expectations.PASS] &
- result_summary.tests_by_timeline[timeline]))
- write("=> %s (%d):" % (heading, not_passing))
-
- for result in TestExpectationsFile.EXPECTATION_ORDER:
- if result == test_expectations.PASS:
- continue
- results = (result_summary.tests_by_expectation[result] &
- result_summary.tests_by_timeline[timeline])
- desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
- if not_passing and len(results):
- pct = len(results) * 100.0 / not_passing
- write(" %5d %-24s (%4.1f%%)" % (len(results),
- desc[len(results) != 1], pct))
-
- def _print_one_line_summary(self, total, expected):
- """Print a one-line summary of the test run to stdout.
-
- Args:
- total: total number of tests run
- expected: number of expected results
- """
- unexpected = total - expected
- if unexpected == 0:
- print "All %d tests ran as expected." % expected
- elif expected == 1:
- print "1 test ran as expected, %d didn't:" % unexpected
- else:
- print "%d tests ran as expected, %d didn't:" % (expected,
- unexpected)
-
- def _print_unexpected_results(self, unexpected_results):
- """Prints any unexpected results in a human-readable form to stdout."""
- passes = {}
- flaky = {}
- regressions = {}
-
- if len(unexpected_results['tests']):
- print ""
-
- for test, results in unexpected_results['tests'].iteritems():
- actual = results['actual'].split(" ")
- expected = results['expected'].split(" ")
- if actual == ['PASS']:
- if 'CRASH' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to crash, but passed',
- test)
- elif 'TIMEOUT' in expected:
- _add_to_dict_of_lists(passes,
- 'Expected to timeout, but passed',
- test)
- else:
- _add_to_dict_of_lists(passes,
- 'Expected to fail, but passed',
- test)
- elif len(actual) > 1:
- # We group flaky tests by the first actual result we got.
- _add_to_dict_of_lists(flaky, actual[0], test)
- else:
- _add_to_dict_of_lists(regressions, results['actual'], test)
-
- if len(passes):
- for key, tests in passes.iteritems():
- print "%s: (%d)" % (key, len(tests))
- tests.sort()
- for test in tests:
- print " %s" % test
- print
-
- if len(flaky):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in flaky.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- print "Unexpected flakiness: %s (%d)" % (
- descriptions[result][1], len(tests))
- tests.sort()
-
- for test in tests:
-                    entry = unexpected_results['tests'][test]
-                    actual = entry['actual'].split(" ")
-                    expected = entry['expected'].split(" ")
- new_expectations_list = list(set(actual) | set(expected))
- print " %s = %s" % (test, " ".join(new_expectations_list))
- print
-
- if len(regressions):
- descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
- for key, tests in regressions.iteritems():
- result = TestExpectationsFile.EXPECTATIONS[key.lower()]
- print "Regressions: Unexpected %s : (%d)" % (
- descriptions[result][1], len(tests))
- tests.sort()
- for test in tests:
- print " %s = %s" % (test, key)
- print
-
- if len(unexpected_results['tests']) and self._options.verbose:
- print "-" * 78
-
- def _print_unexpected_test_result(self, test, result):
- """Prints one unexpected test result line."""
- desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0]
- self._meter.write(" %s -> unexpected %s\n" %
- (path_utils.relative_test_filename(test), desc))
-
- def _write_results_html_file(self, result_summary):
- """Write results.html which is a summary of tests that failed.
-
- Args:
- result_summary: a summary of the results :)
-
- Returns:
- True if any results were written (since expected failures may be
- omitted)
- """
- # test failures
- if self._options.full_results_html:
- test_files = result_summary.failures.keys()
- else:
- unexpected_failures = self._get_failures(result_summary,
- include_crashes=True)
- test_files = unexpected_failures.keys()
- if not len(test_files):
- return False
-
- out_filename = os.path.join(self._options.results_directory,
- "results.html")
- out_file = open(out_filename, 'w')
- # header
- if self._options.full_results_html:
- h2 = "Test Failures"
- else:
- h2 = "Unexpected Test Failures"
- out_file.write("<html><head><title>Layout Test Results (%(time)s)"
- "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n"
- % {'h2': h2, 'time': time.asctime()})
-
- test_files.sort()
- for test_file in test_files:
- test_failures = result_summary.failures.get(test_file, [])
- out_file.write("<p><a href='%s'>%s</a><br />\n"
- % (path_utils.filename_to_uri(test_file),
- path_utils.relative_test_filename(test_file)))
- for failure in test_failures:
- out_file.write("&nbsp;&nbsp;%s<br/>"
- % failure.result_html_output(
- path_utils.relative_test_filename(test_file)))
- out_file.write("</p>\n")
-
- # footer
- out_file.write("</body></html>\n")
- return True
-
- def _show_results_html_file(self):
- """Launches the test shell open to the results.html page."""
- results_filename = os.path.join(self._options.results_directory,
- "results.html")
- subprocess.Popen([path_utils.test_shell_path(self._options.target),
- path_utils.filename_to_uri(results_filename)])
-
-
-def _add_to_dict_of_lists(dict, key, value):
- dict.setdefault(key, []).append(value)
-
-
-def read_test_files(files):
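-    """Returns a list of tests to run, read from the given files; blank
-    lines and '//' comments are skipped."""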
- tests = []
- for file in files:
- for line in open(file):
- line = test_expectations.strip_comments(line)
- if line:
- tests.append(line)
- return tests
-
-
-def create_logging_writer(options, log_option):
- """Returns a write() function that will write the string to logging.info()
-    if log_option was specified in --log or if --verbose is true. Otherwise
-    the message is dropped.
-
- Args:
- options: list of command line options from optparse
- log_option: option to match in options.log in order for the messages
- to be logged (e.g., 'actual' or 'expected')
- """
- if options.verbose or log_option in options.log.split(","):
- return logging.info
-    return lambda msg: 1
-
-
-def main(options, args):
- """Run the tests. Will call sys.exit when complete.
-
- Args:
- options: a dictionary of command line options
- args: a list of sub directories or files to test
- """
-
- if options.sources:
- options.verbose = True
-
- # Set up our logging format.
- meter = metered_stream.MeteredStream(options.verbose, sys.stderr)
- log_fmt = '%(message)s'
- log_datefmt = '%y%m%d %H:%M:%S'
- log_level = logging.INFO
- if options.verbose:
- log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
- '%(message)s')
- log_level = logging.DEBUG
- logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt,
- stream=meter)
-
- if not options.target:
- if options.debug:
- options.target = "Debug"
- else:
- options.target = "Release"
-
- if not options.use_apache:
- options.use_apache = sys.platform in ('darwin', 'linux2')
-
- if options.results_directory.startswith("/"):
- # Assume it's an absolute path and normalize.
- options.results_directory = path_utils.get_absolute_path(
- options.results_directory)
- else:
- # If it's a relative path, make the output directory relative to
- # Debug or Release.
- basedir = path_utils.path_from_base('webkit')
- options.results_directory = path_utils.get_absolute_path(
- os.path.join(basedir, options.target, options.results_directory))
-
- if options.clobber_old_results:
- # Just clobber the actual test results directories since the other
- # files in the results directory are explicitly used for cross-run
- # tracking.
- meter.update("Clobbering old results in %s" %
- options.results_directory)
- layout_tests_dir = path_utils.layout_tests_dir()
- possible_dirs = os.listdir(layout_tests_dir)
- for dirname in possible_dirs:
- if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
- shutil.rmtree(
- os.path.join(options.results_directory, dirname),
- ignore_errors=True)
-
- # Ensure platform is valid and force it to the form 'chromium-<platform>'.
- options.platform = path_utils.platform_name(options.platform)
-
- if not options.num_test_shells:
- # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1.
- options.num_test_shells = platform_utils.get_num_cores()
-
- write = create_logging_writer(options, 'config')
- write("Running %s test_shells in parallel" % options.num_test_shells)
-
- if not options.time_out_ms:
- if options.target == "Debug":
- options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
- else:
- options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
-
- options.slow_time_out_ms = str(5 * int(options.time_out_ms))
- write("Regular timeout: %s, slow test timeout: %s" %
- (options.time_out_ms, options.slow_time_out_ms))
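-    # e.g. with the 6-second default: Release runs use 6000 ms regular and
-    # 30000 ms slow timeouts; Debug runs use 12000 ms and 60000 ms.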
-
-    # Filter out empty args; all tests run if no paths are specified.
-    new_args = [arg for arg in args if arg]
-
- paths = new_args
- if not paths:
- paths = []
- if options.test_list:
- paths += read_test_files(options.test_list)
-
- # Create the output directory if it doesn't already exist.
- path_utils.maybe_make_directory(options.results_directory)
- meter.update("Gathering files ...")
-
- test_runner = TestRunner(options, meter)
- test_runner.gather_file_paths(paths)
-
- if options.lint_test_files:
-        # Creating the expectations for each platform/target pair does all the
- # test list parsing and ensures it's correct syntax (e.g. no dupes).
- for platform in TestExpectationsFile.PLATFORMS:
- test_runner.parse_expectations(platform, is_debug_mode=True)
- test_runner.parse_expectations(platform, is_debug_mode=False)
- print ("If there are no fail messages, errors or exceptions, then the "
- "lint succeeded.")
- sys.exit(0)
-
- try:
- test_shell_binary_path = path_utils.test_shell_path(options.target)
- except path_utils.PathNotFound:
- print "\nERROR: test_shell is not found. Be sure that you have built"
- print "it and that you are using the correct build. This script"
- print "will run the Release one by default. Use --debug to use the"
- print "Debug build.\n"
- sys.exit(1)
-
- write = create_logging_writer(options, "config")
- write("Using platform '%s'" % options.platform)
- write("Placing test results in %s" % options.results_directory)
- if options.new_baseline:
- write("Placing new baselines in %s" %
- path_utils.chromium_baseline_path(options.platform))
- write("Using %s build at %s" % (options.target, test_shell_binary_path))
- if options.no_pixel_tests:
- write("Not running pixel tests")
- write("")
-
- meter.update("Parsing expectations ...")
- test_runner.parse_expectations(options.platform, options.target == 'Debug')
-
- meter.update("Preparing tests ...")
- write = create_logging_writer(options, "expected")
- result_summary = test_runner.prepare_lists_and_print_output(write)
-
- if sys.platform == 'cygwin':
- logging.warn("#" * 40)
- logging.warn("# UNEXPECTED PYTHON VERSION")
- logging.warn("# This script should be run using the version of python")
- logging.warn("# in third_party/python_24/")
- logging.warn("#" * 40)
- sys.exit(1)
-
- # Delete the disk cache if any to ensure a clean test run.
- cachedir = os.path.split(test_shell_binary_path)[0]
- cachedir = os.path.join(cachedir, "cache")
- if os.path.exists(cachedir):
- shutil.rmtree(cachedir)
-
- test_runner.add_test_type(text_diff.TestTextDiff)
- if not options.no_pixel_tests:
- test_runner.add_test_type(image_diff.ImageDiff)
- if options.fuzzy_pixel_tests:
- test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff)
-
- meter.update("Starting ...")
- has_new_failures = test_runner.run(result_summary)
-
- logging.debug("Exit status: %d" % has_new_failures)
- sys.exit(has_new_failures)
-
-
-def parse_args(args=None):
- """Prove a default set of command line args.
-
- Returns a tuple of options, args from optparse"""
- option_parser = optparse.OptionParser()
- option_parser.add_option("", "--no-pixel-tests", action="store_true",
- default=False,
- help="disable pixel-to-pixel PNG comparisons")
- option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true",
- default=False,
- help="Also use fuzzy matching to compare pixel "
- "test outputs.")
- option_parser.add_option("", "--results-directory",
- default="layout-test-results",
- help="Output results directory source dir,"
- " relative to Debug or Release")
- option_parser.add_option("", "--new-baseline", action="store_true",
- default=False,
- help="save all generated results as new baselines"
- " into the platform directory, overwriting "
- "whatever's already there.")
- option_parser.add_option("", "--noshow-results", action="store_true",
- default=False, help="don't launch the test_shell"
- " with results after the tests are done")
- option_parser.add_option("", "--full-results-html", action="store_true",
- default=False, help="show all failures in "
- "results.html, rather than only regressions")
- option_parser.add_option("", "--clobber-old-results", action="store_true",
- default=False, help="Clobbers test results from "
- "previous runs.")
- option_parser.add_option("", "--lint-test-files", action="store_true",
- default=False, help="Makes sure the test files "
- "parse for all configurations. Does not run any "
- "tests.")
- option_parser.add_option("", "--force", action="store_true",
- default=False,
- help="Run all tests, even those marked SKIP "
- "in the test list")
- option_parser.add_option("", "--num-test-shells",
- help="Number of testshells to run in parallel.")
- option_parser.add_option("", "--use-apache", action="store_true",
- default=False,
- help="Whether to use apache instead of lighttpd.")
- option_parser.add_option("", "--time-out-ms", default=None,
- help="Set the timeout for each test")
- option_parser.add_option("", "--run-singly", action="store_true",
- default=False,
- help="run a separate test_shell for each test")
- option_parser.add_option("", "--debug", action="store_true", default=False,
- help="use the debug binary instead of the release"
- " binary")
- option_parser.add_option("", "--num-slow-tests-to-log", default=50,
- help="Number of slow tests whose timings "
- "to print.")
- option_parser.add_option("", "--platform",
- help="Override the platform for expected results")
- option_parser.add_option("", "--target", default="",
- help="Set the build target configuration "
- "(overrides --debug)")
- option_parser.add_option("", "--log", action="store",
- default="detailed-progress,unexpected",
- help="log various types of data. The param should"
- " be a comma-separated list of values from: "
- "actual,config," + LOG_DETAILED_PROGRESS +
- ",expected,timing," + LOG_UNEXPECTED + " "
- "(defaults to " +
- "--log detailed-progress,unexpected)")
- option_parser.add_option("-v", "--verbose", action="store_true",
- default=False, help="include debug-level logging")
- option_parser.add_option("", "--sources", action="store_true",
- help="show expected result file path for each "
- "test (implies --verbose)")
- option_parser.add_option("", "--startup-dialog", action="store_true",
- default=False,
- help="create a dialog on test_shell.exe startup")
- option_parser.add_option("", "--gp-fault-error-box", action="store_true",
- default=False,
- help="enable Windows GP fault error box")
- option_parser.add_option("", "--wrapper",
- help="wrapper command to insert before "
- "invocations of test_shell; option is split "
- "on whitespace before running. (Example: "
- "--wrapper='valgrind --smc-check=all')")
- option_parser.add_option("", "--test-list", action="append",
- help="read list of tests to run from file",
- metavar="FILE")
- option_parser.add_option("", "--nocheck-sys-deps", action="store_true",
- default=False,
- help="Don't check the system dependencies "
- "(themes)")
- option_parser.add_option("", "--randomize-order", action="store_true",
- default=False,
- help=("Run tests in random order (useful for "
- "tracking down corruption)"))
- option_parser.add_option("", "--run-chunk",
- default=None,
- help=("Run a specified chunk (n:l), the "
- "nth of len l, of the layout tests"))
- option_parser.add_option("", "--run-part",
- default=None,
- help=("Run a specified part (n:m), the nth of m"
- " parts, of the layout tests"))
- option_parser.add_option("", "--batch-size",
- default=None,
- help=("Run a the tests in batches (n), after "
- "every n tests, the test shell is "
- "relaunched."))
- option_parser.add_option("", "--builder-name",
- default="DUMMY_BUILDER_NAME",
- help=("The name of the builder shown on the "
- "waterfall running this script e.g. "
- "WebKit."))
- option_parser.add_option("", "--build-name",
- default="DUMMY_BUILD_NAME",
- help=("The name of the builder used in its path, "
- "e.g. webkit-rel."))
- option_parser.add_option("", "--build-number",
- default="DUMMY_BUILD_NUMBER",
- help=("The build number of the builder running"
- "this script."))
- option_parser.add_option("", "--experimental-fully-parallel",
- action="store_true", default=False,
- help="run all tests in parallel")
- return option_parser.parse_args(args)
-
-if __name__ == '__main__':
- options, args = parse_args()
- main(options, args)
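A note on the timeout policy that main() implemented above: the Debug target doubled the default per-test timeout, and slow tests always got five times the regular value. A minimal sketch of that derivation (the DEFAULT_TEST_TIMEOUT_MS value is assumed here for illustration; the real constant lived on TestRunner):

    DEFAULT_TEST_TIMEOUT_MS = 10 * 1000  # assumed value, for illustration

    def derive_timeouts(target, time_out_ms=None):
        """Returns (regular_ms, slow_ms) as strings, as main() did."""
        if not time_out_ms:
            if target == "Debug":
                time_out_ms = str(2 * DEFAULT_TEST_TIMEOUT_MS)
            else:
                time_out_ms = str(DEFAULT_TEST_TIMEOUT_MS)
        slow_time_out_ms = str(5 * int(time_out_ms))
        return time_out_ms, slow_time_out_ms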
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_output_formatter.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_output_formatter.py
deleted file mode 100755
index ff3d444..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_output_formatter.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-This is a script for generating easily-viewable comparisons of text and pixel
-diffs.
-"""
-import optparse
-
-from layout_package import test_expectations
-from layout_package import failure
-from layout_package import failure_finder
-from layout_package import failure_finder_test
-from layout_package import html_generator
-
-DEFAULT_BUILDER = "Webkit"
-
-
-def main(options, args):
-
- if options.run_tests:
- fft = failure_finder_test.FailureFinderTest()
- return fft.runTests()
-
- # TODO(gwilson): Add a check that verifies the given platform exists.
-
- finder = failure_finder.FailureFinder(options.build_number,
- options.platform_builder,
- (not options.include_expected),
- options.test_regex,
- options.output_dir,
- int(options.max_failures),
- options.verbose,
- options.builder_log,
- options.archive_log,
- options.zip_file,
- options.expectations_file)
- finder.use_local_baselines = options.local
- failure_list = finder.get_failures()
-
- if not failure_list:
- print "Did not find any failures."
- return
-
- generator = html_generator.HTMLGenerator(failure_list,
- options.output_dir,
- finder.build,
- options.platform_builder,
- (not options.include_expected))
- filename = generator.generate_html()
-
- if filename and options.verbose:
- print "File created at %s" % filename
-
-if __name__ == "__main__":
- option_parser = optparse.OptionParser()
- option_parser.add_option("-v", "--verbose", action="store_true",
- default=False,
- help="Display lots of output.")
- option_parser.add_option("-i", "--include-expected", action="store_true",
- default=False,
- help="Include expected failures in output")
- option_parser.add_option("-p", "--platform-builder",
- default=DEFAULT_BUILDER,
- help="Use the given builder")
- option_parser.add_option("-b", "--build-number",
- default=None,
- help="Use the given build number")
- option_parser.add_option("-t", "--test-regex",
- default=None,
- help="Use the given regex to filter tests")
- option_parser.add_option("-o", "--output-dir",
- default=".",
- help="Output files to given directory")
- option_parser.add_option("-m", "--max-failures",
- default=100,
- help="Limit the maximum number of failures")
- option_parser.add_option("-r", "--run-tests", action="store_true",
- default=False,
- help="Runs unit tests")
- option_parser.add_option("-u", "--builder-log",
- default=None,
- help=("Use the local builder log file "
- "instead of scraping the buildbots"))
- option_parser.add_option("-a", "--archive-log",
- default=None,
- help=("Use the local archive log file "
- "instead of scraping the buildbots"))
- option_parser.add_option("-e", "--expectations-file",
- default=None,
- help=("Use the local test expectations file "
- "instead of scraping the buildbots"))
- option_parser.add_option("-z", "--zip-file",
- default=None,
- help=("Use the local test output zip file "
- "instead of scraping the buildbots"))
- option_parser.add_option("-l", "--local", action="store_true",
- default=False,
- help=("Use local baselines instead of scraping "
- "baselines from source websites"))
-
- options, args = option_parser.parse_args()
- main(options, args)
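The options above boil down to two modes: scraping the buildbots directly, or re-running against saved artifacts. Hypothetical invocations (builder names and paths are illustrative only):

    # Scrape the "Webkit" builder for up to 25 failures, verbosely:
    python test_output_formatter.py -v -p Webkit -m 25 -o /tmp/failures

    # Work entirely from saved local artifacts instead of the buildbots:
    python test_output_formatter.py -u builder.log -a archive.log \
        -z results.zip -e test_expectations.txt -l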
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/__init__.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/__init__.py
+++ /dev/null
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/fuzzy_image_diff.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
deleted file mode 100644
index 78d5d0c..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Compares the image output of a test to the expected image output using
-fuzzy matching.
-"""
-
-import errno
-import logging
-import os
-import shutil
-import subprocess
-
-from layout_package import path_utils
-from layout_package import test_failures
-from test_types import test_type_base
-
-
-class FuzzyImageDiff(test_type_base.TestTypeBase):
-
- def compare_output(self, filename, proc, output, test_args, target):
- """Implementation of CompareOutput that checks the output image and
- checksum against the expected files from the LayoutTest directory.
- """
- failures = []
-
- # If we didn't produce a hash file, this test must be text-only.
- if test_args.hash is None:
- return failures
-
- expected_png_file = path_utils.expected_filename(filename, '.png')
-
- if test_args.show_sources:
- logging.debug('Using %s' % expected_png_file)
-
- # Also report a missing expected PNG file.
- if not os.path.isfile(expected_png_file):
- failures.append(test_failures.FailureMissingImage(self))
-
- # Run the fuzzymatcher
- r = subprocess.call([path_utils.fuzzy_match_path(),
- test_args.png_path, expected_png_file])
- if r != 0:
- failures.append(test_failures.FailureFuzzyFailure(self))
-
- return failures
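The heart of FuzzyImageDiff is that the external fuzzy matcher's exit status is the entire verdict. A standalone sketch of that contract (the binary path would come from the deleted path_utils.fuzzy_match_path() helper):

    import subprocess

    def images_fuzzily_equal(fuzzy_match_binary, actual_png, expected_png):
        # Exit status 0 means "close enough"; anything else is a failure.
        return subprocess.call([fuzzy_match_binary,
                                actual_png, expected_png]) == 0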
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/image_diff.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/image_diff.py
deleted file mode 100644
index e9dde80..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/image_diff.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Compares the image output of a test to the expected image output.
-
-Compares hashes for the generated and expected images. If the output doesn't
-match, returns FailureImageHashMismatch and outputs both hashes into the layout
-test results directory.
-"""
-
-import errno
-import logging
-import os
-import shutil
-import subprocess
-
-from layout_package import path_utils
-from layout_package import test_failures
-from test_types import test_type_base
-
-# Cache whether we have the image_diff executable available.
-_compare_available = True
-_compare_msg_printed = False
-
-
-class ImageDiff(test_type_base.TestTypeBase):
-
- def _copy_output_png(self, test_filename, source_image, extension):
- """Copies result files into the output directory with appropriate
- names.
-
- Args:
- test_filename: the test filename
- source_image: path to the image file (either actual or expected)
- extension: extension to indicate -actual.png or -expected.png
- """
- self._make_output_directory(test_filename)
- dest_image = self.output_filename(test_filename, extension)
-
- try:
- shutil.copyfile(source_image, dest_image)
- except IOError, e:
- # A missing expected PNG has already been recorded as an error.
- if errno.ENOENT != e.errno:
- raise
-
- def _save_baseline_files(self, filename, png_path, checksum):
- """Saves new baselines for the PNG and checksum.
-
- Args:
- filename: test filename
- png_path: path to the actual PNG result file
- checksum: value of the actual checksum result
- """
- png_file = open(png_path, "rb")
- png_data = png_file.read()
- png_file.close()
- self._save_baseline_data(filename, png_data, ".png")
- self._save_baseline_data(filename, checksum, ".checksum")
-
- def _create_image_diff(self, filename, target):
- """Creates the visual diff of the expected/actual PNGs.
-
- Args:
- filename: the name of the test
- target: Debug or Release
- """
- diff_filename = self.output_filename(filename,
- self.FILENAME_SUFFIX_COMPARE)
- actual_filename = self.output_filename(filename,
- self.FILENAME_SUFFIX_ACTUAL + '.png')
- expected_filename = self.output_filename(filename,
- self.FILENAME_SUFFIX_EXPECTED + '.png')
-
- global _compare_available
- cmd = ''
-
- try:
- executable = path_utils.image_diff_path(target)
- cmd = [executable, '--diff', actual_filename, expected_filename,
- diff_filename]
- except Exception, e:
- _compare_available = False
-
- result = 1
- if _compare_available:
- try:
- result = subprocess.call(cmd)
- except OSError, e:
- if e.errno == errno.ENOENT or e.errno == errno.EACCES:
- _compare_available = False
- else:
- raise  # re-raise, preserving the original traceback
- except ValueError:
- # work around a race condition in Python 2.4's implementation
- # of subprocess.Popen
- pass
-
- global _compare_msg_printed
-
- if not _compare_available and not _compare_msg_printed:
- _compare_msg_printed = True
- print('image_diff not found. Make sure you have a ' + target +
- ' build of the image_diff executable.')
-
- return result
-
- def compare_output(self, filename, proc, output, test_args, target):
- """Implementation of CompareOutput that checks the output image and
- checksum against the expected files from the LayoutTest directory.
- """
- failures = []
-
- # If we didn't produce a hash file, this test must be text-only.
- if test_args.hash is None:
- return failures
-
- # If we're generating a new baseline, we pass.
- if test_args.new_baseline:
- self._save_baseline_files(filename, test_args.png_path,
- test_args.hash)
- return failures
-
- # Compare hashes.
- expected_hash_file = path_utils.expected_filename(filename,
- '.checksum')
- expected_png_file = path_utils.expected_filename(filename, '.png')
-
- if test_args.show_sources:
- logging.debug('Using %s' % expected_hash_file)
- logging.debug('Using %s' % expected_png_file)
-
- try:
- expected_hash = open(expected_hash_file, "r").read()
- except IOError, e:
- if errno.ENOENT != e.errno:
- raise
- expected_hash = ''
-
- if not os.path.isfile(expected_png_file):
- # Report a missing expected PNG file.
- self.write_output_files(filename, '', '.checksum', test_args.hash,
- expected_hash, diff=False, wdiff=False)
- self._copy_output_png(filename, test_args.png_path, '-actual.png')
- failures.append(test_failures.FailureMissingImage(self))
- return failures
- elif test_args.hash == expected_hash:
- # Hash matched (no diff needed, okay to return).
- return failures
-
- self.write_output_files(filename, '', '.checksum', test_args.hash,
- expected_hash, diff=False, wdiff=False)
- self._copy_output_png(filename, test_args.png_path, '-actual.png')
- self._copy_output_png(filename, expected_png_file, '-expected.png')
-
- # The result is only used in one codepath below, but we still need to
- # call _create_image_diff() so the diff image exists for the other
- # codepaths.
- result = self._create_image_diff(filename, target)
- if expected_hash == '':
- failures.append(test_failures.FailureMissingImageHash(self))
- elif test_args.hash != expected_hash:
- # Hashes don't match, so see if the images match. If they do, then
- # the hash is wrong.
- if result == 0:
- failures.append(test_failures.FailureImageHashIncorrect(self))
- else:
- failures.append(test_failures.FailureImageHashMismatch(self))
-
- return failures
-
- def diff_files(self, file1, file2):
- """Diff two image files.
-
- Args:
- file1, file2: full paths of the files to compare.
-
- Returns:
- True if two files are different.
- False otherwise.
- """
-
- try:
- executable = path_utils.image_diff_path('Debug')
- except Exception, e:
- logging.warn('Failed to find image diff executable.')
- return True
-
- cmd = [executable, file1, file2]
- result = 1
- try:
- result = subprocess.call(cmd)
- except OSError, e:
- logging.warn('Failed to compare image diff: %s', e)
- return True
-
- return result == 1
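compare_output() above encodes a small decision table: the checksum gates whether pixels are compared at all, and the image_diff exit code distinguishes a stale checksum from a real rendering change. A hedged sketch of that classification, with the failure classes reduced to strings:

    def classify_image_result(actual_hash, expected_hash,
                              expected_png_exists, images_identical):
        if actual_hash is None:
            return None                       # text-only test; no pixel check
        if not expected_png_exists:
            return "FailureMissingImage"
        if actual_hash == expected_hash:
            return None                       # pass
        if expected_hash == '':
            return "FailureMissingImageHash"
        if images_identical:
            # Pixels match but the checksum doesn't: the baseline checksum
            # is stale, not the image.
            return "FailureImageHashIncorrect"
        return "FailureImageHashMismatch"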
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/test_type_base.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/test_type_base.py
deleted file mode 100644
index 77c728f..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/test_type_base.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Defines the interface TestTypeBase which other test types inherit from.
-
-Also defines the TestArguments "struct", used to pass additional arguments
-to tests.
-"""
-
-import cgi
-import difflib
-import errno
-import logging
-import os.path
-import subprocess
-
-from layout_package import path_utils
-
-
-class TestArguments(object):
- """Struct-like wrapper for additional arguments needed by
- specific tests."""
- # Whether to save new baseline results.
- new_baseline = False
-
- # Path to the actual PNG file generated by pixel tests
- png_path = None
-
- # Value of checksum generated by pixel tests.
- hash = None
-
- # Whether to use wdiff to generate by-word diffs.
- wdiff = False
-
- # Whether to report the locations of the expected result files used.
- show_sources = False
-
-# Python bug workaround. See the wdiff code in write_output_files() for an
-# explanation.
-_wdiff_available = True
-
-
-class TestTypeBase(object):
-
- # Filename pieces when writing failures to the test results directory.
- FILENAME_SUFFIX_ACTUAL = "-actual"
- FILENAME_SUFFIX_EXPECTED = "-expected"
- FILENAME_SUFFIX_DIFF = "-diff"
- FILENAME_SUFFIX_WDIFF = "-wdiff.html"
- FILENAME_SUFFIX_COMPARE = "-diff.png"
-
- def __init__(self, platform, root_output_dir):
- """Initialize a TestTypeBase object.
-
- Args:
- platform: the platform (e.g., 'chromium-mac-leopard')
- identifying the platform-specific results to be used.
- root_output_dir: The unix style path to the output dir.
- """
- self._root_output_dir = root_output_dir
- self._platform = platform
-
- def _make_output_directory(self, filename):
- """Creates the output directory (if needed) for a given test
- filename."""
- output_filename = os.path.join(self._root_output_dir,
- path_utils.relative_test_filename(filename))
- path_utils.maybe_make_directory(os.path.split(output_filename)[0])
-
- def _save_baseline_data(self, filename, data, modifier):
- """Saves a new baseline file into the platform directory.
-
- The file will be named simply "<test>-expected<modifier>", suitable for
- use as the expected results in a later run.
-
- Args:
- filename: path to the test file
- data: result to be saved as the new baseline
- modifier: type of the result file, e.g. ".txt" or ".png"
- """
- relative_dir = os.path.dirname(
- path_utils.relative_test_filename(filename))
- output_dir = os.path.join(
- path_utils.chromium_baseline_path(self._platform), relative_dir)
- output_file = os.path.basename(os.path.splitext(filename)[0] +
- self.FILENAME_SUFFIX_EXPECTED + modifier)
-
- path_utils.maybe_make_directory(output_dir)
- output_path = os.path.join(output_dir, output_file)
- logging.debug('writing new baseline to "%s"' % (output_path))
- open(output_path, "wb").write(data)
-
- def output_filename(self, filename, modifier):
- """Returns a filename inside the output dir that contains modifier.
-
- For example, if filename is c:/.../fast/dom/foo.html and modifier is
- "-expected.txt", the return value is
- c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt
-
- Args:
- filename: absolute filename to test file
- modifier: a string to replace the extension of filename with
-
- Return:
- The absolute windows path to the output filename
- """
- output_filename = os.path.join(self._root_output_dir,
- path_utils.relative_test_filename(filename))
- return os.path.splitext(output_filename)[0] + modifier
-
- def compare_output(self, filename, proc, output, test_args, target):
- """Method that compares the output from the test with the
- expected value.
-
- This is an abstract method to be implemented by all sub classes.
-
- Args:
- filename: absolute filename to test file
- proc: a reference to the test_shell process
- output: a string containing the output of the test
- test_args: a TestArguments object holding optional additional
- arguments
- target: Debug or Release
-
- Return:
- a list of TestFailure objects, empty if the test passes
- """
- raise NotImplementedError
-
- def write_output_files(self, filename, test_type, file_type, output,
- expected, diff=True, wdiff=False):
- """Writes the test output, the expected output and optionally the diff
- between the two to files in the results directory.
-
- The full output filename of the actual, for example, will be
- <filename><test_type>-actual<file_type>
- For instance,
- my_test-simp-actual.txt
-
- Args:
- filename: The test filename
- test_type: A string describing the test type, e.g. "simp"
- file_type: A string describing the test output file type, e.g. ".txt"
- output: A string containing the test output
- expected: A string containing the expected test output
- diff: if True, write a file containing the diffs too. This should be
- False for results that are not text
- wdiff: if True, write an HTML file containing word-by-word diffs
- """
- self._make_output_directory(filename)
- actual_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_ACTUAL + file_type)
- expected_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_EXPECTED + file_type)
- if output:
- open(actual_filename, "wb").write(output)
- if expected:
- open(expected_filename, "wb").write(expected)
-
- if not output or not expected:
- return
-
- if diff:
- diff = difflib.unified_diff(expected.splitlines(True),
- output.splitlines(True),
- expected_filename,
- actual_filename)
-
- diff_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_DIFF + file_type)
- open(diff_filename, "wb").write(''.join(diff))
-
- if wdiff:
- # Shell out to wdiff to get colored inline diffs.
- executable = path_utils.wdiff_path()
- cmd = [executable,
- '--start-delete=##WDIFF_DEL##',
- '--end-delete=##WDIFF_END##',
- '--start-insert=##WDIFF_ADD##',
- '--end-insert=##WDIFF_END##',
- expected_filename,
- actual_filename]
- filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_WDIFF)
-
- global _wdiff_available
-
- try:
- # Python's Popen has a bug that causes any pipes opened to a
- # process that can't be executed to be leaked. Since this
- # code is specifically designed to tolerate exec failures
- # to gracefully handle cases where wdiff is not installed,
- # the bug results in a massive file descriptor leak. As a
- # workaround, if an exec failure is ever experienced for
- # wdiff, assume it's not available. This will leak one
- # file descriptor but that's better than leaking each time
- # wdiff would be run.
- #
- # http://mail.python.org/pipermail/python-list/
- # 2008-August/505753.html
- # http://bugs.python.org/issue3210
- #
- # It also has a threading bug, so we don't output wdiff if
- # the Popen raises a ValueError.
- # http://bugs.python.org/issue1236
- if _wdiff_available:
- wdiff = subprocess.Popen(
- cmd, stdout=subprocess.PIPE).communicate()[0]
- wdiff_failed = False
-
- except OSError, e:
- if (e.errno == errno.ENOENT or e.errno == errno.EACCES or
- e.errno == errno.ECHILD):
- _wdiff_available = False
- else:
- raise  # re-raise, preserving the original traceback
- except ValueError, e:
- wdiff_failed = True
-
- out = open(filename, 'wb')
-
- if not _wdiff_available:
- out.write(
- "wdiff not installed.<br/> "
- "If you're running OS X, you can install via macports."
- "<br/>"
- "If running Ubuntu linux, you can run "
- "'sudo apt-get install wdiff'.")
- elif wdiff_failed:
- out.write('wdiff failed due to running with multiple '
- 'test_shells in parallel.')
- else:
- wdiff = cgi.escape(wdiff)
- wdiff = wdiff.replace('##WDIFF_DEL##', '<span class=del>')
- wdiff = wdiff.replace('##WDIFF_ADD##', '<span class=add>')
- wdiff = wdiff.replace('##WDIFF_END##', '</span>')
- out.write('<head><style>.del { background: #faa; } ')
- out.write('.add { background: #afa; }</style></head>')
- out.write('<pre>' + wdiff + '</pre>')
-
- out.close()
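The wdiff handling above is a reusable pattern: latch "unavailable" on the first exec failure so the leaky spawn is attempted at most once per run. A minimal sketch in the same Python 2 style as the deleted code:

    import errno
    import subprocess

    _wdiff_available = True

    def run_wdiff_once(cmd):
        """Returns wdiff's stdout, or None once wdiff is known missing."""
        global _wdiff_available
        if not _wdiff_available:
            return None
        try:
            return subprocess.Popen(
                cmd, stdout=subprocess.PIPE).communicate()[0]
        except OSError, e:
            if e.errno in (errno.ENOENT, errno.EACCES, errno.ECHILD):
                _wdiff_available = False  # latch; never retry the spawn
                return None
            raise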
diff --git a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/text_diff.py b/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/text_diff.py
deleted file mode 100644
index 80cd47d..0000000
--- a/webkit/tools/layout_tests/webkitpy/layout_tests/test_types/text_diff.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Compares the text output of a test to the expected text output.
-
-If the output doesn't match, returns FailureTextMismatch and outputs the diff
-files into the layout test results directory.
-"""
-
-import errno
-import logging
-import os.path
-
-from layout_package import path_utils
-from layout_package import test_failures
-from test_types import test_type_base
-
-
-def is_render_tree_dump(data):
- """Returns true if data appears to be a render tree dump as opposed to a
- plain text dump."""
- return data.find("RenderView at (0,0)") != -1
-
-
-class TestTextDiff(test_type_base.TestTypeBase):
-
- def get_normalized_output_text(self, output):
- # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin)
- # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n".
- norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace(
- "\r\n", "\n")
- return norm + "\n"
-
- def get_normalized_expected_text(self, filename, show_sources):
- """Given the filename of the test, read the expected output from a file
- and normalize the text. Returns a string with the expected text, or ''
- if the expected output file was not found."""
- # Read the platform-specific expected text.
- expected_filename = path_utils.expected_filename(filename, '.txt')
- if show_sources:
- logging.debug('Using %s' % expected_filename)
-
- return self.get_normalized_text(expected_filename)
-
- def get_normalized_text(self, filename):
- try:
- text = open(filename).read()
- except IOError, e:
- if errno.ENOENT != e.errno:
- raise
- return ''
-
- # Normalize line endings
- return text.strip("\r\n").replace("\r\n", "\n") + "\n"
-
- def compare_output(self, filename, proc, output, test_args, target):
- """Implementation of CompareOutput that checks the output text against
- the expected text from the LayoutTest directory."""
- failures = []
-
- # If we're generating a new baseline, we pass.
- if test_args.new_baseline:
- self._save_baseline_data(filename, output, ".txt")
- return failures
-
- # Normalize text to diff
- output = self.get_normalized_output_text(output)
- expected = self.get_normalized_expected_text(filename,
- test_args.show_sources)
-
- # Write output files for new tests, too.
- if output != expected:
- # Text doesn't match, write output files.
- self.write_output_files(filename, "", ".txt", output, expected,
- diff=True, wdiff=True)
-
- if expected == '':
- failures.append(test_failures.FailureMissingResult(self))
- else:
- failures.append(test_failures.FailureTextMismatch(self, True))
-
- return failures
-
- def diff_files(self, file1, file2):
- """Diff two text files.
-
- Args:
- file1, file2: full paths of the files to compare.
-
- Returns:
- True if two files are different.
- False otherwise.
- """
-
- return (self.get_normalized_text(file1) !=
- self.get_normalized_text(file2))
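The normalization in TestTextDiff is the subtle part: Cygwin rewrites an explicit "\r\n" in test output to "\r\r\n", so that sequence has to be collapsed before line endings are unified. A tiny standalone check of the behavior:

    def normalize_output_text(output):
        norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace(
            "\r\n", "\n")
        return norm + "\n"

    assert normalize_output_text("a\r\r\nb\r\n") == "a\nb\n"
    assert normalize_output_text("plain\n") == "plain\n"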