author     ojan@chromium.org <ojan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-08-05 19:24:47 +0000
committer  ojan@chromium.org <ojan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-08-05 19:24:47 +0000
commit     7fa319410e5dbc1739e476f317a33e0ab276a94a (patch)
tree       c7f4e77ea5cb7dc6e67699f2df63463daf23a741 /webkit/tools/layout_tests/run_webkit_tests.py
parent     4a83c269850847be740568f72640bd57b0c2c86d (diff)
First stab at a layout tests flakiness/speed dashboard.
This isn't functional yet, but I want to get this reviewed
and in the tree so I can do the rest incrementally.
This works by having the bots generate JSON that is
then read into a static HTML file that generates
the dashboard from the JSON.
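For illustration, a minimal sketch of the generation side. The `write_jsonp` helper is hypothetical, not part of this change: it mirrors the `ADD_EXPECTATIONS(...)` wrapper that `_WriteJSONFiles` writes in the diff below (so a static page can pull the data in via a script tag), but serializes with the `json` module, which is what the diff's own TODO asks for, instead of `repr()` plus regex whitespace-stripping:

```python
import json
import os

def write_jsonp(results_dir, filename, callback, payload):
    # Hypothetical helper, not part of this change. Writes `payload` as
    # JSON wrapped in a JS function call, mirroring the
    # ADD_EXPECTATIONS(...) convention used by _WriteJSONFiles below.
    path = os.path.join(results_dir, filename)
    f = open(path, "w")
    try:
        # Compact separators strip whitespace to keep the file small,
        # matching the diff's intent.
        f.write("%s(%s);" % (callback,
                             json.dumps(payload, separators=(",", ":"))))
    finally:
        f.close()

# Illustrative only; the test name and expectation value are made up:
# write_jsonp("layout-test-results", "expectations.json",
#             "ADD_EXPECTATIONS", {"fast/js/foo.html": "TIMEOUT"})
```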
I've tried to make this generic: once this is
functional, we should be able to use the same HTML
file for our other test types (e.g. UI tests) as well,
just by having the bots that run those tests generate
the JSON files and copy them to the right place.
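A hedged sketch of what such reuse might look like for another test type, based on the constructor signature visible in the diff below. The function name and builder name here are placeholders, and the builder name/build number are hardcoded exactly as they are in the diff itself:

```python
from layout_package import json_results_generator

def write_ui_test_results(failures, individual_test_timings,
                          results_file_path):
    # Hypothetical call site in a UI-test runner, following the same
    # pattern the diff adds to run_webkit_tests.py.
    generator = json_results_generator.JSONResultsGenerator(
        failures, individual_test_timings,
        "UITestBuilder",   # builder_name (placeholder, as in the diff)
        "12346",           # build_number (placeholder, as in the diff)
        results_file_path)
    results_file = open(results_file_path, "w")
    try:
        results_file.write(generator.GetJSON())
    finally:
        results_file.close()
```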
All the work that needs doing to get this 100% functional
is listed as a TODO at the top of flakiness_dashboard.html.
Most of what's left is buildbot integration (i.e.
copying files to the right place on the bots).
Review URL: http://codereview.chromium.org/149656
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@22505 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'webkit/tools/layout_tests/run_webkit_tests.py')
-rwxr-xr-x  webkit/tools/layout_tests/run_webkit_tests.py | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+), 0 deletions(-)
```diff
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index 1b09bd2..89af345 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -28,6 +28,7 @@ import optparse
 import os
 import Queue
 import random
+import re
 import shutil
 import subprocess
 import sys
@@ -39,6 +40,7 @@ import google.path_utils
 from layout_package import compare_failures
 from layout_package import test_expectations
 from layout_package import http_server
+from layout_package import json_results_generator
 from layout_package import path_utils
 from layout_package import platform_utils
 from layout_package import test_failures
@@ -575,6 +577,9 @@ class TestRunner:
     # Write summaries to stdout.
     self._PrintResults(failures, sys.stdout)
 
+    if self._options.verbose:
+      self._WriteJSONFiles(failures, individual_test_timings);
+
     # Write the same data to a log file.
     out_filename = os.path.join(self._options.results_directory, "score.txt")
     output_file = open(out_filename, "w")
@@ -591,6 +596,33 @@ class TestRunner:
     sys.stderr.flush()
     return len(regressions)
 
+  def _WriteJSONFiles(self, failures, individual_test_timings):
+    # Write a json file of the test_expectations.txt file for the layout tests
+    # dashboard.
+    expectations_file = open(os.path.join(self._options.results_directory,
+                                          "expectations.json"), "w")
+    # TODO(ojan): Generate JSON using a JSON library instead of relying on
+    # GetExpectationsForAllPlatforms returning an object that only uses
+    # primitive types.
+    # Strip whitespace to reduce filesize.
+    expectations_json = re.sub(r'\s+', '',
+        repr(self._expectations.GetExpectationsForAllPlatforms()))
+    expectations_file.write(("ADD_EXPECTATIONS(" + expectations_json + ");"))
+    expectations_file.close()
+
+    results_file_path = os.path.join(self._options.results_directory,
+                                     "results.json")
+    # TODO(ojan): get these from the bot
+    builder_name = "WebKitBuilder"
+    build_number = "12346"
+    json_generator = json_results_generator.JSONResultsGenerator(failures,
+        individual_test_timings, builder_name, build_number, results_file_path)
+    results_json = json_generator.GetJSON()
+
+    results_file = open(results_file_path, "w")
+    results_file.write(results_json)
+    results_file.close()
+
   def _PrintTimingStatistics(self, directory_test_timings,
                              individual_test_timings, failures):
     self._PrintAggregateTestStatistics(individual_test_timings)
```
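The `json_results_generator` module itself is not part of this diff, so the following is only a guess at its shape: a minimal sketch of a class satisfying the constructor and `GetJSON()` calls above. All field names, the `ADD_RESULTS` wrapper, and the assumed shapes of `failures` and `individual_test_timings` are hypothetical.

```python
import json

class JSONResultsGenerator(object):
    # Sketch of a class matching the interface the diff uses:
    # JSONResultsGenerator(failures, individual_test_timings, builder_name,
    #                      build_number, results_file_path), then GetJSON().
    # The real module lives in layout_package and is not in this diff.

    def __init__(self, failures, individual_test_timings, builder_name,
                 build_number, results_file_path):
        self._failures = failures
        self._timings = individual_test_timings
        self._builder_name = builder_name
        self._build_number = build_number
        self._results_file_path = results_file_path

    def GetJSON(self):
        # Assumes timings arrive as (test_name, seconds) pairs and that
        # `failures` is keyed by test name; both are guesses.
        tests = {}
        for test_name, seconds in self._timings:
            tests[test_name] = {"time": seconds,
                                "failed": test_name in self._failures}
        results = {"builder_name": self._builder_name,
                   "build_number": self._build_number,
                   "tests": tests}
        # Same JSONP-style wrapper convention as ADD_EXPECTATIONS above;
        # the ADD_RESULTS name is invented for this sketch.
        return "ADD_RESULTS(%s);" % json.dumps(results, separators=(",", ":"))
```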