summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--tools/purify/chrome_tests.py11
-rwxr-xr-xwebkit/tools/layout_tests/run_webkit_tests.py15
2 files changed, 19 insertions, 7 deletions
diff --git a/tools/purify/chrome_tests.py b/tools/purify/chrome_tests.py
index e349cb2..b6cdf7e 100644
--- a/tools/purify/chrome_tests.py
+++ b/tools/purify/chrome_tests.py
@@ -284,9 +284,6 @@ class ChromeTests:
script_cmd = ["python.exe", script, "--run-singly", "-v",
"--noshow-results", "--time-out-ms=200000",
"--nocheck-sys-deps"]
- if not run_all:
- script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
-
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
@@ -299,8 +296,14 @@ class ChromeTests:
script_cmd, multi=True, cmd_args=["--timeout=0"])
return ret
- # store each chunk in its own directory so that we can find the data later
+ # Store each chunk in its own directory so that we can find the data later.
chunk_dir = os.path.join("chunk_%05d" % chunk_num)
+ script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
+
+ # Put the layout test results in the chunk dir as well.
+  script_cmd.append("--results-dir=%s" % os.path.join(self._report_dir,
+                                                      chunk_dir))
+
ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
script_cmd, multi=True, cmd_args=["--timeout=0"],
out_dir_extra=chunk_dir)
diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py
index 00191fa..ff1d024 100755
--- a/webkit/tools/layout_tests/run_webkit_tests.py
+++ b/webkit/tools/layout_tests/run_webkit_tests.py
@@ -262,18 +262,27 @@ class TestRunner:
slice_end = min(num_tests, slice_start + chunk_len)
files = test_files[slice_start:slice_end]
- logging.info('Run: %d tests (chunk slice [%d:%d] of %d)' % (
- (slice_end - slice_start), slice_start, slice_end, num_tests))
+ tests_run_msg = 'Run: %d tests (chunk slice [%d:%d] of %d)' % (
+ (slice_end - slice_start), slice_start, slice_end, num_tests)
+ logging.info(tests_run_msg)
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
if self._options.run_chunk and (slice_end - slice_start < chunk_len):
extra = 1 + chunk_len - (slice_end - slice_start)
- logging.info(' last chunk is partial, appending [0:%d]' % extra)
+ extra_msg = ' last chunk is partial, appending [0:%d]' % extra
+ logging.info(extra_msg)
+ tests_run_msg += "\n" + extra_msg
files.extend(test_files[0:extra])
self._test_files_list = files
self._test_files = set(files)
+ tests_run_filename = os.path.join(self._options.results_directory,
+ "tests_run.txt")
+ tests_run_file = open(tests_run_filename, "w")
+ tests_run_file.write(tests_run_msg)
+ tests_run_file.close()
+
# update expectations so that the stats are calculated correctly
self._expectations = self._ParseExpectations(
path_utils.PlatformName(), options.target == 'Debug')