summaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authordkegel@google.com <dkegel@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2009-03-30 20:29:28 +0000
committerdkegel@google.com <dkegel@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2009-03-30 20:29:28 +0000
commitda9fac160e8d37a9d317346f700096cb0cd630bb (patch)
tree9fd8968687199f84488e9a9b9db37f9959b008e2 /tools
parentd4d91b1c67990fc11fcc7cd2c98aedd08a40ad0a (diff)
downloadchromium_src-da9fac160e8d37a9d317346f700096cb0cd630bb.zip
chromium_src-da9fac160e8d37a9d317346f700096cb0cd630bb.tar.gz
chromium_src-da9fac160e8d37a9d317346f700096cb0cd630bb.tar.bz2
Resubmit http://codereview.chromium.org/55034 with one-line fix to test_shell_thread.py
Review URL: http://codereview.chromium.org/58003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12804 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rwxr-xr-xtools/valgrind/chrome_tests.py241
-rw-r--r--tools/valgrind/suppressions.txt32
-rwxr-xr-xtools/valgrind/valgrind_analyze.py15
3 files changed, 147 insertions, 141 deletions
diff --git a/tools/valgrind/chrome_tests.py b/tools/valgrind/chrome_tests.py
index 42c90ba..a21e0ec 100755
--- a/tools/valgrind/chrome_tests.py
+++ b/tools/valgrind/chrome_tests.py
@@ -8,8 +8,7 @@
''' Runs various chrome tests through valgrind_test.py.
This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice
-to merge these two files. For now, I'm leaving it here with sections that
-aren't supported commented out as this is more of a work in progress.
+to merge these two files.
'''
import glob
@@ -39,10 +38,6 @@ class TestNotFound(Exception): pass
class ChromeTests:
'''This class is derived from the chrome_tests.py file in ../purify/.
-
- TODO(erg): Finish implementing this. I've commented out all the parts that I
- don't have working yet. We still need to deal with layout tests, and long
- term, the UI tests.
'''
def __init__(self, options, args, test):
@@ -56,8 +51,7 @@ class ChromeTests:
"googleurl": self.TestGoogleurl,
"media": self.TestMedia,
"printing": self.TestPrinting,
-# "layout": self.TestLayout,
-# "layout_all": self.TestLayoutAll,
+ "layout": self.TestLayout,
"ui": self.TestUI
}
@@ -143,9 +137,9 @@ class ChromeTests:
cmd.append("--indirect")
if exe:
cmd.append(os.path.join(self._options.build_dir, exe))
- # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
- # so we can find the slowpokes.
- cmd.append("--gtest_print_time");
+  # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
+ # so we can find the slowpokes.
+ cmd.append("--gtest_print_time")
return cmd
def Run(self):
@@ -186,47 +180,6 @@ class ChromeTests:
cmd.extend(cmd_args)
return common.RunSubprocess(cmd, 0)
- def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None,
- out_dir_extra=None):
- '''Valgrind a target binary, which will be executed one or more times via a
- script or driver program.
- Args:
- module - which top level component this test is from (webkit, base, etc.)
- exe - the name of the exe (it's assumed to exist in build_dir)
- name - the name of this test (used to name output files)
- script - the driver program or script. If it's python.exe, we use
- search-path behavior to execute, otherwise we assume that it is in
- build_dir.
- multi - a boolean hint that the exe will be run multiple times, generating
- multiple output files (without this option, only the last run will be
- recorded and analyzed)
- cmd_args - extra arguments to pass to the valgrind_test.py script
- '''
- cmd = self._DefaultCommand(module)
- exe = os.path.join(self._options.build_dir, exe)
- cmd.append("--exe=%s" % exe)
- cmd.append("--name=%s" % name)
- if multi:
- out = os.path.join(google.path_utils.ScriptDir(),
- "latest")
- if out_dir_extra:
- out = os.path.join(out, out_dir_extra)
- if os.path.exists(out):
- old_files = glob.glob(os.path.join(out, "*.txt"))
- for f in old_files:
- os.remove(f)
- else:
- os.makedirs(out)
- out = os.path.join(out, "%s%%5d.txt" % name)
- cmd.append("--out_file=%s" % out)
- if cmd_args:
- cmd.extend(cmd_args)
- if script[0] != "python.exe" and not os.path.exists(script[0]):
- script[0] = os.path.join(self._options.build_dir, script[0])
- cmd.extend(script)
- self._ReadGtestFilterFile(name, cmd)
- return common.RunSubprocess(cmd, 0)
-
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
@@ -258,92 +211,98 @@ class ChromeTests:
"--ui-test-action-timeout=80000",
"--ui-test-action-max-timeout=180000"])
-# def TestLayoutAll(self):
-# return self.TestLayout(run_all=True)
-
-# def TestLayout(self, run_all=False):
-# # A "chunk file" is maintained in the local directory so that each test
-# # runs a slice of the layout tests of size chunk_size that increments with
-# # each run. Since tests can be added and removed from the layout tests at
-# # any time, this is not going to give exact coverage, but it will allow us
-# # to continuously run small slices of the layout tests under purify rather
-# # than having to run all of them in one shot.
-# chunk_num = 0
-# # Tests currently seem to take about 20-30s each.
-# chunk_size = 120 # so about 40-60 minutes per run
-# chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt")
-# if not run_all:
-# try:
-# f = open(chunk_file)
-# if f:
-# str = f.read()
-# if len(str):
-# chunk_num = int(str)
-# # This should be enough so that we have a couple of complete runs
-# # of test data stored in the archive (although note that when we loop
-# # that we almost guaranteed won't be at the end of the test list)
-# if chunk_num > 10000:
-# chunk_num = 0
-# f.close()
-# except IOError, (errno, strerror):
-# logging.error("error reading from file %s (%d, %s)" % (chunk_file,
-# errno, strerror))
-
-# script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
-# "run_webkit_tests.py")
-# script_cmd = ["python.exe", script, "--run-singly", "-v",
-# "--noshow-results", "--time-out-ms=200000",
-# "--nocheck-sys-deps"]
-# if not run_all:
-# script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
-
-# if len(self._args):
-# # if the arg is a txt file, then treat it as a list of tests
-# if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
-# script_cmd.append("--test-list=%s" % self._args[0])
-# else:
-# script_cmd.extend(self._args)
-
-# if run_all:
-# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
-# script_cmd, multi=True, cmd_args=["--timeout=0"])
-# return ret
-
-# # store each chunk in its own directory so that we can find the data later
-# chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
-# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
-# script_cmd, multi=True, cmd_args=["--timeout=0"],
-# out_dir_extra=chunk_dir)
-
-# # Wait until after the test runs to completion to write out the new chunk
-# # number. This way, if the bot is killed, we'll start running again from
-# # the current chunk rather than skipping it.
-# try:
-# f = open(chunk_file, "w")
-# chunk_num += 1
-# f.write("%d" % chunk_num)
-# f.close()
-# except IOError, (errno, strerror):
-# logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
-# strerror))
-# # Since we're running small chunks of the layout tests, it's important to
-# # mark the ones that have errors in them. These won't be visible in the
-# # summary list for long, but will be useful for someone reviewing this bot.
-# return ret
-
-# def TestUI(self):
-# if not self._options.no_reinstrument:
-# instrumentation_error = self.InstrumentDll()
-# if instrumentation_error:
-# return instrumentation_error
-# return self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
-# ["ui_tests.exe",
-# "--single-process",
-# "--ui-test-timeout=120000",
-# "--ui-test-action-timeout=80000",
-# "--ui-test-action-max-timeout=180000"],
-# multi=True)
-
+ def TestLayoutChunk(self, chunk_num, chunk_size):
+ # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
+ # list of tests. Wrap around to beginning of list at end.
+ # If chunk_size is zero, run all tests in the list once.
+ # If a text file is given as argument, it is used as the list of tests.
+ #
+ # Build the ginormous commandline in 'cmd'.
+ # It's going to be roughly
+ # python valgrind_test.py ... python run_webkit_tests.py ...
+ # but we'll use the --indirect flag to valgrind_test.py
+ # to avoid valgrinding python.
+ # Start by building the valgrind_test.py commandline.
+ cmd = self._DefaultCommand("webkit")
+ cmd.append("--trace_children")
+ cmd.append("--indirect")
+    # Now build script_cmd, the run_webkit_tests.py commandline
+ # Store each chunk in its own directory so that we can find the data later
+ chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
+ test_shell = os.path.join(self._options.build_dir, "test_shell")
+ out_dir = os.path.join(google.path_utils.ScriptDir(), "latest")
+ out_dir = os.path.join(out_dir, chunk_dir)
+ if os.path.exists(out_dir):
+ old_files = glob.glob(os.path.join(out_dir, "*.txt"))
+ for f in old_files:
+ os.remove(f)
+ else:
+ os.makedirs(out_dir)
+ script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
+ "run_webkit_tests.py")
+ script_cmd = ["python", script, "--run-singly", "-v",
+ "--noshow-results", "--time-out-ms=200000",
+ "--nocheck-sys-deps"]
+ if (chunk_size > 0):
+ script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
+ if len(self._args):
+ # if the arg is a txt file, then treat it as a list of tests
+ if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
+ script_cmd.append("--test-list=%s" % self._args[0])
+ else:
+ script_cmd.extend(self._args)
+ self._ReadGtestFilterFile("layout", script_cmd)
+ # Now run script_cmd with the wrapper in cmd
+ cmd.extend(["--"])
+ cmd.extend(script_cmd)
+ ret = common.RunSubprocess(cmd, 0)
+ return ret
+
+ def TestLayout(self):
+ # A "chunk file" is maintained in the local directory so that each test
+ # runs a slice of the layout tests of size chunk_size that increments with
+ # each run. Since tests can be added and removed from the layout tests at
+ # any time, this is not going to give exact coverage, but it will allow us
+ # to continuously run small slices of the layout tests under purify rather
+ # than having to run all of them in one shot.
+ chunk_size = self._options.num_tests
+ if (chunk_size == 0):
+ return self.TestLayoutChunk(0, 0)
+ chunk_num = 0
+ chunk_file = os.path.join("valgrind_layout_chunk.txt")
+ logging.info("Reading state from " + chunk_file)
+ try:
+ f = open(chunk_file)
+ if f:
+ str = f.read()
+ if len(str):
+ chunk_num = int(str)
+ # This should be enough so that we have a couple of complete runs
+ # of test data stored in the archive (although note that when we loop
+        # we are almost guaranteed not to be at the end of the test list)
+ if chunk_num > 10000:
+ chunk_num = 0
+ f.close()
+ except IOError, (errno, strerror):
+ logging.error("error reading from file %s (%d, %s)" % (chunk_file,
+ errno, strerror))
+ ret = self.TestLayoutChunk(chunk_num, chunk_size)
+ # Wait until after the test runs to completion to write out the new chunk
+ # number. This way, if the bot is killed, we'll start running again from
+ # the current chunk rather than skipping it.
+ logging.info("Saving state to " + chunk_file)
+ try:
+ f = open(chunk_file, "w")
+ chunk_num += 1
+ f.write("%d" % chunk_num)
+ f.close()
+ except IOError, (errno, strerror):
+ logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
+ strerror))
+ # Since we're running small chunks of the layout tests, it's important to
+ # mark the ones that have errors in them. These won't be visible in the
+ # summary list for long, but will be useful for someone reviewing this bot.
+ return ret
def _main(_):
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
@@ -370,6 +329,12 @@ def _main(_):
parser.add_option("", "--generate_suppressions", action="store_true",
default=False,
help="Skip analysis and generate suppressions")
+ # My machine can do about 120 layout tests/hour in release mode.
+ # Let's do 30 minutes worth per run.
+ # The CPU is mostly idle, so perhaps we can raise this when
+ # we figure out how to run them more efficiently.
+ parser.add_option("-n", "--num_tests", default=60, type="int",
+ help="for layout tests: number of subtests per run. 0 for all.")
options, args = parser.parse_args()
diff --git a/tools/valgrind/suppressions.txt b/tools/valgrind/suppressions.txt
index 96b2708..0c1d2f7 100644
--- a/tools/valgrind/suppressions.txt
+++ b/tools/valgrind/suppressions.txt
@@ -152,9 +152,8 @@
v8_bug_275
Memcheck:Leak
fun:_Znaj
- fun:_ZN2v88internal8NewArrayIcEEPT_i
+ ...
fun:_ZN2v88internal8FlagList18SetFlagsFromStringEPKci
- fun:_ZN2v82V818SetFlagsFromStringEPKci
}
######### Baseline leaks -- just here to get test_shell_tests green -- fix later ######
{
@@ -260,3 +259,32 @@
obj:*
fun:pango_itemize_with_base_dir
}
+{
+ # See http://crbug.com/9450
+ v8_bindings_leak_crbug_9450
+ Memcheck:Leak
+ fun:_Znwj
+ fun:_ZN7WebCore7V8Proxy20RegisterGlobalHandleENS_16GlobalHandleTypeEPvN2v810PersistentINS3_5ValueEEE
+}
+{
+ # Font memory leak. See http://crbug.com/9475
+ bug_9475
+ Memcheck:Leak
+ fun:_Znwj
+ fun:_ZN7WebCore9FontCache22createFontPlatformDataERKNS_15FontDescriptionERKNS_12AtomicStringE
+}
+{
+ # V8 leak? See http://crbug.com/9458
+ bug_9458
+ Memcheck:Leak
+ fun:_Znwj
+ fun:_NPN_RegisterObject
+ fun:_Z25CreateV8ObjectForNPObjectP8NPObjectS0_
+}
+{
+ # webkit leak? See http://crbug.com/9503
+ bug_9503
+ Memcheck:Leak
+ ...
+ fun:_ZN19TestWebViewDelegate24UpdateSelectionClipboardEb
+}
diff --git a/tools/valgrind/valgrind_analyze.py b/tools/valgrind/valgrind_analyze.py
index 4260fcd..4ddbff8 100755
--- a/tools/valgrind/valgrind_analyze.py
+++ b/tools/valgrind/valgrind_analyze.py
@@ -11,6 +11,7 @@ import logging
import optparse
import os
import sys
+import time
from xml.dom.minidom import parse
# These are functions (using C++ mangled names) that we look for in stack
@@ -189,10 +190,22 @@ class ValgrindAnalyze:
self._errors = set()
for file in files:
+ # Wait up to a minute for valgrind to finish writing.
+ f = open(file, "r")
+ ntries = 60
+ for tries in range(0, ntries):
+ f.seek(0)
+ if sum((1 for line in f if '</valgrindoutput>' in line)) > 0:
+ break
+ time.sleep(1)
+ f.close()
+ if tries == ntries-1:
+ logging.error("valgrind never finished?")
raw_errors = parse(file).getElementsByTagName("error")
for raw_error in raw_errors:
# Ignore "possible" leaks for now by default.
- if (show_all_leaks or getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
+ if (show_all_leaks or
+ getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
self._errors.add(ValgrindError(source_dir, raw_error))
def Report(self):