diff options
-rwxr-xr-x | tools/valgrind/chrome_tests.py | 346 | ||||
-rwxr-xr-x | tools/valgrind/chrome_tests.sh | 4 | ||||
-rw-r--r-- | tools/valgrind/suppressions.txt | 183 | ||||
-rwxr-xr-x | tools/valgrind/valgrind_analyze.py | 176 | ||||
-rwxr-xr-x | tools/valgrind/valgrind_test.py | 154 |
5 files changed, 863 insertions, 0 deletions
diff --git a/tools/valgrind/chrome_tests.py b/tools/valgrind/chrome_tests.py new file mode 100755 index 0000000..d393353 --- /dev/null +++ b/tools/valgrind/chrome_tests.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# chrome_tests.py + +''' Runs various chrome tests through valgrind_test.py. + +This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice +to merge these two files. For now, I'm leaving it here with sections that +aren't supported commented out as this is more of a work in progress. +''' + +import glob +import logging +import optparse +import os +import stat +import sys + +import google.logging_utils +import google.path_utils +# Import the platform_utils up in the layout tests which have been modified to +# work under non-Windows platforms instead of the ones that are in the +# tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH +# correctly.) +# +# TODO(erg): Copy/Move the relevant functions from the layout_package version +# of platform_utils back up to google.platform_utils +# package. http://crbug.com/6164 +import layout_package.platform_utils + +import common + + +class TestNotFound(Exception): pass + + +class ChromeTests: + '''This class is derived from the chrome_tests.py file in ../purify/. + + TODO(erg): Finish implementing this. I've commented out all the parts that I + don't have working yet. We still need to deal with layout tests, and long + term, the UI tests. 
+ ''' + + def __init__(self, options, args, test): + # the known list of tests + self._test_list = { +# "test_shell": self.TestTestShell, + "unit": self.TestUnit, + "net": self.TestNet, + "ipc": self.TestIpc, + "base": self.TestBase, +# "layout": self.TestLayout, +# "layout_all": self.TestLayoutAll, +# "ui": self.TestUI + } + + if test not in self._test_list: + raise TestNotFound("Unknown test: %s" % test) + + self._options = options + self._args = args + self._test = test + + script_dir = google.path_utils.ScriptDir() + utility = layout_package.platform_utils.PlatformUtility(script_dir) + # Compute the top of the tree (the "source dir") from the script dir (where + # this script lives). We assume that the script dir is in tools/valgrind/ + # relative to the top of the tree. + self._source_dir = os.path.dirname(os.path.dirname(script_dir)) + # since this path is used for string matching, make sure it's always + # an absolute Windows-style path + self._source_dir = utility.GetAbsolutePath(self._source_dir) + valgrind_test = os.path.join(script_dir, "valgrind_test.py") + self._command_preamble = ["python", valgrind_test, "--echo_to_stdout", + "--source_dir=%s" % (self._source_dir)] + + def _DefaultCommand(self, module, exe=None): + '''Generates the default command array that most tests will use.''' + module_dir = os.path.join(self._source_dir, module) + + # For now, the suppressions files are all the same across all modules. Copy + # the code in the purify version of chrome_tests.py if we ever need + # per-module suppressions again... 
+ self._data_dir = google.path_utils.ScriptDir() + + if not self._options.build_dir: + dir_chrome = os.path.join(self._source_dir, "chrome", "Hammer") + dir_module = os.path.join(module_dir, "Hammer") + if exe: + exe_chrome = os.path.join(dir_chrome, exe) + exe_module = os.path.join(dir_module, exe) + if os.path.isfile(exe_chrome) and not os.path.isfile(exe_module): + self._options.build_dir = dir_chrome + elif os.path.isfile(exe_module) and not os.path.isfile(exe_chrome): + self._options.build_dir = dir_module + elif (os.stat(exe_module)[stat.ST_MTIME] > + os.stat(exe_chrome)[stat.ST_MTIME]): + self._options.build_dir = dir_module + else: + self._options.build_dir = dir_chrome + else: + if os.path.isdir(dir_chrome) and not os.path.isdir(dir_module): + self._options.build_dir = dir_chrome + elif os.path.isdir(dir_module) and not os.path.isdir(dir_chrome): + self._options.build_dir = dir_module + elif (os.stat(dir_module)[stat.ST_MTIME] > + os.stat(dir_chrome)[stat.ST_MTIME]): + self._options.build_dir = dir_module + else: + self._options.build_dir = dir_chrome + + cmd = list(self._command_preamble) + cmd.append("--data_dir=%s" % self._data_dir) + if self._options.baseline: + cmd.append("--baseline") + if self._options.verbose: + cmd.append("--verbose") + if self._options.generate_suppressions: + cmd.append("--generate_suppressions") + if exe: + cmd.append(os.path.join(self._options.build_dir, exe)) + return cmd + + def Run(self): + ''' Runs the test specified by command-line argument --test ''' + logging.info("running test %s" % (self._test)) + return self._test_list[self._test]() + + def _ReadGtestFilterFile(self, name, cmd): + '''Read a file which is a list of tests to filter out with --gtest_filter + and append the command-line option to cmd. 
+ ''' + filters = [] + filename = os.path.join(self._data_dir, name + ".gtest.txt") + if os.path.exists(filename): + f = open(filename, 'r') + for line in f.readlines(): + if line.startswith("#") or line.startswith("//") or line.isspace(): + continue + line = line.rstrip() + filters.append(line) + gtest_filter = self._options.gtest_filter + if len(filters): + if gtest_filter: + gtest_filter += ":" + if gtest_filter.find("-") < 0: + gtest_filter += "-" + else: + gtest_filter = "-" + gtest_filter += ":".join(filters) + if gtest_filter: + cmd.append("--gtest_filter=%s" % gtest_filter) + + def SimpleTest(self, module, name): + cmd = self._DefaultCommand(module, name) + self._ReadGtestFilterFile(name, cmd) + return common.RunSubprocess(cmd, 0) + + def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None, + out_dir_extra=None): + '''Valgrind a target binary, which will be executed one or more times via a + script or driver program. + Args: + module - which top level component this test is from (webkit, base, etc.) + exe - the name of the exe (it's assumed to exist in build_dir) + name - the name of this test (used to name output files) + script - the driver program or script. If it's python.exe, we use + search-path behavior to execute, otherwise we assume that it is in + build_dir. 
+ multi - a boolean hint that the exe will be run multiple times, generating + multiple output files (without this option, only the last run will be + recorded and analyzed) + cmd_args - extra arguments to pass to the valgrind_test.py script + ''' + cmd = self._DefaultCommand(module) + exe = os.path.join(self._options.build_dir, exe) + cmd.append("--exe=%s" % exe) + cmd.append("--name=%s" % name) + if multi: + out = os.path.join(google.path_utils.ScriptDir(), + "latest") + if out_dir_extra: + out = os.path.join(out, out_dir_extra) + if os.path.exists(out): + old_files = glob.glob(os.path.join(out, "*.txt")) + for f in old_files: + os.remove(f) + else: + os.makedirs(out) + out = os.path.join(out, "%s%%5d.txt" % name) + cmd.append("--out_file=%s" % out) + if cmd_args: + cmd.extend(cmd_args) + if script[0] != "python.exe" and not os.path.exists(script[0]): + script[0] = os.path.join(self._options.build_dir, script[0]) + cmd.extend(script) + self._ReadGtestFilterFile(name, cmd) + return common.RunSubprocess(cmd, 0) + + def TestBase(self): + return self.SimpleTest("base", "base_unittests") + + def TestIpc(self): + return self.SimpleTest("chrome", "ipc_tests") + + def TestNet(self): + return self.SimpleTest("net", "net_unittests") + + def TestTestShell(self): + return self.SimpleTest("webkit", "test_shell_tests") + + def TestUnit(self): + return self.SimpleTest("chrome", "unit_tests") + +# def TestLayoutAll(self): +# return self.TestLayout(run_all=True) + +# def TestLayout(self, run_all=False): +# # A "chunk file" is maintained in the local directory so that each test +# # runs a slice of the layout tests of size chunk_size that increments with +# # each run. Since tests can be added and removed from the layout tests at +# # any time, this is not going to give exact coverage, but it will allow us +# # to continuously run small slices of the layout tests under purify rather +# # than having to run all of them in one shot. 
+# chunk_num = 0 +# # Tests currently seem to take about 20-30s each. +# chunk_size = 120 # so about 40-60 minutes per run +# chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt") +# if not run_all: +# try: +# f = open(chunk_file) +# if f: +# str = f.read() +# if len(str): +# chunk_num = int(str) +# # This should be enough so that we have a couple of complete runs +# # of test data stored in the archive (although note that when we loop +# # that we almost guaranteed won't be at the end of the test list) +# if chunk_num > 10000: +# chunk_num = 0 +# f.close() +# except IOError, (errno, strerror): +# logging.error("error reading from file %s (%d, %s)" % (chunk_file, +# errno, strerror)) + +# script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", +# "run_webkit_tests.py") +# script_cmd = ["python.exe", script, "--run-singly", "-v", +# "--noshow-results", "--time-out-ms=200000", +# "--nocheck-sys-deps"] +# if not run_all: +# script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) + +# if len(self._args): +# # if the arg is a txt file, then treat it as a list of tests +# if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": +# script_cmd.append("--test-list=%s" % self._args[0]) +# else: +# script_cmd.extend(self._args) + +# if run_all: +# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", +# script_cmd, multi=True, cmd_args=["--timeout=0"]) +# return ret + +# # store each chunk in its own directory so that we can find the data later +# chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) +# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", +# script_cmd, multi=True, cmd_args=["--timeout=0"], +# out_dir_extra=chunk_dir) + +# # Wait until after the test runs to completion to write out the new chunk +# # number. This way, if the bot is killed, we'll start running again from +# # the current chunk rather than skipping it. 
+# try: +# f = open(chunk_file, "w") +# chunk_num += 1 +# f.write("%d" % chunk_num) +# f.close() +# except IOError, (errno, strerror): +# logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, +# strerror)) +# # Since we're running small chunks of the layout tests, it's important to +# # mark the ones that have errors in them. These won't be visible in the +# # summary list for long, but will be useful for someone reviewing this bot. +# return ret + +# def TestUI(self): +# if not self._options.no_reinstrument: +# instrumentation_error = self.InstrumentDll() +# if instrumentation_error: +# return instrumentation_error +# return self.ScriptedTest("chrome", "chrome.exe", "ui_tests", +# ["ui_tests.exe", +# "--single-process", +# "--ui-test-timeout=120000", +# "--ui-test-action-timeout=80000", +# "--ui-test-action-max-timeout=180000"], +# multi=True) + + +def _main(argv): + parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> " + "[-t <test> ...]") + parser.disable_interspersed_args() + parser.add_option("-b", "--build_dir", + help="the location of the output of the compiler output") + parser.add_option("-t", "--test", action="append", + help="which test to run") + parser.add_option("", "--baseline", action="store_true", default=False, + help="generate baseline data instead of validating") + parser.add_option("", "--gtest_filter", + help="additional arguments to --gtest_filter") + parser.add_option("-v", "--verbose", action="store_true", default=False, + help="verbose output - enable debug log messages") + parser.add_option("", "--no-reinstrument", action="store_true", default=False, + help="Don't force a re-instrumentation for ui_tests") + parser.add_option("", "--generate_suppressions", action="store_true", + default=False, + help="Skip analysis and generate suppressions") + + options, args = parser.parse_args() + + if options.verbose: + google.logging_utils.config_root(logging.DEBUG) + else: + google.logging_utils.config_root() + + if not 
options.test or not len(options.test): + parser.error("--test not specified") + + for t in options.test: + tests = ChromeTests(options, args, t) + ret = tests.Run() + if ret: return ret + return 0 + + +if __name__ == "__main__": + ret = _main(sys.argv) + sys.exit(ret) + diff --git a/tools/valgrind/chrome_tests.sh b/tools/valgrind/chrome_tests.sh new file mode 100755 index 0000000..e854252 --- /dev/null +++ b/tools/valgrind/chrome_tests.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +export THISDIR=`dirname $0` +PYTHONPATH=$THISDIR/../../webkit/tools/layout_tests/:$THISDIR/../purify:$THISDIR/../python "./chrome_tests.py" "$@" diff --git a/tools/valgrind/suppressions.txt b/tools/valgrind/suppressions.txt new file mode 100644 index 0000000..d142686 --- /dev/null +++ b/tools/valgrind/suppressions.txt @@ -0,0 +1,183 @@ +{ + Uninitialized value in deflate + Memcheck:Cond + fun:longest_match + fun:deflate_slow + fun:MOZ_Z_deflate +} +{ + dlopen leak; not our problem. + Memcheck:Leak + fun:malloc + fun:_dl_map_object_from_fd + fun:_dl_map_object + fun:dl_open_worker + fun:_dl_catch_error + fun:_dl_open + fun:dlopen_doit + fun:_dl_catch_error + fun:_dlerror_run + fun:dlopen@@GLIBC_2.1 + fun:PR_LoadLibraryWithFlags + obj:* +} +{ + Leak inside GTK? + Memcheck:Leak + fun:calloc + fun:g_malloc0 + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + fun:g_type_register_fundamental + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + fun:g_type_init_with_debug_flags + fun:g_type_init + fun:gdk_pre_parse_libgtk_only + obj:/usr/lib32/libgtk-x11-2.0.so.0.1200.9 + fun:g_option_context_parse + fun:gtk_parse_args +} +{ + Leak inside GTK? 
+ Memcheck:Leak + fun:memalign + fun:posix_memalign + obj:/usr/lib32/libglib-2.0.so.0.1600.3 + fun:g_slice_alloc + fun:g_slice_alloc0 + fun:g_type_create_instance + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + fun:g_object_newv + fun:g_object_new_valist + fun:g_object_new + fun:gdk_display_open + fun:gdk_display_open_default_libgtk_only +} +{ + Fontconfig leak? + Memcheck:Leak + fun:malloc + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + fun:XML_ParseBuffer + fun:FcConfigParseAndLoad + fun:FcConfigParseAndLoad + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + fun:XML_ParseBuffer + fun:FcConfigParseAndLoad + fun:FcInitLoadConfig + fun:FcInitLoadConfigAndFonts + fun:FcInit +} +{ + Fontconfig Leak? 
+ Memcheck:Leak + fun:malloc + fun:FcStrCopy + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + fun:XML_ParseBuffer + fun:FcConfigParseAndLoad + fun:FcConfigParseAndLoad + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libfontconfig.so.1.3.0 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + obj:/usr/lib32/libexpat.so.1.5.2 + fun:XML_ParseBuffer + fun:FcConfigParseAndLoad + fun:FcInitLoadConfig + fun:FcInitLoadConfigAndFonts + fun:FcInit +} +{ + SECMOD leak + Memcheck:Leak + fun:calloc + fun:PR_Calloc + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/nss/libnssckbi.so + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + fun:SECMOD_LoadModule + fun:SECMOD_LoadUserModule +} +{ + SECMOD leak? + Memcheck:Leak + fun:malloc + fun:PR_Malloc + fun:PL_ArenaAllocate + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + fun:SECMOD_LoadUserModule +} +{ + NSS leak part 3. + Memcheck:Leak + fun:calloc + fun:PR_Calloc + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + obj:/usr/lib32/libnss3.so.1d + fun:NSS_NoDB_Init +} +{ + GTK leak? 
+ Memcheck:Leak + fun:calloc + fun:g_malloc0 + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + fun:g_type_register_fundamental + obj:/usr/lib32/libgobject-2.0.so.0.1600.3 + fun:g_type_init_with_debug_flags + fun:g_type_init + fun:gdk_pre_parse_libgtk_only + obj:/usr/lib32/libgtk-x11-2.0.so.0.1200.9 + fun:g_option_context_parse + fun:gtk_parse_args + fun:gtk_init_check +} +{ + (Probable memory leak in Skia) + Memcheck:Leak + fun:_Znwj + fun:_ZN10SkFontHost15ResolveTypefaceEj + fun:_ZN24SkScalerContext_FreeType9setupSizeEv + fun:_ZN24SkScalerContext_FreeType19generateFontMetricsEPN7SkPaint11FontMetricsES2_ + fun:_ZN15SkScalerContext14getFontMetricsEPN7SkPaint11FontMetricsES2_ + fun:_ZN12SkGlyphCacheC1EPK12SkDescriptor + fun:_ZN12SkGlyphCache10VisitCacheEPK12SkDescriptorPFbPKS_PvES5_ +} +{ + Error in ICU + Memcheck:Overlap + fun:memcpy + fun:init_resb_result +} diff --git a/tools/valgrind/valgrind_analyze.py b/tools/valgrind/valgrind_analyze.py new file mode 100755 index 0000000..c73f6bf --- /dev/null +++ b/tools/valgrind/valgrind_analyze.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# valgrind_analyze.py + +''' Given a valgrind XML file, parses errors and uniques them.''' + +import logging +import optparse +import os +import sys +from xml.dom.minidom import parse + +# These are functions (using C++ mangled names) that we look for in stack +# traces. We don't show stack frames while pretty printing when they are below +# any of the following: +_TOP_OF_STACK_POINTS = [ + # Don't show our testing framework. + "testing::Test::Run()", + # Also don't show the internals of libc/pthread. + "start_thread" +] + +def getTextOf(top_node, name): + ''' Returns all text in all DOM nodes with a certain |name| that are children + of |top_node|. 
+ ''' + + text = "" + for nodes_named in top_node.getElementsByTagName(name): + text += "".join([node.data for node in nodes_named.childNodes + if node.nodeType == node.TEXT_NODE]) + return text + +def removeCommonRoot(source_dir, directory): + '''Returns a string with the string prefix |source_dir| removed from + |directory|.''' + if source_dir: + # Do this for safety, just in case directory is an absolute path outside of + # source_dir. + prefix = os.path.commonprefix([source_dir, directory]) + return directory[len(prefix) + 1:] + + return directory + +# Constants that give real names to the abbreviations in valgrind XML output. +INSTRUCTION_POINTER = "ip" +OBJECT_FILE = "obj" +FUNCTION_NAME = "fn" +SRC_FILE_DIR = "dir" +SRC_FILE_NAME = "file" +SRC_LINE = "line" + +class ValgrindError: + ''' Takes a <DOM Element: error> node and reads all the data from it. A + ValgrindError is immutable and is hashed on its pretty printed output. + ''' + + def __init__(self, source_dir, error_node): + ''' Copies all the relevant information out of the DOM and into object + properties. + + Args: + error_node: The <error></error> DOM node we're extracting from. + source_dir: Prefix that should be stripped from the <dir> node. 
+    '''
+
+    self._kind = getTextOf(error_node, "kind")
+    self._what = getTextOf(error_node, "what")
+
+    self._frames = []
+    stack_node = error_node.getElementsByTagName("stack")[0]
+
+    for frame in stack_node.getElementsByTagName("frame"):
+      frame_dict = {
+        INSTRUCTION_POINTER : getTextOf(frame, INSTRUCTION_POINTER),
+        OBJECT_FILE : getTextOf(frame, OBJECT_FILE),
+        FUNCTION_NAME : getTextOf(frame, FUNCTION_NAME),
+        SRC_FILE_DIR : removeCommonRoot(
+            source_dir, getTextOf(frame, SRC_FILE_DIR)),
+        SRC_FILE_NAME : getTextOf(frame, SRC_FILE_NAME),
+        SRC_LINE : getTextOf(frame, SRC_LINE)
+      }
+
+      self._frames += [frame_dict]
+
+      if frame_dict[FUNCTION_NAME] in _TOP_OF_STACK_POINTS:
+        break
+
+  def __str__(self):
+    ''' Pretty print the type and stack frame of this specific error.'''
+    output = self._kind + "\n"
+    for frame in self._frames:
+      output += (" " + (frame[FUNCTION_NAME] or frame[INSTRUCTION_POINTER]) +
+                 " (")
+
+      if frame[SRC_FILE_DIR] != "":
+        output += (frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] + ":" +
+                   frame[SRC_LINE])
+      else:
+        output += frame[OBJECT_FILE]
+      output += ")\n"
+
+    return output
+
+  def UniqueString(self):
+    ''' String to use for object identity. Don't print this, use str(obj)
+    instead.'''
+    rep = self._kind + " "
+    for frame in self._frames:
+      rep += frame[FUNCTION_NAME]
+
+      if frame[SRC_FILE_DIR] != "":
+        rep += frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME]
+      else:
+        rep += frame[OBJECT_FILE]
+
+    return rep
+
+  def __hash__(self):
+    return hash(self.UniqueString())
+  def __eq__(self, rhs):
+    return self.UniqueString() == rhs.UniqueString()
+
+class ValgrindAnalyze:
+  ''' Given a set of Valgrind XML files, parse all the errors out of them,
+  unique them and output the results.'''
+
+  def __init__(self, source_dir, files):
+    '''Reads in a set of files.
+
+    Args:
+      source_dir: Path to top of source tree for this build
+      files: A list of filenames.
+ ''' + + self._errors = set() + for file in files: + raw_errors = parse(file).getElementsByTagName("error") + for raw_error in raw_errors: + self._errors.add(ValgrindError(source_dir, raw_error)) + + def Report(self): + if self._errors: + logging.error("FAIL! There were %s errors: " % len(self._errors)) + + for error in self._errors: + logging.error(error) + + return -1 + + logging.info("PASS! No errors found!") + return 0 + +def _main(): + '''For testing only. The ValgrindAnalyze class should be imported instead.''' + retcode = 0 + parser = optparse.OptionParser("usage: %prog [options] <files to analyze>") + parser.add_option("", "--source_dir", + help="path to top of source tree for this build" + "(used to normalize source paths in baseline)") + + (options, args) = parser.parse_args() + if not len(args) >= 1: + parser.error("no filename specified") + filenames = args + + analyzer = ValgrindAnalyze(options.source_dir, filenames) + retcode = analyzer.Report() + + sys.exit(retcode) + +if __name__ == "__main__": + _main() diff --git a/tools/valgrind/valgrind_test.py b/tools/valgrind/valgrind_test.py new file mode 100755 index 0000000..1a6f363 --- /dev/null +++ b/tools/valgrind/valgrind_test.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# purify_test.py + +'''Runs an exe through Valgrind and puts the intermediate files in a +directory. 
+''' + +import datetime +import glob +import logging +import optparse +import os +import re +import shutil +import sys +import time + +import google.path_utils + +import common + +import valgrind_analyze + +rmtree = shutil.rmtree + +class Valgrind(): + TMP_DIR = "valgrind.tmp" + + def __init__(self): + self._data_dir = None + + def CreateOptionParser(self): + self._parser = optparse.OptionParser("usage: %prog [options] <program to " + "test>") + self._parser.add_option("-e", "--echo_to_stdout", + dest="echo_to_stdout", action="store_true", default=False, + help="echo purify output to standard output") + self._parser.add_option("-t", "--timeout", + dest="timeout", metavar="TIMEOUT", default=10000, + help="timeout in seconds for the run (default 10000)") + self._parser.add_option("", "--source_dir", + help="path to top of source tree for this build" + "(used to normalize source paths in baseline)") + self._parser.add_option("", "--data_dir", default=".", + help="path to where purify data files live") + self._parser.add_option("", "--generate_suppressions", action="store_true", + default=False, + help="Skip analysis and generate suppressions") + self._parser.description = __doc__ + + def ParseArgv(self): + self.CreateOptionParser() + self._options, self._args = self._parser.parse_args() + self._timeout = int(self._options.timeout) + self._data_dir = self._options.data_dir + self._generate_suppressions = self._options.generate_suppressions + self._source_dir = self._options.source_dir + return True + + def Setup(self): + return self.ParseArgv() + + def Execute(self): + ''' Execute the app to be tested after successful instrumentation. + Full execution command-line provided by subclassers via proc.''' + logging.info("starting execution...") + # note that self._args begins with the exe to be run + proc = ["valgrind", "--smc-check=all", "--leak-check=full", + "--track-origins=yes", "--num-callers=30"] + + # Either generate suppressions or load them. 
+    if self._generate_suppressions:
+      proc += ["--gen-suppressions=all"]
+    else:
+      proc += ["--xml=yes"]
+
+    suppressions = os.path.join(self._data_dir, "suppressions.txt")
+    if os.path.exists(suppressions):
+      proc += ["--suppressions=%s" % suppressions]
+    else:
+      logging.warning("WARNING: NOT USING SUPPRESSIONS!")
+
+    proc += ["--log-file=" + self.TMP_DIR + "/valgrind.%p"] + self._args
+
+    # If we have a valgrind.tmp directory, we failed to cleanup last time.
+    if os.path.exists(self.TMP_DIR):
+      shutil.rmtree(self.TMP_DIR)
+    os.mkdir(self.TMP_DIR)
+    common.RunSubprocess(proc, self._timeout)
+
+    # Always return true, even if running the subprocess failed. We depend on
+    # Analyze to determine if the run was valid. (This behaviour copied from
+    # the purify_test.py script.)
+    return True
+
+  def Analyze(self):
+    # Glob all the files in the "valgrind.tmp" directory
+    filenames = glob.glob(self.TMP_DIR + "/valgrind.*")
+    analyzer = valgrind_analyze.ValgrindAnalyze(self._source_dir, filenames)
+    # Propagate the analyzer's verdict (0 = pass, nonzero = errors found) so
+    # RunTestsAndAnalyze does not report failure on every clean run.
+    return analyzer.Report()
+
+  def Cleanup(self):
+    # Right now, we can cleanup by deleting our temporary directory. Other
+    # cleanup is still a TODO?
+    shutil.rmtree(self.TMP_DIR)
+    return True
+
+  def RunTestsAndAnalyze(self):
+    self.Execute()
+    if self._generate_suppressions:
+      logging.info("Skipping analysis to let you look at the raw output...")
+      return 0
+
+    retcode = self.Analyze()
+    if retcode:
+      logging.error("Analyze failed.")
+      return retcode
+    logging.info("Execution and analysis completed successfully.")
+    return 0
+
+  def Main(self):
+    '''Call this to run through the whole process: Setup, Execute, Analyze'''
+    start = datetime.datetime.now()
+    retcode = -1
+    if self.Setup():
+      retcode = self.RunTestsAndAnalyze()
+
+      # Skip cleanup on generate.
+ if not self._generate_suppressions: + self.Cleanup() + else: + logging.error("Setup failed") + end = datetime.datetime.now() + seconds = (end - start).seconds + hours = seconds / 3600 + seconds = seconds % 3600 + minutes = seconds / 60 + seconds = seconds % 60 + logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds)) + return retcode + + + +if __name__ == "__main__": + valgrind = Valgrind() + retcode = valgrind.Main() + sys.exit(retcode) + + |