author    nsylvain@chromium.org <nsylvain@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-11-20 23:31:59 +0000
committer nsylvain@chromium.org <nsylvain@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-11-20 23:31:59 +0000
commit    70aea9a5998558d4e356b7ffdba7af25fc9dde95 (patch)
tree      8e0a47e676d561f60ceb98c44f2c5c4bc5bf6083 /tools
parent    b41422d5a4b73bf35c759374625c98d7f7d11cd7 (diff)
download  chromium_src-70aea9a5998558d4e356b7ffdba7af25fc9dde95.zip
          chromium_src-70aea9a5998558d4e356b7ffdba7af25fc9dde95.tar.gz
          chromium_src-70aea9a5998558d4e356b7ffdba7af25fc9dde95.tar.bz2
Remove old files.
Review URL: http://codereview.chromium.org/425008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@32711 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r--  tools/purify/chrome_tests.py         523
-rwxr-xr-x  tools/purify/chrome_tests.sh          13
-rw-r--r--  tools/purify/common.py               344
-rw-r--r--  tools/purify/data/filters.pft        bin 13005 -> 0 bytes
-rw-r--r--  tools/purify/data/ignore.txt          12
-rw-r--r--  tools/purify/purify_analyze.py       939
-rw-r--r--  tools/purify/purify_coverage.py       87
-rw-r--r--  tools/purify/purify_inuse.py          92
-rw-r--r--  tools/purify/purify_message.py       610
-rw-r--r--  tools/purify/purify_test.py          251
-rw-r--r--  tools/purify/quantify_test.py         61
-rw-r--r--  tools/purify/sharded_test_runner.py   33
-rw-r--r--  tools/purify/test_runner.py           51
13 files changed, 0 insertions, 3016 deletions
diff --git a/tools/purify/chrome_tests.py b/tools/purify/chrome_tests.py
deleted file mode 100644
index 0484d8d..0000000
--- a/tools/purify/chrome_tests.py
+++ /dev/null
@@ -1,523 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# chrome_tests.py
-
-''' Runs various chrome tests through purify_test.py
-'''
-
-import glob
-import logging
-import optparse
-import os
-import stat
-import sys
-
-import google.logging_utils
-import google.path_utils
-import google.platform_utils
-
-import common
-
-
-class TestNotFound(Exception): pass
-
-class ChromeTests:
-
- def __init__(self, options, args, test):
- # the known list of tests
- self._test_list = {"test_shell": self.TestTestShell,
- "unit": self.TestUnit,
- "net": self.TestNet,
- "ipc": self.TestIpc,
- "base": self.TestBase,
- "layout": self.TestLayout,
- "dll": self.TestDll,
- "layout_all": self.TestLayoutAll,
- "ui": self.TestUI,
- "v8": self.TestV8}
-
- if test not in self._test_list:
- raise TestNotFound("Unknown test: %s" % test)
-
- # Make sure tests running under purify are using the
- # normal memory allocator instead of tcmalloc.
- os.environ["CHROME_ALLOCATOR"] = "winheap"
-
- self._options = options
- self._args = args
- self._test = test
-
- script_dir = google.path_utils.ScriptDir()
- utility = google.platform_utils.PlatformUtility(script_dir)
-
- # Compute the top of the tree (the "source dir") from the script dir (where
- # this script lives). We assume that the script dir is in tools/purify
- # relative to the top of the tree.
- self._source_dir = os.path.dirname(os.path.dirname(script_dir))
-
- # since this path is used for string matching, make sure it's always
- # an absolute Windows-style path
- self._source_dir = utility.GetAbsolutePath(self._source_dir)
-
- self._report_dir = options.report_dir
- if not self._report_dir:
- if not options.buildbot:
- self._report_dir = os.path.join(script_dir, "latest", test)
- else:
- # On the buildbot, we archive to a specific location on chrome-web
- # with a directory based on the test name and the current svn revision.
- # NOTE: These modules are located in trunk/tools/buildbot, which is not
- # in the default config. You'll need to check this out and add
- # scripts/* to your PYTHONPATH to test outside of the buildbot.
- import slave_utils
- import chromium_config
- chrome_web_dir = chromium_config.Archive.purify_test_result_archive
- current_version = str(slave_utils.SubversionRevision(self._source_dir))
- # This line is how the buildbot master figures out our directory.
- print "last change:", current_version
- self._report_dir = os.path.join(chrome_web_dir, test, current_version)
- if not os.path.exists(self._report_dir):
- os.makedirs(self._report_dir)
-
- purify_test = os.path.join(script_dir, "purify_test.py")
- self._command_preamble = ["python.exe", purify_test, "--echo_to_stdout",
- "--source_dir=%s" % (self._source_dir),
- "--save_cache"]
-
- def FindBuildDir(self, target, module, exe):
- module_dir = os.path.join(self._source_dir, module)
- dir_chrome = os.path.join(self._source_dir, "chrome", target)
- dir_module = os.path.join(module_dir, target)
- if not os.path.isdir(dir_chrome) and not os.path.isdir(dir_module):
- return None
-
- if exe:
- exe_chrome = os.path.join(dir_chrome, exe)
- exe_module = os.path.join(dir_module, exe)
- if not os.path.isfile(exe_chrome) and not os.path.isfile(exe_module):
- return None
- if os.path.isfile(exe_chrome) and not os.path.isfile(exe_module):
- return dir_chrome
- elif os.path.isfile(exe_module) and not os.path.isfile(exe_chrome):
- return dir_module
- elif (os.stat(exe_module)[stat.ST_MTIME] >
- os.stat(exe_chrome)[stat.ST_MTIME]):
- return dir_module
- else:
- return dir_chrome
- else:
- if os.path.isdir(dir_chrome) and not os.path.isdir(dir_module):
- return dir_chrome
- elif os.path.isdir(dir_module) and not os.path.isdir(dir_chrome):
- return dir_module
- elif (os.stat(dir_module)[stat.ST_MTIME] >
- os.stat(dir_chrome)[stat.ST_MTIME]):
- return dir_module
- else:
- return dir_chrome
-
- def ComputeBuildDir(self, module, exe=None):
- ''' Computes the build dir for the given module / exe '''
- if self._options.build_dir:
- self._build_dir = self._options.build_dir
- return self._build_dir
-
- # Use whatever build dir matches and was built most recently. We prefer
- # the 'Purify' build dir, so warn in other cases.
- dirs = []
- dir = self.FindBuildDir("Purify", module, exe)
- if dir:
- dirs.append(dir)
- dir = self.FindBuildDir("Release", module, exe)
- if dir:
- dirs.append(dir)
- dir = self.FindBuildDir("Debug", module, exe)
- if dir:
- dirs.append(dir)
- while True:
- if len(dirs) == 0:
- raise Exception("Can't find appropriate build dir")
- if len(dirs) == 1:
- self._build_dir = dirs[0]
- if self._build_dir.endswith("Debug"):
- logging.warning("Using Debug build. "
- "This is not recommended under Purify.")
- elif self._build_dir.endswith("Release"):
- logging.warning("Using Release build. "
- "Consider building the 'Purify' target instead since "
- "you don't need to worry about setting a magic "
- "environment variable for it to behave correctly.")
- return self._build_dir
- if os.stat(dirs[0])[stat.ST_MTIME] > os.stat(dirs[1])[stat.ST_MTIME]:
- dirs.remove(dirs[0])
- else:
- dirs.remove(dirs[1])
-
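For reference, ComputeBuildDir above boils down to "collect every candidate directory that exists and keep whichever was modified most recently." A minimal standalone sketch of that policy, with hypothetical candidate paths:

    import os

    def newest_dir(candidates):
        # Keep only the candidates that actually exist on disk.
        existing = [d for d in candidates if os.path.isdir(d)]
        if not existing:
            raise Exception("Can't find appropriate build dir")
        # st_mtime is the last-modification time; take the freshest dir.
        return max(existing, key=lambda d: os.stat(d).st_mtime)

    print(newest_dir(["chrome/Purify", "chrome/Release", "chrome/Debug"]))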
- def _DefaultCommand(self, module, exe=None):
- '''Generates the default command array that most tests will use.'''
- module_dir = os.path.join(self._source_dir, module)
- if module == "chrome":
- # unfortunately, not all modules have the same directory structure
- self._data_dir = os.path.join(module_dir, "test", "data", "purify")
- else:
- self._data_dir = os.path.join(module_dir, "data", "purify")
-
- cmd = list(self._command_preamble)
- cmd.append("--data_dir=%s" % self._data_dir)
- cmd.append("--report_dir=%s" % self._report_dir)
- if self._options.baseline:
- cmd.append("--baseline")
- if self._options.verbose:
- cmd.append("--verbose")
-
- # Recompute _build_dir since the module and exe might have changed from
- # a previous call (we might be running multiple tests).
- self.ComputeBuildDir(module, exe)
- if exe:
- cmd.append(os.path.join(self._build_dir, exe))
- return cmd
-
- def Run(self):
- ''' Runs the test specified by command-line argument --test '''
- logging.info("running test %s" % (self._test))
- return self._test_list[self._test]()
-
- def _ReadGtestFilterFile(self, name, cmd):
- '''Read a file which is a list of tests to filter out with --gtest_filter
- and append the command-line option to cmd.
- '''
- filters = []
- filename = os.path.join(self._data_dir, name + ".gtest.txt")
- if os.path.exists(filename):
- logging.info("using gtest filter from %s" % filename)
- f = open(filename, 'r')
- for line in f.readlines():
- if line.startswith("#") or line.startswith("//") or line.isspace():
- continue
- line = line.rstrip()
- filters.append(line)
- gtest_filter = self._options.gtest_filter
- if len(filters):
- if gtest_filter:
- gtest_filter += ":"
- if gtest_filter.find("-") < 0:
- gtest_filter += "-"
- else:
- gtest_filter = "-"
- gtest_filter += ":".join(filters)
- if gtest_filter:
- cmd.append("--gtest_filter=%s" % gtest_filter)
-
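For reference, _ReadGtestFilterFile turns the filter file into a single exclusion expression: names are joined with ':' and the group is prefixed with '-' so gtest skips them. A standalone sketch of that string building (the test names are invented):

    def build_gtest_filter(existing, excluded):
        # gtest syntax: positive patterns, then '-', then negative patterns,
        # all separated by ':' (this mirrors the string building above).
        if not excluded:
            return existing
        prefix = existing
        if prefix:
            prefix += ":"
            if "-" not in prefix:
                prefix += "-"
        else:
            prefix = "-"
        return prefix + ":".join(excluded)

    print(build_gtest_filter("", ["FooTest.Bar", "BazTest.*"]))
    # prints: -FooTest.Bar:BazTest.*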
- def SimpleTest(self, module, name, total_shards=None):
- cmd = self._DefaultCommand(module, name)
- exe = cmd[-1]
- current_path = os.path.dirname(sys.argv[0])
-
- # TODO(nsylvain): Add a flag to disable this.
- if total_shards:
- script = ["python.exe",
- os.path.join(current_path, "sharded_test_runner.py"), exe,
- str(total_shards)]
- return self.ScriptedTest(module, name, name, script, multi=True)
- elif self._options.run_singly:
- script = ["python.exe",
- os.path.join(current_path, "test_runner.py"), exe]
- return self.ScriptedTest(module, name, name, script, multi=True)
- else:
- self._ReadGtestFilterFile(name, cmd)
- cmd.append("--gtest_print_time")
- return common.RunSubprocess(cmd, 0)
-
- def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None,
- out_dir_extra=None):
- '''Purify a target exe, which will be executed one or more times via a
- script or driver program.
- Args:
- module - which top level component this test is from (webkit, base, etc.)
- exe - the name of the exe (it's assumed to exist in build_dir)
- name - the name of this test (used to name output files)
- script - the driver program or script. If it's python.exe, we use
- search-path behavior to execute, otherwise we assume that it is in
- build_dir.
- multi - a boolean hint that the exe will be run multiple times, generating
- multiple output files (without this option, only the last run will be
- recorded and analyzed)
- cmd_args - extra arguments to pass to the purify_test.py script
- out_dir_extra - optional subdirectory appended to report_dir for output
- '''
- if out_dir_extra:
- self._report_dir = os.path.join(self._report_dir, out_dir_extra)
- cmd = self._DefaultCommand(module)
- exe = os.path.join(self._options.build_dir, exe)
- cmd.append("--exe=%s" % exe)
- cmd.append("--name=%s" % name)
- if multi:
- if out_dir_extra:
- if os.path.exists(self._report_dir):
- old_files = glob.glob(os.path.join(self._report_dir, "*.txt"))
- for f in old_files:
- os.remove(f)
- else:
- os.makedirs(self._report_dir)
- out_file = os.path.join(self._report_dir, "%s%%5d.txt" % name)
- cmd.append("--out_file=%s" % out_file)
- if cmd_args:
- cmd.extend(cmd_args)
- if script[0] != "python.exe" and not os.path.exists(script[0]):
- script[0] = os.path.join(self._options.build_dir, script[0])
- cmd.extend(script)
- self._ReadGtestFilterFile(name, cmd)
- return common.RunSubprocess(cmd, 0)
-
- def InstrumentDll(self):
- '''Does a blocking Purify instrumentation of chrome.dll.'''
- # TODO(paulg): Make this code support any DLL.
- cmd = self._DefaultCommand("chrome")
- cmd.append("--instrument_only")
- cmd.append(os.path.join(self._options.build_dir, "chrome.dll"))
- result = common.RunSubprocess(cmd, 0)
- if result:
- logging.error("Instrumentation error: %d" % result)
- return result
-
- def TestDll(self):
- return self.InstrumentDll()
-
- def TestBase(self):
- return self.SimpleTest("base", "base_unittests.exe")
-
- def TestIpc(self):
- return self.SimpleTest("chrome", "ipc_tests.exe")
-
- def TestNet(self):
- return self.SimpleTest("net", "net_unittests.exe")
-
- def TestTestShell(self):
- return self.SimpleTest("webkit", "test_shell_tests.exe")
-
- def TestUnit(self):
- return self.SimpleTest("chrome", "unit_tests.exe", total_shards=5)
-
- def TestLayoutAll(self):
- return self.TestLayout(run_all=True)
-
- def TestLayout(self, run_all=False):
- # A "chunk file" is maintained in the local directory so that each test
- # runs a slice of the layout tests of size chunk_size that increments with
- # each run. Since tests can be added and removed from the layout tests at
- # any time, this is not going to give exact coverage, but it will allow us
- # to continuously run small slices of the layout tests under purify rather
- # than having to run all of them in one shot.
- chunk_num = 0
- # Tests currently seem to take about 20-30s each.
- chunk_size = 120 # so about 40-60 minutes per run
- chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt")
- if not run_all:
- try:
- f = open(chunk_file)
- if f:
- str = f.read()
- if len(str):
- chunk_num = int(str)
- # This should be enough so that we have a couple of complete runs
- # of test data stored in the archive (although note that when we loop,
- # we're almost guaranteed not to be at the end of the test list).
- if chunk_num > 10000:
- chunk_num = 0
- f.close()
- except IOError, (errno, strerror):
- logging.error("error reading from file %s (%d, %s)" % (chunk_file,
- errno, strerror))
-
- script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
- "run_webkit_tests.py")
- script_cmd = ["python.exe", script, "--run-singly", "-v",
- "--noshow-results", "--time-out-ms=200000",
- "--nocheck-sys-deps"]
- if len(self._args):
- # if the arg is a txt file, then treat it as a list of tests
- if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
- script_cmd.append("--test-list=%s" % self._args[0])
- else:
- script_cmd.extend(self._args)
-
- if run_all:
- ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
- script_cmd, multi=True, cmd_args=["--timeout=0"])
- return ret
-
- # Store each chunk in its own directory so that we can find the data later.
- chunk_dir = os.path.join("chunk_%05d" % chunk_num)
- script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
-
- # Put the layout test results in the chunk dir as well.
- script_cmd.append("--results-dir=%s" % os.path.join(self._report_dir,
- chunk_dir))
-
- ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
- script_cmd, multi=True, cmd_args=["--timeout=0"],
- out_dir_extra=chunk_dir)
-
- # Wait until after the test runs to completion to write out the new chunk
- # number. This way, if the bot is killed, we'll start running again from
- # the current chunk rather than skipping it.
- try:
- f = open(chunk_file, "w")
- chunk_num += 1
- f.write("%d" % chunk_num)
- f.close()
- except IOError, (errno, strerror):
- logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
- strerror))
- # Since we're running small chunks of the layout tests, it's important to
- # mark the ones that have errors in them. These won't be visible in the
- # summary list for long, but will be useful for someone reviewing this bot.
- #return ret
- # For now, since a fair number of layout tests are still red, we'll use the
- # magic orange indicator return code to avoid making the tree look red when
- # nothing has changed. Once the layout tests reach a stable green,
- # this code should be removed.
- # BUG=7516
- if ret:
- return 88
- return 0
-
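The chunk bookkeeping in TestLayout is a persistent counter: read the last chunk number from a scratch file, run that slice, and only write the incremented value back once the run completes, so a killed bot repeats the current chunk instead of skipping it. A minimal sketch of the counter, with a made-up file name:

    import os
    import tempfile

    CHUNK_FILE = os.path.join(tempfile.gettempdir(), "example_layout_chunk.txt")

    def read_chunk():
        try:
            f = open(CHUNK_FILE)
            text = f.read()
            f.close()
            return int(text) if text.strip() else 0
        except IOError:
            return 0  # first run, or unreadable scratch file

    def write_chunk(n):
        f = open(CHUNK_FILE, "w")
        f.write("%d" % n)
        f.close()

    chunk = read_chunk()
    # ... run the layout-test slice for `chunk` here ...
    write_chunk(chunk + 1)  # written only after the run completes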
- def TestUIAll(self):
- if not self._options.no_reinstrument:
- instrumentation_error = self.InstrumentDll()
- if instrumentation_error:
- return instrumentation_error
- return self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
- ["ui_tests.exe",
- "--single-process",
- "--ui-test-timeout=180000",
- "--ui-test-action-timeout=80000",
- "--ui-test-action-max-timeout=180000",
- "--ui-test-sleep-timeout=40000"],
- multi=True)
-
- def TestUI(self):
- # If --gtest_filter is set, then we need to ignore the batch index.
- if self._options.gtest_filter:
- return self.TestUIAll()
-
- # Similar to layout test, we run a slice of UI tests with each run.
- # This is achieved by using --batch-count (total number of slices) and
- # --batch-index (current slice index) command line switches of UI tests.
- # A "batch index file" is maintained in the local directory so that each
- # test runs the kth slice of UI tests and increments k for next run.
- # Note: a full cycle of UI tests is finished in batch_count runs.
- # If new test cases are added in the middle of a cycle, some tests
- # may get skipped in the current cycle. For more discussion on this issue,
- # see http://codereview.chromium.org/149600.
-
- # Break UI tests into 16 slices so we have about 30-40 minutes per run.
- batch_count = 16
- batch_index = 0
- batch_index_file = os.path.join(os.environ["TEMP"],
- "purify_ui_batch_index.txt")
- try:
- f = open(batch_index_file)
- if f:
- str = f.read()
- if len(str):
- batch_index = int(str)
- if batch_index >= batch_count:
- batch_index = 0
- f.close()
- except IOError, (errno, strerror):
- logging.error("error reading from file %s (%d, %s)" % (batch_index_file,
- errno, strerror))
-
- script_cmd = ["ui_tests.exe", "--single-process",
- "--ui-test-timeout=180000",
- "--ui-test-action-timeout=80000",
- "--ui-test-action-max-timeout=180000",
- "--ui-test-sleep-timeout=40000",
- "--batch-count=%s" % batch_count,
- "--batch-index=%s" % batch_index]
-
- ret = self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
- script_cmd, multi=True)
-
- # Wait until after the test runs to completion to write out the new batch
- # index. This way, if the bot is killed, we'll start running again from
- # the current chunk rather than skipping it.
- try:
- f = open(batch_index_file, "w")
- batch_index += 1
- if batch_index == batch_count:
- batch_index = 0
- f.write("%d" % batch_index)
- f.close()
- except IOError, (errno, strerror):
- logging.error("error writing to file %s (%d, %s)" % (batch_index_file,
- errno, strerror))
-
- # As we just started to run full pass UI test under purify, there are a
- # fair number of test errors.
- if ret:
- return 88
- return 0
-
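The --batch-count/--batch-index switches passed above amount to running the k-th of N roughly equal slices of the test list. One plausible way to slice (the actual ui_tests.exe implementation may differ, and the test names are invented):

    def batch_slice(tests, batch_count, batch_index):
        # Ceiling division so every test lands in exactly one batch.
        per_batch = (len(tests) + batch_count - 1) // batch_count
        start = batch_index * per_batch
        return tests[start:start + per_batch]

    tests = ["UITest.%d" % i for i in range(50)]
    print(batch_slice(tests, 16, 0))  # the first 4 of 50 tests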
- def TestV8(self):
- shell = "v8_shell.exe"
- # We need to compute _build_dir early in order to pass in the
- # shell path as an argument to the test script.
- self.ComputeBuildDir("chrome", shell)
- script = os.path.join(self._source_dir, "v8", "tools", "test.py")
- shell_path = os.path.join(self._options.build_dir, shell)
- return self.ScriptedTest("chrome", shell, "v8",
- ["python.exe",
- script,
- "--no-build",
- "--progress=dots",
- "--shell=" + shell_path],
- multi = True)
-
-def _main(argv):
- parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
- "[-t <test> ...]")
- parser.disable_interspersed_args()
- parser.add_option("-b", "--build_dir",
- help="the location of the output of the compiler output")
- parser.add_option("-t", "--test", action="append",
- help="which test to run")
- parser.add_option("", "--baseline", action="store_true", default=False,
- help="generate baseline data instead of validating")
- parser.add_option("", "--gtest_filter",
- help="additional arguments to --gtest_filter")
- parser.add_option("-v", "--verbose", action="store_true", default=False,
- help="verbose output - enable debug log messages")
- parser.add_option("", "--no-reinstrument", action="store_true", default=False,
- help="Don't force a re-instrumentation for ui_tests")
- parser.add_option("", "--run-singly", action="store_true", default=False,
- help="run tests independently of each other so that they "
- "don't interfere with each other and so that errors "
- "can be accurately attributed to their source");
- parser.add_option("", "--report_dir",
- help="path where report files are saved")
- parser.add_option("", "--buildbot", action="store_true", default=False,
- help="whether we're being run in a buildbot environment")
- options, args = parser.parse_args()
-
- if options.verbose:
- google.logging_utils.config_root(logging.DEBUG)
- else:
- google.logging_utils.config_root()
-
- if not options.test or not len(options.test):
- parser.error("--test not specified")
-
- for t in options.test:
- tests = ChromeTests(options, args, t)
- ret = tests.Run()
- if ret: return ret
- return 0
-
-
-if __name__ == "__main__":
- ret = _main(sys.argv)
- sys.exit(ret)
diff --git a/tools/purify/chrome_tests.sh b/tools/purify/chrome_tests.sh
deleted file mode 100755
index 7455048..0000000
--- a/tools/purify/chrome_tests.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-system_root=`cygpath "$SYSTEMROOT"`
-export PATH="/usr/bin:$system_root/system32:$system_root:$system_root/system32/WBEM"
-
-exec_dir=$(dirname $0)
-
-"$exec_dir/../../third_party/python_24/python.exe" \
- "$exec_dir/chrome_tests.py" "$@"
diff --git a/tools/purify/common.py b/tools/purify/common.py
deleted file mode 100644
index 7702be0..0000000
--- a/tools/purify/common.py
+++ /dev/null
@@ -1,344 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# common.py
-
-""" Common code used by purify_test.py and quantify_test.py in order to automate
-running of Rational Purify and Quantify in a consistent manner.
-"""
-
-# Purify and Quantify have a front-end (e.g. quantifyw.exe) which talks to a
-# back-end engine (e.g. quantifye.exe). The back-end seems to handle
-# instrumentation, while the front-end controls program execution and
-# measurement. The front-end will dynamically launch the back-end if
-# instrumentation is needed (sometimes in the middle of a run if a dll is
-# loaded dynamically).
-# In an ideal world, this script would simply execute the front-end and check
-# the output. However, purify is not the most reliable or well-documented app
-# on the planet, and my attempts to get it to run this way led to the back-end
-# engine hanging during instrumentation. The workaround to this was to run two
-# passes, first running the engine to do instrumentation rather than letting
-# the front-end do it for you, then running the front-end to actually do the
-# run. Each time through we're deleting all of the instrumented files in the
-# cache to ensure that we're testing that instrumentation works from scratch.
-# (although this can be changed with an option)
-
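In miniature, the two-pass workaround described above is: invoke the back-end engine once purely to instrument, then invoke the front-end to execute the instrumented binary. The flags below are illustrative only, not a verified Purify command line:

    import subprocess

    def two_pass_run(engine, frontend, exe):
        # Pass 1: have the engine instrument the target up front so the
        # front-end never launches instrumentation mid-run (hypothetical
        # /Run=no flag meaning "instrument but don't execute").
        if subprocess.call([engine, "/Run=no", exe]) != 0:
            raise Exception("instrumentation failed")
        # Pass 2: the front-end executes the now-cached instrumented exe.
        return subprocess.call([frontend, exe])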
-import datetime
-import logging
-import optparse
-import os
-import signal
-import subprocess
-import sys
-import tempfile
-import time
-
-import google.logging_utils
-
-# hard-coded location of Rational files and directories
-PROGRAMFILES_PATH = os.environ.get('PROGRAMFILES',
- os.path.join("C:\\", "Program Files"))
-RATIONAL_PATH = os.path.join(PROGRAMFILES_PATH, "Rational")
-COMMON_PATH = os.path.join(RATIONAL_PATH, "common")
-PPLUS_PATH = os.path.join(RATIONAL_PATH, "PurifyPlus")
-PURIFY_PATH = os.path.join(COMMON_PATH, "purify.exe")
-PURIFYW_PATH = os.path.join(PPLUS_PATH, "purifyW.exe")
-PURIFYE_PATH = os.path.join(PPLUS_PATH, "purifye.exe")
-QUANTIFYE_PATH = os.path.join(PPLUS_PATH, "quantifye.exe")
-QUANTIFYW_PATH = os.path.join(PPLUS_PATH, "quantifyw.exe")
-
-class TimeoutError(Exception): pass
-
-
-def _print_line(line, flush=True):
- # Printing to a text file (including stdout) on Windows always winds up
- # using \r\n automatically. On buildbot, this winds up being read by a master
- # running on Linux, so we manually convert crlf to '\n'
- print line.rstrip() + '\n',
- if flush:
- sys.stdout.flush()
-
-def RunSubprocess(proc, timeout=0, detach=False):
- """ Runs a subprocess, until it finishes or |timeout| is exceeded and the
- process is killed with taskkill. A |timeout| <= 0 means no timeout.
-
- Args:
- proc: list of process components (exe + args)
- timeout: how long to wait before killing, <= 0 means wait forever
- detach: Whether to pass the DETACHED_PROCESS argument to CreateProcess
- on Windows. This is used by Purify subprocesses on buildbot which
- seem to get confused by the parent console that buildbot sets up.
- """
-
- logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
- if detach:
- # see MSDN docs for "Process Creation Flags"
- DETACHED_PROCESS = 0x8
- p = subprocess.Popen(proc, creationflags=DETACHED_PROCESS)
- else:
- # For non-detached processes, manually read and print out stdout and stderr.
- # By default, the subprocess is supposed to inherit these from its parent,
- # however when run under buildbot, it seems unable to read data from a
- # grandchild process, so we have to read the child and print the data as if
- # it came from us for buildbot to read it. We're not sure why this is
- # necessary.
- # TODO(erikkay): should we buffer stderr and stdout separately?
- p = subprocess.Popen(proc, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- logging.info("started subprocess")
-
- # How long to wait (in seconds) before printing progress log messages.
- progress_delay = 300
- progress_delay_time = time.time() + progress_delay
- did_timeout = False
- if timeout > 0:
- wait_until = time.time() + timeout
- while p.poll() is None and not did_timeout:
- if not detach:
- line = p.stdout.readline()
- while line and not did_timeout:
- _print_line(line)
- line = p.stdout.readline()
- if timeout > 0:
- did_timeout = time.time() > wait_until
- else:
- # When we detach, blocking on reading stdout doesn't work, so we sleep
- # a short time and poll.
- time.sleep(0.5)
- if time.time() >= progress_delay_time:
- # Force output on a periodic basis to avoid getting killed off by the
- # buildbot.
- # TODO(erikkay): I'd prefer a less obtrusive 'print ".",' with a flush
- # but because of how we're doing subprocesses, this doesn't appear to
- # work reliably.
- logging.info("%s still running..." % os.path.basename(proc[0]))
- progress_delay_time = time.time() + progress_delay
- if timeout > 0:
- did_timeout = time.time() > wait_until
-
- if did_timeout:
- logging.info("process timed out")
- else:
- logging.info("process ended, did not time out")
-
- if did_timeout:
- if sys.platform == "win32":
- subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
- else:
- # Does this kill all children, too?
- os.kill(p.pid, signal.SIGINT)
- logging.error("KILLED %d" % p.pid)
- # Give the process a chance to actually die before continuing
- # so that cleanup can happen safely.
- time.sleep(1.0)
- logging.error("TIMEOUT waiting for %s" % proc[0])
- raise TimeoutError(proc[0])
- elif not detach:
- for line in p.stdout.readlines():
- _print_line(line, False)
- if sys.platform != 'darwin': # stdout flush fails on Mac
- logging.info("flushing stdout")
- p.stdout.flush()
-
- logging.info("collecting result code")
- result = p.poll()
- if result:
- logging.error("%s exited with non-zero result code %d" % (proc[0], result))
- return result
-
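Stripped of the buildbot-specific output handling, RunSubprocess's timeout logic is a poll loop with a deadline that falls back to taskkill on Windows. A condensed sketch:

    import os
    import signal
    import subprocess
    import sys
    import time

    def run_with_timeout(cmd, timeout):
        p = subprocess.Popen(cmd)
        deadline = time.time() + timeout
        while p.poll() is None:
            if time.time() > deadline:
                if sys.platform == "win32":
                    # /T kills the whole process tree, /F forces termination.
                    subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
                else:
                    os.kill(p.pid, signal.SIGINT)
                raise Exception("TIMEOUT waiting for %s" % cmd[0])
            time.sleep(0.5)
        return p.returncode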
-
-def FixPath(path):
- """We pass computed paths to Rational as arguments, so these paths must be
- valid windows paths. When running in cygwin's python, computed paths
- wind up looking like /cygdrive/c/..., so we need to call out to cygpath
- to fix them up.
- """
- if sys.platform != "cygwin":
- return path
- p = subprocess.Popen(["cygpath", "-a", "-m", path], stdout=subprocess.PIPE)
- return p.communicate()[0].rstrip()
-
-
-class Rational(object):
- ''' Common superclass for Purify and Quantify automation objects. Handles
- common argument parsing as well as the general program flow of Instrument,
- Execute, Analyze.
- '''
-
- def __init__(self):
- google.logging_utils.config_root()
- self._out_file = None
-
- def Run(self):
- '''Call this to run through the whole process:
- Setup, Instrument, Execute, Analyze'''
- start = datetime.datetime.now()
- retcode = -1
- if self.Setup():
- retcode = self._Run()
- self.Cleanup()
- else:
- logging.error("Setup failed")
- end = datetime.datetime.now()
- seconds = (end - start).seconds
- hours = seconds / 3600
- seconds = seconds % 3600
- minutes = seconds / 60
- seconds = seconds % 60
- logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
- return retcode
-
- def _Run(self):
- retcode = -1
- if not self.Instrument():
- logging.error("Instrumentation failed.")
- return retcode
- if self._instrument_only:
- logging.info("Instrumentation completed successfully.")
- return 0
- if not self.Execute():
- logging.error("Execute failed.")
- return retcode
- retcode = self.Analyze()
- if retcode:
- logging.error("Analyze failed.")
- return retcode
- logging.info("Instrumentation and execution completed successfully.")
- return 0
-
- def CreateOptionParser(self):
- '''Creates OptionParser with shared arguments. Overridden by subclassers
- to add custom arguments.'''
- parser = optparse.OptionParser("usage: %prog [options] <program to test>")
- # since the trailing program likely has command-line args of itself
- # we need to stop parsing when we reach the first positional arg
- parser.disable_interspersed_args()
- parser.add_option("-o", "--out_file", dest="out_file", metavar="OUTFILE",
- default="",
- help="output data is written to OUTFILE")
- parser.add_option("-s", "--save_cache",
- dest="save_cache", action="store_true", default=False,
- help="don't delete instrumentation cache")
- parser.add_option("-c", "--cache_dir", dest="cache_dir", metavar="CACHEDIR",
- default="",
- help="location of instrumentation cache is CACHEDIR")
- parser.add_option("-m", "--manual",
- dest="manual_run", action="store_true", default=False,
- help="target app is being run manually, don't timeout")
- parser.add_option("-t", "--timeout",
- dest="timeout", metavar="TIMEOUT", default=10000,
- help="timeout in seconds for the run (default 10000)")
- parser.add_option("-v", "--verbose", action="store_true", default=False,
- help="verbose output - enable debug log messages")
- parser.add_option("", "--instrument_only", action="store_true",
- default=False,
- help="Only instrument the target without running")
- self._parser = parser
-
- def Setup(self):
- if self.ParseArgv():
- logging.info("instrumentation cache in %s" % self._cache_dir)
- logging.info("output saving to %s" % self._out_file)
- # Ensure that Rational's common dir and cache dir are in the front of the
- # path. The common dir is required for purify to run in any case, and
- # the cache_dir is required when using the /Replace=yes option.
- os.environ["PATH"] = (COMMON_PATH + ";" + self._cache_dir + ";" +
- os.environ["PATH"])
- # clear the cache to make sure we're starting clean
- self.__ClearInstrumentationCache()
- return True
- return False
-
- def Instrument(self, proc):
- '''Instrument the app to be tested. Full instrumentation command-line
- provided by subclassers via proc.'''
- logging.info("starting instrumentation...")
- if RunSubprocess(proc, self._timeout, detach=True) == 0:
- if "/Replace=yes" in proc:
- if os.path.exists(self._exe + ".Original"):
- return True
- elif self._instrument_only:
- # TODO(paulg): Catch instrumentation errors and clean up properly.
- return True
- elif os.path.isdir(self._cache_dir):
- for cfile in os.listdir(self._cache_dir):
- # TODO(erikkay): look for the actual munged purify filename
- ext = os.path.splitext(cfile)[1]
- if ext == ".exe":
- return True
- logging.error("no instrumentation data generated")
- return False
-
- def Execute(self, proc):
- ''' Execute the app to be tested after successful instrumentation.
- Full execution command-line provided by subclassers via proc.'''
- logging.info("starting execution...")
- # note that self._args begins with the exe to be run
- proc += self._args
- if RunSubprocess(proc, self._timeout) == 0:
- return True
- return False
-
- def Analyze(self):
- '''Analyze step after a successful Execution. Should be overridden
- by the subclasser if analysis is desired.
- Returns 0 for success, 88 for warning (see ReturnCodeCommand) and anything
- else for error
- '''
- return -1
-
- def ParseArgv(self):
- '''Parses arguments according to CreateOptionParser.
- Subclassers must override if they have extra arguments.'''
- self.CreateOptionParser()
- self._options, self._args = self._parser.parse_args()
- if self._options.verbose:
- google.logging_utils.config_root(logging.DEBUG)
- self._save_cache = self._options.save_cache
- self._manual_run = self._options.manual_run
- if self._manual_run:
- logging.info("manual run - timeout disabled")
- self._timeout = 0
- else:
- self._timeout = int(self._options.timeout)
- logging.info("timeout set to %ds" % (self._timeout))
- if self._save_cache:
- logging.info("saving instrumentation cache")
- if not self._options.cache_dir:
- try:
- temp_dir = os.environ["TEMP"]
- except KeyError:
- temp_dir = tempfile.mkdtemp()
- self._cache_dir = os.path.join(FixPath(temp_dir),
- "instrumentation_cache")
- else:
- self._cache_dir = FixPath(os.path.abspath(self._options.cache_dir))
- if self._options.out_file:
- self._out_file = FixPath(os.path.abspath(self._options.out_file))
- if len(self._args) == 0:
- self._parser.error("missing program to %s" % (self.__class__.__name__,))
- return False
- self._exe = self._args[0]
- self._exe_dir = FixPath(os.path.abspath(os.path.dirname(self._exe)))
- self._instrument_only = self._options.instrument_only
- return True
-
- def Cleanup(self):
- # delete the cache to avoid filling up the hard drive when we're using
- # temporary directory names
- self.__ClearInstrumentationCache()
-
- def __ClearInstrumentationCache(self):
- if not self._save_cache:
- logging.info("clearing instrumentation cache %s" % self._cache_dir)
- if os.path.isdir(self._cache_dir):
- for cfile in os.listdir(self._cache_dir):
- file = os.path.join(self._cache_dir, cfile)
- if os.path.isfile(file):
- try:
- os.remove(file)
- except:
- logging.warning("unable to delete file %s: %s" % (file,
- sys.exc_info()[0]))
diff --git a/tools/purify/data/filters.pft b/tools/purify/data/filters.pft
deleted file mode 100644
index ca7e0e6..0000000
--- a/tools/purify/data/filters.pft
+++ /dev/null
Binary files differ
diff --git a/tools/purify/data/ignore.txt b/tools/purify/data/ignore.txt
deleted file mode 100644
index 16a11b2..0000000
--- a/tools/purify/data/ignore.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-# See bug 1157381
-Pure: Trap bits found in live chunk
-
-# See bugs 1151263 and 1164562
-Memory leak .+ allocated in InitSecurityInterfaceA \[SECUR32\.DLL\]
-
-# See bug 1163766
-# Ugly regexps are trying to deal with Purify's demangling bugs.
-Memory leak .+ allocated in \?NewRunnableMethod.+ExpireHistoryBackend.+ScopedRunnableMethodFactory
-Memory leak .+ allocated in RevocableStore::RevokeAll\(void\)
-Memory leak .+ allocated in \?NewRunnableMethod.+CommitLaterTask.+CancelableTask.+CommitLaterTask
-Memory leak .+ allocated in history::HistoryBackend::ScheduleCommit\(void\)
diff --git a/tools/purify/purify_analyze.py b/tools/purify/purify_analyze.py
deleted file mode 100644
index d08c901..0000000
--- a/tools/purify/purify_analyze.py
+++ /dev/null
@@ -1,939 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# purify_analyze.py
-
-''' Given a Purify text file, parses messages, normalizes and uniques them.
-If there's an existing baseline of this data, it can compare against that
-baseline and return an error code if there are any new errors not in the
-baseline. '''
-
-import logging
-import optparse
-import os
-import re
-import sys
-
-import google.logging_utils
-import google.path_utils
-
-import purify_message
-
-class MemoryTreeNode(object):
- ''' A node in a tree representing stack traces of memory allocation.
- Essentially, each node in the tree is a hashtable mapping a child
- function name to a child node. Each node contains the total number
- of bytes of all of its descendants.
- See also: PurifyAnalyze.PrintMemoryInUse()
- '''
-
- pat_initializer = re.compile('(.*)\`dynamic initializer for \'(.*)\'\'')
-
- @classmethod
- def CreateTree(cls, message_list):
- '''Creates a tree from message_list. All of the Message objects are built
- into a tree with a default "ROOT" root node that is then returned.
- Args:
- message_list: a MessageList object.
- '''
- root = MemoryTreeNode("ROOT", 0, 0)
- msgs = message_list.AllMessages()
- for msg in msgs:
- bytes = msg._bytes
- blocks = msg._blocks
- stack = msg.GetAllocStack()
- stack_lines = stack.GetLines()
- size = len(stack_lines)
- node = root
- node._AddAlloc(bytes, blocks)
- counted = False
- # process stack lines from the bottom up to build a call-stack tree
- functions = [line["function"] for line in stack_lines]
- functions.reverse()
- for func in functions:
- if node == root:
- m = MemoryTreeNode.pat_initializer.match(func)
- if m:
- node = node._AddChild("INITIALIZERS", bytes, blocks)
- func = m.group(1) + m.group(2)
- # don't process elided or truncated stack lines
- if func:
- node = node._AddChild(func, bytes, blocks)
- counted = True
- if not counted:
- # Nodes with no stack frames in our code wind up not being counted
- # above. These seem to be attributable to Windows DLL
- # initialization, so just throw them into that bucket.
- node._AddChild("WINDOWS", bytes, blocks)
- return root
-
- def __init__(self, function, bytes, blocks):
- '''
- Args:
- function: A string representing a unique method or function.
- bytes: initial number of bytes allocated in this node
- blocks: initial number of blocks allocated in this node
- '''
- self._function = function
- self._bytes = bytes
- self._blocks = blocks
- self._allocs = 1
- self._children = {}
-
- def _AddAlloc(self, bytes, blocks):
- '''Adds bytes and blocks to this node's allocation totals
- '''
- self._allocs += 1
- self._bytes += bytes
- self._blocks += blocks
-
- def _AddChild(self, function, bytes, blocks):
- '''Adds a child node if not present. Otherwise, adds
- bytes and blocks to its allocation total.
- '''
- if function not in self._children:
- self._children[function] = MemoryTreeNode(function, bytes, blocks)
- else:
- self._children[function]._AddAlloc(bytes, blocks)
- return self._children[function]
-
- def __cmp__(self, other):
- # sort by size, then blocks, then function name
- return cmp((self._bytes, self._blocks, self._function),
- (other._bytes, other._blocks, other._function))
-
- def __str__(self):
- return "(%d bytes, %d blocks, %d allocs) %s" % (
- self._bytes, self._blocks, self._allocs, self._function)
-
- def PrintRecursive(self, padding="", byte_filter=0):
- '''Print the tree and all of its children recursively (depth-first). All
- nodes at a given level of the tree are sorted in descending order by size.
-
- Args:
- padding: Printed at the front of the line. Each recursive call adds a
- single space character.
- byte_filter: a number of bytes below which we'll prune the tree
- '''
- print "%s%s" % (padding, self)
- padding = padding + " "
- # sort the children in descending order (see __cmp__)
- swapped = self._children.values()
- swapped.sort(reverse=True)
- rest_bytes = 0
- rest_blocks = 0
- rest_allocs = 0
- for node in swapped:
- if node._bytes < byte_filter:
- rest_bytes += node._bytes
- rest_blocks += node._blocks
- rest_allocs += node._allocs
- else:
- node.PrintRecursive(padding, byte_filter)
- if rest_bytes:
- print "%s(%d bytes, %d blocks, %d allocs) PRUNED" % (padding,
- rest_bytes, rest_blocks, rest_allocs)
-
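To make the tree shape concrete, here is a toy version of CreateTree's core loop: walk each allocation stack bottom-up, creating or updating one node per frame and accumulating byte counts along the path. The stacks and sizes are invented:

    class Node:
        def __init__(self, name):
            self.name, self.bytes, self.children = name, 0, {}

    def add_alloc(root, frames, nbytes):
        node = root
        node.bytes += nbytes
        for func in reversed(frames):   # bottom of the stack first
            node = node.children.setdefault(func, Node(func))
            node.bytes += nbytes

    def dump(node, pad=""):
        print("%s%d bytes  %s" % (pad, node.bytes, node.name))
        # Children printed in descending size order, as PrintRecursive does.
        for child in sorted(node.children.values(), key=lambda n: -n.bytes):
            dump(child, pad + " ")

    root = Node("ROOT")
    add_alloc(root, ["Alloc", "Parse", "main"], 1024)
    add_alloc(root, ["Alloc", "Render", "main"], 512)
    dump(root)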
-class PurifyAnalyze:
- ''' Given a Purify text file, parses all of the messages inside of it and
- normalizes them. Provides a mechanism for comparing this normalized set
- against a baseline and detecting if new errors have been introduced. '''
-
- # a line which is the start of a new message
- pat_msg_start = re.compile('^\[([A-Z])\] (.*)$')
- # a message with a specific type
- pat_msg_type = re.compile('^([A-Z]{3}): (.*)$')
- pat_leak_summary = re.compile("Summary of ... memory leaks")
- pat_miu_summary = re.compile("Summary of ... memory in use")
- pat_starting = re.compile("Starting Purify'd ([^\\s]+\\\\[^\\s]+)")
- pat_arguments = re.compile("\s+Command line arguments:\s+([^\s].*)")
- pat_terminate = re.compile('Message: TerminateProcess called with code')
- # Purify treats this as a warning, but for us it's a fatal error.
- pat_instrumentation_failed = re.compile('^.* file not instrumented')
- # misc to ignore
- pat_ignore = (re.compile('^(Start|Exit)ing'),
- re.compile('^Program terminated'),
- re.compile('^Terminating thread'),
- re.compile('^Message: CryptVerifySignature'))
- # message types that aren't analyzed
- # handled, ignored and continued exceptions will likely never be interesting
- # TODO(erikkay): MPK ("potential" memory leaks) may be worth turning on
- types_excluded = ("EXH", "EXI", "EXC", "MPK")
-
-
- def __init__(self, files, echo, name=None, source_dir=None, data_dir=None,
- report_dir=None):
- # The input file we're analyzing.
- self._files = files
-
- # Whether the input file contents should be echoed to stdout.
- self._echo = echo
-
- # A symbolic name for the run being analyzed, often the name of the
- # exe which was purified.
- self._name = name
-
- # The top of the source code tree of the code we're analyzing.
- # This prefix is stripped from all filenames in stacks for normalization.
- if source_dir:
- purify_message.Stack.SetSourceDir(source_dir)
-
- script_dir = google.path_utils.ScriptDir()
-
- if data_dir:
- self._data_dir = data_dir
- self._global_data_dir = os.path.join(script_dir, "data")
- else:
- self._data_dir = os.path.join(script_dir, "data")
- self._global_data_dir = None
-
- if report_dir:
- self._report_dir = report_dir
- else:
- self._report_dir = os.path.join(script_dir, "latest")
-
- # A map of message_type to a MessageList of that type.
- self._message_lists = {}
- self._ReadIgnoreFile()
-
- def _ReadIgnoreFile(self):
- '''Read a file which is a list of regexps for either the title or the
- top-most visible stack line.
- '''
- self._pat_ignore = []
- filenames = [os.path.join(self._data_dir, "ignore.txt")]
- if self._global_data_dir:
- filenames.append(os.path.join(self._global_data_dir, "ignore.txt"))
- for filename in filenames:
- if os.path.exists(filename):
- f = open(filename, 'r')
- for line in f.readlines():
- if line.startswith("#") or line.startswith("//") or line.isspace():
- continue
- line = line.rstrip()
- pat = re.compile(line)
- if pat:
- self._pat_ignore.append(pat)
-
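The ignore list read above is one regexp per line, with '#' or '//' comments, matched against a message's title and top visible stack frames. A minimal standalone check, reusing a pattern from the ignore.txt shown earlier:

    import re

    def load_patterns(lines):
        pats = []
        for line in lines:
            if line.startswith("#") or line.startswith("//") or line.isspace():
                continue
            pats.append(re.compile(line.rstrip()))
        return pats

    pats = load_patterns([
        "# See bug 1157381\n",
        "Pure: Trap bits found in live chunk\n",
    ])
    title = "Pure: Trap bits found in live chunk at 0x1234"
    print(any(p.match(title) for p in pats))  # True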
- def ShouldIgnore(self, msg):
- '''Should the message be ignored as irrelevant to analysis '''
- # never ignore memory in use
- if msg.Type() == "MIU":
- return False
-
- # check ignore patterns against title and top-most visible stack frames
- strings = [msg._title]
- err = msg.GetErrorStack()
- if err:
- line = err.GetTopVisibleStackLine().get('function', None)
- if line:
- strings.append(line)
- alloc = msg.GetAllocStack()
- if alloc:
- line = alloc.GetTopVisibleStackLine().get('function', None)
- if line:
- strings.append(line)
- for pat in self._pat_ignore:
- for str in strings:
- if pat.match(str):
- logging.debug("Igorning message based on ignore.txt")
- logging.debug(msg.NormalizedStr(verbose=True))
- return True
-
- # unless it's explicitly in the ignore file, never ignore these
- if msg.Type() == purify_message.FATAL:
- return False
-
- # certain message types aren't that interesting
- if msg.Type() in PurifyAnalyze.types_excluded:
- logging.debug("Igorning message because type is excluded")
- logging.debug(msg.NormalizedStr(verbose=True))
- return True
- # if the message stacks have no local stack frames, we can ignore them
- if msg.StacksAllExternal():
- logging.debug("Igorning message because stacks are all external")
- logging.debug(msg.NormalizedStr(verbose=True))
- return True
-
- # Microsoft's STL has a bunch of non-harmful UMRs in it. Most of them
- # are filtered out by Purify's default filters and by our explicit ignore
- # list. This code notices ones that have made it through so we can add
- # them to the ignore list later.
- if msg.Type() == "UMR":
- if err.GetTopStackLine()['file'].endswith('.'):
- logging.debug("non-ignored UMR in STL: %s" % msg._title)
-
- return False
-
- def AddMessage(self, msg):
- ''' Append the message to an array for its type. Returns boolean indicating
- whether the message was actually added or was ignored.'''
- if msg:
- if self.ShouldIgnore(msg):
- return False
- if msg.Type() not in self._message_lists:
- self._message_lists[msg.Type()] = purify_message.MessageList(msg.Type())
- self._message_lists[msg.Type()].AddMessage(msg)
- return True
- return False
-
- def _BeginNewSublist(self, key):
- '''See MessageList.BeginNewSublist for details.
- '''
- if key in self._message_lists:
- self._message_lists[key].BeginNewSublist()
-
- def ReadFile(self):
- ''' Reads a Purify ASCII file and parses and normalizes the messages in
- the file.
- Returns False if a fatal error was detected, True otherwise.
- '''
- # Purify files consist of a series of "messages". These messages have a type
- # (designated as a three letter code - see message_type), a severity
- # (designated by a one letter code - see message_severity) and some
- # textual details. It will often also have a stack trace of the error
- # location, and (for memory errors) may also have a stack trace of the
- # allocation location.
-
- fatal_errors = 0
- fatal_exe = ""
-
- for file in self._files:
- exe = ""
- error = None
- message = None
- for line in open(file, mode='rb'):
- line = line.rstrip()
- m = PurifyAnalyze.pat_msg_start.match(line)
- if m:
- if exe == fatal_exe:
- # since we hit a fatal error in this program, ignore all messages
- # until the program changes
- continue
- # we matched a new message, so if there's an existing one, it's time
- # to finish processing it
- if message:
- message.SetProgram(exe)
- if not self.AddMessage(message):
- # error is only set if the message we just tried to add would
- # otherwise be considered a fatal error. Since AddMessage failed
- # (presumably the messages matched the ignore list), we reset
- # error to None
- error = None
- message = None
- if error:
- if error.Type() == "EXU":
- # Don't treat EXU as fatal, since unhandled exceptions
- # in other threads don't necessarily cause the app to exit.
- # TODO(erikkay): verify that we still do trap exceptions that lead
- # to early termination.
- logging.warning(error.NormalizedStr(verbose=True))
- error = None
- else:
- if len(self._files) > 1:
- logging.error("Fatal error in program: %s" % error.Program())
- logging.error(error.NormalizedStr(verbose=True))
- fatal_errors += 1
- error = None
- fatal_exe = exe
- continue
- severity = m.group(1)
- line = m.group(2)
- m = PurifyAnalyze.pat_msg_type.match(line)
- if m:
- type = m.group(1)
- message = purify_message.Message(severity, type, m.group(2))
- if type == "EXU":
- error = message
- elif severity == "O":
- message = purify_message.Message(severity, purify_message.FATAL,
- line)
- # This is an internal Purify error, and it means that this run can't
- # be trusted and analysis should be aborted.
- error = message
- elif PurifyAnalyze.pat_instrumentation_failed.match(line):
- message = purify_message.Message(severity, purify_message.FATAL,
- line)
- error = message
- elif PurifyAnalyze.pat_terminate.match(line):
- message = purify_message.Message(severity, purify_message.FATAL,
- line)
- error = message
- elif PurifyAnalyze.pat_leak_summary.match(line):
- # TODO(erikkay): should we do sublists for MLK and MPK too?
- # Maybe that means we need to handle "new" and "all" messages
- # separately.
- #self._BeginNewSublist("MLK")
- #self._BeginNewSublist("MPK")
- pass
- elif PurifyAnalyze.pat_miu_summary.match(line):
- # Each time Purify is asked to generate a list of all memory in use
- # or new memory in use, it first emits this summary line. Since the
- # different lists can overlap, we need to tell MessageList to begin
- # a new sublist.
- # TODO(erikkay): should we tag "new" and "all" sublists explicitly
- # somehow?
- self._BeginNewSublist("MIU")
- elif PurifyAnalyze.pat_starting.match(line):
- m = PurifyAnalyze.pat_starting.match(line)
- exe = m.group(1)
- last_slash = exe.rfind("\\")
- if not purify_message.Stack.source_dir:
- path = os.path.abspath(os.path.join(exe[:last_slash], "..", ".."))
- purify_message.Stack.SetSourceDir(path)
- if not self._name:
- self._name = exe[(last_slash+1):]
- else:
- unknown = True
- for pat in PurifyAnalyze.pat_ignore:
- if pat.match(line):
- unknown = False
- break
- if unknown:
- logging.error("unknown line " + line)
- else:
- if message:
- message.AddLine(line)
- elif PurifyAnalyze.pat_arguments.match(line):
- m = PurifyAnalyze.pat_arguments.match(line)
- exe += " " + m.group(1)
-
- # Purify output should never end with a real message
- if message:
- logging.error("Unexpected message at end of file %s" % file)
-
- return fatal_errors == 0
-
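A Purify message header packs a one-letter severity and usually a three-letter type into its first line, which is exactly what pat_msg_start and pat_msg_type pull apart in ReadFile. A quick illustration on a fabricated line:

    import re

    pat_msg_start = re.compile(r'^\[([A-Z])\] (.*)$')
    pat_msg_type = re.compile(r'^([A-Z]{3}): (.*)$')

    line = "[E] UMR: Uninitialized memory read in foo()"
    m = pat_msg_start.match(line)
    severity, rest = m.group(1), m.group(2)
    m = pat_msg_type.match(rest)
    print((severity, m.group(1), m.group(2)))
    # ('E', 'UMR', 'Uninitialized memory read in foo()')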
- def GetMessageList(self, key):
- if key in self._message_lists:
- return self._message_lists[key]
- else:
- return None
-
- def Summary(self, echo=None, save=True):
- ''' Print a summary of how many messages of each type were found. '''
- # make sure everyone else is done first
- sys.stderr.flush()
- sys.stdout.flush()
- if echo is None:
- echo = self._echo
- if save:
- filename = os.path.join(self._report_dir, "Summary.txt")
- file = open(filename, "w")
- else:
- file = None
- logging.info("summary of Purify messages:")
- self._ReportFixableMessages()
- for key in self._message_lists:
- list = self._message_lists[key]
- unique = list.UniqueMessages()
- all = list.AllMessages()
- count = 0
- for msg in all:
- count += msg._count
- self._PrintAndSave("%s(%s) unique:%d total:%d" % (self._name,
- purify_message.GetMessageType(key), len(unique), count), file)
- if key not in ["MIU"]:
- ignore_file = "%s_%s_ignore.txt" % (self._name, key)
- ignore_hashes = self._MessageHashesFromFile(ignore_file)
- ignored = 0
-
- groups = list.UniqueMessageGroups()
- group_keys = groups.keys()
- group_keys.sort(cmp=lambda x,y: len(groups[y]) - len(groups[x]))
- for group in group_keys:
- # filter out ignored messages
- kept_msgs = [x for x in groups[group] if hash(x) not in ignore_hashes]
- ignored += len(groups[group]) - len(kept_msgs)
- groups[group] = kept_msgs
- if ignored:
- self._PrintAndSave("%s(%s) ignored:%d" % (self._name,
- purify_message.GetMessageType(key), ignored), file)
- total = reduce(lambda x, y: x + len(groups[y]), group_keys, 0)
- if total:
- self._PrintAndSave("%s(%s) group summary:" % (self._name,
- purify_message.GetMessageType(key)), file)
- self._PrintAndSave(" TOTAL: %d" % total, file)
- for group in group_keys:
- if len(groups[group]):
- self._PrintAndSave(" %s: %d" % (group, len(groups[group])),
- file)
- if echo:
- for group in group_keys:
- msgs = groups[group]
- if len(msgs) == 0:
- continue
- self._PrintAndSave("messages from %s (%d)" % (group, len(msgs)),
- file)
- self._PrintAndSave("="*79, file)
- for msg in msgs:
- # for the summary output, line numbers are useful
- self._PrintAndSave(msg.NormalizedStr(verbose=True), file)
- # make sure stdout is flushed to avoid weird overlaps with logging
- sys.stdout.flush()
- if file:
- file.close()
-
- def PrintMemoryInUse(self, byte_filter=16384):
- ''' Print one or more trees showing a hierarchy of memory allocations.
- Args:
- byte_filter: a number of bytes below which we'll prune the tree
- '''
- list = self.GetMessageList("MIU")
- sublists = list.GetSublists()
- if not sublists:
- sublists = [list]
- trees = []
- summaries = []
- # create the trees and summaries
- for sublist in sublists:
- tree = MemoryTreeNode.CreateTree(sublist)
- trees.append(tree)
-
- # While the tree assigns allocations hierarchically from the root/bottom
- # of the stack down, the summary simply totals the bytes of the top-most
- # stack frame that is in our code.
- summary = {}
- total = 0
- summaries.append(summary)
- for msg in sublist.AllMessages():
- total += msg._bytes
- stack = msg.GetAllocStack()
- if stack._all_external:
- alloc_caller = "WINDOWS"
- else:
- lines = stack.GetLines()
- for line in lines:
- alloc_caller = line["function"]
- if alloc_caller:
- break
- summary[alloc_caller] = summary.get(alloc_caller, 0) + msg._bytes
- summary["TOTAL"] = total
-
- # print out the summaries and trees.
- # TODO(erikkay): perhaps we should be writing this output to a file
- # instead?
- tree_number = 1
- num_trees = len(trees)
- for tree, summary in zip(trees, summaries):
- print "MEMORY SNAPSHOT %d of %d" % (tree_number, num_trees)
- lines = summary.keys()
- lines.sort(cmp=lambda x,y: summary[y] - summary[x])
- rest = 0
- for line in lines:
- bytes = summary[line]
- if bytes < byte_filter:
- rest += bytes
- else:
- print "%d: %s" % (bytes, line)
- print "%d: REST" % rest
- print
- print "BEGIN TREE"
- tree.PrintRecursive(byte_filter=byte_filter)
- tree_number += 1
-
- # make sure stdout is flushed to avoid weird overlaps with logging
- sys.stdout.flush()
-
- def BugReport(self, save=True):
- ''' Print a summary of how many messages of each type were found and write
- to BugReport.txt
- '''
- if save:
- filename = os.path.join(self._report_dir, "BugReport.txt")
- file = open(filename, "w")
- else:
- file = None
- # make sure everyone else is done first
- sys.stderr.flush()
- sys.stdout.flush()
- logging.info("summary of Purify bugs:")
-
- # This is a specialized set of counters for unit tests, with some
- # unfortunate hard-coded knowledge.
- test_counts = {}
- total_count = 0
- for key in self._message_lists:
- bug = {}
- list = self._message_lists[key]
- unique = list.UniqueMessages()
- all = list.AllMessages()
- count = 0
- for msg in all:
- if msg._title not in bug:
- # use a single sample message to represent all messages
- # that match this title
- bug[msg._title] = {"message":msg,
- "total":0,
- "count":0,
- "programs":set()}
- total_count += 1
- this_bug = bug[msg._title]
- this_bug["total"] += msg._count
- this_bug["count"] += 1
- prog = msg.Program()
- if self._name == "layout":
- # For layout tests, use the last argument, which is the URL that's
- # passed into test_shell.
- this_bug["programs"].add(prog)
- prog_args = prog.split(" ")
- if len(prog_args):
- path = prog_args[-1].replace('\\', '/')
- index = path.rfind("layout_tests/")
- if index >= 0:
- path = path[(index + len("layout_tests/")):]
- else:
- index = path.rfind("127.0.0.1:")
- if index >= 0:
- # the port number is 8000 or 9000, but length is the same
- path = "http: " + path[(index + len("127.0.0.1:8000/")):]
- count = 1 + test_counts.get(path, 0)
- test_counts[path] = count
- elif self._name == "ui":
- # ui_tests.exe appends a --test-name= argument to chrome.exe
- prog_args = prog.split(" ")
- arg_prefix = "--test-name="
- test_name = "UNKNOWN"
- for arg in prog_args:
- index = arg.find(arg_prefix)
- if index >= 0:
- test_name = arg[len(arg_prefix):]
- count = 1 + test_counts.get(test_name, 0)
- test_counts[test_name] = count
- break
- this_bug["programs"].add(test_name)
- else:
- this_bug["programs"].add(prog)
-
- for title in bug:
- b = bug[title]
- self._PrintAndSave("[%s] %s" % (key, title), file)
- self._PrintAndSave("%d tests, %d stacks, %d instances" % (
- len(b["programs"]), b["count"], b["total"]), file)
- self._PrintAndSave("Reproducible with:", file)
- for program in b["programs"]:
- self._PrintAndSave(" %s" % program, file)
- self._PrintAndSave("Sample error details:", file)
- self._PrintAndSave("=====================", file)
- self._PrintAndSave(b["message"].NormalizedStr(verbose=True), file)
- if len(test_counts):
- self._PrintAndSave("", file)
- self._PrintAndSave("test error counts", file)
- self._PrintAndSave("========================", file)
- tests = test_counts.keys()
- tests.sort()
- for test in tests:
- self._PrintAndSave("%s: %d" % (test, test_counts[test]), file)
- if total_count == 0:
- self._PrintAndSave("No bugs. Shocking, I know.", file)
- # make sure stdout is flushed to avoid weird overlaps with logging
- sys.stdout.flush()
- if file:
- file.close()
-
- def SaveStrings(self, string_list, key, fname_extra=""):
- '''Output a list of strings to a file in the report dir.
- '''
- out = os.path.join(self._report_dir,
- "%s_%s%s.txt" % (self._name, key, fname_extra))
- logging.info("saving %s" % (out))
- f = None
- try:
- f = open(out, "w+")
- f.write('\n'.join(string_list))
- except IOError, (errno, strerror):
- logging.error("error writing to file %s (%d, %s)" % (out, errno, strerror))
- if f:
- f.close()
- return True
-
- def SaveResults(self, path=None, verbose=False):
- ''' Output normalized data to baseline files for future comparison runs.
- Messages are saved in sorted order into a separate file for each message
- type. See Message.NormalizedStr() for details of what's written.
- '''
- if not path:
- path = self._report_dir
- for key in self._message_lists:
- out = os.path.join(path, "%s_%s.txt" % (self._name, key))
- logging.info("saving %s" % (out))
- f = open(out, "w+")
- list = self._message_lists[key].UniqueMessages()
- # TODO(erikkay): should the count of each message be a diff factor?
- # (i.e. the same error shows up, but more frequently)
- for message in list:
- f.write(message.NormalizedStr(verbose=verbose))
- f.write("\n")
- f.close()
- return True
-
- def _PrintAndSave(self, msg, file):
- ''' Print |msg| to both stdout and to file. '''
- if file:
- file.write(msg + "\n")
- print msg
- sys.stdout.flush()
-
- def _ReportFixableMessages(self):
- ''' Collects all baseline files for the executable being tested, including
- lists of flakey results, and logs the total number of messages in them.
- '''
- # TODO(pamg): As long as we're looking at all the files, we could use the
- # information to report any message types that no longer happen at all.
- fixable = 0
- flakey = 0
- paths = [os.path.join(self._data_dir, x)
- for x in os.listdir(self._data_dir)]
- for path in paths:
- # We only care about this executable's files, and not its gtest filters.
- if (not os.path.basename(path).startswith(self._name) or
- not path.endswith(".txt") or
- path.endswith("gtest.txt") or
- path.endswith("_ignore.txt") or
- not os.path.isfile(path)):
- continue
- msgs = self._MessageHashesFromFile(path)
- if path.find("flakey") == -1:
- fixable += len(msgs)
- else:
- flakey += len(msgs)
-
- logging.info("Fixable errors: %s" % fixable)
- logging.info("Flakey errors: %s" % flakey)
-
- def _MessageHashesFromFile(self, filename):
- ''' Reads a file of normalized messages (see SaveResults) and creates a
- dictionary mapping the hash of each message to its text.
- '''
- # NOTE: this uses the same hashing algorithm as Message.__hash__.
- # Unfortunately, we can't use the same code easily since Message is based
- # on parsing an original Purify output file and this code is reading a file
- # of already normalized messages. This means that these two bits of code
- # need to be kept in sync.
- msgs = {}
- if not os.path.isabs(filename):
- filename = os.path.join(self._data_dir, filename)
- if os.path.exists(filename):
- logging.info("reading messages from %s" % filename)
- file = open(filename, "r")
- msg = ""
- title = None
- lines = file.readlines()
- # in case the file doesn't end in a blank line
- lines.append("\n")
- for line in lines:
- # allow these files to have comments in them
- if line.startswith('#') or line.startswith('//'):
- continue
- if not title:
- if not line.isspace():
- # first line of each message is a title
- title = line
- continue
- elif not line.isspace():
- msg += line
- else:
- # note that the hash doesn't include the title, see Message.__hash__
- h = hash(msg)
- msgs[h] = title + msg
- title = None
- msg = ""
- logging.info("%s: %d msgs" % (filename, len(msgs)))
- return msgs
-
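-  # As a (hypothetical) illustration of the format _MessageHashesFromFile
-  # parses, one baseline-file entry looks like:
-  #
-  #   Uninitialized Memory Read in Foo::Bar(void)
-  #   Error Location
-  #     foo/foo.cc Foo::Bar(void)
-  #
-  # The first line is the title and the following lines up to a blank line
-  # are the body; the resulting dict maps hash(body) to title + body.
-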
- def _SaveGroupSummary(self, message_list):
- '''Save a summary of message groups and their counts to a file in report_dir
- '''
- string_list = []
- groups = message_list.UniqueMessageGroups()
- group_keys = groups.keys()
-
- group_keys.sort(cmp=lambda x,y: len(groups[y]) - len(groups[x]))
- for group in group_keys:
- string_list.append("%s: %d" % (group, len(groups[group])))
-
- self.SaveStrings(string_list, message_list.GetType(), "_GROUPS")
-
- def CompareResults(self):
- ''' Compares the results from the current run with the baseline data
- stored in data/<name>_<key>.txt returning False if it finds new errors
- that are not in the baseline. See ReadFile() and SaveResults() for
- details of what's in the original file and what's in the baseline.
- Errors that show up in the baseline but not the current run are not
- considered errors (they're considered "fixed"), but they do suggest
- that the baseline file could be re-generated.'''
- errors = 0
- fixes = 0
- for type in purify_message.message_type:
- if type in ["MIU"]:
- continue
- # number of new errors for this message type
- type_errors = []
- # number of new unexpected fixes for this message type
- type_fixes = []
- # the messages from the current run that are in the baseline
- new_baseline = []
- # a common prefix used to describe the program being analyzed and the
- # type of message which is used to generate filenames and descriptive
- # error messages
- type_name = "%s_%s" % (self._name, type)
-
- # open the baseline file to compare against
- baseline_file = "%s.txt" % type_name
- baseline_hashes = self._MessageHashesFromFile(baseline_file)
-
- # read the flakey file if it exists
- flakey_file = "%s_flakey.txt" % type_name
- flakey_hashes = self._MessageHashesFromFile(flakey_file)
-
- # read the ignore file if it exists
- ignore_file = "%s_ignore.txt" % type_name
- ignore_hashes = self._MessageHashesFromFile(ignore_file)
-
- # messages from the current run
- current_list = self.GetMessageList(type)
- if current_list:
- # Since we're looking at the list of unique messages,
-        # if the number of occurrences of a given unique message
- # changes, it won't show up as an error.
- current_messages = current_list.UniqueMessages()
- else:
- current_messages = []
- current_hashes = {}
- # compute errors and new baseline
- for message in current_messages:
- msg_hash = hash(message)
- current_hashes[msg_hash] = message
- if msg_hash in ignore_hashes or msg_hash in flakey_hashes:
- continue
- if msg_hash in baseline_hashes:
- new_baseline.append(msg_hash)
- continue
- type_errors.append(msg_hash)
- # compute unexpected fixes
- for msg_hash in baseline_hashes:
- if (msg_hash not in current_hashes and
- msg_hash not in ignore_hashes and
- msg_hash not in flakey_hashes):
- type_fixes.append(baseline_hashes[msg_hash])
-
- if len(current_messages) or len(type_errors) or len(type_fixes):
- logging.info("%d '%s(%s)' messages "
- "(%d new, %d unexpectedly fixed)" % (len(current_messages),
- purify_message.GetMessageType(type), type,
- len(type_errors), len(type_fixes)))
-
- if len(type_errors):
- strs = [current_hashes[x].NormalizedStr(verbose=True)
- for x in type_errors]
- logging.error("%d new '%s(%s)' errors found\n%s" % (len(type_errors),
- purify_message.GetMessageType(type), type,
- '\n'.join(strs)))
- strs = [current_hashes[x].NormalizedStr() for x in type_errors]
- self.SaveStrings(strs, type, "_NEW")
- errors += len(type_errors)
-
- if len(type_fixes):
- # we don't have access to the original message, so all we can do is log
- # the non-verbose normalized text
- logging.warning("%d new '%s(%s)' unexpected fixes found\n%s" % (
- len(type_fixes), purify_message.GetMessageType(type),
- type, '\n'.join(type_fixes)))
- self.SaveStrings(type_fixes, type, "_FIXED")
- fixes += len(type_fixes)
- if len(current_messages) == 0:
- logging.warning("all errors fixed in %s" % baseline_file)
-
- if len(type_fixes) or len(type_errors):
- strs = [baseline_hashes[x] for x in new_baseline]
- self.SaveStrings(strs, type, "_BASELINE")
-
- if current_list:
- self._SaveGroupSummary(current_list)
-
- if errors:
- logging.error("%d total new errors found" % errors)
- return -1
- else:
- logging.info("no new errors found - yay!")
- if fixes:
- logging.warning("%d total errors unexpectedly fixed" % fixes)
- # magic return code to turn the builder orange (via ReturnCodeCommand)
- return 88
- return 0
-
-
-# The following code is here for testing and development purposes.
-
-def _main():
- retcode = 0
-
- parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
- parser.add_option("-b", "--baseline", action="store_true", default=False,
- help="save output to baseline files")
- parser.add_option("-m", "--memory_in_use",
- action="store_true", default=False,
- help="print memory in use summary")
- parser.add_option("", "--validate",
- action="store_true", default=False,
- help="validate results vs. baseline")
- parser.add_option("-e", "--echo_to_stdout",
- action="store_true", default=False,
- help="echo purify output to standard output")
- parser.add_option("", "--source_dir",
-                    help="path to top of source tree for this build "
- "(used to normalize source paths in output)")
- parser.add_option("", "--byte_filter", default=16384,
- help="prune the tree below this number of bytes")
- parser.add_option("-n", "--name",
- help="name of the test being run "
- "(used for output filenames)")
- parser.add_option("", "--data_dir",
- help="path to where purify data files live")
- parser.add_option("", "--bug_report", default=False,
- action="store_true",
- help="print output as an attempted summary of bugs")
- parser.add_option("-v", "--verbose", action="store_true", default=False,
- help="verbose output - enable debug log messages")
- parser.add_option("", "--report_dir",
- help="path where report files are saved")
-
- (options, args) = parser.parse_args()
-  if len(args) < 1:
- parser.error("no filename specified")
- filenames = args
-
- if options.verbose:
- google.logging_utils.config_root(level=logging.DEBUG)
- else:
- google.logging_utils.config_root(level=logging.INFO)
- pa = PurifyAnalyze(filenames, options.echo_to_stdout, options.name,
- options.source_dir, options.data_dir, options.report_dir)
- execute_crash = not pa.ReadFile()
- if options.bug_report:
- pa.BugReport()
- pa.Summary(False)
- elif options.memory_in_use:
- pa.PrintMemoryInUse(int(options.byte_filter))
- elif execute_crash:
- retcode = -1
- logging.error("Fatal error during test execution. Analysis skipped.")
- elif options.validate:
- if pa.CompareResults() != 0:
- retcode = -1
- pa.SaveResults()
- pa.Summary()
- elif options.baseline:
- if not pa.SaveResults(verbose=True):
- retcode = -1
- pa.Summary(False)
- else:
- pa.Summary(False)
-
- sys.exit(retcode)
-
-if __name__ == "__main__":
- _main()
-
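-# Example command line (paths are hypothetical):
-#   python purify_analyze.py --name=ui --source_dir=d:\src\chrome\src --validate latest\ui.txt
-# reads latest\ui.txt, compares it against the baseline files, and writes
-# report files such as ui_<TYPE>_NEW.txt into the report dir.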
-
diff --git a/tools/purify/purify_coverage.py b/tools/purify/purify_coverage.py
deleted file mode 100644
index e88af50..0000000
--- a/tools/purify/purify_coverage.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# purify_coverage.py
-
-import logging
-import optparse
-import os
-import re
-import sys
-
-import google.path_utils
-
-# local modules
-import common
-import purify_analyze
-import purify_message
-
-
-class PurifyCoverage(common.Rational):
- def __init__(self):
- common.Rational.__init__(self)
- script_dir = google.path_utils.ScriptDir()
- self._latest_dir = os.path.join(script_dir, "latest")
-
- def CreateOptionParser(self):
- common.Rational.CreateOptionParser(self)
- self._parser.description = __doc__
- self._parser.add_option("-n", "--name",
- dest="name", default=None,
- help="name of the test being run "
- "(used for output filenames)")
- self._parser.add_option("", "--source_dir",
-                       help="path to top of source tree for this build "
- "(used to normalize source paths in baseline)")
-
- def ParseArgv(self):
- if common.Rational.ParseArgv(self):
- self._name = self._options.name
- if not self._name:
- self._name = os.path.basename(self._exe)
- # _out_file can be set in common.Rational.ParseArgv
- if not self._out_file:
- self._out_file = os.path.join(self._latest_dir,
- "%s_coverage.txt" % (self._name))
- self._source_dir = self._options.source_dir
- return True
- return False
-
- def _PurifyCommand(self):
- cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
- "/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
- "/AllocCallStackLength=30", "/Coverage",
- "/CoverageDefaultInstrumentationType=line"]
- return cmd
-
- def Instrument(self):
- cmd = self._PurifyCommand()
- # /Run=no means instrument only
- cmd.append("/Run=no")
- cmd.append(os.path.abspath(self._exe))
- return common.Rational.Instrument(self, cmd)
-
- def Execute(self):
- cmd = self._PurifyCommand()
- cmd.append("/SaveTextData=" + self._out_file)
- # TODO(erikkay): should we also do /SaveMergeTextData?
- return common.Rational.Execute(self, cmd)
-
- def Analyze(self):
- if not os.path.isfile(self._out_file):
- logging.info("no output file %s" % self._out_file)
- return -1
- # TODO(erikkay): parse the output into a form we could use on the buildbots
- return 0
-
-if __name__ == "__main__":
- rational = PurifyCoverage()
- if rational.Run():
- retcode = 0
- else:
- retcode = -1
- sys.exit(retcode)
-
-
diff --git a/tools/purify/purify_inuse.py b/tools/purify/purify_inuse.py
deleted file mode 100644
index 12d13f2..0000000
--- a/tools/purify/purify_inuse.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# purify_inuse.py
-
-import logging
-import optparse
-import os
-import re
-import sys
-
-import google.path_utils
-
-# local modules
-import common
-import purify_analyze
-import purify_message
-
-
-class PurifyInUse(common.Rational):
- def __init__(self):
- common.Rational.__init__(self)
- script_dir = google.path_utils.ScriptDir()
- self._latest_dir = os.path.join(script_dir, "latest")
-
- def CreateOptionParser(self):
- common.Rational.CreateOptionParser(self)
- self._parser.description = __doc__
- self._parser.add_option("-n", "--name",
- dest="name", default=None,
- help="name of the test being run "
- "(used for output filenames)")
- self._parser.add_option("", "--source_dir",
-                       help="path to top of source tree for this build "
- "(used to normalize source paths in baseline)")
- self._parser.add_option("", "--byte_filter", default=16384,
- help="prune the tree below this number of bytes")
-
- def ParseArgv(self):
- if common.Rational.ParseArgv(self):
- self._name = self._options.name
- if not self._name:
- self._name = os.path.basename(self._exe)
- # _out_file can be set in common.Rational.ParseArgv
- if not self._out_file:
- self._out_file = os.path.join(self._latest_dir, "%s.txt" % (self._name))
- self._source_dir = self._options.source_dir
- self._byte_filter = int(self._options.byte_filter)
- return True
- return False
-
- def _PurifyCommand(self):
- cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
- "/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
- "/AllocCallStackLength=30", "/ErrorCallStackLength=30",
- "/LeaksAtExit=no", "/InUseAtExit=yes"]
- return cmd
-
- def Instrument(self):
- cmd = self._PurifyCommand()
- # /Run=no means instrument only
- cmd.append("/Run=no")
- cmd.append(os.path.abspath(self._exe))
- return common.Rational.Instrument(self, cmd)
-
- def Execute(self):
- cmd = self._PurifyCommand()
- cmd.append("/SaveTextData=" + self._out_file)
- return common.Rational.Execute(self, cmd)
-
- def Analyze(self):
- if not os.path.isfile(self._out_file):
- logging.info("no output file %s" % self._out_file)
- return -1
- pa = purify_analyze.PurifyAnalyze(self._out_file, False,
- self._name, self._source_dir)
- if not pa.ReadFile():
- logging.warning("inuse summary suspect due to fatal error during run")
- pa.PrintMemoryInUse(byte_filter=self._byte_filter)
- return 0
-
-if __name__ == "__main__":
- rational = PurifyInUse()
- if rational.Run():
- retcode = 0
- else:
- retcode = -1
- sys.exit(retcode)
-
-
diff --git a/tools/purify/purify_message.py b/tools/purify/purify_message.py
deleted file mode 100644
index 83ed039..0000000
--- a/tools/purify/purify_message.py
+++ /dev/null
@@ -1,610 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# purify_message.py
-
-''' Utility objects and functions to parse and unique Purify messages '''
-
-import cStringIO
-import logging
-import re
-import sys
-
-import google.logging_utils
-
-# used to represent one or more elided frames
-ELIDE = "..."
-# used to represent stack truncation at a known entry point
-TRUNCATE = "^^^"
-# a file that's outside of our source directory
-EXTERNAL_FILE = "EXTERNAL_FILE"
-
-# mapping of purify message types to descriptions
-message_type = {
- "ABR": "Array Bounds Read",
- "ABW": "Array Bounds Write",
- "ABWL": "Array Bounds Write (late detect)",
- "BSR": "Beyond Stack Read",
- "BSW": "Beyond Stack Write",
- "COM": "COM API/Interface Failure",
- "EXC": "Continued Exception",
- "EXH": "Handled Exception",
- "EXI": "Ignored Exception",
- "EXU": "Unhandled Exception",
- "FFM": "Freeing Freed Memory",
- "FIM": "Freeing Invalid Memory",
- "FMM": "Freeing Mismatched Memory",
- "FMR": "Free Memory Read",
- "FMW": "Free Memory Write",
- "FMWL": "Free Memory Write (late detect)",
- "HAN": "Invalid Handle",
- "HIU": "Handle In Use",
- "ILK": "COM Interface Leak",
- "IPR": "Invalid Pointer Read",
- "IPW": "Invalid Pointer Write",
- "MAF": "Memory Allocation Failure",
- "MIU": "Memory In Use",
- "MLK": "Memory Leak",
- "MPK": "Potential Memory Leak",
- "NPR": "Null Pointer Read",
- "NPW": "Null Pointer Write",
- "PAR": "Bad Parameter",
- "UMC": "Uninitialized Memory Copy",
- "UMR": "Uninitialized Memory Read",
-}
-
-# a magic message type which is not enumerated with the normal message type dict
-FATAL = "FATAL"
-
-def GetMessageType(key):
- if key in message_type:
- return message_type[key]
- elif key == FATAL:
- return key
- logging.warn("unknown message type %s" % key)
- return "UNKNOWN"
-
-# currently unused, but here for documentation purposes
-message_severity = {
- "I": "Informational",
- "E": "Error",
- "W": "Warning",
- "O": "Internal Purify Error",
-}
-
-
-class Stack:
- ''' A normalized Purify Stack. The stack is constructed by adding one line
- at a time from a stack in a Purify text file via AddLine.
- Supports cmp and hash so that stacks which normalize the same can be sorted
- and uniqued.
- The original stack contents are preserved so that it's possible to drill
- down into the full details if necessary. '''
-
- # The top of the source tree. This is stripped from the filename as part
- # of normalization.
- source_dir = ""
-
- @classmethod
- def SetSourceDir(cls, dir):
- # normalize the dir
- cls.source_dir = dir.replace("\\", "/").lower()
- logging.debug("Stack.source_dir = %s" % cls.source_dir)
-
- # a line in a stack trace
- pat_stack_line = re.compile('(.*)\[(\w:)?([^\:\s]*)(:\d+)?(\s+.*)?]')
-
- # Known stack entry points that allow us to truncate the rest of the stack
- # below that point.
- pat_known_entries = (
- re.compile('RunnableMethod::Run\(void\)'),
- re.compile('ChromeMain'),
- re.compile('BrowserMain'),
- re.compile('wWinMain'),
- re.compile('TimerManager::ProcessPendingTimer\(void\)'),
-      re.compile('RunnableMethod::RunnableMethod\(.*\)'),
-      re.compile('RenderViewHost::OnMessageReceived\(IPC::Message const&\)'),
- re.compile('testing::Test::Run\(void\)'),
- re.compile('testing::TestInfoImpl::Run\(void\)'),
- re.compile('Thread::ThreadFunc\\(void \*\)'),
- re.compile('TimerTask::Run\(void\)'),
- re.compile('MessageLoop::RunTask\(Task \*\)'),
- re.compile('.DispatchToMethod\@.*'),
- )
-
- # if functions match the following, elide them from the stack
- pat_func_elide = (re.compile('^std::'), re.compile('^new\('))
- # if files match the following, elide them from the stack
- pat_file_elide = (re.compile('.*platformsdk_win2008.*'),
- re.compile('.*.(dll|DLL)$'),
- # bug 1069902
- re.compile('webkit/pending/wtf/fastmalloc\.h'),
- # When we leak sqlite stuff, we leak a lot, and the stacks
- # are all over the place. For now, let's assume that
- # sqlite itself is leak free and focus on our calling code.
- re.compile('third_party/sqlite/.*'),
- )
-
- pat_unit_test = re.compile('^([a-zA-Z0-9]+)_(\w+)_Test::.*')
-
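-  # For example, pat_unit_test maps a gtest frame such as
-  # "FooTest_Bar_Test::TestBody(void)" (a made-up name) to the logical
-  # group "FooTest.Bar" in AddLine below.
-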
- def __init__(self, title):
- self._title = title.lstrip()
- self._stack = []
- self._orig = ""
- # are we currently in an eliding block
- self._eliding = False
- # have we truncated the stack?
- self._truncated = False
- # is the stack made up completely of external code? (i.e. elided)
- self._all_external = True
- # a logical group that this stack belongs to
- self._group = None
- # top stack line (preserved even if elided)
- self._top_stack_line = None
-
- def GetLines(self):
- return self._stack
-
- def GetTopStackLine(self):
- return self._top_stack_line
-
- def GetTopVisibleStackLine(self):
- for line in self._stack:
- if line['function']:
- return line
- return {}
-
- def GetGroup(self):
- '''A logical grouping for this stack, allowing related stacks to be grouped
- together. Subgroups within a group are separated by ".".
- (e.g. group.subgroup.subsubgroup)
- '''
-    return self._group
-
- def _ComputeStackLine(self, line):
- line = line.lstrip()
- m = Stack.pat_stack_line.match(line)
- if m:
- func = m.group(1).rstrip()
- func = self._DemangleSymbol(func)
- func = self._DetemplatizeSymbol(func)
- if m.group(2):
- file = m.group(2) + m.group(3)
- else:
- file = m.group(3)
- # paths are normalized to use / and be lower case
- file = file.replace("\\", "/").lower()
- if not file.startswith(Stack.source_dir):
- file = EXTERNAL_FILE
- else:
- file = file[len(Stack.source_dir):]
- # trim leading / if present
- if file[0] == "/":
- file = file[1:]
- loc = m.group(4)
- if loc:
- loc = int(loc[1:])
- else:
- loc = 0
- return {'function': func, 'file': file, 'line_number': loc}
- return None
-
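-  # For example, with source_dir set to "d:/src/chrome/src",
-  # _ComputeStackLine turns a (made-up) frame like
-  #   Foo::Bar(void) [D:\src\chrome\src\foo\foo.cc:42]
-  # into
-  #   {'function': 'Foo::Bar(void)', 'file': 'foo/foo.cc', 'line_number': 42}
-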
- def _ShouldElide(self, stack_line):
- func = stack_line['function']
- file = stack_line['file']
- # elide certain common functions from the stack such as the STL
- for pat in Stack.pat_func_elide:
- if pat.match(func):
- logging.debug("eliding due to func pat match: %s" % func)
- return True
- if file == EXTERNAL_FILE:
- # if it's not in our source tree, then elide
- logging.debug("eliding due to external file: %s" % file)
- return True
- # elide certain common file sources from the stack, usually this
- # involves system libraries
- for pat in Stack.pat_file_elide:
- if pat.match(file):
- logging.debug("eliding due to file pat match: %s" % file)
- return True
-
- return False
-
- def AddLine(self, line):
- ''' Add one line from a stack in a Purify text file. Lines must be
- added in order (top down). Lines are added to two internal structures:
- an original string copy and an array of normalized lines, split into
- (function, file, line number).
- Stack normalization does several things:
- * elides sections of the stack that are in external code
- * truncates the stack at so called "known entry points"
- * removes template type information from symbols
- Returns False if the line was elided or otherwise omitted.
- '''
- self._orig += line + "\n"
- stack_line = self._ComputeStackLine(line)
- if stack_line:
- if not self._top_stack_line:
- self._top_stack_line = stack_line
- # Unit test entry points are good groupings. Even if we already have a
- # group set, a later unit-test stack line will override.
- # Note that we also do this even if the stack has already been truncated
- # since this is useful information.
- # TODO(erikkay): Maybe in this case, the truncation should be overridden?
- test_match = Stack.pat_unit_test.match(stack_line["function"])
- if test_match:
- self._group = test_match.group(1) + "." + test_match.group(2)
-
- if self._truncated:
- return False
-
- if self._ShouldElide(stack_line):
- if not self._eliding:
- self._eliding = True
- self._stack.append({'function': "", 'file': ELIDE, 'line_number': 0})
- return False
- else:
- self._stack.append(stack_line)
- self._eliding = False
- self._all_external = False
-
- # when we reach one of the known common stack entry points, truncate
- # the stack to avoid printing overly redundant information
- if len(self._stack) > 1:
- for f in Stack.pat_known_entries:
- if f.match(stack_line["function"]):
- if not self._group:
- # we're at the end of the stack, so use the path to the file
- # as the group if we don't already have one
- # This won't be incredibly reliable, but might still be useful.
- prev = self._stack[-2]
- if prev['file']:
- self._group = '.'.join(prev['file'].split('/')[:-1])
- self._stack.append({'function': "", 'file': TRUNCATE,
- 'line_number': 0})
- self._truncated = True
- return False
- return True
- else:
- # skip these lines
- logging.debug(">>>" + line)
- return False
-
- def _DemangleSymbol(self, symbol):
- # TODO(erikkay) - I'm not sure why Purify prepends an address on the
- # front of some of these as if it were a namespace (?A<addr>::). From an
- # analysis standpoint, it seems meaningless and can change from machine to
- # machine, so it's best if it's thrown away
- if symbol.startswith("?A0x"):
- skipto = symbol.find("::")
- if skipto >= 0:
- symbol = symbol[(skipto+2):]
- else:
- logging.warn("unable to strip address off of symbol (%s)" % symbol)
- # TODO(erikkay) there are more symbols not being properly demangled
- # in Purify's output. Some of these look like template-related issues.
- return symbol
-
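-  # For example, _DemangleSymbol("?A0x12345678::Foo::Bar(void)"), a made-up
-  # symbol, returns "Foo::Bar(void)".
-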
- def _DetemplatizeSymbol(self, symbol):
- ''' remove all of the template arguments and return values from the
- symbol, normalizing it, making it more readable, and less precise '''
- ret = ""
- nested = 0
- for i in range(len(symbol)):
- if nested > 0:
- if symbol[i] == '>':
- nested -= 1
- elif symbol[i] == '<':
- nested += 1
- elif symbol[i] == '<':
- nested += 1
- else:
- ret += symbol[i]
- return ret
-
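-  # For example, _DetemplatizeSymbol("std::vector<std::pair<int,int> >::size")
-  # returns "std::vector::size" - the template arguments are stripped.
-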
- def __hash__(self):
- return hash(self.NormalizedStr())
-
- def __cmp__(self, other):
- if not other:
- return 1
- len_self = len(self._stack)
- len_other = len(other._stack)
- min_len = min(len_self, len_other)
- # sort stacks from the bottom up
- for i in range(-1, -(min_len + 1), -1):
- # compare file, then func, but omit line number
- ret = cmp((self._stack[i]['file'], self._stack[i]['function']),
- (other._stack[i]['file'], other._stack[i]['function']))
- if ret:
- return ret
- return cmp(len_self, len_other)
-
- def NormalizedStr(self, verbose=False):
- ''' String version of the normalized stack. See AddLine for normalization
- details. '''
- # use cStringIO for more efficient string building
- out = cStringIO.StringIO()
- for line in self._stack:
- out.write(" ")
- out.write(line['file'])
- if verbose and line['line_number'] > 0:
- out.write(":%d" % line['line_number'])
- out.write(" ")
- out.write(line['function'])
- out.write("\n")
- ret = out.getvalue()
- out.close()
- return ret
-
- def __str__(self):
- return self._orig
-
-
-class Message:
- '''A normalized message from a Purify text file. Messages all have a
- severity, most have a type, and many have an error stack and/or an
- allocation stack.
- Supports cmp and hash so that messages which normalize the same can be
- sorted and uniqued.'''
-
- pat_count = re.compile('^(.*) \{(\d+) occurrences?\}')
- pat_leak = re.compile('(Potential )?[Mm]emory leak of (\d+) bytes? '
- 'from (\d+) blocks? allocated in (.+)')
- pat_miu = re.compile('Memory use of (\d+) bytes? '
- '(\((\d+)% initialized\) )?from (\d+) blocks? '
- 'allocated .. (.+)')
- # these are headings to different types of stack traces
- pat_loc_error = re.compile('\s*(Exception|Error|Call) location')
- pat_loc_alloc = re.compile('\s*Allocation location')
- pat_loc_free = re.compile('\s*Free location')
- pat_loc_free2 = re.compile('\s*Location of free attempt')
-
- def __init__(self, severity, type, title):
- self._severity = severity
- self._type = type
- self._program = None
- self._head = ""
- self._loc_alloc = None
- self._loc_error = None
- self._loc_free = None
- self._stack = None
- self._count = 1
- self._bytes = 0
- self._blocks = 0
- m = Message.pat_count.match(title)
- if m:
- self._title = m.group(1)
- self._count = int(m.group(2))
- else:
- m = Message.pat_leak.match(title)
- if m:
- self._title = m.group(4)
- self._bytes = int(m.group(2))
- self._blocks = int(m.group(3))
- else:
- m = Message.pat_miu.match(title)
- if m:
- self._title = m.group(5)
- self._bytes = int(m.group(1))
- self._blocks = int(m.group(4))
- #print "%d/%d - %s" % (self._bytes, self._blocks, title[0:60])
- elif type == "MIU":
- logging.error("%s didn't match" % title)
- sys.exit(-1)
- else:
- self._title = title
-
- def GetAllocStack(self):
- return self._loc_alloc
-
- def GetErrorStack(self):
- return self._loc_error
-
- def GetGroup(self):
- '''An attempted logical grouping for this Message computed by the contained
- Stack objects.
- '''
- group = None
- if self._loc_alloc:
- group = self._loc_alloc.GetGroup()
- if not group and self._loc_error:
- group = self._loc_error.GetGroup()
- if not group and self._loc_free:
- group = self._loc_free.GetGroup()
- if not group:
- group = "UNKNOWN"
- return group
-
- def AddLine(self, line):
- '''Add a line one at a time (in order from the Purify text file) to
- build up the message and its associated stacks. '''
-
- if Message.pat_loc_error.match(line):
- self._stack = Stack(line)
- self._loc_error = self._stack
- elif Message.pat_loc_alloc.match(line):
- self._stack = Stack(line)
- self._loc_alloc = self._stack
- elif Message.pat_loc_free.match(line) or Message.pat_loc_free2.match(line):
- self._stack = Stack(line)
- self._loc_free = self._stack
- elif self._stack:
- if not line.startswith(" "):
- logging.debug("*** " + line)
- self._stack.AddLine(line)
- else:
- self._head += line.lstrip()
-
- def Type(self):
- return self._type
-
- def Program(self):
- return self._program
-
- def SetProgram(self, program):
- self._program = program
-
- def StacksAllExternal(self):
- '''Returns True if the stacks it contains are made up completely of
- external (elided) symbols'''
- return ((not self._loc_error or self._loc_error._all_external) and
- (not self._loc_alloc or self._loc_alloc._all_external) and
- (not self._loc_free or self._loc_free._all_external))
-
- def __hash__(self):
- # NOTE: see also _MessageHashesFromFile. If this method changes, then
- # _MessageHashesFromFile must be updated to match.
- s = ""
- if self._loc_error:
- s += "Error Location\n" + self._loc_error.NormalizedStr()
- if self._loc_alloc:
- s += "Alloc Location\n" + self._loc_alloc.NormalizedStr()
- if self._loc_free:
- s += "Free Location\n" + self._loc_free.NormalizedStr()
- return hash(s)
-
- def NormalizedStr(self, verbose=False):
- '''String version of the normalized message. Only includes title
- and normalized versions of error and allocation stacks if present.
- Example:
-      Uninitialized Memory Read in Foo::Bar()
- Error Location
- foo/Foo.cc Foo::Bar(void)
- foo/main.cc start(void)
- foo/main.cc main(void)
- Alloc Location
- foo/Foo.cc Foo::Foo(void)
- foo/main.cc start(void)
- foo/main.cc main(void)
- '''
- ret = ""
- # some of the message types are more verbose than others and we
- # don't need to indicate their type
- if verbose and self._type not in ["UMR", "IPR", "IPW"]:
- ret += GetMessageType(self._type) + ": "
- if verbose and self._bytes > 0:
- ret += "(%d bytes, %d blocks) " % (self._bytes, self._blocks)
- ret += "%s\n" % self._title
- if self._loc_error:
- ret += "Error Location\n" + self._loc_error.NormalizedStr(verbose)
- if self._loc_alloc:
- ret += "Alloc Location\n" + self._loc_alloc.NormalizedStr(verbose)
- if self._loc_free:
- ret += "Free Location\n" + self._loc_free.NormalizedStr(verbose)
- return ret
-
- def __str__(self):
- ret = self._title + "\n" + self._head
- if self._loc_error:
- ret += "Error Location\n" + str(self._loc_error)
- if self._loc_alloc:
- ret += "Alloc Location\n" + str(self._loc_alloc)
- if self._loc_free:
- ret += "Free Location\n" + str(self._loc_free)
- return ret
-
- def __cmp__(self, other):
- if not other:
- return 1
- ret = 0
- if self._loc_error:
- ret = cmp(self._loc_error, other._loc_error)
- if ret == 0 and self._loc_alloc:
- ret = cmp(self._loc_alloc, other._loc_alloc)
- if ret == 0 and self._loc_free:
- ret = cmp(self._loc_free, other._loc_free)
- # since title is often not very interesting, we sort against that last
- if ret == 0:
- ret = cmp(self._title, other._title)
- return ret
-
-
-class MessageList:
- '''A collection of Message objects of a given message type.'''
- def __init__(self, type):
- self._type = type
- self._messages = []
- self._unique_messages = None
- self._sublists = None
- self._bytes = 0
-
- def GetType(self):
- return self._type
-
- def BeginNewSublist(self):
- '''Some message types are logically grouped into sets of messages which
- should not be mixed in the same list. Specifically, Memory In Use (MIU),
- Memory Leak (MLK) and Potential Memory Leak (MPK) are generated in a set
- all at once, but this generation can happen at multiple distinct times,
- either via the Purify UI or through Purify API calls. For example, if
-    Purify is told to dump a list of all memory leaks once, and then again a
-    few minutes later, the two lists will certainly overlap, so they should
-    be kept in separate lists.
- In order to accommodate this, MessageList supports the notion of sublists.
- When the caller determines that one list of messages of a type has ended
- and a new list has begun, it calls BeginNewSublist() which takes the current
- set of messages, puts them into a new MessageList and puts that into the
- sublists array. Later, when the caller needs to get at these messages,
- GetSublists() should be called.
- '''
- if len(self._messages):
- # if this is the first list, no need to make a new one
- list = MessageList(self._type)
- list._messages = self._messages
- if not self._sublists:
- self._sublists = [list]
- else:
- self._sublists.append(list)
- self._messages = []
- logging.info("total size: %d" % self._bytes)
- self._bytes = 0
-
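-  # A (hypothetical) caller that sees two separate leak dumps in one Purify
-  # log would do roughly:
-  #   mlk_list.AddMessage(msg1)     # ...messages from the first dump
-  #   mlk_list.BeginNewSublist()    # boundary between the dumps
-  #   mlk_list.AddMessage(msg2)     # ...messages from the second dump
-  #   sublists = mlk_list.GetSublists()
-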
- def GetSublists(self):
- '''Returns the current list of sublists. If there are currently sublists
- and there are any messages that aren't in a sublist, BeginNewSublist() is
- called implicitly by this method to force those ungrouped messages into
- their own sublist.
- '''
- if self._sublists and len(self._sublists) and len(self._messages):
- self.BeginNewSublist()
- return self._sublists
-
- def AddMessage(self, msg):
- '''Adds a message to this MessageList.'''
- # TODO(erikkay): assert if _unique_messages exists
- self._messages.append(msg)
- self._bytes += msg._bytes
-
- def AllMessages(self):
- '''Returns an array of all Message objects in this MessageList. '''
- # TODO(erikkay): handle case with sublists
- return self._messages
-
- def UniqueMessages(self):
- '''Returns an array of the unique normalized Message objects in this
- MessageList.
- '''
- # the list is lazily computed since we have to create a sorted list,
- # which is only valid once all messages have been added
- # TODO(erikkay): handle case with sublists
- if not self._unique_messages:
- self._unique_messages = list(set(self._messages))
- self._unique_messages.sort()
- return self._unique_messages
-
- def UniqueMessageGroups(self):
- '''Returns a dictionary mapping Message group names to arrays of uniqued
- normalized Message objects in this MessageList.
- '''
- unique = self.UniqueMessages()
- groups = {}
- for msg in unique:
- group = msg.GetGroup()
- if not group in groups:
- groups[group] = []
- groups[group].append(msg)
- return groups
-
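-# A minimal (hypothetical) usage sketch of the classes above: collect parsed
-# MLK messages and report unique leaks per logical group.
-#   mlk = MessageList("MLK")
-#   for msg in parsed_messages:  # Message objects built by a parser
-#     mlk.AddMessage(msg)
-#   for group, unique in mlk.UniqueMessageGroups().items():
-#     print "%s: %d unique leaks" % (group, len(unique))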
diff --git a/tools/purify/purify_test.py b/tools/purify/purify_test.py
deleted file mode 100644
index ff60583..0000000
--- a/tools/purify/purify_test.py
+++ /dev/null
@@ -1,251 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# purify_test.py
-
-'''Runs an exe through Purify and verifies that Purify was
-able to successfully instrument and run it. The original purpose was
-to be able to identify when a change to our code breaks our ability to Purify
-the app. This can happen with seemingly innocuous changes to code due to bugs
-in Purify, and is notoriously difficult to track down when it does happen.
-Perhaps more importantly in the long run, this can also automate detection of
-leaks and other memory bugs. It also may be useful to allow people to run
-Purify in a consistent manner without having to worry about broken PATHs,
-corrupt instrumentation, or other per-machine flakiness that Purify is
-sometimes subject to.
-'''
-
-import glob
-import logging
-import optparse
-import os
-import re
-import shutil
-import sys
-import time
-
-import google.path_utils
-
-# local modules
-import common
-import purify_analyze
-
-class Purify(common.Rational):
- def __init__(self):
- common.Rational.__init__(self)
- self._data_dir = None
-
- def CreateOptionParser(self):
- common.Rational.CreateOptionParser(self)
- self._parser.description = __doc__
- self._parser.add_option("-e", "--echo_to_stdout",
- dest="echo_to_stdout", action="store_true", default=False,
- help="echo purify output to standard output")
- self._parser.add_option("-b", "--baseline",
- dest="baseline", action="store_true", default=False,
- help="create baseline error files")
- self._parser.add_option("-n", "--name",
- dest="name", default=None,
- help="name of the test being run "
- "(used for output filenames)")
- self._parser.add_option("", "--source_dir",
-                       help="path to top of source tree for this build "
- "(used to normalize source paths in baseline)")
- self._parser.add_option("", "--exe",
-                       help="The actual exe to instrument, which may be "
-                       "different from the program being run. This "
- "is useful when the exe you want to purify is "
- "run by another script or program.")
- self._parser.add_option("", "--data_dir",
- help="path where global purify data files live")
- self._parser.add_option("", "--report_dir",
- help="path where report files are saved")
-
- def ParseArgv(self):
- script_dir = google.path_utils.ScriptDir()
- if common.Rational.ParseArgv(self):
- if self._options.exe:
-        self._exe = self._options.exe
- if not os.path.isfile(self._exe):
- logging.error("file doesn't exist " + self._exe)
- return False
- self._exe_dir = common.FixPath(os.path.abspath(os.path.dirname(self._exe)))
- self._echo_to_stdout = self._options.echo_to_stdout
- self._baseline = self._options.baseline
- self._name = self._options.name
- if not self._name:
- self._name = os.path.basename(self._exe)
- self._report_dir = self._options.report_dir
- if not self._report_dir:
- self._report_dir = os.path.join(script_dir, "latest")
- # _out_file can be set in common.Rational.ParseArgv
- if not self._out_file:
- self._out_file = os.path.join(self._report_dir, "%s.txt" % self._name)
- self._source_dir = self._options.source_dir
- self._data_dir = self._options.data_dir
- if not self._data_dir:
- self._data_dir = os.path.join(script_dir, "data")
- return True
- return False
-
- def _PurifyCommand(self):
- cmd = [common.PURIFY_PATH, "/CacheDir=" + self._cache_dir]
- return cmd
-
- def Setup(self):
- script_dir = google.path_utils.ScriptDir()
- if common.Rational.Setup(self):
- if self._instrument_only:
- return True
- pft_file = os.path.join(script_dir, "data", "filters.pft")
- shutil.copyfile(pft_file, self._exe.replace(".exe", "_exe.pft"))
- string_list = [
- "[Purify]",
- "option -cache-dir=\"%s\"" % (self._cache_dir),
- "option -save-text-data=\"%s\"" % (common.FixPath(self._out_file)),
- # Change the recorded stack depth to be much larger than the default.
- # (webkit/v8 stacks in particular seem to get quite deep)
- "option -alloc-call-stack-length=30",
- "option -error-call-stack-length=30",
- "option -free-call-stack-length=30",
- # Report leaks.
- "option -leaks-at-exit=yes",
- # Don't report memory in use (that's for memory profiling).
- "option -in-use-at-exit=no",
- # The maximum number of subprocesses. If this is exceeded, Purify
- # seems to lose its mind, and we have a number of tests that use
- # much larger than the default of 5.
- "option -number-of-puts=30",
- # With our large pdbs, purify's default timeout (30) isn't always
- # enough. If this isn't enough, -1 means no timeout.
- "option -server-comm-timeout=120",
- # check stack memory loads for UMRs, etc.
- # currently disabled due to noisiness (see bug 5189)
- #"option -stack-load-checking=yes",
- ]
- ini_file = self._exe.replace(".exe", "_pure.ini")
- if os.path.isfile(ini_file):
- ini_file_orig = ini_file + ".Original"
- if not os.path.isfile(ini_file_orig):
- os.rename(ini_file, ini_file_orig)
-      f = None
-      try:
-        f = open(ini_file, "w+")
-        f.write('\n'.join(string_list))
-      except IOError, (errno, strerror):
-        logging.error("error writing to file %s (%d, %s)" %
-                      (ini_file, errno, strerror))
-        return False
-      if f:
-        f.close()
- return True
- return False
-
- def Instrument(self):
- if not os.path.isfile(self._exe):
- logging.error("file doesn't exist " + self._exe)
- return False
- cmd = self._PurifyCommand()
- # /Run=no means instrument
- cmd.extend(["/Run=no"])
- if not self._instrument_only:
- # /Replace=yes means replace the exe in place
- cmd.extend(["/Replace=yes"])
- cmd.append(os.path.abspath(self._exe))
- return common.Rational.Instrument(self, cmd)
-
- def _ExistingOutputFiles(self):
- pat_multi = re.compile('(.*)%[0-9]+d(.*)')
- m = pat_multi.match(self._out_file)
- if m:
- g = m.group(1) + '[0-9]*' + m.group(2)
- out = glob.glob(g)
- if os.path.isfile(m.group(1) + m.group(2)):
- out.append(m.group(1) + m.group(2))
- return out
- if not os.path.isfile(self._out_file):
- return []
- return [self._out_file]
-
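-  # For example, with a (hypothetical) out file of "ui_tests%d.txt",
-  # _ExistingOutputFiles globs "ui_tests[0-9]*.txt" and also picks up a
-  # plain "ui_tests.txt" if one exists.
-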
- def Execute(self):
- # delete the old file(s) to make sure that this run actually generated
- # something new
- out_files = self._ExistingOutputFiles()
- for f in out_files:
- os.remove(f)
- common.Rational.Execute(self, [])
- # Unfortunately, when we replace the exe, there's no way here to figure out
- # if purify is actually going to output a file or if the exe just crashed
- # badly. The reason is that it takes some small amount of time for purify
- # to dump out the file.
- count = 60
- while count > 0 and not os.path.isfile(self._out_file):
- time.sleep(0.2)
- count -= 1
- # Always return true, even if Execute failed - we'll depend on Analyze to
- # determine if the run was valid.
- return True
-
- def Analyze(self):
- out_files = self._ExistingOutputFiles()
- if not len(out_files):
- logging.info("no output files matching %s" % self._out_file)
- return -1
- pa = purify_analyze.PurifyAnalyze(out_files, self._echo_to_stdout,
- self._name, self._source_dir,
- self._data_dir, self._report_dir)
- if not pa.ReadFile():
- # even though there was a fatal error during Purify, it's still useful
- # to see the normalized output
- pa.Summary()
- if self._baseline:
- logging.warning("baseline not generated due to fatal error")
- else:
- logging.warning("baseline comparison skipped due to fatal error")
- return -1
- if self._baseline:
- pa.Summary(False)
- if pa.SaveResults():
- return 0
- return -1
- else:
- retcode = pa.CompareResults()
- if retcode != 0:
- pa.SaveResults(self._report_dir)
- pa.Summary()
- # with more than one output file, it's also important to emit the bug
- # report which includes info on the arguments that generated each stack
- if len(out_files) > 1:
- pa.BugReport()
- return retcode
-
- def Cleanup(self):
-    common.Rational.Cleanup(self)
- if self._instrument_only:
- return
- cmd = self._PurifyCommand()
- # undo the /Replace=yes that was done in Instrument(), which means to
- # remove the instrumented exe, and then rename exe.Original back to exe.
- cmd.append("/UndoReplace")
- cmd.append(os.path.abspath(self._exe))
- common.RunSubprocess(cmd, self._timeout, detach=True)
- # if we overwrote an existing ini file, restore it
- ini_file = self._exe.replace(".exe", "_pure.ini")
- if os.path.isfile(ini_file):
- os.remove(ini_file)
- ini_file_orig = ini_file + ".Original"
- if os.path.isfile(ini_file_orig):
- os.rename(ini_file_orig, ini_file)
- # remove the pft file we wrote out
- pft_file = self._exe.replace(".exe", "_exe.pft")
- if os.path.isfile(pft_file):
- os.remove(pft_file)
-
-
-if __name__ == "__main__":
- rational = Purify()
- retcode = rational.Run()
- sys.exit(retcode)
-
-
diff --git a/tools/purify/quantify_test.py b/tools/purify/quantify_test.py
deleted file mode 100644
index 0c8a700..0000000
--- a/tools/purify/quantify_test.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# quantify_test.py
-
-'''Runs an app through Quantify and verifies that Quantify was able to
-successfully instrument and run it. The original purpose was to allow people
-to run Quantify in a consistent manner without having to worry about broken
-PATHs, corrupt instrumentation, or other per-machine flakiness that Quantify is
-sometimes subject to. Unlike purify_test, the output from quantify_test is
-a binary file, which is much more useful in manual analysis. As such, this
-tool is not particularly interesting for automated analysis yet.
-'''
-
-import os
-import sys
-
-# local modules
-import common
-
-class Quantify(common.Rational):
- def __init__(self):
- common.Rational.__init__(self)
-
- def CreateOptionParser(self):
- common.Rational.CreateOptionParser(self)
- self._parser.description = __doc__
-
- def ParseArgv(self):
- if common.Rational.ParseArgv(self):
- if not self._out_file:
- self._out_file = os.path.join(self._cache_dir,
- "%s.qfy" % (os.path.basename(self._exe)))
- return True
- return False
-
- def Instrument(self):
- proc = [common.QUANTIFYE_PATH, "-quantify",
- '-quantify_home="' + common.PPLUS_PATH + '"' ,
- "/CacheDir=" + self._cache_dir,
- "-first-search-dir=" + self._exe_dir, self._exe]
- return common.Rational.Instrument(self, proc)
-
- def Execute(self):
- # TODO(erikkay): add an option to also do /SaveTextData and add an
- # Analyze method for automated analysis of that data.
- proc = [common.QUANTIFYW_PATH, "/CacheDir=" + self._cache_dir,
- "/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
- "/SaveData=" + self._out_file]
- return common.Rational.Execute(self, proc)
-
-if __name__ == "__main__":
- retcode = -1
- rational = Quantify()
- if rational.Run():
- retcode = 0
- sys.exit(retcode)
-
-
diff --git a/tools/purify/sharded_test_runner.py b/tools/purify/sharded_test_runner.py
deleted file mode 100644
index e7f5ecf..0000000
--- a/tools/purify/sharded_test_runner.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2009 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# sharded_test_runner
-
-import optparse
-import os
-import re
-import subprocess
-import sys
-
-import common
-
-def RunShardedTests(exe, total_shards, params):
- os.environ['GTEST_TOTAL_SHARDS'] = str(total_shards)
- for shard in range(total_shards):
- os.environ['GTEST_SHARD_INDEX'] = str(shard)
- cmd = [exe]
- cmd.extend(params)
- common.RunSubprocess(cmd)
-
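-# Example invocation (the exe name is hypothetical):
-#   python sharded_test_runner.py ui_tests.exe 4 --gtest_repeat=2
-# runs ui_tests.exe four times, with GTEST_TOTAL_SHARDS=4 and
-# GTEST_SHARD_INDEX set to 0..3, passing --gtest_repeat=2 through each time.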
-
-def main():
- exe = sys.argv[1]
- total_shards = int(sys.argv[2])
- params = sys.argv[3:]
- RunShardedTests(exe, total_shards, params)
-
-
-if __name__ == "__main__":
- main()
diff --git a/tools/purify/test_runner.py b/tools/purify/test_runner.py
deleted file mode 100644
index 78ab615..0000000
--- a/tools/purify/test_runner.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# test_runner
-
-import optparse
-import os
-import re
-import subprocess
-import sys
-
-import google.logging_utils
-import google.path_utils
-
-import common
-
-
-def GetAllTests(exe):
- cmd = [exe, "--gtest_list_tests"]
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- line = p.stdout.readline().rstrip()
- test = line
- tests = []
- while line:
- line = p.stdout.readline().rstrip()
- if line.startswith(' '):
- tests.append(test + line.lstrip())
- else:
- test = line
- return tests
-
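-# GetAllTests flattens gtest's --gtest_list_tests output; e.g. (made-up names)
-#   FooTest.
-#     Bar
-#     Baz
-# becomes ["FooTest.Bar", "FooTest.Baz"].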
-
-def RunTestsSingly(exe, tests):
- for test in tests:
- filter = test
- if len(sys.argv) > 2:
- filter = filter + ":" + sys.argv[2]
- cmd = [exe, "--gtest_filter=" + filter]
- common.RunSubprocess(cmd)
-
-
-def main():
- exe = sys.argv[1]
- all_tests = GetAllTests(exe)
- RunTestsSingly(exe, all_tests)
-
-
-if __name__ == "__main__":
- main()