#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# drmemory_analyze.py

'''Given a Dr. Memory output file, parses the errors and de-duplicates them.'''
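
# Typical command-line invocation (a minimal sketch; the results file name
# below is illustrative):
#   python drmemory_analyze.py DrMemory-results.txt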

from collections import defaultdict
import common
import logging
import optparse
import os
import re
import subprocess
import sys
import time


class DrMemoryAnalyzer:
  '''Given a set of Dr. Memory output files, parses all the errors out of
  them, de-duplicates them and outputs the results.'''
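
  # Typical use (a minimal sketch; the results file name is illustrative):
  #   analyzer = DrMemoryAnalyzer()
  #   exit_code = analyzer.Report(['DrMemory-results.txt'], testcase=None,
  #                               check_sanity=False)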

  def __init__(self):
    self.known_errors = set()

  def ReadLine(self):
    self.line_ = self.cur_fd_.readline()

  def ReadSection(self):
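    # Collects the current line plus every following line up to, but not
    # including, the next blank line, and returns them as a list.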
    result = [self.line_]
    self.ReadLine()
    while len(self.line_.strip()) > 0:
      result.append(self.line_)
      self.ReadLine()
    return result

  def ParseReportFile(self, filename):
    ret = []

    self.cur_fd_ = open(filename, 'r')
    while True:
      self.ReadLine()
      if self.line_ == '':
        break

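      # Error reports are assumed to start with a header line such as
      # "Error #1: UNINITIALIZED READ ..." (illustrative example), followed
      # by a call stack and terminated by a blank line.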
      match = re.search("^Error #[0-9]+: (.*)", self.line_)
      if match:
        self.line_ = match.groups()[0].strip() + "\n"
        tmp = self.ReadSection()
        ret.append("".join(tmp).strip())

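      # The final summary lists each suppression that fired, one per line,
      # e.g. "   4x: name_of_suppression" or "   2x (leaked 32 bytes): name"
      # (illustrative lines matching the regex below).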
      if re.search("SUPPRESSIONS USED:", self.line_):
        self.ReadLine()
        while self.line_.strip() != "":
          line = self.line_.strip()
          (count, name) = re.match(" *([0-9]+)x(?: \(leaked .*\))?: (.*)",
                                   line).groups()
          count = int(count)
          self.used_suppressions[name] += count
          self.ReadLine()

      if self.line_.startswith("ASSERT FAILURE"):
        ret.append(self.line_.strip())

    self.cur_fd_.close()
    return ret

  def Report(self, filenames, testcase, check_sanity):
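    # Parses every report file, logs only the reports not already seen in
    # previous calls, and returns 0 if nothing new was found, -1 otherwise.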
    sys.stdout.flush()
    # TODO(timurrrr): support positive tests / check_sanity==True

    to_report = []
    self.used_suppressions = defaultdict(int)
    for f in filenames:
      cur_reports = self.ParseReportFile(f)

      # Filter out the reports that were there in previous tests.
      for r in cur_reports:
        if r in self.known_errors:
          pass  # TODO: print out a hash once we add hashes to the reports.
        else:
          self.known_errors.add(r)
          to_report.append(r)

    common.PrintUsedSuppressionsList(self.used_suppressions)

    if not to_report:
      logging.info("PASS: No error reports found")
      return 0

    logging.error("Found %i error reports" % len(to_report))
    for report in to_report:
      if testcase:
        logging.error("\n%s\nNote: observed on `%s`\n" %
                      (report, testcase))
      else:
        logging.error("\n%s\n" % report)
    logging.error("Total: %i error reports" % len(to_report))
    return -1


def main():
  '''For testing only. The DrMemoryAnalyzer class should be imported instead.'''
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source_dir",
                    help="path to top of source tree for this build"
                    "(used to normalize source paths in baseline)")

  (options, args) = parser.parse_args()
  if len(args) == 0:
    parser.error("no filename specified")
  filenames = args

  return DrMemoryAnalyzer().Report(filenames, None, False)


if __name__ == '__main__':
  sys.exit(main())