author     thakis@chromium.org <thakis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-12 03:50:59 +0000
committer  thakis@chromium.org <thakis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-12 03:50:59 +0000
commit     032c8f5c5238d6e34ef76d960a856488baa077f8 (patch)
tree       1f70ddd25d2034fe66663071a1d364bea9237b72 /tools/code_coverage
parent     aa8c307f6d228dbcc19a8e16f8e7d66e63c10f9b (diff)
Remove two unused scripts.
process_coverage.py: The bots seem to use
tools/build/scripts/slave/chromium/process_coverage.py instead.

coverage.py: Was replaced by coverage_posix.py, as far as I can tell.

BUG=none

Review URL: https://codereview.chromium.org/68633006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@234396 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools/code_coverage')
-rwxr-xr-x  tools/code_coverage/coverage.py          359
-rwxr-xr-x  tools/code_coverage/process_coverage.py  413
2 files changed, 0 insertions, 772 deletions
diff --git a/tools/code_coverage/coverage.py b/tools/code_coverage/coverage.py
deleted file mode 100755
index a1496d7..0000000
--- a/tools/code_coverage/coverage.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/bin/env python
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""Module to setup and generate code coverage data
-
-This module first sets up the environment for code coverage, instruments the
-binaries, runs the tests and collects the code coverage data.
-
-
-Usage:
- coverage.py --upload=<upload_location>
- --revision=<revision_number>
- --src_root=<root_of_source_tree>
- [--tools_path=<tools_path>]
-"""
-
-import logging
-import optparse
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-
-import google.logging_utils
-import google.process_utils as proc
-
-
-# The list of binaries that will be instrumented for code coverage
-# TODO(niranjan): Re-enable instrumentation of chrome.exe and chrome.dll once we
-# resolve the issue where vsinstr.exe is confused while reading symbols.
-windows_binaries = [#'chrome.exe',
- #'chrome.dll',
- 'unit_tests.exe',
- 'automated_ui_tests.exe',
- 'installer_util_unittests.exe',
- 'ipc_tests.exe',
- 'memory_test.exe',
- 'page_cycler_tests.exe',
- 'perf_tests.exe',
- 'reliability_tests.exe',
- 'security_tests.dll',
- 'startup_tests.exe',
- 'tab_switching_test.exe',
- 'test_shell.exe']
-
-# The list of [tests, args] that will be run.
-# Failing tests have been commented out.
-# TODO(niranjan): Need to add layout tests that exercise the test shell.
-windows_tests = [
- ['unit_tests.exe', ''],
-# ['automated_ui_tests.exe', ''],
- ['installer_util_unittests.exe', ''],
- ['ipc_tests.exe', ''],
- ['page_cycler_tests.exe', '--gtest_filter=*File --no-sandbox'],
- ['reliability_tests.exe', '--no-sandbox'],
- ['startup_tests.exe', '--no-sandbox'],
- ['tab_switching_test.exe', '--no-sandbox'],
- ]
-
-
-def IsWindows():
- """Checks if the current platform is Windows.
- """
- return sys.platform[:3] == 'win'
-
-
-class Coverage(object):
- """Class to set up and generate code coverage.
-
- This class contains methods that are useful to set up the environment for
- code coverage.
-
- Attributes:
- instrumented: A boolean indicating if all the binaries have been
- instrumented.
- """
-
- def __init__(self,
- revision,
- src_path = None,
- tools_path = None,
- archive=None):
- """Init method for the Coverage class.
-
- Args:
- revision: Revision number of the Chromium source tree.
- src_path: Location of the Chromium source base.
- tools_path: Location of the Visual Studio Team Tools. (Win32 only)
- archive: Archive location for the intermediate .coverage results.
- """
- google.logging_utils.config_root()
- self.revision = revision
- self.instrumented = False
- self.tools_path = tools_path
- self.src_path = src_path
- self._dir = tempfile.mkdtemp()
- self._archive = archive
-
- def SetUp(self, binaries):
- """Set up the platform specific environment and instrument the binaries for
- coverage.
-
- This method sets up the environment, instruments all the compiled binaries
- and sets up the code coverage counters.
-
- Args:
- binaries: List of binaries that need to be instrumented.
-
- Returns:
- True on success.
- False on error.
- """
- if self.instrumented:
- logging.error('Binaries already instrumented')
- return False
- if IsWindows():
- # Stop all previous instances of VSPerfMon counters
- counters_command = ('%s -shutdown' %
- (os.path.join(self.tools_path, 'vsperfcmd.exe')))
- (retcode, output) = proc.RunCommandFull(counters_command,
- collect_output=True)
- # TODO(niranjan): Add a check to verify that the binaries were built
- # using the /PROFILE linker flag.
- if self.tools_path == None:
- logging.error('Could not locate Visual Studio Team Server tools')
- return False
- # Remove trailing slashes
- self.tools_path = self.tools_path.rstrip('\\')
- # Add this to the env PATH.
- os.environ['PATH'] = os.environ['PATH'] + ';' + self.tools_path
- instrument_command = '%s /COVERAGE ' % (os.path.join(self.tools_path,
- 'vsinstr.exe'))
- for binary in binaries:
- logging.info('binary = %s' % (binary))
- logging.info('instrument_command = %s' % (instrument_command))
- # Instrument each binary in the list
- binary = os.path.join(self.src_path, 'chrome', 'Release', binary)
- (retcode, output) = proc.RunCommandFull(instrument_command + binary,
- collect_output=True)
- # Check if the file has been instrumented correctly.
- if output.pop().rfind('Successfully instrumented') == -1:
- logging.error('Error instrumenting %s' % (binary))
- return False
- # We are now ready to run tests and measure code coverage.
- self.instrumented = True
- return True
-
- def TearDown(self):
- """Tear down method.
-
- This method shuts down the counters, and cleans up all the intermediate
- artifacts.
- """
- if self.instrumented == False:
- return
-
- if IsWindows():
- # Stop counters
- counters_command = ('%s -shutdown' %
- (os.path.join(self.tools_path, 'vsperfcmd.exe')))
- (retcode, output) = proc.RunCommandFull(counters_command,
- collect_output=True)
- logging.info('Counters shut down: %s' % (output))
- # TODO(niranjan): Revert the instrumented binaries to their original
- # versions.
- else:
- return
- if self._archive:
- shutil.copytree(self._dir, os.path.join(self._archive, self.revision))
- logging.info('Archived the .coverage files')
- # Delete all the temp files and folders
- if self._dir != None:
- shutil.rmtree(self._dir, ignore_errors=True)
- logging.info('Cleaned up temporary files and folders')
- # Reset the instrumented flag.
- self.instrumented = False
-
- def RunTest(self, src_root, test):
- """Run tests and collect the .coverage file
-
- Args:
- src_root: Path to the root of the source.
- test: Path to the test to be run.
-
- Returns:
- Path of the intermediate .coverage file on success.
- None on error.
- """
- # Generate the intermediate file name for the coverage results
- test_name = os.path.split(test[0])[1].strip('.exe')
- # test_command = binary + args
- test_command = '%s %s' % (os.path.join(src_root,
- 'chrome',
- 'Release',
- test[0]),
- test[1])
-
- coverage_file = os.path.join(self._dir, '%s_win32_%s.coverage' %
- (test_name, self.revision))
- logging.info('.coverage file for test %s: %s' % (test_name, coverage_file))
-
- # After all the binaries have been instrumented, we start the counters.
- counters_command = ('%s -start:coverage -output:%s' %
- (os.path.join(self.tools_path, 'vsperfcmd.exe'),
- coverage_file))
- # Here we use subprocess.call() instead of the RunCommandFull because the
- # VSPerfCmd spawns another process before terminating and this confuses
- # the subprocess.Popen() used by RunCommandFull.
- retcode = subprocess.call(counters_command)
-
- # Run the test binary
- logging.info('Executing test %s: ' % test_command)
- (retcode, output) = proc.RunCommandFull(test_command, collect_output=True)
- if retcode != 0: # Return error if the tests fail
- logging.error('One or more tests failed in %s.' % test_command)
- return None
-
- # Stop the counters
- counters_command = ('%s -shutdown' %
- (os.path.join(self.tools_path, 'vsperfcmd.exe')))
- (retcode, output) = proc.RunCommandFull(counters_command,
- collect_output=True)
- logging.info('Counters shut down: %s' % (output))
- # Return the intermediate .coverage file
- return coverage_file
-
- def Upload(self, list_coverage, upload_path, sym_path=None, src_root=None):
- """Upload the results to the dashboard.
-
- This method uploads the coverage data to a dashboard where it will be
- processed. On Windows, this method will first convert the .coverage file to
- the lcov format. This method needs to be called before the TearDown method.
-
- Args:
- list_coverage: The list of coverage data files to consolidate and upload.
- upload_path: Destination where the coverage data will be processed.
- sym_path: Symbol path for the build (Win32 only)
- src_root: Root folder of the source tree (Win32 only)
-
- Returns:
- True on success.
- False on failure.
- """
- if upload_path == None:
- logging.info('Upload path not specified. Will not convert to LCOV')
- return True
-
- if IsWindows():
- # Stop counters
- counters_command = ('%s -shutdown' %
- (os.path.join(self.tools_path, 'vsperfcmd.exe')))
- (retcode, output) = proc.RunCommandFull(counters_command,
- collect_output=True)
- logging.info('Counters shut down: %s' % (output))
- lcov_file = os.path.join(upload_path, 'chrome_win32_%s.lcov' %
- (self.revision))
- lcov = open(lcov_file, 'w')
- for coverage_file in list_coverage:
- # Convert the intermediate .coverage file to lcov format
- if self.tools_path == None:
- logging.error('Lcov converter tool not found')
- return False
- self.tools_path = self.tools_path.rstrip('\\')
- convert_command = ('%s -sym_path=%s -src_root=%s %s' %
- (os.path.join(self.tools_path,
- 'coverage_analyzer.exe'),
- sym_path,
- src_root,
- coverage_file))
- (retcode, output) = proc.RunCommandFull(convert_command,
- collect_output=True)
- # TODO(niranjan): Fix this to check for the correct return code.
-# if output != 0:
-# logging.error('Conversion to LCOV failed. Exiting.')
- tmp_lcov_file = coverage_file + '.lcov'
- logging.info('Conversion to lcov complete for %s' % (coverage_file))
- # Now append this .lcov file to the cumulative lcov file
- logging.info('Consolidating LCOV file: %s' % (tmp_lcov_file))
- tmp_lcov = open(tmp_lcov_file, 'r')
- lcov.write(tmp_lcov.read())
- tmp_lcov.close()
- lcov.close()
- logging.info('LCOV file uploaded to %s' % (upload_path))
-
-
-def main():
- # Command line parsing
- parser = optparse.OptionParser()
- # Path where the .coverage to .lcov converter tools are stored.
- parser.add_option('-t',
- '--tools_path',
- dest='tools_path',
- default=None,
- help='Location of the coverage tools (windows only)')
- parser.add_option('-u',
- '--upload',
- dest='upload_path',
- default=None,
- help='Location where the results should be uploaded')
- # We need the revision number so that we can generate the output file of the
- # format chrome_<platform>_<revision>.lcov
- parser.add_option('-r',
- '--revision',
- dest='revision',
- default=None,
- help='Revision number of the Chromium source repo')
- # Root of the source tree. Needed for converting the generated .coverage file
- # on Windows to the open source lcov format.
- parser.add_option('-s',
- '--src_root',
- dest='src_root',
- default=None,
- help='Root of the source repository')
- parser.add_option('-a',
- '--archive',
- dest='archive',
- default=None,
- help='Archive location of the intermediate .coverage data')
-
- (options, args) = parser.parse_args()
-
- if options.revision == None:
- parser.error('Revision number not specified')
- if options.src_root == None:
- parser.error('Source root not specified')
-
- if IsWindows():
- # Initialize coverage
- cov = Coverage(options.revision,
- options.src_root,
- options.tools_path,
- options.archive)
- list_coverage = []
- # Instrument the binaries
- if cov.SetUp(windows_binaries):
- # Run all the tests
- for test in windows_tests:
- coverage = cov.RunTest(options.src_root, test)
- if coverage == None: # Indicate failure to the buildbots.
- return 1
- # Collect the intermediate file
- list_coverage.append(coverage)
- else:
- logging.error('Error during instrumentation.')
- sys.exit(1)
-
- cov.Upload(list_coverage,
- options.upload_path,
- os.path.join(options.src_root, 'chrome', 'Release'),
- options.src_root)
- cov.TearDown()
-
-
-if __name__ == '__main__':
- sys.exit(main())
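
For readers skimming this change, the following is a minimal, hypothetical sketch of the instrument/run/collect cycle that the deleted coverage.py drove on Windows. The tool names and flags (vsinstr.exe /COVERAGE, vsperfcmd.exe -start:coverage and -shutdown) are taken from the code above; the helper name, argument layout, and use of subprocess.check_call are illustrative only and have not been validated against a current Visual Studio toolchain.

import os
import subprocess

def run_instrumented_test(tools_path, build_dir, test_exe, test_args,
                          out_dir, revision):
    # Paths to the Visual Studio Team Tools binaries the deleted script relied on.
    vsinstr = os.path.join(tools_path, 'vsinstr.exe')
    vsperfcmd = os.path.join(tools_path, 'vsperfcmd.exe')
    binary = os.path.join(build_dir, test_exe)

    # 1. Instrument the binary for coverage (the binary must be linked with /PROFILE).
    subprocess.check_call([vsinstr, '/COVERAGE', binary])

    # 2. Start the coverage counters, writing to an intermediate .coverage file.
    test_name = os.path.splitext(test_exe)[0]
    coverage_file = os.path.join(out_dir,
                                 '%s_win32_%s.coverage' % (test_name, revision))
    subprocess.check_call([vsperfcmd, '-start:coverage',
                           '-output:' + coverage_file])
    try:
        # 3. Run the instrumented test while the counters are recording.
        subprocess.check_call([binary] + list(test_args))
    finally:
        # 4. Shut the counters down so the .coverage file is flushed to disk.
        subprocess.check_call([vsperfcmd, '-shutdown'])
    return coverage_file

The deleted script then handed the resulting .coverage files to coverage_analyzer.exe to convert them to lcov before uploading; that conversion step is omitted from the sketch.
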
diff --git a/tools/code_coverage/process_coverage.py b/tools/code_coverage/process_coverage.py
deleted file mode 100755
index 07d83ac..0000000
--- a/tools/code_coverage/process_coverage.py
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""Script to clean the lcov files and convert it to HTML
-
-TODO(niranjan): Add usage information here
-"""
-
-
-import optparse
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import urllib2
-
-
-# These are source files that were generated during compile time. We want to
-# remove references to these files from the lcov file otherwise genhtml will
-# throw an error.
-win32_srcs_exclude = ['parse.y',
- 'xpathgrammar.cpp',
- 'cssgrammar.cpp',
- 'csspropertynames.gperf']
-
-# Number of lines of a new coverage data set
-# to send at a time to the dashboard.
-POST_CHUNK_SIZE = 50
-
-# Number of post request failures to allow before exiting.
-MAX_FAILURES = 5
-
-def CleanPathNames(dir):
- """Clean the pathnames of the HTML generated by genhtml.
-
- This method is required only for code coverage on Win32. Due to a known issue
- with reading from CIFS shares mounted on Linux, genhtml appends a ^M to every
- file name it reads from the Windows share, causing corrupt filenames in
- genhtml's output folder.
-
- Args:
- dir: Output folder of the genhtml output.
-
- Returns:
- None
- """
- # Strip off the ^M characters that get appended to the file name
- for dirpath, dirname, filenames in os.walk(dir):
- for file in filenames:
- file_clean = file.replace('\r', '')
- if file_clean != file:
- os.rename(file, file_clean)
-
-
-def GenerateHtml(lcov_path, dash_root):
- """Runs genhtml to convert lcov data to human readable HTML.
-
- This script expects the LCOV file name to be in the format:
- chrome_<platform>_<revision#>.lcov.
- This method parses the file name and then sets up the correct folder
- hierarchy for the coverage data and then runs genhtml to get the actual HTML
- formatted coverage data.
-
- Args:
- lcov_path: Path of the lcov data file.
- dash_root: Root location of the dashboard.
-
- Returns:
- Code coverage percentage on success.
- None on failure.
- """
- # Parse the LCOV file name.
- filename = os.path.basename(lcov_path).split('.')[0]
- buffer = filename.split('_')
- dash_root = dash_root.rstrip('/') # Remove trailing '/'
-
- # Set up correct folder hierarchy in the dashboard root
- # TODO(niranjan): Check the formatting using a regexp
- if len(buffer) >= 3: # Check if filename has right formatting
- platform = buffer[len(buffer) - 2]
- revision = buffer[len(buffer) - 1]
- if os.path.exists(os.path.join(dash_root, platform)) == False:
- os.mkdir(os.path.join(dash_root, platform))
- output_dir = os.path.join(dash_root, platform, revision)
- os.mkdir(output_dir)
- else:
- # TODO(niranjan): Add failure logging here.
- return None # File not formatted correctly
-
- # Run genhtml
- os.system('/usr/bin/genhtml -o %s %s' % (output_dir, lcov_path))
- # TODO(niranjan): Check the exit status of the genhtml command.
- # TODO(niranjan): Parse the stdout and return coverage percentage.
- CleanPathNames(output_dir)
- return 'dummy' # TODO(niranjan): Return actual percentage.
-
-
-def CleanWin32Lcov(lcov_path, src_root):
- """Cleanup the lcov data generated on Windows.
-
- This method fixes up the paths inside the lcov file from the Win32 specific
- paths to the actual paths of the mounted CIFS share. The lcov files generated
- on Windows have the following format:
-
- SF:c:\chrome_src\src\skia\sgl\skscan_antihair.cpp
- DA:97,0
- DA:106,0
- DA:107,0
- DA:109,0
- ...
- end_of_record
-
- This method changes the source-file (SF) lines to a format compatible with
- genhtml on Linux by fixing paths. This method also removes references to
- certain dynamically generated files to be excluded from the code coverage.
-
- Args:
- lcov_path: Path of the Win32 lcov file to be cleaned.
- src_root: Location of the source and symbols dir.
- Returns:
- None
- """
- strip_flag = False
- lcov = open(lcov_path, 'r')
- loc_csv_file = open(lcov_path + '.csv', 'w')
- (tmpfile_id, tmpfile_name) = tempfile.mkstemp()
- tmpfile = open(tmpfile_name, 'w')
- src_root = src_root.rstrip('/') # Remove trailing '/'
- for line in lcov:
- if line.startswith('SF'):
- # We want to exclude certain auto-generated files otherwise genhtml will
- # fail to convert lcov to HTML.
- for exp in win32_srcs_exclude:
- if line.rfind(exp) != -1:
- strip_flag = True # Indicates that we want to remove this section
-
- # Now we normalize the paths
- # e.g. Change SF:c:\foo\src\... to SF:/chrome_src/...
- parse_buffer = line.split(':')
- buffer = '%s:%s%s' % (parse_buffer[0],
- src_root,
- parse_buffer[2])
- buffer = buffer.replace('\\', '/')
- line = buffer.replace('\r', '')
-
- # We want an accurate count of the lines of code in a given file so that
- # we can estimate the code coverage percentage accurately. We use a
- # third party script cloc.pl which gives that count and then just parse
- # its command line output to filter out the other unnecessary data.
- # TODO(niranjan): Find out a better way of doing this.
- buffer = buffer.lstrip('SF:')
- file_for_loc = buffer.replace('\r\n', '')
- # TODO(niranjan): Add a check to see if cloc is present on the machine.
- command = ["perl",
- "cloc.pl",
- file_for_loc]
- output = subprocess.Popen(command,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT).communicate()[0]
- if output.rfind('error:') != -1:
- return None
-
- tmp_buf1 = output.split('=')
- tmp_buf2 = tmp_buf1[len(tmp_buf1) - 2].split('x')[0].split(' ')
- loc = tmp_buf2[len(tmp_buf2) - 2]
- loc_csv_file.write('%s,%s\r\n' % (file_for_loc, loc))
-
- # Write to the temp file if the section to write is valid
- if strip_flag == False:
- # Also write this to the 'clean' LCOV file
- tmpfile.write('%s' % (line))
-
- # Reset the strip flag
- if line.endswith('end_of_record'):
- strip_flag = False
-
- # Close the files and replace the lcov file by the 'clean' tmpfile
- tmpfile.close()
- lcov.close()
- loc_csv_file.close()
- shutil.move(tmpfile_name, lcov_path)
-
-
-def ParseCoverageDataForDashboard(lcov_path):
- """Parse code coverage data into coverage results per source node.
-
- Use lcov and linecount data to create a map of source nodes to
- corresponding total and tested line counts.
-
- Args:
- lcov_path: File path to lcov coverage data.
-
- Returns:
- List of strings with comma separated source node and coverage.
- """
- results = {}
- linecount_path = lcov_path + '.csv'
- assert os.path.exists(linecount_path), (
- 'linecount csv does not exist at: %s' % linecount_path)
- csv_file = open(linecount_path, 'r')
- linecounts = csv_file.readlines()
- csv_file.close()
- lcov_file = open(lcov_path, 'r')
- srcfile_index = 0
- for line in lcov_file:
- line = line.strip()
-
- # Set the current srcfile name for a new src file declaration.
- if line[:len('SF:')] == 'SF:':
- instrumented_set = {}
- executed_set = {}
- srcfile_name = line[len('SF:'):]
-
- # Mark coverage data points hashlist style for the current src file.
- if line[:len('DA:')] == 'DA:':
- line_info = line[len('DA:'):].split(',')
- assert len(line_info) == 2, 'DA: line format unexpected - %s' % line
- (line_num, line_was_executed) = line_info
- instrumented_set[line_num] = True
- # line_was_executed is '0' or '1'
- if int(line_was_executed):
- executed_set[line_num] = True
-
- # Update results for the current src file at record end.
- if line == 'end_of_record':
- instrumented = len(instrumented_set.keys())
- executed = len(executed_set.keys())
- parent_directory = srcfile_name[:srcfile_name.rfind('/') + 1]
- linecount_point = linecounts[srcfile_index].strip().split(',')
- assert len(linecount_point) == 2, (
- 'linecount format unexpected - %s' % linecounts[srcfile_index])
- (linecount_path, linecount_count) = linecount_point
- srcfile_index += 1
-
- # Sanity check that path names in the lcov and linecount are lined up.
- if linecount_path[-10:] != srcfile_name[-10:]:
- print 'NAME MISMATCH: %s :: %s' % (srcfile_name, linecount_path)
- if instrumented > int(linecount_count):
- linecount_count = instrumented
-
- # Keep counts the same way that it is done in the genhtml utility.
- # Count the coverage of a file towards the file,
- # the parent directory, and the source root.
- AddResults(results, srcfile_name, int(linecount_count), executed)
- AddResults(results, parent_directory, int(linecount_count), executed)
- AddResults(results, '/', instrumented, executed)
-
- lcov_file.close()
- keys = results.keys()
- keys.sort()
- # The first key (sorted) will be the base directory '/'
- # but its full path may be '/mnt/chrome_src/src/'
- # using this offset will ignore the part '/mnt/chrome_src/src'.
- # Offset is the last '/' that isn't the last character for the
- # first directory name in results (position 1 in keys).
- offset = len(keys[1][:keys[1][:-1].rfind('/')])
- lines = []
- for key in keys:
- if len(key) > offset:
- node_path = key[offset:]
- else:
- node_path = key
- (total, covered) = results[key]
- percent = float(covered) * 100 / total
- lines.append('%s,%.2f' % (node_path, percent))
- return lines
-
-
-def AddResults(results, location, lines_total, lines_executed):
- """Add resulting line tallies to a location's total.
-
- Args:
- results: Map of node location to corresponding coverage data.
- location: Source node string.
- lines_total: Number of lines to add to the total count for this node.
- lines_executed: Number of lines to add to the executed count for this node.
- """
- if results.has_key(location):
- (i, e) = results[location]
- results[location] = (i + lines_total, e + lines_executed)
- else:
- results[location] = (lines_total, lines_executed)
-
-
-def PostResultsToDashboard(lcov_path, results, post_url):
- """Post coverage results to coverage dashboard.
-
- Args:
- lcov_path: File path for lcov data in the expected format:
- <project>_<platform>_<cl#>.coverage.lcov
- results: string list in the appropriate posting format.
- """
- project_platform_cl = lcov_path.split('.')[0].split('_')
- assert len(project_platform_cl) == 3, (
- 'lcov_path not in expected format: %s' % lcov_path)
- (project, platform, cl_string) = project_platform_cl
- project_name = '%s-%s' % (project, platform)
- url = '%s/newdata.do?project=%s&cl=%s' % (post_url, project_name, cl_string)
-
- # Send POSTs of POST_CHUNK_SIZE lines of the result set until
- # there is no more data and last_loop is set to True.
- last_loop = False
- cur_line = 0
- while not last_loop:
- body = '\n'.join(results[cur_line:cur_line + POST_CHUNK_SIZE])
- cur_line += POST_CHUNK_SIZE
- last_loop = (cur_line >= len(results))
- req = urllib2.Request('%s&last=%s' % (url, str(last_loop)), body)
- req.add_header('Content-Type', 'text/plain')
- SendPost(req)
-
-
-# Global counter for the current number of request failures.
-num_fails = 0
-
-def SendPost(req):
- """Execute a post request and retry for up to MAX_FAILURES.
-
- Args:
- req: A urllib2 request object.
-
- Raises:
- URLError: If urlopen throws after too many retries.
- HTTPError: If urlopen throws after too many retries.
- """
- global num_fails
- try:
- urllib2.urlopen(req)
- # Reset failure count.
- num_fails = 0
- except (urllib2.URLError, urllib2.HTTPError):
- num_fails += 1
- if num_fails < MAX_FAILURES:
- print 'fail, retrying (%d)' % num_fails
- time.sleep(5)
- SendPost(req)
- else:
- print 'POST request exceeded allowed retries.'
- raise
-
-
-def main():
- if not sys.platform.startswith('linux'):
- print 'This script is supported only on Linux'
- return 0
-
- # Command line parsing
- parser = optparse.OptionParser()
- parser.add_option('-p',
- '--platform',
- dest='platform',
- default=None,
- help=('Platform that the lcov file was generated on. Must'
- ' be one of {win32, linux2, linux3, macosx}'))
- parser.add_option('-s',
- '--source',
- dest='src_dir',
- default=None,
- help='Path to the source code and symbols')
- parser.add_option('-d',
- '--dash_root',
- dest='dash_root',
- default=None,
- help='Root directory for the dashboard')
- parser.add_option('-l',
- '--lcov',
- dest='lcov_path',
- default=None,
- help='Location of the LCOV file to process')
- parser.add_option('-u',
- '--post_url',
- dest='post_url',
- default=None,
- help='Base URL of the coverage dashboard')
- (options, args) = parser.parse_args()
-
- if options.platform == None:
- parser.error('Platform not specified')
- if options.lcov_path == None:
- parser.error('lcov file path not specified')
- if options.src_dir == None:
- parser.error('Source directory not specified')
- if options.dash_root == None:
- parser.error('Dashboard root not specified')
- if options.post_url == None:
- parser.error('Post URL not specified')
- if options.platform == 'win32':
- CleanWin32Lcov(options.lcov_path, options.src_dir)
- percent = GenerateHtml(options.lcov_path, options.dash_root)
- if percent == None:
- # TODO(niranjan): Add logging.
- print 'Failed to generate code coverage'
- return 1
- else:
- # TODO(niranjan): Do something with the code coverage numbers
- pass
- else:
- print 'Unsupported platform'
- return 1
-
- # Prep coverage results for dashboard and post new set.
- parsed_data = ParseCoverageDataForDashboard(options.lcov_path)
- PostResultsToDashboard(options.lcov_path, parsed_data, options.post_url)
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
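
As a companion to the deleted process_coverage.py, here is a small, hypothetical sketch of the SF:/DA:/end_of_record parsing that ParseCoverageDataForDashboard performed, reduced to per-file instrumented and executed line counts. The record names follow the lcov excerpt shown in CleanWin32Lcov's docstring; the function name, the output shape, and the omission of the linecount CSV, path normalization, and dashboard roll-up are illustrative simplifications, not part of the original script.

def parse_lcov(lcov_path):
    """Return {source file: (instrumented lines, executed lines)} from an lcov file."""
    coverage = {}
    instrumented, executed = set(), set()
    src = None
    with open(lcov_path) as lcov_file:
        for line in lcov_file:
            line = line.strip()
            if line.startswith('SF:'):
                # A new source-file record begins; reset the per-file sets.
                src = line[len('SF:'):]
                instrumented, executed = set(), set()
            elif line.startswith('DA:'):
                # DA:<line number>,<execution count>[,<checksum>]
                line_num, hit_count = line[len('DA:'):].split(',')[:2]
                instrumented.add(line_num)
                if int(hit_count):
                    executed.add(line_num)
            elif line == 'end_of_record' and src is not None:
                coverage[src] = (len(instrumented), len(executed))
    return coverage

# Example usage: print per-file coverage percentages.
# for path, (total, hit) in sorted(parse_lcov('chrome_win32_234396.lcov').items()):
#     print('%s,%.2f' % (path, 100.0 * hit / total if total else 0.0))
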