| author | phajdan.jr <phajdan.jr@chromium.org> | 2015-10-14 09:07:14 -0700 |
|---|---|---|
| committer | Commit bot <commit-bot@chromium.org> | 2015-10-14 16:08:09 +0000 |
| commit | d0843b400ecc42728c229f9eebb44aefc9a31f3f (patch) | |
| tree | 773a04b52eb176053e342ea7c5030057a6e369a6 /infra | |
| parent | b3fb641552ace46ce637f4345ac8809cbfbe0059 (diff) | |
| download | chromium_src-d0843b400ecc42728c229f9eebb44aefc9a31f3f.zip chromium_src-d0843b400ecc42728c229f9eebb44aefc9a31f3f.tar.gz chromium_src-d0843b400ecc42728c229f9eebb44aefc9a31f3f.tar.bz2 | |
runtest.py: remove annotation related logic
BUG=506498
Review URL: https://codereview.chromium.org/1408563002
Cr-Commit-Position: refs/heads/master@{#354033}
Diffstat (limited to 'infra')
-rwxr-xr-x | infra/scripts/legacy/scripts/common/gtest_utils.py | 167
-rw-r--r-- | infra/scripts/legacy/scripts/slave/annotation_utils.py | 132
-rwxr-xr-x | infra/scripts/legacy/scripts/slave/runtest.py | 44
3 files changed, 3 insertions, 340 deletions
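Setting the deleted helpers aside, the change to runtest.py itself is small: instead of selecting a GTestJSONParser, creating a (possibly temporary) JSON summary file, and emitting buildbot annotations after the run, the script now only forwards the caller-supplied summary path to the test launcher. Below is a minimal sketch of that surviving behaviour; `build_test_command` is a hypothetical helper for illustration, not a function in runtest.py, and the real script additionally handles xvfb, isolate wrapping, and option parsing.

```python
# Illustrative sketch only; build_test_command is a hypothetical helper.
def build_test_command(test_exe_path, extra_args, test_launcher_summary_output=None):
  command = [test_exe_path] + list(extra_args)
  if test_launcher_summary_output:
    # The path is now passed straight through to the gtest launcher;
    # runtest.py no longer creates a temp file or parses the summary itself.
    command.append('--test-launcher-summary-output=%s' %
                   test_launcher_summary_output)
  return command


print(build_test_command('./out/Release/base_unittests',
                         ['--gtest_filter=Foo.*'],
                         '/tmp/summary.json'))
```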
diff --git a/infra/scripts/legacy/scripts/common/gtest_utils.py b/infra/scripts/legacy/scripts/common/gtest_utils.py
deleted file mode 100755
index 4a30719..0000000
--- a/infra/scripts/legacy/scripts/common/gtest_utils.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import os
-import re
-import tempfile
-
-
-# These labels should match the ones output by gtest's JSON.
-TEST_UNKNOWN_LABEL = 'UNKNOWN'
-TEST_SUCCESS_LABEL = 'SUCCESS'
-TEST_FAILURE_LABEL = 'FAILURE'
-TEST_FAILURE_ON_EXIT_LABEL = 'FAILURE_ON_EXIT'
-TEST_CRASH_LABEL = 'CRASH'
-TEST_TIMEOUT_LABEL = 'TIMEOUT'
-TEST_SKIPPED_LABEL = 'SKIPPED'
-TEST_WARNING_LABEL = 'WARNING'
-
-FULL_RESULTS_FILENAME = 'full_results.json'
-TIMES_MS_FILENAME = 'times_ms.json'
-
-def CompressList(lines, max_length, middle_replacement):
-  """Ensures that |lines| is no longer than |max_length|. If |lines| need to
-  be compressed then the middle items are replaced by |middle_replacement|.
-  """
-  if len(lines) <= max_length:
-    return lines
-  remove_from_start = max_length / 2
-  return (lines[:remove_from_start] +
-          [middle_replacement] +
-          lines[len(lines) - (max_length - remove_from_start):])
-
-
-class GTestJSONParser(object):
-  # Limit of output snippet lines. Avoids flooding the logs with amount
-  # of output that gums up the infrastructure.
-  OUTPUT_SNIPPET_LINES_LIMIT = 5000
-
-  def __init__(self, mastername=None):
-    self.json_file_path = None
-    self.delete_json_file = False
-
-    self.disabled_tests = set()
-    self.passed_tests = set()
-    self.failed_tests = set()
-    self.flaky_tests = set()
-    self.test_logs = {}
-    self.run_results = {}
-
-    self.parsing_errors = []
-
-    self.master_name = mastername
-
-    # List our labels that match the ones output by gtest JSON.
-    self.SUPPORTED_LABELS = (TEST_UNKNOWN_LABEL,
-                             TEST_SUCCESS_LABEL,
-                             TEST_FAILURE_LABEL,
-                             TEST_FAILURE_ON_EXIT_LABEL,
-                             TEST_CRASH_LABEL,
-                             TEST_TIMEOUT_LABEL,
-                             TEST_SKIPPED_LABEL)
-
-  def ProcessLine(self, line):
-    # Deliberately do nothing - we parse out-of-band JSON summary
-    # instead of in-band stdout.
-    pass
-
-  def PassedTests(self):
-    return sorted(self.passed_tests)
-
-  def FailedTests(self, include_fails=False, include_flaky=False):
-    return sorted(self.failed_tests)
-
-  def TriesForTest(self, test):
-    """Returns a list containing the state for all tries of the given test."""
-    return self.run_results.get(test, [TEST_UNKNOWN_LABEL])
-
-  def FailureDescription(self, test):
-    return self.test_logs.get(test, [])
-
-  @staticmethod
-  def MemoryToolReportHashes():
-    return []
-
-  def ParsingErrors(self):
-    return self.parsing_errors
-
-  def ClearParsingErrors(self):
-    self.parsing_errors = ['Cleared.']
-
-  def DisabledTests(self):
-    return len(self.disabled_tests)
-
-  def FlakyTests(self):
-    return len(self.flaky_tests)
-
-  @staticmethod
-  def RunningTests():
-    return []
-
-  def PrepareJSONFile(self, cmdline_path):
-    if cmdline_path:
-      self.json_file_path = cmdline_path
-      # If the caller requested JSON summary, do not delete it.
-      self.delete_json_file = False
-    else:
-      fd, self.json_file_path = tempfile.mkstemp()
-      os.close(fd)
-      # When we create the file ourselves, delete it to avoid littering.
-      self.delete_json_file = True
-    return self.json_file_path
-
-  def ProcessJSONFile(self, build_dir):
-    if not self.json_file_path:
-      return
-
-    with open(self.json_file_path) as json_file:
-      try:
-        json_output = json_file.read()
-        json_data = json.loads(json_output)
-      except ValueError:
-        # Only signal parsing error if the file is non-empty. Empty file
-        # most likely means the binary doesn't support JSON output.
-        if json_output:
-          self.parsing_errors = json_output.split('\n')
-      else:
-        self.ProcessJSONData(json_data, build_dir)
-
-    if self.delete_json_file:
-      os.remove(self.json_file_path)
-
-  def ProcessJSONData(self, json_data, build_dir=None):
-    self.disabled_tests.update(json_data['disabled_tests'])
-
-    for iteration_data in json_data['per_iteration_data']:
-      for test_name, test_runs in iteration_data.iteritems():
-        if test_runs[-1]['status'] == 'SUCCESS':
-          self.passed_tests.add(test_name)
-        else:
-          self.failed_tests.add(test_name)
-
-        self.run_results[test_name] = []
-        self.test_logs.setdefault(test_name, [])
-        for run_index, run_data in enumerate(test_runs, start=1):
-          # Mark as flaky if the run result differs.
-          if run_data['status'] != test_runs[0]['status']:
-            self.flaky_tests.add(test_name)
-          if run_data['status'] in self.SUPPORTED_LABELS:
-            self.run_results[test_name].append(run_data['status'])
-          else:
-            self.run_results[test_name].append(TEST_UNKNOWN_LABEL)
-          run_lines = ['%s (run #%d):' % (test_name, run_index)]
-          # Make sure the annotations are ASCII to avoid character set related
-          # errors. They are mostly informational anyway, and more detailed
-          # info can be obtained from the original JSON output.
-          ascii_lines = run_data['output_snippet'].encode('ascii',
-                                                          errors='replace')
-          decoded_lines = CompressList(
-              ascii_lines.decode('string_escape').split('\n'),
-              self.OUTPUT_SNIPPET_LINES_LIMIT,
-              '<truncated, full output is in gzipped JSON '
-              'output at end of step>')
-          run_lines.extend(decoded_lines)
-          self.test_logs[test_name].extend(run_lines)
diff --git a/infra/scripts/legacy/scripts/slave/annotation_utils.py b/infra/scripts/legacy/scripts/slave/annotation_utils.py
deleted file mode 100644
index 331daa2..0000000
--- a/infra/scripts/legacy/scripts/slave/annotation_utils.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Generates annotated output.
-
-TODO(stip): Move the gtest_utils gtest parser selection code from runtest.py
-to here.
-TODO(stip): Move the perf dashboard code from runtest.py to here.
-"""
-
-import re
-
-from slave import slave_utils
-
-
-# Status codes that can be returned by the evaluateCommand method.
-# From buildbot.status.builder.
-# See: http://docs.buildbot.net/current/developer/results.html
-SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
-
-
-def getText(result, observer, name):
-  """Generate a text summary for the waterfall.
-
-  Updates the waterfall with any unusual test output, with a link to logs of
-  failed test steps.
- """ - GTEST_DASHBOARD_BASE = ('http://test-results.appspot.com' - '/dashboards/flakiness_dashboard.html') - - # TODO(xusydoc): unify this with gtest reporting below so getText() is - # less confusing - if hasattr(observer, 'PerformanceSummary'): - basic_info = [name] - summary_text = ['<div class="BuildResultInfo">'] - summary_text.extend(observer.PerformanceSummary()) - summary_text.append('</div>') - return basic_info + summary_text - - # basic_info is an array of lines to display on the waterfall. - basic_info = [name] - - disabled = observer.DisabledTests() - if disabled: - basic_info.append('%s disabled' % str(disabled)) - - flaky = observer.FlakyTests() - if flaky: - basic_info.append('%s flaky' % str(flaky)) - - failed_test_count = len(observer.FailedTests()) - if failed_test_count == 0: - if result == SUCCESS: - return basic_info - elif result == WARNINGS: - return basic_info + ['warnings'] - - if observer.RunningTests(): - basic_info += ['did not complete'] - - # TODO(xusydoc): see if 'crashed or hung' should be tracked by RunningTests(). - if failed_test_count: - failure_text = ['failed %d' % failed_test_count] - if observer.master_name: - # Include the link to the flakiness dashboard. - failure_text.append('<div class="BuildResultInfo">') - failure_text.append('<a href="%s#testType=%s' - '&tests=%s">' % (GTEST_DASHBOARD_BASE, - name, - ','.join(observer.FailedTests()))) - failure_text.append('Flakiness dashboard') - failure_text.append('</a>') - failure_text.append('</div>') - else: - failure_text = ['crashed or hung'] - return basic_info + failure_text - - -def annotate(test_name, result, log_processor, perf_dashboard_id=None): - """Given a test result and tracker, update the waterfall with test results.""" - - # Always print raw exit code of the subprocess. This is very helpful - # for debugging, especially when one gets the "crashed or hung" message - # with no output (exit code can have some clues, especially on Windows). - print 'exit code (as seen by runtest.py): %d' % result - - get_text_result = SUCCESS - - for failure in sorted(log_processor.FailedTests()): - clean_test_name = re.sub(r'[^\w\.\-]', '_', failure) - slave_utils.WriteLogLines(clean_test_name, - log_processor.FailureDescription(failure)) - for report_hash in sorted(log_processor.MemoryToolReportHashes()): - slave_utils.WriteLogLines(report_hash, - log_processor.MemoryToolReport(report_hash)) - - if log_processor.ParsingErrors(): - # Generate a log file containing the list of errors. 
-    slave_utils.WriteLogLines('log parsing error(s)',
-                              log_processor.ParsingErrors())
-
-    log_processor.ClearParsingErrors()
-
-  if hasattr(log_processor, 'evaluateCommand'):
-    parser_result = log_processor.evaluateCommand('command')
-    if parser_result > result:
-      result = parser_result
-
-  if result == SUCCESS:
-    if (len(log_processor.ParsingErrors()) or
-        len(log_processor.FailedTests()) or
-        len(log_processor.MemoryToolReportHashes())):
-      print '@@@STEP_WARNINGS@@@'
-      get_text_result = WARNINGS
-  elif result == slave_utils.WARNING_EXIT_CODE:
-    print '@@@STEP_WARNINGS@@@'
-    get_text_result = WARNINGS
-  else:
-    print '@@@STEP_FAILURE@@@'
-    get_text_result = FAILURE
-
-  for desc in getText(get_text_result, log_processor, test_name):
-    print '@@@STEP_TEXT@%s@@@' % desc
-
-  if hasattr(log_processor, 'PerformanceLogs'):
-    if not perf_dashboard_id:
-      raise Exception('runtest.py error: perf step specified but'
-                      'no test_id in factory_properties!')
-    for logname, log in log_processor.PerformanceLogs().iteritems():
-      lines = [str(l).rstrip() for l in log]
-      slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id)
diff --git a/infra/scripts/legacy/scripts/slave/runtest.py b/infra/scripts/legacy/scripts/slave/runtest.py
index 0bf9d1a..ac54b3d 100755
--- a/infra/scripts/legacy/scripts/slave/runtest.py
+++ b/infra/scripts/legacy/scripts/slave/runtest.py
@@ -21,9 +21,7 @@ import subprocess
 import sys
 
 from common import chromium_utils
-from common import gtest_utils
 
-from slave import annotation_utils
 from slave import build_directory
 from slave import slave_utils
 from slave import xvfb
@@ -120,13 +118,6 @@ def _BuildTestBinaryCommand(_build_dir, test_exe_path, options):
   return command
 
 
-def _UsingGtestJson(options):
-  """Returns True if we're using GTest JSON summary."""
-  return (options.annotate == 'gtest' and
-          not options.run_python_script and
-          not options.run_shell_script)
-
-
 def _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, command):
   """Converts the command to run through the run isolate script.
 
@@ -158,23 +149,6 @@ def _GetSanitizerSymbolizeCommand(strip_path_prefix=None, json_file_name=None):
   return command
 
 
-def _SymbolizeSnippetsInJSON(options, json_file_name):
-  if not json_file_name:
-    return
-  symbolize_command = _GetSanitizerSymbolizeCommand(
-      strip_path_prefix=options.strip_path_prefix,
-      json_file_name=json_file_name)
-  try:
-    p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE)
-    (_, stderr) = p.communicate()
-  except OSError as e:
-    print 'Exception while symbolizing snippets: %s' % e
-
-  if p.returncode != 0:
-    print "Error: failed to symbolize snippets in JSON:\n"
-    print stderr
-
-
 def _Main(options, args, extra_env):
   """Using the target build configuration, run the executable given in the
   first non-option argument, passing any following arguments to that
@@ -240,10 +214,6 @@ def _Main(options, args, extra_env):
     command.extend(args[1:])
 
   log_processor = None
-  if _UsingGtestJson(options):
-    log_processor = gtest_utils.GTestJSONParser(
-        options.build_properties.get('mastername'))
-
   try:
     # TODO(dpranke): checking on test_exe is a temporary hack until we
     # can change the buildbot master to pass --xvfb instead of --no-xvfb
@@ -260,10 +230,9 @@ def _Main(options, args, extra_env):
           with_wm=(options.factory_properties.get('window_manager',
                                                   'True') == 'True'))
 
-    if _UsingGtestJson(options):
-      json_file_name = log_processor.PrepareJSONFile(
-          options.test_launcher_summary_output)
-      command.append('--test-launcher-summary-output=%s' % json_file_name)
+    if options.test_launcher_summary_output:
+      command.append('--test-launcher-summary-output=%s' %
+                     options.test_launcher_summary_output)
 
     command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                           command)
@@ -302,13 +271,6 @@ def _Main(options, args, extra_env):
   finally:
     if start_xvfb:
       xvfb.StopVirtualX(None)
-    if _UsingGtestJson(options):
-      if options.use_symbolization_script:
-        _SymbolizeSnippetsInJSON(options, json_file_name)
-      log_processor.ProcessJSONFile(options.build_dir)
-
-    if options.annotate:
-      annotation_utils.annotate(options.test_type, result, log_processor)
 
   return result
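With the in-process parser removed, whatever invokes runtest.py is expected to read the gtest JSON summary file itself. Below is a minimal, self-contained sketch of such a consumer; the field names (disabled_tests, per_iteration_data, status) mirror the deleted GTestJSONParser.ProcessJSONData above, while the summarize_gtest_json helper itself is illustrative and not part of this CL.

```python
import json


def summarize_gtest_json(path):
  """Collects pass/fail sets from a --test-launcher-summary-output file.

  Illustrative only; the JSON layout is assumed to match what the deleted
  GTestJSONParser consumed.
  """
  with open(path) as f:
    data = json.load(f)

  passed, failed = set(), set()
  for iteration in data.get('per_iteration_data', []):
    for test_name, runs in iteration.items():
      # Like the removed parser, a test's final run decides pass or fail.
      if runs[-1]['status'] == 'SUCCESS':
        passed.add(test_name)
      else:
        failed.add(test_name)

  return {
      'passed': sorted(passed),
      'failed': sorted(failed),
      'disabled': sorted(data.get('disabled_tests', [])),
  }
```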