Diffstat (limited to 'infra/scripts/legacy/scripts/slave')
-rw-r--r-- | infra/scripts/legacy/scripts/slave/annotation_utils.py | 132
-rwxr-xr-x | infra/scripts/legacy/scripts/slave/runtest.py          |  44
2 files changed, 3 insertions, 173 deletions
diff --git a/infra/scripts/legacy/scripts/slave/annotation_utils.py b/infra/scripts/legacy/scripts/slave/annotation_utils.py
deleted file mode 100644
index 331daa2..0000000
--- a/infra/scripts/legacy/scripts/slave/annotation_utils.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Generates annotated output.
-
-TODO(stip): Move the gtest_utils gtest parser selection code from runtest.py
-to here.
-TODO(stip): Move the perf dashboard code from runtest.py to here.
-"""
-
-import re
-
-from slave import slave_utils
-
-
-# Status codes that can be returned by the evaluateCommand method.
-# From buildbot.status.builder.
-# See: http://docs.buildbot.net/current/developer/results.html
-SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
-
-
-def getText(result, observer, name):
-  """Generate a text summary for the waterfall.
-
-  Updates the waterfall with any unusual test output, with a link to logs of
-  failed test steps.
-  """
-  GTEST_DASHBOARD_BASE = ('http://test-results.appspot.com'
-                          '/dashboards/flakiness_dashboard.html')
-
-  # TODO(xusydoc): unify this with gtest reporting below so getText() is
-  # less confusing
-  if hasattr(observer, 'PerformanceSummary'):
-    basic_info = [name]
-    summary_text = ['<div class="BuildResultInfo">']
-    summary_text.extend(observer.PerformanceSummary())
-    summary_text.append('</div>')
-    return basic_info + summary_text
-
-  # basic_info is an array of lines to display on the waterfall.
-  basic_info = [name]
-
-  disabled = observer.DisabledTests()
-  if disabled:
-    basic_info.append('%s disabled' % str(disabled))
-
-  flaky = observer.FlakyTests()
-  if flaky:
-    basic_info.append('%s flaky' % str(flaky))
-
-  failed_test_count = len(observer.FailedTests())
-  if failed_test_count == 0:
-    if result == SUCCESS:
-      return basic_info
-    elif result == WARNINGS:
-      return basic_info + ['warnings']
-
-  if observer.RunningTests():
-    basic_info += ['did not complete']
-
-  # TODO(xusydoc): see if 'crashed or hung' should be tracked by RunningTests().
-  if failed_test_count:
-    failure_text = ['failed %d' % failed_test_count]
-    if observer.master_name:
-      # Include the link to the flakiness dashboard.
-      failure_text.append('<div class="BuildResultInfo">')
-      failure_text.append('<a href="%s#testType=%s'
-                          '&tests=%s">' % (GTEST_DASHBOARD_BASE,
-                                           name,
-                                           ','.join(observer.FailedTests())))
-      failure_text.append('Flakiness dashboard')
-      failure_text.append('</a>')
-      failure_text.append('</div>')
-  else:
-    failure_text = ['crashed or hung']
-  return basic_info + failure_text
-
-
-def annotate(test_name, result, log_processor, perf_dashboard_id=None):
-  """Given a test result and tracker, update the waterfall with test results."""
-
-  # Always print raw exit code of the subprocess. This is very helpful
-  # for debugging, especially when one gets the "crashed or hung" message
-  # with no output (exit code can have some clues, especially on Windows).
-  print 'exit code (as seen by runtest.py): %d' % result
-
-  get_text_result = SUCCESS
-
-  for failure in sorted(log_processor.FailedTests()):
-    clean_test_name = re.sub(r'[^\w\.\-]', '_', failure)
-    slave_utils.WriteLogLines(clean_test_name,
-                              log_processor.FailureDescription(failure))
-  for report_hash in sorted(log_processor.MemoryToolReportHashes()):
-    slave_utils.WriteLogLines(report_hash,
-                              log_processor.MemoryToolReport(report_hash))
-
-  if log_processor.ParsingErrors():
-    # Generate a log file containing the list of errors.
-    slave_utils.WriteLogLines('log parsing error(s)',
-                              log_processor.ParsingErrors())
-
-    log_processor.ClearParsingErrors()
-
-  if hasattr(log_processor, 'evaluateCommand'):
-    parser_result = log_processor.evaluateCommand('command')
-    if parser_result > result:
-      result = parser_result
-
-  if result == SUCCESS:
-    if (len(log_processor.ParsingErrors()) or
-        len(log_processor.FailedTests()) or
-        len(log_processor.MemoryToolReportHashes())):
-      print '@@@STEP_WARNINGS@@@'
-      get_text_result = WARNINGS
-  elif result == slave_utils.WARNING_EXIT_CODE:
-    print '@@@STEP_WARNINGS@@@'
-    get_text_result = WARNINGS
-  else:
-    print '@@@STEP_FAILURE@@@'
-    get_text_result = FAILURE
-
-  for desc in getText(get_text_result, log_processor, test_name):
-    print '@@@STEP_TEXT@%s@@@' % desc
-
-  if hasattr(log_processor, 'PerformanceLogs'):
-    if not perf_dashboard_id:
-      raise Exception('runtest.py error: perf step specified but'
-                      'no test_id in factory_properties!')
-    for logname, log in log_processor.PerformanceLogs().iteritems():
-      lines = [str(l).rstrip() for l in log]
-      slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id)
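The deleted module above was the bridge between a test's exit status and buildbot's "annotator" protocol: annotate() printed magic @@@...@@@ tokens on stdout that the master parsed to set the step's status and attach waterfall text. A minimal sketch of that token protocol, reduced to the status handling visible in the deleted code (Python 3; WARNING_EXIT_CODE stands in for slave_utils.WARNING_EXIT_CODE and its value here is illustrative):

# Minimal sketch of the buildbot annotator protocol driven by the deleted
# annotate(): @@@...@@@ tokens on stdout set step status and step text.
# Result codes mirror buildbot.status.builder, as in the deleted module.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)

# Stand-in for slave_utils.WARNING_EXIT_CODE; the value is illustrative.
WARNING_EXIT_CODE = 88


def annotate_step(result, step_lines):
    """Print annotator tokens for one step's result and summary lines."""
    # Echo the raw exit code first; the deleted code did the same because
    # "crashed or hung" with no other output is hard to debug otherwise.
    print('exit code (as seen by runtest.py): %d' % result)
    if result == SUCCESS:
        pass  # step stays green
    elif result == WARNING_EXIT_CODE:
        print('@@@STEP_WARNINGS@@@')  # step turns orange
    else:
        print('@@@STEP_FAILURE@@@')  # step turns red
    for line in step_lines:
        print('@@@STEP_TEXT@%s@@@' % line)  # extra text on the waterfall


annotate_step(FAILURE, ['base_unittests', 'failed 3'])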
diff --git a/infra/scripts/legacy/scripts/slave/runtest.py b/infra/scripts/legacy/scripts/slave/runtest.py
index 0bf9d1a..ac54b3d 100755
--- a/infra/scripts/legacy/scripts/slave/runtest.py
+++ b/infra/scripts/legacy/scripts/slave/runtest.py
@@ -21,9 +21,7 @@ import subprocess
 import sys
 
 from common import chromium_utils
-from common import gtest_utils
 
-from slave import annotation_utils
 from slave import build_directory
 from slave import slave_utils
 from slave import xvfb
@@ -120,13 +118,6 @@ def _BuildTestBinaryCommand(_build_dir, test_exe_path, options):
   return command
 
 
-def _UsingGtestJson(options):
-  """Returns True if we're using GTest JSON summary."""
-  return (options.annotate == 'gtest' and
-          not options.run_python_script and
-          not options.run_shell_script)
-
-
 def _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, command):
   """Converts the command to run through the run isolate script.
 
@@ -158,23 +149,6 @@ def _GetSanitizerSymbolizeCommand(strip_path_prefix=None, json_file_name=None):
   return command
 
 
-def _SymbolizeSnippetsInJSON(options, json_file_name):
-  if not json_file_name:
-    return
-  symbolize_command = _GetSanitizerSymbolizeCommand(
-      strip_path_prefix=options.strip_path_prefix,
-      json_file_name=json_file_name)
-  try:
-    p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE)
-    (_, stderr) = p.communicate()
-  except OSError as e:
-    print 'Exception while symbolizing snippets: %s' % e
-
-  if p.returncode != 0:
-    print "Error: failed to symbolize snippets in JSON:\n"
-    print stderr
-
-
 def _Main(options, args, extra_env):
   """Using the target build configuration, run the executable given in the
   first non-option argument, passing any following arguments to that
@@ -240,10 +214,6 @@ def _Main(options, args, extra_env):
     command.extend(args[1:])
 
   log_processor = None
-  if _UsingGtestJson(options):
-    log_processor = gtest_utils.GTestJSONParser(
-        options.build_properties.get('mastername'))
-
   try:
     # TODO(dpranke): checking on test_exe is a temporary hack until we
    # can change the buildbot master to pass --xvfb instead of --no-xvfb
@@ -260,10 +230,9 @@ def _Main(options, args, extra_env):
           with_wm=(options.factory_properties.get('window_manager', 'True') ==
                    'True'))
 
-    if _UsingGtestJson(options):
-      json_file_name = log_processor.PrepareJSONFile(
-          options.test_launcher_summary_output)
-      command.append('--test-launcher-summary-output=%s' % json_file_name)
+    if options.test_launcher_summary_output:
+      command.append('--test-launcher-summary-output=%s' %
+                     options.test_launcher_summary_output)
 
     command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                           command)
@@ -302,13 +271,6 @@ def _Main(options, args, extra_env):
 
   finally:
     if start_xvfb:
       xvfb.StopVirtualX(None)
-    if _UsingGtestJson(options):
-      if options.use_symbolization_script:
-        _SymbolizeSnippetsInJSON(options, json_file_name)
-      log_processor.ProcessJSONFile(options.build_dir)
-
-  if options.annotate:
-    annotation_utils.annotate(options.test_type, result, log_processor)
 
   return result
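The runtest.py side of the change collapses the GTest JSON plumbing: instead of having a GTestJSONParser allocate the summary file, then post-processing it (sanitizer symbolization, ProcessJSONFile) and emitting annotations, the script now forwards the caller-supplied path unchanged and never reads the file back. A minimal sketch of the surviving behaviour, using a hypothetical build_command() helper (the real script assembles the command inside _Main()):

# Sketch: forward --test-launcher-summary-output verbatim, as the new
# runtest.py does, rather than routing it through a log parser.
def build_command(test_exe_path, test_launcher_summary_output=None):
    command = [test_exe_path]
    if test_launcher_summary_output:
        # The test launcher writes its JSON summary here; runtest.py no
        # longer opens or post-processes this file itself.
        command.append('--test-launcher-summary-output=%s' %
                       test_launcher_summary_output)
    return command


print(build_command('./base_unittests', '/tmp/summary.json'))
# ['./base_unittests', '--test-launcher-summary-output=/tmp/summary.json']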