author    agable <agable@chromium.org>  2015-07-06 15:17:06 -0700
committer Commit bot <commit-bot@chromium.org>  2015-07-06 22:20:12 +0000
commit    4b46e72bba0a62e8a7b7720ec444fb7cb19c8653 (patch)
tree      03a76699b2ad22d82c41f8236a42f31118c04997 /infra/scripts
parent    51743c8575422a0400c49b2b90fc912e53dd052d (diff)
Revert src-side runtest.py cleanup
This reverts * "Remove Android-specific code from runtest.py" acdd38a3c64dd0aeaf82c4c63b6c669a573780d6. * "Clean up src-side runtest.py, patch #2 (remove log scraping)" d02a4df34615015cede40b8aed73b3320eddcc9b. Reason for reverting: All perf tests have started failing. See bug for details. R=phajdan.jr@chromium.org, simonhatch@chromium.org TBR=akuegel@chromium.org, phajdan.jr@chromium.org, sergiyb@chromium.org, simonhatch@chromium.org NOPRESUBMIT=true NOTREECHECKS=true NOTRY=true BUG=506498,507315 Review URL: https://codereview.chromium.org/1220423002 Cr-Commit-Position: refs/heads/master@{#337494}
Diffstat (limited to 'infra/scripts')
-rwxr-xr-x  infra/scripts/legacy/scripts/common/gtest_utils.py               404
-rw-r--r--  infra/scripts/legacy/scripts/slave/annotation_utils.py            21
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/gtest_slave_utils.py           65
-rw-r--r--  infra/scripts/legacy/scripts/slave/performance_log_processor.py  844
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/runtest.py                    382
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/telemetry_utils.py            114
6 files changed, 1801 insertions(+), 29 deletions(-)
diff --git a/infra/scripts/legacy/scripts/common/gtest_utils.py b/infra/scripts/legacy/scripts/common/gtest_utils.py
index 92d789f..7c50ef6 100755
--- a/infra/scripts/legacy/scripts/common/gtest_utils.py
+++ b/infra/scripts/legacy/scripts/common/gtest_utils.py
@@ -35,6 +35,410 @@ def CompressList(lines, max_length, middle_replacement):
lines[len(lines) - (max_length - remove_from_start):])
+class GTestLogParser(object):
+ """This helper class process GTest test output."""
+
+ def __init__(self):
+ # State tracking for log parsing
+ self.completed = False
+ self._current_test = ''
+ self._failure_description = []
+ self._current_report_hash = ''
+ self._current_report = []
+ self._parsing_failures = False
+
+ # Line number currently being processed.
+ self._line_number = 0
+
+ # List of parsing errors, as human-readable strings.
+ self._internal_error_lines = []
+
+ # Tests are stored here as 'test.name': (status, [description]).
+ # The status should be one of ('started', 'OK', 'failed', 'timeout',
+ # 'warning'). Warning indicates that a test did not pass when run in
+ # parallel with other tests but passed when run alone. The description is
+ # a list of lines detailing the test's error, as reported in the log.
+ self._test_status = {}
+
+ # Reports are stored here as 'hash': [report].
+ self._memory_tool_reports = {}
+
+ # This may be either text or a number. It will be used in the phrase
+ # '%s disabled' or '%s flaky' on the waterfall display.
+ self._disabled_tests = 0
+ self._flaky_tests = 0
+
+ # Regular expressions for parsing GTest logs. Test names look like
+ # SomeTestCase.SomeTest
+ # SomeName/SomeTestCase.SomeTest/1
+ # This regexp also matches SomeName.SomeTest/1, which should be harmless.
+ test_name_regexp = r'((\w+/)?\w+\.\w+(/\d+)?)'
+
+ self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
+ self.master_name = ''
+
+ self._test_name = re.compile(test_name_regexp)
+ self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
+ self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
+ self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
+ self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
+ self._run_test_cases_line = re.compile(
+ r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
+ self._test_timeout = re.compile(
+ r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
+ self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
+ self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')
+
+ self._report_start = re.compile(
+ r'### BEGIN MEMORY TOOL REPORT \(error hash=#([0-9A-F]+)#\)')
+ self._report_end = re.compile(
+ r'### END MEMORY TOOL REPORT \(error hash=#([0-9A-F]+)#\)')
+
+ self._retry_message = re.compile('RETRYING FAILED TESTS:')
+ self.retrying_failed = False
+
+ self.TEST_STATUS_MAP = {
+ 'OK': TEST_SUCCESS_LABEL,
+ 'failed': TEST_FAILURE_LABEL,
+ 'timeout': TEST_TIMEOUT_LABEL,
+ 'warning': TEST_WARNING_LABEL
+ }
+
+ def GetCurrentTest(self):
+ return self._current_test
+
+ def _StatusOfTest(self, test):
+ """Returns the status code for the given test, or 'not known'."""
+ test_status = self._test_status.get(test, ('not known', []))
+ return test_status[0]
+
+ def _TestsByStatus(self, status, include_fails, include_flaky):
+ """Returns list of tests with the given status.
+
+ Args:
+ include_fails: If False, tests containing 'FAILS_' anywhere in their
+ names will be excluded from the list.
+ include_flaky: If False, tests containing 'FLAKY_' anywhere in their
+ names will be excluded from the list.
+ """
+ test_list = [x[0] for x in self._test_status.items()
+ if self._StatusOfTest(x[0]) == status]
+
+ if not include_fails:
+ test_list = [x for x in test_list if x.find('FAILS_') == -1]
+ if not include_flaky:
+ test_list = [x for x in test_list if x.find('FLAKY_') == -1]
+
+ return test_list
+
+ def _RecordError(self, line, reason):
+ """Record a log line that produced a parsing error.
+
+ Args:
+ line: text of the line at which the error occurred
+ reason: a string describing the error
+ """
+ self._internal_error_lines.append('%s: %s [%s]' %
+ (self._line_number, line.strip(), reason))
+
+ def RunningTests(self):
+ """Returns list of tests that appear to be currently running."""
+ return self._TestsByStatus('started', True, True)
+
+ def ParsingErrors(self):
+ """Returns a list of lines that have caused parsing errors."""
+ return self._internal_error_lines
+
+ def ClearParsingErrors(self):
+ """Clears the currently stored parsing errors."""
+ self._internal_error_lines = ['Cleared.']
+
+ def PassedTests(self, include_fails=False, include_flaky=False):
+ """Returns list of tests that passed."""
+ return self._TestsByStatus('OK', include_fails, include_flaky)
+
+ def FailedTests(self, include_fails=False, include_flaky=False):
+ """Returns list of tests that failed, timed out, or didn't finish
+ (crashed).
+
+ This list will be incorrect until the complete log has been processed,
+ because it will show currently running tests as having failed.
+
+ Args:
+ include_fails: If true, all failing tests with FAILS_ in their names will
+ be included. Otherwise, they will only be included if they crashed or
+ timed out.
+ include_flaky: If true, all failing tests with FLAKY_ in their names will
+ be included. Otherwise, they will only be included if they crashed or
+ timed out.
+
+ """
+ return (self._TestsByStatus('failed', include_fails, include_flaky) +
+ self._TestsByStatus('timeout', True, True) +
+ self._TestsByStatus('warning', include_fails, include_flaky) +
+ self.RunningTests())
+
+ def TriesForTest(self, test):
+ """Returns a list containing the state for all tries of the given test.
+ This parser doesn't support retries so a single result is returned."""
+ return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
+ TEST_UNKNOWN_LABEL)]
+
+ def DisabledTests(self):
+ """Returns the name of the disabled test (if there is only 1) or the number
+ of disabled tests.
+ """
+ return self._disabled_tests
+
+ def FlakyTests(self):
+ """Returns the name of the flaky test (if there is only 1) or the number
+ of flaky tests.
+ """
+ return self._flaky_tests
+
+ def FailureDescription(self, test):
+ """Returns a list containing the failure description for the given test.
+
+ If the test didn't fail or timeout, returns [].
+ """
+ test_status = self._test_status.get(test, ('', []))
+ return ['%s: ' % test] + test_status[1]
+
+ def MemoryToolReportHashes(self):
+ """Returns list of report hashes found in the log."""
+ return self._memory_tool_reports.keys()
+
+ def MemoryToolReport(self, report_hash):
+ """Returns a list containing the report for a given hash.
+
+ If the report hash doesn't exist, returns [].
+ """
+ return self._memory_tool_reports.get(report_hash, [])
+
+ def CompletedWithoutFailure(self):
+ """Returns True if all tests completed and no tests failed unexpectedly."""
+ return self.completed and not self.FailedTests()
+
+ def ProcessLine(self, line):
+ """This is called once with each line of the test log."""
+
+ # Track line number for error messages.
+ self._line_number += 1
+
+ # Some tests (net_unittests in particular) run subprocesses which can write
+ # to the shared stdout buffer. Sometimes such output appears between a
+ # newline and gtest directives ('[ RUN ]', etc.), which breaks the parser.
+ # The code below tries to detect such cases and treat a mixed line as two
+ # separate lines.
+
+ # List of regexps that the parser expects to find at the start of a line
+ # but which can be somewhere in the middle.
+ gtest_regexps = [
+ self._test_start,
+ self._test_ok,
+ self._test_fail,
+ self._test_passed,
+ ]
+
+ for regexp in gtest_regexps:
+ match = regexp.search(line)
+ if match:
+ break
+
+ if not match or match.start() == 0:
+ self._ProcessLine(line)
+ else:
+ self._ProcessLine(line[:match.start()])
+ self._ProcessLine(line[match.start():])
+
+ def _ProcessLine(self, line):
+ """Parses the line and changes the state of parsed tests accordingly.
+
+ Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
+ """
+
+ # Note: When sharding, the number of disabled and flaky tests will be read
+ # multiple times, so this will only show the most recent values (but they
+ # should all be the same anyway).
+
+ # Is it a line listing the master name?
+ if not self.master_name:
+ results = self._master_name_re.match(line)
+ if results:
+ self.master_name = results.group(1)
+
+ results = self._run_test_cases_line.match(line)
+ if results:
+ # A run_test_cases.py output.
+ if self._current_test:
+ if self._test_status[self._current_test][0] == 'started':
+ self._test_status[self._current_test] = (
+ 'timeout', self._failure_description)
+ self._current_test = ''
+ self._failure_description = []
+ return
+
+ # Is it a line declaring all tests passed?
+ results = self._test_passed.match(line)
+ if results:
+ self.completed = True
+ self._current_test = ''
+ return
+
+ # Is it a line reporting disabled tests?
+ results = self._disabled.match(line)
+ if results:
+ try:
+ disabled = int(results.group(1))
+ except ValueError:
+ disabled = 0
+ if disabled > 0 and isinstance(self._disabled_tests, int):
+ self._disabled_tests = disabled
+ else:
+ # If we can't parse the line, at least give a heads-up. This is a
+ # safety net for a case that shouldn't happen but isn't a fatal error.
+ self._disabled_tests = 'some'
+ return
+
+ # Is it a line reporting flaky tests?
+ results = self._flaky.match(line)
+ if results:
+ try:
+ flaky = int(results.group(1))
+ except ValueError:
+ flaky = 0
+ if flaky > 0 and isinstance(self._flaky_tests, int):
+ self._flaky_tests = flaky
+ else:
+ # If we can't parse the line, at least give a heads-up. This is a
+ # safety net for a case that shouldn't happen but isn't a fatal error.
+ self._flaky_tests = 'some'
+ return
+
+ # Is it the start of a test?
+ results = self._test_start.match(line)
+ if results:
+ if self._current_test:
+ if self._test_status[self._current_test][0] == 'started':
+ self._test_status[self._current_test] = (
+ 'timeout', self._failure_description)
+ test_name = results.group(1)
+ self._test_status[test_name] = ('started', ['Did not complete.'])
+ self._current_test = test_name
+ if self.retrying_failed:
+ self._failure_description = self._test_status[test_name][1]
+ self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
+ else:
+ self._failure_description = []
+ return
+
+ # Is it a test success line?
+ results = self._test_ok.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status != 'started':
+ self._RecordError(line, 'success while in status %s' % status)
+ if self.retrying_failed:
+ self._test_status[test_name] = ('warning', self._failure_description)
+ else:
+ self._test_status[test_name] = ('OK', [])
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it a test failure line?
+ results = self._test_fail.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status not in ('started', 'failed', 'timeout'):
+ self._RecordError(line, 'failure while in status %s' % status)
+ # Don't overwrite the failure description when a failing test is listed a
+ # second time in the summary, or if it was already recorded as timing
+ # out.
+ if status not in ('failed', 'timeout'):
+ self._test_status[test_name] = ('failed', self._failure_description)
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it a test timeout line?
+ results = self._test_timeout.search(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status not in ('started', 'failed'):
+ self._RecordError(line, 'timeout while in status %s' % status)
+ self._test_status[test_name] = (
+ 'timeout', self._failure_description + ['Killed (timed out).'])
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it the start of a new memory tool report?
+ results = self._report_start.match(line)
+ if results:
+ report_hash = results.group(1)
+ if report_hash in self._memory_tool_reports:
+ self._RecordError(line, 'multiple reports for this hash')
+ self._memory_tool_reports[report_hash] = []
+ self._current_report_hash = report_hash
+ self._current_report = []
+ return
+
+ # Is it the end of a memory tool report?
+ results = self._report_end.match(line)
+ if results:
+ report_hash = results.group(1)
+ if not self._current_report_hash:
+ self._RecordError(line, 'no BEGIN matches this END')
+ elif report_hash != self._current_report_hash:
+ self._RecordError(line, 'expected (error hash=#%s#)' %
+ self._current_report_hash)
+ else:
+ self._memory_tool_reports[self._current_report_hash] = (
+ self._current_report)
+ self._current_report_hash = ''
+ self._current_report = []
+ return
+
+ # Is it the start of the retry tests?
+ results = self._retry_message.match(line)
+ if results:
+ self.retrying_failed = True
+ return
+
+ # Random line: if we're in a report, collect it. Reports are
+ # generated after all tests are finished, so this should always belong to
+ # the current report hash.
+ if self._current_report_hash:
+ self._current_report.append(line)
+ return
+
+ # Random line: if we're in a test, collect it for the failure description.
+ # Tests may run simultaneously, so this might be off, but it's worth a try.
+ # This also won't work if a test times out before it begins running.
+ if self._current_test:
+ self._failure_description.append(line)
+
+ # Parse the "Failing tests:" list at the end of the output, and add any
+ # additional failed tests to the list. For example, this includes tests
+ # that crash after the OK line.
+ if self._parsing_failures:
+ results = self._test_name.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status in ('not known', 'OK'):
+ self._test_status[test_name] = (
+ 'failed', ['Unknown error, see stdio log.'])
+ else:
+ self._parsing_failures = False
+ elif line.startswith('Failing tests:'):
+ self._parsing_failures = True
+
+
class GTestJSONParser(object):
# Limit of output snippet lines. Avoids flooding the logs with amount
# of output that gums up the infrastructure.
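
For context, a minimal sketch (not part of the reverted change) of how the restored GTestLogParser is driven: gtest output goes through ProcessLine one line at a time, and the accumulated state is queried afterwards. The log lines below are invented, and the import assumes the legacy common/ directory is on sys.path, as runtest.py arranges.

# Illustrative sketch only.
from common import gtest_utils

parser = gtest_utils.GTestLogParser()
for log_line in [
    '[ RUN      ] SomeTestCase.SomeTest',
    '[       OK ] SomeTestCase.SomeTest (12 ms)',
    '[ RUN      ] SomeTestCase.OtherTest',
    '[  FAILED  ] SomeTestCase.OtherTest (3 ms)',
    '[  PASSED  ] 1 test.',
]:
  parser.ProcessLine(log_line)

print parser.PassedTests()              # ['SomeTestCase.SomeTest']
print parser.FailedTests()              # ['SomeTestCase.OtherTest']
print parser.CompletedWithoutFailure()  # False: one test failed
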
diff --git a/infra/scripts/legacy/scripts/slave/annotation_utils.py b/infra/scripts/legacy/scripts/slave/annotation_utils.py
index 331daa2..88a77e9 100644
--- a/infra/scripts/legacy/scripts/slave/annotation_utils.py
+++ b/infra/scripts/legacy/scripts/slave/annotation_utils.py
@@ -11,15 +11,10 @@ TODO(stip): Move the perf dashboard code from runtest.py to here.
import re
+from slave import performance_log_processor
from slave import slave_utils
-# Status codes that can be returned by the evaluateCommand method.
-# From buildbot.status.builder.
-# See: http://docs.buildbot.net/current/developer/results.html
-SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
-
-
def getText(result, observer, name):
"""Generate a text summary for the waterfall.
@@ -51,9 +46,9 @@ def getText(result, observer, name):
failed_test_count = len(observer.FailedTests())
if failed_test_count == 0:
- if result == SUCCESS:
+ if result == performance_log_processor.SUCCESS:
return basic_info
- elif result == WARNINGS:
+ elif result == performance_log_processor.WARNINGS:
return basic_info + ['warnings']
if observer.RunningTests():
@@ -85,7 +80,7 @@ def annotate(test_name, result, log_processor, perf_dashboard_id=None):
# with no output (exit code can have some clues, especially on Windows).
print 'exit code (as seen by runtest.py): %d' % result
- get_text_result = SUCCESS
+ get_text_result = performance_log_processor.SUCCESS
for failure in sorted(log_processor.FailedTests()):
clean_test_name = re.sub(r'[^\w\.\-]', '_', failure)
@@ -107,18 +102,18 @@ def annotate(test_name, result, log_processor, perf_dashboard_id=None):
if parser_result > result:
result = parser_result
- if result == SUCCESS:
+ if result == performance_log_processor.SUCCESS:
if (len(log_processor.ParsingErrors()) or
len(log_processor.FailedTests()) or
len(log_processor.MemoryToolReportHashes())):
print '@@@STEP_WARNINGS@@@'
- get_text_result = WARNINGS
+ get_text_result = performance_log_processor.WARNINGS
elif result == slave_utils.WARNING_EXIT_CODE:
print '@@@STEP_WARNINGS@@@'
- get_text_result = WARNINGS
+ get_text_result = performance_log_processor.WARNINGS
else:
print '@@@STEP_FAILURE@@@'
- get_text_result = FAILURE
+ get_text_result = performance_log_processor.FAILURE
for desc in getText(get_text_result, log_processor, test_name):
print '@@@STEP_TEXT@%s@@@' % desc
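
The constants that used to live in this file now come from performance_log_processor, where SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION and RETRY are assigned from range(6) (see the new module below). A small sketch of what that ordering implies; the describe() helper is hypothetical and only mirrors that assignment.

# Illustrative sketch only.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)

def describe(result):
  names = ['SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED', 'EXCEPTION', 'RETRY']
  return names[result]

# Because the codes are ordered numerically, annotate() can escalate the step
# status with a plain comparison ("if parser_result > result").
print describe(max(SUCCESS, WARNINGS))   # WARNINGS
print describe(max(WARNINGS, FAILURE))   # FAILURE
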
diff --git a/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py b/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
index e35dd1d..55fe843 100755
--- a/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
+++ b/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
@@ -25,6 +25,71 @@ FULL_RESULTS_FILENAME = 'full_results.json'
TIMES_MS_FILENAME = 'times_ms.json'
+# Note: GTestUnexpectedDeathTracker is being deprecated in favor of
+# common.gtest_utils.GTestLogParser. See scripts/slave/runtest.py for details.
+class GTestUnexpectedDeathTracker(object):
+ """A lightweight version of log parser that keeps track of running tests
+ for unexpected timeout or crash."""
+
+ def __init__(self):
+ self._current_test = None
+ self._completed = False
+ self._test_start = re.compile(r'\[\s+RUN\s+\] (\w+\.\w+)')
+ self._test_ok = re.compile(r'\[\s+OK\s+\] (\w+\.\w+)')
+ self._test_fail = re.compile(r'\[\s+FAILED\s+\] (\w+\.\w+)')
+ self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
+
+ self._failed_tests = set()
+
+ def OnReceiveLine(self, line):
+ results = self._test_start.search(line)
+ if results:
+ self._current_test = results.group(1)
+ return
+
+ results = self._test_ok.search(line)
+ if results:
+ self._current_test = ''
+ return
+
+ results = self._test_fail.search(line)
+ if results:
+ self._failed_tests.add(results.group(1))
+ self._current_test = ''
+ return
+
+ results = self._test_passed.search(line)
+ if results:
+ self._completed = True
+ self._current_test = ''
+ return
+
+ def GetResultsMap(self):
+ """Returns a map of TestResults."""
+
+ if self._current_test:
+ self._failed_tests.add(self._current_test)
+
+ test_results_map = dict()
+ for test in self._failed_tests:
+ test_results_map[canonical_name(test)] = [TestResult(test, failed=True)]
+
+ return test_results_map
+
+ def CompletedWithoutFailure(self):
+ """Returns True if all tests completed and no tests failed unexpectedly."""
+
+ if not self._completed:
+ return False
+
+ for test in self._failed_tests:
+ test_modifier = TestResult(test, failed=True).modifier
+ if test_modifier not in (TestResult.FAILS, TestResult.FLAKY):
+ return False
+
+ return True
+
+
def GetResultsMap(observer):
"""Returns a map of TestResults."""
diff --git a/infra/scripts/legacy/scripts/slave/performance_log_processor.py b/infra/scripts/legacy/scripts/slave/performance_log_processor.py
new file mode 100644
index 0000000..db110f3
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/performance_log_processor.py
@@ -0,0 +1,844 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module contains PerformanceLogProcessor and subclasses.
+
+Several performance tests have complicated log output; this module is intended
+to help buildsteps parse these logs and identify whether tests had anomalies.
+
+The classes in this file all have the same method ProcessLine, just like
+GTestLogParser in //tools/build/scripts/common/gtest_utils.py. They also
+construct a set of files which are used for graphing.
+
+Note: This module is doomed to be deprecated in the future, as Telemetry
+results will be passed more directly to the new performance dashboard.
+"""
+
+import collections
+import json
+import logging
+import os
+import re
+
+from common import chromium_utils
+
+# TODO(crbug.com/403564).
+import config
+
+# Status codes that can be returned by the evaluateCommand method.
+# From buildbot.status.builder.
+# See: http://docs.buildbot.net/current/developer/results.html
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
+
+
+class PerformanceLogProcessor(object):
+ """Parent class for performance log parsers.
+
+ The only essential public method that subclasses must define is the method
+ ProcessLine, which takes one line of a test output log and uses it
+ to change the internal state of the PerformanceLogProcessor object,
+ so that methods such as PerformanceLogs return the right thing.
+ """
+
+ # The file perf_expectations.json holds performance expectations.
+ # For more info, see: http://goo.gl/BhYvDa
+ PERF_EXPECTATIONS_PATH = 'src/tools/perf_expectations/'
+
+ def __init__(self, revision=None, factory_properties=None,
+ build_properties=None, webkit_revision='undefined'):
+ """Initializes the log processor.
+
+ Args:
+ revision: Revision number; this currently could also be a git number.
+ It is sent to the perf dashboard to be used as the x-value.
+ factory_properties: Factory properties dict.
+ build_properties: Build properties dict.
+ webkit_revision: Blink revision number.
+ """
+ if factory_properties is None:
+ factory_properties = {}
+
+ # Performance regression/speedup alerts.
+ self._read_expectations = False
+
+ self._perf_id = factory_properties.get('perf_id')
+ self._perf_name = factory_properties.get('perf_name')
+ self._perf_filename = factory_properties.get('perf_filename')
+ self._test_name = factory_properties.get('test_name')
+
+ self._perf_data = {}
+ self._perf_test_keys = {}
+ self._perf_ref_keys = {}
+ self._perf_regress = []
+ self._perf_improve = []
+
+ # A dict mapping output file names to lists of lines in a file.
+ self._output = {}
+
+ # Whether or not the processing has been finalized (i.e. whether
+ # self._FinalizeProcessing has been called.)
+ self._finalized = False
+
+ # The text summary will be built by other methods as we go.
+ # This is a list of strings with messages about the processing.
+ self._text_summary = []
+
+ # Enable expectations if the local configuration supports it.
+ self._expectations = (factory_properties.get('expectations')
+ and self._perf_id and self._perf_name)
+ if self._expectations and not self._perf_filename:
+ self._perf_filename = os.path.join(self.PERF_EXPECTATIONS_PATH,
+ 'perf_expectations.json')
+
+ if revision:
+ self._revision = revision
+ else:
+ raise ValueError('Must provide a revision to PerformanceLogProcessor.')
+
+ self._webkit_revision = webkit_revision
+
+ if not build_properties:
+ build_properties = {}
+ self._git_revision = build_properties.get('git_revision', 'undefined')
+ self._version = build_properties.get('version', 'undefined')
+ self._channel = build_properties.get('channel', 'undefined')
+ self._webrtc_revision = build_properties.get('got_webrtc_revision',
+ 'undefined')
+
+ self._v8_revision = 'undefined'
+ if factory_properties.get('show_v8_revision'):
+ self._v8_revision = build_properties.get('got_v8_revision', 'undefined')
+
+ self._percentiles = [.1, .25, .5, .75, .90, .95, .99]
+
+ def IsChartJson(self): # pylint: disable=R0201
+ """This is not the new telemetry --chartjson output format."""
+ return False
+
+ def PerformanceLogs(self):
+ if not self._finalized:
+ self._FinalizeProcessing()
+ self._finalized = True
+ return self._output
+
+ def PerformanceSummary(self):
+ """Returns a list of strings about performance changes and other info."""
+ if not self._finalized:
+ self._FinalizeProcessing()
+ self._finalized = True
+ return self.PerformanceChanges() + self._text_summary
+
+ def _FinalizeProcessing(self):
+ """Hook for subclasses to do final operations before output is returned."""
+ # This method is to be defined by inheriting classes.
+ pass
+
+ def AppendLog(self, filename, data):
+ """Appends some data to an output file."""
+ self._output[filename] = self._output.get(filename, []) + data
+
+ def PrependLog(self, filename, data):
+ """Prepends some data to an output file."""
+ self._output[filename] = data + self._output.get(filename, [])
+
+ def FailedTests(self): # pylint: disable=R0201
+ return []
+
+ def MemoryToolReportHashes(self): # pylint: disable=R0201
+ return []
+
+ def ParsingErrors(self): # pylint: disable=R0201
+ return []
+
+ def LoadPerformanceExpectationsData(self, all_perf_data):
+ """Load the expectations data.
+
+ All keys in perf_expectations have 4 components:
+ slave/test/graph/trace
+
+ LoadPerformanceExpectationsData finds all keys that match the initial
+ portion of the string ("slave/test") and adds the graph and result
+ portions to the expected performance structure.
+ """
+
+ for perf_key in all_perf_data.keys():
+ # tools/perf_expectations/tests/perf_expectations_unittest.py should have
+ # a matching regular expression.
+ m = re.search(r'^' + self._perf_name + '/' + self._test_name +
+ r'/([\w\.-]+)/([\w\.-]+)$', perf_key)
+ if not m:
+ continue
+
+ perf_data = all_perf_data[perf_key]
+ graph = m.group(1)
+ trace = m.group(2)
+
+ # By default, all perf data is type=relative.
+ perf_data.setdefault('type', 'relative')
+
+ # By default, relative perf data is compared against the fqtn+'_ref'.
+ if perf_data['type'] == 'relative' and 'ref' not in perf_data:
+ perf_data['ref'] = '%s/%s/%s/%s_ref' % (
+ self._perf_name, self._test_name, graph, trace)
+
+ # For each test key, we add a reference in _perf_test_keys to perf_data.
+ self._perf_test_keys.setdefault(perf_key, [])
+ self._perf_test_keys[perf_key].append(perf_data)
+
+ # For each ref key, we add a reference in _perf_ref_keys to perf_data.
+ if 'ref' in perf_data:
+ self._perf_ref_keys.setdefault(perf_data['ref'], [])
+ self._perf_ref_keys[perf_data['ref']].append(perf_data)
+
+ self._perf_data.setdefault(graph, {})
+ self._perf_data[graph][trace] = perf_data
+
+ def LoadPerformanceExpectations(self):
+ if not self._expectations:
+ # self._expectations is false when a given factory doesn't enable
+ # expectations, or doesn't have both perf_id and perf_name set.
+ return
+ try:
+ perf_file = open(self._perf_filename, 'r')
+ except IOError, e:
+ logging.error('I/O Error reading expectations %s(%s): %s' %
+ (self._perf_filename, e.errno, e.strerror))
+ return
+
+ perf_data = {}
+ if perf_file:
+ try:
+ perf_data = json.load(perf_file)
+ except ValueError:
+ perf_file.seek(0)
+ logging.error('Error parsing expectations %s: \'%s\'' %
+ (self._perf_filename, perf_file.read().strip()))
+ perf_file.close()
+
+ # Find this perf/test entry
+ if perf_data and perf_data.has_key('load') and perf_data['load']:
+ self.LoadPerformanceExpectationsData(perf_data)
+ else:
+ logging.error('not loading perf expectations: perf_data is disabled')
+ self._read_expectations = True
+
+ def TrackActualPerformance(self, graph=None, trace=None, value=None,
+ stddev=None):
+ """Set actual performance data when we come across useful values.
+
+ trace will be of the form "RESULTTYPE" or "RESULTTYPE_ref".
+ A trace with _ref in its name refers to a reference build.
+
+ Common result types for page cyclers: t, vm_rss_f_r, IO_b_b, etc.
+ A test's result types vary between test types. Currently, a test
+ only needs to output the appropriate text format to embed a new
+ result type.
+ """
+
+ fqtn = '%s/%s/%s/%s' % (self._perf_name, self._test_name, graph, trace)
+ if fqtn in self._perf_test_keys:
+ for perf_data in self._perf_test_keys[fqtn]:
+ perf_data['actual_test'] = value
+ perf_data['actual_var'] = stddev
+
+ if perf_data['type'] == 'absolute' and 'actual_test' in perf_data:
+ perf_data['actual_delta'] = perf_data['actual_test']
+
+ elif perf_data['type'] == 'relative':
+ if 'actual_test' in perf_data and 'actual_ref' in perf_data:
+ perf_data['actual_delta'] = (
+ perf_data['actual_test'] - perf_data['actual_ref'])
+
+ if fqtn in self._perf_ref_keys:
+ for perf_data in self._perf_ref_keys[fqtn]:
+ perf_data['actual_ref'] = value
+
+ if 'actual_test' in perf_data and 'actual_ref' in perf_data:
+ perf_data['actual_delta'] = (
+ perf_data['actual_test'] - perf_data['actual_ref'])
+
+ def PerformanceChangesAsText(self):
+ """Returns a list of strings which describe performance changes."""
+
+ text = []
+
+ if self._expectations and not self._read_expectations:
+ text.append('MISS_EXPECTATIONS')
+
+ if self._perf_regress:
+ text.append('PERF_REGRESS: ' + ', '.join(self._perf_regress))
+
+ if self._perf_improve:
+ text.append('PERF_IMPROVE: ' + ', '.join(self._perf_improve))
+
+ return text
+
+ def ComparePerformance(self, graph, trace):
+ """Populates internal data about improvements and regressions."""
+ # Skip graphs and traces we don't expect values for.
+ if not graph in self._perf_data or not trace in self._perf_data[graph]:
+ return
+
+ perfdata = self._perf_data[graph][trace]
+ graph_result = graph + '/' + trace
+
+ # Skip result types that didn't calculate a delta.
+ if not 'actual_delta' in perfdata:
+ return
+
+ # Skip result types that don't have regress/improve values.
+ if 'regress' not in perfdata or 'improve' not in perfdata:
+ return
+
+ # Set the high and low performance tests.
+ # The actual delta needs to be within this range to keep the perf test
+ # green. If the results fall above or below this range, the test will go
+ # red (signaling a regression) or orange (signaling a speedup).
+ actual = perfdata['actual_delta']
+ regress = perfdata['regress']
+ improve = perfdata['improve']
+ if (('better' in perfdata and perfdata['better'] == 'lower') or
+ ('better' not in perfdata and regress > improve)):
+ # The "lower is better" case. (ie. time results)
+ if actual < improve:
+ ratio = 1 - _Divide(actual, improve)
+ self._perf_improve.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ elif actual > regress:
+ ratio = _Divide(actual, regress) - 1
+ self._perf_regress.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ else:
+ # The "higher is better" case. (ie. score results)
+ if actual > improve:
+ ratio = _Divide(actual, improve) - 1
+ self._perf_improve.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ elif actual < regress:
+ ratio = 1 - _Divide(actual, regress)
+ self._perf_regress.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+
+ def PerformanceChanges(self):
+ """Compares actual and expected results.
+
+ Returns:
+ A list of strings indicating improvements or regressions.
+ """
+ # Compare actual and expected results.
+ for graph in self._perf_data:
+ for trace in self._perf_data[graph]:
+ self.ComparePerformance(graph, trace)
+
+ return self.PerformanceChangesAsText()
+
+ # Unused argument cmd.
+ # pylint: disable=W0613
+ def evaluateCommand(self, cmd):
+ """Returns a status code indicating success, failure, etc.
+
+ See: http://docs.buildbot.net/current/developer/cls-buildsteps.html
+
+ Args:
+ cmd: A command object. Not used here.
+
+ Returns:
+ A status code (One of SUCCESS, WARNINGS, FAILURE, etc.)
+ """
+ if self._expectations and not self._read_expectations:
+ return WARNINGS
+
+ # make sure regression and improvement logs are calculated
+ self.PerformanceSummary()
+
+ if self._perf_regress:
+ return FAILURE
+
+ if self._perf_improve:
+ return WARNINGS
+
+ # There was no change in performance, report success.
+ return SUCCESS
+
+ def ProcessLine(self, line):
+ """Process one line of a log file."""
+ # This method must be overridden by subclass
+ pass
+
+
+class GraphingLogProcessor(PerformanceLogProcessor):
+ """Parent class for any log processor expecting standard data to be graphed.
+
+ The log will be parsed looking for any lines of the forms:
+ <*>RESULT <graph_name>: <trace_name>= <value> <units>
+ or
+ <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] <units>
+ or
+ <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>
+
+ For example,
+ *RESULT vm_final_browser: OneTab= 8488 kb
+ RESULT startup: ref= [167.00,148.00,146.00,142.00] ms
+ RESULT TabCapturePerformance_foo: Capture= {30.7, 1.45} ms
+
+ The leading * is optional; it indicates that the data from that line should
+ be considered "important", which may mean for example that it's graphed by
+ default.
+
+ If multiple values are given in [], their mean and (sample) standard
+ deviation will be written; if only one value is given, that will be written.
+ A trailing comma is permitted in the list of values.
+
+ NOTE: All lines except for RESULT lines are ignored, including the Avg and
+ Stddev lines output by Telemetry!
+
+ Any of the <fields> except <value> may be empty, in which case the
+ not-terribly-useful defaults will be used. The <graph_name> and <trace_name>
+ should not contain any spaces, colons (:) nor equals-signs (=). Furthermore,
+ the <trace_name> will be used on the waterfall display, so it should be kept
+ short. If the trace_name ends with '_ref', it will be interpreted as a
+ reference value, and shown alongside the corresponding main value on the
+ waterfall.
+
+ Semantic note: The terms graph and chart are used interchangeably here.
+ """
+
+ # The file into which the GraphingLogProcessor will save a list of graph
+ # names for use by the JS doing the plotting.
+ GRAPH_LIST = config.Master.perf_graph_list
+
+ RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
+ r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
+ r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
+ r' ?(?P<UNITS>.+))?')
+ HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
+ r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
+ r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')
+
+ class Trace(object):
+ """Encapsulates data for one trace. Here, this means one point."""
+
+ def __init__(self):
+ self.important = False
+ self.value = 0.0
+ self.stddev = 0.0
+
+ def __str__(self):
+ result = _FormatHumanReadable(self.value)
+ if self.stddev:
+ result += '+/-%s' % _FormatHumanReadable(self.stddev)
+ return result
+
+ class Graph(object):
+ """Encapsulates a set of points that should appear on the same graph."""
+
+ def __init__(self):
+ self.units = None
+ self.traces = {}
+
+ def IsImportant(self):
+ """A graph is considered important if any of its traces is important."""
+ for trace in self.traces.itervalues():
+ if trace.important:
+ return True
+ return False
+
+ def BuildTracesDict(self):
+ """Returns a dictionary mapping trace names to [value, stddev]."""
+ traces_dict = {}
+ for name, trace in self.traces.items():
+ traces_dict[name] = [str(trace.value), str(trace.stddev)]
+ return traces_dict
+
+ def __init__(self, *args, **kwargs):
+ """Initiates this log processor."""
+ PerformanceLogProcessor.__init__(self, *args, **kwargs)
+
+ # A dict of Graph objects, by name.
+ self._graphs = {}
+
+ # Load performance expectations for this test.
+ self.LoadPerformanceExpectations()
+
+ def ProcessLine(self, line):
+ """Processes one result line, and updates the state accordingly."""
+ results_match = self.RESULTS_REGEX.search(line)
+ histogram_match = self.HISTOGRAM_REGEX.search(line)
+ if results_match:
+ self._ProcessResultLine(results_match)
+ elif histogram_match:
+ self._ProcessHistogramLine(histogram_match)
+
+ def _ProcessResultLine(self, line_match):
+ """Processes a line that matches the standard RESULT line format.
+
+ Args:
+ line_match: A MatchObject as returned by re.search.
+ """
+ match_dict = line_match.groupdict()
+ graph_name = match_dict['GRAPH'].strip()
+ trace_name = match_dict['TRACE'].strip()
+
+ graph = self._graphs.get(graph_name, self.Graph())
+ graph.units = match_dict['UNITS'] or ''
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value = match_dict['VALUE']
+ trace.important = match_dict['IMPORTANT'] or False
+
+ # Compute the mean and standard deviation for a multiple-valued item,
+ # or the numerical value of a single-valued item.
+ if trace.value.startswith('['):
+ try:
+ value_list = [float(x) for x in trace.value.strip('[],').split(',')]
+ except ValueError:
+ # Report, but ignore, corrupted data lines. (Lines that are so badly
+ # broken that they don't even match the RESULTS_REGEX won't be
+ # detected.)
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+ trace.value, trace.stddev, filedata = self._CalculateStatistics(
+ value_list, trace_name)
+ assert filedata is not None
+ for filename in filedata:
+ self.PrependLog(filename, filedata[filename])
+ elif trace.value.startswith('{'):
+ stripped = trace.value.strip('{},')
+ try:
+ trace.value, trace.stddev = [float(x) for x in stripped.split(',')]
+ except ValueError:
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+ else:
+ try:
+ trace.value = float(trace.value)
+ except ValueError:
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+
+ graph.traces[trace_name] = trace
+ self._graphs[graph_name] = graph
+
+ # Store values in actual performance.
+ self.TrackActualPerformance(graph=graph_name, trace=trace_name,
+ value=trace.value, stddev=trace.stddev)
+
+ def _ProcessHistogramLine(self, line_match):
+ """Processes a line that matches the HISTOGRAM line format.
+
+ Args:
+ line_match: A MatchObject as returned by re.search.
+ """
+ match_dict = line_match.groupdict()
+ graph_name = match_dict['GRAPH'].strip()
+ trace_name = match_dict['TRACE'].strip()
+ units = (match_dict['UNITS'] or '').strip()
+ histogram_json = match_dict['VALUE_JSON']
+ important = match_dict['IMPORTANT'] or False
+ try:
+ histogram_data = json.loads(histogram_json)
+ except ValueError:
+ # Report, but ignore, corrupted data lines. (Lines that are so badly
+ # broken that they don't even match the HISTOGRAM_REGEX won't be
+ # detected.)
+ logging.warning("Bad test output: '%s'" % histogram_json.strip())
+ return
+
+ # Compute percentile data, create a graph for all percentile values.
+ percentiles = self._CalculatePercentiles(histogram_data, trace_name)
+ for i in percentiles:
+ percentile_graph_name = graph_name + "_" + str(i['percentile'])
+ graph = self._graphs.get(percentile_graph_name, self.Graph())
+ graph.units = units
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value = i['value']
+ trace.important = important
+ graph.traces[trace_name] = trace
+ self._graphs[percentile_graph_name] = graph
+ self.TrackActualPerformance(graph=percentile_graph_name,
+ trace=trace_name,
+ value=i['value'])
+
+ # Compute geometric mean and standard deviation.
+ graph = self._graphs.get(graph_name, self.Graph())
+ graph.units = units
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value, trace.stddev = self._CalculateHistogramStatistics(
+ histogram_data, trace_name)
+ trace.important = important
+ graph.traces[trace_name] = trace
+ self._graphs[graph_name] = graph
+ self.TrackActualPerformance(graph=graph_name, trace=trace_name,
+ value=trace.value, stddev=trace.stddev)
+
+ # _CalculateStatistics needs to be a member function.
+ # pylint: disable=R0201
+ # Unused argument value_list.
+ # pylint: disable=W0613
+ def _CalculateStatistics(self, value_list, trace_name):
+ """Returns a tuple with some statistics based on the given value list.
+
+ This method may be overridden by subclasses wanting a different standard
+ deviation calculation (or some other sort of error value entirely).
+
+ Args:
+ value_list: the list of values to use in the calculation
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+
+ Returns:
+ A 3-tuple - mean, standard deviation, and a dict which is either
+ empty or contains information about some file contents.
+ """
+ mean, stddev = chromium_utils.MeanAndStandardDeviation(value_list)
+ return mean, stddev, {}
+
+ def _CalculatePercentiles(self, histogram, trace_name):
+ """Returns a list of percentile values from a histogram.
+
+ This method may be overridden by subclasses.
+
+ Args:
+ histogram: histogram data (relevant keys: "buckets", and for each bucket,
+ "min", "max" and "count").
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+
+ Returns:
+ A list of dicts, each of which has the keys "percentile" and "value".
+ """
+ return chromium_utils.HistogramPercentiles(histogram, self._percentiles)
+
+ def _CalculateHistogramStatistics(self, histogram, trace_name):
+ """Returns the geometric mean and standard deviation for a histogram.
+
+ This method may be overridden by subclasses.
+
+ Args:
+ histogram: histogram data (relevant keys: "buckets", and for each bucket,
+ "min", "max" and "count").
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+ """
+ geom_mean, stddev = chromium_utils.GeomMeanAndStdDevFromHistogram(histogram)
+ return geom_mean, stddev
+
+ def _BuildSummaryJson(self, graph):
+ """Returns JSON with the data in the given graph plus revision information.
+
+ Args:
+ graph: A GraphingLogProcessor.Graph object.
+
+ Returns:
+ The output here is in the "-summary.dat line" format; that is, it is a
+ JSON encoding of a dictionary that has the key "traces".
+ """
+ assert self._revision, 'revision must always be present'
+
+ graph_dict = collections.OrderedDict([
+ ('traces', graph.BuildTracesDict()),
+ ('rev', str(self._revision)),
+ ('git_revision', str(self._git_revision)),
+ ('webkit_rev', str(self._webkit_revision)),
+ ('webrtc_rev', str(self._webrtc_revision)),
+ ('v8_rev', str(self._v8_revision)),
+ ('ver', str(self._version)),
+ ('chan', str(self._channel)),
+ ('units', str(graph.units)),
+ ])
+
+ # Include a sorted list of important trace names if there are any.
+ important = [t for t in graph.traces.keys() if graph.traces[t].important]
+ if important:
+ graph_dict['important'] = sorted(important)
+
+ return json.dumps(graph_dict)
+
+ def _FinalizeProcessing(self):
+ self._CreateSummaryOutput()
+ self._GenerateGraphInfo()
+
+ def _CreateSummaryOutput(self):
+ """Writes the summary data file and collect the waterfall display text.
+
+ The summary file contains JSON-encoded data.
+
+ The waterfall contains lines for each important trace, in the form
+ tracename: value< (refvalue)>
+ """
+
+ for graph_name, graph in self._graphs.iteritems():
+ # Write a line in the applicable summary file for each graph.
+ filename = ('%s-summary.dat' % graph_name)
+ data = [self._BuildSummaryJson(graph) + '\n']
+ self._output[filename] = data + self._output.get(filename, [])
+
+ # Add a line to the waterfall for each important trace.
+ for trace_name, trace in graph.traces.iteritems():
+ if trace_name.endswith('_ref'):
+ continue
+ if trace.important:
+ display = '%s: %s' % (trace_name, _FormatHumanReadable(trace.value))
+ if graph.traces.get(trace_name + '_ref'):
+ display += ' (%s)' % _FormatHumanReadable(
+ graph.traces[trace_name + '_ref'].value)
+ self._text_summary.append(display)
+
+ self._text_summary.sort()
+
+ def _GenerateGraphInfo(self):
+ """Outputs a list of graphs viewed this session, for use by the plotter.
+
+ These will be collated and sorted on the master side.
+ """
+ graphs = {}
+ for name, graph in self._graphs.iteritems():
+ graphs[name] = {'name': name,
+ 'important': graph.IsImportant(),
+ 'units': graph.units}
+ self._output[self.GRAPH_LIST] = json.dumps(graphs).split('\n')
+
+ def GetGraphs(self):
+ """Returns a list of graph names."""
+ return self._graphs.keys()
+
+ def GetTraces(self, graph):
+ """Returns a dict of traces associated with the given graph.
+
+ Returns:
+ A dict mapping names of traces to two-element lists of value, stddev.
+ """
+ return self._graphs[graph].BuildTracesDict()
+
+ def GetUnits(self, graph):
+ """Returns the units associated with the given graph."""
+ return str(self._graphs[graph].units)
+
+
+class GraphingPageCyclerLogProcessor(GraphingLogProcessor):
+ """Handles additional processing for page-cycler timing data."""
+
+ _page_list = ['(unknown)']
+ PAGES_REGEXP = re.compile(r'^Pages: \[(?P<LIST>.*)\]')
+
+ def ProcessLine(self, line):
+ """Also looks for the Pages: line to find the page count."""
+ # super() should be used instead of GetParentClass().
+ # pylint: disable=W0212
+ line_match = self.PAGES_REGEXP.search(line)
+ if line_match:
+ self._page_list = line_match.groupdict()['LIST'].strip().split(',')
+ if len(self._page_list) < 1:
+ self._page_list = ['(unknown)']
+ else:
+ chromium_utils.GetParentClass(self).ProcessLine(self, line)
+
+ def _CalculateStatistics(self, value_list, trace_name):
+ """Handles statistics generation and recording for page-cycler data.
+
+ Sums the timings over all pages for each iteration and returns a tuple
+ (mean, standard deviation) of those sums. Also saves a data file
+ <revision>_<tracename>.dat holding a line of times for each URL loaded,
+ for use by humans when debugging a regression.
+ """
+
+ # If the name of the trace is one of the pages in the page list then we are
+ # dealing with the results for that page only, not the overall results. So
+ # calculate the statistics like a normal GraphingLogProcessor, not the
+ # GraphingPageCyclerLogProcessor.
+ if trace_name in self._page_list:
+ return super(GraphingPageCyclerLogProcessor, self)._CalculateStatistics(
+ value_list, trace_name)
+
+ value_count = len(value_list)
+ page_count = len(self._page_list)
+ # Chunk value_list into groups, where each sub-list
+ # has all the page times of one iteration.
+ iterations = [value_list[start:start+page_count]
+ for start in xrange(0, value_count, page_count)]
+
+ iteration_times = map(sum, iterations)
+ page_times_list = map(list, zip(*iterations))
+ page_times_dict = dict(zip(self._page_list, page_times_list))
+
+ pagedata = self._SavePageData(page_times_dict, trace_name)
+ val, stddev = chromium_utils.FilteredMeanAndStandardDeviation(
+ iteration_times)
+ return val, stddev, pagedata
+
+ def _SavePageData(self, page_times, trace_name):
+ """Saves a file holding the timing data for each page loaded.
+
+ Args:
+ page_times: a dict mapping a page URL to a list of its times
+ trace_name: the trace that produced this set of times
+
+ Returns:
+ A dict with one entry, mapping filename to file contents.
+ """
+ file_data = []
+ for page, times in sorted(page_times.iteritems()):
+ mean, stddev = chromium_utils.FilteredMeanAndStandardDeviation(times)
+ file_data.append('%s (%s+/-%s): %s' % (page,
+ _FormatFloat(mean),
+ _FormatFloat(stddev),
+ _JoinWithSpacesAndNewLine(times)))
+
+ filename = '%s_%s.dat' % (self._revision, trace_name)
+ return {filename: file_data}
+
+
+def _FormatFloat(number):
+ """Formats float with two decimal points."""
+ if number:
+ return '%.2f' % number
+ else:
+ return '0.00'
+
+
+def _FormatPercentage(ratio):
+ """Formats a number as a string with a percentage (e.g. 0.5 => "50%")."""
+ return '%s%%' % _FormatFloat(100 * ratio)
+
+
+def _Divide(x, y):
+ """Divides with float division, or returns infinity if denominator is 0."""
+ if y == 0:
+ return float('inf')
+ return float(x) / y
+
+
+def _FormatHumanReadable(number):
+ """Formats a float into three significant figures, using metric suffixes.
+
+ Only m, k, and M prefixes (for 1/1000, 1000, and 1,000,000) are used.
+ Examples:
+ 0.0387 => 38.7m
+ 1.1234 => 1.12
+ 10866 => 10.8k
+ 682851200 => 683M
+ """
+ metric_prefixes = {-3: 'm', 0: '', 3: 'k', 6: 'M'}
+ scientific = '%.2e' % float(number) # 6.83e+005
+ e_idx = scientific.find('e') # 4, or 5 if negative
+ digits = float(scientific[:e_idx]) # 6.83
+ exponent = int(scientific[e_idx + 1:]) # int('+005') = 5
+ while exponent % 3:
+ digits *= 10
+ exponent -= 1
+ while exponent > 6:
+ digits *= 10
+ exponent -= 1
+ while exponent < -3:
+ digits /= 10
+ exponent += 1
+ if digits >= 100:
+ # Don't append a meaningless '.0' to an integer number.
+ digits = int(digits)
+ # Exponent is now divisible by 3, between -3 and 6 inclusive: (-3, 0, 3, 6).
+ return '%s%s' % (digits, metric_prefixes[exponent])
+
+
+def _JoinWithSpacesAndNewLine(words):
+ """Joins a list of words together with spaces."""
+ return ' '.join(str(w) for w in words) + '\n'
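
A minimal sketch (not part of the change) of the GraphingLogProcessor in action, assuming the legacy config and common modules are importable (performance_log_processor imports them at module load). It consumes RESULT lines of the formats documented in the class docstring and exposes the parsed graphs; the graph and trace names below are invented.

# Illustrative sketch only.
from slave import performance_log_processor

proc = performance_log_processor.GraphingLogProcessor(revision=337494)
proc.ProcessLine('*RESULT vm_final_browser: OneTab= 8488 kb')
proc.ProcessLine('RESULT startup: warm= [167.00,148.00,146.00,142.00] ms')

print proc.GetGraphs()                   # e.g. ['startup', 'vm_final_browser']
print proc.GetUnits('vm_final_browser')  # 'kb'
# The bracketed list is reduced to its mean and standard deviation, so the
# trace dict is roughly {'warm': ['150.75', '<stddev>']}.
print proc.GetTraces('startup')
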
diff --git a/infra/scripts/legacy/scripts/slave/runtest.py b/infra/scripts/legacy/scripts/slave/runtest.py
index c1d8d07..49e7ed1 100755
--- a/infra/scripts/legacy/scripts/slave/runtest.py
+++ b/infra/scripts/legacy/scripts/slave/runtest.py
@@ -37,8 +37,10 @@ from slave import annotation_utils
from slave import build_directory
from slave import crash_utils
from slave import gtest_slave_utils
+from slave import performance_log_processor
from slave import results_dashboard
from slave import slave_utils
+from slave import telemetry_utils
from slave import xvfb
USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
@@ -48,9 +50,27 @@ CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
# Directory to write JSON for test results into.
DEST_DIR = 'gtest_results'
+# Names of httpd configuration file under different platforms.
+HTTPD_CONF = {
+ 'linux': 'httpd2_linux.conf',
+ 'mac': 'httpd2_mac.conf',
+ 'win': 'httpd.conf'
+}
+# Regex matching git comment lines containing svn revision info.
+GIT_SVN_ID_RE = re.compile(r'^git-svn-id: .*@([0-9]+) .*$')
+# Regex for the master branch commit position.
+GIT_CR_POS_RE = re.compile(r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$')
+
# The directory that this script is in.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+LOG_PROCESSOR_CLASSES = {
+ 'gtest': gtest_utils.GTestLogParser,
+ 'graphing': performance_log_processor.GraphingLogProcessor,
+ 'pagecycler': performance_log_processor.GraphingPageCyclerLogProcessor,
+}
+
+
def _LaunchDBus():
"""Launches DBus to work around a bug in GLib.
@@ -111,13 +131,14 @@ def _ShutdownDBus():
def _RunGTestCommand(
- options, command, extra_env, pipes=None):
+ options, command, extra_env, log_processor=None, pipes=None):
"""Runs a test, printing and possibly processing the output.
Args:
options: Options passed for this invocation of runtest.py.
command: A list of strings in a command (the command and its arguments).
extra_env: A dictionary of extra environment variables to set.
+ log_processor: A log processor instance which has the ProcessLine method.
pipes: A list of command string lists which the output will be piped to.
Returns:
@@ -137,7 +158,26 @@ def _RunGTestCommand(
# TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})
- return chromium_utils.RunCommand(command, pipes=pipes, env=env)
+ log_processors = {}
+ if log_processor:
+ log_processors[log_processor.__class__.__name__] = log_processor
+
+ if (not 'GTestLogParser' in log_processors and
+ options.log_processor_output_file):
+ log_processors['GTestLogParser'] = gtest_utils.GTestLogParser()
+
+ def _ProcessLine(line):
+ for current_log_processor in log_processors.values():
+ current_log_processor.ProcessLine(line)
+
+ result = chromium_utils.RunCommand(
+ command, pipes=pipes, parser_func=_ProcessLine, env=env)
+
+ if options.log_processor_output_file:
+ _WriteLogProcessorResultsToOutput(
+ log_processors['GTestLogParser'], options.log_processor_output_file)
+
+ return result
def _GetMaster():
@@ -150,6 +190,97 @@ def _GetMasterString(master):
return '[Running for master: "%s"]' % master
+def _GetGitCommitPositionFromLog(log):
+ """Returns either the commit position or svn rev from a git log."""
+ # Parse from the bottom up, in case the commit message embeds the message
+ # from a different commit (e.g., for a revert).
+ for r in [GIT_CR_POS_RE, GIT_SVN_ID_RE]:
+ for line in reversed(log.splitlines()):
+ m = r.match(line.strip())
+ if m:
+ return m.group(1)
+ return None
+
+
+def _GetGitCommitPosition(dir_path):
+ """Extracts the commit position or svn revision number of the HEAD commit."""
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ p = subprocess.Popen(
+ [git_exe, 'log', '-n', '1', '--pretty=format:%B', 'HEAD'],
+ cwd=dir_path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (log, _) = p.communicate()
+ if p.returncode != 0:
+ return None
+ return _GetGitCommitPositionFromLog(log)
+
+
+def _IsGitDirectory(dir_path):
+ """Checks whether the given directory is in a git repository.
+
+ Args:
+ dir_path: The directory path to be tested.
+
+ Returns:
+ True if given directory is in a git repository, False otherwise.
+ """
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ with open(os.devnull, 'w') as devnull:
+ p = subprocess.Popen([git_exe, 'rev-parse', '--git-dir'],
+ cwd=dir_path, stdout=devnull, stderr=devnull)
+ return p.wait() == 0
+
+
+def _GetRevision(in_directory):
+ """Returns the SVN revision, git commit position, or git hash.
+
+ Args:
+ in_directory: A directory in the repository to be checked.
+
+ Returns:
+ An SVN revision as a string if the given directory is in an SVN repository,
+ or a git commit position number, or if that's not available, a git hash.
+ If all of that fails, an empty string is returned.
+ """
+ import xml.dom.minidom
+ if not os.path.exists(os.path.join(in_directory, '.svn')):
+ if _IsGitDirectory(in_directory):
+ svn_rev = _GetGitCommitPosition(in_directory)
+ if svn_rev:
+ return svn_rev
+ return _GetGitRevision(in_directory)
+ else:
+ return ''
+
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ output = subprocess.Popen(['svn', 'info', '--xml'],
+ cwd=in_directory,
+ shell=(sys.platform == 'win32'),
+ stdout=subprocess.PIPE).communicate()[0]
+ try:
+ dom = xml.dom.minidom.parseString(output)
+ return dom.getElementsByTagName('entry')[0].getAttribute('revision')
+ except xml.parsers.expat.ExpatError:
+ return ''
+ return ''
+
+
+def _GetGitRevision(in_directory):
+ """Returns the git hash tag for the given directory.
+
+ Args:
+ in_directory: The directory where git is to be run.
+
+ Returns:
+ The git SHA1 hash string.
+ """
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ p = subprocess.Popen(
+ [git_exe, 'rev-parse', 'HEAD'],
+ cwd=in_directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (stdout, _) = p.communicate()
+ return stdout.strip()
+
+
def _GenerateJSONForTestResults(options, log_processor):
"""Generates or updates a JSON file from the gtest results XML and upload the
file to the archive server.
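
A quick sketch (not part of the change) of what the footer parsing above does with a revert-style message; the message text is invented. Because lines are scanned bottom-up and Cr-Commit-Position is tried before git-svn-id, the revert's own footer wins over any footer embedded higher in the body.

# Illustrative sketch only.
revert_message = '\n'.join([
    'Revert "some change"',
    '',
    'Original description:',
    'Cr-Commit-Position: refs/heads/master@{#111111}',
    '',
    'Cr-Commit-Position: refs/heads/master@{#337494}',
])
# Bottom-up scan: the last footer is found first.
print _GetGitCommitPositionFromLog(revert_message)  # '337494' (a string)
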
@@ -210,12 +341,15 @@ def _GenerateJSONForTestResults(options, log_processor):
if options.revision:
generate_json_options.chrome_revision = options.revision
else:
- generate_json_options.chrome_revision = ''
+ chrome_dir = chromium_utils.FindUpwardParent(build_dir, 'third_party')
+ generate_json_options.chrome_revision = _GetRevision(chrome_dir)
if options.webkit_revision:
generate_json_options.webkit_revision = options.webkit_revision
else:
- generate_json_options.webkit_revision = ''
+ webkit_dir = chromium_utils.FindUpward(
+ build_dir, 'third_party', 'WebKit', 'Source')
+ generate_json_options.webkit_revision = _GetRevision(webkit_dir)
# Generate results JSON file and upload it to the appspot server.
generator = gtest_slave_utils.GenerateJSONResults(
@@ -275,11 +409,31 @@ def _UsingGtestJson(options):
not options.run_shell_script)
-def _SelectLogProcessor(options):
+def _ListLogProcessors(selection):
+ """Prints a list of available log processor classes iff the input is 'list'.
+
+ Args:
+ selection: A log processor name, or the string "list".
+
+ Returns:
+ True if a list was printed, False otherwise.
+ """
+ shouldlist = selection and selection == 'list'
+ if shouldlist:
+ print
+ print 'Available log processors:'
+ for p in LOG_PROCESSOR_CLASSES:
+ print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__
+
+ return shouldlist
+
+
+def _SelectLogProcessor(options, is_telemetry):
"""Returns a log processor class based on the command line options.
Args:
options: Command-line options (from OptionParser).
+    is_telemetry: Whether to create a telemetry log processor.
Returns:
A log processor class, or None.
@@ -287,9 +441,67 @@ def _SelectLogProcessor(options):
if _UsingGtestJson(options):
return gtest_utils.GTestJSONParser
+ if is_telemetry:
+ return telemetry_utils.TelemetryResultsProcessor
+
+ if options.annotate:
+ if options.annotate in LOG_PROCESSOR_CLASSES:
+ if options.generate_json_file and options.annotate != 'gtest':
+        raise NotImplementedError('"%s" doesn\'t make sense with '
+                                  'options.generate_json_file.' %
+                                  options.annotate)
+ else:
+ return LOG_PROCESSOR_CLASSES[options.annotate]
+ else:
+ raise KeyError('"%s" is not a valid GTest parser!' % options.annotate)
+ elif options.generate_json_file:
+ return LOG_PROCESSOR_CLASSES['gtest']
+
return None
+def _GetCommitPos(build_properties):
+ """Extracts the commit position from the build properties, if its there."""
+ if 'got_revision_cp' not in build_properties:
+ return None
+ commit_pos = build_properties['got_revision_cp']
+ return int(re.search(r'{#(\d+)}', commit_pos).group(1))
+
+
+def _GetMainRevision(options):
+ """Return revision to use as the numerical x-value in the perf dashboard.
+
+ This will be used as the value of "rev" in the data passed to
+ results_dashboard.SendResults.
+
+  In order of priority, this function could return:
+ 1. The value of the --revision flag (IF it can be parsed as an int).
+ 2. The value of "got_revision_cp" in build properties.
+ 3. An SVN number, git commit position, or git commit hash.
+ """
+ if options.revision and options.revision.isdigit():
+ return options.revision
+ commit_pos_num = _GetCommitPos(options.build_properties)
+ if commit_pos_num is not None:
+ return commit_pos_num
+ # TODO(sullivan,qyearsley): Don't fall back to _GetRevision if it returns
+ # a git commit, since this should be a numerical revision. Instead, abort
+ # and fail.
+ return _GetRevision(os.path.dirname(os.path.abspath(options.build_dir)))
+
+
+def _GetBlinkRevision(options):
+ if options.webkit_revision:
+ webkit_revision = options.webkit_revision
+ else:
+ try:
+ webkit_dir = chromium_utils.FindUpward(
+ os.path.abspath(options.build_dir), 'third_party', 'WebKit', 'Source')
+ webkit_revision = _GetRevision(webkit_dir)
+ except Exception:
+ webkit_revision = None
+ return webkit_revision
+
+
def _CreateLogProcessor(log_processor_class, options):
"""Creates a log processor instance.
@@ -303,10 +515,25 @@ def _CreateLogProcessor(log_processor_class, options):
if not log_processor_class:
return None
- if log_processor_class.__name__ == 'GTestJSONParser':
- return log_processor_class(options.build_properties.get('mastername'))
+ if log_processor_class.__name__ == 'GTestLogParser':
+ tracker_obj = log_processor_class()
+ elif log_processor_class.__name__ == 'GTestJSONParser':
+ tracker_obj = log_processor_class(
+ options.build_properties.get('mastername'))
+ else:
+ webkit_revision = _GetBlinkRevision(options) or 'undefined'
+ revision = _GetMainRevision(options) or 'undefined'
+
+ tracker_obj = log_processor_class(
+ revision=revision,
+ build_properties=options.build_properties,
+ factory_properties=options.factory_properties,
+ webkit_revision=webkit_revision)
- return None
+ if options.annotate and options.generate_json_file:
+ tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
+
+ return tracker_obj
def _WriteLogProcessorResultsToOutput(log_processor, log_output_file):
@@ -374,6 +601,54 @@ def _SymbolizeSnippetsInJSON(options, json_file_name):
print stderr
+def _MainParse(options, _args):
+ """Run input through annotated test parser.
+
+ This doesn't execute a test, but reads test input from a file and runs it
+ through the specified annotation parser (aka log processor).
+ """
+ if not options.annotate:
+ raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense '
+ 'without --annotate.')
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+
+ log_processor_class = _SelectLogProcessor(options, False)
+ log_processor = _CreateLogProcessor(log_processor_class, options)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ if options.parse_input == '-':
+ f = sys.stdin
+ else:
+ try:
+ f = open(options.parse_input, 'rb')
+ except IOError as e:
+ print 'Error %d opening \'%s\': %s' % (e.errno, options.parse_input,
+ e.strerror)
+ return 1
+
+ with f:
+ for line in f:
+ log_processor.ProcessLine(line)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, options.parse_result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ return options.parse_result
+
+
def _MainMac(options, args, extra_env):
"""Runs the test on mac."""
if len(args) < 1:
@@ -399,7 +674,10 @@ def _MainMac(options, args, extra_env):
command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
command.extend(args[1:])
- log_processor_class = _SelectLogProcessor(options)
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, False)
log_processor = _CreateLogProcessor(log_processor_class, options)
if options.generate_json_file:
@@ -419,7 +697,8 @@ def _MainMac(options, args, extra_env):
command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
command)
- result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
+ result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+ log_processor=log_processor)
finally:
if _UsingGtestJson(options):
log_processor.ProcessJSONFile(options.build_dir)
@@ -507,7 +786,10 @@ def _MainLinux(options, args, extra_env):
command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
command.extend(args[1:])
- log_processor_class = _SelectLogProcessor(options)
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, False)
log_processor = _CreateLogProcessor(log_processor_class, options)
if options.generate_json_file:
@@ -547,7 +829,8 @@ def _MainLinux(options, args, extra_env):
command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
command)
- result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
+ result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+ log_processor=log_processor)
finally:
if start_xvfb:
xvfb.StopVirtualX(slave_name)
@@ -611,7 +894,10 @@ def _MainWin(options, args, extra_env):
# directory from previous test runs (i.e.- from crashes or unittest leaks).
slave_utils.RemoveChromeTemporaryFiles()
- log_processor_class = _SelectLogProcessor(options)
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, False)
log_processor = _CreateLogProcessor(log_processor_class, options)
if options.generate_json_file:
@@ -627,7 +913,7 @@ def _MainWin(options, args, extra_env):
command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
command)
- result = _RunGTestCommand(options, command, extra_env)
+ result = _RunGTestCommand(options, command, extra_env, log_processor)
finally:
if _UsingGtestJson(options):
log_processor.ProcessJSONFile(options.build_dir)
@@ -644,6 +930,64 @@ def _MainWin(options, args, extra_env):
return result
+def _MainAndroid(options, args, extra_env):
+ """Runs tests on android.
+
+  Running GTest-based tests on Android differs from Linux because it requires
+  src/build/android/test_runner.py to deploy to and communicate with the
+  device. Python scripts are run the same way as on Linux.
+
+ Args:
+ options: Command-line options for this invocation of runtest.py.
+ args: Command and arguments for the test.
+ extra_env: A dictionary of extra environment variables to set.
+
+ Returns:
+ Exit status code.
+ """
+ if options.run_python_script:
+ return _MainLinux(options, args, extra_env)
+
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, False)
+ log_processor = _CreateLogProcessor(log_processor_class, options)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ # Assume it's a gtest apk, so use the android harness.
+ test_suite = args[0]
+ run_test_target_option = '--release'
+ if options.target == 'Debug':
+ run_test_target_option = '--debug'
+ command = ['src/build/android/test_runner.py', 'gtest',
+ run_test_target_option, '-s', test_suite]
+
+ if options.flakiness_dashboard_server:
+ command += ['--flakiness-dashboard-server=%s' %
+ options.flakiness_dashboard_server]
+
+ result = _RunGTestCommand(
+ options, command, extra_env, log_processor=log_processor)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ return result
+
+
def _ConfigureSanitizerTools(options, args, extra_env):
if (options.enable_asan or options.enable_tsan or
options.enable_msan or options.enable_lsan):
@@ -943,12 +1287,18 @@ def main():
'--results-directory is required with --generate-json-file=True')
return 1
- if sys.platform.startswith('darwin'):
+ if options.parse_input:
+ result = _MainParse(options, args)
+ elif sys.platform.startswith('darwin'):
result = _MainMac(options, args, extra_env)
elif sys.platform == 'win32':
result = _MainWin(options, args, extra_env)
elif sys.platform == 'linux2':
- result = _MainLinux(options, args, extra_env)
+ if options.factory_properties.get('test_platform',
+ options.test_platform) == 'android':
+ result = _MainAndroid(options, args, extra_env)
+ else:
+ result = _MainLinux(options, args, extra_env)
else:
sys.stderr.write('Unknown sys.platform value %s\n' % repr(sys.platform))
return 1
diff --git a/infra/scripts/legacy/scripts/slave/telemetry_utils.py b/infra/scripts/legacy/scripts/slave/telemetry_utils.py
new file mode 100755
index 0000000..e3ea592
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/telemetry_utils.py
@@ -0,0 +1,114 @@
+#! /usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# pylint: disable=R0201
+
+"""Log parsing for telemetry tests.
+
+The TelemetryResultsProcessor loads and contains results that were output in
+JSON format from Telemetry. It can be used as a replacement for the classes in
+the performance_log_processor module.
+"""
+
+import json
+import logging
+import os
+
+from slave.performance_log_processor import _FormatHumanReadable
+
+
+class TelemetryResultsProcessor(object):
+
+ def __init__(self, filename, is_ref, cleanup_parent_dir):
+ self._chart_filename = filename
+ self._is_reference_build = is_ref
+ self._cleanup_parent_dir = cleanup_parent_dir
+
+ def ChartJson(self):
+ try:
+ return json.load(open(self._chart_filename))
+ except (IOError, ValueError):
+ logging.error('Error reading telemetry results from %s',
+ self._chart_filename)
+ logging.error('This usually means that telemetry did not run, so it could'
+ ' not generate the file. Please check the device running the test.')
+ return None
+
+ def Cleanup(self):
+ try:
+ os.remove(self._chart_filename)
+ except OSError:
+ logging.error('Unable to remove telemetry output file %s',
+ self._chart_filename)
+ if self._cleanup_parent_dir:
+ try:
+ os.rmdir(os.path.dirname(self._chart_filename))
+ except OSError:
+ logging.error('Unable to remove telemetry output dir %s',
+ os.path.dirname(self._chart_filename))
+
+ def IsChartJson(self):
+ """This is the new telemetry --chartjson output format."""
+ return True
+
+ def IsReferenceBuild(self):
+ return self._is_reference_build
+
+ def ProcessLine(self, line):
+ pass
+
+ def FailedTests(self):
+ return []
+
+ def MemoryToolReportHashes(self): # pylint: disable=R0201
+ return []
+
+ def ParsingErrors(self): # pylint: disable=R0201
+ return []
+
+ def PerformanceSummary(self):
+ """Writes the waterfall display text.
+
+ The waterfall contains lines for each important trace, in the form
+ tracename: value< (refvalue)>
+ """
+ if self._is_reference_build:
+ return []
+
+ chartjson_data = self.ChartJson()
+ if not chartjson_data:
+ return []
+
+ charts = chartjson_data.get('charts')
+ if not charts:
+ return []
+
+ def _summary_to_string(chart_name, chart_values):
+ summary = chart_values.get('summary')
+ if not summary:
+ return None
+
+ important = summary.get('important')
+ if not important:
+ return None
+
+ value_type = summary.get('type')
+ if value_type == 'list_of_scalar_values':
+ values = summary.get('values')
+ if not values or None in values:
+ return '%s: %s' % (chart_name, 'NaN')
+ else:
+ mean = sum(values) / float(len(values))
+ return '%s: %s' % (chart_name, _FormatHumanReadable(mean))
+ elif value_type == 'scalar':
+ value = summary.get('value')
+ if value is None:
+ return '%s: %s' % (chart_name, 'NaN')
+ else:
+ return '%s: %s' % (chart_name, _FormatHumanReadable(value))
+ return None
+
+ gen = (_summary_to_string(chart_name, chart_values)
+ for chart_name, chart_values in sorted(charts.iteritems()))
+ return [i for i in gen if i]