diff options
author | rschoen@google.com <rschoen@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-09-14 00:53:33 +0000 |
---|---|---|
committer | rschoen@google.com <rschoen@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-09-14 00:53:33 +0000 |
commit | 1093c95702e6c21e5417dd9e6454a0f6d150ce1c (patch) | |
tree | f2d6febd3d36528df48d78510d3b110c43f805eb /build/util/lib | |
parent | 539156f5620872120c0465fc570efc8d26d06284 (diff) | |
download | chromium_src-1093c95702e6c21e5417dd9e6454a0f6d150ce1c.zip chromium_src-1093c95702e6c21e5417dd9e6454a0f6d150ce1c.tar.gz chromium_src-1093c95702e6c21e5417dd9e6454a0f6d150ce1c.tar.bz2 |
Copy perf_tests_helper.py into build/util/lib
Sorry for the delay on this one. Based off advice in https://codereview.chromium.org/22754003. Just a simple copy of the file, and removing the (seemingly unnecessary?) import of android_commands.
Will remove dependencies on old one as part of above mentioned patch.
Review URL: https://chromiumcodereview.appspot.com/23578019
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@223190 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'build/util/lib')
-rw-r--r-- | build/util/lib/common/perf_result_data_type.py | 20 | ||||
-rw-r--r-- | build/util/lib/common/perf_tests_results_helper.py | 151 |
2 files changed, 171 insertions, 0 deletions
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Symbolic names for the perf-result data types used by the Chromium
perf-test output format, plus small predicates over them."""

DEFAULT = 'default'
UNIMPORTANT = 'unimportant'
HISTOGRAM = 'histogram'
UNIMPORTANT_HISTOGRAM = 'unimportant-histogram'
INFORMATIONAL = 'informational'

# Every recognized data type, in declaration order; used for validation.
ALL_TYPES = [DEFAULT, UNIMPORTANT, HISTOGRAM, UNIMPORTANT_HISTOGRAM,
             INFORMATIONAL]


def IsValidType(datatype):
  """Returns True if |datatype| is one of the recognized type names."""
  return datatype in ALL_TYPES


def IsHistogram(datatype):
  """Returns True if |datatype| denotes a histogram result (important
  or unimportant)."""
  return datatype in (HISTOGRAM, UNIMPORTANT_HISTOGRAM)
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Helpers for emitting perf-test results in the buildbot log-scraping
format understood by the Chromium perf dashboard."""

import json
import logging
import math
import re
import sys

import perf_result_data_type


# Mapping from result type to the line prefix expected by the log scraper.
RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
                perf_result_data_type.DEFAULT: '*RESULT ',
                perf_result_data_type.INFORMATIONAL: '',
                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}


def _EscapePerfResult(s):
  """Escapes |s| for use in a perf result.

  Characters with meaning in the perf log format (colon, pipe, equals,
  slash, hash, ampersand, comma) are replaced by underscores.
  """
  # Raw string: the original non-raw '[\:|=/#&,]' relied on '\:' being an
  # unrecognized escape (deprecated in modern Python); same pattern bytes.
  return re.sub(r'[\:|=/#&,]', '_', s)


def _Flatten(values):
  """Returns a simple list without sub-lists (recursively flattened)."""
  ret = []
  for entry in values:
    if isinstance(entry, list):
      ret.extend(_Flatten(entry))
    else:
      ret.append(entry)
  return ret


def GeomMeanAndStdDevFromHistogram(histogram_json):
  """Computes the geometric mean and standard deviation of a histogram.

  Args:
    histogram_json: JSON string describing a histogram with a 'buckets'
        list; each bucket has 'low', 'count', and optionally 'high'.

  Returns:
    A (geometric mean, standard deviation) pair of floats. (0.0, 0.0) if
    the histogram has no buckets or no positive-mean samples.
  """
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if 'buckets' not in histogram:
    return 0.0, 0.0
  count = 0
  sum_of_logs = 0
  for bucket in histogram['buckets']:
    # A bucket without 'high' is a point bucket at 'low'.
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
    # Only positive means contribute; log() is undefined otherwise.
    if bucket['mean'] > 0:
      sum_of_logs += math.log(bucket['mean']) * bucket['count']
      count += bucket['count']

  if count == 0:
    return 0.0, 0.0

  sum_of_squares = 0
  geom_mean = math.exp(sum_of_logs / count)
  for bucket in histogram['buckets']:
    if bucket['mean'] > 0:
      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
  return geom_mean, math.sqrt(sum_of_squares / count)


def _MeanAndStdDevFromList(values):
  """Summarizes a list of measurements for printing.

  Args:
    values: A list with at least one element.

  Returns:
    A (value, avg, sd) tuple: |value| is the printable representation
    ('[v1,v2,...]' for multi-element numeric lists), |avg| the arithmetic
    mean and |sd| the sample standard deviation. |avg| and |sd| are None
    for single-element lists or non-numeric values.
  """
  avg = None
  sd = None
  if len(values) > 1:
    try:
      value = '[%s]' % ','.join([str(v) for v in values])
      avg = sum([float(v) for v in values]) / len(values)
      sqdiffs = [(float(v) - avg) ** 2 for v in values]
      variance = sum(sqdiffs) / (len(values) - 1)
      sd = math.sqrt(variance)
    except ValueError:
      # Non-numeric values: fall back to a plain joined representation.
      value = ", ".join(values)
  else:
    value = values[0]
  return value, avg, sd


def PrintPages(page_list):
  """Prints list of pages to stdout in the format required by perf tests."""
  # Single-argument parenthesized print works identically on Python 2 and 3.
  print('Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list]))


def PrintPerfResult(measurement, trace, values, units,
                    result_type=perf_result_data_type.DEFAULT,
                    print_to_stdout=True):
  """Prints numerical data to stdout in the format required by perf tests.

  The string args may be empty but they must not contain any colons (:) or
  equals signs (=).

  Args:
    measurement: A description of the quantity being measured, e.g. "vm_peak".
    trace: A description of the particular data point, e.g. "reference".
    values: A list of numeric measured values. An N-dimensional list will be
        flattened and treated as a simple list.
    units: A description of the units of measure, e.g. "bytes".
    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
    print_to_stdout: If True, prints the output in stdout instead of returning
        the output to caller.

  Returns:
    String of the formatted perf result.
  """
  # BUG FIX: the original asserted on a (condition, message) TUPLE, which
  # is always truthy, so invalid result types were silently accepted.
  assert perf_result_data_type.IsValidType(result_type), \
      'result type: %s is invalid' % result_type

  trace_name = _EscapePerfResult(trace)

  if (result_type == perf_result_data_type.UNIMPORTANT or
      result_type == perf_result_data_type.DEFAULT or
      result_type == perf_result_data_type.INFORMATIONAL):
    assert isinstance(values, list)
    assert len(values)
    assert '/' not in measurement
    value, avg, sd = _MeanAndStdDevFromList(_Flatten(values))
    output = '%s%s: %s%s%s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        # Do not show the equals sign if the trace is empty; this usually
        # happens when the measurement alone describes the result.
        '= ' if trace_name else '',
        value,
        units)
  else:
    assert perf_result_data_type.IsHistogram(result_type)
    assert isinstance(values, list)
    # The histograms can only be printed individually, there's no computation
    # across different histograms.
    assert len(values) == 1
    value = values[0]
    output = '%s%s: %s= %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        value)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)

  # Append mean/stddev lines when available (0.0 is treated as "absent",
  # preserving the original truthiness-based behavior).
  if avg:
    output += '\nAvg %s: %f%s' % (measurement, avg, units)
  if sd:
    output += '\nSd %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    print(output)
    sys.stdout.flush()
  return output