author     tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-01 17:16:52 +0000
committer  tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-01 17:16:52 +0000
commit     5f3a84d49d42ccb02fed88ec8e1e7fdc89c012e5 (patch)
tree       89a6525dc1e1d681ef6086252d67175eeaaab75e
parent     28c951ef33104765aaca03a90643bff118480a7d (diff)
[chrome-remote-control] Clean up kraken output a little.
1. Mark sub-results as unimportant so the total stands out more on the buildbot.
2. Alphabetize results.
3. Display averages instead of lists in the csv output.

BUG=None
TEST=Manual

Review URL: https://codereview.chromium.org/11364016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@165411 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py  10
-rw-r--r--  tools/perf/perf_tools/kraken.py  2
2 files changed, 10 insertions, 2 deletions
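
For context on change 3: a CSV cell now holds one number per measurement instead of a Python list. A minimal sketch of how the new _Mean helper (added in the diff below) behaves; the sample values here are made up:

    def _Mean(l):
      return float(sum(l)) / len(l) if len(l) > 0 else 0.0

    print(_Mean([305.0, 311.0, 299.0]))  # 305.0 -- one number in the CSV cell
    print(_Mean([]))                     # 0.0   -- guard for empty value lists
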
diff --git a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
index 59c1728..2cc6018 100644
--- a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
+++ b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
@@ -17,6 +17,11 @@ sys.path.append(
 from perf_tests_helper import GeomMeanAndStdDevFromHistogram
 from perf_tests_helper import PrintPerfResult  # pylint: disable=F0401
 
+
+def _Mean(l):
+  return float(sum(l)) / len(l) if len(l) > 0 else 0.0
+
+
 class MeasurementFailure(page_test.Failure):
   """Exception that can be thrown from MeasurePage to indicate an undesired but
   designed-for problem."""
@@ -65,7 +70,8 @@ results! You must return the same dict keys every time."""
     self.results_summary[(name, units, data_type)].append(value)
 
   def PrintSummary(self, trace_tag):
-    for measurement_units_type, values in self.results_summary.iteritems():
+    for measurement_units_type, values in sorted(
+        self.results_summary.iteritems()):
       measurement, units, data_type = measurement_units_type
       trace = measurement + (trace_tag or '')
       PrintPerfResult(measurement, trace, values, units, data_type)
@@ -93,6 +99,8 @@ class CsvBenchmarkResults(BenchmarkResults):
       if self.field_types[name] == 'histogram':
         avg, _ = GeomMeanAndStdDevFromHistogram(value)
         row.append(avg)
+      elif isinstance(value, list):
+        row.append(_Mean(value))
       else:
         row.append(value)
     self._results_writer.writerow(row)
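
Taken together, the two hunks above make PrintSummary emit results in a stable alphabetical order and make CsvBenchmarkResults collapse list values to their mean. A standalone sketch of that combined behavior, with a made-up stand-in for self.results_summary (dict.items() is used so the snippet also runs on Python 3):

    def _Mean(l):
      return float(sum(l)) / len(l) if len(l) > 0 else 0.0

    # (measurement, units, data_type) -> list of values, as in BenchmarkResults.
    results_summary = {
        ('imaging-gaussian-blur', 'ms', 'unimportant'): [1500.0, 1488.0],
        ('ai-astar', 'ms', 'unimportant'): [305.0, 311.0],
    }

    # sorted() yields ai-astar before imaging-gaussian-blur regardless of
    # dict iteration order, so buildbot output no longer reshuffles.
    for (measurement, units, data_type), values in sorted(results_summary.items()):
      print('%s (%s, %s): %s' % (measurement, units, data_type, _Mean(values)))
    # ai-astar (ms, unimportant): 308.0
    # imaging-gaussian-blur (ms, unimportant): 1494.0
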
diff --git a/tools/perf/perf_tools/kraken.py b/tools/perf/perf_tools/kraken.py
index 3110452..a854634 100644
--- a/tools/perf/perf_tools/kraken.py
+++ b/tools/perf/perf_tools/kraken.py
@@ -26,6 +26,6 @@ decodeURIComponent(formElement.value.split("?")[1]);
     for key in result_dict:
       if key == 'v':
         continue
-      results.Add(key, 'ms', result_dict[key])
+      results.Add(key, 'ms', result_dict[key], data_type='unimportant')
       total += _Mean(result_dict[key])
     results.Add('Total', 'ms', total)
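
The effect of the kraken.py change is that only the Total line remains an important result on the bot. A rough illustration with a hypothetical stand-in for the results object; the FakeResults class, the */RESULT prefix rendering, and the numbers are all illustrative assumptions rather than the real PrintPerfResult output:

    class FakeResults(object):
      def Add(self, name, units, value, data_type='default'):
        # Assumed convention: important results carry a '*RESULT' prefix that
        # the buildbot surfaces; 'unimportant' ones get a plain 'RESULT'.
        prefix = 'RESULT' if data_type == 'unimportant' else '*RESULT'
        print('%s %s: %s %s' % (prefix, name, value, units))

    results = FakeResults()
    results.Add('ai-astar', 'ms', [305.0, 311.0], data_type='unimportant')
    results.Add('Total', 'ms', 3568.0)  # stays important, so it stands out
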