From f9e906de95d75ac8f074c1b1d3d0c89d0d83d194 Mon Sep 17 00:00:00 2001
From: "tonyg@chromium.org"
Date: Fri, 30 Nov 2012 23:06:15 +0000
Subject: Rename sunspider_benchmark to sunspider.

The way the perfbot works is that the benchmark name is used as the graph
name. Since the graph is historically named sunspider, this makes for a
clean transition with preserved history.

BUG=163680
TEST=None

Review URL: https://codereview.chromium.org/11348331

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@170587 0039d316-1c4b-4281-b951-d872f2087c98
---
 tools/perf/perf_tools/sunspider.py           | 35 ++++++++++++++++++++++++++++
 tools/perf/perf_tools/sunspider_benchmark.py | 35 ----------------------------
 2 files changed, 35 insertions(+), 35 deletions(-)
 create mode 100644 tools/perf/perf_tools/sunspider.py
 delete mode 100644 tools/perf/perf_tools/sunspider_benchmark.py

diff --git a/tools/perf/perf_tools/sunspider.py b/tools/perf/perf_tools/sunspider.py
new file mode 100644
index 0000000..2a01a25
--- /dev/null
+++ b/tools/perf/perf_tools/sunspider.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import json
+
+from telemetry import multi_page_benchmark
+from telemetry import util
+
+
+class SunSpiderBenchark(multi_page_benchmark.MultiPageBenchmark):
+  def MeasurePage(self, _, tab, results):
+    js_is_done = """
+window.location.pathname.indexOf('sunspider-results') >= 0"""
+    def _IsDone():
+      return tab.runtime.Evaluate(js_is_done)
+    util.WaitFor(_IsDone, 300, poll_interval=5)
+
+    js_get_results = 'JSON.stringify(output);'
+    js_results = json.loads(tab.runtime.Evaluate(js_get_results))
+    r = collections.defaultdict(list)
+    totals = []
+    # js_results is: [{'foo': v1, 'bar': v2},
+    #                 {'foo': v3, 'bar': v4},
+    #                 ...]
+    for result in js_results:
+      total = 0
+      for key, value in result.iteritems():
+        r[key].append(value)
+        total += value
+      totals.append(total)
+    for key, values in r.iteritems():
+      results.Add('t', 'ms', values, chart_name=key, data_type='unimportant')
+    results.Add('t', 'ms', totals, chart_name='total')
diff --git a/tools/perf/perf_tools/sunspider_benchmark.py b/tools/perf/perf_tools/sunspider_benchmark.py
deleted file mode 100644
index 2a01a25..0000000
--- a/tools/perf/perf_tools/sunspider_benchmark.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import collections
-import json
-
-from telemetry import multi_page_benchmark
-from telemetry import util
-
-
-class SunSpiderBenchark(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, _, tab, results):
-    js_is_done = """
-window.location.pathname.indexOf('sunspider-results') >= 0"""
-    def _IsDone():
-      return tab.runtime.Evaluate(js_is_done)
-    util.WaitFor(_IsDone, 300, poll_interval=5)
-
-    js_get_results = 'JSON.stringify(output);'
-    js_results = json.loads(tab.runtime.Evaluate(js_get_results))
-    r = collections.defaultdict(list)
-    totals = []
-    # js_results is: [{'foo': v1, 'bar': v2},
-    #                 {'foo': v3, 'bar': v4},
-    #                 ...]
-    for result in js_results:
-      total = 0
-      for key, value in result.iteritems():
-        r[key].append(value)
-        total += value
-      totals.append(total)
-    for key, values in r.iteritems():
-      results.Add('t', 'ms', values, chart_name=key, data_type='unimportant')
-    results.Add('t', 'ms', totals, chart_name='total')
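
For reference, MeasurePage waits (up to 300 seconds, polling every 5) for
SunSpider's driver to navigate to its results page, then reads the global
`output` array that page defines: one dict of test name to runtime per run.
The loop then pivots that list of dicts into one time series per test plus a
summed total per run. Below is a minimal standalone sketch of that pivot; the
test names and timings are invented for illustration, and it uses Python 3's
items() where the 2012-era patch above uses Python 2's iteritems():

    import collections

    # Shape of SunSpider's `output` after the JSON round-trip: one dict per
    # run. These test names and times are hypothetical, not from the patch.
    js_results = [
        {'3d-cube': 18, 'string-fasta': 25},  # run 1 (ms)
        {'3d-cube': 17, 'string-fasta': 24},  # run 2 (ms)
    ]

    r = collections.defaultdict(list)  # test name -> [time for each run]
    totals = []                        # summed time for each run
    for result in js_results:
        total = 0
        for key, value in result.items():
            r[key].append(value)   # grow this test's series by one run
            total += value         # accumulate this run's total
        totals.append(total)

    print(dict(r))  # {'3d-cube': [18, 17], 'string-fasta': [25, 24]}
    print(totals)   # [43, 41]

The per-test series are reported with data_type='unimportant', presumably so
that only the summed series, added under chart_name='total', is treated as the
headline result. That 'total' graph's historical name, sunspider, is what
motivates the rename.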