summaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorvmpstr@chromium.org <vmpstr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-10-21 21:41:15 +0000
committervmpstr@chromium.org <vmpstr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-10-21 21:41:15 +0000
commit752e2cf0862cd3daa0c219e8dfb13ebd5330e081 (patch)
treee6f00b1e8784aeba19094bbcbab4a572b4375544 /tools
parent281a28973176ba69116db7b2b4254952cf1a7119 (diff)
downloadchromium_src-752e2cf0862cd3daa0c219e8dfb13ebd5330e081.zip
chromium_src-752e2cf0862cd3daa0c219e8dfb13ebd5330e081.tar.gz
chromium_src-752e2cf0862cd3daa0c219e8dfb13ebd5330e081.tar.bz2
cc: Allow micro benchmarks to receive arguments from js.
This patch adds the ability for us to pass parameters via telemetry or via javascript to the micro benchmarks. This allows us to be a bit more flexible in what we test, as opposed to having a set of fixed parameters to test. BUG=307756 R=enne@chromium.org,nduca@chromium.org Review URL: https://codereview.chromium.org/27739002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@229922 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r--tools/perf/measurements/record_per_area.py20
1 file changed, 16 insertions, 4 deletions
diff --git a/tools/perf/measurements/record_per_area.py b/tools/perf/measurements/record_per_area.py
index c1c34c1..5121f13 100644
--- a/tools/perf/measurements/record_per_area.py
+++ b/tools/perf/measurements/record_per_area.py
@@ -41,14 +41,26 @@ class RecordPerArea(page_measurement.PageMeasurement):
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
- });
+ }, [{width: 1, height: 1},
+ {width: 250, height: 250},
+ {width: 500, height: 500},
+ {width: 750, height: 750},
+ {width: 1000, height: 1000},
+ {width: 256, height: 1024},
+ {width: 1024, height: 256}]);
""")
def _IsDone():
return tab.EvaluateJavaScript(
- 'window.benchmark_results.done', timeout=120)
- util.WaitFor(_IsDone, timeout=120)
+ 'window.benchmark_results.done', timeout=300)
+ util.WaitFor(_IsDone, timeout=300)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
- results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
+ width = data['width']
+ height = data['height']
+ area = width * height
+ time_ms = data['time_ms']
+
+ results.Add('area_%07d_%dx%d' % (area, width, height), 'ms', time_ms)
+