From cf4d52c90d21f94e122910353d8b15ac9b17e87c Mon Sep 17 00:00:00 2001 From: "tonyg@chromium.org" Date: Tue, 30 Oct 2012 18:37:39 +0000 Subject: Automate Kraken benchmark with Chrome Remote Control. This uses web page replay to record the benchmark itself so it deletes the unused kraken data checked into chrome/test/data. BUG=126516 TEST=Manual on linux Review URL: https://codereview.chromium.org/11348021 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@164951 0039d316-1c4b-4281-b951-d872f2087c98 --- tools/perf/page_sets/kraken.json | 9 +++++++ tools/perf/perf_tools/kraken.py | 31 ++++++++++++++++++++++ .../perf/perf_tools/multipage_benchmark_runner.py | 3 +++ 3 files changed, 43 insertions(+) create mode 100644 tools/perf/page_sets/kraken.json create mode 100644 tools/perf/perf_tools/kraken.py (limited to 'tools') diff --git a/tools/perf/page_sets/kraken.json b/tools/perf/page_sets/kraken.json new file mode 100644 index 0000000..e6c5a6f --- /dev/null +++ b/tools/perf/page_sets/kraken.json @@ -0,0 +1,9 @@ +{ + "description": "Kraken JavaScript benchmark", + "archive_path": "../data/kraken.wpr", + "pages": [ + { + "url": "http://krakenbenchmark.mozilla.org/kraken-1.1/driver.html" + } + ] +} diff --git a/tools/perf/perf_tools/kraken.py b/tools/perf/perf_tools/kraken.py new file mode 100644 index 0000000..3110452 --- /dev/null +++ b/tools/perf/perf_tools/kraken.py @@ -0,0 +1,31 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +from chrome_remote_control import multi_page_benchmark +from chrome_remote_control import util + +# Mean of a list of numbers; 0.0 for an empty list. +def _Mean(l): + return float(sum(l)) / len(l) if len(l) > 0 else 0.0 + +# Loads the Kraken driver page (see page_sets/kraken.json), waits until its +# results page is ready, and reports per-suite timings in milliseconds. +class Kraken(multi_page_benchmark.MultiPageBenchmark): + def MeasurePage(self, _, tab, results): + js_is_done = """ +document.title.indexOf("Results") != -1 && document.readyState == "complete" +""" + def _IsDone(): + return bool(tab.runtime.Evaluate(js_is_done)) + util.WaitFor(_IsDone, 300) + + js_get_results = """ +var formElement = document.getElementsByTagName("input")[0]; +decodeURIComponent(formElement.value.split("?")[1]); +""" + # NOTE(review): eval() of a page-supplied string can execute arbitrary + # Python; presumably acceptable only because the page is served from the + # recorded replay archive, never the live network — TODO confirm, and + # prefer ast.literal_eval if the driver emits a plain literal. + result_dict = eval(tab.runtime.Evaluate(js_get_results)) + # Each result_dict value (other than the version key 'v') appears to be a + # list of per-run times; 'Total' is the sum of the per-suite means. + total = 0 + for key in result_dict: + if key == 'v': + continue + results.Add(key, 'ms', result_dict[key]) + total += _Mean(result_dict[key]) + results.Add('Total', 'ms', total) diff --git a/tools/perf/perf_tools/multipage_benchmark_runner.py b/tools/perf/perf_tools/multipage_benchmark_runner.py index db7861c..d90b2d5 100755 --- a/tools/perf/perf_tools/multipage_benchmark_runner.py +++ b/tools/perf/perf_tools/multipage_benchmark_runner.py @@ -14,6 +14,7 @@ from chrome_remote_control import page_runner from chrome_remote_control import page_set import perf_tools.first_paint_time_benchmark +import perf_tools.kraken import perf_tools.scrolling_benchmark import perf_tools.skpicture_printer import perf_tools.texture_upload_benchmark @@ -22,6 +23,8 @@ import perf_tools.texture_upload_benchmark _BENCHMARKS = { 'first_paint_time_benchmark': perf_tools.first_paint_time_benchmark.FirstPaintTimeBenchmark, + 'kraken': + perf_tools.kraken.Kraken, 'scrolling_benchmark': perf_tools.scrolling_benchmark.ScrollingBenchmark, 'skpicture_printer': -- cgit v1.1