author    tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-01-03 21:27:02 +0000
committer tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-01-03 21:27:02 +0000
commit    954be28bf9d90dcee3c7446dd13df36552063229 (patch)
tree      509cdc798b175c2570bc1815f57c30004a751651 /tools
parent    d8c54f5bdd8e812faa71d715ca9b6871db9999fd (diff)
[Telemetry] Increase verbosity of spaceport benchmark.
It sometimes times out on the bots and there's no indication of what is going on when it does. This should help troubleshoot.

BUG=166703
TEST=tools/perf/run_multipage_benchmarks -v --browser=system spaceport tools/perf/page_sets/spaceport.json

Review URL: https://codereview.chromium.org/11743010

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@175016 0039d316-1c4b-4281-b951-d872f2087c98
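For context on why a timeout here is opaque: util.WaitFor repeatedly polls a predicate until it returns true or the timeout expires, so if the predicate itself never logs anything, a timeout reports nothing about how far the benchmark got. The sketch below is a hypothetical, minimal stand-in for such a helper (not Telemetry's actual implementation) to illustrate the failure mode this change addresses.

```python
import time

def wait_for(condition, timeout_secs, poll_interval=1):
    """Polls |condition| until it returns True or |timeout_secs| elapses.

    Hypothetical stand-in for telemetry.util.WaitFor: if |condition| is
    silent, the only signal on failure is this timeout exception, with no
    hint of the benchmark's progress.
    """
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        if condition():
            return
        time.sleep(poll_interval)
    raise RuntimeError('Timed out after %ds waiting for %s' %
                       (timeout_secs, condition.__name__))
```

Logging inside the polled predicate, as the patch does in _IsDone, is what turns a bare timeout into a record of the last completed test.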
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/perf_tools/spaceport.py | 13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/tools/perf/perf_tools/spaceport.py b/tools/perf/perf_tools/spaceport.py
index d0adb839..4a7715b 100644
--- a/tools/perf/perf_tools/spaceport.py
+++ b/tools/perf/perf_tools/spaceport.py
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import logging
+
from telemetry import multi_page_benchmark
from telemetry import util
@@ -25,11 +27,16 @@ class SpaceportBenchmark(multi_page_benchmark.MultiPageBenchmark):
""")
js_get_results = 'JSON.stringify(window.__results)'
+ num_tests_complete = [0] # A list to work around closure issue.
def _IsDone():
num_tests_in_benchmark = 24
- result_dict = eval(tab.runtime.Evaluate(js_get_results))
- return num_tests_in_benchmark == len(result_dict)
- util.WaitFor(_IsDone, 1200)
+ num_results = len(eval(tab.runtime.Evaluate(js_get_results)))
+ if num_results > num_tests_complete[0]:
+ num_tests_complete[0] = num_results
+ logging.info('Completed benchmark %d of %d' % (num_tests_complete[0],
+ num_tests_in_benchmark))
+ return num_tests_complete[0] >= num_tests_in_benchmark
+ util.WaitFor(_IsDone, 1200, poll_interval=5)
result_dict = eval(tab.runtime.Evaluate(js_get_results))
for key in result_dict:
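The `num_tests_complete = [0]` line uses a common Python 2 idiom: a nested function such as `_IsDone` cannot rebind a variable in its enclosing scope (Python 2 has no `nonlocal`), but it can mutate an object it closes over, so a one-element list serves as a mutable counter. A standalone illustration of the idiom, independent of the benchmark code:

```python
def make_progress_checker(total):
    # One-element list acts as a mutable cell; the nested function cannot
    # rebind an outer local in Python 2, but it can mutate this list.
    completed = [0]

    def update(count):
        if count > completed[0]:
            completed[0] = count
            print('Completed %d of %d' % (completed[0], total))
        return completed[0] >= total

    return update

# Example usage:
is_done = make_progress_checker(24)
is_done(5)   # prints "Completed 5 of 24", returns False
is_done(24)  # prints "Completed 24 of 24", returns True
```

This mirrors the patch's behavior: progress is only logged when the count advances, so repeated polls at the same count stay quiet.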