Diffstat (limited to 'tools/perf/perf_tools')
-rw-r--r--  tools/perf/perf_tools/cheapness_predictor_benchmark.py | 42
-rw-r--r--  tools/perf/perf_tools/cheapness_predictor_measurement.py | 122
-rw-r--r--  tools/perf/perf_tools/cheapness_predictor_metrics.py | 86
-rw-r--r--  tools/perf/perf_tools/dromaeo.py | 4
-rw-r--r--  tools/perf/perf_tools/histogram_metric.py (renamed from tools/perf/perf_tools/histogram_measurement.py) | 2
-rw-r--r--  tools/perf/perf_tools/image_decoding_measurement.py (renamed from tools/perf/perf_tools/image_decoding_benchmark.py) | 10
-rw-r--r--  tools/perf/perf_tools/jsgamebench.py | 4
-rw-r--r--  tools/perf/perf_tools/kraken.py | 4
-rw-r--r--  tools/perf/perf_tools/loading_measurement.py (renamed from tools/perf/perf_tools/loading_benchmark.py) | 4
-rw-r--r--  tools/perf/perf_tools/memory_measurement.py (renamed from tools/perf/perf_tools/memory_benchmark.py) | 16
-rw-r--r--  tools/perf/perf_tools/octane.py | 4
-rw-r--r--  tools/perf/perf_tools/page_cycler.py | 16
-rw-r--r--  tools/perf/perf_tools/rasterize_and_record_benchmark.py | 18
-rw-r--r--  tools/perf/perf_tools/robohornetpro.py | 6
-rw-r--r--  tools/perf/perf_tools/scrolling_benchmark.py | 8
-rw-r--r--  tools/perf/perf_tools/skpicture_printer.py | 4
-rw-r--r--  tools/perf/perf_tools/smoothness_benchmark.py | 182
-rw-r--r--  tools/perf/perf_tools/smoothness_measurement.py | 227
-rw-r--r--  tools/perf/perf_tools/smoothness_metrics.js (renamed from tools/perf/perf_tools/smoothness_measurement.js) | 0
-rw-r--r--  tools/perf/perf_tools/smoothness_metrics.py | 50
-rw-r--r--  tools/perf/perf_tools/spaceport.py | 13
-rw-r--r--  tools/perf/perf_tools/startup_measurement.py (renamed from tools/perf/perf_tools/startup_benchmark.py) | 4
-rw-r--r--  tools/perf/perf_tools/sunspider.py | 4
23 files changed, 411 insertions, 419 deletions
diff --git a/tools/perf/perf_tools/cheapness_predictor_benchmark.py b/tools/perf/perf_tools/cheapness_predictor_benchmark.py
deleted file mode 100644
index a0915eb..0000000
--- a/tools/perf/perf_tools/cheapness_predictor_benchmark.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-from perf_tools import cheapness_predictor_measurement
-from telemetry.page import page_benchmark
-
-PREDICTOR_STATS = [
- {'name': 'picture_pile_count', 'units': ''},
- {'name': 'predictor_accuracy', 'units': 'percent'},
- {'name': 'predictor_safely_wrong_count', 'units': ''},
- {'name': 'predictor_badly_wrong_count', 'units': ''}]
-
-class CheapnessPredictorBenchmark(page_benchmark.PageBenchmark):
- def __init__(self):
- super(CheapnessPredictorBenchmark, self).__init__('smoothness')
- self._measurement = None
-
- def CustomizeBrowserOptions(self, options):
- options.AppendExtraBrowserArg('--dom-automation')
- options.AppendExtraBrowserArg('--enable-prediction-benchmarking')
- options.AppendExtraBrowserArg('--enable-gpu-benchmarking')
- options.AppendExtraBrowserArg('--enable-threaded-compositing')
- options.AppendExtraBrowserArg('--enable-impl-side-painting')
-
- def DidNavigateToPage(self, page, tab):
- self._measurement = \
- cheapness_predictor_measurement.CheapnessPredictorMeasurement(tab)
- self._measurement.GatherInitialStats()
-
- def DidRunAction(self, page, tab, action):
- self._measurement.GatherDeltaStats()
-
- def CanRunForPage(self, page):
- return hasattr(page, 'smoothness')
-
- def MeasurePage(self, page, tab, results):
- predictor_stats = self._measurement.stats
-
- for stat_to_gather in PREDICTOR_STATS:
- results.Add(stat_to_gather['name'],
- stat_to_gather['units'],
- predictor_stats[stat_to_gather['name']])
diff --git a/tools/perf/perf_tools/cheapness_predictor_measurement.py b/tools/perf/perf_tools/cheapness_predictor_measurement.py
index 9500f3e..cf175b5 100644
--- a/tools/perf/perf_tools/cheapness_predictor_measurement.py
+++ b/tools/perf/perf_tools/cheapness_predictor_measurement.py
@@ -1,86 +1,42 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import json
-
-class CheapnessPredictorMeasurement(object):
- def __init__(self, tab):
- self._tab = tab
- self._initial_stats = {}
- self.stats = {}
-
- def GatherInitialStats(self):
- self._initial_stats = self._GatherStats()
-
- def GatherDeltaStats(self):
- final_stats = self._GatherStats()
-
- correct_count = final_stats['predictor_correct_count'] - \
- self._initial_stats['predictor_correct_count']
-
- incorrect_count = final_stats['predictor_incorrect_count'] - \
- self._initial_stats['predictor_incorrect_count']
-
- percent, total = self._GetPercentAndTotal(correct_count, incorrect_count)
-
- self.stats['picture_pile_count'] = total
- self.stats['predictor_correct_count'] = correct_count
- self.stats['predictor_incorrect_count'] = incorrect_count
- self.stats['predictor_accuracy'] = percent
- self.stats['predictor_safely_wrong_count'] = \
- final_stats['predictor_safely_wrong_count'] - \
- self._initial_stats['predictor_safely_wrong_count']
- self.stats['predictor_badly_wrong_count'] = \
- final_stats['predictor_badly_wrong_count'] - \
- self._initial_stats['predictor_badly_wrong_count']
-
- def _GatherStats(self):
- stats = {}
-
- incorrect_count, correct_count = \
- self._GetBooleanHistogramCounts(self._tab,
- 'Renderer4.CheapPredictorAccuracy')
-
- percent, total = self._GetPercentAndTotal(correct_count, incorrect_count)
- stats['picture_pile_count'] = total
- stats['predictor_correct_count'] = correct_count
- stats['predictor_incorrect_count'] = incorrect_count
- stats['predictor_accuracy'] = percent
-
- _, safely_wrong_count = \
- self._GetBooleanHistogramCounts(self._tab,
- 'Renderer4.CheapPredictorSafelyWrong')
- stats['predictor_safely_wrong_count'] = safely_wrong_count
-
- _, badly_wrong_count = \
- self._GetBooleanHistogramCounts(self._tab,
- 'Renderer4.CheapPredictorBadlyWrong')
- stats['predictor_badly_wrong_count'] = badly_wrong_count
-
- return stats
-
-
- def _GetPercentAndTotal(self, correct_count, incorrect_count):
- total = incorrect_count + correct_count
- percent = 0
- if total > 0:
- percent = 100 * correct_count / float(total)
- return percent, total
-
- def _GetBooleanHistogramCounts(self, tab, histogram_name):
- count = [0, 0]
- js = ('window.domAutomationController.getHistogram ? '
- 'window.domAutomationController.getHistogram('
- '"%s") : ""' % (histogram_name))
- data = tab.EvaluateJavaScript(js)
- if not data:
- return count
-
- histogram = json.loads(data)
- if histogram:
- for bucket in histogram['buckets']:
- if bucket['low'] > 1:
- continue
- count[bucket['low']] += bucket['count']
-
- return count
+from perf_tools import cheapness_predictor_metrics
+from telemetry.page import page_measurement
+
+PREDICTOR_STATS = [
+ {'name': 'picture_pile_count', 'units': ''},
+ {'name': 'predictor_accuracy', 'units': 'percent'},
+ {'name': 'predictor_safely_wrong_count', 'units': ''},
+ {'name': 'predictor_badly_wrong_count', 'units': ''}]
+
+class CheapnessPredictorMeasurement(page_measurement.PageMeasurement):
+ def __init__(self):
+ super(CheapnessPredictorMeasurement, self).__init__('smoothness')
+ self._metrics = None
+
+ def CustomizeBrowserOptions(self, options):
+ options.AppendExtraBrowserArg('--dom-automation')
+ options.AppendExtraBrowserArg('--enable-prediction-benchmarking')
+ options.AppendExtraBrowserArg('--enable-gpu-benchmarking')
+ options.AppendExtraBrowserArg('--enable-threaded-compositing')
+ options.AppendExtraBrowserArg('--enable-impl-side-painting')
+
+ def DidNavigateToPage(self, page, tab):
+ self._metrics = \
+ cheapness_predictor_metrics.CheapnessPredictorMetrics(tab)
+ self._metrics.GatherInitialStats()
+
+ def DidRunAction(self, page, tab, action):
+ self._metrics.GatherDeltaStats()
+
+ def CanRunForPage(self, page):
+ return hasattr(page, 'smoothness')
+
+ def MeasurePage(self, page, tab, results):
+ predictor_stats = self._metrics.stats
+
+ for stat_to_gather in PREDICTOR_STATS:
+ results.Add(stat_to_gather['name'],
+ stat_to_gather['units'],
+ predictor_stats[stat_to_gather['name']])
diff --git a/tools/perf/perf_tools/cheapness_predictor_metrics.py b/tools/perf/perf_tools/cheapness_predictor_metrics.py
new file mode 100644
index 0000000..2fc14c3
--- /dev/null
+++ b/tools/perf/perf_tools/cheapness_predictor_metrics.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+
+class CheapnessPredictorMetrics(object):
+ def __init__(self, tab):
+ self._tab = tab
+ self._initial_stats = {}
+ self.stats = {}
+
+ def GatherInitialStats(self):
+ self._initial_stats = self._GatherStats()
+
+ def GatherDeltaStats(self):
+ final_stats = self._GatherStats()
+
+ correct_count = final_stats['predictor_correct_count'] - \
+ self._initial_stats['predictor_correct_count']
+
+ incorrect_count = final_stats['predictor_incorrect_count'] - \
+ self._initial_stats['predictor_incorrect_count']
+
+ percent, total = self._GetPercentAndTotal(correct_count, incorrect_count)
+
+ self.stats['picture_pile_count'] = total
+ self.stats['predictor_correct_count'] = correct_count
+ self.stats['predictor_incorrect_count'] = incorrect_count
+ self.stats['predictor_accuracy'] = percent
+ self.stats['predictor_safely_wrong_count'] = \
+ final_stats['predictor_safely_wrong_count'] - \
+ self._initial_stats['predictor_safely_wrong_count']
+ self.stats['predictor_badly_wrong_count'] = \
+ final_stats['predictor_badly_wrong_count'] - \
+ self._initial_stats['predictor_badly_wrong_count']
+
+ def _GatherStats(self):
+ stats = {}
+
+ incorrect_count, correct_count = \
+ self._GetBooleanHistogramCounts(self._tab,
+ 'Renderer4.CheapPredictorAccuracy')
+
+ percent, total = self._GetPercentAndTotal(correct_count, incorrect_count)
+ stats['picture_pile_count'] = total
+ stats['predictor_correct_count'] = correct_count
+ stats['predictor_incorrect_count'] = incorrect_count
+ stats['predictor_accuracy'] = percent
+
+ _, safely_wrong_count = \
+ self._GetBooleanHistogramCounts(self._tab,
+ 'Renderer4.CheapPredictorSafelyWrong')
+ stats['predictor_safely_wrong_count'] = safely_wrong_count
+
+ _, badly_wrong_count = \
+ self._GetBooleanHistogramCounts(self._tab,
+ 'Renderer4.CheapPredictorBadlyWrong')
+ stats['predictor_badly_wrong_count'] = badly_wrong_count
+
+ return stats
+
+
+ def _GetPercentAndTotal(self, correct_count, incorrect_count):
+ total = incorrect_count + correct_count
+ percent = 0
+ if total > 0:
+ percent = 100 * correct_count / float(total)
+ return percent, total
+
+ def _GetBooleanHistogramCounts(self, tab, histogram_name):
+ count = [0, 0]
+ js = ('window.domAutomationController.getHistogram ? '
+ 'window.domAutomationController.getHistogram('
+ '"%s") : ""' % (histogram_name))
+ data = tab.EvaluateJavaScript(js)
+ if not data:
+ return count
+
+ histogram = json.loads(data)
+ if histogram:
+ for bucket in histogram['buckets']:
+ if bucket['low'] > 1:
+ continue
+ count[bucket['low']] += bucket['count']
+
+ return count
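
For reference, a minimal sketch of the boolean histogram JSON that _GetBooleanHistogramCounts above appears to expect from window.domAutomationController.getHistogram(); the payload shown is hypothetical, its shape inferred from the parsing loop (bucket 'low' 0 tallies incorrect predictions, 1 tallies correct ones):

import json

# Hypothetical payload; shape inferred from the parsing code above, not
# taken from an actual renderer dump.
data = '{"buckets": [{"low": 0, "count": 3}, {"low": 1, "count": 97}]}'
count = [0, 0]
for bucket in json.loads(data)['buckets']:
    if bucket['low'] > 1:
        continue
    count[bucket['low']] += bucket['count']
# count == [3, 97]: 100 picture piles, predictor_accuracy == 97.0 percent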
diff --git a/tools/perf/perf_tools/dromaeo.py b/tools/perf/perf_tools/dromaeo.py
index 14ccb27..96037ea 100644
--- a/tools/perf/perf_tools/dromaeo.py
+++ b/tools/perf/perf_tools/dromaeo.py
@@ -3,9 +3,9 @@
# found in the LICENSE file.
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class Dromaeo(page_benchmark.PageBenchmark):
+class Dromaeo(page_measurement.PageMeasurement):
def MeasurePage(self, page, tab, results):
js_is_done = 'window.document.cookie.indexOf("__done=1") >= 0'
def _IsDone():
diff --git a/tools/perf/perf_tools/histogram_measurement.py b/tools/perf/perf_tools/histogram_metric.py
index 267329a..d05394e 100644
--- a/tools/perf/perf_tools/histogram_measurement.py
+++ b/tools/perf/perf_tools/histogram_metric.py
@@ -6,7 +6,7 @@ from perf_tools import histogram as histogram_module
BROWSER_HISTOGRAM = 'browser_histogram'
RENDERER_HISTOGRAM = 'renderer_histogram'
-class HistogramMeasurement(object):
+class HistogramMetric(object):
def __init__(self, histogram, histogram_type):
self.name = histogram['name']
self.units = histogram['units']
diff --git a/tools/perf/perf_tools/image_decoding_benchmark.py b/tools/perf/perf_tools/image_decoding_measurement.py
index 58b6155..c293ac5 100644
--- a/tools/perf/perf_tools/image_decoding_benchmark.py
+++ b/tools/perf/perf_tools/image_decoding_measurement.py
@@ -2,10 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class ImageDecoding(page_benchmark.PageBenchmark):
+class ImageDecoding(page_measurement.PageMeasurement):
def WillNavigateToPage(self, page, tab):
tab.StartTimelineRecording()
@@ -17,11 +17,11 @@ class ImageDecoding(page_benchmark.PageBenchmark):
decode_image_events = \
tab.timeline_model.GetAllOfName('DecodeImage')
- # If it is a real image benchmark, then store only the last-minIterations
+ # If it is a real image page, then store only the last minIterations
# decode tasks.
if (hasattr(page,
- 'image_decoding_benchmark_limit_results_to_min_iterations') and
- page.image_decoding_benchmark_limit_results_to_min_iterations):
+ 'image_decoding_measurement_limit_results_to_min_iterations') and
+ page.image_decoding_measurement_limit_results_to_min_iterations):
assert _IsDone()
min_iterations = tab.EvaluateJavaScript('minIterations')
decode_image_events = decode_image_events[-min_iterations:]
diff --git a/tools/perf/perf_tools/jsgamebench.py b/tools/perf/perf_tools/jsgamebench.py
index 9fdf1166..474d43a 100644
--- a/tools/perf/perf_tools/jsgamebench.py
+++ b/tools/perf/perf_tools/jsgamebench.py
@@ -3,9 +3,9 @@
# found in the LICENSE file.
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class JsGameBench(page_benchmark.PageBenchmark):
+class JsGameBench(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('UI.call({}, "perftest")')
diff --git a/tools/perf/perf_tools/kraken.py b/tools/perf/perf_tools/kraken.py
index d09c01b..2d2086d 100644
--- a/tools/perf/perf_tools/kraken.py
+++ b/tools/perf/perf_tools/kraken.py
@@ -3,12 +3,12 @@
# found in the LICENSE file.
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
def _Mean(l):
return float(sum(l)) / len(l) if len(l) > 0 else 0.0
-class Kraken(page_benchmark.PageBenchmark):
+class Kraken(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
js_is_done = """
document.title.indexOf("Results") != -1 && document.readyState == "complete"
diff --git a/tools/perf/perf_tools/loading_benchmark.py b/tools/perf/perf_tools/loading_measurement.py
index d0aa214..2ff0e97 100644
--- a/tools/perf/perf_tools/loading_benchmark.py
+++ b/tools/perf/perf_tools/loading_measurement.py
@@ -5,9 +5,9 @@
import collections
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class LoadingBenchmark(page_benchmark.PageBenchmark):
+class LoadingMeasurement(page_measurement.PageMeasurement):
@property
def results_are_the_same_on_every_page(self):
return False
diff --git a/tools/perf/perf_tools/memory_benchmark.py b/tools/perf/perf_tools/memory_measurement.py
index 6c051d6..8d1724d 100644
--- a/tools/perf/perf_tools/memory_benchmark.py
+++ b/tools/perf/perf_tools/memory_measurement.py
@@ -1,8 +1,8 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from perf_tools import histogram_measurement
-from telemetry.page import page_benchmark
+from perf_tools import histogram_metric
+from telemetry.page import page_measurement
MEMORY_HISTOGRAMS = [
{'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
@@ -13,15 +13,15 @@ MEMORY_HISTOGRAMS = [
BROWSER_MEMORY_HISTOGRAMS = [
{'name': 'Memory.BrowserUsed', 'units': 'kb'}]
-class MemoryBenchmark(page_benchmark.PageBenchmark):
+class MemoryMeasurement(page_measurement.PageMeasurement):
def __init__(self):
- super(MemoryBenchmark, self).__init__('stress_memory')
+ super(MemoryMeasurement, self).__init__('stress_memory')
self.histograms = (
- [histogram_measurement.HistogramMeasurement(
- h, histogram_measurement.RENDERER_HISTOGRAM)
+ [histogram_metric.HistogramMetric(
+ h, histogram_metric.RENDERER_HISTOGRAM)
for h in MEMORY_HISTOGRAMS] +
- [histogram_measurement.HistogramMeasurement(
- h, histogram_measurement.BROWSER_HISTOGRAM)
+ [histogram_metric.HistogramMetric(
+ h, histogram_metric.BROWSER_HISTOGRAM)
for h in BROWSER_MEMORY_HISTOGRAMS])
def DidNavigateToPage(self, page, tab):
diff --git a/tools/perf/perf_tools/octane.py b/tools/perf/perf_tools/octane.py
index d6f4f1a..16be773 100644
--- a/tools/perf/perf_tools/octane.py
+++ b/tools/perf/perf_tools/octane.py
@@ -2,9 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class Octane(page_benchmark.PageBenchmark):
+class Octane(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
js_is_done = """
completed && !document.getElementById("progress-bar-container")"""
diff --git a/tools/perf/perf_tools/page_cycler.py b/tools/perf/perf_tools/page_cycler.py
index a0cb196..7890740 100644
--- a/tools/perf/perf_tools/page_cycler.py
+++ b/tools/perf/perf_tools/page_cycler.py
@@ -2,14 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""The page cycler benchmark.
+"""The page cycler measurement.
-This benchmark registers a window load handler in which is forces a layout and
+This measurement registers a window load handler in which it forces a layout and
then records the value of performance.now(). This call to now() measures the
time from navigationStart (immediately after the previous page's beforeunload
event) until after the layout in the page's load event. In addition, two garbage
collections are performed in between the page loads (in the beforeunload event).
-This extra garbage collection time is not included in the benchmark times.
+This extra garbage collection time is not included in the measurement times.
Finally, various memory and IO statistics are gathered at the very end of
cycling all pages.
@@ -18,16 +18,16 @@ cycling all pages.
import os
import sys
-from perf_tools import histogram_measurement
+from perf_tools import histogram_metric
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
MEMORY_HISTOGRAMS = [
{'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
{'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
{'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}]
-class PageCycler(page_benchmark.PageBenchmark):
+class PageCycler(page_measurement.PageMeasurement):
def AddCommandLineOptions(self, parser):
# The page cyclers should default to 10 iterations. In order to change the
# default of an option, we must remove and re-add it.
@@ -50,8 +50,8 @@ class PageCycler(page_benchmark.PageBenchmark):
self.start_commit_charge = tab.browser.memory_stats['SystemCommitCharge']
# pylint: disable=W0201
- self.histograms = [histogram_measurement.HistogramMeasurement(
- h, histogram_measurement.RENDERER_HISTOGRAM)
+ self.histograms = [histogram_metric.HistogramMetric(
+ h, histogram_metric.RENDERER_HISTOGRAM)
for h in MEMORY_HISTOGRAMS]
def WillNavigateToPage(self, page, tab):
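
To make the module docstring above concrete, here is a minimal sketch of the in-page hook it describes; the identifiers are illustrative assumptions, not the cycler's actual instrumentation:

# Hypothetical load handler, as described in the docstring: force a layout,
# then record performance.now(), which counts milliseconds from
# navigationStart.
js = """
window.addEventListener('load', function() {
  document.body.offsetHeight;  // reading offsetHeight forces a layout
  window.__pageCyclerTime = window.performance.now();
});
"""
# The string could then be injected with tab.ExecuteJavaScript(js).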
diff --git a/tools/perf/perf_tools/rasterize_and_record_benchmark.py b/tools/perf/perf_tools/rasterize_and_record_benchmark.py
index 65a1919..5834393 100644
--- a/tools/perf/perf_tools/rasterize_and_record_benchmark.py
+++ b/tools/perf/perf_tools/rasterize_and_record_benchmark.py
@@ -4,8 +4,8 @@
import time
-from perf_tools import smoothness_measurement
-from telemetry.page import page_benchmark
+from perf_tools import smoothness_metrics
+from telemetry.page import page_measurement
def DivideIfPossibleOrZero(numerator, denominator):
if denominator == 0:
@@ -52,10 +52,10 @@ def CalcPaintingResults(rendering_stats_deltas, results):
results.Add('total_record_and_rasterize_time', 'seconds', totalRecordTime +
totalRasterizeTime, data_type='unimportant')
-class RasterizeAndPaintBenchmark(page_benchmark.PageBenchmark):
+class RasterizeAndPaintMeasurement(page_measurement.PageMeasurement):
def __init__(self):
- super(RasterizeAndPaintBenchmark, self).__init__('', True)
- self._measurement = None
+ super(RasterizeAndPaintMeasurement, self).__init__('', True)
+ self._metrics = None
def AddCommandLineOptions(self, parser):
parser.add_option('--report-all-results', dest='report_all_results',
@@ -70,22 +70,22 @@ class RasterizeAndPaintBenchmark(page_benchmark.PageBenchmark):
options.extra_browser_args.append('--slow-down-raster-scale-factor=100')
def MeasurePage(self, page, tab, results):
- self._measurement = smoothness_measurement.SmoothnessMeasurement(tab)
+ self._metrics = smoothness_metrics.SmoothnessMetrics(tab)
# Wait until the page has loaded and come to a somewhat steady state
# (empirical wait time)
time.sleep(5)
- self._measurement.SetNeedsDisplayOnAllLayersAndStart()
+ self._metrics.SetNeedsDisplayOnAllLayersAndStart()
# Wait until all rasterization tasks are completed (empirical wait time)
# TODO(ernstm): Replace by a more accurate mechanism to measure stats for
# exactly one frame.
time.sleep(5)
- self._measurement.Stop()
+ self._metrics.Stop()
- rendering_stats_deltas = self._measurement.deltas
+ rendering_stats_deltas = self._metrics.deltas
CalcPaintingResults(rendering_stats_deltas, results)
diff --git a/tools/perf/perf_tools/robohornetpro.py b/tools/perf/perf_tools/robohornetpro.py
index dcc69df..9bd2553 100644
--- a/tools/perf/perf_tools/robohornetpro.py
+++ b/tools/perf/perf_tools/robohornetpro.py
@@ -2,11 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class RobohornetPro(page_benchmark.PageBenchmark):
+class RobohornetPro(page_measurement.PageMeasurement):
def CustomizeBrowserOptions(self, options):
- # Benchmark require use of real Date.now() for measurement.
+ # The measurement requires use of the real Date.now().
options.wpr_make_javascript_deterministic = False
def MeasurePage(self, _, tab, results):
diff --git a/tools/perf/perf_tools/scrolling_benchmark.py b/tools/perf/perf_tools/scrolling_benchmark.py
deleted file mode 100644
index cc3ddad..0000000
--- a/tools/perf/perf_tools/scrolling_benchmark.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-from perf_tools import smoothness_benchmark
-
-class ScrollingBenchmark(smoothness_benchmark.SmoothnessBenchmark):
- def __init__(self):
- super(ScrollingBenchmark, self).__init__()
diff --git a/tools/perf/perf_tools/skpicture_printer.py b/tools/perf/perf_tools/skpicture_printer.py
index beeb0d4..290bde1 100644
--- a/tools/perf/perf_tools/skpicture_printer.py
+++ b/tools/perf/perf_tools/skpicture_printer.py
@@ -3,11 +3,11 @@
# found in the LICENSE file.
import os
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
_JS = 'chrome.gpuBenchmarking.printToSkPicture("{0}");'
-class SkPicturePrinter(page_benchmark.PageBenchmark):
+class SkPicturePrinter(page_measurement.PageMeasurement):
def AddCommandLineOptions(self, parser):
parser.add_option('-o', '--outdir', help='Output directory')
diff --git a/tools/perf/perf_tools/smoothness_benchmark.py b/tools/perf/perf_tools/smoothness_benchmark.py
deleted file mode 100644
index 5ec96b6..0000000
--- a/tools/perf/perf_tools/smoothness_benchmark.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-from perf_tools import smoothness_measurement
-from telemetry.core import util
-from telemetry.page import page_benchmark
-
-class DidNotScrollException(page_benchmark.MeasurementFailure):
- def __init__(self):
- super(DidNotScrollException, self).__init__('Page did not scroll')
-
-def DivideIfPossibleOrZero(numerator, denominator):
- if denominator == 0:
- return 0
- return numerator / denominator
-
-def CalcScrollResults(rendering_stats_deltas, results):
- num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen']
-
- mean_frame_time_seconds = (
- rendering_stats_deltas['totalTimeInSeconds'] /
- float(num_frames_sent_to_screen))
-
- dropped_percent = (
- rendering_stats_deltas['droppedFrameCount'] /
- float(num_frames_sent_to_screen))
-
- num_impl_thread_scrolls = rendering_stats_deltas.get(
- 'numImplThreadScrolls', 0)
- num_main_thread_scrolls = rendering_stats_deltas.get(
- 'numMainThreadScrolls', 0)
-
- percent_impl_scrolled = DivideIfPossibleOrZero(
- float(num_impl_thread_scrolls),
- num_impl_thread_scrolls + num_main_thread_scrolls)
-
- num_layers = (
- rendering_stats_deltas.get('numLayersDrawn', 0) /
- float(num_frames_sent_to_screen))
-
- num_missing_tiles = (
- rendering_stats_deltas.get('numMissingTiles', 0) /
- float(num_frames_sent_to_screen))
-
- results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3))
- results.Add('dropped_percent', '%', round(dropped_percent * 100, 1),
- data_type='unimportant')
- results.Add('percent_impl_scrolled', '%',
- round(percent_impl_scrolled * 100, 1),
- data_type='unimportant')
- results.Add('average_num_layers_drawn', '', round(num_layers, 1),
- data_type='unimportant')
- results.Add('average_num_missing_tiles', '', round(num_missing_tiles, 1),
- data_type='unimportant')
-
-def CalcTextureUploadResults(rendering_stats_deltas, results):
- if (('totalCommitCount' not in rendering_stats_deltas)
- or rendering_stats_deltas['totalCommitCount'] == 0) :
- averageCommitTimeMs = 0
- else :
- averageCommitTimeMs = (
- 1000 * rendering_stats_deltas['totalCommitTimeInSeconds'] /
- rendering_stats_deltas['totalCommitCount'])
-
- results.Add('texture_upload_count', 'count',
- rendering_stats_deltas.get('textureUploadCount', 0))
- results.Add('total_texture_upload_time', 'seconds',
- rendering_stats_deltas.get('totalTextureUploadTimeInSeconds', 0))
- results.Add('average_commit_time', 'ms', averageCommitTimeMs,
- data_type='unimportant')
-
-def CalcFirstPaintTimeResults(results, tab):
- if tab.browser.is_content_shell:
- results.Add('first_paint', 'ms', 'unsupported')
- return
-
- tab.ExecuteJavaScript("""
- window.__rafFired = false;
- window.webkitRequestAnimationFrame(function() {
- window.__rafFired = true;
- });
- """)
- util.WaitFor(lambda: tab.EvaluateJavaScript('window.__rafFired'), 60)
-
- first_paint_secs = tab.EvaluateJavaScript(
- 'window.chrome.loadTimes().firstPaintTime - ' +
- 'window.chrome.loadTimes().startLoadTime')
-
- results.Add('first_paint', 'ms', round(first_paint_secs * 1000, 1))
-
-def CalcImageDecodingResults(rendering_stats_deltas, results):
- totalDeferredImageDecodeCount = rendering_stats_deltas.get(
- 'totalDeferredImageDecodeCount', 0)
- totalDeferredImageCacheHitCount = rendering_stats_deltas.get(
- 'totalDeferredImageCacheHitCount', 0)
- totalImageGatheringCount = rendering_stats_deltas.get(
- 'totalImageGatheringCount', 0)
- totalDeferredImageDecodeTimeInSeconds = rendering_stats_deltas.get(
- 'totalDeferredImageDecodeTimeInSeconds', 0)
- totalImageGatheringTimeInSeconds = rendering_stats_deltas.get(
- 'totalImageGatheringTimeInSeconds', 0)
-
- averageImageGatheringTime = DivideIfPossibleOrZero(
- (totalImageGatheringTimeInSeconds * 1000), totalImageGatheringCount)
-
- results.Add('total_deferred_image_decode_count', 'count',
- totalDeferredImageDecodeCount,
- data_type='unimportant')
- results.Add('total_image_cache_hit_count', 'count',
- totalDeferredImageCacheHitCount,
- data_type='unimportant')
- results.Add('average_image_gathering_time', 'ms', averageImageGatheringTime,
- data_type='unimportant')
- results.Add('total_deferred_image_decoding_time', 'seconds',
- totalDeferredImageDecodeTimeInSeconds,
- data_type='unimportant')
-
-class SmoothnessBenchmark(page_benchmark.PageBenchmark):
- def __init__(self):
- super(SmoothnessBenchmark, self).__init__('smoothness')
- self.force_enable_threaded_compositing = False
- self.use_gpu_benchmarking_extension = True
- self._measurement = None
-
- def AddCommandLineOptions(self, parser):
- parser.add_option('--report-all-results', dest='report_all_results',
- action='store_true',
- help='Reports all data collected, not just FPS')
-
- def CustomizeBrowserOptions(self, options):
- if self.use_gpu_benchmarking_extension:
- options.extra_browser_args.append('--enable-gpu-benchmarking')
- if self.force_enable_threaded_compositing:
- options.extra_browser_args.append('--enable-threaded-compositing')
-
- def CanRunForPage(self, page):
- return hasattr(page, 'smoothness')
-
- def WillRunAction(self, page, tab, action):
- if tab.browser.platform.IsRawDisplayFrameRateSupported():
- tab.browser.platform.StartRawDisplayFrameRateMeasurement()
- self._measurement = smoothness_measurement.SmoothnessMeasurement(tab)
- if action.CanBeBound():
- self._measurement.BindToAction(action)
- else:
- self._measurement.Start()
-
- def DidRunAction(self, page, tab, action):
- if tab.browser.platform.IsRawDisplayFrameRateSupported():
- tab.browser.platform.StopRawDisplayFrameRateMeasurement()
- if not action.CanBeBound():
- self._measurement.Stop()
-
- def MeasurePage(self, page, tab, results):
- rendering_stats_deltas = self._measurement.deltas
-
- if not (rendering_stats_deltas['numFramesSentToScreen'] > 0):
- raise DidNotScrollException()
-
- load_timings = tab.EvaluateJavaScript("window.performance.timing")
- load_time_seconds = (
- float(load_timings['loadEventStart']) -
- load_timings['navigationStart']) / 1000
- dom_content_loaded_time_seconds = (
- float(load_timings['domContentLoadedEventStart']) -
- load_timings['navigationStart']) / 1000
- results.Add('load_time', 'seconds', load_time_seconds)
- results.Add('dom_content_loaded_time', 'seconds',
- dom_content_loaded_time_seconds)
-
- CalcFirstPaintTimeResults(results, tab)
- CalcScrollResults(rendering_stats_deltas, results)
- CalcTextureUploadResults(rendering_stats_deltas, results)
- CalcImageDecodingResults(rendering_stats_deltas, results)
-
- if self.options.report_all_results:
- for k, v in rendering_stats_deltas.iteritems():
- results.Add(k, '', v)
-
- if tab.browser.platform.IsRawDisplayFrameRateSupported():
- for r in tab.browser.platform.GetRawDisplayFrameRateMeasurements():
- results.Add(r.name, r.unit, r.value)
diff --git a/tools/perf/perf_tools/smoothness_measurement.py b/tools/perf/perf_tools/smoothness_measurement.py
index bea02d5..08b663b 100644
--- a/tools/perf/perf_tools/smoothness_measurement.py
+++ b/tools/perf/perf_tools/smoothness_measurement.py
@@ -1,51 +1,182 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import os
-
-class SmoothnessMeasurement(object):
- def __init__(self, tab):
- self._tab = tab
- # Bring in the smoothness benchmark
- with open(
- os.path.join(os.path.dirname(__file__),
- 'smoothness_measurement.js')) as f:
- js = f.read()
- tab.ExecuteJavaScript(js)
-
- def Start(self):
- self._tab.ExecuteJavaScript(
- 'window.__renderingStats = new __RenderingStats();'
- 'window.__renderingStats.start()')
-
- def SetNeedsDisplayOnAllLayersAndStart(self):
- self._tab.ExecuteJavaScript(
- 'chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();'
- 'window.__renderingStats = new __RenderingStats();'
- 'window.__renderingStats.start()')
-
- def Stop(self):
- self._tab.ExecuteJavaScript('window.__renderingStats.stop()')
-
- def BindToAction(self, action):
- # Make the scroll test start and stop measurement automatically.
- self._tab.ExecuteJavaScript(
- 'window.__renderingStats = new __RenderingStats();')
- action.BindMeasurementJavaScript(self._tab,
- 'window.__renderingStats.start();',
- 'window.__renderingStats.stop();')
-
- @property
- def start_values(self):
- return self._tab.EvaluateJavaScript(
- 'window.__renderingStats.getStartValues()')
-
- @property
- def end_values(self):
- return self._tab.EvaluateJavaScript(
- 'window.__renderingStats.getEndValues()')
-
- @property
- def deltas(self):
- return self._tab.EvaluateJavaScript(
- 'window.__renderingStats.getDeltas()')
+from perf_tools import smoothness_metrics
+from telemetry.core import util
+from telemetry.page import page_measurement
+
+class DidNotScrollException(page_measurement.MeasurementFailure):
+ def __init__(self):
+ super(DidNotScrollException, self).__init__('Page did not scroll')
+
+def DivideIfPossibleOrZero(numerator, denominator):
+ if denominator == 0:
+ return 0
+ return numerator / denominator
+
+def CalcScrollResults(rendering_stats_deltas, results):
+ num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen']
+
+ mean_frame_time_seconds = (
+ rendering_stats_deltas['totalTimeInSeconds'] /
+ float(num_frames_sent_to_screen))
+
+ dropped_percent = (
+ rendering_stats_deltas['droppedFrameCount'] /
+ float(num_frames_sent_to_screen))
+
+ num_impl_thread_scrolls = rendering_stats_deltas.get(
+ 'numImplThreadScrolls', 0)
+ num_main_thread_scrolls = rendering_stats_deltas.get(
+ 'numMainThreadScrolls', 0)
+
+ percent_impl_scrolled = DivideIfPossibleOrZero(
+ float(num_impl_thread_scrolls),
+ num_impl_thread_scrolls + num_main_thread_scrolls)
+
+ num_layers = (
+ rendering_stats_deltas.get('numLayersDrawn', 0) /
+ float(num_frames_sent_to_screen))
+
+ num_missing_tiles = (
+ rendering_stats_deltas.get('numMissingTiles', 0) /
+ float(num_frames_sent_to_screen))
+
+ results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3))
+ results.Add('dropped_percent', '%', round(dropped_percent * 100, 1),
+ data_type='unimportant')
+ results.Add('percent_impl_scrolled', '%',
+ round(percent_impl_scrolled * 100, 1),
+ data_type='unimportant')
+ results.Add('average_num_layers_drawn', '', round(num_layers, 1),
+ data_type='unimportant')
+ results.Add('average_num_missing_tiles', '', round(num_missing_tiles, 1),
+ data_type='unimportant')
+
+def CalcTextureUploadResults(rendering_stats_deltas, results):
+ if (('totalCommitCount' not in rendering_stats_deltas)
+ or rendering_stats_deltas['totalCommitCount'] == 0) :
+ averageCommitTimeMs = 0
+ else :
+ averageCommitTimeMs = (
+ 1000 * rendering_stats_deltas['totalCommitTimeInSeconds'] /
+ rendering_stats_deltas['totalCommitCount'])
+
+ results.Add('texture_upload_count', 'count',
+ rendering_stats_deltas.get('textureUploadCount', 0))
+ results.Add('total_texture_upload_time', 'seconds',
+ rendering_stats_deltas.get('totalTextureUploadTimeInSeconds', 0))
+ results.Add('average_commit_time', 'ms', averageCommitTimeMs,
+ data_type='unimportant')
+
+def CalcFirstPaintTimeResults(results, tab):
+ if tab.browser.is_content_shell:
+ results.Add('first_paint', 'ms', 'unsupported')
+ return
+
+ tab.ExecuteJavaScript("""
+ window.__rafFired = false;
+ window.webkitRequestAnimationFrame(function() {
+ window.__rafFired = true;
+ });
+ """)
+ util.WaitFor(lambda: tab.EvaluateJavaScript('window.__rafFired'), 60)
+
+ first_paint_secs = tab.EvaluateJavaScript(
+ 'window.chrome.loadTimes().firstPaintTime - ' +
+ 'window.chrome.loadTimes().startLoadTime')
+
+ results.Add('first_paint', 'ms', round(first_paint_secs * 1000, 1))
+
+def CalcImageDecodingResults(rendering_stats_deltas, results):
+ totalDeferredImageDecodeCount = rendering_stats_deltas.get(
+ 'totalDeferredImageDecodeCount', 0)
+ totalDeferredImageCacheHitCount = rendering_stats_deltas.get(
+ 'totalDeferredImageCacheHitCount', 0)
+ totalImageGatheringCount = rendering_stats_deltas.get(
+ 'totalImageGatheringCount', 0)
+ totalDeferredImageDecodeTimeInSeconds = rendering_stats_deltas.get(
+ 'totalDeferredImageDecodeTimeInSeconds', 0)
+ totalImageGatheringTimeInSeconds = rendering_stats_deltas.get(
+ 'totalImageGatheringTimeInSeconds', 0)
+
+ averageImageGatheringTime = DivideIfPossibleOrZero(
+ (totalImageGatheringTimeInSeconds * 1000), totalImageGatheringCount)
+
+ results.Add('total_deferred_image_decode_count', 'count',
+ totalDeferredImageDecodeCount,
+ data_type='unimportant')
+ results.Add('total_image_cache_hit_count', 'count',
+ totalDeferredImageCacheHitCount,
+ data_type='unimportant')
+ results.Add('average_image_gathering_time', 'ms', averageImageGatheringTime,
+ data_type='unimportant')
+ results.Add('total_deferred_image_decoding_time', 'seconds',
+ totalDeferredImageDecodeTimeInSeconds,
+ data_type='unimportant')
+
+class SmoothnessMeasurement(page_measurement.PageMeasurement):
+ def __init__(self):
+ super(SmoothnessMeasurement, self).__init__('smoothness')
+ self.force_enable_threaded_compositing = False
+ self.use_gpu_benchmarking_extension = True
+ self._metrics = None
+
+ def AddCommandLineOptions(self, parser):
+ parser.add_option('--report-all-results', dest='report_all_results',
+ action='store_true',
+ help='Reports all data collected, not just FPS')
+
+ def CustomizeBrowserOptions(self, options):
+ if self.use_gpu_benchmarking_extension:
+ options.extra_browser_args.append('--enable-gpu-benchmarking')
+ if self.force_enable_threaded_compositing:
+ options.extra_browser_args.append('--enable-threaded-compositing')
+
+ def CanRunForPage(self, page):
+ return hasattr(page, 'smoothness')
+
+ def WillRunAction(self, page, tab, action):
+ if tab.browser.platform.IsRawDisplayFrameRateSupported():
+ tab.browser.platform.StartRawDisplayFrameRateMeasurement()
+ self._metrics = smoothness_metrics.SmoothnessMetrics(tab)
+ if action.CanBeBound():
+ self._metrics.BindToAction(action)
+ else:
+ self._metrics.Start()
+
+ def DidRunAction(self, page, tab, action):
+ if tab.browser.platform.IsRawDisplayFrameRateSupported():
+ tab.browser.platform.StopRawDisplayFrameRateMeasurement()
+ if not action.CanBeBound():
+ self._metrics.Stop()
+
+ def MeasurePage(self, page, tab, results):
+ rendering_stats_deltas = self._metrics.deltas
+
+ if not (rendering_stats_deltas['numFramesSentToScreen'] > 0):
+ raise DidNotScrollException()
+
+ load_timings = tab.EvaluateJavaScript("window.performance.timing")
+ load_time_seconds = (
+ float(load_timings['loadEventStart']) -
+ load_timings['navigationStart']) / 1000
+ dom_content_loaded_time_seconds = (
+ float(load_timings['domContentLoadedEventStart']) -
+ load_timings['navigationStart']) / 1000
+ results.Add('load_time', 'seconds', load_time_seconds)
+ results.Add('dom_content_loaded_time', 'seconds',
+ dom_content_loaded_time_seconds)
+
+ CalcFirstPaintTimeResults(results, tab)
+ CalcScrollResults(rendering_stats_deltas, results)
+ CalcTextureUploadResults(rendering_stats_deltas, results)
+ CalcImageDecodingResults(rendering_stats_deltas, results)
+
+ if self.options.report_all_results:
+ for k, v in rendering_stats_deltas.iteritems():
+ results.Add(k, '', v)
+
+ if tab.browser.platform.IsRawDisplayFrameRateSupported():
+ for r in tab.browser.platform.GetRawDisplayFrameRateMeasurements():
+ results.Add(r.name, r.unit, r.value)
diff --git a/tools/perf/perf_tools/smoothness_measurement.js b/tools/perf/perf_tools/smoothness_metrics.js
index 22336fe..22336fe 100644
--- a/tools/perf/perf_tools/smoothness_measurement.js
+++ b/tools/perf/perf_tools/smoothness_metrics.js
diff --git a/tools/perf/perf_tools/smoothness_metrics.py b/tools/perf/perf_tools/smoothness_metrics.py
new file mode 100644
index 0000000..5148084
--- /dev/null
+++ b/tools/perf/perf_tools/smoothness_metrics.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+class SmoothnessMetrics(object):
+ def __init__(self, tab):
+ self._tab = tab
+ with open(
+ os.path.join(os.path.dirname(__file__),
+ 'smoothness_metrics.js')) as f:
+ js = f.read()
+ tab.ExecuteJavaScript(js)
+
+ def Start(self):
+ self._tab.ExecuteJavaScript(
+ 'window.__renderingStats = new __RenderingStats();'
+ 'window.__renderingStats.start()')
+
+ def SetNeedsDisplayOnAllLayersAndStart(self):
+ self._tab.ExecuteJavaScript(
+ 'chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();'
+ 'window.__renderingStats = new __RenderingStats();'
+ 'window.__renderingStats.start()')
+
+ def Stop(self):
+ self._tab.ExecuteJavaScript('window.__renderingStats.stop()')
+
+ def BindToAction(self, action):
+ # Make the scroll test start and stop measurement automatically.
+ self._tab.ExecuteJavaScript(
+ 'window.__renderingStats = new __RenderingStats();')
+ action.BindMeasurementJavaScript(self._tab,
+ 'window.__renderingStats.start();',
+ 'window.__renderingStats.stop();')
+
+ @property
+ def start_values(self):
+ return self._tab.EvaluateJavaScript(
+ 'window.__renderingStats.getStartValues()')
+
+ @property
+ def end_values(self):
+ return self._tab.EvaluateJavaScript(
+ 'window.__renderingStats.getEndValues()')
+
+ @property
+ def deltas(self):
+ return self._tab.EvaluateJavaScript(
+ 'window.__renderingStats.getDeltas()')
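
A minimal usage sketch for the new SmoothnessMetrics class, assuming an already-connected Telemetry tab (GatherDeltas itself is a hypothetical helper); it mirrors how smoothness_measurement.py and rasterize_and_record_benchmark.py drive the class in the hunks above:

from perf_tools import smoothness_metrics

def GatherDeltas(tab, action=None):
  metrics = smoothness_metrics.SmoothnessMetrics(tab)
  if action is not None and action.CanBeBound():
    # Let the page action start/stop recording at the right moments.
    metrics.BindToAction(action)
  else:
    metrics.Start()
    # ... run the scroll or other page action here ...
    metrics.Stop()
  return metrics.deltas  # e.g. deltas['numFramesSentToScreen']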
diff --git a/tools/perf/perf_tools/spaceport.py b/tools/perf/perf_tools/spaceport.py
index 2a4d623..8dec484 100644
--- a/tools/perf/perf_tools/spaceport.py
+++ b/tools/perf/perf_tools/spaceport.py
@@ -5,9 +5,9 @@
import logging
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class SpaceportBenchmark(page_benchmark.PageBenchmark):
+class SpaceportMeasurement(page_measurement.PageMeasurement):
def CustomizeBrowserOptions(self, options):
options.extra_browser_args.extend(['--disable-gpu-vsync'])
@@ -29,13 +29,14 @@ class SpaceportBenchmark(page_benchmark.PageBenchmark):
js_get_results = 'JSON.stringify(window.__results)'
num_tests_complete = [0] # A list to work around closure issue.
def _IsDone():
- num_tests_in_benchmark = 24
+ num_tests_in_measurement = 24
num_results = len(eval(tab.EvaluateJavaScript(js_get_results)))
if num_results > num_tests_complete[0]:
num_tests_complete[0] = num_results
- logging.info('Completed benchmark %d of %d' % (num_tests_complete[0],
- num_tests_in_benchmark))
- return num_tests_complete[0] >= num_tests_in_benchmark
+ logging.info('Completed measurement %d of %d'
+ % (num_tests_complete[0],
+ num_tests_in_measurement))
+ return num_tests_complete[0] >= num_tests_in_measurement
util.WaitFor(_IsDone, 1200, poll_interval=5)
result_dict = eval(tab.EvaluateJavaScript(js_get_results))
diff --git a/tools/perf/perf_tools/startup_benchmark.py b/tools/perf/perf_tools/startup_measurement.py
index 2f81113..f328a37 100644
--- a/tools/perf/perf_tools/startup_benchmark.py
+++ b/tools/perf/perf_tools/startup_measurement.py
@@ -4,10 +4,10 @@
import json
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
# Test how long Chrome takes to load when warm.
-class PerfWarm(page_benchmark.PageBenchmark):
+class PerfWarm(page_measurement.PageMeasurement):
def __init__(self):
super(PerfWarm, self).__init__(needs_browser_restart_after_each_run=True,
discard_first_result=True)
diff --git a/tools/perf/perf_tools/sunspider.py b/tools/perf/perf_tools/sunspider.py
index d65bfd8..9bbb40d 100644
--- a/tools/perf/perf_tools/sunspider.py
+++ b/tools/perf/perf_tools/sunspider.py
@@ -6,10 +6,10 @@ import collections
import json
from telemetry.core import util
-from telemetry.page import page_benchmark
+from telemetry.page import page_measurement
-class SunSpiderBenchark(page_benchmark.PageBenchmark):
+class SunSpiderMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
js_is_done = """
window.location.pathname.indexOf('sunspider-results') >= 0"""