author    | nduca@chromium.org <nduca@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-05-03 01:58:45 +0000
committer | nduca@chromium.org <nduca@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-05-03 01:58:45 +0000
commit    | 9da0cd52ddaec592b089c318a320614265f5b121 (patch)
tree      | 3c93958c6e8c401b1130cc976b65d69490753eab /tools
parent    | 8e08b1423631dd2bb761adecf6e67ca41e70c5c0 (diff)
[telemetry] Rename PageBenchmark to PageMeasurement
Benchmark is an ambiguous word. This is clearer.
R=dtu@chromium.org
Review URL: https://codereview.chromium.org/14760006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@198030 0039d316-1c4b-4281-b951-d872f2087c98
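For measurement authors the change is mechanical: import page_measurement instead of page_benchmark and subclass PageMeasurement instead of PageBenchmark; the hook signatures (CustomizeBrowserOptions, MeasurePage, results.Add) are unchanged, as the Dromaeo, Octane and Kraken hunks below show. A minimal sketch of the new shape, where the class, page expression and result names are illustrative rather than taken from this change:

```python
# Minimal sketch of a measurement after this rename. Only the import and the
# hook signatures come from the code in this change; the class, flag usage and
# result names are illustrative.

# (was: from telemetry.page import page_benchmark)
from telemetry.page import page_measurement


class ExampleMeasurement(page_measurement.PageMeasurement):  # was PageBenchmark
  def CustomizeBrowserOptions(self, options):
    # Browser flags are appended exactly as before the rename.
    options.extra_browser_args.append('--enable-gpu-benchmarking')

  def MeasurePage(self, page, tab, results):
    # Evaluate something on the page and report it; results.Add keeps its
    # (name, units, value) signature.
    body_height = tab.EvaluateJavaScript('document.body.clientHeight')
    results.Add('body_height', 'px', body_height)
```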
Diffstat (limited to 'tools')
49 files changed, 875 insertions, 860 deletions
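Beyond the rename, the change also splits tab-level data gathering out of the measurements into separate metrics helpers (smoothness_metrics.py and cheapness_predictor_metrics.py below). A rough sketch of that division of labour follows; everything named Example is hypothetical, while the hook names (WillRunAction, DidRunAction, MeasurePage) and the Start/Stop/deltas shape mirror the smoothness hunks in this diff:

```python
# Sketch of the measurement/metrics split introduced here: a metrics helper
# owns the raw numbers gathered from a tab, while the PageMeasurement subclass
# drives it from the page hooks and reports results. Everything named
# "Example" is hypothetical; the structure mirrors smoothness_measurement.py
# and smoothness_metrics.py in this diff.
from telemetry.page import page_measurement


class ExampleMetrics(object):
  """Collects raw timing data from a tab (compare SmoothnessMetrics)."""

  def __init__(self, tab):
    self._tab = tab
    self.deltas = {}

  def Start(self):
    self._tab.ExecuteJavaScript(
        'window.__exampleStart = window.performance.now()')

  def Stop(self):
    self.deltas['elapsed_ms'] = self._tab.EvaluateJavaScript(
        'window.performance.now() - window.__exampleStart')


class ExampleMeasurement(page_measurement.PageMeasurement):
  """Turns the metrics into reported results (compare SmoothnessMeasurement)."""

  def __init__(self):
    # 'example_action' stands in for the page action name ('smoothness' in the
    # real measurement) and is purely illustrative.
    super(ExampleMeasurement, self).__init__('example_action')
    self._metrics = None

  def WillRunAction(self, page, tab, action):
    self._metrics = ExampleMetrics(tab)
    self._metrics.Start()

  def DidRunAction(self, page, tab, action):
    self._metrics.Stop()

  def MeasurePage(self, page, tab, results):
    results.Add('elapsed', 'ms', self._metrics.deltas['elapsed_ms'])
```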
diff --git a/tools/perf/page_sets/image_decoding_benchmark.json b/tools/perf/page_sets/image_decoding_benchmark.json index 01ff56a..fcf1ce2 100644 --- a/tools/perf/page_sets/image_decoding_benchmark.json +++ b/tools/perf/page_sets/image_decoding_benchmark.json @@ -2,7 +2,7 @@ "description": "A directed benchmark of image decoding performance", "post_navigate_javascript_to_execute": "runBenchmark();", "wait_for_javascript_expression": "isDone", - "image_decoding_benchmark_limit_results_to_min_iterations": true, + "image_decoding_measurement_limit_results_to_min_iterations": true, "pages": [ { "url": "file:///../../../chrome/test/data/image_decoding/image_decoding.html?gif" }, { "url": "file:///../../../chrome/test/data/image_decoding/image_decoding.html?jpg" }, diff --git a/tools/perf/perf_tools/cheapness_predictor_benchmark.py b/tools/perf/perf_tools/cheapness_predictor_benchmark.py deleted file mode 100644 index a0915eb..0000000 --- a/tools/perf/perf_tools/cheapness_predictor_benchmark.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2013 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -from perf_tools import cheapness_predictor_measurement -from telemetry.page import page_benchmark - -PREDICTOR_STATS = [ - {'name': 'picture_pile_count', 'units': ''}, - {'name': 'predictor_accuracy', 'units': 'percent'}, - {'name': 'predictor_safely_wrong_count', 'units': ''}, - {'name': 'predictor_badly_wrong_count', 'units': ''}] - -class CheapnessPredictorBenchmark(page_benchmark.PageBenchmark): - def __init__(self): - super(CheapnessPredictorBenchmark, self).__init__('smoothness') - self._measurement = None - - def CustomizeBrowserOptions(self, options): - options.AppendExtraBrowserArg('--dom-automation') - options.AppendExtraBrowserArg('--enable-prediction-benchmarking') - options.AppendExtraBrowserArg('--enable-gpu-benchmarking') - options.AppendExtraBrowserArg('--enable-threaded-compositing') - options.AppendExtraBrowserArg('--enable-impl-side-painting') - - def DidNavigateToPage(self, page, tab): - self._measurement = \ - cheapness_predictor_measurement.CheapnessPredictorMeasurement(tab) - self._measurement.GatherInitialStats() - - def DidRunAction(self, page, tab, action): - self._measurement.GatherDeltaStats() - - def CanRunForPage(self, page): - return hasattr(page, 'smoothness') - - def MeasurePage(self, page, tab, results): - predictor_stats = self._measurement.stats - - for stat_to_gather in PREDICTOR_STATS: - results.Add(stat_to_gather['name'], - stat_to_gather['units'], - predictor_stats[stat_to_gather['name']]) diff --git a/tools/perf/perf_tools/cheapness_predictor_measurement.py b/tools/perf/perf_tools/cheapness_predictor_measurement.py index 9500f3e..cf175b5 100644 --- a/tools/perf/perf_tools/cheapness_predictor_measurement.py +++ b/tools/perf/perf_tools/cheapness_predictor_measurement.py @@ -1,86 +1,42 @@ # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
-import json - -class CheapnessPredictorMeasurement(object): - def __init__(self, tab): - self._tab = tab - self._initial_stats = {} - self.stats = {} - - def GatherInitialStats(self): - self._initial_stats = self._GatherStats() - - def GatherDeltaStats(self): - final_stats = self._GatherStats() - - correct_count = final_stats['predictor_correct_count'] - \ - self._initial_stats['predictor_correct_count'] - - incorrect_count = final_stats['predictor_incorrect_count'] - \ - self._initial_stats['predictor_incorrect_count'] - - percent, total = self._GetPercentAndTotal(correct_count, incorrect_count) - - self.stats['picture_pile_count'] = total - self.stats['predictor_correct_count'] = correct_count - self.stats['predictor_incorrect_count'] = incorrect_count - self.stats['predictor_accuracy'] = percent - self.stats['predictor_safely_wrong_count'] = \ - final_stats['predictor_safely_wrong_count'] - \ - self._initial_stats['predictor_safely_wrong_count'] - self.stats['predictor_badly_wrong_count'] = \ - final_stats['predictor_badly_wrong_count'] - \ - self._initial_stats['predictor_badly_wrong_count'] - - def _GatherStats(self): - stats = {} - - incorrect_count, correct_count = \ - self._GetBooleanHistogramCounts(self._tab, - 'Renderer4.CheapPredictorAccuracy') - - percent, total = self._GetPercentAndTotal(correct_count, incorrect_count) - stats['picture_pile_count'] = total - stats['predictor_correct_count'] = correct_count - stats['predictor_incorrect_count'] = incorrect_count - stats['predictor_accuracy'] = percent - - _, safely_wrong_count = \ - self._GetBooleanHistogramCounts(self._tab, - 'Renderer4.CheapPredictorSafelyWrong') - stats['predictor_safely_wrong_count'] = safely_wrong_count - - _, badly_wrong_count = \ - self._GetBooleanHistogramCounts(self._tab, - 'Renderer4.CheapPredictorBadlyWrong') - stats['predictor_badly_wrong_count'] = badly_wrong_count - - return stats - - - def _GetPercentAndTotal(self, correct_count, incorrect_count): - total = incorrect_count + correct_count - percent = 0 - if total > 0: - percent = 100 * correct_count / float(total) - return percent, total - - def _GetBooleanHistogramCounts(self, tab, histogram_name): - count = [0, 0] - js = ('window.domAutomationController.getHistogram ? 
' - 'window.domAutomationController.getHistogram(' - '"%s") : ""' % (histogram_name)) - data = tab.EvaluateJavaScript(js) - if not data: - return count - - histogram = json.loads(data) - if histogram: - for bucket in histogram['buckets']: - if bucket['low'] > 1: - continue - count[bucket['low']] += bucket['count'] - - return count +from perf_tools import cheapness_predictor_metrics +from telemetry.page import page_measurement + +PREDICTOR_STATS = [ + {'name': 'picture_pile_count', 'units': ''}, + {'name': 'predictor_accuracy', 'units': 'percent'}, + {'name': 'predictor_safely_wrong_count', 'units': ''}, + {'name': 'predictor_badly_wrong_count', 'units': ''}] + +class CheapnessPredictorMeasurement(page_measurement.PageMeasurement): + def __init__(self): + super(CheapnessPredictorMeasurement, self).__init__('smoothness') + self._metrics = None + + def CustomizeBrowserOptions(self, options): + options.AppendExtraBrowserArg('--dom-automation') + options.AppendExtraBrowserArg('--enable-prediction-benchmarking') + options.AppendExtraBrowserArg('--enable-gpu-benchmarking') + options.AppendExtraBrowserArg('--enable-threaded-compositing') + options.AppendExtraBrowserArg('--enable-impl-side-painting') + + def DidNavigateToPage(self, page, tab): + self._metrics = \ + cheapness_predictor_metrics.CheapnessPredictorMetrics(tab) + self._metrics.GatherInitialStats() + + def DidRunAction(self, page, tab, action): + self._metrics.GatherDeltaStats() + + def CanRunForPage(self, page): + return hasattr(page, 'smoothness') + + def MeasurePage(self, page, tab, results): + predictor_stats = self._metrics.stats + + for stat_to_gather in PREDICTOR_STATS: + results.Add(stat_to_gather['name'], + stat_to_gather['units'], + predictor_stats[stat_to_gather['name']]) diff --git a/tools/perf/perf_tools/cheapness_predictor_metrics.py b/tools/perf/perf_tools/cheapness_predictor_metrics.py new file mode 100644 index 0000000..2fc14c3 --- /dev/null +++ b/tools/perf/perf_tools/cheapness_predictor_metrics.py @@ -0,0 +1,86 @@ +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+import json + +class CheapnessPredictorMetrics(object): + def __init__(self, tab): + self._tab = tab + self._initial_stats = {} + self.stats = {} + + def GatherInitialStats(self): + self._initial_stats = self._GatherStats() + + def GatherDeltaStats(self): + final_stats = self._GatherStats() + + correct_count = final_stats['predictor_correct_count'] - \ + self._initial_stats['predictor_correct_count'] + + incorrect_count = final_stats['predictor_incorrect_count'] - \ + self._initial_stats['predictor_incorrect_count'] + + percent, total = self._GetPercentAndTotal(correct_count, incorrect_count) + + self.stats['picture_pile_count'] = total + self.stats['predictor_correct_count'] = correct_count + self.stats['predictor_incorrect_count'] = incorrect_count + self.stats['predictor_accuracy'] = percent + self.stats['predictor_safely_wrong_count'] = \ + final_stats['predictor_safely_wrong_count'] - \ + self._initial_stats['predictor_safely_wrong_count'] + self.stats['predictor_badly_wrong_count'] = \ + final_stats['predictor_badly_wrong_count'] - \ + self._initial_stats['predictor_badly_wrong_count'] + + def _GatherStats(self): + stats = {} + + incorrect_count, correct_count = \ + self._GetBooleanHistogramCounts(self._tab, + 'Renderer4.CheapPredictorAccuracy') + + percent, total = self._GetPercentAndTotal(correct_count, incorrect_count) + stats['picture_pile_count'] = total + stats['predictor_correct_count'] = correct_count + stats['predictor_incorrect_count'] = incorrect_count + stats['predictor_accuracy'] = percent + + _, safely_wrong_count = \ + self._GetBooleanHistogramCounts(self._tab, + 'Renderer4.CheapPredictorSafelyWrong') + stats['predictor_safely_wrong_count'] = safely_wrong_count + + _, badly_wrong_count = \ + self._GetBooleanHistogramCounts(self._tab, + 'Renderer4.CheapPredictorBadlyWrong') + stats['predictor_badly_wrong_count'] = badly_wrong_count + + return stats + + + def _GetPercentAndTotal(self, correct_count, incorrect_count): + total = incorrect_count + correct_count + percent = 0 + if total > 0: + percent = 100 * correct_count / float(total) + return percent, total + + def _GetBooleanHistogramCounts(self, tab, histogram_name): + count = [0, 0] + js = ('window.domAutomationController.getHistogram ? ' + 'window.domAutomationController.getHistogram(' + '"%s") : ""' % (histogram_name)) + data = tab.EvaluateJavaScript(js) + if not data: + return count + + histogram = json.loads(data) + if histogram: + for bucket in histogram['buckets']: + if bucket['low'] > 1: + continue + count[bucket['low']] += bucket['count'] + + return count diff --git a/tools/perf/perf_tools/dromaeo.py b/tools/perf/perf_tools/dromaeo.py index 14ccb27..96037ea 100644 --- a/tools/perf/perf_tools/dromaeo.py +++ b/tools/perf/perf_tools/dromaeo.py @@ -3,9 +3,9 @@ # found in the LICENSE file. 
from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class Dromaeo(page_benchmark.PageBenchmark): +class Dromaeo(page_measurement.PageMeasurement): def MeasurePage(self, page, tab, results): js_is_done = 'window.document.cookie.indexOf("__done=1") >= 0' def _IsDone(): diff --git a/tools/perf/perf_tools/histogram_measurement.py b/tools/perf/perf_tools/histogram_metric.py index 267329a..d05394e 100644 --- a/tools/perf/perf_tools/histogram_measurement.py +++ b/tools/perf/perf_tools/histogram_metric.py @@ -6,7 +6,7 @@ from perf_tools import histogram as histogram_module BROWSER_HISTOGRAM = 'browser_histogram' RENDERER_HISTOGRAM = 'renderer_histogram' -class HistogramMeasurement(object): +class HistogramMetric(object): def __init__(self, histogram, histogram_type): self.name = histogram['name'] self.units = histogram['units'] diff --git a/tools/perf/perf_tools/image_decoding_benchmark.py b/tools/perf/perf_tools/image_decoding_measurement.py index 58b6155..c293ac5 100644 --- a/tools/perf/perf_tools/image_decoding_benchmark.py +++ b/tools/perf/perf_tools/image_decoding_measurement.py @@ -2,10 +2,10 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class ImageDecoding(page_benchmark.PageBenchmark): +class ImageDecoding(page_measurement.PageMeasurement): def WillNavigateToPage(self, page, tab): tab.StartTimelineRecording() @@ -17,11 +17,11 @@ class ImageDecoding(page_benchmark.PageBenchmark): decode_image_events = \ tab.timeline_model.GetAllOfName('DecodeImage') - # If it is a real image benchmark, then store only the last-minIterations + # If it is a real image page, then store only the last-minIterations # decode tasks. if (hasattr(page, - 'image_decoding_benchmark_limit_results_to_min_iterations') and - page.image_decoding_benchmark_limit_results_to_min_iterations): + 'image_decoding_measurement_limit_results_to_min_iterations') and + page.image_decoding_measurement_limit_results_to_min_iterations): assert _IsDone() min_iterations = tab.EvaluateJavaScript('minIterations') decode_image_events = decode_image_events[-min_iterations:] diff --git a/tools/perf/perf_tools/jsgamebench.py b/tools/perf/perf_tools/jsgamebench.py index 9fdf1166..474d43a 100644 --- a/tools/perf/perf_tools/jsgamebench.py +++ b/tools/perf/perf_tools/jsgamebench.py @@ -3,9 +3,9 @@ # found in the LICENSE file. from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class JsGameBench(page_benchmark.PageBenchmark): +class JsGameBench(page_measurement.PageMeasurement): def MeasurePage(self, _, tab, results): tab.ExecuteJavaScript('UI.call({}, "perftest")') diff --git a/tools/perf/perf_tools/kraken.py b/tools/perf/perf_tools/kraken.py index d09c01b..2d2086d 100644 --- a/tools/perf/perf_tools/kraken.py +++ b/tools/perf/perf_tools/kraken.py @@ -3,12 +3,12 @@ # found in the LICENSE file. 
from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement def _Mean(l): return float(sum(l)) / len(l) if len(l) > 0 else 0.0 -class Kraken(page_benchmark.PageBenchmark): +class Kraken(page_measurement.PageMeasurement): def MeasurePage(self, _, tab, results): js_is_done = """ document.title.indexOf("Results") != -1 && document.readyState == "complete" diff --git a/tools/perf/perf_tools/loading_benchmark.py b/tools/perf/perf_tools/loading_measurement.py index d0aa214..2ff0e97 100644 --- a/tools/perf/perf_tools/loading_benchmark.py +++ b/tools/perf/perf_tools/loading_measurement.py @@ -5,9 +5,9 @@ import collections from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class LoadingBenchmark(page_benchmark.PageBenchmark): +class LoadingMeasurement(page_measurement.PageMeasurement): @property def results_are_the_same_on_every_page(self): return False diff --git a/tools/perf/perf_tools/memory_benchmark.py b/tools/perf/perf_tools/memory_measurement.py index 6c051d6..8d1724d 100644 --- a/tools/perf/perf_tools/memory_benchmark.py +++ b/tools/perf/perf_tools/memory_measurement.py @@ -1,8 +1,8 @@ # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -from perf_tools import histogram_measurement -from telemetry.page import page_benchmark +from perf_tools import histogram_metric +from telemetry.page import page_measurement MEMORY_HISTOGRAMS = [ {'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'}, @@ -13,15 +13,15 @@ MEMORY_HISTOGRAMS = [ BROWSER_MEMORY_HISTOGRAMS = [ {'name': 'Memory.BrowserUsed', 'units': 'kb'}] -class MemoryBenchmark(page_benchmark.PageBenchmark): +class MemoryMeasurement(page_measurement.PageMeasurement): def __init__(self): - super(MemoryBenchmark, self).__init__('stress_memory') + super(MemoryMeasurement, self).__init__('stress_memory') self.histograms = ( - [histogram_measurement.HistogramMeasurement( - h, histogram_measurement.RENDERER_HISTOGRAM) + [histogram_metric.HistogramMetric( + h, histogram_metric.RENDERER_HISTOGRAM) for h in MEMORY_HISTOGRAMS] + - [histogram_measurement.HistogramMeasurement( - h, histogram_measurement.BROWSER_HISTOGRAM) + [histogram_metric.HistogramMetric( + h, histogram_metric.BROWSER_HISTOGRAM) for h in BROWSER_MEMORY_HISTOGRAMS]) def DidNavigateToPage(self, page, tab): diff --git a/tools/perf/perf_tools/octane.py b/tools/perf/perf_tools/octane.py index d6f4f1a..16be773 100644 --- a/tools/perf/perf_tools/octane.py +++ b/tools/perf/perf_tools/octane.py @@ -2,9 +2,9 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class Octane(page_benchmark.PageBenchmark): +class Octane(page_measurement.PageMeasurement): def MeasurePage(self, _, tab, results): js_is_done = """ completed && !document.getElementById("progress-bar-container")""" diff --git a/tools/perf/perf_tools/page_cycler.py b/tools/perf/perf_tools/page_cycler.py index a0cb196..7890740 100644 --- a/tools/perf/perf_tools/page_cycler.py +++ b/tools/perf/perf_tools/page_cycler.py @@ -2,14 +2,14 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -"""The page cycler benchmark. +"""The page cycler measurement. 
-This benchmark registers a window load handler in which is forces a layout and +This measurement registers a window load handler in which is forces a layout and then records the value of performance.now(). This call to now() measures the time from navigationStart (immediately after the previous page's beforeunload event) until after the layout in the page's load event. In addition, two garbage collections are performed in between the page loads (in the beforeunload event). -This extra garbage collection time is not included in the benchmark times. +This extra garbage collection time is not included in the measurement times. Finally, various memory and IO statistics are gathered at the very end of cycling all pages. @@ -18,16 +18,16 @@ cycling all pages. import os import sys -from perf_tools import histogram_measurement +from perf_tools import histogram_metric from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement MEMORY_HISTOGRAMS = [ {'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'}, {'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'}, {'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}] -class PageCycler(page_benchmark.PageBenchmark): +class PageCycler(page_measurement.PageMeasurement): def AddCommandLineOptions(self, parser): # The page cyclers should default to 10 iterations. In order to change the # default of an option, we must remove and re-add it. @@ -50,8 +50,8 @@ class PageCycler(page_benchmark.PageBenchmark): self.start_commit_charge = tab.browser.memory_stats['SystemCommitCharge'] # pylint: disable=W0201 - self.histograms = [histogram_measurement.HistogramMeasurement( - h, histogram_measurement.RENDERER_HISTOGRAM) + self.histograms = [histogram_metric.HistogramMetric( + h, histogram_metric.RENDERER_HISTOGRAM) for h in MEMORY_HISTOGRAMS] def WillNavigateToPage(self, page, tab): diff --git a/tools/perf/perf_tools/rasterize_and_record_benchmark.py b/tools/perf/perf_tools/rasterize_and_record_benchmark.py index 65a1919..5834393 100644 --- a/tools/perf/perf_tools/rasterize_and_record_benchmark.py +++ b/tools/perf/perf_tools/rasterize_and_record_benchmark.py @@ -4,8 +4,8 @@ import time -from perf_tools import smoothness_measurement -from telemetry.page import page_benchmark +from perf_tools import smoothness_metrics +from telemetry.page import page_measurement def DivideIfPossibleOrZero(numerator, denominator): if denominator == 0: @@ -52,10 +52,10 @@ def CalcPaintingResults(rendering_stats_deltas, results): results.Add('total_record_and_rasterize_time', 'seconds', totalRecordTime + totalRasterizeTime, data_type='unimportant') -class RasterizeAndPaintBenchmark(page_benchmark.PageBenchmark): +class RasterizeAndPaintMeasurement(page_measurement.PageMeasurement): def __init__(self): - super(RasterizeAndPaintBenchmark, self).__init__('', True) - self._measurement = None + super(RasterizeAndPaintMeasurement, self).__init__('', True) + self._metrics = None def AddCommandLineOptions(self, parser): parser.add_option('--report-all-results', dest='report_all_results', @@ -70,22 +70,22 @@ class RasterizeAndPaintBenchmark(page_benchmark.PageBenchmark): options.extra_browser_args.append('--slow-down-raster-scale-factor=100') def MeasurePage(self, page, tab, results): - self._measurement = smoothness_measurement.SmoothnessMeasurement(tab) + self._metrics = smoothness_metrics.SmoothnessMetrics(tab) # Wait until the page has loaded and come to a somewhat steady state # (empirical wait time) time.sleep(5) 
- self._measurement.SetNeedsDisplayOnAllLayersAndStart() + self._metrics.SetNeedsDisplayOnAllLayersAndStart() # Wait until all rasterization tasks are completed (empirical wait time) # TODO(ernstm): Replace by a more accurate mechanism to measure stats for # exactly one frame. time.sleep(5) - self._measurement.Stop() + self._metrics.Stop() - rendering_stats_deltas = self._measurement.deltas + rendering_stats_deltas = self._metrics.deltas CalcPaintingResults(rendering_stats_deltas, results) diff --git a/tools/perf/perf_tools/robohornetpro.py b/tools/perf/perf_tools/robohornetpro.py index dcc69df..9bd2553 100644 --- a/tools/perf/perf_tools/robohornetpro.py +++ b/tools/perf/perf_tools/robohornetpro.py @@ -2,11 +2,11 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class RobohornetPro(page_benchmark.PageBenchmark): +class RobohornetPro(page_measurement.PageMeasurement): def CustomizeBrowserOptions(self, options): - # Benchmark require use of real Date.now() for measurement. + # Measurement require use of real Date.now() for measurement. options.wpr_make_javascript_deterministic = False def MeasurePage(self, _, tab, results): diff --git a/tools/perf/perf_tools/scrolling_benchmark.py b/tools/perf/perf_tools/scrolling_benchmark.py deleted file mode 100644 index cc3ddad..0000000 --- a/tools/perf/perf_tools/scrolling_benchmark.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -from perf_tools import smoothness_benchmark - -class ScrollingBenchmark(smoothness_benchmark.SmoothnessBenchmark): - def __init__(self): - super(ScrollingBenchmark, self).__init__() diff --git a/tools/perf/perf_tools/skpicture_printer.py b/tools/perf/perf_tools/skpicture_printer.py index beeb0d4..290bde1 100644 --- a/tools/perf/perf_tools/skpicture_printer.py +++ b/tools/perf/perf_tools/skpicture_printer.py @@ -3,11 +3,11 @@ # found in the LICENSE file. import os -from telemetry.page import page_benchmark +from telemetry.page import page_measurement _JS = 'chrome.gpuBenchmarking.printToSkPicture("{0}");' -class SkPicturePrinter(page_benchmark.PageBenchmark): +class SkPicturePrinter(page_measurement.PageMeasurement): def AddCommandLineOptions(self, parser): parser.add_option('-o', '--outdir', help='Output directory') diff --git a/tools/perf/perf_tools/smoothness_benchmark.py b/tools/perf/perf_tools/smoothness_benchmark.py deleted file mode 100644 index 5ec96b6..0000000 --- a/tools/perf/perf_tools/smoothness_benchmark.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
-from perf_tools import smoothness_measurement -from telemetry.core import util -from telemetry.page import page_benchmark - -class DidNotScrollException(page_benchmark.MeasurementFailure): - def __init__(self): - super(DidNotScrollException, self).__init__('Page did not scroll') - -def DivideIfPossibleOrZero(numerator, denominator): - if denominator == 0: - return 0 - return numerator / denominator - -def CalcScrollResults(rendering_stats_deltas, results): - num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen'] - - mean_frame_time_seconds = ( - rendering_stats_deltas['totalTimeInSeconds'] / - float(num_frames_sent_to_screen)) - - dropped_percent = ( - rendering_stats_deltas['droppedFrameCount'] / - float(num_frames_sent_to_screen)) - - num_impl_thread_scrolls = rendering_stats_deltas.get( - 'numImplThreadScrolls', 0) - num_main_thread_scrolls = rendering_stats_deltas.get( - 'numMainThreadScrolls', 0) - - percent_impl_scrolled = DivideIfPossibleOrZero( - float(num_impl_thread_scrolls), - num_impl_thread_scrolls + num_main_thread_scrolls) - - num_layers = ( - rendering_stats_deltas.get('numLayersDrawn', 0) / - float(num_frames_sent_to_screen)) - - num_missing_tiles = ( - rendering_stats_deltas.get('numMissingTiles', 0) / - float(num_frames_sent_to_screen)) - - results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3)) - results.Add('dropped_percent', '%', round(dropped_percent * 100, 1), - data_type='unimportant') - results.Add('percent_impl_scrolled', '%', - round(percent_impl_scrolled * 100, 1), - data_type='unimportant') - results.Add('average_num_layers_drawn', '', round(num_layers, 1), - data_type='unimportant') - results.Add('average_num_missing_tiles', '', round(num_missing_tiles, 1), - data_type='unimportant') - -def CalcTextureUploadResults(rendering_stats_deltas, results): - if (('totalCommitCount' not in rendering_stats_deltas) - or rendering_stats_deltas['totalCommitCount'] == 0) : - averageCommitTimeMs = 0 - else : - averageCommitTimeMs = ( - 1000 * rendering_stats_deltas['totalCommitTimeInSeconds'] / - rendering_stats_deltas['totalCommitCount']) - - results.Add('texture_upload_count', 'count', - rendering_stats_deltas.get('textureUploadCount', 0)) - results.Add('total_texture_upload_time', 'seconds', - rendering_stats_deltas.get('totalTextureUploadTimeInSeconds', 0)) - results.Add('average_commit_time', 'ms', averageCommitTimeMs, - data_type='unimportant') - -def CalcFirstPaintTimeResults(results, tab): - if tab.browser.is_content_shell: - results.Add('first_paint', 'ms', 'unsupported') - return - - tab.ExecuteJavaScript(""" - window.__rafFired = false; - window.webkitRequestAnimationFrame(function() { - window.__rafFired = true; - }); - """) - util.WaitFor(lambda: tab.EvaluateJavaScript('window.__rafFired'), 60) - - first_paint_secs = tab.EvaluateJavaScript( - 'window.chrome.loadTimes().firstPaintTime - ' + - 'window.chrome.loadTimes().startLoadTime') - - results.Add('first_paint', 'ms', round(first_paint_secs * 1000, 1)) - -def CalcImageDecodingResults(rendering_stats_deltas, results): - totalDeferredImageDecodeCount = rendering_stats_deltas.get( - 'totalDeferredImageDecodeCount', 0) - totalDeferredImageCacheHitCount = rendering_stats_deltas.get( - 'totalDeferredImageCacheHitCount', 0) - totalImageGatheringCount = rendering_stats_deltas.get( - 'totalImageGatheringCount', 0) - totalDeferredImageDecodeTimeInSeconds = rendering_stats_deltas.get( - 'totalDeferredImageDecodeTimeInSeconds', 0) - totalImageGatheringTimeInSeconds = 
rendering_stats_deltas.get( - 'totalImageGatheringTimeInSeconds', 0) - - averageImageGatheringTime = DivideIfPossibleOrZero( - (totalImageGatheringTimeInSeconds * 1000), totalImageGatheringCount) - - results.Add('total_deferred_image_decode_count', 'count', - totalDeferredImageDecodeCount, - data_type='unimportant') - results.Add('total_image_cache_hit_count', 'count', - totalDeferredImageCacheHitCount, - data_type='unimportant') - results.Add('average_image_gathering_time', 'ms', averageImageGatheringTime, - data_type='unimportant') - results.Add('total_deferred_image_decoding_time', 'seconds', - totalDeferredImageDecodeTimeInSeconds, - data_type='unimportant') - -class SmoothnessBenchmark(page_benchmark.PageBenchmark): - def __init__(self): - super(SmoothnessBenchmark, self).__init__('smoothness') - self.force_enable_threaded_compositing = False - self.use_gpu_benchmarking_extension = True - self._measurement = None - - def AddCommandLineOptions(self, parser): - parser.add_option('--report-all-results', dest='report_all_results', - action='store_true', - help='Reports all data collected, not just FPS') - - def CustomizeBrowserOptions(self, options): - if self.use_gpu_benchmarking_extension: - options.extra_browser_args.append('--enable-gpu-benchmarking') - if self.force_enable_threaded_compositing: - options.extra_browser_args.append('--enable-threaded-compositing') - - def CanRunForPage(self, page): - return hasattr(page, 'smoothness') - - def WillRunAction(self, page, tab, action): - if tab.browser.platform.IsRawDisplayFrameRateSupported(): - tab.browser.platform.StartRawDisplayFrameRateMeasurement() - self._measurement = smoothness_measurement.SmoothnessMeasurement(tab) - if action.CanBeBound(): - self._measurement.BindToAction(action) - else: - self._measurement.Start() - - def DidRunAction(self, page, tab, action): - if tab.browser.platform.IsRawDisplayFrameRateSupported(): - tab.browser.platform.StopRawDisplayFrameRateMeasurement() - if not action.CanBeBound(): - self._measurement.Stop() - - def MeasurePage(self, page, tab, results): - rendering_stats_deltas = self._measurement.deltas - - if not (rendering_stats_deltas['numFramesSentToScreen'] > 0): - raise DidNotScrollException() - - load_timings = tab.EvaluateJavaScript("window.performance.timing") - load_time_seconds = ( - float(load_timings['loadEventStart']) - - load_timings['navigationStart']) / 1000 - dom_content_loaded_time_seconds = ( - float(load_timings['domContentLoadedEventStart']) - - load_timings['navigationStart']) / 1000 - results.Add('load_time', 'seconds', load_time_seconds) - results.Add('dom_content_loaded_time', 'seconds', - dom_content_loaded_time_seconds) - - CalcFirstPaintTimeResults(results, tab) - CalcScrollResults(rendering_stats_deltas, results) - CalcTextureUploadResults(rendering_stats_deltas, results) - CalcImageDecodingResults(rendering_stats_deltas, results) - - if self.options.report_all_results: - for k, v in rendering_stats_deltas.iteritems(): - results.Add(k, '', v) - - if tab.browser.platform.IsRawDisplayFrameRateSupported(): - for r in tab.browser.platform.GetRawDisplayFrameRateMeasurements(): - results.Add(r.name, r.unit, r.value) diff --git a/tools/perf/perf_tools/smoothness_measurement.py b/tools/perf/perf_tools/smoothness_measurement.py index bea02d5..08b663b 100644 --- a/tools/perf/perf_tools/smoothness_measurement.py +++ b/tools/perf/perf_tools/smoothness_measurement.py @@ -1,51 +1,182 @@ # Copyright (c) 2012 The Chromium Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -import os - -class SmoothnessMeasurement(object): - def __init__(self, tab): - self._tab = tab - # Bring in the smoothness benchmark - with open( - os.path.join(os.path.dirname(__file__), - 'smoothness_measurement.js')) as f: - js = f.read() - tab.ExecuteJavaScript(js) - - def Start(self): - self._tab.ExecuteJavaScript( - 'window.__renderingStats = new __RenderingStats();' - 'window.__renderingStats.start()') - - def SetNeedsDisplayOnAllLayersAndStart(self): - self._tab.ExecuteJavaScript( - 'chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();' - 'window.__renderingStats = new __RenderingStats();' - 'window.__renderingStats.start()') - - def Stop(self): - self._tab.ExecuteJavaScript('window.__renderingStats.stop()') - - def BindToAction(self, action): - # Make the scroll test start and stop measurement automatically. - self._tab.ExecuteJavaScript( - 'window.__renderingStats = new __RenderingStats();') - action.BindMeasurementJavaScript(self._tab, - 'window.__renderingStats.start();', - 'window.__renderingStats.stop();') - - @property - def start_values(self): - return self._tab.EvaluateJavaScript( - 'window.__renderingStats.getStartValues()') - - @property - def end_values(self): - return self._tab.EvaluateJavaScript( - 'window.__renderingStats.getEndValues()') - - @property - def deltas(self): - return self._tab.EvaluateJavaScript( - 'window.__renderingStats.getDeltas()') +from perf_tools import smoothness_metrics +from telemetry.core import util +from telemetry.page import page_measurement + +class DidNotScrollException(page_measurement.MeasurementFailure): + def __init__(self): + super(DidNotScrollException, self).__init__('Page did not scroll') + +def DivideIfPossibleOrZero(numerator, denominator): + if denominator == 0: + return 0 + return numerator / denominator + +def CalcScrollResults(rendering_stats_deltas, results): + num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen'] + + mean_frame_time_seconds = ( + rendering_stats_deltas['totalTimeInSeconds'] / + float(num_frames_sent_to_screen)) + + dropped_percent = ( + rendering_stats_deltas['droppedFrameCount'] / + float(num_frames_sent_to_screen)) + + num_impl_thread_scrolls = rendering_stats_deltas.get( + 'numImplThreadScrolls', 0) + num_main_thread_scrolls = rendering_stats_deltas.get( + 'numMainThreadScrolls', 0) + + percent_impl_scrolled = DivideIfPossibleOrZero( + float(num_impl_thread_scrolls), + num_impl_thread_scrolls + num_main_thread_scrolls) + + num_layers = ( + rendering_stats_deltas.get('numLayersDrawn', 0) / + float(num_frames_sent_to_screen)) + + num_missing_tiles = ( + rendering_stats_deltas.get('numMissingTiles', 0) / + float(num_frames_sent_to_screen)) + + results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3)) + results.Add('dropped_percent', '%', round(dropped_percent * 100, 1), + data_type='unimportant') + results.Add('percent_impl_scrolled', '%', + round(percent_impl_scrolled * 100, 1), + data_type='unimportant') + results.Add('average_num_layers_drawn', '', round(num_layers, 1), + data_type='unimportant') + results.Add('average_num_missing_tiles', '', round(num_missing_tiles, 1), + data_type='unimportant') + +def CalcTextureUploadResults(rendering_stats_deltas, results): + if (('totalCommitCount' not in rendering_stats_deltas) + or rendering_stats_deltas['totalCommitCount'] == 0) : + averageCommitTimeMs = 0 + else : + averageCommitTimeMs = ( + 1000 * 
rendering_stats_deltas['totalCommitTimeInSeconds'] / + rendering_stats_deltas['totalCommitCount']) + + results.Add('texture_upload_count', 'count', + rendering_stats_deltas.get('textureUploadCount', 0)) + results.Add('total_texture_upload_time', 'seconds', + rendering_stats_deltas.get('totalTextureUploadTimeInSeconds', 0)) + results.Add('average_commit_time', 'ms', averageCommitTimeMs, + data_type='unimportant') + +def CalcFirstPaintTimeResults(results, tab): + if tab.browser.is_content_shell: + results.Add('first_paint', 'ms', 'unsupported') + return + + tab.ExecuteJavaScript(""" + window.__rafFired = false; + window.webkitRequestAnimationFrame(function() { + window.__rafFired = true; + }); + """) + util.WaitFor(lambda: tab.EvaluateJavaScript('window.__rafFired'), 60) + + first_paint_secs = tab.EvaluateJavaScript( + 'window.chrome.loadTimes().firstPaintTime - ' + + 'window.chrome.loadTimes().startLoadTime') + + results.Add('first_paint', 'ms', round(first_paint_secs * 1000, 1)) + +def CalcImageDecodingResults(rendering_stats_deltas, results): + totalDeferredImageDecodeCount = rendering_stats_deltas.get( + 'totalDeferredImageDecodeCount', 0) + totalDeferredImageCacheHitCount = rendering_stats_deltas.get( + 'totalDeferredImageCacheHitCount', 0) + totalImageGatheringCount = rendering_stats_deltas.get( + 'totalImageGatheringCount', 0) + totalDeferredImageDecodeTimeInSeconds = rendering_stats_deltas.get( + 'totalDeferredImageDecodeTimeInSeconds', 0) + totalImageGatheringTimeInSeconds = rendering_stats_deltas.get( + 'totalImageGatheringTimeInSeconds', 0) + + averageImageGatheringTime = DivideIfPossibleOrZero( + (totalImageGatheringTimeInSeconds * 1000), totalImageGatheringCount) + + results.Add('total_deferred_image_decode_count', 'count', + totalDeferredImageDecodeCount, + data_type='unimportant') + results.Add('total_image_cache_hit_count', 'count', + totalDeferredImageCacheHitCount, + data_type='unimportant') + results.Add('average_image_gathering_time', 'ms', averageImageGatheringTime, + data_type='unimportant') + results.Add('total_deferred_image_decoding_time', 'seconds', + totalDeferredImageDecodeTimeInSeconds, + data_type='unimportant') + +class SmoothnessMeasurement(page_measurement.PageMeasurement): + def __init__(self): + super(SmoothnessMeasurement, self).__init__('smoothness') + self.force_enable_threaded_compositing = False + self.use_gpu_benchmarking_extension = True + self._metrics = None + + def AddCommandLineOptions(self, parser): + parser.add_option('--report-all-results', dest='report_all_results', + action='store_true', + help='Reports all data collected, not just FPS') + + def CustomizeBrowserOptions(self, options): + if self.use_gpu_benchmarking_extension: + options.extra_browser_args.append('--enable-gpu-benchmarking') + if self.force_enable_threaded_compositing: + options.extra_browser_args.append('--enable-threaded-compositing') + + def CanRunForPage(self, page): + return hasattr(page, 'smoothness') + + def WillRunAction(self, page, tab, action): + if tab.browser.platform.IsRawDisplayFrameRateSupported(): + tab.browser.platform.StartRawDisplayFrameRateMeasurement() + self._metrics = smoothness_metrics.SmoothnessMetrics(tab) + if action.CanBeBound(): + self._metrics.BindToAction(action) + else: + self._metrics.Start() + + def DidRunAction(self, page, tab, action): + if tab.browser.platform.IsRawDisplayFrameRateSupported(): + tab.browser.platform.StopRawDisplayFrameRateMeasurement() + if not action.CanBeBound(): + self._metrics.Stop() + + def MeasurePage(self, page, tab, 
results): + rendering_stats_deltas = self._metrics.deltas + + if not (rendering_stats_deltas['numFramesSentToScreen'] > 0): + raise DidNotScrollException() + + load_timings = tab.EvaluateJavaScript("window.performance.timing") + load_time_seconds = ( + float(load_timings['loadEventStart']) - + load_timings['navigationStart']) / 1000 + dom_content_loaded_time_seconds = ( + float(load_timings['domContentLoadedEventStart']) - + load_timings['navigationStart']) / 1000 + results.Add('load_time', 'seconds', load_time_seconds) + results.Add('dom_content_loaded_time', 'seconds', + dom_content_loaded_time_seconds) + + CalcFirstPaintTimeResults(results, tab) + CalcScrollResults(rendering_stats_deltas, results) + CalcTextureUploadResults(rendering_stats_deltas, results) + CalcImageDecodingResults(rendering_stats_deltas, results) + + if self.options.report_all_results: + for k, v in rendering_stats_deltas.iteritems(): + results.Add(k, '', v) + + if tab.browser.platform.IsRawDisplayFrameRateSupported(): + for r in tab.browser.platform.GetRawDisplayFrameRateMeasurements(): + results.Add(r.name, r.unit, r.value) diff --git a/tools/perf/perf_tools/smoothness_measurement.js b/tools/perf/perf_tools/smoothness_metrics.js index 22336fe..22336fe 100644 --- a/tools/perf/perf_tools/smoothness_measurement.js +++ b/tools/perf/perf_tools/smoothness_metrics.js diff --git a/tools/perf/perf_tools/smoothness_metrics.py b/tools/perf/perf_tools/smoothness_metrics.py new file mode 100644 index 0000000..5148084 --- /dev/null +++ b/tools/perf/perf_tools/smoothness_metrics.py @@ -0,0 +1,50 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +import os + +class SmoothnessMetrics(object): + def __init__(self, tab): + self._tab = tab + with open( + os.path.join(os.path.dirname(__file__), + 'smoothness_metrics.js')) as f: + js = f.read() + tab.ExecuteJavaScript(js) + + def Start(self): + self._tab.ExecuteJavaScript( + 'window.__renderingStats = new __RenderingStats();' + 'window.__renderingStats.start()') + + def SetNeedsDisplayOnAllLayersAndStart(self): + self._tab.ExecuteJavaScript( + 'chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();' + 'window.__renderingStats = new __RenderingStats();' + 'window.__renderingStats.start()') + + def Stop(self): + self._tab.ExecuteJavaScript('window.__renderingStats.stop()') + + def BindToAction(self, action): + # Make the scroll test start and stop measurement automatically. 
+ self._tab.ExecuteJavaScript( + 'window.__renderingStats = new __RenderingStats();') + action.BindMeasurementJavaScript(self._tab, + 'window.__renderingStats.start();', + 'window.__renderingStats.stop();') + + @property + def start_values(self): + return self._tab.EvaluateJavaScript( + 'window.__renderingStats.getStartValues()') + + @property + def end_values(self): + return self._tab.EvaluateJavaScript( + 'window.__renderingStats.getEndValues()') + + @property + def deltas(self): + return self._tab.EvaluateJavaScript( + 'window.__renderingStats.getDeltas()') diff --git a/tools/perf/perf_tools/spaceport.py b/tools/perf/perf_tools/spaceport.py index 2a4d623..8dec484 100644 --- a/tools/perf/perf_tools/spaceport.py +++ b/tools/perf/perf_tools/spaceport.py @@ -5,9 +5,9 @@ import logging from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class SpaceportBenchmark(page_benchmark.PageBenchmark): +class SpaceportMeasurement(page_measurement.PageMeasurement): def CustomizeBrowserOptions(self, options): options.extra_browser_args.extend(['--disable-gpu-vsync']) @@ -29,13 +29,14 @@ class SpaceportBenchmark(page_benchmark.PageBenchmark): js_get_results = 'JSON.stringify(window.__results)' num_tests_complete = [0] # A list to work around closure issue. def _IsDone(): - num_tests_in_benchmark = 24 + num_tests_in_measurement = 24 num_results = len(eval(tab.EvaluateJavaScript(js_get_results))) if num_results > num_tests_complete[0]: num_tests_complete[0] = num_results - logging.info('Completed benchmark %d of %d' % (num_tests_complete[0], - num_tests_in_benchmark)) - return num_tests_complete[0] >= num_tests_in_benchmark + logging.info('Completed measurement %d of %d' + % (num_tests_complete[0], + num_tests_in_measurement)) + return num_tests_complete[0] >= num_tests_in_measurement util.WaitFor(_IsDone, 1200, poll_interval=5) result_dict = eval(tab.EvaluateJavaScript(js_get_results)) diff --git a/tools/perf/perf_tools/startup_benchmark.py b/tools/perf/perf_tools/startup_measurement.py index 2f81113..f328a37 100644 --- a/tools/perf/perf_tools/startup_benchmark.py +++ b/tools/perf/perf_tools/startup_measurement.py @@ -4,10 +4,10 @@ import json -from telemetry.page import page_benchmark +from telemetry.page import page_measurement # Test how long Chrome takes to load when warm. 
-class PerfWarm(page_benchmark.PageBenchmark): +class PerfWarm(page_measurement.PageMeasurement): def __init__(self): super(PerfWarm, self).__init__(needs_browser_restart_after_each_run=True, discard_first_result=True) diff --git a/tools/perf/perf_tools/sunspider.py b/tools/perf/perf_tools/sunspider.py index d65bfd8..9bbb40d 100644 --- a/tools/perf/perf_tools/sunspider.py +++ b/tools/perf/perf_tools/sunspider.py @@ -6,10 +6,10 @@ import collections import json from telemetry.core import util -from telemetry.page import page_benchmark +from telemetry.page import page_measurement -class SunSpiderBenchark(page_benchmark.PageBenchmark): +class SunSpiderMeasurement(page_measurement.PageMeasurement): def MeasurePage(self, _, tab, results): js_is_done = """ window.location.pathname.indexOf('sunspider-results') >= 0""" diff --git a/tools/perf/run_multipage_benchmarks b/tools/perf/run_multipage_benchmarks index 052d2fa..f2370d7 100755 --- a/tools/perf/run_multipage_benchmarks +++ b/tools/perf/run_multipage_benchmarks @@ -54,8 +54,33 @@ if __name__ == '__main__': print ListBootstrapDeps() sys.exit(0) - from telemetry.page import page_benchmark_runner + from telemetry.page import page_measurement_runner import page_sets - benchmark_dir = os.path.dirname(perf_tools.__file__) + measurement_dir = os.path.dirname(perf_tools.__file__) page_set_filenames = page_sets.GetAllPageSetFilenames() - sys.exit(page_benchmark_runner.Main(benchmark_dir, page_set_filenames)) + + old_benchmark_names = { + "cheapness_predictor_benchmark": "cheapness_predictor_measurement", + "image_decoding_benchmark": "image_decoding_measurement", + "loading_benchmark": "loading_measurement", + "memory_benchmark": "memory_measurement", + "rasterize_and_record_benchmark": "rasterize_and_record_benchmark", + "scrolling_benchmark": "smoothness_measurement", + "smoothness_benchmark": "smoothness_measurement", + "startup_benchmark": "startup_measurement" + } + + # There are bots that are hard-coded to run some specific named tests. + # Convert these to the current naming conventions by overriding them in the runner. + class MeasurementRunner(page_measurement_runner.PageMeasurementRunner): + def GetModernizedTestName(self, arg): + if arg not in old_benchmark_names: + return arg + sys.stderr.write( + 'An old name %s was given. Please use %s in the future.\n' % ( + arg, + old_benchmark_names.get(arg))) + return old_benchmark_names[arg] + + runner = MeasurementRunner() + sys.exit(runner.Run(measurement_dir, page_set_filenames)) diff --git a/tools/telemetry/telemetry/core/browser_credentials.py b/tools/telemetry/telemetry/core/browser_credentials.py index 707ca26..23e3f05 100644 --- a/tools/telemetry/telemetry/core/browser_credentials.py +++ b/tools/telemetry/telemetry/core/browser_credentials.py @@ -133,7 +133,7 @@ class BrowserCredentials(object): '..', 'examples', 'credentials_example.json'))) logging.warning(""" - Credentials for %s were not found. %i pages will not be benchmarked. + Credentials for %s were not found. %i pages will not be tested. 
To fix this, either add svn-internal to your .gclient using http://goto/read-src-internal, or add your own credentials to: diff --git a/tools/telemetry/telemetry/core/browser_options.py b/tools/telemetry/telemetry/core/browser_options.py index 64e23b9..f1ef7fc 100644 --- a/tools/telemetry/telemetry/core/browser_options.py +++ b/tools/telemetry/telemetry/core/browser_options.py @@ -126,7 +126,7 @@ class BrowserOptions(optparse.Values): group.add_option('--allow-live-sites', dest='allow_live_sites', action='store_true', help='Run against live sites if the Web Page Replay archives don\'t ' - 'exist. Without this flag, the benchmark will just fail instead ' + 'exist. Without this flag, the test will just fail instead ' 'of running against live sites.') parser.add_option_group(group) @@ -147,7 +147,7 @@ class BrowserOptions(optparse.Values): group = optparse.OptionGroup(parser, 'Platform options') group.add_option('--no-performance-mode', action='store_true', help='Some platforms run on "full performance mode" where the ' - 'benchmark is executed at maximum CPU speed in order to minimize noise ' + 'test is executed at maximum CPU speed in order to minimize noise ' '(specially important for dashboards / continuous builds). ' 'This option prevents Telemetry from tweaking such platform settings.') parser.add_option_group(group) diff --git a/tools/telemetry/telemetry/core/browser_unittest.py b/tools/telemetry/telemetry/core/browser_unittest.py index d6e87d8..6d4dd05 100644 --- a/tools/telemetry/telemetry/core/browser_unittest.py +++ b/tools/telemetry/telemetry/core/browser_unittest.py @@ -19,9 +19,8 @@ class BrowserTest(unittest.TestCase): assert b.tabs[0].url def testCommandLineOverriding(self): - # This test starts the browser with --enable-benchmarking, which should - # create a chrome.Interval namespace. This tests whether the command line is - # being set. + # This test starts the browser with --user-agent=telemetry. This tests + # whether the user agent is then set. options = options_for_unittests.GetCopy() flag1 = '--user-agent=telemetry' diff --git a/tools/telemetry/telemetry/core/chrome/desktop_browser_backend.py b/tools/telemetry/telemetry/core/chrome/desktop_browser_backend.py index 58a6307..5487038 100644 --- a/tools/telemetry/telemetry/core/chrome/desktop_browser_backend.py +++ b/tools/telemetry/telemetry/core/chrome/desktop_browser_backend.py @@ -41,7 +41,7 @@ class DesktopBrowserBackend(browser_backend.BrowserBackend): self._LaunchBrowser(options) # For old chrome versions, might have to relaunch to have the - # correct benchmarking switch. + # correct net_benchmarking switch. if self._chrome_branch_number < 1418: self.Close() self._supports_net_benchmarking = False diff --git a/tools/telemetry/telemetry/page/actions/page_action.py b/tools/telemetry/telemetry/page/actions/page_action.py index 8ac0f65..c6da080 100644 --- a/tools/telemetry/telemetry/page/actions/page_action.py +++ b/tools/telemetry/telemetry/page/actions/page_action.py @@ -41,14 +41,14 @@ class PageAction(object): def CanBeBound(self): """If this class implements BindMeasurementJavaScript, override CanBeBound - to return True so that a benchmark knows it can bind measurements.""" + to return True so that a test knows it can bind measurements.""" return False def BindMeasurementJavaScript( self, tab, start_js, stop_js): # pylint: disable=W0613 """Let this action determine when measurements should start and stop. 
- A benchmark or measurement can call this method to provide the action + A measurement can call this method to provide the action with JavaScript code that starts and stops measurements. The action determines when to execute the provided JavaScript code, for more accurate timings. diff --git a/tools/telemetry/telemetry/page/actions/scroll.py b/tools/telemetry/telemetry/page/actions/scroll.py index 6d12feb2..33b9091 100644 --- a/tools/telemetry/telemetry/page/actions/scroll.py +++ b/tools/telemetry/telemetry/page/actions/scroll.py @@ -39,7 +39,7 @@ class ScrollAction(page_action.PageAction): tab.ExecuteJavaScript( 'window.__scrollAction.start(document.body);') - # Poll for scroll benchmark completion. + # Poll for scroll action completion. util.WaitFor(lambda: tab.EvaluateJavaScript( 'window.__scrollActionDone'), 60) diff --git a/tools/telemetry/telemetry/page/actions/scroll_unittest.py b/tools/telemetry/telemetry/page/actions/scroll_unittest.py index 578ecd8..cc840cc 100644 --- a/tools/telemetry/telemetry/page/actions/scroll_unittest.py +++ b/tools/telemetry/telemetry/page/actions/scroll_unittest.py @@ -66,14 +66,13 @@ class ScrollActionTest(tab_test_case.TabTestCase): js = f.read() self._tab.ExecuteJavaScript(js) - # Verify that the rect returned by getBoundingVisibleRect() in - # scroll.js is completely contained within the viewport. Scroll - # events dispatched by the benchmarks use the center of this rect - # as their location, and this location needs to be within the - # viewport bounds to correctly decide between main-thread and - # impl-thread scroll. If the scrollable area were not clipped - # to the viewport bounds, then the instance used here (the scrollable - # area being more than twice as tall as the viewport) would + # Verify that the rect returned by getBoundingVisibleRect() in scroll.js is + # completely contained within the viewport. Scroll events dispatched by the + # scrolling API use the center of this rect as their location, and this + # location needs to be within the viewport bounds to correctly decide + # between main-thread and impl-thread scroll. If the scrollable area were + # not clipped to the viewport bounds, then the instance used here (the + # scrollable area being more than twice as tall as the viewport) would # result in a scroll location outside of the viewport bounds. self._tab.ExecuteJavaScript("""document.body.style.height = (2 * window.innerHeight + 1) + 'px';""") diff --git a/tools/telemetry/telemetry/page/block_page_benchmark_results.py b/tools/telemetry/telemetry/page/block_page_measurement_results.py index 52b5640..625c5c8 100644 --- a/tools/telemetry/telemetry/page/block_page_benchmark_results.py +++ b/tools/telemetry/telemetry/page/block_page_measurement_results.py @@ -3,11 +3,12 @@ # found in the LICENSE file. 
import os -from telemetry.page import page_benchmark_results +from telemetry.page import page_measurement_results -class BlockPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): +class BlockPageMeasurementResults( + page_measurement_results.PageMeasurementResults): def __init__(self, output_file): - super(BlockPageBenchmarkResults, self).__init__() + super(BlockPageMeasurementResults, self).__init__() self._output_file = output_file def DidMeasurePage(self): @@ -29,4 +30,4 @@ class BlockPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): self._output_file.write(os.linesep) self._output_file.write(os.linesep) - super(BlockPageBenchmarkResults, self).DidMeasurePage() + super(BlockPageMeasurementResults, self).DidMeasurePage() diff --git a/tools/telemetry/telemetry/page/block_page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page/block_page_measurement_results_unittest.py index fd38071..21af60e 100644 --- a/tools/telemetry/telemetry/page/block_page_benchmark_results_unittest.py +++ b/tools/telemetry/telemetry/page/block_page_measurement_results_unittest.py @@ -5,11 +5,11 @@ import StringIO import os import unittest -from telemetry.page import block_page_benchmark_results +from telemetry.page import block_page_measurement_results from telemetry.page import page_set -BlockPageBenchmarkResults = \ - block_page_benchmark_results.BlockPageBenchmarkResults +BlockPageMeasurementResults = \ + block_page_measurement_results.BlockPageMeasurementResults def _MakePageSet(): return page_set.PageSet.FromDict({ @@ -21,14 +21,14 @@ def _MakePageSet(): ] }, os.path.dirname(__file__)) -class NonPrintingBlockPageBenchmarkResults(BlockPageBenchmarkResults): +class NonPrintingBlockPageMeasurementResults(BlockPageMeasurementResults): def __init__(self, *args): - super(NonPrintingBlockPageBenchmarkResults, self).__init__(*args) + super(NonPrintingBlockPageMeasurementResults, self).__init__(*args) def _PrintPerfResult(self, *args): pass -class BlockPageBenchmarkResultsTest(unittest.TestCase): +class BlockPageMeasurementResultsTest(unittest.TestCase): def setUp(self): self._output = StringIO.StringIO() self._page_set = _MakePageSet() @@ -43,7 +43,7 @@ class BlockPageBenchmarkResultsTest(unittest.TestCase): return [line.split(': ', 1) for line in self.lines] def test_with_output_after_every_page(self): - results = NonPrintingBlockPageBenchmarkResults(self._output) + results = NonPrintingBlockPageMeasurementResults(self._output) results.WillMeasurePage(self._page_set[0]) results.Add('foo', 'seconds', 3) results.DidMeasurePage() diff --git a/tools/telemetry/telemetry/page/csv_page_benchmark_results.py b/tools/telemetry/telemetry/page/csv_page_measurement_results.py index 16df18e..8afd29e 100644 --- a/tools/telemetry/telemetry/page/csv_page_benchmark_results.py +++ b/tools/telemetry/telemetry/page/csv_page_measurement_results.py @@ -1,11 +1,12 @@ # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
-from telemetry.page import page_benchmark_results +from telemetry.page import page_measurement_results -class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): +class CsvPageMeasurementResults( + page_measurement_results.PageMeasurementResults): def __init__(self, results_writer, output_after_every_page): - super(CsvPageBenchmarkResults, self).__init__() + super(CsvPageMeasurementResults, self).__init__() self._results_writer = results_writer self._did_output_header = False self._header_names_written_to_writer = None @@ -14,7 +15,7 @@ class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): def DidMeasurePage(self): assert self.values_for_current_page, 'Failed to call WillMeasurePage' if not self._output_after_every_page: - super(CsvPageBenchmarkResults, self).DidMeasurePage() + super(CsvPageMeasurementResults, self).DidMeasurePage() return if not self._did_output_header: @@ -24,7 +25,7 @@ class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): self._OutputValuesForPage(self.values_for_current_page) - super(CsvPageBenchmarkResults, self).DidMeasurePage() + super(CsvPageMeasurementResults, self).DidMeasurePage() def PrintSummary(self, trace_tag): if not self._output_after_every_page: @@ -32,7 +33,7 @@ class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): for page_values in self.page_results: self._OutputValuesForPage(page_values) - super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag) + super(CsvPageMeasurementResults, self).PrintSummary(trace_tag) def _ValidateOutputNamesForCurrentPage(self): assert self._did_output_header @@ -42,7 +43,7 @@ class CsvPageBenchmarkResults(page_benchmark_results.PageBenchmarkResults): set(self._header_names_written_to_writer) if header_names_written_to_writer == current_page_measurement_names: return - assert False, """To use CsvPageBenchmarkResults, you must add the same + assert False, """To use CsvPageMeasurementResults, you must add the same result names for every page. In this case, first page output: %s @@ -51,7 +52,7 @@ output: %s Change your test to produce the same thing each time, or modify -PageBenchmark.results_are_the_same_on_every_page to return False. +PageMeasurement.results_are_the_same_on_every_page to return False. 
""" % (repr(header_names_written_to_writer), repr(current_page_measurement_names)) diff --git a/tools/telemetry/telemetry/page/csv_page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page/csv_page_measurement_results_unittest.py index e1d7b44c..12f8821 100644 --- a/tools/telemetry/telemetry/page/csv_page_benchmark_results_unittest.py +++ b/tools/telemetry/telemetry/page/csv_page_measurement_results_unittest.py @@ -6,7 +6,7 @@ import csv import os import unittest -from telemetry.page import csv_page_benchmark_results +from telemetry.page import csv_page_measurement_results from telemetry.page import page_set def _MakePageSet(): @@ -18,15 +18,15 @@ def _MakePageSet(): ] }, os.path.dirname(__file__)) -class NonPrintingCsvPageBenchmarkResults( - csv_page_benchmark_results.CsvPageBenchmarkResults): +class NonPrintingCsvPageMeasurementResults( + csv_page_measurement_results.CsvPageMeasurementResults): def __init__(self, *args): - super(NonPrintingCsvPageBenchmarkResults, self).__init__(*args) + super(NonPrintingCsvPageMeasurementResults, self).__init__(*args) def _PrintPerfResult(self, *args): pass -class CsvPageBenchmarkResultsTest(unittest.TestCase): +class CsvPageMeasurementResultsTest(unittest.TestCase): def setUp(self): self._output = StringIO.StringIO() self._page_set = _MakePageSet() @@ -47,7 +47,8 @@ class CsvPageBenchmarkResultsTest(unittest.TestCase): return rows[1:] def test_with_output_after_every_page(self): - results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), True) + results = NonPrintingCsvPageMeasurementResults( + csv.writer(self._output), True) results.WillMeasurePage(self._page_set[0]) results.Add('foo', 'seconds', 3) results.DidMeasurePage() @@ -69,7 +70,8 @@ class CsvPageBenchmarkResultsTest(unittest.TestCase): [self._page_set[1].url, '4']) def test_with_output_after_every_page_and_inconsistency(self): - results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), True) + results = NonPrintingCsvPageMeasurementResults( + csv.writer(self._output), True) results.WillMeasurePage(self._page_set[0]) results.Add('foo', 'seconds', 3) results.DidMeasurePage() @@ -83,7 +85,7 @@ class CsvPageBenchmarkResultsTest(unittest.TestCase): lambda: results.DidMeasurePage()) # pylint: disable=W0108 def test_with_output_at_print_summary_time(self): - results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), + results = NonPrintingCsvPageMeasurementResults(csv.writer(self._output), False) results.WillMeasurePage(self._page_set[0]) results.Add('foo', 'seconds', 3) @@ -104,7 +106,7 @@ class CsvPageBenchmarkResultsTest(unittest.TestCase): [self._page_set[1].url, '4', '-']]) def test_histogram(self): - results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), + results = NonPrintingCsvPageMeasurementResults(csv.writer(self._output), False) results.WillMeasurePage(self._page_set[0]) results.Add('a', '', diff --git a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py deleted file mode 100644 index 63d23875..0000000 --- a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
-import os -import unittest - -from telemetry.page import page_benchmark_results -from telemetry.page import page_set -from telemetry.page import perf_tests_helper - -def _MakePageSet(): - return page_set.PageSet.FromDict({ - "description": "hello", - "archive_path": "foo.wpr", - "pages": [ - {"url": "http://www.foo.com/"}, - {"url": "http://www.bar.com/"} - ] - }, os.path.dirname(__file__)) - -class NonPrintingPageBenchmarkResults( - page_benchmark_results.PageBenchmarkResults): - def __init__(self): - super(NonPrintingPageBenchmarkResults, self).__init__() - - def _PrintPerfResult(self, *args): - pass - -class SummarySavingPageBenchmarkResults( - page_benchmark_results.PageBenchmarkResults): - def __init__(self): - super(SummarySavingPageBenchmarkResults, self).__init__() - self.results = [] - - def _PrintPerfResult(self, *args): - res = perf_tests_helper.PrintPerfResult(*args, print_to_stdout=False) - self.results.append(res) - -class PageBenchmarkResultsTest(unittest.TestCase): - def test_basic(self): - test_page_set = _MakePageSet() - - benchmark_results = NonPrintingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary('trace_tag') - - def test_url_is_invalid_value(self): - test_page_set = _MakePageSet() - - benchmark_results = NonPrintingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - self.assertRaises( - AssertionError, - lambda: benchmark_results.Add('url', 'string', 'foo')) - - def test_unit_change(self): - test_page_set = _MakePageSet() - - benchmark_results = NonPrintingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - self.assertRaises( - AssertionError, - lambda: benchmark_results.Add('a', 'foobgrobbers', 3)) - - def test_type_change(self): - test_page_set = _MakePageSet() - - benchmark_results = NonPrintingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - self.assertRaises( - AssertionError, - lambda: benchmark_results.Add('a', 'seconds', 3, data_type='histogram')) - - def test_basic_summary(self): - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 7) - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary(None) - expected = ['RESULT a_by_url: http___www.foo.com_= 3 seconds', - 'RESULT a_by_url: http___www.bar.com_= 7 seconds', - '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n' + - 'Sd a: 2.828427seconds'] - self.assertEquals( - benchmark_results.results, - expected) - - def test_basic_summary_pass_and_fail_page(self): - """If a page failed, only print summary for individual passing pages.""" - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - 
benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - benchmark_results.AddFailure(test_page_set.pages[0], 'message', 'details') - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 7) - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary(None) - expected = ['RESULT a_by_url: http___www.bar.com_= 7 seconds'] - self.assertEquals(benchmark_results.results, expected) - - def test_basic_summary_all_pages_fail(self): - """If all pages fail, no summary is printed.""" - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - benchmark_results.AddFailure(test_page_set.pages[0], 'message', 'details') - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 7) - benchmark_results.DidMeasurePage() - benchmark_results.AddFailure(test_page_set.pages[1], 'message', 'details') - - benchmark_results.PrintSummary(None) - self.assertEquals(benchmark_results.results, []) - - def test_repeated_pageset_one_iteration_one_page_fails(self): - """Page fails on one iteration, no results for that page should print.""" - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 7) - benchmark_results.DidMeasurePage() - benchmark_results.AddFailure(test_page_set.pages[1], 'message', 'details') - - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 4) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 8) - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary(None) - expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' + - 'Avg a_by_url: 3.500000seconds\nSd a_by_url: 0.707107seconds'] - self.assertEquals(benchmark_results.results, expected) - - def test_repeated_pageset(self): - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 7) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', 'seconds', 4) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', 'seconds', 8) - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary(None) - expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' + - 'Avg a_by_url: 3.500000seconds\nSd a_by_url: 0.707107seconds', - 'RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' + - 'Avg a_by_url: 7.500000seconds\nSd a_by_url: 0.707107seconds', - '*RESULT a: a= [3,7,4,8] seconds\n' + - 'Avg a: 5.500000seconds\nSd a: 2.380476seconds' - ] - self.assertEquals( - benchmark_results.results, - expected) - - def test_overall_results(self): - 
test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - - benchmark_results.AddSummary('a', 'seconds', 1) - - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('b', 'seconds', 2) - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('b', 'seconds', 3) - benchmark_results.DidMeasurePage() - - benchmark_results.AddSummary('c', 'seconds', 4) - - benchmark_results.PrintSummary(None) - self.assertEquals( - benchmark_results.results, - ['RESULT b_by_url: http___www.foo.com_= 2 seconds', - 'RESULT b_by_url: http___www.bar.com_= 3 seconds', - '*RESULT b: b= [2,3] seconds\n' + - 'Avg b: 2.500000seconds\nSd b: 0.707107seconds', - '*RESULT a: a= 1 seconds', - '*RESULT c: c= 4 seconds']) - - benchmark_results.results = [] - benchmark_results.PrintSummary(trace_tag='_ref') - - self.assertEquals( - benchmark_results.results, - ['*RESULT b: b_ref= [2,3] seconds\n' + - 'Avg b: 2.500000seconds\nSd b: 0.707107seconds', - '*RESULT a: a_ref= 1 seconds', - '*RESULT c: c_ref= 4 seconds']) - - def test_histogram(self): - test_page_set = _MakePageSet() - - benchmark_results = SummarySavingPageBenchmarkResults() - benchmark_results.WillMeasurePage(test_page_set.pages[0]) - benchmark_results.Add('a', '', - '{"buckets": [{"low": 1, "high": 2, "count": 1}]}', - data_type='histogram') - benchmark_results.DidMeasurePage() - - benchmark_results.WillMeasurePage(test_page_set.pages[1]) - benchmark_results.Add('a', '', - '{"buckets": [{"low": 2, "high": 3, "count": 1}]}', - data_type='histogram') - benchmark_results.DidMeasurePage() - - benchmark_results.PrintSummary(None) - - expected = [ - 'HISTOGRAM a_by_url: http___www.foo.com_= ' + - '{"buckets": [{"low": 1, "high": 2, "count": 1}]}\n' + - 'Avg a_by_url: 1.500000', - 'HISTOGRAM a_by_url: http___www.bar.com_= ' + - '{"buckets": [{"low": 2, "high": 3, "count": 1}]}\n' + - 'Avg a_by_url: 2.500000'] - self.assertEquals(benchmark_results.results, expected) diff --git a/tools/telemetry/telemetry/page/page_benchmark.py b/tools/telemetry/telemetry/page/page_measurement.py index 117904a..04b941e 100644 --- a/tools/telemetry/telemetry/page/page_benchmark.py +++ b/tools/telemetry/telemetry/page/page_measurement.py @@ -8,28 +8,24 @@ class MeasurementFailure(page_test.Failure): designed-for problem.""" pass -class PageBenchmark(page_test.PageTest): - """Glue code for running a benchmark across a set of pages. +class PageMeasurement(page_test.PageTest): + """Glue code for running a measurement across a set of pages. - To use this, subclass from the benchmark and override MeasurePage. For + To use this, subclass from the measurement and override MeasurePage. For example: - class BodyChildElementBenchmark(PageBenchmark): + class BodyChildElementMeasurement(PageMeasurement): def MeasurePage(self, page, tab, results): body_child_count = tab.EvaluateJavaScript( 'document.body.children.length') results.Add('body_children', 'count', body_child_count) if __name__ == '__main__': - page_benchmark.Main(BodyChildElementBenchmark()) - - All benchmarks should include a unit test! - - TODO(nduca): Add explanation of how to write the unit test. 
+ page_measurement.Main(BodyChildElementMeasurement()) To add test-specific options: - class BodyChildElementBenchmark(PageBenchmark): + class BodyChildElementMeasurement(PageMeasurement): def AddCommandLineOptions(parser): parser.add_option('--element', action='store', default='body') @@ -42,7 +38,7 @@ class PageBenchmark(page_test.PageTest): action_name_to_run='', needs_browser_restart_after_each_run=False, discard_first_result=False): - super(PageBenchmark, self).__init__( + super(PageMeasurement, self).__init__( '_RunTest', action_name_to_run, needs_browser_restart_after_each_run, @@ -55,9 +51,9 @@ class PageBenchmark(page_test.PageTest): @property def results_are_the_same_on_every_page(self): - """By default, benchmarks are assumed to output the same values for every + """By default, measurements are assumed to output the same values for every page. This allows incremental output, for example in CSV. If, however, the - benchmark discovers what values it can report as it goes, and those values + measurement discovers what values it can report as it goes, and those values may vary from page to page, you need to override this function and return False. Output will not appear in this mode until the entire pageset has run.""" diff --git a/tools/telemetry/telemetry/page/page_benchmark_results.py b/tools/telemetry/telemetry/page/page_measurement_results.py index e4dbd36..210b2f3 100644 --- a/tools/telemetry/telemetry/page/page_benchmark_results.py +++ b/tools/telemetry/telemetry/page/page_measurement_results.py @@ -7,7 +7,7 @@ from itertools import chain from telemetry.page import page_test from telemetry.page import perf_tests_helper -from telemetry.page import page_benchmark_value +from telemetry.page import page_measurement_value class ValuesForSinglePage(object): def __init__(self, page): @@ -43,9 +43,9 @@ class ValuesForSinglePage(object): return values[0] return None -class PageBenchmarkResults(page_test.PageTestResults): +class PageMeasurementResults(page_test.PageTestResults): def __init__(self): - super(PageBenchmarkResults, self).__init__() + super(PageMeasurementResults, self).__init__() self._page_results = [] self._overall_results = [] @@ -76,19 +76,19 @@ class PageBenchmarkResults(page_test.PageTestResults): return self._all_measurements_that_have_been_seen def Add(self, trace_name, units, value, chart_name=None, data_type='default'): - value = self._GetPageBenchmarkValue(trace_name, units, value, chart_name, + value = self._GetPageMeasurementValue(trace_name, units, value, chart_name, data_type) self._values_for_current_page.AddValue(value) def AddSummary(self, trace_name, units, value, chart_name=None, data_type='default'): - value = self._GetPageBenchmarkValue(trace_name, units, value, chart_name, + value = self._GetPageMeasurementValue(trace_name, units, value, chart_name, data_type) self._overall_results.append(value) - def _GetPageBenchmarkValue(self, trace_name, units, value, chart_name, + def _GetPageMeasurementValue(self, trace_name, units, value, chart_name, data_type): - value = page_benchmark_value.PageBenchmarkValue( + value = page_measurement_value.PageMeasurementValue( trace_name, units, value, chart_name, data_type) measurement_name = value.measurement_name diff --git a/tools/telemetry/telemetry/page/page_measurement_results_unittest.py b/tools/telemetry/telemetry/page/page_measurement_results_unittest.py new file mode 100644 index 0000000..c48e0a7 --- /dev/null +++ b/tools/telemetry/telemetry/page/page_measurement_results_unittest.py @@ -0,0 +1,267 @@ +# 
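
Putting the PageMeasurement pieces from this file together, a complete measurement that both exposes a command-line option and reports a value might look like the sketch below. The option name and JavaScript expression are illustrative; the hooks (AddCommandLineOptions, self.options, MeasurePage, results.Add) are the ones shown in this diff:

    from telemetry.page import page_measurement

    class ElementCountMeasurement(page_measurement.PageMeasurement):
      """Counts the children of a configurable element on each page."""

      def AddCommandLineOptions(self, parser):
        # Parsed values become available on self.options during the run.
        parser.add_option('--element', action='store', default='body')

      def MeasurePage(self, page, tab, results):
        count = tab.EvaluateJavaScript(
            'document.querySelector("%s").children.length' %
            self.options.element)
        results.Add('child_count', 'count', count)
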
Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +import os +import unittest + +from telemetry.page import page_measurement_results +from telemetry.page import page_set +from telemetry.page import perf_tests_helper + +def _MakePageSet(): + return page_set.PageSet.FromDict({ + "description": "hello", + "archive_path": "foo.wpr", + "pages": [ + {"url": "http://www.foo.com/"}, + {"url": "http://www.bar.com/"} + ] + }, os.path.dirname(__file__)) + +class NonPrintingPageMeasurementResults( + page_measurement_results.PageMeasurementResults): + def __init__(self): + super(NonPrintingPageMeasurementResults, self).__init__() + + def _PrintPerfResult(self, *args): + pass + +class SummarySavingPageMeasurementResults( + page_measurement_results.PageMeasurementResults): + def __init__(self): + super(SummarySavingPageMeasurementResults, self).__init__() + self.results = [] + + def _PrintPerfResult(self, *args): + res = perf_tests_helper.PrintPerfResult(*args, print_to_stdout=False) + self.results.append(res) + +class PageMeasurementResultsTest(unittest.TestCase): + def test_basic(self): + test_page_set = _MakePageSet() + + measurement_results = NonPrintingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary('trace_tag') + + def test_url_is_invalid_value(self): + test_page_set = _MakePageSet() + + measurement_results = NonPrintingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + self.assertRaises( + AssertionError, + lambda: measurement_results.Add('url', 'string', 'foo')) + + def test_unit_change(self): + test_page_set = _MakePageSet() + + measurement_results = NonPrintingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + self.assertRaises( + AssertionError, + lambda: measurement_results.Add('a', 'foobgrobbers', 3)) + + def test_type_change(self): + test_page_set = _MakePageSet() + + measurement_results = NonPrintingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + self.assertRaises( + AssertionError, + lambda: measurement_results.Add('a', 'seconds', 3, data_type='histogram')) + + def test_basic_summary(self): + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 7) + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary(None) + expected = ['RESULT a_by_url: http___www.foo.com_= 3 seconds', + 'RESULT a_by_url: http___www.bar.com_= 7 seconds', + '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n' + + 'Sd a: 2.828427seconds'] + self.assertEquals( + measurement_results.results, 
+ expected) + + def test_basic_summary_pass_and_fail_page(self): + """If a page failed, only print summary for individual passing pages.""" + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + measurement_results.AddFailure(test_page_set.pages[0], 'message', 'details') + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 7) + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary(None) + expected = ['RESULT a_by_url: http___www.bar.com_= 7 seconds'] + self.assertEquals(measurement_results.results, expected) + + def test_basic_summary_all_pages_fail(self): + """If all pages fail, no summary is printed.""" + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + measurement_results.AddFailure(test_page_set.pages[0], 'message', 'details') + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 7) + measurement_results.DidMeasurePage() + measurement_results.AddFailure(test_page_set.pages[1], 'message', 'details') + + measurement_results.PrintSummary(None) + self.assertEquals(measurement_results.results, []) + + def test_repeated_pageset_one_iteration_one_page_fails(self): + """Page fails on one iteration, no results for that page should print.""" + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 7) + measurement_results.DidMeasurePage() + measurement_results.AddFailure(test_page_set.pages[1], 'message', 'details') + + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 4) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 8) + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary(None) + expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' + + 'Avg a_by_url: 3.500000seconds\nSd a_by_url: 0.707107seconds'] + self.assertEquals(measurement_results.results, expected) + + def test_repeated_pageset(self): + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 7) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', 'seconds', 4) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', 'seconds', 8) + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary(None) + expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' + + 'Avg a_by_url: 3.500000seconds\nSd a_by_url: 
0.707107seconds', + 'RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' + + 'Avg a_by_url: 7.500000seconds\nSd a_by_url: 0.707107seconds', + '*RESULT a: a= [3,7,4,8] seconds\n' + + 'Avg a: 5.500000seconds\nSd a: 2.380476seconds' + ] + self.assertEquals( + measurement_results.results, + expected) + + def test_overall_results(self): + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + + measurement_results.AddSummary('a', 'seconds', 1) + + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('b', 'seconds', 2) + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('b', 'seconds', 3) + measurement_results.DidMeasurePage() + + measurement_results.AddSummary('c', 'seconds', 4) + + measurement_results.PrintSummary(None) + self.assertEquals( + measurement_results.results, + ['RESULT b_by_url: http___www.foo.com_= 2 seconds', + 'RESULT b_by_url: http___www.bar.com_= 3 seconds', + '*RESULT b: b= [2,3] seconds\n' + + 'Avg b: 2.500000seconds\nSd b: 0.707107seconds', + '*RESULT a: a= 1 seconds', + '*RESULT c: c= 4 seconds']) + + measurement_results.results = [] + measurement_results.PrintSummary(trace_tag='_ref') + + self.assertEquals( + measurement_results.results, + ['*RESULT b: b_ref= [2,3] seconds\n' + + 'Avg b: 2.500000seconds\nSd b: 0.707107seconds', + '*RESULT a: a_ref= 1 seconds', + '*RESULT c: c_ref= 4 seconds']) + + def test_histogram(self): + test_page_set = _MakePageSet() + + measurement_results = SummarySavingPageMeasurementResults() + measurement_results.WillMeasurePage(test_page_set.pages[0]) + measurement_results.Add('a', '', + '{"buckets": [{"low": 1, "high": 2, "count": 1}]}', + data_type='histogram') + measurement_results.DidMeasurePage() + + measurement_results.WillMeasurePage(test_page_set.pages[1]) + measurement_results.Add('a', '', + '{"buckets": [{"low": 2, "high": 3, "count": 1}]}', + data_type='histogram') + measurement_results.DidMeasurePage() + + measurement_results.PrintSummary(None) + + expected = [ + 'HISTOGRAM a_by_url: http___www.foo.com_= ' + + '{"buckets": [{"low": 1, "high": 2, "count": 1}]}\n' + + 'Avg a_by_url: 1.500000', + 'HISTOGRAM a_by_url: http___www.bar.com_= ' + + '{"buckets": [{"low": 2, "high": 3, "count": 1}]}\n' + + 'Avg a_by_url: 2.500000'] + self.assertEquals(measurement_results.results, expected) diff --git a/tools/telemetry/telemetry/page/page_benchmark_runner.py b/tools/telemetry/telemetry/page/page_measurement_runner.py index 4c9c611..5fdec43 100755 --- a/tools/telemetry/telemetry/page/page_benchmark_runner.py +++ b/tools/telemetry/telemetry/page/page_measurement_runner.py @@ -6,21 +6,21 @@ import csv import os import sys -from telemetry.page import block_page_benchmark_results -from telemetry.page import csv_page_benchmark_results -from telemetry.page import page_benchmark +from telemetry.page import block_page_measurement_results +from telemetry.page import csv_page_measurement_results +from telemetry.page import page_measurement from telemetry.page import page_test_runner -def Main(benchmark_dir, page_set_filenames): - """Turns a PageBenchmark into a command-line program. +def Main(measurement_dir, page_set_filenames): + """Turns a PageMeasurement into a command-line program. Args: - benchmark_dir: Path to directory containing PageBenchmarks. + measurement_dir: Path to directory containing PageMeasurements. 
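
The page_measurement_runner.Main entry point in this diff turns a directory of PageMeasurement subclasses into a command-line tool. A hypothetical front-end wrapper, with directory layout purely illustrative:

    #!/usr/bin/env python
    # Hypothetical front-end: expose every PageMeasurement found under
    # ./measurements as a command-line tool run against ./page_sets/*.json.
    import glob
    import os

    from telemetry.page import page_measurement_runner

    if __name__ == '__main__':
      base_dir = os.path.dirname(__file__)
      measurement_dir = os.path.join(base_dir, 'measurements')
      page_set_filenames = glob.glob(
          os.path.join(base_dir, 'page_sets', '*.json'))
      # Main() parses sys.argv, runs the selected measurement and exits.
      page_measurement_runner.Main(measurement_dir, page_set_filenames)
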
""" - runner = PageBenchmarkRunner() - sys.exit(runner.Run(benchmark_dir, page_set_filenames)) + runner = PageMeasurementRunner() + sys.exit(runner.Run(measurement_dir, page_set_filenames)) -class PageBenchmarkRunner(page_test_runner.PageTestRunner): +class PageMeasurementRunner(page_test_runner.PageTestRunner): def AddCommandLineOptions(self, parser): parser.add_option('--output-format', dest='output_format', @@ -36,24 +36,24 @@ class PageBenchmarkRunner(page_test_runner.PageTestRunner): @property def test_class(self): - return page_benchmark.PageBenchmark + return page_measurement.PageMeasurement @property def test_class_name(self): - return 'benchmark' + return 'measurement' - def PrepareResults(self, benchmark): + def PrepareResults(self, measurement): if not self._options.output_file or self._options.output_file == '-': output_file = sys.stdout else: output_file = open(os.path.expanduser(self._options.output_file), 'w') if self._options.output_format == 'csv': - results = csv_page_benchmark_results.CsvPageBenchmarkResults( + results = csv_page_measurement_results.CsvPageMeasurementResults( csv.writer(output_file), - benchmark.results_are_the_same_on_every_page) + measurement.results_are_the_same_on_every_page) elif self._options.output_format in ('block', 'terminal-block'): - results = block_page_benchmark_results.BlockPageBenchmarkResults( + results = block_page_measurement_results.BlockPageMeasurementResults( output_file) else: raise Exception('Invalid --output-format value: "%s". Valid values are ' @@ -72,4 +72,4 @@ class PageBenchmarkRunner(page_test_runner.PageTestRunner): output_trace_tag = '_ref' results.PrintSummary(output_trace_tag) - return super(PageBenchmarkRunner, self).OutputResults(results) + return super(PageMeasurementRunner, self).OutputResults(results) diff --git a/tools/telemetry/telemetry/page/page_benchmark_unittest.py b/tools/telemetry/telemetry/page/page_measurement_unittest.py index 5ab908f..ed7fdc7 100644 --- a/tools/telemetry/telemetry/page/page_benchmark_unittest.py +++ b/tools/telemetry/telemetry/page/page_measurement_unittest.py @@ -5,8 +5,8 @@ import json import os from telemetry.core import wpr_modes -from telemetry.page import page_benchmark -from telemetry.page import page_benchmark_unittest_base +from telemetry.page import page_measurement +from telemetry.page import page_measurement_unittest_base from telemetry.page import page as page_module from telemetry.page import page_set from telemetry.page import page_set_archive_info @@ -14,11 +14,11 @@ from telemetry.page.actions import all_page_actions from telemetry.page.actions import page_action from telemetry.test import options_for_unittests -class BenchThatFails(page_benchmark.PageBenchmark): +class MeasurementThatFails(page_measurement.PageMeasurement): def MeasurePage(self, page, tab, results): - raise page_benchmark.MeasurementFailure('Intentional failure.') + raise page_measurement.MeasurementFailure('Intentional failure.') -class BenchThatHasDefaults(page_benchmark.PageBenchmark): +class MeasurementThatHasDefaults(page_measurement.PageMeasurement): def AddCommandLineOptions(self, parser): parser.add_option('-x', dest='x', default=3) @@ -26,32 +26,32 @@ class BenchThatHasDefaults(page_benchmark.PageBenchmark): assert self.options.x == 3 results.Add('x', 'ms', 7) -class BenchForBlank(page_benchmark.PageBenchmark): +class MeasurementForBlank(page_measurement.PageMeasurement): def MeasurePage(self, page, tab, results): contents = tab.EvaluateJavaScript('document.body.textContent') assert 
contents.strip() == 'Hello world' -class BenchForReplay(page_benchmark.PageBenchmark): +class MeasurementForReplay(page_measurement.PageMeasurement): def MeasurePage(self, page, tab, results): # Web Page Replay returns '404 Not found' if a page is not in the archive. contents = tab.EvaluateJavaScript('document.body.textContent') if '404 Not Found' in contents.strip(): - raise page_benchmark.MeasurementFailure('Page not in archive.') + raise page_measurement.MeasurementFailure('Page not in archive.') -class BenchQueryParams(page_benchmark.PageBenchmark): +class MeasurementQueryParams(page_measurement.PageMeasurement): def MeasurePage(self, page, tab, results): query = tab.EvaluateJavaScript('window.location.search') assert query.strip() == '?foo=1' -class BenchWithAction(page_benchmark.PageBenchmark): +class MeasurementWithAction(page_measurement.PageMeasurement): def __init__(self): - super(BenchWithAction, self).__init__('test_action') + super(MeasurementWithAction, self).__init__('test_action') def MeasurePage(self, page, tab, results): pass -class PageBenchmarkUnitTest( - page_benchmark_unittest_base.PageBenchmarkUnitTestBase): +class PageMeasurementUnitTest( + page_measurement_unittest_base.PageMeasurementUnitTestBase): def setUp(self): self._options = options_for_unittests.GetCopy() @@ -59,27 +59,27 @@ class PageBenchmarkUnitTest( def testGotToBlank(self): ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html') - benchmark = BenchForBlank() - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + measurement = MeasurementForBlank() + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(0, len(all_results.page_failures)) def testGotQueryParams(self): ps = self.CreatePageSet('file:///../../unittest_data/blank.html?foo=1') - benchmark = BenchQueryParams() + measurement = MeasurementQueryParams() ps.pages[-1].query_params = '?foo=1' - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(0, len(all_results.page_failures)) def testFailure(self): ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html') - benchmark = BenchThatFails() - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + measurement = MeasurementThatFails() + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(1, len(all_results.page_failures)) def testDefaults(self): ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html') - benchmark = BenchThatHasDefaults() - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + measurement = MeasurementThatHasDefaults() + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(len(all_results.page_results), 1) self.assertEquals( all_results.page_results[0].FindValueByTraceName('x').value, 7) @@ -99,7 +99,7 @@ class PageBenchmarkUnitTest( """) try: ps = page_set.PageSet() - benchmark = BenchForReplay() + measurement = MeasurementForReplay() # First record an archive with only www.google.com. 
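
MeasurementWithAction above shows the action plumbing: the name passed to PageMeasurement.__init__ selects a per-page attribute that describes the action to run before MeasurePage, and CanRunForPage can skip pages that do not declare it. A sketch of that pairing; the action name and the reported metric are illustrative:

    from telemetry.page import page_measurement

    class ScrollThenMeasure(page_measurement.PageMeasurement):
      def __init__(self):
        # Each page is expected to carry a 'scroll_action' attribute naming
        # the (compound) action to perform before measurement.
        super(ScrollThenMeasure, self).__init__('scroll_action')

      def CanRunForPage(self, page):
        # Skip pages that do not declare the action.
        return hasattr(page, 'scroll_action')

      def MeasurePage(self, page, tab, results):
        results.Add('dom_node_count', 'count',
                    tab.EvaluateJavaScript(
                        'document.getElementsByTagName("*").length'))
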
self._options.wpr_mode = wpr_modes.WPR_RECORD @@ -108,7 +108,7 @@ class PageBenchmarkUnitTest( '', '', json.loads(archive_info_template % (test_archive, google_url))) ps.pages = [page_module.Page(google_url, ps)] - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(0, len(all_results.page_failures)) # Now replay it and verify that google.com is found but foo.com is not. @@ -117,14 +117,14 @@ class PageBenchmarkUnitTest( ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo( '', '', json.loads(archive_info_template % (test_archive, foo_url))) ps.pages = [page_module.Page(foo_url, ps)] - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(1, len(all_results.page_failures)) ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo( '', '', json.loads(archive_info_template % (test_archive, google_url))) ps.pages = [page_module.Page(google_url, ps)] - all_results = self.RunBenchmark(benchmark, ps, options=self._options) + all_results = self.RunMeasurement(measurement, ps, options=self._options) self.assertEquals(0, len(all_results.page_failures)) self.assertTrue(os.path.isfile(test_archive)) @@ -142,6 +142,6 @@ class PageBenchmarkUnitTest( ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html') setattr(ps.pages[0], 'test_action', {'action': 'mock'}) - benchmark = BenchWithAction() - self.RunBenchmark(benchmark, ps, options=self._options) + measurement = MeasurementWithAction() + self.RunMeasurement(measurement, ps, options=self._options) self.assertTrue(action_called[0]) diff --git a/tools/telemetry/telemetry/page/page_benchmark_unittest_base.py b/tools/telemetry/telemetry/page/page_measurement_unittest_base.py index 5face3e..685f601 100644 --- a/tools/telemetry/telemetry/page/page_benchmark_unittest_base.py +++ b/tools/telemetry/telemetry/page/page_measurement_unittest_base.py @@ -7,13 +7,13 @@ import unittest from telemetry.core import browser_finder from telemetry.page import page_runner from telemetry.page import page as page_module -from telemetry.page import page_benchmark_results +from telemetry.page import page_measurement_results from telemetry.page import page_set from telemetry.test import options_for_unittests -class PageBenchmarkUnitTestBase(unittest.TestCase): +class PageMeasurementUnitTestBase(unittest.TestCase): """unittest.TestCase-derived class to help in the construction of unit tests - for a benchmark.""" + for a measurement.""" def CreatePageSetFromFileInUnittestDataDir(self, test_filename): return self.CreatePageSet('file:///' + os.path.join( @@ -27,23 +27,23 @@ class PageBenchmarkUnitTestBase(unittest.TestCase): ps.pages.append(page) return ps - def RunBenchmark(self, benchmark, ps, options=None): - """Runs a benchmark against a pageset, returning the rows its outputs.""" + def RunMeasurement(self, measurement, ps, options=None): + """Runs a measurement against a pageset, returning the rows its outputs.""" if options is None: options = options_for_unittests.GetCopy() assert options temp_parser = options.CreateParser() - benchmark.AddCommandLineOptions(temp_parser) + measurement.AddCommandLineOptions(temp_parser) defaults = temp_parser.get_default_values() for k, v in defaults.__dict__.items(): if hasattr(options, k): continue setattr(options, k, v) - benchmark.CustomizeBrowserOptions(options) + 
measurement.CustomizeBrowserOptions(options) possible_browser = browser_finder.FindBrowser(options) - results = page_benchmark_results.PageBenchmarkResults() + results = page_measurement_results.PageMeasurementResults() with page_runner.PageRunner(ps) as runner: - runner.Run(options, possible_browser, benchmark, results) + runner.Run(options, possible_browser, measurement, results) return results diff --git a/tools/telemetry/telemetry/page/page_benchmark_value.py b/tools/telemetry/telemetry/page/page_measurement_value.py index 0977125..c2c8e0e 100644 --- a/tools/telemetry/telemetry/page/page_benchmark_value.py +++ b/tools/telemetry/telemetry/page/page_measurement_value.py @@ -6,7 +6,7 @@ from telemetry.page import perf_tests_helper def _Mean(l): return float(sum(l)) / len(l) if len(l) > 0 else 0.0 -class PageBenchmarkValue(object): +class PageMeasurementValue(object): def __init__(self, trace_name, units, value, chart_name, data_type): self.trace_name = trace_name self.units = units diff --git a/tools/telemetry/telemetry/page/page_runner.py b/tools/telemetry/telemetry/page/page_runner.py index 99c44b2..11763a2 100644 --- a/tools/telemetry/telemetry/page/page_runner.py +++ b/tools/telemetry/telemetry/page/page_runner.py @@ -12,7 +12,7 @@ import random from telemetry.core import util from telemetry.core import wpr_modes from telemetry.core import exceptions -from telemetry.page import page_benchmark_results +from telemetry.page import page_measurement_results from telemetry.page import page_filter as page_filter_module from telemetry.page import page_test @@ -81,7 +81,7 @@ class PageRunner(object): if not page.archive_path: if options.allow_live_sites: logging.warning(""" - No page set archive provided for the page %s. Benchmarking against live sites! + No page set archive provided for the page %s. Running against live sites! Results won't be repeatable or comparable. """, page.url) else: @@ -96,7 +96,7 @@ class PageRunner(object): if not os.path.isfile(page.archive_path): if options.allow_live_sites: logging.warning(""" - The page set archive %s for page %s does not exist, benchmarking against live + The page set archive %s for page %s does not exist, running against live sites! Results won't be repeatable or comparable. To fix this, either add svn-internal to your .gclient using @@ -159,7 +159,7 @@ class PageRunner(object): not self.has_called_will_run_page_set): # If discarding results, substitute a dummy object. results_for_current_run = ( - page_benchmark_results.PageBenchmarkResults()) + page_measurement_results.PageMeasurementResults()) else: results_for_current_run = out_results tries = 3 diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py index b175575..625c3af 100644 --- a/tools/telemetry/telemetry/page/page_runner_unittest.py +++ b/tools/telemetry/telemetry/page/page_runner_unittest.py @@ -40,7 +40,7 @@ class StubCredentialsBackend(object): class PageRunnerTests(unittest.TestCase): # TODO(nduca): Move the basic "test failed, test succeeded" tests from - # page_benchmark_unittest to here. + # page_measurement_unittest to here. 
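
PageMeasurementUnitTestBase above wraps the parser, browser-finding and PageRunner plumbing, so a measurement's unit test reduces to a few lines. A sketch, assuming a blank.html fixture like the one used elsewhere in this diff; the measurement itself is a hypothetical stand-in:

    from telemetry.page import page_measurement
    from telemetry.page import page_measurement_unittest_base
    from telemetry.test import options_for_unittests

    class TitleLengthMeasurement(page_measurement.PageMeasurement):
      def MeasurePage(self, page, tab, results):
        results.Add('title_length', 'chars',
                    tab.EvaluateJavaScript('document.title.length'))

    class TitleLengthMeasurementUnitTest(
        page_measurement_unittest_base.PageMeasurementUnitTestBase):
      def testMeasurementRuns(self):
        ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
        options = options_for_unittests.GetCopy()
        all_results = self.RunMeasurement(
            TitleLengthMeasurement(), ps, options=options)
        self.assertEquals(0, len(all_results.page_failures))
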
def testHandlingOfCrashedTab(self): ps = page_set.PageSet() diff --git a/tools/telemetry/telemetry/page/page_test.py b/tools/telemetry/telemetry/page/page_test.py index 3e83685..a19c0df 100644 --- a/tools/telemetry/telemetry/page/page_test.py +++ b/tools/telemetry/telemetry/page/page_test.py @@ -35,7 +35,7 @@ def GetCompoundActionFromPage(page, action_name): return action_list class Failure(Exception): - """Exception that can be thrown from PageBenchmark to indicate an + """Exception that can be thrown from PageMeasurement to indicate an undesired but designed-for problem.""" pass @@ -89,7 +89,7 @@ class PageTest(object): return self._discard_first_result def AddCommandLineOptions(self, parser): - """Override to expose command-line options for this benchmark. + """Override to expose command-line options for this test. The provided parser is an optparse.OptionParser instance and accepts all normal results. The parsed options are available in Run as diff --git a/tools/telemetry/telemetry/page/page_test_runner.py b/tools/telemetry/telemetry/page/page_test_runner.py index bd316a1..92a74b5 100644 --- a/tools/telemetry/telemetry/page/page_test_runner.py +++ b/tools/telemetry/telemetry/page/page_test_runner.py @@ -44,48 +44,34 @@ class PageTestRunner(object): self.RunTestOnPageSet(test, ps, results) return self.OutputResults(results) - def AttemptToFindTest(self, args, test_dir): - """Find the test by matching the arguments against the known test names. + def FindTestConstructors(self, test_dir): + return discover.DiscoverClasses( + test_dir, os.path.join(test_dir, '..'), self.test_class) + + def FindTestName(self, test_constructors, args): + """Find the test name in an arbitrary argument list. We can't use the optparse parser, because the test may add its own command-line options. If the user passed in any of those, the optparse parsing will fail. Returns: - An instance of the test class on success. - None on failure. + test_name or none """ - tests = discover.DiscoverClasses( - test_dir, os.path.join(test_dir, '..'), self.test_class) - test_name = None - for arg in args: - if arg in tests: + for arg in [self.GetModernizedTestName(a) for a in args]: + if arg in test_constructors: test_name = arg - if test_name: - return tests[test_name]() - else: - return None - - def GetTest(self, test_dir): - tests = discover.DiscoverClasses( - test_dir, os.path.join(test_dir, '..'), self.test_class) + return test_name - if len(self._args) < 1: - error_message = 'No %s specified.\nAvailable %ss:\n' % ( - self.test_class_name, self.test_class_name) - test_list_string = ',\n'.join(sorted(tests.keys())) - self.PrintParseError(error_message + test_list_string) - - test_name = self._args[0] - if test_name not in tests: - error_message = 'No %s named %s.\nAvailable %ss:\n' % ( - self.test_class_name, self._args[0], self.test_class_name) - test_list_string = ',\n'.join(sorted(tests.keys())) - self.PrintParseError(error_message + test_list_string) + def GetModernizedTestName(self, arg): + """Sometimes tests change names but buildbots keep calling the old name. - return tests[test_name]() + If arg matches an old test name, return the new test name instead. + Otherwise, return the arg. 
+ """ + return arg def GetPageSet(self, test, page_set_filenames): ps = test.CreatePageSet(self._options) @@ -109,13 +95,27 @@ class PageTestRunner(object): self.AddCommandLineOptions(self._parser) page_runner.PageRunner.AddCommandLineOptions(self._parser) - test = self.AttemptToFindTest(args, test_dir) - if test: + test_constructors = self.FindTestConstructors(test_dir) + test_name = self.FindTestName(test_constructors, args) + test = None + if test_name: + test = test_constructors[test_name]() test.AddCommandLineOptions(self._parser) _, self._args = self._parser.parse_args() - test = self.GetTest(test_dir) + if len(self._args) < 1: + error_message = 'No %s specified.\nAvailable %ss:\n' % ( + self.test_class_name, self.test_class_name) + test_list_string = ',\n'.join(sorted(test_constructors.keys())) + self.PrintParseError(error_message + test_list_string) + + if not test: + error_message = 'No %s named %s.\nAvailable %ss:\n' % ( + self.test_class_name, self._args[0], self.test_class_name) + test_list_string = ',\n'.join(sorted(test_constructors.keys())) + self.PrintParseError(error_message + test_list_string) + ps = self.GetPageSet(test, page_set_filenames) if len(self._args) > 2: diff --git a/tools/telemetry/telemetry/page/record_wpr.py b/tools/telemetry/telemetry/page/record_wpr.py index b315401..24d655a 100755 --- a/tools/telemetry/telemetry/page/record_wpr.py +++ b/tools/telemetry/telemetry/page/record_wpr.py @@ -11,21 +11,21 @@ import time from telemetry.core import browser_finder from telemetry.core import browser_options from telemetry.core import wpr_modes -from telemetry.page import page_benchmark +from telemetry.page import page_measurement from telemetry.page import page_runner from telemetry.page import page_set from telemetry.page import page_test from telemetry.test import discover class RecordPage(page_test.PageTest): - def __init__(self, benchmarks): + def __init__(self, measurements): # This class overwrites PageTest.Run, so that the test method name is not # really used (except for throwing an exception if it doesn't exist). super(RecordPage, self).__init__('Run') self._action_names = set( - [benchmark().action_name_to_run - for benchmark in benchmarks.values() - if benchmark().action_name_to_run]) + [measurement().action_name_to_run + for measurement in measurements.values() + if measurement().action_name_to_run]) def CanRunForPage(self, page): return bool(self._CompoundActionsForPage(page)) @@ -40,7 +40,7 @@ class RecordPage(page_test.PageTest): tab.WaitForDocumentReadyStateToBeComplete() time.sleep(3) - # Run the actions for all benchmarks. Reload the page between + # Run the actions for all measurements. Reload the page between # actions. should_reload = False for compound_action in self._CompoundActionsForPage(page): @@ -59,15 +59,15 @@ class RecordPage(page_test.PageTest): return actions -def Main(benchmark_dir): - benchmarks = discover.DiscoverClasses(benchmark_dir, - os.path.join(benchmark_dir, '..'), - page_benchmark.PageBenchmark) +def Main(measurement_dir): + measurements = discover.DiscoverClasses(measurement_dir, + os.path.join(measurement_dir, '..'), + page_measurement.PageMeasurement) options = browser_options.BrowserOptions() parser = options.CreateParser('%prog <page_set>') page_runner.PageRunner.AddCommandLineOptions(parser) - recorder = RecordPage(benchmarks) + recorder = RecordPage(measurements) recorder.AddCommandLineOptions(parser) _, args = parser.parse_args() |