Diffstat (limited to 'tools/perf/perf_tools')
7 files changed, 39 insertions, 40 deletions
diff --git a/tools/perf/perf_tools/first_paint_time_benchmark.py b/tools/perf/perf_tools/first_paint_time_benchmark.py
index 0eb5840..ba8fbc1 100644
--- a/tools/perf/perf_tools/first_paint_time_benchmark.py
+++ b/tools/perf/perf_tools/first_paint_time_benchmark.py
@@ -8,9 +8,10 @@ from chrome_remote_control import multi_page_benchmark
 from chrome_remote_control import util
 
 class FirstPaintTimeBenchmark(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, _, tab):
+  def MeasurePage(self, _, tab, results):
     if tab.browser.is_content_shell:
-      return {'first_paint_secs': 'unsupported'}
+      results.Add('first_paint', 'seconds', 'unsupported')
+      return
 
     tab.runtime.Execute("""
         window.__rafFired = false;
@@ -22,11 +23,9 @@ class FirstPaintTimeBenchmark(multi_page_benchmark.MultiPageBenchmark):
 
     first_paint_secs = tab.runtime.Evaluate(
         'window.chrome.loadTimes().firstPaintTime - ' +
-        'window.chrome.loadTimes().startLoadTime')
+        'window.chrome.loadTimes().requestTime')
 
-    return {
-      'first_paint_secs': round(first_paint_secs, 1)
-    }
+    results.Add('first_paint', 'seconds', round(first_paint_secs, 1))
 
 def Main():
   return multi_page_benchmark.Main(FirstPaintTimeBenchmark())
diff --git a/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py b/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
index 87e0559..60a549d 100644
--- a/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
+++ b/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
@@ -16,8 +16,8 @@ class FirstPaintTimeBenchmarkUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))
 
-    results0 = all_results.page_results[0]['results']
-    if results0['first_paint_secs'] == 'unsupported':
+    results0 = all_results.page_results[0]
+    if results0['first_paint'] == 'unsupported':
       # This test can't run on content_shell.
       return
-    self.assertTrue(results0['first_paint_secs'] > 0)
+    self.assertTrue(results0['first_paint'] > 0)
diff --git a/tools/perf/perf_tools/scrolling_benchmark.py b/tools/perf/perf_tools/scrolling_benchmark.py
index 5eae210..d472a2a 100644
--- a/tools/perf/perf_tools/scrolling_benchmark.py
+++ b/tools/perf/perf_tools/scrolling_benchmark.py
@@ -10,7 +10,7 @@ class DidNotScrollException(multi_page_benchmark.MeasurementFailure):
   def __init__(self):
     super(DidNotScrollException, self).__init__('Page did not scroll')
 
-def CalcScrollResults(rendering_stats_deltas):
+def CalcScrollResults(rendering_stats_deltas, results):
   num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen']
 
   mean_frame_time_seconds = (
@@ -21,10 +21,8 @@
       rendering_stats_deltas['droppedFrameCount'] /
       float(num_frames_sent_to_screen))
 
-  return {
-    'mean_frame_time_ms': round(mean_frame_time_seconds * 1000, 3),
-    'dropped_percent': round(dropped_percent * 100, 1)
-  }
+  results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3))
+  results.Add('dropped_percent', '%', round(dropped_percent * 100, 1))
 
 class ScrollingBenchmark(multi_page_benchmark.MultiPageBenchmark):
   def __init__(self):
@@ -79,16 +77,12 @@ class ScrollingBenchmark(multi_page_benchmark.MultiPageBenchmark):
     if not options.no_gpu_benchmarking_extension:
       options.extra_browser_args.append('--enable-gpu-benchmarking')
 
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     rendering_stats_deltas = self.ScrollPageFully(page, tab)
-    scroll_results = CalcScrollResults(rendering_stats_deltas)
+    CalcScrollResults(rendering_stats_deltas, results)
 
     if self.options.report_all_results:
-      all_results = {}
-      all_results.update(rendering_stats_deltas)
-      all_results.update(scroll_results)
-      return all_results
-    return scroll_results
-
+      for k, v in rendering_stats_deltas.iteritems():
+        results.Add(k, '', v)
 
 def Main():
diff --git a/tools/perf/perf_tools/scrolling_benchmark_unittest.py b/tools/perf/perf_tools/scrolling_benchmark_unittest.py
index 3993820..7848517 100644
--- a/tools/perf/perf_tools/scrolling_benchmark_unittest.py
+++ b/tools/perf/perf_tools/scrolling_benchmark_unittest.py
@@ -1,6 +1,7 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+from chrome_remote_control import multi_page_benchmark
 from chrome_remote_control import multi_page_benchmark_unittest_base
 from perf_tools import scrolling_benchmark
 
@@ -15,19 +16,22 @@ class ScrollingBenchmarkUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))
 
-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     self.assertTrue('dropped_percent' in results0)
-    self.assertTrue('mean_frame_time_ms' in results0)
+    self.assertTrue('mean_frame_time' in results0)
 
   def testCalcResultsFromRAFRenderStats(self):
     rendering_stats = {'droppedFrameCount': 5,
                        'totalTimeInSeconds': 1,
                        'numAnimationFrames': 10,
                        'numFramesSentToScreen': 10}
-    res = scrolling_benchmark.CalcScrollResults(rendering_stats)
-    self.assertEquals(50, res['dropped_percent'])
-    self.assertAlmostEquals(100, res['mean_frame_time_ms'], 2)
+    res = multi_page_benchmark.BenchmarkResults()
+    res.WillMeasurePage(True)
+    scrolling_benchmark.CalcScrollResults(rendering_stats, res)
+    res.DidMeasurePage()
+    self.assertEquals(50, res.page_results[0]['dropped_percent'])
+    self.assertAlmostEquals(100, res.page_results[0]['mean_frame_time'], 2)
 
   def testCalcResultsRealRenderStats(self):
     rendering_stats = {'numFramesSentToScreen': 60,
@@ -42,9 +46,12 @@
                        'totalTextureUploadTimeInSeconds': 0,
                        'totalRasterizeTimeInSeconds': 0,
                        'totalTimeInSeconds': 1.0}
-    res = scrolling_benchmark.CalcScrollResults(rendering_stats)
-    self.assertEquals(0, res['dropped_percent'])
-    self.assertAlmostEquals(1000/60.0, res['mean_frame_time_ms'], 2)
+    res = multi_page_benchmark.BenchmarkResults()
+    res.WillMeasurePage(True)
+    scrolling_benchmark.CalcScrollResults(rendering_stats, res)
+    res.DidMeasurePage()
+    self.assertEquals(0, res.page_results[0]['dropped_percent'])
+    self.assertAlmostEquals(1000/60., res.page_results[0]['mean_frame_time'], 2)
 
 class ScrollingBenchmarkWithoutGpuBenchmarkingUnitTest(
     multi_page_benchmark_unittest_base.MultiPageBenchmarkUnitTestBase):
@@ -62,7 +69,7 @@ class ScrollingBenchmarkWithoutGpuBenchmarkingUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))
 
-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     self.assertTrue('dropped_percent' in results0)
-    self.assertTrue('mean_frame_time_ms' in results0)
+    self.assertTrue('mean_frame_time' in results0)
diff --git a/tools/perf/perf_tools/skpicture_printer.py b/tools/perf/perf_tools/skpicture_printer.py
index 2c0a982..97c9ea8 100755
--- a/tools/perf/perf_tools/skpicture_printer.py
+++ b/tools/perf/perf_tools/skpicture_printer.py
@@ -17,7 +17,7 @@ class SkPicturePrinter(multi_page_benchmark.MultiPageBenchmark):
     options.extra_browser_args.extend(['--enable-gpu-benchmarking',
                                        '--no-sandbox'])
 
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     # Derive output path from the URL. The pattern just replaces all special
     # characters in the url with underscore.
     outpath = re.sub('://|[.~!*\:@&=+$,/?%#]', '_', page.url)
@@ -27,7 +27,7 @@
     # Replace win32 path separator char '\' with '\\'.
     js = _JS.format(outpath.replace('\\', '\\\\'))
     tab.runtime.Evaluate(js)
-    return {'output_path': outpath}
+    results.Add('output_path', 'path', outpath)
 
 def Main():
   return multi_page_benchmark.Main(SkPicturePrinter())
diff --git a/tools/perf/perf_tools/skpicture_printer_unittest.py b/tools/perf/perf_tools/skpicture_printer_unittest.py
index b93fe34..6aaac7f 100755
--- a/tools/perf/perf_tools/skpicture_printer_unittest.py
+++ b/tools/perf/perf_tools/skpicture_printer_unittest.py
@@ -30,7 +30,7 @@ class SkPicturePrinterUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))
 
-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     outdir = results0['output_path']
     self.assertTrue('non_scrollable_page_html' in outdir)
diff --git a/tools/perf/perf_tools/texture_upload_benchmark.py b/tools/perf/perf_tools/texture_upload_benchmark.py
index c087e13..c837f6a 100644
--- a/tools/perf/perf_tools/texture_upload_benchmark.py
+++ b/tools/perf/perf_tools/texture_upload_benchmark.py
@@ -5,7 +5,7 @@ from chrome_remote_control import multi_page_benchmark
 from perf_tools import scrolling_benchmark
 
 class TextureUploadBenchmark(scrolling_benchmark.ScrollingBenchmark):
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     rendering_stats_deltas = self.ScrollPageFully(page, tab)
 
     if (('totalCommitCount' not in rendering_stats_deltas)
@@ -16,10 +16,9 @@
     averageCommitTimeMs = (
         1000 * rendering_stats_deltas['totalCommitTimeInSeconds'] /
         rendering_stats_deltas['totalCommitCount'])
 
-    return {
-      'texture_upload_count': rendering_stats_deltas['textureUploadCount'],
-      'average_commit_time_ms': averageCommitTimeMs
-    }
+    results.Add('texture_upload_count', 'count',
+                rendering_stats_deltas['textureUploadCount'])
+    results.Add('average_commit_time', 'ms', averageCommitTimeMs)
 
 def Main():
   return multi_page_benchmark.Main(TextureUploadBenchmark())
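
The pattern this change migrates to: every MeasurePage implementation now receives a results object and calls results.Add(name, units, value) instead of returning a dict, and the unit tests drive that same object through WillMeasurePage/DidMeasurePage before reading page_results. The sketch below is a minimal, hypothetical stand-in for multi_page_benchmark.BenchmarkResults, inferred only from the calls visible in this diff; it is not the real chrome_remote_control implementation.

# NOTE: hypothetical stand-in for chrome_remote_control's BenchmarkResults,
# inferred only from the calls visible in this diff (Add, WillMeasurePage,
# DidMeasurePage, page_results); the real implementation is not shown here.
class BenchmarkResults(object):
  def __init__(self):
    self.page_results = []  # one {measurement_name: value} dict per page
    self._current_page = None

  def WillMeasurePage(self, page):
    # Start collecting measurements for a new page.
    self._current_page = {}

  def Add(self, name, units, value):
    # Record one named measurement; the unit tests above look results up
    # by name only, so units are not stored in this simplified version.
    self._current_page[name] = value

  def DidMeasurePage(self):
    # Commit the finished page's measurements.
    self.page_results.append(self._current_page)
    self._current_page = None


def CalcScrollResults(rendering_stats_deltas, results):
  # Same arithmetic as the patched scrolling_benchmark.CalcScrollResults.
  num_frames = float(rendering_stats_deltas['numFramesSentToScreen'])
  mean_frame_time_seconds = (
      rendering_stats_deltas['totalTimeInSeconds'] / num_frames)
  dropped_percent = rendering_stats_deltas['droppedFrameCount'] / num_frames
  results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3))
  results.Add('dropped_percent', '%', round(dropped_percent * 100, 1))


# Usage mirroring testCalcResultsFromRAFRenderStats above.
res = BenchmarkResults()
res.WillMeasurePage(True)
CalcScrollResults({'droppedFrameCount': 5,
                   'totalTimeInSeconds': 1,
                   'numFramesSentToScreen': 10}, res)
res.DidMeasurePage()
assert res.page_results[0]['dropped_percent'] == 50.0
assert abs(res.page_results[0]['mean_frame_time'] - 100.0) < 0.01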