author     tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-10-12 17:11:18 +0000
committer  tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-10-12 17:11:18 +0000
commit     cf2b70ce4b396e88905ab0e2c4b2dac4e19610a5 (patch)
tree       5f23bfc12a577d3d8d156eaf78a0aeed332823fb /tools
parent     78ff329855b73fb391afe21a140dd40303feda66 (diff)
Output CRC results in a format the perfbots understand.
This required specifying units in the results. With this patch, we now
print a summary at the end of the benchmark like:
*RESULT mean_frame_time: mean_frame_time= [10.966,17.067,10.177] ms
Avg mean_frame_time: 12.736667ms
Sd mean_frame_time: 3.770871ms
BUG=None
TEST=None
Review URL: https://codereview.chromium.org/11090037
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@161604 0039d316-1c4b-4281-b951-d872f2087c98
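For context, the arithmetic behind the Avg and Sd lines above: the mean is taken over the per-page values, and Sd is the sample standard deviation (n - 1 in the denominator). A minimal sketch that reproduces the format (illustrative only; the patch itself delegates this to PrintPerfResult from build/android/pylib/perf_tests_helper.py, whose exact formatting may differ):

    import math

    def print_perf_result(measurement, trace, values, units):
      # Approximates the perfbot-readable lines quoted in the commit message.
      print('*RESULT %s: %s= [%s] %s' %
            (measurement, trace, ','.join(str(v) for v in values), units))
      mean = sum(values) / float(len(values))
      # Sample standard deviation (n - 1) reproduces the Sd line above.
      sd = math.sqrt(sum((v - mean) ** 2 for v in values) / (len(values) - 1))
      print('Avg %s: %f%s' % (measurement, mean, units))
      print('Sd %s: %f%s' % (measurement, sd, units))

    # [10.966, 17.067, 10.177] -> Avg 12.736667ms, Sd 3.770871ms, as above.
    print_perf_result('mean_frame_time', 'mean_frame_time',
                      [10.966, 17.067, 10.177], 'ms')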
Diffstat (limited to 'tools')
9 files changed, 123 insertions(+), 76 deletions(-)
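The heart of the patch is a change to the MeasurePage contract: instead of returning a dict with units baked into key names (mean_frame_time_ms), benchmarks now record named, unit-tagged values on a results object. A schematic sketch of the new shape before the diff (the class name and the 12.7 value are made up for illustration):

    from chrome_remote_control import multi_page_benchmark

    class ExampleBenchmark(multi_page_benchmark.MultiPageBenchmark):
      # Old contract: MeasurePage(self, page, tab), returning e.g.
      # {'mean_frame_time_ms': 12.7}.
      def MeasurePage(self, page, tab, results):
        # New contract: explicit name and units let the runner emit perfbot
        # summaries and unit-labeled CSV headers.
        results.Add('mean_frame_time', 'ms', 12.7)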
diff --git a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
index 4267262..269d49b 100644
--- a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
+++ b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
@@ -1,6 +1,7 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+from collections import defaultdict
 import csv
 import logging
 import os
@@ -12,43 +13,87 @@
 from chrome_remote_control import page_runner
 from chrome_remote_control import page_set
 from chrome_remote_control import page_test

+# Get build/android/pylib scripts into our path.
+# TODO(tonyg): Move perf_tests_helper.py to a common location.
+sys.path.append(
+    os.path.abspath(
+        os.path.join(os.path.dirname(__file__),
+                     '../../../build/android/pylib')))
+from perf_tests_helper import PrintPerfResult  # pylint: disable=F0401
+
 class MeasurementFailure(page_test.Failure):
   """Exception that can be thrown from MeasurePage to indicate an undesired
   but designed-for problem."""
   pass

+
 class BenchmarkResults(page_test.PageTestResults):
   def __init__(self):
     super(BenchmarkResults, self).__init__()
+    self.results_summary = defaultdict(list)
     self.page_results = []
+    self.field_names = None
+    self.field_units = {}
+
+    self._page = None
+    self._page_values = {}
+
+  def WillMeasurePage(self, page):
+    self._page = page
+    self._page_values = {}
+
+  def Add(self, name, units, value):
+    assert name not in self._page_values, 'Result names must be unique'
+    assert name != 'url', 'The name url cannot be used'
+    if self.field_names:
+      assert name in self.field_names, """MeasurePage returned inconsistent
+results! You must return the same dict keys every time."""
+    else:
+      self.field_units[name] = units
+    self._page_values[name] = value
+
+  def DidMeasurePage(self):
+    assert self._page, 'Failed to call WillMeasurePage'
+
+    if not self.field_names:
+      self.field_names = self._page_values.keys()
+      self.field_names.sort()
+
+    self.page_results.append(self._page_values)
+    for name in self.field_names:
+      units = self.field_units[name]
+      value = self._page_values[name]
+      self.results_summary[(name, units)].append(value)

-  def AddPageResults(self, page, results):
-    self.page_results.append({'page': page,
-                              'results': results})

-class CsvBenchmarkResults(page_test.PageTestResults):
+class CsvBenchmarkResults(BenchmarkResults):
   def __init__(self, results_writer):
     super(CsvBenchmarkResults, self).__init__()
     self._results_writer = results_writer
-    self.field_names = None
+    self._did_write_header = False

-  def AddPageResults(self, page, results):
-    assert 'url' not in results
+  def DidMeasurePage(self):
+    super(CsvBenchmarkResults, self).DidMeasurePage()

-    if not self.field_names:
-      self.field_names = results.keys()
-      self.field_names.sort()
-      self._results_writer.writerow(['url'] + self.field_names)
+    if not self._did_write_header:
+      self._did_write_header = True
+      row = ['url']
+      for name in self.field_names:
+        row.append('%s (%s)' % (name, self.field_units[name]))
+      self._results_writer.writerow(row)

-    row = [page.url]
+    row = [self._page.url]
     for name in self.field_names:
-      # If this assertion pops, your MeasurePage is returning inconsistent
-      # results! You must return the same dict keys every time!
-      assert name in results, """MeasurePage returned inconsistent results! You
-must return the same dict keys every time."""
-      row.append(results[name])
+      value = self._page_values[name]
+      row.append(value)
     self._results_writer.writerow(row)

+  def PrintSummary(self, trace_tag):
+    for measurement_units, values in self.results_summary.iteritems():
+      measurement, units = measurement_units
+      trace = measurement + (trace_tag or '')
+      PrintPerfResult(measurement, trace, values, units)
+
+
 # TODO(nduca): Rename to page_benchmark
 class MultiPageBenchmark(page_test.PageTest):
@@ -58,10 +103,10 @@ class MultiPageBenchmark(page_test.PageTest):
   example:

      class BodyChildElementBenchmark(MultiPageBenchmark):
-       def MeasurePage(self, page, tab):
+       def MeasurePage(self, page, tab, results):
         body_child_count = tab.runtime.Evaluate(
             'document.body.children.length')
-        return {'body_child_count': body_child_count}
+        results.Add('body_children', 'count', body_child_count)

      if __name__ == '__main__':
        multi_page_benchmark.Main(BodyChildElementBenchmark())
@@ -76,39 +121,39 @@ class MultiPageBenchmark(page_test.PageTest):
      def AddOptions(parser):
        parser.add_option('--element', action='store', default='body')

-     def MeasurePage(self, page, tab):
+     def MeasurePage(self, page, tab, results):
        body_child_count = tab.runtime.Evaluate(
            'document.querySelector('%s').children.length')
-       return {'child_count': child_count}
+       results.Add('children', 'count', child_count)
  """
  def __init__(self):
    super(MultiPageBenchmark, self).__init__('_RunTest')

  def _RunTest(self, page, tab, results):
-    page_results = self.MeasurePage(page, tab)
-    results.AddPageResults(page, page_results)
+    results.WillMeasurePage(page)
+    self.MeasurePage(page, tab, results)
+    results.DidMeasurePage()

-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
    """Override to actually measure the page's performance.

    page is a page_set.Page
    tab is an instance of chrome_remote_control.Tab

-    Should return a dictionary of measured values on success, or raise an
-    exception on failure. The fields in the dictionary must be the same across
-    all iterations. The dictionary must not include the field 'url.' The
-    MultiPageBenchmark will add that automatically to the final output.
+    Should call results.Add(name, units, value) for each result, or raise an
+    exception on failure. The name and units of each Add() call must be
+    the same across all iterations. The name 'url' must not be used.

    Prefer field names that are in accordance with python variable style. E.g.
    field_name.

    Put together:

-      def MeasurePage(self, page, tab):
+      def MeasurePage(self, page, tab, results):
        res = tab.runtime.Evaluate('2+2')
        if res != 4:
          raise Exception('Oh, wow.')
-        return {'two_plus_two': res}
+        results.Add('two_plus_two', 'count', res)
    """
    raise NotImplementedError()
@@ -143,6 +188,9 @@ def Main(benchmark, args=None):
  results = CsvBenchmarkResults(csv.writer(sys.stdout))
  with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, benchmark, results)
+  # When using an exact executable, assume it is a reference build for the
+  # purpose of outputting the perf results.
+  results.PrintSummary(options.browser_executable and '_ref' or '')

  if len(results.page_failures):
    logging.warning('Failed pages: %s', '\n'.join(
diff --git a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark_unittest.py b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark_unittest.py
index 9eb6034..e958380 100644
--- a/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark_unittest.py
+++ b/tools/chrome_remote_control/chrome_remote_control/multi_page_benchmark_unittest.py
@@ -8,24 +8,24 @@
 from chrome_remote_control import multi_page_benchmark_unittest_base
 from chrome_remote_control import page_set

 class BenchThatFails(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     raise multi_page_benchmark.MeasurementFailure('Intentional failure.')

 class BenchThatHasDefaults(multi_page_benchmark.MultiPageBenchmark):
   def AddOptions(self, parser):
     parser.add_option('-x', dest='x', default=3)

-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     assert self.options.x == 3
-    return {'x': 7}
+    results.Add('x', 'ms', 7)

 class BenchForBlank(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     contents = tab.runtime.Evaluate('document.body.textContent')
     assert contents.strip() == 'Hello world'

 class BenchForReplay(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     # Web Page Replay returns '404 Not found' if a page is not in the archive.
     contents = tab.runtime.Evaluate('document.body.textContent')
     if '404 Not Found' in contents.strip():
@@ -56,7 +56,7 @@ class MultiPageBenchmarkUnitTest(
     benchmark = BenchThatHasDefaults()
     all_results = self.RunBenchmark(benchmark, ps)
     self.assertEquals(len(all_results.page_results), 1)
-    self.assertEquals(all_results.page_results[0]['results']['x'], 7)
+    self.assertEquals(all_results.page_results[0]['x'], 7)

   def testRecordAndReplay(self):
     test_archive = '/tmp/google.wpr'
diff --git a/tools/perf/perf_tools/first_paint_time_benchmark.py b/tools/perf/perf_tools/first_paint_time_benchmark.py
index 0eb5840..ba8fbc1 100644
--- a/tools/perf/perf_tools/first_paint_time_benchmark.py
+++ b/tools/perf/perf_tools/first_paint_time_benchmark.py
@@ -8,9 +8,10 @@
 from chrome_remote_control import multi_page_benchmark
 from chrome_remote_control import util

 class FirstPaintTimeBenchmark(multi_page_benchmark.MultiPageBenchmark):
-  def MeasurePage(self, _, tab):
+  def MeasurePage(self, _, tab, results):
     if tab.browser.is_content_shell:
-      return {'first_paint_secs': 'unsupported'}
+      results.Add('first_paint', 'seconds', 'unsupported')
+      return

     tab.runtime.Execute("""
         window.__rafFired = false;
@@ -22,11 +23,9 @@ class FirstPaintTimeBenchmark(multi_page_benchmark.MultiPageBenchmark):

     first_paint_secs = tab.runtime.Evaluate(
         'window.chrome.loadTimes().firstPaintTime - ' +
-        'window.chrome.loadTimes().startLoadTime')
+        'window.chrome.loadTimes().requestTime')

-    return {
-      'first_paint_secs': round(first_paint_secs, 1)
-    }
+    results.Add('first_paint', 'seconds', round(first_paint_secs, 1))

 def Main():
   return multi_page_benchmark.Main(FirstPaintTimeBenchmark())
diff --git a/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py b/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
index 87e0559..60a549d 100644
--- a/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
+++ b/tools/perf/perf_tools/first_paint_time_benchmark_unittest.py
@@ -16,8 +16,8 @@ class FirstPaintTimeBenchmarkUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))

-    results0 = all_results.page_results[0]['results']
-    if results0['first_paint_secs'] == 'unsupported':
+    results0 = all_results.page_results[0]
+    if results0['first_paint'] == 'unsupported':
       # This test can't run on content_shell.
       return
-    self.assertTrue(results0['first_paint_secs'] > 0)
+    self.assertTrue(results0['first_paint'] > 0)
diff --git a/tools/perf/perf_tools/scrolling_benchmark.py b/tools/perf/perf_tools/scrolling_benchmark.py
index 5eae210..d472a2a 100644
--- a/tools/perf/perf_tools/scrolling_benchmark.py
+++ b/tools/perf/perf_tools/scrolling_benchmark.py
@@ -10,7 +10,7 @@ class DidNotScrollException(multi_page_benchmark.MeasurementFailure):
   def __init__(self):
     super(DidNotScrollException, self).__init__('Page did not scroll')

-def CalcScrollResults(rendering_stats_deltas):
+def CalcScrollResults(rendering_stats_deltas, results):
   num_frames_sent_to_screen = rendering_stats_deltas['numFramesSentToScreen']

   mean_frame_time_seconds = (
@@ -21,10 +21,8 @@ def CalcScrollResults(rendering_stats_deltas):
     rendering_stats_deltas['droppedFrameCount'] /
     float(num_frames_sent_to_screen))

-  return {
-    'mean_frame_time_ms': round(mean_frame_time_seconds * 1000, 3),
-    'dropped_percent': round(dropped_percent * 100, 1)
-  }
+  results.Add('mean_frame_time', 'ms', round(mean_frame_time_seconds * 1000, 3))
+  results.Add('dropped_percent', '%', round(dropped_percent * 100, 1))

 class ScrollingBenchmark(multi_page_benchmark.MultiPageBenchmark):
   def __init__(self):
@@ -79,16 +77,12 @@ class ScrollingBenchmark(multi_page_benchmark.MultiPageBenchmark):
     if not options.no_gpu_benchmarking_extension:
       options.extra_browser_args.append('--enable-gpu-benchmarking')

-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     rendering_stats_deltas = self.ScrollPageFully(page, tab)
-    scroll_results = CalcScrollResults(rendering_stats_deltas)
+    CalcScrollResults(rendering_stats_deltas, results)
     if self.options.report_all_results:
-      all_results = {}
-      all_results.update(rendering_stats_deltas)
-      all_results.update(scroll_results)
-      return all_results
-    return scroll_results
-
+      for k, v in rendering_stats_deltas.iteritems():
+        results.Add(k, '', v)

 def Main():
diff --git a/tools/perf/perf_tools/scrolling_benchmark_unittest.py b/tools/perf/perf_tools/scrolling_benchmark_unittest.py
index 3993820..7848517 100644
--- a/tools/perf/perf_tools/scrolling_benchmark_unittest.py
+++ b/tools/perf/perf_tools/scrolling_benchmark_unittest.py
@@ -1,6 +1,7 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+from chrome_remote_control import multi_page_benchmark
 from chrome_remote_control import multi_page_benchmark_unittest_base
 from perf_tools import scrolling_benchmark

@@ -15,19 +16,22 @@ class ScrollingBenchmarkUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))

-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     self.assertTrue('dropped_percent' in results0)
-    self.assertTrue('mean_frame_time_ms' in results0)
+    self.assertTrue('mean_frame_time' in results0)

   def testCalcResultsFromRAFRenderStats(self):
     rendering_stats = {'droppedFrameCount': 5,
                        'totalTimeInSeconds': 1,
                        'numAnimationFrames': 10,
                        'numFramesSentToScreen': 10}
-    res = scrolling_benchmark.CalcScrollResults(rendering_stats)
-    self.assertEquals(50, res['dropped_percent'])
-    self.assertAlmostEquals(100, res['mean_frame_time_ms'], 2)
+    res = multi_page_benchmark.BenchmarkResults()
+    res.WillMeasurePage(True)
+    scrolling_benchmark.CalcScrollResults(rendering_stats, res)
+    res.DidMeasurePage()
+    self.assertEquals(50, res.page_results[0]['dropped_percent'])
+    self.assertAlmostEquals(100, res.page_results[0]['mean_frame_time'], 2)

   def testCalcResultsRealRenderStats(self):
     rendering_stats = {'numFramesSentToScreen': 60,
@@ -42,9 +46,12 @@ class ScrollingBenchmarkUnitTest(
                        'totalTextureUploadTimeInSeconds': 0,
                        'totalRasterizeTimeInSeconds': 0,
                        'totalTimeInSeconds': 1.0}
-    res = scrolling_benchmark.CalcScrollResults(rendering_stats)
-    self.assertEquals(0, res['dropped_percent'])
-    self.assertAlmostEquals(1000/60.0, res['mean_frame_time_ms'], 2)
+    res = multi_page_benchmark.BenchmarkResults()
+    res.WillMeasurePage(True)
+    scrolling_benchmark.CalcScrollResults(rendering_stats, res)
+    res.DidMeasurePage()
+    self.assertEquals(0, res.page_results[0]['dropped_percent'])
+    self.assertAlmostEquals(1000/60., res.page_results[0]['mean_frame_time'], 2)

 class ScrollingBenchmarkWithoutGpuBenchmarkingUnitTest(
     multi_page_benchmark_unittest_base.MultiPageBenchmarkUnitTestBase):
@@ -62,7 +69,7 @@ class ScrollingBenchmarkWithoutGpuBenchmarkingUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))

-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     self.assertTrue('dropped_percent' in results0)
-    self.assertTrue('mean_frame_time_ms' in results0)
+    self.assertTrue('mean_frame_time' in results0)
diff --git a/tools/perf/perf_tools/skpicture_printer.py b/tools/perf/perf_tools/skpicture_printer.py
index 2c0a982..97c9ea8 100755
--- a/tools/perf/perf_tools/skpicture_printer.py
+++ b/tools/perf/perf_tools/skpicture_printer.py
@@ -17,7 +17,7 @@ class SkPicturePrinter(multi_page_benchmark.MultiPageBenchmark):
     options.extra_browser_args.extend(['--enable-gpu-benchmarking',
                                        '--no-sandbox'])

-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     # Derive output path from the URL. The pattern just replaces all special
     # characters in the url with underscore.
     outpath = re.sub('://|[.~!*\:@&=+$,/?%#]', '_', page.url)
@@ -27,7 +27,7 @@ class SkPicturePrinter(multi_page_benchmark.MultiPageBenchmark):
     # Replace win32 path separator char '\' with '\\'.
     js = _JS.format(outpath.replace('\\', '\\\\'))
     tab.runtime.Evaluate(js)
-    return {'output_path': outpath}
+    results.Add('output_path', 'path', outpath)

 def Main():
   return multi_page_benchmark.Main(SkPicturePrinter())
diff --git a/tools/perf/perf_tools/skpicture_printer_unittest.py b/tools/perf/perf_tools/skpicture_printer_unittest.py
index b93fe34..6aaac7f 100755
--- a/tools/perf/perf_tools/skpicture_printer_unittest.py
+++ b/tools/perf/perf_tools/skpicture_printer_unittest.py
@@ -30,7 +30,7 @@ class SkPicturePrinterUnitTest(
     self.assertEqual(0, len(all_results.page_failures))
     self.assertEqual(1, len(all_results.page_results))

-    results0 = all_results.page_results[0]['results']
+    results0 = all_results.page_results[0]
     outdir = results0['output_path']
     self.assertTrue('non_scrollable_page_html' in outdir)
diff --git a/tools/perf/perf_tools/texture_upload_benchmark.py b/tools/perf/perf_tools/texture_upload_benchmark.py
index c087e13..c837f6a 100644
--- a/tools/perf/perf_tools/texture_upload_benchmark.py
+++ b/tools/perf/perf_tools/texture_upload_benchmark.py
@@ -5,7 +5,7 @@
 from chrome_remote_control import multi_page_benchmark
 from perf_tools import scrolling_benchmark

 class TextureUploadBenchmark(scrolling_benchmark.ScrollingBenchmark):
-  def MeasurePage(self, page, tab):
+  def MeasurePage(self, page, tab, results):
     rendering_stats_deltas = self.ScrollPageFully(page, tab)

     if (('totalCommitCount' not in rendering_stats_deltas)
@@ -16,10 +16,9 @@ class TextureUploadBenchmark(scrolling_benchmark.ScrollingBenchmark):
     averageCommitTimeMs = (
         1000 * rendering_stats_deltas['totalCommitTimeInSeconds'] /
         rendering_stats_deltas['totalCommitCount'])

-    return {
-      'texture_upload_count': rendering_stats_deltas['textureUploadCount'],
-      'average_commit_time_ms': averageCommitTimeMs
-    }
+    results.Add('texture_upload_count', 'count',
+                rendering_stats_deltas['textureUploadCount'])
+    results.Add('average_commit_time', 'ms', averageCommitTimeMs)

 def Main():
   return multi_page_benchmark.Main(TextureUploadBenchmark())
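The updated unit tests above show the intended lifecycle of the new results object; a minimal sketch of driving BenchmarkResults directly, assuming the classes as defined in this patch (the measurement names and values are made up for illustration):

    from chrome_remote_control import multi_page_benchmark

    results = multi_page_benchmark.BenchmarkResults()
    results.WillMeasurePage(True)  # the tests pass a truthy placeholder page
    results.Add('mean_frame_time', 'ms', 10.966)
    results.Add('dropped_percent', '%', 50)
    results.DidMeasurePage()

    # Per-page values are keyed by measurement name...
    assert results.page_results[0]['mean_frame_time'] == 10.966
    # ...and the summary groups values across pages by (name, units),
    # which is what PrintSummary feeds to PrintPerfResult.
    assert results.results_summary[('mean_frame_time', 'ms')] == [10.966]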