diff options
author | qinmin@chromium.org <qinmin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-02-01 05:58:54 +0000 |
---|---|---|
committer | qinmin@chromium.org <qinmin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-02-01 05:58:54 +0000 |
commit | db556fe2511c54ab336b6f32221761bb0de993b3 (patch) | |
tree | 09f81931f951e7b298b8ae9d9ed2fe6ed98688bf /tools | |
parent | 2b498344ae34277f3967d0d2b302861bcd1299b5 (diff) | |
download | chromium_src-db556fe2511c54ab336b6f32221761bb0de993b3.zip chromium_src-db556fe2511c54ab336b6f32221761bb0de993b3.tar.gz chromium_src-db556fe2511c54ab336b6f32221761bb0de993b3.tar.bz2 |
As Chrome on Android starts to use deferred image decoding, image decoding may not be captured by InspectorTimelineAgent.
In such case, we cannot rely on telemetry to get the decoding time.
Disable this test for now while we are working on a fix.
Since this test is Android-only, just disable it.
BUG=173184
Review URL: https://chromiumcodereview.appspot.com/12100007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180094 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r-- | tools/perf/perf_tools/image_decoding_benchmark.py | 46 | ||||
-rw-r--r-- | tools/perf/perf_tools/image_decoding_benchmark_unittest.py | 21 |
2 files changed, 39 insertions, 28 deletions
diff --git a/tools/perf/perf_tools/image_decoding_benchmark.py b/tools/perf/perf_tools/image_decoding_benchmark.py index c14a84c..ff13431 100644 --- a/tools/perf/perf_tools/image_decoding_benchmark.py +++ b/tools/perf/perf_tools/image_decoding_benchmark.py @@ -6,29 +6,37 @@ from telemetry import multi_page_benchmark class ImageDecoding(multi_page_benchmark.MultiPageBenchmark): - def WillNavigateToPage(self, page, tab): - tab.StartTimelineRecording() + # TODO(qinmin): uncomment this after we fix the image decoding benchmark + # for lazily decoded images + # def WillNavigateToPage(self, page, tab): + # tab.StartTimelineRecording() def MeasurePage(self, page, tab, results): - tab.StopTimelineRecording() - def _IsDone(): - return tab.EvaluateJavaScript('isDone') + # TODO(qinmin): This android only test may fail after we switch to + # deferred image decoding and impl-side painting. Before we fix the test, + # temporarily disable calculation for lazily decoded images. + # Uncommented the following lines after we fix the timeline for lazily + # decoded images. + return + # tab.StopTimelineRecording() + # def _IsDone(): + # return tab.EvaluateJavaScript('isDone') - decode_image_events = \ - tab.timeline_model.GetAllOfName('DecodeImage') + # decode_image_events = \ + # tab.timeline_model.GetAllOfName('DecodeImage') # If it is a real image benchmark, then store only the last-minIterations # decode tasks. 
- if (hasattr(page, - 'image_decoding_benchmark_limit_results_to_min_iterations') and - page.image_decoding_benchmark_limit_results_to_min_iterations): - assert _IsDone() - min_iterations = tab.EvaluateJavaScript('minIterations') - decode_image_events = decode_image_events[-min_iterations:] + # if (hasattr(page, + # 'image_decoding_benchmark_limit_results_to_min_iterations') and + # page.image_decoding_benchmark_limit_results_to_min_iterations): + # assert _IsDone() + # min_iterations = tab.EvaluateJavaScript('minIterations') + # decode_image_events = decode_image_events[-min_iterations:] - durations = [d.duration_ms for d in decode_image_events] - if not durations: - results.Add('ImageDecoding_avg', 'ms', 'unsupported') - return - image_decoding_avg = sum(durations) / len(durations) - results.Add('ImageDecoding_avg', 'ms', image_decoding_avg) + # durations = [d.duration_ms for d in decode_image_events] + # if not durations: + # results.Add('ImageDecoding_avg', 'ms', 'unsupported') + # return + # image_decoding_avg = sum(durations) / len(durations) + # results.Add('ImageDecoding_avg', 'ms', image_decoding_avg) diff --git a/tools/perf/perf_tools/image_decoding_benchmark_unittest.py b/tools/perf/perf_tools/image_decoding_benchmark_unittest.py index c1309da..b2fdd09 100644 --- a/tools/perf/perf_tools/image_decoding_benchmark_unittest.py +++ b/tools/perf/perf_tools/image_decoding_benchmark_unittest.py @@ -3,21 +3,24 @@ # found in the LICENSE file. 
from telemetry import multi_page_benchmark_unittest_base -from perf_tools import image_decoding_benchmark +# from perf_tools import image_decoding_benchmark class ImageDecodingBenchmarkUnitTest( multi_page_benchmark_unittest_base.MultiPageBenchmarkUnitTestBase): def testImageDecodingMeasurement(self): - ps = self.CreatePageSetFromFileInUnittestDataDir('image_decoding.html') + # TODO(qinmin): uncomment this after we fix the image decoding benchmark + # for lazily decoded images + return + # ps = self.CreatePageSetFromFileInUnittestDataDir('image_decoding.html') - benchmark = image_decoding_benchmark.ImageDecoding() - all_results = self.RunBenchmark(benchmark, ps) + # benchmark = image_decoding_benchmark.ImageDecoding() + # all_results = self.RunBenchmark(benchmark, ps) - self.assertEqual(0, len(all_results.page_failures)) - self.assertEqual(1, len(all_results.page_results)) + # self.assertEqual(0, len(all_results.page_failures)) + # self.assertEqual(1, len(all_results.page_results)) - results0 = all_results.page_results[0] - self.assertTrue('ImageDecoding_avg' in results0) - self.assertTrue(results0['ImageDecoding_avg'] > 0) + # results0 = all_results.page_results[0] + # self.assertTrue('ImageDecoding_avg' in results0) + # self.assertTrue(results0['ImageDecoding_avg'] > 0) |