summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--tools/perf/perf_tools/image_decoding_benchmark.py46
-rw-r--r--tools/perf/perf_tools/image_decoding_benchmark_unittest.py21
2 files changed, 39 insertions, 28 deletions
diff --git a/tools/perf/perf_tools/image_decoding_benchmark.py b/tools/perf/perf_tools/image_decoding_benchmark.py
index c14a84c..ff13431 100644
--- a/tools/perf/perf_tools/image_decoding_benchmark.py
+++ b/tools/perf/perf_tools/image_decoding_benchmark.py
@@ -6,29 +6,37 @@ from telemetry import multi_page_benchmark
class ImageDecoding(multi_page_benchmark.MultiPageBenchmark):
- def WillNavigateToPage(self, page, tab):
- tab.StartTimelineRecording()
+ # TODO(qinmin): uncomment this after we fix the image decoding benchmark
+ # for lazily decoded images
+ # def WillNavigateToPage(self, page, tab):
+ # tab.StartTimelineRecording()
def MeasurePage(self, page, tab, results):
- tab.StopTimelineRecording()
- def _IsDone():
- return tab.EvaluateJavaScript('isDone')
+ # TODO(qinmin): This android only test may fail after we switch to
+ # deferred image decoding and impl-side painting. Before we fix the test,
+ # temporarily disable calculation for lazily decoded images.
+ # Uncommented the following lines after we fix the timeline for lazily
+ # decoded images.
+ return
+ # tab.StopTimelineRecording()
+ # def _IsDone():
+ # return tab.EvaluateJavaScript('isDone')
- decode_image_events = \
- tab.timeline_model.GetAllOfName('DecodeImage')
+ # decode_image_events = \
+ # tab.timeline_model.GetAllOfName('DecodeImage')
# If it is a real image benchmark, then store only the last-minIterations
# decode tasks.
- if (hasattr(page,
- 'image_decoding_benchmark_limit_results_to_min_iterations') and
- page.image_decoding_benchmark_limit_results_to_min_iterations):
- assert _IsDone()
- min_iterations = tab.EvaluateJavaScript('minIterations')
- decode_image_events = decode_image_events[-min_iterations:]
+ # if (hasattr(page,
+ # 'image_decoding_benchmark_limit_results_to_min_iterations') and
+ # page.image_decoding_benchmark_limit_results_to_min_iterations):
+ # assert _IsDone()
+ # min_iterations = tab.EvaluateJavaScript('minIterations')
+ # decode_image_events = decode_image_events[-min_iterations:]
- durations = [d.duration_ms for d in decode_image_events]
- if not durations:
- results.Add('ImageDecoding_avg', 'ms', 'unsupported')
- return
- image_decoding_avg = sum(durations) / len(durations)
- results.Add('ImageDecoding_avg', 'ms', image_decoding_avg)
+ # durations = [d.duration_ms for d in decode_image_events]
+ # if not durations:
+ # results.Add('ImageDecoding_avg', 'ms', 'unsupported')
+ # return
+ # image_decoding_avg = sum(durations) / len(durations)
+ # results.Add('ImageDecoding_avg', 'ms', image_decoding_avg)
diff --git a/tools/perf/perf_tools/image_decoding_benchmark_unittest.py b/tools/perf/perf_tools/image_decoding_benchmark_unittest.py
index c1309da..b2fdd09 100644
--- a/tools/perf/perf_tools/image_decoding_benchmark_unittest.py
+++ b/tools/perf/perf_tools/image_decoding_benchmark_unittest.py
@@ -3,21 +3,24 @@
# found in the LICENSE file.
from telemetry import multi_page_benchmark_unittest_base
-from perf_tools import image_decoding_benchmark
+# from perf_tools import image_decoding_benchmark
class ImageDecodingBenchmarkUnitTest(
multi_page_benchmark_unittest_base.MultiPageBenchmarkUnitTestBase):
def testImageDecodingMeasurement(self):
- ps = self.CreatePageSetFromFileInUnittestDataDir('image_decoding.html')
+ # TODO(qinmin): uncomment this after we fix the image decoding benchmark
+ # for lazily decoded images
+ return
+ # ps = self.CreatePageSetFromFileInUnittestDataDir('image_decoding.html')
- benchmark = image_decoding_benchmark.ImageDecoding()
- all_results = self.RunBenchmark(benchmark, ps)
+ # benchmark = image_decoding_benchmark.ImageDecoding()
+ # all_results = self.RunBenchmark(benchmark, ps)
- self.assertEqual(0, len(all_results.page_failures))
- self.assertEqual(1, len(all_results.page_results))
+ # self.assertEqual(0, len(all_results.page_failures))
+ # self.assertEqual(1, len(all_results.page_results))
- results0 = all_results.page_results[0]
- self.assertTrue('ImageDecoding_avg' in results0)
- self.assertTrue(results0['ImageDecoding_avg'] > 0)
+ # results0 = all_results.page_results[0]
+ # self.assertTrue('ImageDecoding_avg' in results0)
+ # self.assertTrue(results0['ImageDecoding_avg'] > 0)