author    | dennisjeffrey@google.com <dennisjeffrey@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-04-17 00:06:23 +0000
committer | dennisjeffrey@google.com <dennisjeffrey@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-04-17 00:06:23 +0000
commit    | a703506e2b37a4c4bd4fdc15024cca320504bb87 (patch)
tree      | ab987f5a10f503d3a2ce09ded04f0dbdcf2a72f3 /tools
parent    | fe07dda918dce85f94fda58a55a6db03467086e5 (diff)
download  | chromium_src-a703506e2b37a4c4bd4fdc15024cca320504bb87.zip
          | chromium_src-a703506e2b37a4c4bd4fdc15024cca320504bb87.tar.gz
          | chromium_src-a703506e2b37a4c4bd4fdc15024cca320504bb87.tar.bz2
[Telemetry] Print summaries for individual passing pages if failed pages exist.
Previously, Telemetry would not output any summary ("RESULT=") lines if
at least one page in a page set failed. Now, if a failed page exists,
Telemetry will output the individual page results from the passing pages,
but it will not output any average data or any overall results that are
not associated with a page.
Also added a few unit tests for this change.
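To make the new policy concrete, here is a small, hypothetical standalone sketch
(the function name print_summary and its plain (url, value) inputs are invented
for illustration; the real logic lives in PageBenchmarkResults.PrintSummary):

    # Hypothetical sketch of the new summary policy, not the actual Telemetry code.
    def print_summary(page_results, failed_urls):
      """page_results: list of (url, value) pairs; failed_urls: iterable of URLs."""
      failed = set(failed_urls)
      passing = [(url, value) for url, value in page_results if url not in failed]

      # Individual per-page results are still printed for pages that passed.
      for url, value in passing:
        print('RESULT a_by_url: %s= %s seconds' % (url, value))

      # Averages and overall (non-page) results are printed only when no page failed.
      if not failed and passing:
        avg = sum(value for _, value in passing) / float(len(passing))
        print('RESULT a: a= %f seconds' % avg)

    print_summary([('http___www.foo.com_', 3), ('http___www.bar.com_', 7)],
                  ['http___www.foo.com_'])
    # -> RESULT a_by_url: http___www.bar.com_= 7 seconds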
BUG=230998
TEST=Verified with the scrolling_benchmark on a local Chrome OS device
(whose page set currently has at least one failing page) that RESULT= lines
are now output only for the passing pages. Also verified that all unit
tests for page_benchmark_results.py pass.
Review URL: https://codereview.chromium.org/14172017
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@194491 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r-- | tools/telemetry/telemetry/page/page_benchmark_results.py          | 58
-rw-r--r-- | tools/telemetry/telemetry/page/page_benchmark_results_unittest.py | 63
2 files changed, 100 insertions, 21 deletions
diff --git a/tools/telemetry/telemetry/page/page_benchmark_results.py b/tools/telemetry/telemetry/page/page_benchmark_results.py
index b13e2b9..d7f7930 100644
--- a/tools/telemetry/telemetry/page/page_benchmark_results.py
+++ b/tools/telemetry/telemetry/page/page_benchmark_results.py
@@ -120,11 +120,20 @@ class PageBenchmarkResults(page_test.PageTestResults):
         measurement, trace, values, units, result_type)
 
   def PrintSummary(self, trace_tag):
-    if self.page_failures:
-      return
+    """Print summary data in a format expected by buildbot for perf dashboards.
+
+    If any failed pages exist, only output individual page results for
+    non-failing pages, and do not output any average data.
+
+    Args:
+      trace_tag: a string tag to append to the key for a result trace.
+    """
+    failed_pages = [p['page'] for p in self.page_failures]
+    success_page_results = [r for r in self._page_results
+                            if r.page not in failed_pages]
 
     unique_page_urls = []
-    for page_values in self._page_results:
+    for page_values in success_page_results:
       url = page_values.page.display_url
       if unique_page_urls and unique_page_urls[0] == url:
         break
@@ -134,7 +143,7 @@ class PageBenchmarkResults(page_test.PageTestResults):
     results_summary = defaultdict(list)
     for measurement_name in \
           self._all_measurements_that_have_been_seen.iterkeys():
-      for page_values in self._page_results:
+      for page_values in success_page_results:
         value = page_values.FindValueByMeasurementName(measurement_name)
         if not value:
           continue
@@ -159,7 +168,10 @@ class PageBenchmarkResults(page_test.PageTestResults):
       else:
         trace = measurement + (trace_tag or '')
 
-      if not trace_tag and len(value_url_list) > 1:
+      # Print individual _by_url results if there's more than 1 successful page,
+      # or if there's exactly 1 successful page but a failure exists.
+      if not trace_tag and (len(value_url_list) > 1 or
+                            (self.page_failures and len(value_url_list) == 1)):
         url_value_map = defaultdict(list)
         for value, url in value_url_list:
           if 'histogram' in data_type and url_value_map[url]:
@@ -172,21 +184,25 @@ class PageBenchmarkResults(page_test.PageTestResults):
           self._PrintPerfResult(measurement + '_by_url', url,
                                 url_value_map[url], units, by_url_data_type)
 
+      # If there were no page failures, print the average data.
       # For histograms, we don't print the average data, only the _by_url,
       # unless there is only 1 page in which case the _by_urls are omitted.
-      if 'histogram' not in data_type or len(value_url_list) == 1:
-        values = [i[0] for i in value_url_list]
-        if isinstance(values[0], list):
-          values = list(chain.from_iterable(values))
-        self._PrintPerfResult(measurement, trace, values, units, data_type)
-
-    # Output the overall results (results not associated with a page).
-    for value in self._overall_results:
-      values = value.value
-      if not isinstance(values, list):
-        values = [values]
-      measurement_name = value.chart_name
-      if not measurement_name:
-        measurement_name = value.trace_name
-      self._PrintPerfResult(measurement_name, value.trace_name,
-                            values, value.units, value.data_type)
+      if not self.page_failures:
+        if 'histogram' not in data_type or len(value_url_list) == 1:
+          values = [i[0] for i in value_url_list]
+          if isinstance(values[0], list):
+            values = list(chain.from_iterable(values))
+          self._PrintPerfResult(measurement, trace, values, units, data_type)
+
+    # If there were no failed pages, output the overall results (results not
+    # associated with a page).
+    if not self.page_failures:
+      for value in self._overall_results:
+        values = value.value
+        if not isinstance(values, list):
+          values = [values]
+        measurement_name = value.chart_name
+        if not measurement_name:
+          measurement_name = value.trace_name
+        self._PrintPerfResult(measurement_name, value.trace_name,
+                              values, value.units, value.data_type)
diff --git a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
index e3e20bf..3cbd3b1 100644
--- a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
+++ b/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
@@ -107,6 +107,69 @@ class PageBenchmarkResultsTest(unittest.TestCase):
       benchmark_results.results,
       expected)
 
+  def test_basic_summary_pass_and_fail_page(self):
+    """If a page failed, only print summary for individual passing pages."""
+    test_page_set = _MakePageSet()
+
+    benchmark_results = SummarySavingPageBenchmarkResults()
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 3)
+    benchmark_results.DidMeasurePage()
+    benchmark_results.AddFailure(test_page_set.pages[0], 'message', 'details')
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 7)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.PrintSummary(None)
+    expected = ['RESULT a_by_url: http___www.bar.com_= 7 seconds']
+    self.assertEquals(benchmark_results.results, expected)
+
+  def test_basic_summary_all_pages_fail(self):
+    """If all pages fail, no summary is printed."""
+    test_page_set = _MakePageSet()
+
+    benchmark_results = SummarySavingPageBenchmarkResults()
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 3)
+    benchmark_results.DidMeasurePage()
+    benchmark_results.AddFailure(test_page_set.pages[0], 'message', 'details')
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 7)
+    benchmark_results.DidMeasurePage()
+    benchmark_results.AddFailure(test_page_set.pages[1], 'message', 'details')
+
+    benchmark_results.PrintSummary(None)
+    self.assertEquals(benchmark_results.results, [])
+
+  def test_repeated_pageset_one_iteration_one_page_fails(self):
+    """Page fails on one iteration, no results for that page should print."""
+    test_page_set = _MakePageSet()
+
+    benchmark_results = SummarySavingPageBenchmarkResults()
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 3)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 7)
+    benchmark_results.DidMeasurePage()
+    benchmark_results.AddFailure(test_page_set.pages[1], 'message', 'details')
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 4)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 8)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.PrintSummary(None)
+    expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
+                'Avg a_by_url: 3.500000seconds\nSd a_by_url: 0.707107seconds']
+    self.assertEquals(benchmark_results.results, expected)
+
   def test_repeated_pageset(self):
     test_page_set = _MakePageSet()
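For reference, the condition the patch introduces for emitting _by_url lines can
be summarized with a small hypothetical helper (the name should_print_by_url is
invented; the real check is written inline in PrintSummary):

    # Hypothetical helper mirroring the new inline condition in PrintSummary.
    def should_print_by_url(trace_tag, num_successful_pages, has_failures):
      """_by_url results are printed only when no trace tag is set and either
      more than one page succeeded, or exactly one page succeeded while at
      least one other page failed."""
      if trace_tag:
        return False
      return (num_successful_pages > 1 or
              (has_failures and num_successful_pages == 1))

    assert should_print_by_url(None, 2, False)        # normal multi-page run
    assert should_print_by_url(None, 1, True)         # one page passed, one failed
    assert not should_print_by_url(None, 1, False)    # single page: plain RESULT only
    assert not should_print_by_url('_ref', 3, False)  # a trace tag suppresses _by_url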