author    tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-04 09:26:08 +0000
committer tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-04 09:26:08 +0000
commit    5bfe3bf04f473256770bd8c5148dc3f4a43d4240 (patch)
tree      aeea122f654141f01732bcc3b212fafcbf020444
parent    89f3f8a19a28f5684e3c922f9b5548cd9ba30c37 (diff)
[Telemetry] Combine _by_url results when using --pageset-repeat.
Previously, running with --pageset-repeat would produce a separate RESULT line
for each iteration of the same page. The perfbot log processor doesn't
understand multiple RESULT lines with the same chart and trace, so instead we
combine all repeated runs of the same page into one RESULT line per page. This
is necessary for the page cyclers to use page sets, since they will run with
--pageset-repeat=10.

BUG=None
TEST=tools/telemetry/run_tests --browser=system page_benchmark_results_unittest
NOTRY=True

Review URL: https://chromiumcodereview.appspot.com/13571002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@192261 0039d316-1c4b-4281-b951-d872f2087c98
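To illustrate the new behavior, here is a minimal standalone sketch of the
combining step, assuming hypothetical (value, url) pairs from two repeats of a
two-page set (the numbers mirror the unit test below); the print format is a
simplified stand-in for _PrintPerfResult, not the actual Telemetry code:

from collections import defaultdict

# Hypothetical measurements from --pageset-repeat=2 over two pages.
value_url_list = [(3, 'http://www.foo.com/'), (7, 'http://www.bar.com/'),
                  (4, 'http://www.foo.com/'), (8, 'http://www.bar.com/')]

# Merge every repeat of the same page under a single trace, so the
# perfbot log processor sees exactly one RESULT line per page.
url_value_map = defaultdict(list)
for value, url in value_url_list:
  url_value_map[url].append(value)

for url, values in url_value_map.items():
  print('RESULT a_by_url: %s= %s seconds' % (url, values))

Note that the actual change iterates unique_page_urls rather than the map's
keys, so the output order follows page-set order regardless of dict ordering.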
-rw-r--r--  tools/telemetry/telemetry/page/page_benchmark_results.py           19
-rw-r--r--  tools/telemetry/telemetry/page/page_benchmark_results_unittest.py  32
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/tools/telemetry/telemetry/page/page_benchmark_results.py b/tools/telemetry/telemetry/page/page_benchmark_results.py
index 2c7df7d..6a97346 100644
--- a/tools/telemetry/telemetry/page/page_benchmark_results.py
+++ b/tools/telemetry/telemetry/page/page_benchmark_results.py
@@ -111,6 +111,13 @@ class PageBenchmarkResults(page_test.PageTestResults):
     if self.page_failures:
       return
 
+    unique_page_urls = []
+    for page_values in self._page_results:
+      url = page_values.page.display_url
+      if unique_page_urls and unique_page_urls[0] == url:
+        break
+      unique_page_urls.append(url)
+
     # Build the results summary.
     results_summary = defaultdict(list)
     for measurement_name in \
@@ -141,9 +148,17 @@ class PageBenchmarkResults(page_test.PageTestResults):
       trace = measurement + (trace_tag or '')
 
       if not trace_tag and len(value_url_list) > 1:
+        url_value_map = defaultdict(list)
         for value, url in value_url_list:
-          self._PrintPerfResult(measurement + '_by_url', url, [value], units,
-                                by_url_data_type)
+          if 'histogram' in data_type and url_value_map[url]:
+            # TODO(tonyg/marja): The histogram processing code only accepts one
+            # histogram, so we only report the first histogram. Once histograms
+            # support aggregating multiple values, this can be removed.
+            continue
+          url_value_map[url].append(value)
+        for url in unique_page_urls:
+          self._PrintPerfResult(measurement + '_by_url', url,
+                                url_value_map[url], units, by_url_data_type)
 
       # For histograms, we don't print the average data, only the _by_url,
       # unless there is only 1 page in which case the _by_urls are omitted.
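A note on the unique_page_urls scan added in the first hunk: it assumes
--pageset-repeat replays the page set in the same order on every pass, so the
first URL seen a second time marks the end of one complete pass. A minimal
sketch of that scan with hypothetical data:

page_urls = ['http://www.foo.com/', 'http://www.bar.com/',
             'http://www.foo.com/', 'http://www.bar.com/']

unique_page_urls = []
for url in page_urls:
  if unique_page_urls and unique_page_urls[0] == url:
    break  # Back at the first page: one full pass has been collected.
  unique_page_urls.append(url)

assert unique_page_urls == ['http://www.foo.com/', 'http://www.bar.com/']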
diff --git a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
index 8f76a3b..b2edad3 100644
--- a/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
+++ b/tools/telemetry/telemetry/page/page_benchmark_results_unittest.py
@@ -107,6 +107,38 @@ class PageBenchmarkResultsTest(unittest.TestCase):
         benchmark_results.results,
         expected)
 
+  def test_repeated_pageset(self):
+    test_page_set = _MakePageSet()
+
+    benchmark_results = SummarySavingPageBenchmarkResults()
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 3)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 7)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[0])
+    benchmark_results.Add('a', 'seconds', 4)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.WillMeasurePage(test_page_set.pages[1])
+    benchmark_results.Add('a', 'seconds', 8)
+    benchmark_results.DidMeasurePage()
+
+    benchmark_results.PrintSummary(None)
+    expected = ['RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
+                'Avg a_by_url: 3.500000seconds\nSd a_by_url: 0.707107seconds',
+                'RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' +
+                'Avg a_by_url: 7.500000seconds\nSd a_by_url: 0.707107seconds',
+                '*RESULT a: a= [3,7,4,8] seconds\n' +
+                'Avg a: 5.500000seconds\nSd a: 2.380476seconds'
+               ]
+    self.assertEquals(
+        benchmark_results.results,
+        expected)
+
   def test_histogram(self):
     test_page_set = _MakePageSet()
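As a sanity check on the Avg/Sd figures in the expected output above, a quick
sketch of the arithmetic (sample standard deviation, n - 1 denominator):

import math

values = [3, 4]  # the combined a_by_url values for the first page
avg = sum(values) / float(len(values))  # 3.5
sd = math.sqrt(sum((v - avg) ** 2 for v in values) / (len(values) - 1))
print('Avg %f Sd %f' % (avg, sd))  # Avg 3.500000 Sd 0.707107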