12 files changed, 597 insertions, 163 deletions
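[Editor's note] The first new file below implements the renamed "block" output format. A minimal standalone sketch of the format it writes per page (no telemetry imports needed; WriteBlock and its arguments are illustrative names, not part of the change):

import os
import sys

# One block per page: a 'url:' line, then one 'name (units): value' line
# per measurement in sorted name order, then a blank line between pages.
def WriteBlock(output_file, url, measurements):
  lines = ['url: %s' % url]
  for name in sorted(measurements):
    units, value = measurements[name]
    lines.append('%s (%s): %s' % (name, units, value))
  for line in lines:
    output_file.write(line)
    output_file.write(os.linesep)
  output_file.write(os.linesep)

WriteBlock(sys.stdout, 'http://www.foo.com/', {'foo': ('seconds', 3)})
# Prints:
#   url: http://www.foo.com/
#   foo (seconds): 3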
diff --git a/tools/telemetry/telemetry/block_page_benchmark_results.py b/tools/telemetry/telemetry/block_page_benchmark_results.py new file mode 100644 index 0000000..f76394c --- /dev/null +++ b/tools/telemetry/telemetry/block_page_benchmark_results.py @@ -0,0 +1,32 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +import os + +from telemetry.page_benchmark_results import PageBenchmarkResults + +class BlockPageBenchmarkResults(PageBenchmarkResults): + def __init__(self, output_file): + super(BlockPageBenchmarkResults, self).__init__() + self._output_file = output_file + + def DidMeasurePage(self): + page_values = self.values_for_current_page + + lines = ['url: %s' % + self.values_for_current_page.page.url] + sorted_measurement_names = page_values.measurement_names + sorted_measurement_names.sort() + + for measurement_name in sorted_measurement_names: + value = page_values.FindValueByMeasurementName(measurement_name) + lines.append('%s (%s): %s' % + (measurement_name, + value.units, + value.output_value)) + for line in lines: + self._output_file.write(line) + self._output_file.write(os.linesep) + self._output_file.write(os.linesep) + + super(BlockPageBenchmarkResults, self).DidMeasurePage() diff --git a/tools/telemetry/telemetry/block_page_benchmark_results_unittest.py b/tools/telemetry/telemetry/block_page_benchmark_results_unittest.py new file mode 100644 index 0000000..ba4c02e --- /dev/null +++ b/tools/telemetry/telemetry/block_page_benchmark_results_unittest.py @@ -0,0 +1,63 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+import StringIO +import os +import unittest + +from telemetry import block_page_benchmark_results +from telemetry.page_set import PageSet + +BlockPageBenchmarkResults = \ + block_page_benchmark_results.BlockPageBenchmarkResults + +def _MakePageSet(): + return PageSet.FromDict({ + "description": "hello", + "archive_path": "foo.wpr", + "pages": [ + {"url": "http://www.foo.com/"}, + {"url": "http://www.bar.com/"} + ] + }, os.path.dirname(__file__)) + +class NonPrintingBlockPageBenchmarkResults(BlockPageBenchmarkResults): + def __init__(self, *args): + super(NonPrintingBlockPageBenchmarkResults, self).__init__(*args) + + def _PrintPerfResult(self, *args): + pass + +class BlockPageBenchmarkResultsTest(unittest.TestCase): + def setUp(self): + self._output = StringIO.StringIO() + self._page_set = _MakePageSet() + + @property + def lines(self): + lines = StringIO.StringIO(self._output.getvalue()).readlines() + return [line.strip() for line in lines] + + @property + def data(self): + return [line.split(': ', 1) for line in self.lines] + + def test_with_output_after_every_page(self): + results = NonPrintingBlockPageBenchmarkResults(self._output) + results.WillMeasurePage(self._page_set[0]) + results.Add('foo', 'seconds', 3) + results.DidMeasurePage() + + results.WillMeasurePage(self._page_set[1]) + results.Add('bar', 'seconds', 4) + results.DidMeasurePage() + + expected = [ + ['url', 'http://www.foo.com/'], + ['foo (seconds)', '3'], + [''], + ['url', 'http://www.bar.com/'], + ['bar (seconds)', '4'], + [''] + ] + self.assertEquals(self.data, expected) diff --git a/tools/telemetry/telemetry/csv_page_benchmark_results.py b/tools/telemetry/telemetry/csv_page_benchmark_results.py new file mode 100644 index 0000000..3a9995e --- /dev/null +++ b/tools/telemetry/telemetry/csv_page_benchmark_results.py @@ -0,0 +1,79 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +from telemetry.page_benchmark_results import PageBenchmarkResults + +class CsvPageBenchmarkResults(PageBenchmarkResults): + def __init__(self, results_writer, output_after_every_page): + super(CsvPageBenchmarkResults, self).__init__() + self._results_writer = results_writer + self._did_output_header = False + self._header_names_written_to_writer = None + self._output_after_every_page = output_after_every_page + + def DidMeasurePage(self): + assert self.values_for_current_page, 'Failed to call WillMeasurePage' + if not self._output_after_every_page: + super(CsvPageBenchmarkResults, self).DidMeasurePage() + return + + if not self._did_output_header: + self._OutputHeader() + else: + self._ValidateOutputNamesForCurrentPage() + + self._OutputValuesForPage(self.values_for_current_page) + + super(CsvPageBenchmarkResults, self).DidMeasurePage() + + def PrintSummary(self, trace_tag): + if not self._output_after_every_page: + self._OutputHeader() + for page_values in self.all_values_for_all_pages: + self._OutputValuesForPage(page_values) + + super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag) + + def _ValidateOutputNamesForCurrentPage(self): + assert self._did_output_header + current_page_measurement_names = \ + self.values_for_current_page.measurement_names + if self._header_names_written_to_writer == current_page_measurement_names: + return + assert False, """To use CsvPageBenchmarkResults, you must add the same +result names for every page. 
In this case, first page output: +%s + +Thus, all subsequent pages must output this as well. Instead, the current page +output: +%s + +Change your test to produce the same thing each time, or modify +MultiPageBenchmark.results_are_the_same_on_every_page to return False. +""" % (repr(self._header_names_written_to_writer), + repr(current_page_measurement_names)) + + def _OutputHeader(self): + assert not self._did_output_header + all_measurement_names = list( + self.all_measurements_that_have_been_seen.keys()) + all_measurement_names.sort() + self._did_output_header = True + self._header_names_written_to_writer = list(all_measurement_names) + + row = ['url'] + for measurement_name in all_measurement_names: + measurement_data = \ + self.all_measurements_that_have_been_seen[measurement_name] + row.append('%s (%s)' % (measurement_name, measurement_data['units'])) + self._results_writer.writerow(row) + + def _OutputValuesForPage(self, page_values): + row = [page_values.page.url] + for measurement_name in self._header_names_written_to_writer: + value = page_values.FindValueByMeasurementName(measurement_name) + if value: + row.append('%s' % value.output_value) + else: + row.append('-') + self._results_writer.writerow(row) diff --git a/tools/telemetry/telemetry/csv_page_benchmark_results_unittest.py b/tools/telemetry/telemetry/csv_page_benchmark_results_unittest.py new file mode 100644 index 0000000..db57b35 --- /dev/null +++ b/tools/telemetry/telemetry/csv_page_benchmark_results_unittest.py @@ -0,0 +1,106 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +import StringIO +import csv +import os +import unittest + +from telemetry.csv_page_benchmark_results import CsvPageBenchmarkResults +from telemetry.page_set import PageSet + +def _MakePageSet(): + return PageSet.FromDict({ + "description": "hello", + "archive_path": "foo.wpr", + "pages": [ + {"url": "http://www.foo.com/"}, + {"url": "http://www.bar.com/"} + ] + }, os.path.dirname(__file__)) + +class NonPrintingCsvPageBenchmarkResults(CsvPageBenchmarkResults): + def __init__(self, *args): + super(NonPrintingCsvPageBenchmarkResults, self).__init__(*args) + + def _PrintPerfResult(self, *args): + pass + +class CsvPageBenchmarkResultsTest(unittest.TestCase): + def setUp(self): + self._output = StringIO.StringIO() + self._page_set = _MakePageSet() + + @property + def lines(self): + lines = StringIO.StringIO(self._output.getvalue()).readlines() + return lines + + @property + def output_header_row(self): + rows = list(csv.reader(self.lines)) + return rows[0] + + @property + def output_data_rows(self): + rows = list(csv.reader(self.lines)) + return rows[1:] + + def test_with_output_after_every_page(self): + results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), True) + results.WillMeasurePage(self._page_set[0]) + results.Add('foo', 'seconds', 3) + results.DidMeasurePage() + self.assertEquals( + self.output_header_row, + ['url', 'foo (seconds)']) + self.assertEquals( + self.output_data_rows[0], + [self._page_set[0].url, '3']) + + results.WillMeasurePage(self._page_set[1]) + results.Add('foo', 'seconds', 4) + results.DidMeasurePage() + self.assertEquals( + len(self.output_data_rows), + 2) + self.assertEquals( + self.output_data_rows[1], + [self._page_set[1].url, '4']) + + def test_with_output_after_every_page_and_inconsistency(self): + results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), True) + 
results.WillMeasurePage(self._page_set[0]) + results.Add('foo', 'seconds', 3) + results.DidMeasurePage() + + # We printed foo, now change to bar + results.WillMeasurePage(self._page_set[1]) + results.Add('bar', 'seconds', 4) + + self.assertRaises( + Exception, + lambda: results.DidMeasurePage()) # pylint: disable=W0108 + + def test_with_output_at_print_summary_time(self): + results = NonPrintingCsvPageBenchmarkResults(csv.writer(self._output), + False) + results.WillMeasurePage(self._page_set[0]) + results.Add('foo', 'seconds', 3) + results.DidMeasurePage() + + results.WillMeasurePage(self._page_set[1]) + results.Add('bar', 'seconds', 4) + results.DidMeasurePage() + + results.PrintSummary('tag') + + self.assertEquals( + self.output_header_row, + ['url', 'bar (seconds)', 'foo (seconds)']) + self.assertEquals( + self.output_data_rows, + [[self._page_set[0].url, '-', '3'], + [self._page_set[1].url, '4', '-']]) + + diff --git a/tools/telemetry/telemetry/multi_page_benchmark.py b/tools/telemetry/telemetry/multi_page_benchmark.py index 3a8b734..9261b83 100644 --- a/tools/telemetry/telemetry/multi_page_benchmark.py +++ b/tools/telemetry/telemetry/multi_page_benchmark.py @@ -1,169 +1,13 @@ # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -from collections import defaultdict -import os -import sys - from telemetry import page_test -# Get build/android/pylib scripts into our path. -# TODO(tonyg): Move perf_tests_helper.py to a common location. -sys.path.append( - os.path.abspath( - os.path.join(os.path.dirname(__file__), - '../../../build/android/pylib'))) -# pylint: disable=F0401 -from perf_tests_helper import GeomMeanAndStdDevFromHistogram -from perf_tests_helper import PrintPerfResult # pylint: disable=F0401 - - -def _Mean(l): - return float(sum(l)) / len(l) if len(l) > 0 else 0.0 - - class MeasurementFailure(page_test.Failure): """Exception that can be thrown from MeasurePage to indicate an undesired but designed-for problem.""" pass - -class BenchmarkResults(page_test.PageTestResults): - def __init__(self): - super(BenchmarkResults, self).__init__() - self.results_summary = defaultdict(list) - self.page_results = [] - self.urls = [] - self.field_names = None - self.field_units = {} - self.field_types = {} - - self._page = None - self._page_values = {} - - def WillMeasurePage(self, page): - self._page = page - self._page_values = {} - - def Add(self, trace_name, units, value, chart_name=None, data_type='default'): - name = trace_name - if chart_name: - name = '%s.%s' % (chart_name, trace_name) - assert name not in self._page_values, 'Result names must be unique' - assert name != 'url', 'The name url cannot be used' - if self.field_names: - assert name in self.field_names, """MeasurePage returned inconsistent -results! 
You must return the same dict keys every time.""" - else: - self.field_units[name] = units - self.field_types[name] = data_type - self._page_values[name] = value - - def DidMeasurePage(self): - assert self._page, 'Failed to call WillMeasurePage' - - if not self.field_names: - self.field_names = self._page_values.keys() - self.field_names.sort() - - self.page_results.append(self._page_values) - self.urls.append(self._page.display_url) - for name in self.field_names: - units = self.field_units[name] - data_type = self.field_types[name] - value = self._page_values[name] - self.results_summary[(name, units, data_type)].append(value) - - def PrintSummary(self, trace_tag): - if self.page_failures: - return - for measurement_units_type, values in sorted( - self.results_summary.iteritems()): - measurement, units, data_type = measurement_units_type - if data_type == 'histogram': - # For histograms, the _by_url data is important. - by_url_data_type = 'histogram' - else: - # For non-histograms, the _by_url data is unimportant. - by_url_data_type = 'unimportant' - if '.' in measurement: - measurement, trace = measurement.split('.', 1) - trace += (trace_tag or '') - else: - trace = measurement + (trace_tag or '') - if len(self.urls) > 1 and not trace_tag: - print - assert len(self.urls) == len(values) - for i, value in enumerate(values): - PrintPerfResult(measurement + '_by_url', self.urls[i], [value], units, - by_url_data_type) - # For histograms, we don't print the average data, only the _by_url. - if not data_type == 'histogram': - PrintPerfResult(measurement, trace, values, units, data_type) - - -class IncrementalBenchmarkResults(BenchmarkResults): - def __init__(self): - super(IncrementalBenchmarkResults, self).__init__() - self._did_process_header = False - - def DidMeasurePage(self): - super(IncrementalBenchmarkResults, self).DidMeasurePage() - - if not self._did_process_header: - self.ProcessHeader() - - row = [self._page.url] - for name in self.field_names: - value = self._page_values[name] - if self.field_types[name] == 'histogram': - avg, _ = GeomMeanAndStdDevFromHistogram(value) - row.append(avg) - elif isinstance(value, list): - row.append(_Mean(value)) - else: - row.append(value) - self.OutputRow(row) - - def OutputRow(self, row): - raise NotImplementedError() - - def ProcessHeader(self): - raise NotImplementedError() - -class CsvBenchmarkResults(IncrementalBenchmarkResults): - def __init__(self, results_writer): - super(CsvBenchmarkResults, self).__init__() - self._results_writer = results_writer - - def OutputRow(self, row): - self._results_writer.writerow(row) - - def ProcessHeader(self): - self._did_process_header = True - row = ['url'] - for name in self.field_names: - row.append('%s (%s)' % (name, self.field_units[name])) - self.OutputRow(row) - -class TerminalBlockBenchmarkResults(IncrementalBenchmarkResults): - def __init__(self, output_location): - super(TerminalBlockBenchmarkResults, self).__init__() - self._output_location = output_location - self._header_row = None - - def OutputRow(self, row): - for i in range(len(row)): - print >> self._output_location, '%s:' % self._header_row[i], row[i] - print >> self._output_location - - def ProcessHeader(self): - self._did_process_header = True - self._header_row = ['url'] - for name in self.field_names: - self._header_row.append('%s (%s)' % (name, self.field_units[name])) - - # TODO(nduca): Rename to page_benchmark class MultiPageBenchmark(page_test.PageTest): """Glue code for running a benchmark across a set of pages. 
@@ -203,6 +47,16 @@ class MultiPageBenchmark(page_test.PageTest): self.MeasurePage(page, tab, results) results.DidMeasurePage() + @property + def results_are_the_same_on_every_page(self): + """By default, benchmarks are assumed to output the same values for every + page. This allows incremental output, for example in CSV. If, however, the + benchmark discovers what values it can report as it goes, and those values + may vary from page to page, you need to override this function and return + False. Output will not appear in this mode until the entire pageset has + run.""" + return True + def MeasurePage(self, page, tab, results): """Override to actually measure the page's performance. diff --git a/tools/telemetry/telemetry/multi_page_benchmark_runner.py b/tools/telemetry/telemetry/multi_page_benchmark_runner.py index 002e559..1acb558 100755 --- a/tools/telemetry/telemetry/multi_page_benchmark_runner.py +++ b/tools/telemetry/telemetry/multi_page_benchmark_runner.py @@ -8,8 +8,10 @@ import os import sys from telemetry import all_page_interactions # pylint: disable=W0611 +from telemetry import block_page_benchmark_results from telemetry import browser_finder from telemetry import browser_options +from telemetry import csv_page_benchmark_results from telemetry import discover from telemetry import multi_page_benchmark from telemetry import page_runner @@ -37,7 +39,7 @@ def Main(benchmark_dir): parser.add_option('--output-format', dest='output_format', default='csv', - help='Output format. Can be "csv" or "terminal-block". ' + help='Output format. Can be "csv" or "block". ' 'Defaults to "%default".') benchmark = None @@ -67,12 +69,14 @@ Use --browser=list to figure out which are available.\n""" sys.exit(1) if options.output_format == 'csv': - results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout)) - elif options.output_format == 'terminal-block': - results = multi_page_benchmark.TerminalBlockBenchmarkResults(sys.stdout) + results = csv_page_benchmark_results.CsvPageBenchmarkResults( + csv.writer(sys.stdout), + benchmark.results_are_the_same_on_every_page) + elif options.output_format in ('block', 'terminal-block'): + results = block_page_benchmark_results.BlockPageBenchmarkResults(sys.stdout) else: raise Exception('Invalid --output-format value: "%s". Valid values are ' - '"csv" and "terminal-block".' + '"csv" and "block".' % options.output_format) with page_runner.PageRunner(ps) as runner: diff --git a/tools/telemetry/telemetry/page_benchmark_results.py b/tools/telemetry/telemetry/page_benchmark_results.py new file mode 100644 index 0000000..4b52b6f --- /dev/null +++ b/tools/telemetry/telemetry/page_benchmark_results.py @@ -0,0 +1,128 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
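[Editor's note] A benchmark that discovers its measurement names as it runs would opt out of incremental CSV output by overriding the new property. A hypothetical subclass (class name and measurement are illustrative; this assumes a Chromium checkout with tools/telemetry on sys.path):

from telemetry import multi_page_benchmark

class DiscoversItsValuesBenchmark(multi_page_benchmark.MultiPageBenchmark):
  @property
  def results_are_the_same_on_every_page(self):
    # Measurement names vary from page to page, so CSV output must wait
    # until PrintSummary, when the full set of column names is known.
    return False

  def MeasurePage(self, page, tab, results):
    results.Add('load_time', 'ms', 42)  # placeholder measurement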
+from collections import defaultdict +from telemetry.page_test import PageTestResults +from telemetry.perf_tests_helper import PrintPerfResult +from telemetry.page_benchmark_value import PageBenchmarkValue + +class ValuesForSinglePage(object): + def __init__(self, page): + self.page = page + self.values = [] + + def AddValue(self, value): + self.values.append(value) + + @property + def measurement_names(self): + return [value.measurement_name for value in self.values] + + def FindValueByMeasurementName(self, measurement_name): + values = [value for value in self.values + if value.measurement_name == measurement_name] + assert len(values) <= 1 + if len(values): + return values[0] + return None + +class PageBenchmarkResults(PageTestResults): + def __init__(self): + super(PageBenchmarkResults, self).__init__() + self._all_values_for_all_pages = [] + + self._all_measurements_that_have_been_seen = {} + + self._values_for_current_page = {} + + @property + def values_for_current_page(self): + return self._values_for_current_page + + @property + def all_values_for_all_pages(self): + return self._all_values_for_all_pages + + def WillMeasurePage(self, page): + self._values_for_current_page = ValuesForSinglePage(page) + + @property + def all_measurements_that_have_been_seen(self): + return self._all_measurements_that_have_been_seen + + def Add(self, trace_name, units, value, chart_name=None, data_type='default'): + value = PageBenchmarkValue(trace_name, units, value, chart_name, data_type) + measurement_name = value.measurement_name + + # Sanity checks. + assert measurement_name != 'url', 'The name url cannot be used' + if measurement_name in self._all_measurements_that_have_been_seen: + measurement_data = \ + self._all_measurements_that_have_been_seen[measurement_name] + last_seen_units = measurement_data['units'] + last_seen_data_type = measurement_data['type'] + assert last_seen_units == units, \ + 'Unit cannot change for a name once it has been provided' + assert last_seen_data_type == data_type, \ + 'Type cannot change for a name once it has been provided' + else: + self._all_measurements_that_have_been_seen[measurement_name] = { + 'units': units, + 'type': data_type} + + self._values_for_current_page.AddValue(value) + + def DidMeasurePage(self): + assert self._values_for_current_page, 'Failed to call WillMeasurePage' + self._all_values_for_all_pages.append(self._values_for_current_page) + self._values_for_current_page = None + + def _PrintPerfResult(self, measurement, trace, values, units, + result_type='default'): + PrintPerfResult(measurement, trace, values, units, result_type) + + def PrintSummary(self, trace_tag): + if self.page_failures: + return + + # Build the results summary. + results_summary = defaultdict(list) + for measurement_name in \ + self._all_measurements_that_have_been_seen.iterkeys(): + for page_values in self._all_values_for_all_pages: + value = page_values.FindValueByMeasurementName(measurement_name) + if not value: + continue + measurement_units_type = (measurement_name, + value.units, + value.data_type) + value_url = (value.output_value, page_values.page.url) + results_summary[measurement_units_type].append(value_url) + + # Output the results summary sorted by name, then units, then data type. + for measurement_units_type, value_url_list in sorted( + results_summary.iteritems()): + measurement, units, data_type = measurement_units_type + + if data_type == 'histogram': + # For histograms, the _by_url data is important.
+ by_url_data_type = 'histogram' + else: + # For non-histograms, the _by_url data is unimportant. + by_url_data_type = 'unimportant' + if '.' in measurement: + measurement, trace = measurement.split('.', 1) + trace += (trace_tag or '') + else: + trace = measurement + (trace_tag or '') + + if not trace_tag: + for value, url in value_url_list: + self._PrintPerfResult(measurement + '_by_url', url, [value], units, + by_url_data_type) + + # For histograms, we don't print the average data, only the _by_url. + if not data_type == 'histogram': + values = [i[0] for i in value_url_list] + self._PrintPerfResult(measurement, trace, values, units, data_type) + diff --git a/tools/telemetry/telemetry/page_benchmark_results_unittest.py b/tools/telemetry/telemetry/page_benchmark_results_unittest.py new file mode 100644 index 0000000..fec3aaf --- /dev/null +++ b/tools/telemetry/telemetry/page_benchmark_results_unittest.py @@ -0,0 +1,107 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +import os +import unittest + +from telemetry.page_benchmark_results import PageBenchmarkResults +from telemetry.page_set import PageSet +from telemetry.perf_tests_helper import PrintPerfResult + + +def _MakePageSet(): + return PageSet.FromDict({ + "description": "hello", + "archive_path": "foo.wpr", + "pages": [ + {"url": "http://www.foo.com/"}, + {"url": "http://www.bar.com/"} + ] + }, os.path.dirname(__file__)) + +class NonPrintingPageBenchmarkResults(PageBenchmarkResults): + def __init__(self): + super(NonPrintingPageBenchmarkResults, self).__init__() + + def _PrintPerfResult(self, *args): + pass + +class SummarySavingPageBenchmarkResults(PageBenchmarkResults): + def __init__(self): + super(SummarySavingPageBenchmarkResults, self).__init__() + self.results = [] + + def _PrintPerfResult(self, *args): + res = PrintPerfResult(*args, print_to_stdout=False) + self.results.append(res) + +class PageBenchmarkResultsTest(unittest.TestCase): + def test_basic(self): + page_set = _MakePageSet() + + benchmark_results = NonPrintingPageBenchmarkResults() + benchmark_results.WillMeasurePage(page_set.pages[0]) + benchmark_results.Add('a', 'seconds', 3) + benchmark_results.DidMeasurePage() + + benchmark_results.WillMeasurePage(page_set.pages[1]) + benchmark_results.Add('a', 'seconds', 3) + benchmark_results.DidMeasurePage() + + benchmark_results.PrintSummary('trace_tag') + + def test_url_is_invalid_value(self): + page_set = _MakePageSet() + + benchmark_results = NonPrintingPageBenchmarkResults() + benchmark_results.WillMeasurePage(page_set.pages[0]) + self.assertRaises( + AssertionError, + lambda: benchmark_results.Add('url', 'string', 'foo')) + + def test_unit_change(self): + page_set = _MakePageSet() + + benchmark_results = NonPrintingPageBenchmarkResults() + benchmark_results.WillMeasurePage(page_set.pages[0]) + benchmark_results.Add('a', 'seconds', 3) + benchmark_results.DidMeasurePage() + + benchmark_results.WillMeasurePage(page_set.pages[1]) + self.assertRaises( + AssertionError, + lambda: benchmark_results.Add('a', 'foobgrobbers', 3)) + + def test_type_change(self): + page_set = _MakePageSet() + + benchmark_results = NonPrintingPageBenchmarkResults() + benchmark_results.WillMeasurePage(page_set.pages[0]) + benchmark_results.Add('a', 'seconds', 3) + benchmark_results.DidMeasurePage() + + benchmark_results.WillMeasurePage(page_set.pages[1]) + self.assertRaises( + AssertionError, + lambda: benchmark_results.Add('a', 
'seconds', 3, data_type='histogram')) + + def test_basic_summary(self): + page_set = _MakePageSet() + + benchmark_results = SummarySavingPageBenchmarkResults() + benchmark_results.WillMeasurePage(page_set.pages[0]) + benchmark_results.Add('a', 'seconds', 3) + benchmark_results.DidMeasurePage() + + benchmark_results.WillMeasurePage(page_set.pages[1]) + benchmark_results.Add('a', 'seconds', 7) + benchmark_results.DidMeasurePage() + + benchmark_results.PrintSummary(None) + expected = ['RESULT a_by_url: http___www.foo.com_= 3 seconds', + 'RESULT a_by_url: http___www.bar.com_= 7 seconds', + '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n' + + 'Sd a: 2.828427seconds'] + self.assertEquals( + benchmark_results.results, + expected) diff --git a/tools/telemetry/telemetry/page_benchmark_value.py b/tools/telemetry/telemetry/page_benchmark_value.py new file mode 100644 index 0000000..0a7c3a9 --- /dev/null +++ b/tools/telemetry/telemetry/page_benchmark_value.py @@ -0,0 +1,32 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +from telemetry.perf_tests_helper import GeomMeanAndStdDevFromHistogram + +def _Mean(l): + return float(sum(l)) / len(l) if len(l) > 0 else 0.0 + +class PageBenchmarkValue(object): + def __init__(self, trace_name, units, value, chart_name, data_type): + self.trace_name = trace_name + self.units = units + self.value = value + self.chart_name = chart_name + self.data_type = data_type + + @property + def measurement_name(self): + if self.chart_name: + return '%s.%s' % (self.chart_name, self.trace_name) + else: + return self.trace_name + + @property + def output_value(self): + if self.data_type == 'histogram': + return GeomMeanAndStdDevFromHistogram(self.value) + elif isinstance(self.value, list): + return _Mean(self.value) + else: + return self.value + diff --git a/tools/telemetry/telemetry/page_runner.py b/tools/telemetry/telemetry/page_runner.py index 09508b7..ea2b984 100644 --- a/tools/telemetry/telemetry/page_runner.py +++ b/tools/telemetry/telemetry/page_runner.py @@ -95,8 +95,10 @@ http://goto/read-src-internal, or create a new archive using record_wpr. # Check tracing directory. if options.trace_dir: + if not os.path.exists(options.trace_dir): + os.mkdir(options.trace_dir) if not os.path.isdir(options.trace_dir): - raise Exception('Trace directory doesn\'t exist: %s' % + raise Exception('--trace-dir isn\'t a directory: %s' % options.trace_dir) elif os.listdir(options.trace_dir): raise Exception('Trace directory isn\'t empty: %s' % options.trace_dir) diff --git a/tools/telemetry/telemetry/perf_tests_helper.py b/tools/telemetry/telemetry/perf_tests_helper.py new file mode 100644 index 0000000..8fa8963 --- /dev/null +++ b/tools/telemetry/telemetry/perf_tests_helper.py @@ -0,0 +1,25 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
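[Editor's note] The expected strings in test_basic_summary above can be checked by hand: PageBenchmarkValue.output_value passes scalars through (lists are reduced with _Mean), and PrintPerfResult then reports the arithmetic mean and the sample standard deviation across pages. For the values [3, 7]:

import math

values = [3, 7]
avg = float(sum(values)) / len(values)
sd = math.sqrt(sum((v - avg) ** 2 for v in values) / (len(values) - 1))
print 'Avg: %f' % avg  # 5.000000 -> 'Avg a: 5.000000seconds'
print 'Sd: %f' % sd    # 2.828427 -> 'Sd a: 2.828427seconds'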
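[Editor's note] Putting the pieces together, the lifecycle a benchmark run drives is WillMeasurePage, one or more Add calls, DidMeasurePage, and a single PrintSummary at the end. A sketch, assuming a Chromium checkout with tools/telemetry on sys.path; the page-set dict is the same shape the unit tests use, and the URLs and measurement names are placeholders:

import os

from telemetry.page_benchmark_results import PageBenchmarkResults
from telemetry.page_set import PageSet

page_set = PageSet.FromDict({
    'description': 'demo',
    'archive_path': 'demo.wpr',
    'pages': [{'url': 'http://www.example.com/'}],
}, os.getcwd())

results = PageBenchmarkResults()
results.WillMeasurePage(page_set.pages[0])
results.Add('load_time', 'ms', 120)
results.Add('load_time', 'ms', 90, chart_name='warm')  # -> 'warm.load_time'
results.DidMeasurePage()
results.PrintSummary(None)  # emits RESULT lines via PrintPerfResult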
+from __future__ import absolute_import + +import os +import sys + +def __init__(): + path = os.path.join(os.path.dirname(__file__), + '../../../build/android') + path = os.path.abspath(path) + assert os.path.exists(os.path.join(path, + 'pylib', '__init__.py')) + if path not in sys.path: + sys.path.append(path) + +__init__() + +from pylib import perf_tests_helper # pylint: disable=F0401 +GeomMeanAndStdDevFromHistogram = \ + perf_tests_helper.GeomMeanAndStdDevFromHistogram +PrintPerfResult = \ + perf_tests_helper.PrintPerfResult + diff --git a/tools/telemetry/telemetry/websocket.py b/tools/telemetry/telemetry/websocket.py index 6d0cab8..056f44a 100644 --- a/tools/telemetry/telemetry/websocket.py +++ b/tools/telemetry/telemetry/websocket.py @@ -8,8 +8,10 @@ import sys def __init__(): ws_path = os.path.join(os.path.dirname(__file__), '../third_party/websocket-client') + ws_path = os.path.abspath(ws_path) assert os.path.exists(os.path.join(ws_path, 'websocket.py')) - sys.path.append(ws_path) + if ws_path not in sys.path: + sys.path.append(ws_path) __init__()
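[Editor's note] The perf_tests_helper.py shim and the websocket.py fix above share the same import pattern, which generalizes: resolve the dependency to an absolute path, assert a sentinel file actually exists there, and append the path to sys.path only if absent, so repeated imports stay idempotent. In standalone form (the function name, path, and sentinel are placeholders):

import os
import sys

def _AddThirdPartyToPath(relative_path, sentinel):
  """Makes a checked-in dependency importable, exactly once."""
  path = os.path.abspath(
      os.path.join(os.path.dirname(__file__), relative_path))
  # Fail loudly if the checkout is missing the dependency.
  assert os.path.exists(os.path.join(path, sentinel)), (
      'dependency not found: %s' % path)
  if path not in sys.path:
    sys.path.append(path)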