summaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorhartmanng@chromium.org <hartmanng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-12-01 23:09:54 +0000
committerhartmanng@chromium.org <hartmanng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-12-01 23:09:54 +0000
commitdf2d1010ff9843074a84d33830a40d628c271bc2 (patch)
treecd0a47cf413f68b6b25c8c9c7a2e860ecaa0dfc8 /tools
parent52e97402cfdbffff431879b0f123050f973badd4 (diff)
downloadchromium_src-df2d1010ff9843074a84d33830a40d628c271bc2.zip
chromium_src-df2d1010ff9843074a84d33830a40d628c271bc2.tar.gz
chromium_src-df2d1010ff9843074a84d33830a40d628c271bc2.tar.bz2
[telemetry] Adding --output-format option to enable more user-friendly output.
BUG=162923 Review URL: https://chromiumcodereview.appspot.com/11348284 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@170678 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r--tools/telemetry/telemetry/multi_page_benchmark.py57
-rwxr-xr-xtools/telemetry/telemetry/multi_page_benchmark_runner.py16
2 files changed, 60 insertions(+), 13 deletions(-)
diff --git a/tools/telemetry/telemetry/multi_page_benchmark.py b/tools/telemetry/telemetry/multi_page_benchmark.py
index b24374c..a7a3d11 100644
--- a/tools/telemetry/telemetry/multi_page_benchmark.py
+++ b/tools/telemetry/telemetry/multi_page_benchmark.py
@@ -86,21 +86,16 @@ results! You must return the same dict keys every time."""
PrintPerfResult(measurement, trace, values, units, data_type)
-class CsvBenchmarkResults(BenchmarkResults):
- def __init__(self, results_writer):
- super(CsvBenchmarkResults, self).__init__()
- self._results_writer = results_writer
- self._did_write_header = False
+class IncrementalBenchmarkResults(BenchmarkResults):
+ def __init__(self):
+ super(IncrementalBenchmarkResults, self).__init__()
+ self._did_process_header = False
def DidMeasurePage(self):
- super(CsvBenchmarkResults, self).DidMeasurePage()
+ super(IncrementalBenchmarkResults, self).DidMeasurePage()
- if not self._did_write_header:
- self._did_write_header = True
- row = ['url']
- for name in self.field_names:
- row.append('%s (%s)' % (name, self.field_units[name]))
- self._results_writer.writerow(row)
+ if not self._did_process_header:
+ self.ProcessHeader()
row = [self._page.url]
for name in self.field_names:
@@ -112,8 +107,46 @@ class CsvBenchmarkResults(BenchmarkResults):
row.append(_Mean(value))
else:
row.append(value)
+ self.OutputRow(row)
+
+ def OutputRow(self, row):
+ raise NotImplementedError()
+
+ def ProcessHeader(self):
+ raise NotImplementedError()
+
+class CsvBenchmarkResults(IncrementalBenchmarkResults):
+ def __init__(self, results_writer):
+ super(CsvBenchmarkResults, self).__init__()
+ self._results_writer = results_writer
+
+ def OutputRow(self, row):
self._results_writer.writerow(row)
+ def ProcessHeader(self):
+ self._did_process_header = True
+ row = ['url']
+ for name in self.field_names:
+ row.append('%s (%s)' % (name, self.field_units[name]))
+ self.OutputRow(row)
+
+class TerminalBlockBenchmarkResults(IncrementalBenchmarkResults):
+ def __init__(self, output_location):
+ super(TerminalBlockBenchmarkResults, self).__init__()
+ self._output_location = output_location
+ self._header_row = None
+
+ def OutputRow(self, row):
+ for i in range(len(row)):
+ print >> self._output_location, '%s:' % self._header_row[i], row[i]
+ print >> self._output_location
+
+ def ProcessHeader(self):
+ self._did_process_header = True
+ self._header_row = ['url']
+ for name in self.field_names:
+ self._header_row.append('%s (%s)' % (name, self.field_units[name]))
+
# TODO(nduca): Rename to page_benchmark
class MultiPageBenchmark(page_test.PageTest):
diff --git a/tools/telemetry/telemetry/multi_page_benchmark_runner.py b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
index 89fc5e5..002e559 100755
--- a/tools/telemetry/telemetry/multi_page_benchmark_runner.py
+++ b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
@@ -34,6 +34,12 @@ def Main(benchmark_dir):
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
+ parser.add_option('--output-format',
+ dest='output_format',
+ default='csv',
+ help='Output format. Can be "csv" or "terminal-block". '
+ 'Defaults to "%default".')
+
benchmark = None
if benchmark_name is not None:
benchmark = benchmarks[benchmark_name]()
@@ -60,7 +66,15 @@ def Main(benchmark_dir):
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
- results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
+ if options.output_format == 'csv':
+ results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
+ elif options.output_format == 'terminal-block':
+ results = multi_page_benchmark.TerminalBlockBenchmarkResults(sys.stdout)
+ else:
+ raise Exception('Invalid --output-format value: "%s". Valid values are '
+ '"csv" and "terminal-block".'
+ % options.output_format)
+
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, benchmark, results)
# When using an exact executable, assume it is a reference build for the