author     eakuefner@chromium.org <eakuefner@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-08-05 07:50:55 +0000
committer  eakuefner@chromium.org <eakuefner@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-08-05 07:50:55 +0000
commit     bf322e97bb0a3101c7671fc92a2de14b939f2ffd (patch)
tree       3d293f9201b04a9db16f63998bb5b740b072de95 /tools/telemetry
parent     cc0d4be2389a0cc9eb37e50b4fd868916655ba21 (diff)
Plumb Telemetry test name through to results object creation
We want to be able to ask for the current benchmark's name when generating results. This threads the benchmark through so that its name is available to results objects.

Review URL: https://codereview.chromium.org/386943007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@287483 0039d316-1c4b-4281-b951-d872f2087c98
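In rough outline, the change swaps a runner-creates-results pattern for a caller-creates-results one. The following is a minimal, self-contained Python sketch of that calling convention, using stand-in classes and functions rather than the real Telemetry ones:

class BenchmarkMetadata(object):
    # Stand-in mirroring the small metadata holder added to telemetry/benchmark.py.
    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name


def CreateResults(metadata, finder_options):
    # Stand-in for results_options.CreateResults(); the real function also
    # picks an output formatter based on finder_options.output_format.
    return {'benchmark_name': metadata.name, 'failures': [], 'values': []}


def Run(test, page_set, expectations, finder_options, results):
    # Stand-in for page_runner.Run(); it now fills in the results object the
    # caller passed in and returns nothing, instead of building its own.
    pass


metadata = BenchmarkMetadata('my_benchmark')
results = CreateResults(metadata, finder_options=None)
Run(test=None, page_set=None, expectations=None,
    finder_options=None, results=results)
print(results['benchmark_name'])  # prints: my_benchmark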
Diffstat (limited to 'tools/telemetry')
-rw-r--r--  tools/telemetry/telemetry/benchmark.py                                33
-rw-r--r--  tools/telemetry/telemetry/page/page_measurement_unittest_base.py      12
-rw-r--r--  tools/telemetry/telemetry/page/page_runner.py                         12
-rw-r--r--  tools/telemetry/telemetry/page/page_runner_unittest.py                62
-rw-r--r--  tools/telemetry/telemetry/page/profile_generator.py                    8
-rwxr-xr-x  tools/telemetry/telemetry/page/record_wpr.py                          19
-rw-r--r--  tools/telemetry/telemetry/page/record_wpr_unittest.py                  6
-rw-r--r--  tools/telemetry/telemetry/results/html_output_formatter.py             8
-rw-r--r--  tools/telemetry/telemetry/results/html_output_formatter_unittest.py   10
-rw-r--r--  tools/telemetry/telemetry/results/json_output_formatter.py            15
-rw-r--r--  tools/telemetry/telemetry/results/json_output_formatter_unittest.py   12
-rw-r--r--  tools/telemetry/telemetry/results/results_options.py                   6
12 files changed, 136 insertions, 67 deletions
diff --git a/tools/telemetry/telemetry/benchmark.py b/tools/telemetry/telemetry/benchmark.py
index 47477a3..e5fcd2a 100644
--- a/tools/telemetry/telemetry/benchmark.py
+++ b/tools/telemetry/telemetry/benchmark.py
@@ -17,7 +17,7 @@ from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import test_expectations
-from telemetry.results import page_test_results
+from telemetry.results import results_options
from telemetry.util import cloud_storage
@@ -25,6 +25,14 @@ Disabled = decorators.Disabled
Enabled = decorators.Enabled
+class BenchmarkMetadata(object):
+ def __init__(self, name):
+ self._name = name
+
+ @property
+ def name(self):
+ return self._name
+
class Benchmark(command_line.Command):
"""Base class for a Telemetry benchmark.
@@ -62,26 +70,29 @@ class Benchmark(command_line.Command):
def CustomizeBrowserOptions(self, options):
"""Add browser options that are required by this benchmark."""
- def Run(self, args):
+ def GetMetadata(self):
+ return BenchmarkMetadata(self.Name())
+
+ def Run(self, finder_options):
"""Run this test with the given options."""
- self.CustomizeBrowserOptions(args.browser_options)
+ self.CustomizeBrowserOptions(finder_options.browser_options)
- test = self.PageTestClass()()
- test.__name__ = self.__class__.__name__
+ pt = self.PageTestClass()()
+ pt.__name__ = self.__class__.__name__
if hasattr(self, '_disabled_strings'):
- test._disabled_strings = self._disabled_strings
+ pt._disabled_strings = self._disabled_strings
if hasattr(self, '_enabled_strings'):
- test._enabled_strings = self._enabled_strings
+ pt._enabled_strings = self._enabled_strings
- ps = self.CreatePageSet(args)
+ ps = self.CreatePageSet(finder_options)
expectations = self.CreateExpectations(ps)
- self._DownloadGeneratedProfileArchive(args)
+ self._DownloadGeneratedProfileArchive(finder_options)
- results = page_test_results.PageTestResults()
+ results = results_options.CreateResults(self.GetMetadata(), finder_options)
try:
- results = page_runner.Run(test, ps, expectations, args)
+ page_runner.Run(pt, ps, expectations, finder_options, results)
except page_test.TestNotSupportedOnPlatformFailure as failure:
logging.warning(str(failure))
diff --git a/tools/telemetry/telemetry/page/page_measurement_unittest_base.py b/tools/telemetry/telemetry/page/page_measurement_unittest_base.py
index 088fa23..fdd3730 100644
--- a/tools/telemetry/telemetry/page/page_measurement_unittest_base.py
+++ b/tools/telemetry/telemetry/page/page_measurement_unittest_base.py
@@ -1,15 +1,17 @@
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
+from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from telemetry.page import page_test
+from telemetry.results import results_options
from telemetry.page import test_expectations
from telemetry.unittest import options_for_unittests
@@ -25,6 +27,10 @@ class BasicTestPage(page_module.Page):
interaction.End()
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+ def __init__(self):
+ super(EmptyMetadataForTest, self).__init__('')
+
class PageMeasurementUnitTestBase(unittest.TestCase):
"""unittest.TestCase-derived class to help in the construction of unit tests
for a measurement."""
@@ -63,7 +69,9 @@ class PageMeasurementUnitTestBase(unittest.TestCase):
options.output_trace_tag = None
page_runner.ProcessCommandLineArgs(temp_parser, options)
measurement.ProcessCommandLineArgs(temp_parser, options)
- return page_runner.Run(measurement, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(measurement, ps, expectations, options, results)
+ return results
def TestTracingCleanedUp(self, measurement_class, options=None):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
diff --git a/tools/telemetry/telemetry/page/page_runner.py b/tools/telemetry/telemetry/page/page_runner.py
index 4dfa2ca..9b41e7b 100644
--- a/tools/telemetry/telemetry/page/page_runner.py
+++ b/tools/telemetry/telemetry/page/page_runner.py
@@ -1,4 +1,4 @@
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -327,10 +327,8 @@ def _UpdatePageSetArchivesIfChanged(page_set):
cloud_storage.GetIfChanged(path, page_set.bucket)
-def Run(test, page_set, expectations, finder_options):
+def Run(test, page_set, expectations, finder_options, results):
"""Runs a given test against a given page_set with the given options."""
- results = results_options.PrepareResults(test, finder_options)
-
test.ValidatePageSet(page_set)
# Create a possible_browser with the given options.
@@ -357,7 +355,7 @@ def Run(test, page_set, expectations, finder_options):
if not should_run:
logging.warning('You are trying to run a disabled test.')
logging.warning('Pass --also-run-disabled-tests to squelch this message.')
- return results
+ return
# Reorder page set based on options.
pages = _ShuffleAndFilterPageSet(page_set, finder_options)
@@ -392,7 +390,7 @@ def Run(test, page_set, expectations, finder_options):
pages.remove(page)
if not pages:
- return results
+ return
state = _RunState()
# TODO(dtu): Move results creation and results_for_current_run into RunState.
@@ -435,7 +433,7 @@ def Run(test, page_set, expectations, finder_options):
finally:
state.StopBrowser()
- return results
+ return
def _ShuffleAndFilterPageSet(page_set, finder_options):
diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py
index 701c293..20c64ee 100644
--- a/tools/telemetry/telemetry/page/page_runner_unittest.py
+++ b/tools/telemetry/telemetry/page/page_runner_unittest.py
@@ -1,4 +1,4 @@
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,6 +7,7 @@ import os
import tempfile
import unittest
+from telemetry import benchmark
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import exceptions
@@ -18,6 +19,7 @@ from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import test_expectations
+from telemetry.results import results_options
from telemetry.unittest import options_for_unittests
from telemetry.value import scalar
from telemetry.value import string
@@ -39,6 +41,9 @@ def SetUpPageRunnerArguments(options):
options.MergeDefaultValues(parser.get_default_values())
page_runner.ProcessCommandLineArgs(parser, options)
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+ def __init__(self):
+ super(EmptyMetadataForTest, self).__init__('')
class StubCredentialsBackend(object):
def __init__(self, login_return_value):
@@ -79,7 +84,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Test(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Test(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -108,7 +114,8 @@ class PageRunnerTests(unittest.TestCase):
options.output_format = 'none'
test = Test()
SetUpPageRunnerArguments(options)
- results = page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals(2, test.run_count)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -127,8 +134,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- results = page_runner.Run(
- Test(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Test(), ps, expectations, options, results)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -153,7 +160,8 @@ class PageRunnerTests(unittest.TestCase):
options.output_format = 'csv'
SetUpPageRunnerArguments(options)
- results = page_runner.Run(CrashyMeasurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(CrashyMeasurement(), ps, expectations, options, results)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -187,7 +195,8 @@ class PageRunnerTests(unittest.TestCase):
options.page_repeat = 1
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Measurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.all_page_specific_values))
@@ -195,7 +204,8 @@ class PageRunnerTests(unittest.TestCase):
options.page_repeat = 1
options.pageset_repeat = 2
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Measurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(results.all_page_specific_values))
@@ -203,7 +213,8 @@ class PageRunnerTests(unittest.TestCase):
options.page_repeat = 2
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Measurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(results.all_page_specific_values))
@@ -212,7 +223,8 @@ class PageRunnerTests(unittest.TestCase):
options.page_repeat = 1
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Measurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.all_page_specific_values))
@@ -245,7 +257,8 @@ class PageRunnerTests(unittest.TestCase):
options.page_repeat = 1
options.pageset_repeat = 2
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Measurement(), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
results.PrintSummary()
self.assertEquals(4, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -306,7 +319,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
finally:
os.remove(f.name)
@@ -335,7 +349,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
@@ -364,7 +379,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
# Ensure that page_runner allows the test to customize the browser before it
# launches.
@@ -396,7 +412,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
def testRunPageWithStartupUrl(self):
ps = page_set.PageSet()
@@ -426,7 +443,8 @@ class PageRunnerTests(unittest.TestCase):
return
test = Measurement()
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals('about:blank', options.browser_options.startup_url)
self.assertTrue(test.browser_restarted)
@@ -454,7 +472,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
assert test.did_call_clean_up
# Ensure skipping the test if page cannot be run on the browser
@@ -490,7 +509,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- results = page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertFalse(test.will_navigate_to_page_called)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -519,7 +539,8 @@ class PageRunnerTests(unittest.TestCase):
pass
test = ArchiveTest()
- page_runner.Run(test, ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
if expect_from_archive and not test.archive_path_exist:
logging.warning('archive path did not exist, asserting that page '
'is from archive is skipped.')
@@ -574,7 +595,8 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
- results = page_runner.Run(Test(max_failures=2), ps, expectations, options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Test(max_failures=2), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
# Runs up to max_failures+1 failing tests before stopping, since
# every tests after max_failures failures have been encountered
diff --git a/tools/telemetry/telemetry/page/profile_generator.py b/tools/telemetry/telemetry/page/profile_generator.py
index 1a6c03e..66e3e3a 100644
--- a/tools/telemetry/telemetry/page/profile_generator.py
+++ b/tools/telemetry/telemetry/page/profile_generator.py
@@ -1,4 +1,4 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -12,12 +12,14 @@ import stat
import sys
import tempfile
+from telemetry import benchmark
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import profile_creator
from telemetry.page import test_expectations
+from telemetry.results import results_options
def _DiscoverProfileCreatorClasses():
@@ -72,7 +74,9 @@ def GenerateProfiles(profile_creator_class, profile_creator_name, options):
temp_output_directory = tempfile.mkdtemp()
options.output_profile_path = temp_output_directory
- results = page_runner.Run(test, test.page_set, expectations, options)
+ results = results_options.CreateResults(
+ benchmark.BenchmarkMetadata(test.__class__.__name__), options)
+ page_runner.Run(test, test.page_set, expectations, options, results)
if results.failures:
logging.warning('Some pages failed.')
diff --git a/tools/telemetry/telemetry/page/record_wpr.py b/tools/telemetry/telemetry/page/record_wpr.py
index 9d46c9c..2984ef4 100755
--- a/tools/telemetry/telemetry/page/record_wpr.py
+++ b/tools/telemetry/telemetry/page/record_wpr.py
@@ -17,6 +17,7 @@ from telemetry.page import page_test
from telemetry.page import profile_creator
from telemetry.page import test_expectations
from telemetry.results import page_measurement_results
+from telemetry.results import results_options
class RecorderPageTest(page_test.PageTest): # pylint: disable=W0223
@@ -113,7 +114,6 @@ def _MaybeGetInstanceOfClass(target, base_dir, cls):
class WprRecorder(object):
-
def __init__(self, base_dir, target, args=None):
action_names_to_run = FindAllActionNames(base_dir)
self._record_page_test = RecorderPageTest(action_names_to_run)
@@ -139,6 +139,14 @@ class WprRecorder(object):
options.browser_options.no_proxy_server = True
return options
+ def CreateResults(self):
+ if self._benchmark is not None:
+ benchmark_metadata = self._benchmark.GetMetadata()
+ else:
+ benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')
+
+ return results_options.CreateResults(benchmark_metadata, self._options)
+
def _AddCommandLineArgs(self):
page_runner.AddCommandLineArgs(self._parser)
if self._benchmark is not None:
@@ -163,11 +171,11 @@ class WprRecorder(object):
sys.exit(1)
return ps
- def Record(self):
+ def Record(self, results):
self._page_set.wpr_archive_info.AddNewTemporaryRecording()
self._record_page_test.CustomizeBrowserOptions(self._options)
- return page_runner.Run(self._record_page_test, self._page_set,
- test_expectations.TestExpectations(), self._options)
+ page_runner.Run(self._record_page_test, self._page_set,
+ test_expectations.TestExpectations(), self._options, results)
def HandleResults(self, results):
if results.failures or results.skipped_values:
@@ -185,6 +193,7 @@ def Main(base_dir):
sys.exit(1)
target = quick_args.pop()
wpr_recorder = WprRecorder(base_dir, target)
- results = wpr_recorder.Record()
+ results = wpr_recorder.CreateResults()
+ wpr_recorder.Record(results)
wpr_recorder.HandleResults(results)
return min(255, len(results.failures))
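One design note on the hunks above: WprRecorder.CreateResults prefers the benchmark's own metadata and only falls back to a generic 'record_wpr' name when the recorder is run without a benchmark. Reduced to a standalone sketch (a stand-in metadata class, not the real telemetry one):

class FakeBenchmarkMetadata(object):
    # Stand-in for benchmark.BenchmarkMetadata; only carries a name.
    def __init__(self, name):
        self.name = name


def ChooseMetadata(bench):
    # bench is None when record_wpr runs standalone, or an object exposing
    # GetMetadata() when a benchmark was named on the command line.
    if bench is not None:
        return bench.GetMetadata()
    return FakeBenchmarkMetadata('record_wpr')


print(ChooseMetadata(None).name)  # prints: record_wpr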
diff --git a/tools/telemetry/telemetry/page/record_wpr_unittest.py b/tools/telemetry/telemetry/page/record_wpr_unittest.py
index 75ae016..3da7384 100644
--- a/tools/telemetry/telemetry/page/record_wpr_unittest.py
+++ b/tools/telemetry/telemetry/page/record_wpr_unittest.py
@@ -144,7 +144,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
mock_page_set = MockPageSet(url=self._url)
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
mock_page_set, flags)
- results = wpr_recorder.Record()
+ results = wpr_recorder.CreateResults()
+ wpr_recorder.Record(results)
self.assertEqual(set(mock_page_set.pages), results.pages_that_succeeded)
def testWprRecorderWithBenchmark(self):
@@ -152,7 +153,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
mock_benchmark = MockBenchmark()
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
flags)
- results = wpr_recorder.Record()
+ results = wpr_recorder.CreateResults()
+ wpr_recorder.Record(results)
self.assertEqual(set(mock_benchmark.mock_page_set.pages),
results.pages_that_succeeded)
diff --git a/tools/telemetry/telemetry/results/html_output_formatter.py b/tools/telemetry/telemetry/results/html_output_formatter.py
index ec8966f..96ca479 100644
--- a/tools/telemetry/telemetry/results/html_output_formatter.py
+++ b/tools/telemetry/telemetry/results/html_output_formatter.py
@@ -30,12 +30,12 @@ _UNIT_JSON = ('tools', 'perf', 'unit-info.json')
# Leaving as-is now since we are going to move HtmlOutputFormatter to be
# based on JSON anyway.
class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
- def __init__(self, output_stream, test_name, reset_results, upload_results,
+ def __init__(self, output_stream, metadata, reset_results, upload_results,
browser_type, results_label=None, trace_tag=''):
# Pass output_stream=None so that we blow up if
# BuildbotOutputFormatter ever use the output_stream.
super(HtmlOutputFormatter, self).__init__(None, trace_tag)
- self._test_name = test_name
+ self._metadata = metadata
self._reset_results = reset_results
self._upload_results = upload_results
self._html_output_stream = output_stream
@@ -101,6 +101,10 @@ class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
'important': result_type == 'default'
}
+ @property
+ def _test_name(self):
+ return self._metadata.name
+
def GetResults(self):
return self._result
diff --git a/tools/telemetry/telemetry/results/html_output_formatter_unittest.py b/tools/telemetry/telemetry/results/html_output_formatter_unittest.py
index 8b4b0a0c..693d6c5 100644
--- a/tools/telemetry/telemetry/results/html_output_formatter_unittest.py
+++ b/tools/telemetry/telemetry/results/html_output_formatter_unittest.py
@@ -5,6 +5,7 @@ import os
import StringIO
import unittest
+from telemetry import benchmark
from telemetry.page import page_set
from telemetry.results import html_output_formatter
from telemetry.results import page_test_results
@@ -27,6 +28,9 @@ class DeterministicHtmlOutputFormatter(
def _GetRevision(self):
return 'revision'
+class FakeMetadataForTest(benchmark.BenchmarkMetadata):
+ def __init__(self):
+ super(FakeMetadataForTest, self).__init__('test_name')
# Wrap string IO with a .name property so that it behaves more like a file.
class StringIOFile(StringIO.StringIO):
@@ -54,7 +58,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
results.DidRunPage(test_page_set.pages[1])
formatter = DeterministicHtmlOutputFormatter(
- output_file, 'test_name', False, False, 'browser_type')
+ output_file, FakeMetadataForTest(), False, False, 'browser_type')
formatter.Format(results)
expected = {
"platform": "browser_type",
@@ -111,7 +115,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
results.DidRunPage(test_page_set.pages[1])
formatter = DeterministicHtmlOutputFormatter(
- output_file, 'test_name', False, False, 'browser_type')
+ output_file, FakeMetadataForTest(), False, False, 'browser_type')
formatter.Format(results)
expected = [
{
@@ -207,7 +211,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
results.DidRunPage(test_page_set.pages[1])
formatter = DeterministicHtmlOutputFormatter(
- output_file, 'test_name', True, False, 'browser_type')
+ output_file, FakeMetadataForTest(), True, False, 'browser_type')
formatter.Format(results)
expected = [{
"platform": "browser_type",
diff --git a/tools/telemetry/telemetry/results/json_output_formatter.py b/tools/telemetry/telemetry/results/json_output_formatter.py
index 1b1496f..09e05a8 100644
--- a/tools/telemetry/telemetry/results/json_output_formatter.py
+++ b/tools/telemetry/telemetry/results/json_output_formatter.py
@@ -6,9 +6,10 @@ import json
from telemetry.results import output_formatter
-def ResultsAsDict(res):
+def ResultsAsDict(res, metadata):
result_dict = {
- 'format_version': '0.1',
+ 'format_version': '0.2',
+ 'benchmark_name': metadata.name,
'summary_values': [v.AsDict() for v in res.all_summary_values],
'per_page_values': [v.AsDict() for v in res.all_page_specific_values],
'pages': dict((p.id, p.AsDict()) for p in _all_pages(res))
@@ -21,9 +22,15 @@ def _all_pages(res):
return pages
class JsonOutputFormatter(output_formatter.OutputFormatter):
- def __init__(self, output_stream):
+ def __init__(self, output_stream, metadata):
super(JsonOutputFormatter, self).__init__(output_stream)
+ self._metadata = metadata
+
+ @property
+ def metadata(self):
+ return self._metadata
def Format(self, page_test_results):
- json.dump(ResultsAsDict(page_test_results), self.output_stream)
+ json.dump(ResultsAsDict(page_test_results, self.metadata),
+ self.output_stream)
self.output_stream.write('\n')
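For reference, the JSON formatter's output now carries the benchmark name alongside a bumped format version. An illustrative payload (values invented; keys taken from ResultsAsDict above) can be produced like so:

import json

# Illustrative only: field values are made up, keys follow ResultsAsDict above.
example = {
    'format_version': '0.2',
    'benchmark_name': 'my_benchmark',
    'summary_values': [],
    'per_page_values': [],
    'pages': {},
}
print(json.dumps(example, indent=2, sort_keys=True))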
diff --git a/tools/telemetry/telemetry/results/json_output_formatter_unittest.py b/tools/telemetry/telemetry/results/json_output_formatter_unittest.py
index 7b59c17..0a40432 100644
--- a/tools/telemetry/telemetry/results/json_output_formatter_unittest.py
+++ b/tools/telemetry/telemetry/results/json_output_formatter_unittest.py
@@ -6,9 +6,9 @@ import os
import unittest
import json
+from telemetry import benchmark
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
-from telemetry.results.json_output_formatter import ResultsAsDict
from telemetry.page import page_set
from telemetry.value import scalar
@@ -25,12 +25,12 @@ def _HasPage(pages, page):
def _HasValueNamed(values, name):
return len([x for x in values if x['name'] == name]) == 1
-
class JsonOutputFormatterTest(unittest.TestCase):
def setUp(self):
self._output = StringIO.StringIO()
self._page_set = _MakePageSet()
- self._formatter = json_output_formatter.JsonOutputFormatter(self._output)
+ self._formatter = json_output_formatter.JsonOutputFormatter(self._output,
+ benchmark.BenchmarkMetadata('test_name'))
def testOutputAndParse(self):
results = page_test_results.PageTestResults()
@@ -52,7 +52,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
results.AddValue(v0)
results.DidRunPage(self._page_set[0])
- d = ResultsAsDict(results)
+ d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo'))
@@ -69,7 +69,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
results.AddValue(v1)
results.DidRunPage(self._page_set[1])
- d = ResultsAsDict(results)
+ d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
self.assertTrue(_HasPage(d['pages'], self._page_set[1]))
@@ -81,7 +81,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
v = scalar.ScalarValue(None, 'baz', 'seconds', 5)
results.AddSummaryValue(v)
- d = ResultsAsDict(results)
+ d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
self.assertFalse(d['pages'])
self.assertTrue(_HasValueNamed(d['summary_values'], 'baz'))
diff --git a/tools/telemetry/telemetry/results/results_options.py b/tools/telemetry/telemetry/results/results_options.py
index edb161f..1d330cf 100644
--- a/tools/telemetry/telemetry/results/results_options.py
+++ b/tools/telemetry/telemetry/results/results_options.py
@@ -43,7 +43,7 @@ def AddResultsOptions(parser):
parser.add_option_group(group)
-def PrepareResults(test, options):
+def CreateResults(metadata, options):
# TODO(chrishenry): This logic prevents us from having multiple
# OutputFormatters. We should have an output_file per OutputFormatter.
# Maybe we should have --output-dir instead of --output-file?
@@ -85,12 +85,12 @@ def PrepareResults(test, options):
output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
sys.stdout, trace_tag=options.output_trace_tag))
output_formatters.append(html_output_formatter.HtmlOutputFormatter(
- output_stream, test.__class__.__name__, options.reset_results,
+ output_stream, metadata, options.reset_results,
options.upload_results, options.browser_type,
options.results_label, trace_tag=options.output_trace_tag))
elif options.output_format == 'json':
output_formatters.append(json_output_formatter.JsonOutputFormatter(
- output_stream))
+ output_stream, metadata))
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'