author    | zhenw <zhenw@chromium.org>           | 2015-07-16 16:39:24 -0700
committer | Commit bot <commit-bot@chromium.org> | 2015-07-16 23:41:01 +0000
commit    | f552e26b4b7dd5e5b42088f0364a77b5193ea803 (patch)
tree      | 89a0e9dae377b998a5f7ea5ece8b20af324e91d7
parent    | 83dbc5011d9c708003457b38cbfd33ae563b4ff5 (diff)
[Startup Tracing][Telemetry] Add StoryTest to Telemetry
This CL adds a StoryTest interface to Telemetry's timeline-based measurement. It is part of the effort to deprecate the page_test hooks, and it is also needed for startup tracing. A minimal usage sketch of the new interface follows the file stats below.
Design doc:
https://docs.google.com/document/d/1yRCXhrQ-0rsfUgNHt9T4YdnmJYrXKN6aK56Ozk3kPVc/edit#heading=h.7suon55jg9u4
BUG=455391, 317481, 482098
CQ_EXTRA_TRYBOTS=tryserver.chromium.perf:linux_perf_bisect;tryserver.chromium.perf:mac_perf_bisect;tryserver.chromium.perf:win_perf_bisect;tryserver.chromium.perf:android_nexus5_perf_bisect
Review URL: https://codereview.chromium.org/1222003003
Cr-Commit-Position: refs/heads/master@{#339169}
8 files changed, 146 insertions, 31 deletions
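
The interface introduced by this CL has three hooks that bracket a story run. As a minimal sketch of how a custom story test would use it: the subclass name and comments below are illustrative, and only the three hook signatures come from the new telemetry/web_perf/story_test.py in the diff that follows.

```python
# Minimal sketch of a StoryTest subclass using the interface added in this CL.
# ExampleStoryTest is a hypothetical name; only the hook signatures come from
# telemetry/web_perf/story_test.py below.
from telemetry.web_perf import story_test


class ExampleStoryTest(story_test.StoryTest):

  def WillRunStory(self, platform):
    # Runs before state.WillRunStory: start any platform-level
    # instrumentation (e.g. tracing) here.
    pass

  def Measure(self, platform, results):
    # Runs only if state.RunStory succeeded: turn the collected data into
    # values and add them to |results|.
    pass

  def DidRunStory(self, platform):
    # Runs after state.DidRunStory, even when the story run failed: clean up.
    pass
```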
diff --git a/components/cronet/android/test/javaperftests/run.py b/components/cronet/android/test/javaperftests/run.py
index e8fb683..7c822b0 100755
--- a/components/cronet/android/test/javaperftests/run.py
+++ b/components/cronet/android/test/javaperftests/run.py
@@ -165,12 +165,13 @@ class CronetPerfTestMeasurement(
     super(CronetPerfTestMeasurement, self).__init__(options)
     self._adb = adb
 
-  def WillRunStory(self, tracing_controller, synthetic_delay_categories=None):
+  def WillRunStoryForPageTest(self, tracing_controller,
+                              synthetic_delay_categories=None):
     # Skip parent implementation which doesn't apply to Cronet perf test app as
     # it is not a browser with a timeline interface.
     pass
 
-  def Measure(self, tracing_controller, results):
+  def MeasureForPageTest(self, tracing_controller, results):
     # Reads results from |RESULTS_FILE| on target and adds to |results|.
     jsonResults = json.loads(self._adb.GetFileContents(RESULTS_FILE)[0])
     for test in jsonResults:
diff --git a/tools/perf/measurements/smoothness.py b/tools/perf/measurements/smoothness.py
index b88b6fc..9aec942 100644
--- a/tools/perf/measurements/smoothness.py
+++ b/tools/perf/measurements/smoothness.py
@@ -45,16 +45,16 @@ class Smoothness(page_test.PageTest):
     self._tbm = timeline_based_measurement.TimelineBasedMeasurement(
         timeline_based_measurement.Options(category_filter),
         _CustomResultsWrapper)
-    self._tbm.WillRunStory(
+    self._tbm.WillRunStoryForPageTest(
         tracing_controller, page.GetSyntheticDelayCategories())
 
   def ValidateAndMeasurePage(self, _, tab, results):
     tracing_controller = tab.browser.platform.tracing_controller
-    self._tbm.Measure(tracing_controller, results)
+    self._tbm.MeasureForPageTest(tracing_controller, results)
 
   def CleanUpAfterPage(self, _, tab):
     tracing_controller = tab.browser.platform.tracing_controller
-    self._tbm.DidRunStory(tracing_controller)
+    self._tbm.DidRunStoryForPageTest(tracing_controller)
     tab.ExecuteJavaScript('window.gc();')
diff --git a/tools/telemetry/telemetry/internal/story_runner.py b/tools/telemetry/telemetry/internal/story_runner.py
index 0226bee..6d980e3 100644
--- a/tools/telemetry/telemetry/internal/story_runner.py
+++ b/tools/telemetry/telemetry/internal/story_runner.py
@@ -18,6 +18,7 @@ from telemetry import story as story_module
 from telemetry.util import wpr_modes
 from telemetry.value import failure
 from telemetry.value import skip
+from telemetry.web_perf import story_test
 
 
 class ArchiveError(Exception):
@@ -64,10 +65,12 @@ def ProcessCommandLineArgs(parser, args):
     parser.error('--pageset-repeat must be a positive integer.')
 
 
-def _RunStoryAndProcessErrorIfNeeded(story, results, state):
+def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
   def ProcessError():
     results.AddValue(failure.FailureValue(story, sys.exc_info()))
   try:
+    if isinstance(test, story_test.StoryTest):
+      test.WillRunStory(state.platform)
     state.WillRunStory(story)
     if not state.CanRunStory(story):
       results.AddValue(skip.SkipValue(
@@ -76,6 +79,8 @@ def _RunStoryAndProcessErrorIfNeeded(story, results, state):
           '(SharedState.CanRunStory() returns False).'))
       return
     state.RunStory(results)
+    if isinstance(test, story_test.StoryTest):
+      test.Measure(state.platform, results)
   except (page_test.Failure, exceptions.TimeoutException,
           exceptions.LoginException, exceptions.ProfilingException):
     ProcessError()
@@ -94,12 +99,17 @@ def _RunStoryAndProcessErrorIfNeeded(story, results, state):
     has_existing_exception = sys.exc_info() is not None
     try:
       state.DidRunStory(results)
+      # if state.DidRunStory raises exception, things are messed up badly and we
+      # do not need to run test.DidRunStory at that point.
+      if isinstance(test, story_test.StoryTest):
+        test.DidRunStory(state.platform)
     except Exception:
       if not has_existing_exception:
         raise
       # Print current exception and propagate existing exception.
       exception_formatter.PrintFormattedException(
-          msg='Exception from DidRunStory: ')
+          msg='Exception raised when cleaning story run: ')
+
 
 class StoryGroup(object):
   def __init__(self, shared_state_class):
@@ -201,8 +211,7 @@ def Run(test, story_set, finder_options, results, max_failures=None):
       results.WillRunPage(story)
       try:
         _WaitForThermalThrottlingIfNeeded(state.platform)
-        _RunStoryAndProcessErrorIfNeeded(
-            story, results, state)
+        _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
       except exceptions.Error:
         # Catch all Telemetry errors to give the story a chance to retry.
         # The retry is enabled by tearing down the state and creating
diff --git a/tools/telemetry/telemetry/internal/story_runner_unittest.py b/tools/telemetry/telemetry/internal/story_runner_unittest.py
index e4f0d44..01ebf49 100644
--- a/tools/telemetry/telemetry/internal/story_runner_unittest.py
+++ b/tools/telemetry/telemetry/internal/story_runner_unittest.py
@@ -23,9 +23,14 @@ from telemetry.testing import system_stub
 from telemetry.value import list_of_scalar_values
 from telemetry.value import scalar
 from telemetry.value import summary as summary_module
+from telemetry.web_perf import story_test
 from telemetry.web_perf import timeline_based_measurement
 from telemetry.wpr import archive_info
 
+# Import Python mock module (https://pypi.python.org/pypi/mock)
+util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'mock')
+import mock  # pylint: disable=import-error
+
 # This linter complains if we define classes nested inside functions.
 # pylint: disable=bad-super-call
 
@@ -239,6 +244,62 @@ class StoryRunnerTest(unittest.TestCase):
     self.assertEquals(0, len(self.results.failures))
     self.assertEquals(3, GetNumberOfSuccessfulPageRuns(self.results))
 
+  def testCallOrderBetweenStoryTestAndSharedState(self):
+    """Check that the call order between StoryTest and SharedState is correct.
+    """
+    TEST_WILL_RUN_STORY = 'test.WillRunStory'
+    TEST_MEASURE = 'test.Measure'
+    TEST_DID_RUN_STORY = 'test.DidRunStory'
+    STATE_WILL_RUN_STORY = 'state.WillRunStory'
+    STATE_RUN_STORY = 'state.RunStory'
+    STATE_DID_RUN_STORY = 'state.DidRunStory'
+
+    EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
+                               STATE_WILL_RUN_STORY,
+                               STATE_RUN_STORY,
+                               TEST_MEASURE,
+                               STATE_DID_RUN_STORY,
+                               TEST_DID_RUN_STORY]
+
+    class TestStoryTest(story_test.StoryTest):
+      def WillRunStory(self, platform):
+        pass
+
+      def Measure(self, platform, results):
+        pass
+
+      def DidRunStory(self, platform):
+        pass
+
+    class TestSharedStateForStoryTest(TestSharedState):
+      def RunStory(self, results):
+        pass
+
+    @mock.patch.object(TestStoryTest, 'WillRunStory')
+    @mock.patch.object(TestStoryTest, 'Measure')
+    @mock.patch.object(TestStoryTest, 'DidRunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'WillRunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'RunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'DidRunStory')
+    def GetCallsInOrder(state_DidRunStory, state_RunStory, state_WillRunStory,
+                        test_DidRunStory, test_Measure, test_WillRunStory):
+      manager = mock.MagicMock()
+      manager.attach_mock(test_WillRunStory, TEST_WILL_RUN_STORY)
+      manager.attach_mock(test_Measure, TEST_MEASURE)
+      manager.attach_mock(test_DidRunStory, TEST_DID_RUN_STORY)
+      manager.attach_mock(state_WillRunStory, STATE_WILL_RUN_STORY)
+      manager.attach_mock(state_RunStory, STATE_RUN_STORY)
+      manager.attach_mock(state_DidRunStory, STATE_DID_RUN_STORY)
+
+      test = TestStoryTest()
+      story_set = story_module.StorySet()
+      story_set.AddStory(DummyLocalStory(TestSharedStateForStoryTest))
+      story_runner.Run(test, story_set, self.options, self.results)
+      return [call[0] for call in manager.mock_calls]
+
+    calls_in_order = GetCallsInOrder()  # pylint: disable=no-value-for-parameter
+    self.assertEquals(EXPECTED_CALLS_IN_ORDER, calls_in_order)
+
   def testTearDownIsCalledOnceForEachStoryGroupWithPageSetRepeat(self):
     self.options.pageset_repeat = 3
     fooz_init_call_counter = [0]
diff --git a/tools/telemetry/telemetry/story/shared_state.py b/tools/telemetry/telemetry/story/shared_state.py
index 1057bee..0b360d3 100644
--- a/tools/telemetry/telemetry/story/shared_state.py
+++ b/tools/telemetry/telemetry/story/shared_state.py
@@ -14,7 +14,7 @@ class SharedState(object):
   Override to do any action before running stories that share this same state.
 
   Args:
-    test: a page_test.PageTest instance.
+    test: a page_test.PageTest or story_test.StoryTest instance.
     options: a BrowserFinderOptions instance that contains command line options.
     story_set: a story.StorySet instance.
diff --git a/tools/telemetry/telemetry/web_perf/story_test.py b/tools/telemetry/telemetry/web_perf/story_test.py
new file mode 100644
index 0000000..798ef9c
--- /dev/null
+++ b/tools/telemetry/telemetry/web_perf/story_test.py
@@ -0,0 +1,44 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class StoryTest(object):
+  """A class for creating story tests.
+
+  The overall test run control flow follows this order:
+    test.WillRunStory
+    state.WillRunStory
+    state.RunStory
+    test.Measure
+    state.DidRunStory
+    test.DidRunStory
+  """
+
+  def WillRunStory(self, platform):
+    """Override to do any action before running the story.
+
+    This is run before state.WillRunStory.
+    Args:
+      platform: The platform that the story will run on.
+    """
+    raise NotImplementedError()
+
+  def Measure(self, platform, results):
+    """Override to take the measurement.
+
+    This is run only if state.RunStory is successful.
+    Args:
+      platform: The platform that the story will run on.
+      results: The results of running the story.
+    """
+    raise NotImplementedError()
+
+  def DidRunStory(self, platform):
+    """Override to do any action after running the story, e.g., clean up.
+
+    This is run after state.DidRunStory. And this is always called even if the
+    test run failed.
+    Args:
+      platform: The platform that the story will run on.
+    """
+    raise NotImplementedError()
diff --git a/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py b/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
index 62fc39e..9156e8b 100644
--- a/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
+++ b/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
@@ -16,7 +16,7 @@ from telemetry.web_perf.metrics import responsiveness_metric
 from telemetry.web_perf.metrics import smoothness
 from telemetry.web_perf import timeline_interaction_record as tir_module
 from telemetry.web_perf import smooth_gesture_util
-from telemetry import decorators
+from telemetry.web_perf import story_test
 
 # TimelineBasedMeasurement considers all instrumentation as producing a single
 # timeline. But, depending on the amount of instrumentation that is enabled,
@@ -182,7 +182,7 @@ class Options(object):
     self._tracing_options = value
 
 
-class TimelineBasedMeasurement(object):
+class TimelineBasedMeasurement(story_test.StoryTest):
   """Collects multiple metrics based on their interaction records.
 
   A timeline based measurement shifts the burden of what metrics to collect onto
@@ -215,7 +215,20 @@ class TimelineBasedMeasurement(object):
     self._tbm_options = options
     self._results_wrapper_class = results_wrapper_class
 
-  def WillRunStory(self, tracing_controller, synthetic_delay_categories=None):
+  def WillRunStory(self, platform):
+    """Set up test according to the tbm options."""
+    pass
+
+  def Measure(self, platform, results):
+    """Collect all possible metrics and added them to results."""
+    pass
+
+  def DidRunStory(self, platform):
+    """Clean up test according to the tbm options."""
+    pass
+
+  def WillRunStoryForPageTest(self, tracing_controller,
+                              synthetic_delay_categories=None):
     """Configure and start tracing.
 
     Args:
@@ -235,14 +248,7 @@
     tracing_controller.Start(self._tbm_options.tracing_options,
                              category_filter)
 
-  @decorators.Deprecated(
-      2015, 7, 19, 'Please use WillRunStory instead. The user story concept is '
-      'being renamed to story.')
-  def WillRunUserStory(self, tracing_controller,
-                       synthetic_delay_categories=None):
-    self.WillRunStory(tracing_controller, synthetic_delay_categories)
-
-  def Measure(self, tracing_controller, results):
+  def MeasureForPageTest(self, tracing_controller, results):
     """Collect all possible metrics and added them to results."""
     trace_result = tracing_controller.Stop()
     results.AddValue(trace.TraceValue(results.current_page, trace_result))
@@ -255,12 +261,6 @@
         self._results_wrapper_class)
     meta_metrics.AddResults(results)
 
-  def DidRunStory(self, tracing_controller):
+  def DidRunStoryForPageTest(self, tracing_controller):
     if tracing_controller.is_tracing_running:
       tracing_controller.Stop()
-
-  @decorators.Deprecated(
-      2015, 7, 19, 'Please use DidRunStory instead. The user story concept is '
-      'being renamed to story.')
-  def DidRunUserStory(self, tracing_controller):
-    self.DidRunStory(tracing_controller)
diff --git a/tools/telemetry/telemetry/web_perf/timeline_based_page_test.py b/tools/telemetry/telemetry/web_perf/timeline_based_page_test.py
index e2f4646..e11751c 100644
--- a/tools/telemetry/telemetry/web_perf/timeline_based_page_test.py
+++ b/tools/telemetry/telemetry/web_perf/timeline_based_page_test.py
@@ -16,14 +16,14 @@ class TimelineBasedPageTest(page_test.PageTest):
 
   def WillNavigateToPage(self, page, tab):
     tracing_controller = tab.browser.platform.tracing_controller
-    self._measurement.WillRunStory(
+    self._measurement.WillRunStoryForPageTest(
         tracing_controller, page.GetSyntheticDelayCategories())
 
   def ValidateAndMeasurePage(self, page, tab, results):
     """Collect all possible metrics and added them to results."""
     tracing_controller = tab.browser.platform.tracing_controller
-    self._measurement.Measure(tracing_controller, results)
+    self._measurement.MeasureForPageTest(tracing_controller, results)
 
   def CleanUpAfterPage(self, page, tab):
     tracing_controller = tab.browser.platform.tracing_controller
-    self._measurement.DidRunStory(tracing_controller)
+    self._measurement.DidRunStoryForPageTest(tracing_controller)
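
For orientation, the ordering that story_runner now enforces between the StoryTest hooks and the SharedState hooks can be summarized as the sketch below. This is a simplified restatement of _RunStoryAndProcessErrorIfNeeded from the diff above, not the actual runner code; failure reporting, CanRunStory skipping, thermal throttling waits, and retry handling are omitted, and the function name is illustrative.

```python
# Simplified sketch of the hook ordering enforced by story_runner in this CL.
# Not the real implementation: error reporting, skipping, and retries omitted.
from telemetry.web_perf import story_test


def run_story_sketch(story, results, state, test):
  is_story_test = isinstance(test, story_test.StoryTest)
  try:
    if is_story_test:
      test.WillRunStory(state.platform)        # test setup runs first
    state.WillRunStory(story)                  # then shared-state setup
    state.RunStory(results)                    # run the story itself
    if is_story_test:
      test.Measure(state.platform, results)    # measure only on success
  finally:
    state.DidRunStory(results)                 # state teardown
    if is_story_test:
      # Reached even when the story failed, but skipped if state.DidRunStory
      # itself raised, matching the comment in the runner above.
      test.DidRunStory(state.platform)
```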