author     leilei <leilei@chromium.org>                2016-03-25 14:35:27 -0700
committer  Commit bot <commit-bot@chromium.org>        2016-03-25 21:41:34 +0000
commit     d1fd45b7740d761757feebf912d250515cc0baf6 (patch)
tree       46b08def26df83b0d8588611121ae22c8cf836bd
parent     da88c32858fdbc6040b07d51019d9f564e37416e (diff)
Don't restart Chrome for each run; drop the first result.
Right now we collect the dialog latency data including the first run: each run starts Chrome with the MR extension, waits 5 s, and opens the MR dialog. We found a large variation in MR dialog latency on Windows; sometimes it takes less than 1 s, sometimes more than 4 s. Moreover, we believe this is not the normal use case. With this change we no longer restart Chrome for each run: each run just opens and closes the MR dialog, and the first result is discarded because it varies widely compared with the results from the other runs. This is closer to the normal use case and should give more stable performance results.

BUG=

Review URL: https://codereview.chromium.org/1817313002

Cr-Commit-Position: refs/heads/master@{#383367}
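A minimal sketch of the drop-first-result approach used in the diff below, assuming Telemetry's PerfBenchmark API as shown there; the import path and the class name _WarmRunBenchmark are illustrative, not the exact Chromium code:

    from core import perf_benchmark  # assumed import path for Telemetry's PerfBenchmark


    class _WarmRunBenchmark(perf_benchmark.PerfBenchmark):
      # Run the story 6 times in the same browser session so that 5 samples
      # remain after the first, high-variance one is discarded.
      options = {'page_repeat': 6}

      @classmethod
      def ValueCanBeAddedPredicate(cls, value, is_first_result):
        # Telemetry consults this predicate for every measured value; returning
        # False for the first result drops the cold-start sample.
        return not is_first_result

Keeping the browser alive across repeats means only the first dialog launch pays the extension start-up cost, which is exactly the sample the predicate filters out.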
-rw-r--r--  chrome/test/media_router/telemetry/benchmarks/media_router_benchmark.py        7
-rw-r--r--  chrome/test/media_router/telemetry/benchmarks/media_router_measurements.py     7
-rw-r--r--  chrome/test/media_router/telemetry/benchmarks/media_router_metric.py          46
-rw-r--r--  chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_pages.py  26
4 files changed, 62 insertions(+), 24 deletions(-)
diff --git a/chrome/test/media_router/telemetry/benchmarks/media_router_benchmark.py b/chrome/test/media_router/telemetry/benchmarks/media_router_benchmark.py
index 3ce86a5..78356ce 100644
--- a/chrome/test/media_router/telemetry/benchmarks/media_router_benchmark.py
+++ b/chrome/test/media_router/telemetry/benchmarks/media_router_benchmark.py
@@ -15,7 +15,7 @@ from benchmarks import media_router_timeline_metric
class _BaseCastBenchmark(perf_benchmark.PerfBenchmark):
- options = {'page_repeat': 5}
+ options = {'page_repeat': 6}
page_set = media_router_pages.MediaRouterPageSet
@@ -30,6 +30,11 @@ class _BaseCastBenchmark(perf_benchmark.PerfBenchmark):
'--enable-stats-collection-bindings'
])
+ @classmethod
+ def ValueCanBeAddedPredicate(cls, value, is_first_result):
+ """Only drops the first result."""
+ return not is_first_result
+
class TraceEventCaseBenckmark(_BaseCastBenchmark):
diff --git a/chrome/test/media_router/telemetry/benchmarks/media_router_measurements.py b/chrome/test/media_router/telemetry/benchmarks/media_router_measurements.py
index 4eeae89..320ae9e 100644
--- a/chrome/test/media_router/telemetry/benchmarks/media_router_measurements.py
+++ b/chrome/test/media_router/telemetry/benchmarks/media_router_measurements.py
@@ -11,6 +11,11 @@ class MediaRouterPageTest(page_test.PageTest):
def __init__(self):
super(MediaRouterPageTest, self).__init__()
+ self._media_router_metric = media_router_metric.MediaRouterMetric()
+
+ def DidNavigateToPage(self, page, tab):
+ self._media_router_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
- media_router_metric.MediaRouterMetric().AddResults(tab, results)
+ self._media_router_metric.Stop(page, tab)
+ self._media_router_metric.AddResults(tab, results)
diff --git a/chrome/test/media_router/telemetry/benchmarks/media_router_metric.py b/chrome/test/media_router/telemetry/benchmarks/media_router_metric.py
index 352e386..0439b58 100644
--- a/chrome/test/media_router/telemetry/benchmarks/media_router_metric.py
+++ b/chrome/test/media_router/telemetry/benchmarks/media_router_metric.py
@@ -4,6 +4,7 @@
import json
+from telemetry.value import histogram
from telemetry.value import histogram_util
from telemetry.value import scalar
@@ -27,19 +28,44 @@ HISTOGRAMS_TO_RECORD = [
class MediaRouterMetric(Metric):
"A metric for media router dialog latency from histograms."
+ def __init__(self):
+ super(MediaRouterMetric, self).__init__()
+ self._histogram_start = dict()
+ self._histogram_delta = dict()
+ self._started = False
+
def Start(self, page, tab):
- raise NotImplementedError()
+ self._started = True
+
+ for h in HISTOGRAMS_TO_RECORD:
+ histogram_data = histogram_util.GetHistogram(
+ h['type'], h['name'], tab)
+ # Histogram data may not be available
+ if not histogram_data:
+ continue
+ self._histogram_start[h['name']] = histogram_data
def Stop(self, page, tab):
- raise NotImplementedError()
+ assert self._started, 'Must call Start() first'
+ for h in HISTOGRAMS_TO_RECORD:
+ # Histogram data may not be available
+ if h['name'] not in self._histogram_start:
+ continue
+ histogram_data = histogram_util.GetHistogram(
+ h['type'], h['name'], tab)
+
+ if not histogram_data:
+ continue
+ self._histogram_delta[h['name']] = histogram_util.SubtractHistogram(
+ histogram_data, self._histogram_start[h['name']])
def AddResults(self, tab, results):
+ assert self._histogram_delta, 'Must call Stop() first'
for h in HISTOGRAMS_TO_RECORD:
- result = json.loads(histogram_util.GetHistogram(
- h['type'], h['name'], tab))
- if 'sum' in result:
- # For all the histograms logged here, there's a single entry so sum
- # is the exact value for that entry.
- results.AddValue(scalar.ScalarValue(
- results.current_page, h['display_name'], h['units'],
- result['sum']))
+ # Histogram data may not be available
+ if h['name'] not in self._histogram_delta:
+ continue
+ results.AddValue(histogram.HistogramValue(
+ results.current_page, h['display_name'], h['units'],
+ raw_value_json=self._histogram_delta[h['name']], important=False,
+ description=h.get('description')))
diff --git a/chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_pages.py b/chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_pages.py
index 70135a1..e1b89e5 100644
--- a/chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_pages.py
+++ b/chrome/test/media_router/telemetry/benchmarks/pagesets/media_router_pages.py
@@ -5,25 +5,15 @@
from telemetry import page
from telemetry import story
from telemetry.page import shared_page_state
+from telemetry.core import exceptions
-class SharedState(shared_page_state.SharedPageState):
- """Shared state that restarts the browser for every single story."""
-
- def __init__(self, test, finder_options, story_set):
- super(SharedState, self).__init__(
- test, finder_options, story_set)
-
- def DidRunStory(self, results):
- super(SharedState, self).DidRunStory(results)
- self._StopBrowser()
class CastPage(page.Page):
def __init__(self, page_set):
super(CastPage, self).__init__(
url='file://basic_test.html',
- page_set=page_set,
- shared_page_state_class=SharedState)
+ page_set=page_set)
def RunPageInteractions(self, action_runner):
with action_runner.CreateInteraction('LaunchDialog'):
@@ -31,6 +21,18 @@ class CastPage(page.Page):
action_runner.Wait(5)
action_runner.TapElement(selector='#start_session_button')
action_runner.Wait(5)
+ for tab in action_runner.tab.browser.tabs:
+ # Close media router dialog
+ if tab.url == 'chrome://media-router/':
+ try:
+ tab.ExecuteJavaScript(
+ 'window.document.getElementById("media-router-container").' +
+ 'shadowRoot.getElementById("container-header").shadowRoot.' +
+ 'getElementById("close-button").click();')
+ except exceptions.DevtoolsTargetCrashException:
+ # Ignore the crash exception, this exception is caused by the js
+ # code which closes the dialog, it is expected.
+ pass
class MediaRouterPageSet(story.StorySet):