author    tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-23 23:54:44 +0000
committer tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-23 23:54:44 +0000
commit    22f4d5efc728f28c0126e25dea47b8d4a3a2781d (patch)
tree      ab57f90ce89f3c7ca1ce8ef454e3fbc78f97fc68 /tools/telemetry
parent    fff433f3ba45f2ef166b495442e4018e688a2739 (diff)
[Telemetry] Add a profiler based loading measurement.
This measurement reports the hottest functions during page load. I plan to run it over the top million sites.

It works by loading each page twice; the profile is captured for the second load only, both because cross-origin navigations cause the renderer to be swapped out during profiling and because the second load gives more stable results.

This required a couple of core modifications:

1. csv_page_measurement_results should work when the page runner creates a dummy results object because discard_first_results is enabled.
2. discard_first_results should discard the first result of each page in the page set, not just the first page.

BUG=None
Review URL: https://chromiumcodereview.appspot.com/19857003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@213279 0039d316-1c4b-4281-b951-d872f2087c98
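In miniature, the scheme the message describes looks like this (a sketch only; run_page and the pages list are hypothetical stand-ins, not Telemetry APIs):

    # Each page is loaded twice; only the second, profiled load is kept.
    for page in pages:
        run_page(page, profile=False)  # warm-up load; result discarded
        run_page(page, profile=True)   # profiled load; result recorded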
Diffstat (limited to 'tools/telemetry')
-rw-r--r--  tools/telemetry/telemetry/core/browser.py                                   20
-rw-r--r--  tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py           33
-rw-r--r--  tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py  31
-rw-r--r--  tools/telemetry/telemetry/core/platform/profiler/testdata/perf.profile       0
-rw-r--r--  tools/telemetry/telemetry/page/csv_page_measurement_results.py               5
-rw-r--r--  tools/telemetry/telemetry/page/page_runner.py                                11
-rw-r--r--  tools/telemetry/telemetry/page/page_runner_unittest.py                       24
7 files changed, 106 insertions, 18 deletions
diff --git a/tools/telemetry/telemetry/core/browser.py b/tools/telemetry/telemetry/core/browser.py
index c604cae..c2a7037 100644
--- a/tools/telemetry/telemetry/core/browser.py
+++ b/tools/telemetry/telemetry/core/browser.py
@@ -192,26 +192,32 @@ class Browser(object):
del result['ProcessCount']
return result
- def StartProfiling(self, options, base_output_file):
- """Starts profiling using |options|.profiler_tool. Results are saved to
+ def StartProfiling(self, profiler_name, base_output_file):
+ """Starts profiling using |profiler_name|. Results are saved to
|base_output_file|.<process_name>."""
assert not self._active_profilers
- profiler_class = profiler_finder.FindProfiler(options.profiler)
+ profiler_class = profiler_finder.FindProfiler(profiler_name)
- if not profiler_class.is_supported(options):
+ if not profiler_class.is_supported(self._browser_backend.options):
raise Exception('The %s profiler is not '
- 'supported on this platform.' % options.profiler_tool)
+ 'supported on this platform.' % profiler_name)
self._active_profilers.append(
profiler_class(self._browser_backend, self._platform_backend,
base_output_file))
def StopProfiling(self):
- """Stops all active profilers and saves their results."""
+ """Stops all active profilers and saves their results.
+
+ Returns:
+ A list of filenames produced by the profiler.
+ """
+ output_files = []
for profiler in self._active_profilers:
- profiler.CollectProfile()
+ output_files.extend(profiler.CollectProfile())
self._active_profilers = []
+ return output_files
def StartTracing(self, custom_categories=None, timeout=10):
return self._browser_backend.StartTracing(custom_categories, timeout)
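With this change, callers hand StartProfiling the profiler name instead of the whole options object, and StopProfiling reports back the files it wrote. A minimal sketch of the new call pattern, assuming |browser|, |options|, and |output_file| exist as they do in page_runner:

    browser.StartProfiling(options.profiler, output_file)
    # ... navigate and exercise the page ...
    profile_files = browser.StopProfiling()  # now returns the saved filenames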
diff --git a/tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py b/tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py
index ab4c918..58cea8a 100644
--- a/tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py
+++ b/tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py
@@ -3,6 +3,8 @@
# found in the LICENSE file.
import logging
+import os
+import re
import signal
import subprocess
import sys
@@ -40,6 +42,7 @@ class _SingleProcessPerfProfiler(object):
self._tmp_output_file.close()
print 'To view the profile, run:'
print ' perf report -i %s' % self._output_file
+ return self._output_file
def _GetStdOut(self):
self._tmp_output_file.flush()
@@ -79,5 +82,33 @@ class PerfProfiler(profiler.Profiler):
return False
def CollectProfile(self):
+ output_files = []
for single_process in self._process_profilers:
- single_process.CollectProfile()
+ output_files.append(single_process.CollectProfile())
+ return output_files
+
+ @classmethod
+ def GetTopSamples(cls, file_name, number):
+ """Parses the perf generated profile in |file_name| and returns a
+ {function: period} dict of the |number| hottests functions.
+ """
+ assert os.path.exists(file_name)
+ report = subprocess.Popen(
+ ['perf', 'report', '--show-total-period', '-U', '-t', '^', '-i',
+ file_name],
+ stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')).communicate()[0]
+ period_by_function = {}
+ for line in report.split('\n'):
+ if not line or line.startswith('#'):
+ continue
+ fields = line.split('^')
+ if len(fields) != 5:
+ continue
+ period = int(fields[1])
+ function = fields[4].partition(' ')[2]
+ function = re.sub('<.*>', '', function) # Strip template params.
+ function = re.sub('[(].*[)]', '', function) # Strip function params.
+ period_by_function[function] = period
+ if len(period_by_function) == number:
+ break
+ return period_by_function
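GetTopSamples shells out to perf report with '^' as the column separator and keeps the first |number| symbols by total period (the parsing assumes perf's five-column --show-total-period layout). A usage sketch, with an illustrative profile path:

    from telemetry.core.platform.profiler import perf_profiler

    top = perf_profiler.PerfProfiler.GetTopSamples('perf.profile', 10)
    for function, period in sorted(top.items(), key=lambda kv: -kv[1]):
        print '%12d  %s' % (period, function)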
diff --git a/tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py b/tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py
new file mode 100644
index 0000000..8ca407f
--- /dev/null
+++ b/tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py
@@ -0,0 +1,31 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import os
+import unittest
+
+from telemetry.core.platform.profiler import perf_profiler
+from telemetry.unittest import options_for_unittests
+
+class TestPerfProfiler(unittest.TestCase):
+ def testPerfProfiler(self):
+ options = options_for_unittests.GetCopy()
+ if not perf_profiler.PerfProfiler.is_supported(options):
+ logging.warning('PerfProfiler is not supported. Skipping test')
+ return
+
+ profile_file = os.path.join(os.path.dirname(__file__),
+ 'testdata', 'perf.profile')
+ self.assertEqual(perf_profiler.PerfProfiler.GetTopSamples(profile_file, 10),
+ { 'v8::internal::StaticMarkingVisitor::MarkMapContents': 63615201,
+ 'v8::internal::RelocIterator::next': 38271931,
+ 'v8::internal::LAllocator::MeetConstraintsBetween': 42913933,
+ 'v8::internal::FlexibleBodyVisitor::Visit': 31909537,
+ 'v8::internal::LiveRange::CreateAssignedOperand': 42913933,
+ 'void v8::internal::RelocInfo::Visit': 96878864,
+ 'WebCore::HTMLTokenizer::nextToken': 48240439,
+ 'v8::internal::Scanner::ScanIdentifierOrKeyword': 46054550,
+ 'sk_memset32_SSE2': 45121317,
+ 'v8::internal::HeapObject::Size': 39786862
+ })
diff --git a/tools/telemetry/telemetry/core/platform/profiler/testdata/perf.profile b/tools/telemetry/telemetry/core/platform/profiler/testdata/perf.profile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/telemetry/telemetry/core/platform/profiler/testdata/perf.profile
diff --git a/tools/telemetry/telemetry/page/csv_page_measurement_results.py b/tools/telemetry/telemetry/page/csv_page_measurement_results.py
index bd2e8d2..2440dd8 100644
--- a/tools/telemetry/telemetry/page/csv_page_measurement_results.py
+++ b/tools/telemetry/telemetry/page/csv_page_measurement_results.py
@@ -7,9 +7,10 @@ from telemetry.page import page_measurement_results
class CsvPageMeasurementResults(
page_measurement_results.PageMeasurementResults):
- def __init__(self, output_stream, output_after_every_page):
+ def __init__(self, output_stream=None, output_after_every_page=None):
super(CsvPageMeasurementResults, self).__init__()
- self._results_writer = csv.writer(output_stream)
+ if output_stream:
+ self._results_writer = csv.writer(output_stream)
self._did_output_header = False
self._header_names_written_to_writer = None
self._output_after_every_page = output_after_every_page
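The defaulted arguments matter because page_runner discards a first result by instantiating a dummy of the same results class with no arguments. A sketch of that interaction (the stream and flag values are illustrative):

    import sys

    from telemetry.page import csv_page_measurement_results

    results = csv_page_measurement_results.CsvPageMeasurementResults(
        sys.stdout, False)
    dummy = type(results)()  # succeeds now that both arguments default to None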
diff --git a/tools/telemetry/telemetry/page/page_runner.py b/tools/telemetry/telemetry/page/page_runner.py
index a3f1b85..8e20cec 100644
--- a/tools/telemetry/telemetry/page/page_runner.py
+++ b/tools/telemetry/telemetry/page/page_runner.py
@@ -1,6 +1,7 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import collections
import glob
import logging
import os
@@ -27,7 +28,7 @@ class _RunState(object):
self._append_to_existing_wpr = False
self._last_archive_path = None
self._first_browser = True
- self.first_page = True
+ self.first_page = collections.defaultdict(lambda: True)
self.profiler_dir = None
def StartBrowser(self, test, page_set, page, possible_browser,
@@ -69,8 +70,8 @@ class _RunState(object):
if not self.tab:
self.tab = self.browser.tabs[0]
- if self.first_page:
- self.first_page = False
+ if self.first_page[page]:
+ self.first_page[page] = False
test.WillRunPageSet(self.tab)
def StopBrowser(self):
@@ -93,7 +94,7 @@ class _RunState(object):
output_file = os.path.join(self.profiler_dir, page.url_as_file_safe_name)
if options.page_repeat != 1 or options.pageset_repeat != 1:
output_file = _GetSequentialFileName(output_file)
- self.browser.StartProfiling(options, output_file)
+ self.browser.StartProfiling(options.profiler, output_file)
def StopProfiling(self):
self.browser.StopProfiling()
@@ -192,7 +193,7 @@ def Run(test, page_set, expectations, options):
else:
possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
results_for_current_run = results
- if state.first_page and test.discard_first_result:
+ if state.first_page[page] and test.discard_first_result:
# If discarding results, substitute a dummy object.
results_for_current_run = type(results)()
results_for_current_run.StartTest(page)
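The per-page bookkeeping leans on defaultdict returning True for any page it has not seen yet, so each page's first run is discarded independently. A standalone sketch of those semantics (plain objects stand in for Page instances):

    import collections

    first_page = collections.defaultdict(lambda: True)
    page_a, page_b = object(), object()  # stand-ins for Page instances
    assert first_page[page_a]            # an unseen page starts out "first"
    first_page[page_a] = False
    assert not first_page[page_a]        # page_a's first run has been consumed
    assert first_page[page_b]            # page_b is tracked independently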
diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py
index 8aa37a1..e0ad39d 100644
--- a/tools/telemetry/telemetry/page/page_runner_unittest.py
+++ b/tools/telemetry/telemetry/page/page_runner_unittest.py
@@ -62,11 +62,14 @@ class PageRunnerTests(unittest.TestCase):
def testDiscardFirstResult(self):
ps = page_set.PageSet()
expectations = test_expectations.TestExpectations()
- page = page_module.Page(
+ ps.pages.append(page_module.Page(
'file:///' + os.path.join('..', '..', 'unittest_data', 'blank.html'),
ps,
- base_dir=os.path.dirname(__file__))
- ps.pages.append(page)
+ base_dir=os.path.dirname(__file__)))
+ ps.pages.append(page_module.Page(
+ 'file:///' + os.path.join('..', '..', 'unittest_data', 'blank.html'),
+ ps,
+ base_dir=os.path.dirname(__file__)))
class Test(page_test.PageTest):
@property
@@ -77,10 +80,25 @@ class PageRunnerTests(unittest.TestCase):
options = options_for_unittests.GetCopy()
options.output_format = 'none'
+
+ options.page_repeat = 1
+ options.pageset_repeat = 1
results = page_runner.Run(Test('RunTest'), ps, expectations, options)
self.assertEquals(0, len(results.successes))
self.assertEquals(0, len(results.failures))
+ options.page_repeat = 1
+ options.pageset_repeat = 2
+ results = page_runner.Run(Test('RunTest'), ps, expectations, options)
+ self.assertEquals(2, len(results.successes))
+ self.assertEquals(0, len(results.failures))
+
+ options.page_repeat = 2
+ options.pageset_repeat = 1
+ results = page_runner.Run(Test('RunTest'), ps, expectations, options)
+ self.assertEquals(2, len(results.successes))
+ self.assertEquals(0, len(results.failures))
+
def testCredentialsWhenLoginFails(self):
credentials_backend = StubCredentialsBackend(login_return_value=False)
did_run = self.runCredentialsTest(credentials_backend)