author     nduca@chromium.org <nduca@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-01-15 23:58:56 +0000
committer  nduca@chromium.org <nduca@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-01-15 23:58:56 +0000
commit     f29d69683514723f80eb206b00df35ab43b2592e (patch)
tree       c76e8957bbf0bb07e45071f1f991ba470aa8b4d4 /tools
parent     95023f5108e68b2220d73036c31712e4ce556d55 (diff)
Let tests request a browser restart for each page in the pageset.
This reduces the noise on painting measurements from 30% to 5%. I will revisit
why when I can, but I'm putting this out as a temporary solution, since getting good stable numbers is key for us.
R=tonyg
NOTRY=True
Review URL: https://chromiumcodereview.appspot.com/11888020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@177018 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rw-r--r--  tools/telemetry/telemetry/multi_page_benchmark.py |  9 +++++++--
-rw-r--r--  tools/telemetry/telemetry/page_runner.py          |  4 ++++
-rw-r--r--  tools/telemetry/telemetry/page_test.py            | 11 ++++++++++-
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/tools/telemetry/telemetry/multi_page_benchmark.py b/tools/telemetry/telemetry/multi_page_benchmark.py
index 9261b83..c24def3 100644
--- a/tools/telemetry/telemetry/multi_page_benchmark.py
+++ b/tools/telemetry/telemetry/multi_page_benchmark.py
@@ -39,8 +39,13 @@ class MultiPageBenchmark(page_test.PageTest):
            'document.querySelector('%s').children.length')
        results.Add('children', 'count', child_count)
   """
-  def __init__(self, interaction_name=''):
-    super(MultiPageBenchmark, self).__init__('_RunTest', interaction_name)
+  def __init__(self,
+               interaction_name_to_run='',
+               needs_browser_restart_after_each_run=False):
+    super(MultiPageBenchmark, self).__init__(
+        '_RunTest',
+        interaction_name_to_run,
+        needs_browser_restart_after_each_run)
 
   def _RunTest(self, page, tab, results):
     results.WillMeasurePage(page)
diff --git a/tools/telemetry/telemetry/page_runner.py b/tools/telemetry/telemetry/page_runner.py
index e386877..3cfb6d4 100644
--- a/tools/telemetry/telemetry/page_runner.py
+++ b/tools/telemetry/telemetry/page_runner.py
@@ -162,6 +162,10 @@ class PageRunner(object):
 
           if options.trace_dir:
             self._EndTracing(state, options, page)
+
+          if test.needs_browser_restart_after_each_run:
+            state.Close()
+
           break
         except browser_gone_exception.BrowserGoneException:
           logging.warning('Lost connection to browser. Retrying.')
diff --git a/tools/telemetry/telemetry/page_test.py b/tools/telemetry/telemetry/page_test.py
index 702c22d..8fbd2d0 100644
--- a/tools/telemetry/telemetry/page_test.py
+++ b/tools/telemetry/telemetry/page_test.py
@@ -30,7 +30,10 @@ class PageTestResults(object):
 
 class PageTest(object):
   """A class styled on unittest.TestCase for creating page-specific tests."""
-  def __init__(self, test_method_name, interaction_name_to_run=''):
+  def __init__(self,
+               test_method_name,
+               interaction_name_to_run='',
+               needs_browser_restart_after_each_run=False):
     self.options = None
     try:
       self._test_method = getattr(self, test_method_name)
@@ -38,6 +41,12 @@ class PageTest(object):
       raise ValueError, 'No such method %s.%s' % (
         self.__class_, test_method_name)  # pylint: disable=E1101
     self._interaction_name_to_run = interaction_name_to_run
+    self._needs_browser_restart_after_each_run = (
+        needs_browser_restart_after_each_run)
+
+  @property
+  def needs_browser_restart_after_each_run(self):
+    return self._needs_browser_restart_after_each_run
 
   def AddCommandLineOptions(self, parser):
     """Override to expose command-line options for this benchmark.
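
For illustration only (not part of this change): a minimal sketch of how a benchmark could opt into the new flag. The class name and the measured value are hypothetical, and the MeasurePage hook and tab.runtime.Evaluate call are assumed from the Telemetry API of this period; only needs_browser_restart_after_each_run comes from the patch above.

from telemetry import multi_page_benchmark


class BodyChildrenBenchmark(multi_page_benchmark.MultiPageBenchmark):
  """Hypothetical benchmark that asks for a fresh browser before every page."""

  def __init__(self):
    # Passing the new flag makes page_runner close the browser state after
    # each run, so every page starts from a cold browser.
    super(BodyChildrenBenchmark, self).__init__(
        needs_browser_restart_after_each_run=True)

  def MeasurePage(self, page, tab, results):
    # Assumed measurement, mirroring the docstring example in
    # multi_page_benchmark.py.
    child_count = tab.runtime.Evaluate('document.body.children.length')
    results.Add('children', 'count', child_count)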