author     jeremy@chromium.org <jeremy@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-09 12:40:43 +0000
committer  jeremy@chromium.org <jeremy@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-09 12:40:43 +0000
commit     21b1af5309c5567e6c5f0f9723b680f739a5f9ab (patch)
tree       804ea413e15075940989daf8632906d92895f47e /tools/telemetry
parent     f620b50f614fb27e3bebb986595b6f137f01ff41 (diff)
Telemetry: Allow test to specify skipping the first iteration
For Startup tests, some test cases exercise "warm" runs of Chrome. To facilitate this, it's desirable to be able to perform an initial test run whose results are ignored and which serves only to "warm up" resources needed by the test. This CL adds a |discard_first_result| property to the |PageTest| object for this purpose.

BUG=
Review URL: https://chromiumcodereview.appspot.com/13773006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@193086 0039d316-1c4b-4281-b951-d872f2087c98
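For illustration, a benchmark opts in by threading the new flag through to |PageTest|, as the page_benchmark.py change below shows. A minimal sketch against the constructor signature added in this CL (the WarmStartup class and its metric collection are invented for illustration, not part of the change):

  from telemetry.page import page_benchmark

  class WarmStartup(page_benchmark.PageBenchmark):
    """Hypothetical benchmark whose first, cache-priming run is discarded."""

    def __init__(self):
      # discard_first_result=True tells the runner to throw away the
      # results of the first page run; only warm runs are reported.
      super(WarmStartup, self).__init__(
          needs_browser_restart_after_each_run=True,
          discard_first_result=True)

    def _RunTest(self, page, tab, results):
      results.WillMeasurePage(page)
      # ... collect startup metrics here (elided) ...
      results.DidMeasurePage()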
Diffstat (limited to 'tools/telemetry')
-rw-r--r--  tools/telemetry/telemetry/page/page_benchmark.py        |  6
-rw-r--r--  tools/telemetry/telemetry/page/page_runner.py           | 22
-rw-r--r--  tools/telemetry/telemetry/page/page_runner_unittest.py  | 23
-rw-r--r--  tools/telemetry/telemetry/page/page_test.py             | 11

4 files changed, 53 insertions(+), 9 deletions(-)
diff --git a/tools/telemetry/telemetry/page/page_benchmark.py b/tools/telemetry/telemetry/page/page_benchmark.py
index fc79b70..117904a 100644
--- a/tools/telemetry/telemetry/page/page_benchmark.py
+++ b/tools/telemetry/telemetry/page/page_benchmark.py
@@ -40,11 +40,13 @@ class PageBenchmark(page_test.PageTest):
   """
   def __init__(self,
                action_name_to_run='',
-               needs_browser_restart_after_each_run=False):
+               needs_browser_restart_after_each_run=False,
+               discard_first_result=False):
     super(PageBenchmark, self).__init__(
         '_RunTest',
         action_name_to_run,
-        needs_browser_restart_after_each_run)
+        needs_browser_restart_after_each_run,
+        discard_first_result)
 
   def _RunTest(self, page, tab, results):
     results.WillMeasurePage(page)
diff --git a/tools/telemetry/telemetry/page/page_runner.py b/tools/telemetry/telemetry/page/page_runner.py
index 5ce6f69..ab24745 100644
--- a/tools/telemetry/telemetry/page/page_runner.py
+++ b/tools/telemetry/telemetry/page/page_runner.py
@@ -12,6 +12,7 @@ import random
 from telemetry.core import util
 from telemetry.core import wpr_modes
 from telemetry.core import exceptions
+from telemetry.page import page_benchmark_results
 from telemetry.page import page_filter as page_filter_module
 from telemetry.page import page_test
 
@@ -66,7 +67,7 @@ class PageRunner(object):
   def __exit__(self, *args):
     self.Close()
 
-  def Run(self, options, possible_browser, test, results):
+  def Run(self, options, possible_browser, test, out_results):
     # Reorder page set based on options.
     pages = _ShuffleAndFilterPageSet(self.page_set, options)
 
@@ -87,7 +88,7 @@ class PageRunner(object):
 No page set archive provided for the page %s. Not running the page. To run
 against live sites, pass the flag --allow-live-sites.
 """, page.url)
-        results.AddFailure(page, 'Page set archive not defined', '')
+        out_results.AddFailure(page, 'Page set archive not defined', '')
         pages_without_archives.append(page)
       elif options.wpr_mode != wpr_modes.WPR_RECORD:
         # The page has an archive, and we're not recording.
@@ -108,7 +109,7 @@ class PageRunner(object):
 http://goto/read-src-internal, or create a new archive using record_wpr.
 To run against live sites, pass the flag --allow-live-sites.
 """, os.path.relpath(page.archive_path), page.url)
-        results.AddFailure(page, 'Page set archive doesn\'t exist', '')
+        out_results.AddFailure(page, 'Page set archive doesn\'t exist', '')
         pages_without_archives.append(page)
 
     pages = [page for page in pages if page not in pages_without_archives]
@@ -141,6 +142,8 @@ class PageRunner(object):
     state = _RunState()
     last_archive_path = None
     is_first_run = True
+    results_for_current_run = out_results
+
     try:
       for page in pages:
         if options.wpr_mode != wpr_modes.WPR_RECORD:
@@ -152,6 +155,12 @@ class PageRunner(object):
           state.Close()
           state = _RunState()
           last_archive_path = page.archive_path
+        if (test.discard_first_result and is_first_run):
+          # If discarding results, substitute a dummy object.
+          results_for_current_run = (
+              page_benchmark_results.PageBenchmarkResults())
+        else:
+          results_for_current_run = out_results
         tries = 3
         while tries:
           try:
@@ -174,10 +183,11 @@ class PageRunner(object):
 
             if is_first_run:
               is_first_run = False
-              test.WillRunPageSet(state.tab, results)
+              test.WillRunPageSet(state.tab, results_for_current_run)
 
             try:
-              self._RunPage(options, page, state.tab, test, results)
+              self._RunPage(options, page, state.tab, test,
+                            results_for_current_run)
               self._CheckThermalThrottling(state.browser.platform)
             except exceptions.TabCrashException:
               stdout = ''
@@ -204,7 +214,7 @@ class PageRunner(object):
           if not tries:
             logging.error('Lost connection to browser 3 times. Failing.')
             raise
 
-      test.DidRunPageSet(state.tab, results)
+      test.DidRunPageSet(state.tab, results_for_current_run)
     finally:
       state.Close()
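The heart of the page_runner.py change is that the runner substitutes a throwaway PageBenchmarkResults for the first run, so warm-up measurements never reach the caller's results object. A standalone sketch of the same pattern, with invented names (_NullResults, measure, and run_iterations are illustrative, not Telemetry APIs):

  class _NullResults(object):
    """Accepts measurements and silently drops them."""
    def Add(self, value):
      pass

  def run_iterations(measure, out_results, iterations, discard_first_result):
    for i in range(iterations):
      # Route the first, warm-up iteration into a throwaway sink; only
      # warmed-up iterations reach the real results object.
      sink = _NullResults() if (discard_first_result and i == 0) else out_results
      measure(sink)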
diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py
index 6fe860e..b175575 100644
--- a/tools/telemetry/telemetry/page/page_runner_unittest.py
+++ b/tools/telemetry/telemetry/page/page_runner_unittest.py
@@ -59,6 +59,29 @@ class PageRunnerTests(unittest.TestCase):
     self.assertEquals(0, len(results.page_successes))
     self.assertEquals(1, len(results.page_failures))
 
+  def testDiscardFirstResult(self):
+    ps = page_set.PageSet()
+    page = page_module.Page(
+        'file:///' + os.path.join('..', '..', 'unittest_data', 'blank.html'),
+        ps,
+        base_dir=os.path.dirname(__file__))
+    ps.pages.append(page)
+    results = page_test.PageTestResults()
+
+    class Test(page_test.PageTest):
+      @property
+      def discard_first_result(self):
+        return True
+      def RunTest(self, *args):
+        pass
+
+    with page_runner.PageRunner(ps) as runner:
+      options = options_for_unittests.GetCopy()
+      possible_browser = browser_finder.FindBrowser(options)
+      runner.Run(options, possible_browser, Test('RunTest'), results)
+      self.assertEquals(0, len(results.page_successes))
+      self.assertEquals(0, len(results.page_failures))
+
   def disabled_testCredentialsWhenLoginFails(self):
     # This test is disabled because it runs against live sites, and needs to be
     # fixed. crbug.com/179038
diff --git a/tools/telemetry/telemetry/page/page_test.py b/tools/telemetry/telemetry/page/page_test.py
index 482d166..5b3b455 100644
--- a/tools/telemetry/telemetry/page/page_test.py
+++ b/tools/telemetry/telemetry/page/page_test.py
@@ -64,7 +64,8 @@ class PageTest(object):
   def __init__(self,
                test_method_name,
                action_name_to_run='',
-               needs_browser_restart_after_each_run=False):
+               needs_browser_restart_after_each_run=False,
+               discard_first_result=False):
     self.options = None
     try:
       self._test_method = getattr(self, test_method_name)
@@ -74,11 +75,19 @@ class PageTest(object):
     self._action_name_to_run = action_name_to_run
     self._needs_browser_restart_after_each_run = (
         needs_browser_restart_after_each_run)
+    self._discard_first_result = discard_first_result
 
   @property
   def needs_browser_restart_after_each_run(self):
     return self._needs_browser_restart_after_each_run
 
+  @property
+  def discard_first_result(self):
+    """When set to True, the first run of the test is discarded. This is
+    useful for cases where it's desirable to have some test resource cached
+    so the first run of the test can warm things up."""
+    return self._discard_first_result
+
   def AddCommandLineOptions(self, parser):
     """Override to expose command-line options for this benchmark.