author     chrishenry@google.com <chrishenry@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2014-08-01 20:42:14 +0000
committer  chrishenry@google.com <chrishenry@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2014-08-01 20:42:14 +0000
commit     57f2796401f84a3df2a23f2069ad537e82836fdf (patch)
tree       cc92edb92bbe930629dd1da83b7c0b048498fac7
parent     07c63d399d1904ac8b0b5b88e0ca41b3483b7826 (diff)
Formalize the concept of page runs in PageTestResults.
A page run is created on StartTest and finalized on StopTest. We allow StopTest to discard the entire run (to support the discard_first_result option). We also allow a page run to restart a new attempt at running the page (via PageTestResults.WillAttemptPageRun); this clears any results recorded by previous attempts of the run.

As a result of this change:

* page_runner.py no longer needs the hack of creating a temporary results object.
* When we skip a test, it is now counted as a success, so the summary line of the test output now includes skipped tests in its count (the line that says "[  PASSED  ] 1 test."; see gtest_test_results_unittest).

This patch does not yet remove AddSuccess, since it is still used in GTestTestResults. I plan to move that logic into StopTest instead of AddSuccess (and similarly for _EmitFailure and _EmitSkip).

BUG=383639

Review URL: https://codereview.chromium.org/393283008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@287083 0039d316-1c4b-4281-b951-d872f2087c98
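For context, a minimal sketch of the page-run lifecycle this patch introduces, as exercised by page_runner.py and the unit tests below. It assumes a Chromium checkout with tools/telemetry on the Python path; the page URL and the value name 'load_time' are illustrative only.

# Minimal sketch of the new PageTestResults page-run lifecycle (assumes
# tools/telemetry is importable; URL and value name are made up).
import os

from telemetry.page import page_set
from telemetry.results import page_test_results
from telemetry.value import scalar

ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddPageWithDefaultRunNavigate('http://www.foo.com/')
page = ps.pages[0]

results = page_test_results.PageTestResults()

# A page run spans WillRunPage .. DidRunPage.
results.WillRunPage(page)

# Each (re)try of the page is one attempt; starting a new attempt clears
# any values recorded by earlier attempts of this run.
results.WillAttemptPageRun()
results.AddValue(scalar.ScalarValue(page, 'load_time', 'ms', 42))

# Pass discard_run=True (e.g. for discard_first_result) to drop the run
# and everything it recorded; otherwise the run is kept.
results.DidRunPage(page, discard_run=False)

assert len(results.all_page_runs) == 1
assert results.all_page_runs[0].ok
assert page in results.pages_that_succeeded

Retries re-enter the same run via WillAttemptPageRun rather than building a throwaway results object, which is what lets page_runner.py drop its copy.copy(results) workaround.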
-rw-r--r--  tools/telemetry/telemetry/page/page_runner.py                            25
-rw-r--r--  tools/telemetry/telemetry/page/page_runner_unittest.py                   45
-rw-r--r--  tools/telemetry/telemetry/page/profile_generator.py                       2
-rwxr-xr-x  tools/telemetry/telemetry/page/record_wpr.py                              3
-rw-r--r--  tools/telemetry/telemetry/page/record_wpr_unittest.py                    21
-rw-r--r--  tools/telemetry/telemetry/results/gtest_test_results.py                  16
-rw-r--r--  tools/telemetry/telemetry/results/gtest_test_results_unittest.py         13
-rw-r--r--  tools/telemetry/telemetry/results/json_output_formatter.py                2
-rw-r--r--  tools/telemetry/telemetry/results/page_measurement_results_unittest.py    4
-rw-r--r--  tools/telemetry/telemetry/results/page_run.py                             54
-rw-r--r--  tools/telemetry/telemetry/results/page_run_unittest.py                    63
-rw-r--r--  tools/telemetry/telemetry/results/page_test_results.py                    73
-rw-r--r--  tools/telemetry/telemetry/results/page_test_results_unittest.py           40
13 files changed, 283 insertions, 78 deletions
diff --git a/tools/telemetry/telemetry/page/page_runner.py b/tools/telemetry/telemetry/page/page_runner.py
index 358e504..4dfa2ca 100644
--- a/tools/telemetry/telemetry/page/page_runner.py
+++ b/tools/telemetry/telemetry/page/page_runner.py
@@ -3,7 +3,6 @@
# found in the LICENSE file.
import collections
-import copy
import logging
import optparse
import os
@@ -247,7 +246,8 @@ def _PrepareAndRunPage(test, page_set, expectations, finder_options,
while tries:
tries -= 1
try:
- results_for_current_run = copy.copy(results)
+ results.WillAttemptPageRun()
+
if test.RestartBrowserBeforeEachPage() or page.startup_url:
state.StopBrowser()
# If we are restarting the browser for each page customize the per page
@@ -258,7 +258,7 @@ def _PrepareAndRunPage(test, page_set, expectations, finder_options,
if not page.CanRunOnBrowser(browser_info.BrowserInfo(state.browser)):
logging.info('Skip test for page %s because browser is not supported.'
% page.url)
- return results
+ return
expectation = expectations.GetExpectationForPage(state.browser, page)
@@ -268,8 +268,7 @@ def _PrepareAndRunPage(test, page_set, expectations, finder_options,
state.StartProfiling(page, finder_options)
try:
- _RunPage(test, page, state, expectation,
- results_for_current_run, finder_options)
+ _RunPage(test, page, state, expectation, results, finder_options)
_CheckThermalThrottling(state.browser.platform)
except exceptions.TabCrashException as e:
if test.is_multi_tab_test:
@@ -285,11 +284,7 @@ def _PrepareAndRunPage(test, page_set, expectations, finder_options,
if (test.StopBrowserAfterPage(state.browser, page)):
state.StopBrowser()
- if state.first_page[page]:
- state.first_page[page] = False
- if test.discard_first_result:
- return results
- return results_for_current_run
+ return
except exceptions.BrowserGoneException as e:
state.StopBrowser()
if not tries:
@@ -415,12 +410,18 @@ def Run(test, page_set, expectations, finder_options):
while state.repeat_state.ShouldRepeatPage():
results.WillRunPage(page)
try:
- results = _PrepareAndRunPage(
+ _PrepareAndRunPage(
test, page_set, expectations, finder_options, browser_options,
page, credentials_path, possible_browser, results, state)
finally:
state.repeat_state.DidRunPage()
- results.DidRunPage(page)
+
+ discard_run = False
+ if state.first_page[page]:
+ state.first_page[page] = False
+ if test.discard_first_result:
+ discard_run = True
+ results.DidRunPage(page, discard_run=discard_run)
test.DidRunPageRepeats(page)
if (not test.max_failures is None and
len(results.failures) > test.max_failures):
diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py
index 0ea36a1..701c293 100644
--- a/tools/telemetry/telemetry/page/page_runner_unittest.py
+++ b/tools/telemetry/telemetry/page/page_runner_unittest.py
@@ -20,6 +20,7 @@ from telemetry.page import page_runner
from telemetry.page import test_expectations
from telemetry.unittest import options_for_unittests
from telemetry.value import scalar
+from telemetry.value import string
SIMPLE_CREDENTIALS_STRING = """
@@ -57,6 +58,10 @@ class StubCredentialsBackend(object):
self.did_get_login_no_longer_needed = True
+def GetSuccessfulPageRuns(results):
+ return [run for run in results.all_page_runs if run.ok or run.skipped]
+
+
class PageRunnerTests(unittest.TestCase):
# TODO(nduca): Move the basic "test failed, test succeeded" tests from
# page_measurement_unittest to here.
@@ -75,7 +80,7 @@ class PageRunnerTests(unittest.TestCase):
options.output_format = 'none'
SetUpPageRunnerArguments(options)
results = page_runner.Run(Test(), ps, expectations, options)
- self.assertEquals(0, len(results.successes))
+ self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
def testHandlingOfTestThatRaisesWithNonFatalUnknownExceptions(self):
@@ -105,7 +110,7 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(test, ps, expectations, options)
self.assertEquals(2, test.run_count)
- self.assertEquals(1, len(results.successes))
+ self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
def testHandlingOfCrashedTabWithExpectedFailure(self):
@@ -124,7 +129,7 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(
Test(), ps, expectations, options)
- self.assertEquals(1, len(results.successes))
+ self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
def testRetryOnBrowserCrash(self):
@@ -135,7 +140,11 @@ class PageRunnerTests(unittest.TestCase):
class CrashyMeasurement(page_measurement.PageMeasurement):
has_crashed = False
- def MeasurePage(self, _, tab, __):
+ def MeasurePage(self, page, tab, results):
+ # This value should be discarded on the first run when the
+ # browser crashed.
+ results.AddValue(
+ string.StringValue(page, 'test', 't', str(self.has_crashed)))
if not self.has_crashed:
self.has_crashed = True
raise exceptions.BrowserGoneException(tab.browser)
@@ -146,8 +155,11 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(CrashyMeasurement(), ps, expectations, options)
- self.assertEquals(1, len(results.successes))
+ self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
+ self.assertEquals(1, len(results.all_page_specific_values))
+ self.assertEquals(
+ 'True', results.all_page_specific_values[0].GetRepresentativeString())
@decorators.Disabled('xp') # Flaky, http://crbug.com/390079.
def testDiscardFirstResult(self):
@@ -162,8 +174,9 @@ class PageRunnerTests(unittest.TestCase):
@property
def discard_first_result(self):
return True
- def MeasurePage(self, *args):
- pass
+
+ def MeasurePage(self, page, _, results):
+ results.AddValue(string.StringValue(page, 'test', 't', page.url))
options = options_for_unittests.GetCopy()
options.output_format = 'none'
@@ -175,30 +188,34 @@ class PageRunnerTests(unittest.TestCase):
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
results = page_runner.Run(Measurement(), ps, expectations, options)
- self.assertEquals(0, len(results.successes))
+ self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
+ self.assertEquals(0, len(results.all_page_specific_values))
options.page_repeat = 1
options.pageset_repeat = 2
SetUpPageRunnerArguments(options)
results = page_runner.Run(Measurement(), ps, expectations, options)
- self.assertEquals(2, len(results.successes))
+ self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
+ self.assertEquals(2, len(results.all_page_specific_values))
options.page_repeat = 2
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
results = page_runner.Run(Measurement(), ps, expectations, options)
- self.assertEquals(2, len(results.successes))
+ self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
+ self.assertEquals(2, len(results.all_page_specific_values))
options.output_format = 'html'
options.page_repeat = 1
options.pageset_repeat = 1
SetUpPageRunnerArguments(options)
results = page_runner.Run(Measurement(), ps, expectations, options)
- self.assertEquals(0, len(results.successes))
+ self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
+ self.assertEquals(0, len(results.all_page_specific_values))
@decorators.Disabled('win')
def testPagesetRepeat(self):
@@ -230,7 +247,7 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(Measurement(), ps, expectations, options)
results.PrintSummary()
- self.assertEquals(4, len(results.successes))
+ self.assertEquals(4, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
with open(output_file) as f:
stdout = f.read()
@@ -475,7 +492,7 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(test, ps, expectations, options)
self.assertFalse(test.will_navigate_to_page_called)
- self.assertEquals(0, len(results.successes))
+ self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
def TestUseLiveSitesFlag(self, options, expect_from_archive):
@@ -558,7 +575,7 @@ class PageRunnerTests(unittest.TestCase):
options.output_format = 'none'
SetUpPageRunnerArguments(options)
results = page_runner.Run(Test(max_failures=2), ps, expectations, options)
- self.assertEquals(0, len(results.successes))
+ self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
# Runs up to max_failures+1 failing tests before stopping, since
# every tests after max_failures failures have been encountered
# may all be passing.
diff --git a/tools/telemetry/telemetry/page/profile_generator.py b/tools/telemetry/telemetry/page/profile_generator.py
index e30d414..1a6c03e 100644
--- a/tools/telemetry/telemetry/page/profile_generator.py
+++ b/tools/telemetry/telemetry/page/profile_generator.py
@@ -77,7 +77,7 @@ def GenerateProfiles(profile_creator_class, profile_creator_name, options):
if results.failures:
logging.warning('Some pages failed.')
logging.warning('Failed pages:\n%s',
- '\n'.join(results.pages_that_had_failures))
+ '\n'.join(results.pages_that_failed))
return 1
# Everything is a-ok, move results to final destination.
diff --git a/tools/telemetry/telemetry/page/record_wpr.py b/tools/telemetry/telemetry/page/record_wpr.py
index 92cdad9..9d46c9c 100755
--- a/tools/telemetry/telemetry/page/record_wpr.py
+++ b/tools/telemetry/telemetry/page/record_wpr.py
@@ -174,7 +174,8 @@ class WprRecorder(object):
logging.warning('Some pages failed and/or were skipped. The recording '
'has not been updated for these pages.')
results.PrintSummary()
- self._page_set.wpr_archive_info.AddRecordedPages(results.successes)
+ self._page_set.wpr_archive_info.AddRecordedPages(
+ results.pages_that_succeeded)
def Main(base_dir):
diff --git a/tools/telemetry/telemetry/page/record_wpr_unittest.py b/tools/telemetry/telemetry/page/record_wpr_unittest.py
index c8de3ed..75ae016 100644
--- a/tools/telemetry/telemetry/page/record_wpr_unittest.py
+++ b/tools/telemetry/telemetry/page/record_wpr_unittest.py
@@ -70,6 +70,7 @@ class MockPageTest(page_test.PageTest):
class MockBenchmark(benchmark.Benchmark):
test = MockPageTest
+ mock_page_set = None
@classmethod
def AddTestCommandLineArgs(cls, group):
@@ -79,7 +80,8 @@ class MockBenchmark(benchmark.Benchmark):
kwargs = {}
if (options.mock_benchmark_url):
kwargs['url'] = options.mock_benchmark_url
- return MockPageSet(**kwargs)
+ self.mock_page_set = MockPageSet(**kwargs)
+ return self.mock_page_set
class RecordWprUnitTests(tab_test_case.TabTestCase):
@@ -139,23 +141,20 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
def testWprRecorderWithPageSet(self):
flags = []
+ mock_page_set = MockPageSet(url=self._url)
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
- MockPageSet(url=self._url), flags)
+ mock_page_set, flags)
results = wpr_recorder.Record()
- self.assertEquals(1, len(results.successes))
- mock_page = results.successes.pop()
- self.assertTrue('RunFoo' in mock_page.func_calls)
- self.assertFalse('RunBaz' in mock_page.func_calls)
+ self.assertEqual(set(mock_page_set.pages), results.pages_that_succeeded)
def testWprRecorderWithBenchmark(self):
flags = ['--mock-benchmark-url', self._url]
- wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
+ mock_benchmark = MockBenchmark()
+ wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
flags)
results = wpr_recorder.Record()
- self.assertEquals(1, len(results.successes))
- mock_page = results.successes.pop()
- self.assertFalse('RunFoo' in mock_page.func_calls)
- self.assertTrue('RunBaz' in mock_page.func_calls)
+ self.assertEqual(set(mock_benchmark.mock_page_set.pages),
+ results.pages_that_succeeded)
def testCommandLineFlags(self):
flags = [
diff --git a/tools/telemetry/telemetry/results/gtest_test_results.py b/tools/telemetry/telemetry/results/gtest_test_results.py
index 7554d2e..244efe8 100644
--- a/tools/telemetry/telemetry/results/gtest_test_results.py
+++ b/tools/telemetry/telemetry/results/gtest_test_results.py
@@ -63,18 +63,26 @@ class GTestTestResults(page_test_results.PageTestResults):
self._output_stream.flush()
def PrintSummary(self):
- unit = 'test' if len(self.successes) == 1 else 'tests'
+ successful_runs = []
+ failed_runs = []
+ for run in self.all_page_runs:
+ if run.failed:
+ failed_runs.append(run)
+ else:
+ successful_runs.append(run)
+
+ unit = 'test' if len(successful_runs) == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (
- '%d %s.' % (len(self.successes), unit))
+ '%d %s.' % (len(successful_runs), unit))
if self.failures:
- unit = 'test' if len(self.failures) == 1 else 'tests'
+ unit = 'test' if len(failed_runs) == 1 else 'tests'
print >> self._output_stream, '[ FAILED ]', (
'%d %s, listed below:' % (len(self.failures), unit))
for failure_value in self.failures:
print >> self._output_stream, '[ FAILED ] ', (
failure_value.page.display_name)
print >> self._output_stream
- count = len(self.failures)
+ count = len(failed_runs)
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
diff --git a/tools/telemetry/telemetry/results/gtest_test_results_unittest.py b/tools/telemetry/telemetry/results/gtest_test_results_unittest.py
index 565ddc1..36f7328 100644
--- a/tools/telemetry/telemetry/results/gtest_test_results_unittest.py
+++ b/tools/telemetry/telemetry/results/gtest_test_results_unittest.py
@@ -46,6 +46,7 @@ class GTestTestResultsTest(
results.WillRunPage(test_page_set.pages[0])
self._mock_timer.SetTime(0.007)
results.AddSuccess(test_page_set.pages[0])
+ results.DidRunPage(test_page_set.pages[0])
results.PrintSummary()
expected = ('[ RUN ] http://www.foo.com/\n'
@@ -60,6 +61,8 @@ class GTestTestResultsTest(
results.WillRunPage(test_page_set.pages[0])
exc_info = self.CreateException()
results.AddValue(failure.FailureValue(test_page_set.pages[0], exc_info))
+ results.DidRunPage(test_page_set.pages[0])
+
results.PrintSummary()
exception_trace = ''.join(traceback.format_exception(*exc_info))
expected = ('[ RUN ] http://www.foo.com/\n'
@@ -78,10 +81,12 @@ class GTestTestResultsTest(
self._mock_timer.SetTime(0.007)
results.AddValue(skip.SkipValue(test_page_set.pages[0],
'Page skipped for testing reason'))
+ results.DidRunPage(test_page_set.pages[0])
+
results.PrintSummary()
expected = ('[ RUN ] http://www.foo.com/\n'
'[ OK ] http://www.foo.com/ (7 ms)\n'
- '[ PASSED ] 0 tests.\n\n')
+ '[ PASSED ] 1 test.\n\n')
self.assertEquals(expected, ''.join(results.output_data))
def testPassAndFailedPages(self):
@@ -92,18 +97,22 @@ class GTestTestResultsTest(
results.WillRunPage(test_page_set.pages[0])
self._mock_timer.SetTime(0.007)
results.AddSuccess(test_page_set.pages[0])
+ results.DidRunPage(test_page_set.pages[0])
results.WillRunPage(test_page_set.pages[1])
self._mock_timer.SetTime(0.009)
results.AddValue(failure.FailureValue(test_page_set.pages[1], exc_info))
+ results.DidRunPage(test_page_set.pages[1])
results.WillRunPage(test_page_set.pages[2])
self._mock_timer.SetTime(0.015)
results.AddValue(failure.FailureValue(test_page_set.pages[2], exc_info))
+ results.DidRunPage(test_page_set.pages[2])
results.WillRunPage(test_page_set.pages[3])
self._mock_timer.SetTime(0.020)
results.AddSuccess(test_page_set.pages[3])
+ results.DidRunPage(test_page_set.pages[3])
results.PrintSummary()
exception_trace = ''.join(traceback.format_exception(*exc_info))
@@ -132,6 +141,7 @@ class GTestTestResultsTest(
results.WillRunPage(test_page_set.pages[0])
self._mock_timer.SetTime(0.007)
results.AddSuccess(test_page_set.pages[0])
+ results.DidRunPage(test_page_set.pages[0])
expected = ('[ RUN ] http://www.foo.com/\n'
'[ OK ] http://www.foo.com/ (7 ms)\n')
self.assertEquals(expected, ''.join(results.output_data))
@@ -140,6 +150,7 @@ class GTestTestResultsTest(
self._mock_timer.SetTime(0.009)
exception_trace = ''.join(traceback.format_exception(*exc_info))
results.AddValue(failure.FailureValue(test_page_set.pages[1], exc_info))
+ results.DidRunPage(test_page_set.pages[1])
expected = ('[ RUN ] http://www.foo.com/\n'
'[ OK ] http://www.foo.com/ (7 ms)\n'
'[ RUN ] http://www.bar.com/\n'
diff --git a/tools/telemetry/telemetry/results/json_output_formatter.py b/tools/telemetry/telemetry/results/json_output_formatter.py
index 0740797..1b1496f 100644
--- a/tools/telemetry/telemetry/results/json_output_formatter.py
+++ b/tools/telemetry/telemetry/results/json_output_formatter.py
@@ -17,7 +17,7 @@ def ResultsAsDict(res):
return result_dict
def _all_pages(res):
- pages = set(value.page for value in res.all_page_specific_values)
+ pages = set(page_run.page for page_run in res.all_page_runs)
return pages
class JsonOutputFormatter(output_formatter.OutputFormatter):
diff --git a/tools/telemetry/telemetry/results/page_measurement_results_unittest.py b/tools/telemetry/telemetry/results/page_measurement_results_unittest.py
index 5b61f68..4ce1b8e 100644
--- a/tools/telemetry/telemetry/results/page_measurement_results_unittest.py
+++ b/tools/telemetry/telemetry/results/page_measurement_results_unittest.py
@@ -112,13 +112,13 @@ class PageMeasurementResultsTest(unittest.TestCase):
results = SummarySavingPageMeasurementResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
- results.DidRunPage(self.pages[0])
results.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'message'))
+ results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 7))
- results.DidRunPage(self.pages[1])
results.AddValue(failure.FailureValue.FromMessage(self.pages[1], 'message'))
+ results.DidRunPage(self.pages[1])
results.PrintSummary()
self.assertEquals(results.results, [])
diff --git a/tools/telemetry/telemetry/results/page_run.py b/tools/telemetry/telemetry/results/page_run.py
new file mode 100644
index 0000000..75ca12f
--- /dev/null
+++ b/tools/telemetry/telemetry/results/page_run.py
@@ -0,0 +1,54 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import failure
+from telemetry.value import skip
+
+
+class PageRun(object):
+ def __init__(self, page):
+ self._page = page
+ self._values = []
+
+ def AddValue(self, value):
+ self._values.append(value)
+
+ def ClearValues(self):
+ self._values = []
+
+ @property
+ def page(self):
+ return self._page
+
+ @property
+ def values(self):
+ """The values that correspond to this page run."""
+ return self._values
+
+ @property
+ def ok(self):
+ """Whether the current run is still ok.
+
+ To be precise: returns true if there is neither FailureValue nor
+ SkipValue in self.values.
+ """
+ return not self.skipped and not self.failed
+
+ @property
+ def skipped(self):
+ """Whether the current run is being skipped.
+
+ To be precise: returns true if there is any SkipValue in self.values.
+ """
+ return any(isinstance(v, skip.SkipValue) for v in self.values)
+
+ @property
+ def failed(self):
+ """Whether the current run failed.
+
+ To be precise: returns true if there is a FailureValue but not
+ SkipValue in self.values.
+ """
+ return not self.skipped and any(
+ isinstance(v, failure.FailureValue) for v in self.values)
diff --git a/tools/telemetry/telemetry/results/page_run_unittest.py b/tools/telemetry/telemetry/results/page_run_unittest.py
new file mode 100644
index 0000000..800ec3d
--- /dev/null
+++ b/tools/telemetry/telemetry/results/page_run_unittest.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.page import page_set
+from telemetry.results import page_run
+from telemetry.value import failure
+from telemetry.value import scalar
+from telemetry.value import skip
+
+
+class PageRunTest(unittest.TestCase):
+ def setUp(self):
+ self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
+ self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
+
+ @property
+ def pages(self):
+ return self.page_set.pages
+
+ def testPageRunFailed(self):
+ run = page_run.PageRun(self.pages[0])
+ run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
+ self.assertFalse(run.ok)
+ self.assertTrue(run.failed)
+ self.assertFalse(run.skipped)
+
+ run = page_run.PageRun(self.pages[0])
+ run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
+ run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
+ self.assertFalse(run.ok)
+ self.assertTrue(run.failed)
+ self.assertFalse(run.skipped)
+
+ def testPageRunSkipped(self):
+ run = page_run.PageRun(self.pages[0])
+ run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
+ run.AddValue(skip.SkipValue(self.pages[0], 'test'))
+ self.assertFalse(run.ok)
+ self.assertFalse(run.failed)
+ self.assertTrue(run.skipped)
+
+ run = page_run.PageRun(self.pages[0])
+ run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
+ run.AddValue(skip.SkipValue(self.pages[0], 'test'))
+ self.assertFalse(run.ok)
+ self.assertFalse(run.failed)
+ self.assertTrue(run.skipped)
+
+ def testPageRunSucceeded(self):
+ run = page_run.PageRun(self.pages[0])
+ self.assertTrue(run.ok)
+ self.assertFalse(run.failed)
+ self.assertFalse(run.skipped)
+
+ run = page_run.PageRun(self.pages[0])
+ run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
+ self.assertTrue(run.ok)
+ self.assertFalse(run.failed)
+ self.assertFalse(run.skipped)
diff --git a/tools/telemetry/telemetry/results/page_test_results.py b/tools/telemetry/telemetry/results/page_test_results.py
index 4d3e98d..c234eb6 100644
--- a/tools/telemetry/telemetry/results/page_test_results.py
+++ b/tools/telemetry/telemetry/results/page_test_results.py
@@ -8,9 +8,11 @@ import logging
import traceback
from telemetry import value as value_module
+from telemetry.results import page_run
from telemetry.value import failure
from telemetry.value import skip
+
class PageTestResults(object):
def __init__(self, output_stream=None, output_formatters=None, trace_tag=''):
"""
@@ -29,14 +31,10 @@ class PageTestResults(object):
self._output_formatters = (
output_formatters if output_formatters is not None else [])
self._trace_tag = trace_tag
- self._current_page = None
-
- # TODO(chrishenry,eakuefner): Remove self.successes once they can
- # be inferred.
- self.successes = []
+ self._current_page_run = None
+ self._all_page_runs = []
self._representative_value_for_each_value_name = {}
- self._all_page_specific_values = []
self._all_summary_values = []
def __copy__(self):
@@ -50,7 +48,12 @@ class PageTestResults(object):
@property
def all_page_specific_values(self):
- return self._all_page_specific_values
+ values = []
+ for run in self._all_page_runs:
+ values += run.values
+ if self._current_page_run:
+ values += self._current_page_run.values
+ return values
@property
def all_summary_values(self):
@@ -58,43 +61,72 @@ class PageTestResults(object):
@property
def current_page(self):
- return self._current_page
+ assert self._current_page_run, 'Not currently running test.'
+ return self._current_page_run.page
+
+ @property
+ def all_page_runs(self):
+ return self._all_page_runs
@property
def pages_that_succeeded(self):
"""Returns the set of pages that succeeded."""
- pages = set(value.page for value in self._all_page_specific_values)
- pages.difference_update(self.pages_that_had_failures)
+ pages = set(run.page for run in self.all_page_runs)
+ pages.difference_update(self.pages_that_failed)
return pages
@property
- def pages_that_had_failures(self):
+ def pages_that_failed(self):
"""Returns the set of failed pages."""
- return set(v.page for v in self.failures)
+ failed_pages = set()
+ for run in self.all_page_runs:
+ if run.failed:
+ failed_pages.add(run.page)
+ return failed_pages
@property
def failures(self):
- values = self._all_page_specific_values
+ values = self.all_page_specific_values
return [v for v in values if isinstance(v, failure.FailureValue)]
@property
def skipped_values(self):
- values = self._all_page_specific_values
+ values = self.all_page_specific_values
return [v for v in values if isinstance(v, skip.SkipValue)]
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
def WillRunPage(self, page):
- self._current_page = page
+ assert not self._current_page_run, 'Did not call DidRunPage.'
+ self._current_page_run = page_run.PageRun(page)
- def DidRunPage(self, page): # pylint: disable=W0613
- self._current_page = None
+ def DidRunPage(self, page, discard_run=False): # pylint: disable=W0613
+ """
+ Args:
+ page: The current page under test.
+ discard_run: Whether to discard the entire run and all of its
+ associated results.
+ """
+ assert self._current_page_run, 'Did not call WillRunPage.'
+ if not discard_run:
+ self._all_page_runs.append(self._current_page_run)
+ self._current_page_run = None
+
+ def WillAttemptPageRun(self):
+ """To be called when a single attempt on a page run is starting.
+
+ This is called between WillRunPage and DidRunPage and can be
+ called multiple times, one for each attempt.
+ """
+ # Clear any values from previous attempts for this page run.
+ self._current_page_run.ClearValues()
def AddValue(self, value):
+ assert self._current_page_run, 'Not currently running test.'
self._ValidateValue(value)
# TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
- self._all_page_specific_values.append(value)
+ self._current_page_run.AddValue(value)
def AddSummaryValue(self, value):
assert value.page is None
@@ -109,8 +141,9 @@ class PageTestResults(object):
value.name]
assert value.IsMergableWith(representative_value)
+ # TODO(chrishenry): Kill this in a separate patch.
def AddSuccess(self, page):
- self.successes.append(page)
+ pass
def PrintSummary(self):
for output_formatter in self._output_formatters:
@@ -118,7 +151,7 @@ class PageTestResults(object):
if self.failures:
logging.error('Failed pages:\n%s', '\n'.join(
- p.display_name for p in self.pages_that_had_failures))
+ p.display_name for p in self.pages_that_failed))
if self.skipped_values:
logging.warning('Skipped pages:\n%s', '\n'.join(
diff --git a/tools/telemetry/telemetry/results/page_test_results_unittest.py b/tools/telemetry/telemetry/results/page_test_results_unittest.py
index 4cb8296..c170c1f 100644
--- a/tools/telemetry/telemetry/results/page_test_results_unittest.py
+++ b/tools/telemetry/telemetry/results/page_test_results_unittest.py
@@ -1,16 +1,16 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
import os
-from telemetry.results import base_test_results_unittest
from telemetry.page import page_set
+from telemetry.results import base_test_results_unittest
from telemetry.results import page_test_results
from telemetry.value import failure
from telemetry.value import skip
-class NonPrintingPageTestResults(
- page_test_results.PageTestResults):
+class NonPrintingPageTestResults(page_test_results.PageTestResults):
def __init__(self):
super(NonPrintingPageTestResults, self).__init__()
@@ -28,21 +28,39 @@ class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
def pages(self):
return self.page_set.pages
- def test_failures(self):
+ def testFailures(self):
results = NonPrintingPageTestResults()
+ results.WillRunPage(self.pages[0])
results.AddValue(
failure.FailureValue(self.pages[0], self.CreateException()))
+ results.DidRunPage(self.pages[0])
+
+ results.WillRunPage(self.pages[1])
results.AddSuccess(self.pages[1])
- self.assertEquals(results.pages_that_had_failures, set([self.pages[0]]))
- self.assertEquals(results.successes, [self.pages[1]])
+ results.DidRunPage(self.pages[1])
- def test_skips(self):
+ self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
+ self.assertEqual(set([self.pages[1]]), results.pages_that_succeeded)
+
+ self.assertEqual(2, len(results.all_page_runs))
+ self.assertTrue(results.all_page_runs[0].failed)
+ self.assertTrue(results.all_page_runs[1].ok)
+
+ def testSkips(self):
results = NonPrintingPageTestResults()
+ results.WillRunPage(self.pages[0])
results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
+ results.DidRunPage(self.pages[0])
+
+ results.WillRunPage(self.pages[1])
results.AddSuccess(self.pages[1])
+ results.DidRunPage(self.pages[1])
- expected_page_id = self.pages[0].id
- actual_page_id = results.skipped_values[0].page.id
+ self.assertTrue(results.all_page_runs[0].skipped)
+ self.assertEqual(self.pages[0], results.all_page_runs[0].page)
+ self.assertEqual(set([self.pages[0], self.pages[1]]),
+ results.pages_that_succeeded)
- self.assertEquals(expected_page_id, actual_page_id)
- self.assertEquals(results.successes, [self.pages[1]])
+ self.assertEqual(2, len(results.all_page_runs))
+ self.assertTrue(results.all_page_runs[0].skipped)
+ self.assertTrue(results.all_page_runs[1].ok)