author     kkania@google.com <kkania@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-09-24 18:07:11 +0000
committer  kkania@google.com <kkania@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-09-24 18:07:11 +0000
commit     480fdef415946f3a4a77d567f0fdb0716d70cc57 (patch)
tree       15d2efc158c13e9ecb0e722d69b5a53c4f30d4f6 /o3d
parent     df6d7777e48c1136c745e40e745fe5e5f3513ff4 (diff)
Changed selenium tests to recover from test crashes/hangs. Divided perceptual diff tests into unit test cases, which can be run alongside the selenium tests.
Review URL: http://codereview.chromium.org/212031
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27092 0039d316-1c4b-4281-b951-d872f2087c98
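
The new flow in main.py and test_runner.py (below) is a polling hand-off: TestBrowser() watches a daemon SeleniumTestRunner, aborts the current test once it blows past its deadline, and passing screenshot tests are queued for a separate PDiffTestRunner. What follows is a minimal, self-contained sketch of that pattern only; the class name Worker, the tuple layout of the queued tests, and the two-second deadline are placeholders invented for illustration, not the real API. It sticks to the repository's Python 2 style (Queue, print).

# Illustrative sketch -- simplified stand-ins for test_runner.SeleniumTestRunner
# and the TestBrowser() loop in main.py; names and timings are made up.
import Queue
import threading
import time

class Worker(threading.Thread):
  def __init__(self, test_queue, pdiff_queue):
    threading.Thread.__init__(self)
    self.setDaemon(True)            # lets the process exit even if a test hangs
    self.test_queue = test_queue
    self.pdiff_queue = pdiff_queue
    self.finished = threading.Event()
    self.done = threading.Event()
    self.deadline = None

  def run(self):
    while not self.test_queue.empty():
      name, seconds, wants_pdiff = self.test_queue.get_nowait()
      self.deadline = time.time() + 2.0   # allotted time for this "test"
      time.sleep(seconds)                 # stand-in for running a selenium test
      if wants_pdiff:
        self.pdiff_queue.put(name)        # hand off screenshot comparison work
      self.finished.set()
      while self.finished.isSet():        # block until the main loop says go
        time.sleep(0.1)
    self.done.set()

test_queue = Queue.Queue()
pdiff_queue = Queue.Queue()
for entry in [("quick_test", 0.1, True), ("hung_test", 60.0, False)]:
  test_queue.put(entry)

worker = Worker(test_queue, pdiff_queue)
worker.start()
while not worker.done.isSet():
  if worker.finished.isSet():
    worker.finished.clear()               # let the worker pull the next test
  elif worker.deadline and time.time() > worker.deadline:
    print "test hung; the real runner would AbortTest() and restart the browser"
    break
  time.sleep(0.1)
print "queued pdiff jobs:", pdiff_queue.qsize()
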
Diffstat (limited to 'o3d')
-rw-r--r--  o3d/tests/selenium/javascript_unit_test_list.txt |   2
-rw-r--r--  o3d/tests/selenium/javascript_unit_tests.py      |   8
-rw-r--r--  o3d/tests/selenium/main.py                       | 761
-rw-r--r--  o3d/tests/selenium/pdiff_test.py                 | 139
-rw-r--r--  o3d/tests/selenium/pulse_testrunner.py           | 149
-rw-r--r--  o3d/tests/selenium/sample_list.txt               |  19
-rw-r--r--  o3d/tests/selenium/samples_tests.py              |   6
-rw-r--r--  o3d/tests/selenium/selenium_constants.py         |   4
-rw-r--r--  o3d/tests/selenium/selenium_utilities.py         |  76
-rw-r--r--  o3d/tests/selenium/test_runner.py                | 398
10 files changed, 841 insertions, 721 deletions
diff --git a/o3d/tests/selenium/javascript_unit_test_list.txt b/o3d/tests/selenium/javascript_unit_test_list.txt index c97627d..b54c637 100644 --- a/o3d/tests/selenium/javascript_unit_test_list.txt +++ b/o3d/tests/selenium/javascript_unit_test_list.txt @@ -96,4 +96,4 @@ small effect-import-test medium TestStressDrawShapes except(*googlechrome) medium TestStressMultiWindow except(*googlechrome) -large TestStressCullingZSort pdiff_threshold(450) +large TestStressCullingZSort pdiff_threshold(450) screenshots(8) diff --git a/o3d/tests/selenium/javascript_unit_tests.py b/o3d/tests/selenium/javascript_unit_tests.py index 5b59f19..b714298 100644 --- a/o3d/tests/selenium/javascript_unit_tests.py +++ b/o3d/tests/selenium/javascript_unit_tests.py @@ -49,10 +49,10 @@ import selenium_utilities class JavaScriptUnitTests(selenium_utilities.SeleniumTestCase): """Runs the JavaScript unit tests for the sample utilities.""" - def __init__(self, name, session, browser, path_to_html, test_type=None, - sample_path=None, options=None): + def __init__(self, name, browser, path_to_html, test_type=None, + sample_path=None, options=None): selenium_utilities.SeleniumTestCase.__init__( - self, name, session, browser, path_to_html, test_type, sample_path, + self, name, browser, path_to_html, test_type, sample_path, options) def GenericTest(self): @@ -275,7 +275,7 @@ class JavaScriptUnitTests(selenium_utilities.SeleniumTestCase): s.run_script("window.g_clock = " + str(clock * 3.14159 * 2.5 + 0.5)) self.assertTrue( selenium_utilities.TakeScreenShot(s, self.browser, "window.g_client", - "cullingzsort" + str(clock))) + "cullingzsort" + str(clock + 1))) s.run_script("g_framesRendered = 0") while int(s.get_eval("window.g_framesRendered")) < 3: s.run_script("window.g_client.render()") diff --git a/o3d/tests/selenium/main.py b/o3d/tests/selenium/main.py index bf4e9a8..44075eb 100644 --- a/o3d/tests/selenium/main.py +++ b/o3d/tests/selenium/main.py @@ -61,12 +61,13 @@ import time import unittest import gflags import javascript_unit_tests -# Import custom testrunner for pulse -import pulse_testrunner +import test_runner import selenium import samples_tests import selenium_constants import selenium_utilities +import pdiff_test +import Queue if sys.platform == 'win32' or sys.platform == 'cygwin': default_java_exe = "java.exe" @@ -114,6 +115,7 @@ gflags.DEFINE_string( "30", "Specifies the timeout value, in seconds, for the selenium server.") + # Browsers to choose from (for browser flag). # use --browser $BROWSER_NAME to run # tests for that browser @@ -133,7 +135,6 @@ gflags.DEFINE_string( "", "specifies the path from the web root to the samples.") - class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """Hook to handle HTTP server requests. @@ -147,6 +148,8 @@ class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): pass # TODO: might be nice to have a verbose option for debugging. + + class LocalFileHTTPServer(threading.Thread): """Minimal HTTP server that serves local files. @@ -348,137 +351,104 @@ class SeleniumRemoteControl(threading.Thread): return selenium_server -class SeleniumSession(object): - """A selenium browser session, with support servers. - - The support servers include a Selenium Remote Control server, and - a local HTTP server to serve static test files. 
- - Members: - session: a selenium() instance - selenium_server: a SeleniumRemoteControl() instance - http_server: a LocalFileHTTPServer() instance - runner: a TestRunner() instance - """ - - def __init__(self, verbose, java_path, selenium_server, server_timeout, - http_root=None): - """Initializes a Selenium Session. +class SeleniumSessionBuilder: + def __init__(self, sel_port, sel_timeout, http_port, browserpath): - Args: - verbose: boolean verbose flag - java_path: path to java used to run selenium. - selenium_server: path to jar containing selenium server. - server_timeout: server timeout value, in seconds. - http_root: Serve http pages using this path as the document root. When - None, use the default. - """ - # Start up a static file server, to serve the test pages. - - if not http_root: - http_root = FLAGS.product_dir - - self.http_server = LocalFileHTTPServer.StartServer(http_root) - - if self.http_server: - # Start up the Selenium Remote Control Server - self.selenium_server = SeleniumRemoteControl.StartServer(verbose, - java_path, - selenium_server, - server_timeout) - if not self.http_server or not self.selenium_server: - return - - # Set up a testing runner - self.runner = pulse_testrunner.PulseTestRunner() - - # Set up a phantom selenium session so we can call shutdown if needed. - self.session = selenium.selenium( - "localhost", self.selenium_server.selenium_port, "*firefox", - "http://" + socket.gethostname() + ":" + - str(self.http_server.http_port)) - - def StartSession(self, browser): - """Starts the Selenium Session and connects to the HTTP server. - - Args: - browser: selenium browser name - """ + self.sel_port = sel_port + self.sel_timeout = sel_timeout + self.http_port = http_port + self.browserpath = browserpath + def NewSeleniumSession(self, browser): if browser == "*googlechrome": # TODO: Replace socket.gethostname() with "localhost" # once Chrome local proxy fix is in. server_url = "http://" + socket.gethostname() + ":" else: server_url = "http://localhost:" - server_url += str(self.http_server.http_port) + server_url += str(self.http_port) browser_path_with_space = "" - if FLAGS.browserpath: - browser_path_with_space = " " + FLAGS.browserpath - - self.session = selenium.selenium("localhost", - self.selenium_server.selenium_port, - browser + browser_path_with_space, - server_url) - self.session.start() - - def CloseSession(self): - """Closes the selenium sesssion.""" - self.session.stop() - - def TearDown(self): - """Stops the selenium server.""" - self.session.shut_down_selenium_server() - - def TestBrowser(self, browser, test_list, test_prefix, test_suffixes, - server_timeout): - """Runs Selenium tests for a specific browser. + if self.browserpath: + browser_path_with_space = " " + self.browserpath - Args: - browser: selenium browser name (eg. *iexplore, *firefox). - test_list: list to add tests to. - test_prefix: prefix of tests to run. - test_suffixes: comma separated suffixes of tests to run. - server_timeout: server timeout value, in milliseconds - - Returns: - result: result of test runner. - """ - print "Testing %s..." 
% browser - self.StartSession(browser) - self.session.set_timeout(server_timeout) - self.runner.setBrowser(browser) - - try: - result = self.runner.run( - SeleniumSuite(self.session, browser, test_list, - test_prefix, test_suffixes)) - finally: - self.CloseSession() - - return result + new_session = selenium.selenium("localhost", + self.sel_port, + browser + browser_path_with_space, + server_url) + + new_session.start() + new_session.set_timeout(self.sel_timeout) + + return new_session -class LocalTestSuite(unittest.TestSuite): - """Wrapper for unittest.TestSuite so we can collect the tests.""" - def __init__(self): - unittest.TestSuite.__init__(self) - self.test_list = [] +def TestBrowser(session_builder, browser, test_list): + """Runs Selenium tests for a specific browser. - def addTest(self, name, test): - """Adds a test to the TestSuite and records its name and test_path. + Args: + session_builder: session_builder for creating new selenium sessions. + browser: selenium browser name (eg. *iexplore, *firefox). + test_list: list of tests. + + Returns: + summary_result: result of test runners. + """ + print "Testing %s..." % browser + + summary_result = test_runner.TestResult(test_runner.StringBuffer(), browser) + + # Fill up the selenium test queue. + test_queue = Queue.Queue() + for test in test_list: + test_queue.put(test) + + + pdiff_queue = None + if FLAGS.screenshots: + # Need to do screen comparisons. + # |pdiff_queue| is the queue of perceptual diff tests that need to be done. + # This queue is added to by individual slenium test runners. + # |pdiff_result_queue| is the result of the perceptual diff tests. + pdiff_queue = Queue.Queue() + pdiff_result_queue = Queue.Queue() + pdiff_worker = test_runner.PDiffTestRunner(pdiff_queue, + pdiff_result_queue, + browser) + pdiff_worker.start() + + # Start initial selenium test runner. + worker = test_runner.SeleniumTestRunner(session_builder, browser, + test_queue, pdiff_queue) + worker.start() + + # Run through all selenium tests. + while not worker.IsCompletelyDone(): + if worker.IsTesting() and worker.IsPastDeadline(): + # Test has taken more than allotted. Abort and go to next test. + worker.AbortTest() + + elif worker.DidFinishTest(): + # Do this so that a worker does not grab test off queue till we tell it. + result = worker.Continue() + result.printAll(sys.stdout) + summary_result.merge(result) + + if FLAGS.screenshots: + # Finish screenshot comparisons. + pdiff_worker.EndTesting() + while not pdiff_worker.IsCompletelyDone(): + time.sleep(1) + + # Be careful here, make sure no one else is editing |pdiff_reult_queue|. + while not pdiff_result_queue.empty(): + result = pdiff_result_queue.get() + result.printAll(sys.stdout) + summary_result.merge(result) + + return summary_result - Args: - name: name of test. - test: test to pass to unittest.TestSuite. - """ - unittest.TestSuite.addTest(self, test) - try: - self.test_list.append((name, test.options)) - except AttributeError: - self.test_list.append((name, [])) def MatchesSuffix(name, suffixes): @@ -500,21 +470,20 @@ def MatchesSuffix(name, suffixes): return True -def AddTests(test_suite, session, browser, module, filename, prefix, - test_prefix_filter, test_suffixes, path_to_html): - """Add tests defined in filename. +def _GetTestsFromFile(filename, prefix, test_prefix_filter, test_suffixes, + browser, module, path_to_html): + """Add tests defined in filename, and associated perceptual diff test, if + needed. Assumes module has a method "GenericTest" that uses self.args to run. 
Args: - test_suite: A Selenium test_suite to add tests to. - session: a Selenium instance. - browser: browser name. - module: module which will have method GenericTest() called to run each test. filename: filename of file with list of tests. prefix: prefix to add to the beginning of each test. test_prefix_filter: Only adds a test if it starts with this. test_suffixes: list of suffixes to filter by. An empty list = pass all. + browser: browser name. + module: module which will have method GenericTest() called to run each test. path_to_html: Path from server root to html """ # See comments in that file for the expected format. @@ -524,6 +493,8 @@ def AddTests(test_suite, session, browser, module, filename, prefix, samples = test_list_file.readlines() test_list_file.close() + tests = [] + for sample in samples: sample = sample.strip() if not sample or sample[0] == ";" or sample[0] == "#": @@ -535,13 +506,17 @@ def AddTests(test_suite, session, browser, module, filename, prefix, options = arguments[2:] # TODO: Add filter based on test_type - - name = ("Test" + prefix + re.sub("\W", "_", test_path) + - test_type.capitalize()) + if test_path.startswith("Test"): + name = test_path + else: + # Need to make a name. + name = ("Test" + prefix + re.sub("\W", "_", test_path) + + test_type.capitalize()) # Only execute this test if the current browser is not in the list # of skipped browsers. test_skipped = False + screenshot_count = 0 for option in options: if option.startswith("except"): skipped_platforms = selenium_utilities.GetArgument(option) @@ -549,388 +524,90 @@ def AddTests(test_suite, session, browser, module, filename, prefix, skipped_platforms = skipped_platforms.split(",") if browser in skipped_platforms: test_skipped = True + elif option.startswith("screenshots"): + screenshot_count += int(selenium_utilities.GetArgument(option)) + elif option.startswith("screenshot"): + screenshot_count += 1 + + if (test_prefix_filter and not name.startswith(test_prefix_filter) or + test_suffixes and not MatchesSuffix(name, test_suffixes)): + test_skipped = True if not test_skipped: - # Check if there is already a test function by this name in the module. - if (test_path.startswith(test_prefix_filter) and - hasattr(module, test_path) and callable(getattr(module, test_path))): - test_suite.addTest(test_path, module(test_path, session, browser, - path_to_html, options=options)) - elif (name.startswith(test_prefix_filter) and - MatchesSuffix(name, test_suffixes)): - # no, so add a method that will run a test generically. + # Add a test method with this name if it doesn't exist. + if not (hasattr(module, name) and callable(getattr(module, name))): setattr(module, name, module.GenericTest) - test_suite.addTest(name, module(name, session, browser, path_to_html, - test_type, test_path, options)) + + new_test = module(name, browser, path_to_html, test_type, test_path, + options) + + if screenshot_count and FLAGS.screenshots: + pdiff_name = name + 'Screenshots' + screenshot = selenium_utilities.ScreenshotNameFromTestName(test_path) + setattr(pdiff_test.PDiffTest, pdiff_name, + pdiff_test.PDiffTest.PDiffTest) + new_pdiff = pdiff_test.PDiffTest(pdiff_name, + screenshot_count, + screenshot, + FLAGS.screencompare, + FLAGS.screenshotsdir, + FLAGS.referencedir, + options) + tests += [(new_test, new_pdiff)] + else: + tests += [new_test] + + + return tests -def SeleniumSuite(session, browser, test_list, test_prefix, test_suffixes): - """Creates a test suite to run the unit tests. 
+def GetTestsForBrowser(browser, test_prefix, test_suffixes): + """Returns list of tests from test files. Args: - session: a selenium() instance browser: browser name - test_list: list to add tests to. test_prefix: prefix of tests to run. test_suffixes: A comma separated string of suffixes to filter by. Returns: - A selenium test suite. + A list of unittest.TestCase. """ - - test_suite = LocalTestSuite() - + tests = [] suffixes = test_suffixes.split(",") # add sample tests. filename = os.path.abspath(os.path.join(script_dir, "sample_list.txt")) - AddTests(test_suite, - session, - browser, - samples_tests.SampleTests, - filename, - "Sample", - test_prefix, - suffixes, - FLAGS.samplespath.replace("\\", "/")) - + tests += _GetTestsFromFile(filename, "Sample", test_prefix, suffixes, browser, + samples_tests.SampleTests, + FLAGS.samplespath.replace("\\","/")) + # add javascript tests. filename = os.path.abspath(os.path.join(script_dir, "javascript_unit_test_list.txt")) - AddTests(test_suite, - session, - browser, - javascript_unit_tests.JavaScriptUnitTests, - filename, - "UnitTest", - test_prefix, - suffixes, - '') + tests += _GetTestsFromFile(filename, "UnitTest", test_prefix, suffixes, + browser, javascript_unit_tests.JavaScriptUnitTests, + "") - test_list += test_suite.test_list + return tests - return test_suite - -def CompareScreenshots(browser, test_list, screencompare, screenshotsdir, - screencompare_tool, verbose): - """Performs the image validation for test-case frame captures. - - Args: - browser: selenium browser name - test_list: list of tests that ran. - screencompare: True to actually run tests. - screenshotsdir: path of directory containing images to compare. - screencompare_tool: path to image diff tool. - verbose: If True then outputs verbose info. - - Returns: - A Results object. - """ - print "Validating captured frames against reference data..." - - class Results(object): - """An object to return results of screenshot compares. - - Similar to unittest.TestResults. - """ - - def __init__(self): - object.__init__(self) - self.tests_run = 0 - self.current_test = None - self.errors = [] - self.failures = [] - self.start_time = 0 - - def StartTest(self, test): - """Adds a test. - - Args: - test: name of test. - """ - self.start_time = time.time() - self.tests_run += 1 - self.current_test = test - - def TimeTaken(self): - """Returns the time since the last call to StartTest.""" - return time.time() - self.start_time - - def AddFailure(self, test, browser, message): - """Adds a failure. - - Args: - test: name of the test. - browser: name of the browser. - message: error message to print - """ - self.failures.append(test) - print "ERROR: ", message - print("SELENIUMRESULT %s <%s> [%.3fs]: FAIL" - % (test, browser, self.TimeTaken())) - - def AddSuccess(self, test): - """Adds a success. - - Args: - test: name of the test. 
- """ - print("SELENIUMRESULT %s <%s> [%.3fs]: PASS" - % (test, browser, self.TimeTaken())) - - def WasSuccessful(self): - """Returns true if all tests were successful.""" - return not self.errors and not self.failures - - results = Results() - - if not screencompare: - return results - - base_path = os.getcwd() - - reference_files = os.listdir(os.path.join( - base_path, - selenium_constants.REFERENCE_SCREENSHOT_PATH)) - - generated_files = os.listdir(os.path.join(base_path, screenshotsdir)) - - # Prep the test list for matching - temp = [] - for (test, options) in test_list: - test = selenium_utilities.StripTestTypeSuffix(test) - temp.append((test.lower(), options)) - test_list = temp - - # Create regex object for filename - # file is in format "FILENAME_reference.png" - reference_file_name_regex = re.compile(r"^(.*)_reference\.png") - generated_file_name_regex = re.compile(r"^(.*)\.png") - - # check that there is a reference file for each generated file. - for file_name in generated_files: - match = generated_file_name_regex.search(file_name) - - if match is None: - # no matches - continue - - # Generated file name without png extension - actual_name = match.group(1) - - # Get full paths to reference and generated files - reference_file = os.path.join( - base_path, - selenium_constants.REFERENCE_SCREENSHOT_PATH, - actual_name + "_reference.png") - generated_file = os.path.join( - base_path, - screenshotsdir, - actual_name + ".png") - - test_name = "TestReferenceScreenshotExists_" + actual_name - results.StartTest(test_name) - if not os.path.exists(reference_file): - # reference file does not exist - results.AddFailure( - test_name, browser, - "Missing reference file %s for generated file %s." % - (reference_file, generated_file)) - else: - results.AddSuccess(test_name) - - # Assuming both the result and reference image sets are the same size, - # verify that corresponding images are similar within tolerance. - for file_name in reference_files: - match = reference_file_name_regex.search(file_name) - - if match is None: - # no matches - continue - - # Generated file name without png extension - actual_name = match.group(1) - # Get full paths to reference and generated files - reference_file = os.path.join( - base_path, - selenium_constants.REFERENCE_SCREENSHOT_PATH, - file_name) - platform_specific_reference_file = os.path.join( - base_path, - selenium_constants.PLATFORM_SPECIFIC_REFERENCE_SCREENSHOT_PATH, - actual_name + "_reference.png") - generated_file = os.path.join( - base_path, - screenshotsdir, - actual_name + ".png") - - # Generate a test case name - test_name = "TestScreenCompare_" + actual_name - - # skip the reference file if the test is not in the test list. - basename = os.path.splitext(os.path.basename(file_name))[0] - basename = re.sub("\d+_reference", "", basename).lower() - basename = re.sub("\W", "_", basename) - test_was_run = False - test_options = [] - for (test, options) in test_list: - if test.endswith(basename): - test_was_run = True - test_options = options or [] - break - - if test_was_run: - results.StartTest(test_name) - else: - # test was not planned to run for this reference image. - if os.path.exists(generated_file): - # a generated file exists? The test name does not match the screenshot. 
- results.StartTest(test_name) - results.AddFailure(test_name, browser, - "Test name and screenshot name do not match.") - continue - - # Check if there is a platform specific version of the reference image - if os.path.exists(platform_specific_reference_file): - reference_file = platform_specific_reference_file - - # Check if perceptual diff exists - pdiff_path = os.path.join(base_path, screencompare_tool) - if not os.path.exists(pdiff_path): - # Perceptualdiff.exe does not exist, fail. - results.AddFailure( - test_name, browser, - "Perceptual diff %s does not exist." % pdiff_path) - continue - - pixel_threshold = "10" - alpha_threshold = "1.0" - use_colorfactor = False - use_downsample = False - use_edge = True - edge_threshold = "5" - - # Find out if the test specified any options relating to perceptual diff - # that will override the defaults. - for opt in test_options: - if opt.startswith("pdiff_threshold"): - pixel_threshold = selenium_utilities.GetArgument(opt) - elif (opt.startswith("pdiff_threshold_mac") and - sys.platform == "darwin"): - pixel_threshold = selenium_utilities.GetArgument(opt) - elif (opt.startswith("pdiff_threshold_win") and - sys.platform == 'win32' or sys.platform == "cygwin"): - pixel_threshold = selenium_utilities.GetArgument(opt) - elif (opt.startswith("pdiff_threshold_linux") and - sys.platform[:5] == "linux"): - pixel_threshold = selenium_utilities.GetArgument(opt) - elif (opt.startswith("colorfactor")): - colorfactor = selenium_utilities.GetArgument(opt) - use_colorfactor = True - elif (opt.startswith("downsample")): - downsample_factor = selenium_utilities.GetArgument(opt) - use_downsample = True - elif (opt.startswith("pdiff_edge_ignore_off")): - use_edge = False - elif (opt.startswith("pdiff_edge_threshold")): - edge_threshold = selenium_utilities.GetArgument(opt) - - # Check if file exists - if os.path.exists(generated_file): - diff_file = os.path.join(base_path, screenshotsdir, - "compare_%s.png" % actual_name) - - # Run perceptual diff - arguments = [pdiff_path, - reference_file, - generated_file, - "-output", diff_file, - "-fov", "45", - "-alphaThreshold", alpha_threshold, - # Turn on verbose output for the percetual diff so we - # can see how far off we are on the threshold. - "-verbose", - # Set the threshold to zero so we can get a count - # of the different pixels. This causes the program - # to return failure for most images, but we can compare - # the values ourselves below. - "-threshold", "0"] - if use_colorfactor: - arguments += ["-colorfactor", colorfactor] - if use_downsample: - arguments += ["-downsample", downsample_factor] - if use_edge: - arguments += ["-ignoreEdges", edge_threshold] - - # Print the perceptual diff command line so we can debug easier. - if verbose: - print " ".join(arguments) - - # diff tool should return 0 on success - expected_result = 0 - - pdiff_pipe = subprocess.Popen(arguments, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (pdiff_stdout, pdiff_stderr) = pdiff_pipe.communicate() - result = pdiff_pipe.returncode - - # Find out how many pixels were different by looking at the output. 
- pixel_re = re.compile("(\d+) pixels are different", re.DOTALL) - pixel_match = pixel_re.search(pdiff_stdout) - different_pixels = "0" - if pixel_match: - different_pixels = pixel_match.group(1) - - alpha_re = re.compile("max alpha delta of ([0-9\.]+)", re.DOTALL) - alpha_delta = "0.0" - alpha_match = alpha_re.search(pdiff_stdout) - if alpha_match: - alpha_delta = alpha_match.group(1) - - if (result == expected_result or (pixel_match and - int(different_pixels) <= int(pixel_threshold))): - # The perceptual diff passed. - pass_re = re.compile("PASS: (.*?)\n", re.DOTALL) - pass_match = pass_re.search(pdiff_stdout) - reason = "Images are not perceptually different." - if pass_match: - reason = pass_match.group(1) - print ("%s PASSED with %s different pixels " - "(threshold %s) because: %s" % (test_name, - different_pixels, - pixel_threshold, - reason)) - results.AddSuccess(test_name) - else: - # The perceptual diff failed. - if pixel_match and int(different_pixels) > int(pixel_threshold): - results.AddFailure( - test_name, browser, - ("Reference framebuffer (%s) does not match generated " - "file (%s): %s non-matching pixels, max alpha delta: %s, " - "threshold: %s, alphaThreshold: %s." % - (reference_file, generated_file, different_pixels, alpha_delta, - pixel_threshold, alpha_threshold))) - else: - # The perceptual diff failed for some reason other than - # pixel differencing. - fail_re = re.compile("FAIL: (.*?)\n", re.DOTALL) - fail_match = fail_re.search(pdiff_stdout) - reason = "Unknown failure" - if fail_match: - reason = fail_match.group(1) - results.AddFailure( - test_name, browser, - ("Perceptual diff of reference (%s) and generated (%s) files " - "failed because: %s" % - (reference_file, generated_file, reason))) - else: - # Generated file does not exist - results.AddFailure(test_name, browser, - "File %s does not exist." % generated_file) - - return results +def GetChromePath(): + value = None + if sys.platform == "win32" or sys.platform == "cygwin": + import _winreg + try: + key = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, + "Applications\\chrome.exe\\shell\\open\\command") + (value, type) = _winreg.QueryValueEx(key, None) + _winreg.CloseKey(key) + value = os.path.dirname(value) + + except WindowsError: + value = None + if '*googlechrome' in FLAGS.browser: + raise Exception("Unable to determine location for Chrome -- " + + "is it installed?") + + return value def main(unused_argv): @@ -945,45 +622,84 @@ def main(unused_argv): FLAGS.referencedir, selenium_constants.PLATFORM_SCREENSHOT_DIR, "") + + # Launch HTTP server. + http_server = LocalFileHTTPServer.StartServer(FLAGS.product_dir) - # Open a new session to Selenium Remote Control - selenium_session = SeleniumSession(FLAGS.verbose, FLAGS.java, - os.path.abspath(FLAGS.selenium_server), - FLAGS.servertimeout) - if not selenium_session.http_server or not selenium_session.selenium_server: + if not http_server: + print "Could not start a local http server with root." % FLAGS.product_dir return 1 + + + # Start Selenium Remote Control and Selenium Session Builder. + sel_server_jar = os.path.abspath(FLAGS.selenium_server) + sel_server = SeleniumRemoteControl.StartServer( + FLAGS.verbose, FLAGS.java, sel_server_jar, + FLAGS.servertimeout) + + if not sel_server: + print "Could not start selenium server at %s." % sel_server_jar + return 1 + + session_builder = SeleniumSessionBuilder( + sel_server.selenium_port, + int(FLAGS.servertimeout) * 1000, + http_server.http_port, + FLAGS.browserpath) + all_tests_passed = True + # Test browsers. 
for browser in FLAGS.browser: if browser in set(selenium_constants.SELENIUM_BROWSER_SET): - test_list = [] - result = selenium_session.TestBrowser(browser, test_list, - FLAGS.testprefix, - FLAGS.testsuffixes, - int(FLAGS.servertimeout) * 1000) - - # Compare screenshots - compare_result = CompareScreenshots(browser, - test_list, - FLAGS.screenshots, - FLAGS.screenshotsdir, - FLAGS.screencompare, - FLAGS.verbose) - if not result.wasSuccessful() or not compare_result.WasSuccessful(): + test_list = GetTestsForBrowser(browser, FLAGS.testprefix, + FLAGS.testsuffixes) + + result = TestBrowser(session_builder, browser, test_list) + + if not result.wasSuccessful(): all_tests_passed = False - # Log results - print "Results for %s:" % browser - print " %d tests run." % (result.testsRun + compare_result.tests_run) - print " %d errors." % (len(result.errors) + len(compare_result.errors)) - print " %d failures.\n" % (len(result.failures) + - len(compare_result.failures)) + + # Log non-succesful tests, for convenience. + print "" + print "Failures for %s:" % browser + print "[Selenium tests]" + for entry in test_list: + if type(entry) == tuple: + test = entry[0] + else: + test = entry + + if test in result.results: + if result.results[test] != 'PASS': + print test.name + + print "" + print "[Perceptual Diff tests]" + for entry in test_list: + if type(entry) == tuple: + pdiff_test = entry[1] + if pdiff_test in result.results: + if result.results[pdiff_test] != 'PASS': + print pdiff_test.name + + + # Log summary results. + print "" + print "Summary for %s:" % browser + print " %d tests run." % result.testsRun + print " %d errors." % len(result.errors) + print " %d failures.\n" % len(result.failures) else: print "ERROR: Browser %s is invalid." % browser print "Run with --help to view list of supported browsers.\n" all_tests_passed = False - # Wrap up session - selenium_session.TearDown() + # Shut down remote control + shutdown_session = selenium.selenium("localhost", + sel_server.selenium_port, "*firefox", + "http://%s:%d" % (socket.gethostname(), http_server.http_port)) + shutdown_session.shut_down_selenium_server() if all_tests_passed: # All tests successful. @@ -992,21 +708,6 @@ def main(unused_argv): # Return error code 1. return 1 -def GetChromePath(): - value = None - if sys.platform == "win32" or sys.platform == "cygwin": - import _winreg - try: - key = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, - "Applications\\chrome.exe\\shell\\open\\command") - (value, type) = _winreg.QueryValueEx(key, None) - _winreg.CloseKey(key) - except WindowsError: - raise Exception("Unable to determine location for Chrome -- " - "it is installed?") - value = os.path.dirname(value) - return value - if __name__ == "__main__": remaining_argv = FLAGS(sys.argv) diff --git a/o3d/tests/selenium/pdiff_test.py b/o3d/tests/selenium/pdiff_test.py new file mode 100644 index 0000000..fc63c9d --- /dev/null +++ b/o3d/tests/selenium/pdiff_test.py @@ -0,0 +1,139 @@ +import os
+import re
+import subprocess
+import unittest
+import sys
+
+import selenium_utilities
+import selenium_constants
+
+class PDiffTest(unittest.TestCase):
+ """A perceptual diff test class, for running perceptual diffs on any
+ number of screenshots."""
+
+ def __init__(self, name, num_screenshots, screenshot_name, pdiff_path,
+ gen_dir, ref_dir, options):
+ unittest.TestCase.__init__(self, name)
+ self.name = name
+ self.num_screenshots = num_screenshots
+ self.screenshot_name = screenshot_name
+ self.pdiff_path = pdiff_path
+ self.gen_dir = gen_dir
+ self.ref_dir = ref_dir
+ self.options = options
+
+ def shortDescription(self):
+ """override unittest.TestCase shortDescription for our own descriptions."""
+ return "Screenshot comparison for: " + self.name
+
+ def PDiffTest(self):
+ """Runs a generic Perceptual Diff test."""
+ # Get arguments for perceptual diff.
+ pixel_threshold = "10"
+ alpha_threshold = "1.0"
+ use_colorfactor = False
+ use_downsample = False
+ use_edge = True
+ edge_threshold = "5"
+
+ for opt in self.options:
+ if opt.startswith("pdiff_threshold"):
+ pixel_threshold = selenium_utilities.GetArgument(opt)
+ elif (opt.startswith("pdiff_threshold_mac") and
+ sys.platform == "darwin"):
+ pixel_threshold = selenium_utilities.GetArgument(opt)
+ elif (opt.startswith("pdiff_threshold_win") and
+ sys.platform == 'win32' or sys.platform == "cygwin"):
+ pixel_threshold = selenium_utilities.GetArgument(opt)
+ elif (opt.startswith("pdiff_threshold_linux") and
+ sys.platform[:5] == "linux"):
+ pixel_threshold = selenium_utilities.GetArgument(opt)
+ elif (opt.startswith("colorfactor")):
+ colorfactor = selenium_utilities.GetArgument(opt)
+ use_colorfactor = True
+ elif (opt.startswith("downsample")):
+ downsample_factor = selenium_utilities.GetArgument(opt)
+ use_downsample = True
+ elif (opt.startswith("pdiff_edge_ignore_off")):
+ use_edge = False
+ elif (opt.startswith("pdiff_edge_threshold")):
+ edge_threshold = selenium_utilities.GetArgument(opt)
+
+ results = []
+ # Loop over number of screenshots.
+ for screenshot_no in range(self.num_screenshots):
+ # Find reference image.
+ J = os.path.join
+ platform_img_path = J(self.ref_dir,
+ selenium_constants.PLATFORM_SCREENSHOT_DIR,
+ self.screenshot_name + str(screenshot_no + 1) +
+ '_reference.png')
+ reg_img_path = J(self.ref_dir,
+ selenium_constants.DEFAULT_SCREENSHOT_DIR,
+ self.screenshot_name + str(screenshot_no + 1) +
+ '_reference.png')
+
+ if os.path.exists(platform_img_path):
+ ref_img_path = platform_img_path
+ elif os.path.exists(reg_img_path):
+ ref_img_path = reg_img_path
+ else:
+ self.fail('Reference image for ' + self.screenshot_name + ' not found.'
+ + '\nNeither file exists %s NOR %s' %
+ (reg_img_path, platform_img_path))
+
+ # Find generated image.
+ gen_img_path = J(self.gen_dir, self.screenshot_name +
+ str(screenshot_no + 1) + '.png')
+ diff_img_path = J(self.gen_dir, 'cmp_' + self.screenshot_name +
+ str(screenshot_no + 1) + '.png')
+
+ self.assertTrue(os.path.exists(gen_img_path),
+ 'Generated screenshot for ' + self.screenshot_name +
+ ' not found.\nFile does not exist: %s' % gen_img_path)
+
+ # Run perceptual diff
+ arguments = [self.pdiff_path,
+ ref_img_path,
+ gen_img_path,
+ "-output", diff_img_path,
+ "-fov", "45",
+ "-alphaThreshold", alpha_threshold,
+ # Turn on verbose output for the perceptual diff so we
+ # can see how far off we are on the threshold.
+ "-verbose",
+ # Set the threshold to zero so we can get a count
+ # of the different pixels. This causes the program
+ # to return failure for most images, but we can compare
+ # the values ourselves below.
+ "-threshold", "0"]
+ if use_colorfactor:
+ arguments += ["-colorfactor", colorfactor]
+ if use_downsample:
+ arguments += ["-downsample", downsample_factor]
+ if use_edge:
+ arguments += ["-ignoreEdges", edge_threshold]
+
+ pdiff_pipe = subprocess.Popen(arguments,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (pdiff_stdout, pdiff_stderr) = pdiff_pipe.communicate()
+ result = pdiff_pipe.returncode
+ # Find out how many pixels were different by looking at the output.
+ pixel_re = re.compile("(\d+) pixels are different", re.DOTALL)
+ pixel_match = pixel_re.search(pdiff_stdout)
+ different_pixels = "0"
+ if pixel_match:
+ different_pixels = pixel_match.group(1)
+
+ results += [(gen_img_path, int(different_pixels))]
+
+ all_tests_passed = True
+ msg = "Pixel Threshold is %s. Failing screenshots:\n" % pixel_threshold
+ for path, pixels in results:
+ if pixels > int(pixel_threshold):
+ all_tests_passed = False
+ msg += " %s, differing by %s\n" % (path, str(pixels))
+
+ if not all_tests_passed:
+ self.assertTrue(all_tests_passed, msg)
diff --git a/o3d/tests/selenium/pulse_testrunner.py b/o3d/tests/selenium/pulse_testrunner.py deleted file mode 100644 index 71f7bb3..0000000 --- a/o3d/tests/selenium/pulse_testrunner.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python2.4 -# Copyright 2009, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -"""Test runner that displays customized results for pulse. - -Instead of the usual '.', 'E', and 'F' symbols, more detailed -results are printed after each test. - -ie. 'TESTRESULT $TESTNAME: {PASS/FAIL}' -""" - - - -import sys -import time -import unittest - - -# Disable lint errors about functions having to start with upper case. -# This is done to standardize the case of all functions in this class. -# pylint: disable-msg=C6409 -class _PulseTestResult(unittest.TestResult): - """A specialized class that prints formatted text results to a stream. - - Test results are formatted to be recognized in pulse. - Used by PulseTestRunner. - """ - separator1 = "=" * 70 - separator2 = "-" * 70 - - def __init__(self, stream, descriptions, verbosity, browser): - unittest.TestResult.__init__(self) - self.stream = stream - self.show_all = verbosity > 1 - self.dots = verbosity == 1 - self.descriptions = descriptions - # Dictionary of start times - self.start_times = {} - # Dictionary of results - self.results = {} - self.browser = browser - - def getDescription(self, test): - """Gets description of test.""" - if self.descriptions: - return test.shortDescription() or str(test) - else: - return str(test) - - def startTest(self, test): - """Starts test.""" - # Records the start time - self.start_times[test] = time.time() - # Default testresult if success not called - self.results[test] = "FAIL" - unittest.TestResult.startTest(self, test) - if self.show_all: - self.stream.write(self.getDescription(test)) - self.stream.write(" ... 
") - - def stopTest(self, test): - """Called when test is ended.""" - time_taken = time.time() - self.start_times[test] - result = self.results[test] - self.stream.writeln("SELENIUMRESULT %s <%s> [%.3fs]: %s" - % (test, self.browser, time_taken, result)) - - def addSuccess(self, test): - """Adds success result to TestResult.""" - unittest.TestResult.addSuccess(self, test) - self.results[test] = "PASS" - - def addError(self, test, err): - """Adds error result to TestResult.""" - unittest.TestResult.addError(self, test, err) - self.results[test] = "FAIL" - - def addFailure(self, test, err): - """Adds failure result to TestResult.""" - unittest.TestResult.addFailure(self, test, err) - self.results[test] = "FAIL" - - def printErrors(self): - """Prints all errors and failures.""" - if self.dots or self.show_all: - self.stream.writeln() - self.printErrorList("ERROR", self.errors) - self.printErrorList("FAIL", self.failures) - - def printErrorList(self, flavour, errors): - """Prints a given list of errors.""" - for test, err in errors: - self.stream.writeln(self.separator1) - self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) - self.stream.writeln(self.separator2) - self.stream.writeln("%s" % err) - - -class PulseTestRunner(unittest.TextTestRunner): - """A specialized test runner class that displays results in textual form. - - Test results are formatted to be recognized in pulse. - """ - - def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1): - self.browser = "default_browser" - unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity) - - def setBrowser(self, browser): - """Sets the browser name.""" - self.browser = browser - - def _makeResult(self): - """Returns a formatted test result for pulse.""" - return _PulseTestResult(self.stream, - self.descriptions, - self.verbosity, - self.browser) - -if __name__ == "__main__": - pass diff --git a/o3d/tests/selenium/sample_list.txt b/o3d/tests/selenium/sample_list.txt index 9b35450..90c3c62 100644 --- a/o3d/tests/selenium/sample_list.txt +++ b/o3d/tests/selenium/sample_list.txt @@ -72,9 +72,9 @@ # http://wiki.corp.google.com/twiki/bin/view/Main/ClientThreeDSampleGuidelines # # -medium 2d screenshot pdiff_threshold(200) pdiff_threshold_mac(41200) colorfactor(0.8) downsample(1) +medium 2d screenshot timeout(30000) pdiff_threshold(200) pdiff_threshold_mac(41200) colorfactor(0.8) downsample(1) medium animation -large animated-scene screenshot timeout(45000) pdiff_threshold(200) +large animated-scene screenshot timeout(55000) pdiff_threshold(200) large beachdemo/beachdemo screenshot timeout(120000) pdiff_threshold(200) pdiff_threshold_mac(2100) downsample(1) except(*iexplore, *googlechrome) medium billboards screenshot pdiff_threshold(200) medium bitmap-draw-image screenshot pdiff_threshold(200) @@ -124,16 +124,19 @@ large GoogleIO-2009/step14ex screenshot pdiff_threshold(200) timeou # function to custom run the test. 
As such, only the 'except' and # pdiff_threshold options have any meaning -small TestSampleErrorTextureSmall pdiff_threshold(200) -small TestSampleHelloCube_TexturesSmall pdiff_threshold(450) +small TestSampleErrorTextureSmall pdiff_threshold(200) screenshots(5) +small TestSampleHelloCube_TexturesSmall pdiff_threshold(450) screenshot small TestSampleRefreshPageLoad_Small -medium TestSampleCustomCamera pdiff_threshold(200) pdiff_threshold_win(200) +medium TestSampleCustomCamera pdiff_threshold(200) pdiff_threshold_win(200) screenshot medium TestSamplePicking medium TestSampleRenderMode -medium TestSampleRotateModel pdiff_threshold(200) -medium TestSampleShader_Test pdiff_threshold(200) pdiff_threshold_win(200) +medium TestSampleRotateModel pdiff_threshold(200) screenshot +medium TestSampleShader_Test pdiff_threshold(200) pdiff_threshold_win(200) screenshots(13) large TestSampleMultipleClientsLarge large TestSamplePingPongLarge # This test currently fails on IE as it considers localhost: to be a trusted # domain. -small TestLoadTextureFromFileSmall except(*iexplore) +# Do not run this test until get rid of scons. It assumes a particular +# directory structure to find an asset. Need to change to reflect new gyp +# directory structure. Should be changed when scons is gone. +#small TestLoadTextureFromFileSmall except(*iexplore) diff --git a/o3d/tests/selenium/samples_tests.py b/o3d/tests/selenium/samples_tests.py index ef5a66b..b15592e 100644 --- a/o3d/tests/selenium/samples_tests.py +++ b/o3d/tests/selenium/samples_tests.py @@ -59,10 +59,10 @@ class SampleTests(selenium_utilities.SeleniumTestCase): # TODO: Change to correct object class when NPAPI class is exposed. SELENIUM_OBJ_TYPE = "[object HTMLObjectElement]" - def __init__(self, name, session, browser, path_to_html, test_type=None, + def __init__(self, name, browser, path_to_html, test_type=None, sample_path=None, options=None): selenium_utilities.SeleniumTestCase.__init__( - self, name, session, browser, path_to_html, test_type, sample_path, + self, name, browser, path_to_html, test_type, sample_path, options) def GenericTest(self): @@ -314,7 +314,7 @@ class SampleTests(selenium_utilities.SeleniumTestCase): s.select("//select[@id='shaderSelect']", ("index=%d" % shader)) # Take screenshot self.assertTrue(selenium_utilities.TakeScreenShot( - s, self.browser, "g_client", ("shader-test%d" % shader))) + s, self.browser, "g_client", "shader-test%d" % (shader + 1))) def TestSampleErrorTextureSmall(self): """Tests error-texture.html.""" diff --git a/o3d/tests/selenium/selenium_constants.py b/o3d/tests/selenium/selenium_constants.py index 936772c..75b5116 100644 --- a/o3d/tests/selenium/selenium_constants.py +++ b/o3d/tests/selenium/selenium_constants.py @@ -50,6 +50,7 @@ DEFAULT_SCREENSHOT_PATH = os.path.join(o3d_dir, # Path where reference screenshots will be stored. # Unfortunately we need separate reference images for certain platforms # for certain tests. +DEFAULT_SCREENSHOT_DIR = "reference" if sys.platform == "darwin": PLATFORM_SCREENSHOT_DIR = "reference-mac" elif sys.platform[:5] == "linux": @@ -68,3 +69,6 @@ SELENIUM_BROWSER_SET = ["*iexplore", "*firefox", "*googlechrome", "*safari"] # otherwise the OpenGL context will be clipped to the size of the window RESIZE_WIDTH = 1400 RESIZE_HEIGHT = 1200 + +# Time to wait (after load timeout) till assume the browser has crashed. 
+MAX_SELENIUM_TEST_TIME = 60 diff --git a/o3d/tests/selenium/selenium_utilities.py b/o3d/tests/selenium/selenium_utilities.py index 1e55089..ba28e1c 100644 --- a/o3d/tests/selenium/selenium_utilities.py +++ b/o3d/tests/selenium/selenium_utilities.py @@ -61,6 +61,25 @@ def IsValidSuffix(name): return True return False +def ScreenshotNameFromTestName(name): + name = StripTestTypeSuffix(name) + + if name.startswith("Test"): + # Make sure these are in order. + prefixes = ["TestStress", "TestSample", "Test"] + for prefix in prefixes: + if name.startswith(prefix): + name = name[len(prefix):] + break + + # Lowercase the name only for custom test methods. + name = name.lower() + + name = name.replace("_", "-") + name = name.replace("/", "_") + + return name + def StripTestTypeSuffix(name): """Removes the suffix from name if it is a valid test type.""" @@ -205,7 +224,6 @@ def TakeScreenShotAtPath(session, file = open(full_path + ".png", 'wb') file.write(png) file.close() - print "Saved screenshot %s." % full_path return True return False @@ -214,7 +232,7 @@ def TakeScreenShotAtPath(session, class SeleniumTestCase(unittest.TestCase): """Wrapper for TestCase for selenium.""" - def __init__(self, name, session, browser, path_to_html, test_type=None, + def __init__(self, name, browser, path_to_html, test_type=None, sample_path=None, options=None): """Constructor for SampleTests. @@ -229,12 +247,33 @@ class SeleniumTestCase(unittest.TestCase): """ unittest.TestCase.__init__(self, name) - self.session = session + self.name = name + self.session = None self.browser = browser self.test_type = test_type self.sample_path = sample_path - self.options = options self.path_to_html = path_to_html + self.screenshots = [] + self.timeout = 10000 + self.client = "g_client" + # parse options + for option in options: + if option.startswith("screenshot"): + clock = GetArgument(option) + if clock is None: + clock = "27.5" + self.screenshots.append(clock) + elif option.startswith("timeout"): + self.timeout = int(GetArgument(option)) + elif option.startswith("client"): + self.client = GetArgument(option) + + + def SetSession(self, session): + self.session = session + + def GetTestTimeout(self): + return self.timeout def GetURL(self, url): """Gets a URL for the test.""" @@ -267,33 +306,17 @@ class SeleniumTestCase(unittest.TestCase): g_client which is the o3d client object for that sample. This is used to take a screenshot. """ - screenshots = [] - timeout = 10000 - client = "g_client" - + self.assertTrue(not self.timeout is None) + self.assertTrue(not self.client is None) self.assertTrue(self.test_type in ["small", "medium", "large"]) - # parse options - for option in self.options: - if option.startswith("screenshot"): - clock = GetArgument(option) - if clock is None: - clock = "27.5" - screenshots.append(clock) - elif option.startswith("timeout"): - timeout = GetArgument(option) - self.assertTrue(not timeout is None) - elif option.startswith("client"): - client = GetArgument(option) - self.assertTrue(not client is None) - url = self.GetURL(base_path + self.sample_path + ".html") # load the sample. self.session.open(url) # wait for it to initialize. - self.session.wait_for_condition(ready_condition, timeout) + self.session.wait_for_condition(ready_condition, self.timeout) self.session.run_script( "if (window.o3d_prepForSelenium) { window.o3d_prepForSelenium(); }") @@ -303,14 +326,15 @@ class SeleniumTestCase(unittest.TestCase): # take a screenshot. 
screenshot_id = 1 - for clock in screenshots: + for clock in self.screenshots: # if they are animated we need to stop the animation and set the clock # to some time so we get a known state. self.session.run_script("g_timeMult = 0") self.session.run_script("g_clock = " + clock) # take a screenshot. - screenshot = self.sample_path.replace("/", "_") + str(screenshot_id) + screenshot = self.sample_path.replace("_", "-").replace("/", "_") + screenshot += str(screenshot_id) self.assertTrue(TakeScreenShot(self.session, self.browser, - client, screenshot)) + self.client, screenshot)) screenshot_id += 1 diff --git a/o3d/tests/selenium/test_runner.py b/o3d/tests/selenium/test_runner.py new file mode 100644 index 0000000..0ff5789 --- /dev/null +++ b/o3d/tests/selenium/test_runner.py @@ -0,0 +1,398 @@ +#!/usr/bin/python2.4
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Test runners and associated classes.
+
+Each test runner has its own thread, which attempts to perform a given test.
+If a test hangs, the test runner can be aborted or exited.
+
+"""
+
+import os
+import sys
+
+import socket
+import subprocess
+import threading
+import time
+import unittest
+import gflags
+import selenium
+import selenium_constants
+import Queue
+import thread
+import copy
+
+class StringBuffer:
+ """Primitive string buffer.
+
+ Members:
+ data: the contents of the buffer
+ """
+ def __init__(self):
+ self.data = ""
+ def write(self, data):
+ self.data += str(data)
+ def writeln(self, data=None):
+ if data is not None:
+ self.write(data)
+ self.write("\n")
+ def get(self):
+ get_data = self.data
+ self.data = ""
+ return get_data
+
+class TestResult(unittest.TestResult):
+ """A specialized class that prints formatted text results to a stream.
+
+ """
+ separator1 = "=" * 70
+ separator2 = "-" * 70
+
+ def __init__(self, stream, browser):
+ unittest.TestResult.__init__(self)
+ self.stream = stream
+ # Dictionary of start times
+ self.start_times = {}
+ # Dictionary of results
+ self.results = {}
+ self.browser = browser
+
+ def getDescription(self, test):
+ """Gets description of test."""
+ return test.shortDescription() or str(test)
+
+ def startTest(self, test):
+ """Starts test."""
+ # Records the start time
+ self.start_times[test] = time.time()
+ # Default testresult if success not called
+ self.results[test] = "FAIL"
+ unittest.TestResult.startTest(self, test)
+ self.stream.writeln()
+ self.stream.writeln(self.separator2)
+ self.stream.write(self.getDescription(test))
+ self.stream.writeln(" ... ")
+
+ def stopTest(self, test):
+ """Called when test is ended."""
+ time_taken = time.time() - self.start_times[test]
+ result = self.results[test]
+ self.stream.writeln("SELENIUMRESULT %s <%s> [%.3fs]: %s"
+ % (test, self.browser, time_taken, result))
+
+ def addSuccess(self, test):
+ """Adds success result to TestResult."""
+ unittest.TestResult.addSuccess(self, test)
+ self.results[test] = "PASS"
+
+ def addError(self, test, err):
+ """Adds error result to TestResult."""
+ unittest.TestResult.addError(self, test, err)
+ self.results[test] = "FAIL"
+
+ def addFailure(self, test, err):
+ """Adds failure result to TestResult."""
+ unittest.TestResult.addFailure(self, test, err)
+ self.results[test] = "FAIL"
+
+ def noResponse(self, test):
+ """Configures the result for a test that did not respond."""
+ self.results[test] = "FAIL"
+ self.testsRun += 1
+ self.errors.append("No response from test")
+
+ self.stream.writeln()
+ self.stream.writeln(self.separator2)
+ self.stream.write(self.getDescription(test))
+ self.stream.writeln(" ... ")
+ self.stream.writeln("SELENIUMRESULT %s <%s> [?s]: FAIL (HUNG?)"
+ % (test, self.browser))
+ self.stream.writeln()
+
+ def printErrors(self):
+ """Prints all errors and failures."""
+ self.stream.writeln()
+ self.printErrorList("ERROR", self.errors)
+ self.printErrorList("FAIL", self.failures)
+
+ def printErrorList(self, flavour, errors):
+ """Prints a given list of errors."""
+ for test, err in errors:
+ self.stream.writeln("%s:" % flavour)
+ self.stream.writeln("%s" % err)
+
+ def printAll(self, stream):
+ """Prints the entire stream to the given stream."""
+ stream.write(self.stream.data)
+
+ def merge(self, result):
+ """Merges the given result into this resultl."""
+ self.testsRun += result.testsRun
+ for key, entry in result.results.iteritems():
+ self.results[key] = entry
+ for error in result.errors:
+ self.errors.append(error)
+ for failure in result.failures:
+ self.failures.append(failure)
+ self.stream.write(result.stream.get())
+
+
+class TestRunnerThread(threading.Thread):
+ """Abstract test runner class. Launches its own thread for running tests.
+ Formats test results.
+
+ Members:
+ completely_done_event: event that occurs just before thread exits.
+ test: the currently running test.
+ browser: selenium name of the browser that will be tested.
+ """
+ def __init__(self):
+ threading.Thread.__init__(self)
+ # This thread is a daemon so that the program can exit even if the
+ # thread has not finished.
+ self.setDaemon(True)
+ self.completely_done_event = threading.Event()
+ self.test = None
+ self.browser = "default_browser"
+
+ def IsCompletelyDone(self):
+ """Returns true if this test runner is completely done."""
+ return self.completely_done_event.isSet()
+
+ def run(self):
+ pass
+
+ def SetBrowser(self, browser):
+ """Sets the browser name."""
+ self.browser = browser
+
+ def GetNoResponseResult(self):
+ """Returns a generic no response result for last test."""
+ result = TestResult(StringBuffer(), self.browser)
+ result.noResponse(self.test)
+ return result
+
+ def RunTest(self, test):
+ "Run the given test case or test suite."
+ self.test = test
+
+ stream = StringBuffer()
+ result = TestResult(stream, self.browser)
+ startTime = time.time()
+ test(result)
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ run = result.testsRun
+ stream.writeln("Took %.2fs" % timeTaken)
+ stream.writeln()
+ return result
+
+
+class PDiffTestRunner(TestRunnerThread):
+ """Test runner for Perceptual Diff tests. Polls a test queue and launches
+ given tests. Adds result to given queue.
+
+ Members:
+ pdiff_queue: list of tests to run, when they arrive.
+ result_queue: queue of our tests results.
+ browser: selenium name of browser to be tested.
+ end_testing_event: event that occurs when we are guaranteed no more tests
+ will be added to the queue.
+ """
+ def __init__(self, pdiff_queue, result_queue, browser):
+ TestRunnerThread.__init__(self)
+ self.pdiff_queue = pdiff_queue
+ self.result_queue = result_queue
+ self.browser = browser
+
+ self.end_testing_event = threading.Event()
+
+ def EndTesting(self):
+ """Called to notify thread that no more tests will be added to the test
+ queue."""
+ self.end_testing_event.set()
+
+ def run(self):
+ while True:
+ try:
+ test = self.pdiff_queue.get_nowait()
+
+ result = self.RunTest(test)
+
+ self.result_queue.put(result)
+
+ except Queue.Empty:
+ if self.end_testing_event.isSet() and self.pdiff_queue.empty():
+ break
+ else:
+ time.sleep(1)
+
+ self.completely_done_event.set()
+
+
+class SeleniumTestRunner(TestRunnerThread):
+ """Test runner for Selenium tests. Takes a test from a test queue and launches
+ it. Tries to handle hung/crashed tests gracefully.
+
+ Members:
+ testing_event: event that occurs when the runner is testing.
+ finished_event: event that occurs when thread has finished testing and
+ before it starts its next test.
+ can_continue_lock: lock for |can_continue|.
+ can_continue: is True when main thread permits the test runner to continue.
+ sel_builder: builder that constructs new selenium sessions, as needed.
+ browser: selenium name of browser to be tested.
+ session: current selenium session being used in tests, can be None.
+ test_queue: queue of tests to run.
+ pdiff_queue: queue of perceptual diff tests to run. We add a perceptual
+ diff test to the queue when the related selenium test passes.
+ deadline: absolute time of when the test should be done.
+ """
+ def __init__(self, sel_builder, browser, test_queue, pdiff_queue):
+ TestRunnerThread.__init__(self)
+
+ # Synchronization.
+ self.testing_event = threading.Event()
+ self.finished_event = threading.Event()
+ self.can_continue_lock = threading.Lock()
+ self.can_continue = False
+
+ # Selenium variables.
+ self.sel_builder = sel_builder
+ self.browser = browser
+
+ # Test variables.
+ self.test_queue = test_queue
+ self.pdiff_queue = pdiff_queue
+
+ self.deadline = 0
+
+ def IsPastDeadline(self):
+ if time.time() > self.deadline:
+ return True
+ return False
+
+ def IsTesting(self):
+ return self.testing_event.isSet()
+
+ def DidFinishTest(self):
+ return self.finished_event.isSet()
+
+ def Continue(self):
+ """Signals to thread to continue testing.
+
+ Returns:
+ result: the result for the recently finished test.
+ """
+
+ self.finished_event.clear()
+
+ self.can_continue_lock.acquire()
+ self.can_continue = True
+ result = self.result
+ self.can_continue_lock.release()
+
+ return result
+
+ def AbortTest(self):
+ self._StopSession()
+ self._StartSession()
+
+ def _StartSession(self):
+ self.session = self.sel_builder.NewSeleniumSession(self.browser)
+ # Copy the session so we can shut down safely on a different thread.
+ self.shutdown_session = copy.deepcopy(self.session)
+
+ def _StopSession(self):
+ if self.session is not None:
+ self.session = None
+ try:
+ # This can cause an exception on some browsers.
+ # Silently disregard the exception.
+ self.shutdown_session.stop()
+ except:
+ pass
+
+ def run(self):
+ self._StartSession()
+
+ while not self.test_queue.empty():
+ try:
+ # Grab test from queue.
+ test_obj = self.test_queue.get_nowait()
+ if type(test_obj) == tuple:
+ test = test_obj[0]
+ pdiff_test = test_obj[1]
+ else:
+ test = test_obj
+ pdiff_test = None
+
+ self.can_continue = False
+
+ # Deadline is the test's page-load timeout plus a constant grace period.
+ self.deadline = (time.time() + (test.GetTestTimeout() / 1000.0) +
+ selenium_constants.MAX_SELENIUM_TEST_TIME)
+ # Supply test with necessary selenium session.
+ test.SetSession(self.session)
+
+ # Run test.
+ self.testing_event.set()
+ self.result = self.RunTest(test)
+
+ if time.time() > self.deadline:
+ self.result = self.GetNoResponseResult()
+
+ self.testing_event.clear()
+ self.finished_event.set()
+
+ # Wait for instruction from the main thread.
+ while True:
+ self.can_continue_lock.acquire()
+ can_continue = self.can_continue
+ self.can_continue_lock.release()
+ if can_continue:
+ break
+ time.sleep(.5)
+
+ if self.pdiff_queue is not None and pdiff_test is not None:
+ if self.result.wasSuccessful():
+ # Add the dependent perceptual diff test.
+ self.pdiff_queue.put(pdiff_test)
+
+ except Queue.Empty:
+ break
+
+ self._StopSession()
+ self.completely_done_event.set()
+
+