author     melandory <melandory@chromium.org>    2015-04-23 04:32:19 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-04-23 11:33:02 +0000
commit     a12e1e10c4a910a4f8aa20e3ea8b611846f19ed9 (patch)
tree       8846e2d08c15b141f62512f800d83a42ca545750 /components/test
parent     4631e09b52effe32b8817c517be9d1a1594d73cc (diff)
[Password manager tests automation] Refactor test_runner.
The refactoring consists of the following steps:
* A test case is run for all websites, then the next test case for all websites.
* The |tests| module is now executed by importing it and calling its function instead of running it as a separate Python script.
* Platform-independent timeout mechanism.
* Parallel run using a thread-pool-like solution to distribute jobs between workers.

BUG=369521
R=vabr@chromium.org

Review URL: https://codereview.chromium.org/1089383002

Cr-Commit-Position: refs/heads/master@{#326495}
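The parallel run described above follows a pool-of-workers pattern: every (website, test case) pair becomes one job, and a fixed number of workers drain the job list. A minimal standalone sketch of that pattern in Python 2 (not part of the commit; run_one() is a hypothetical stand-in for the real job function, tests.RunTest, visible in the diff below):

    # Sketch of the pool-based fan-out used by the new run_tests.py.
    import multiprocessing

    def run_one((website, test_case)):  # Python 2 tuple-unpacking argument, as in the diff
        # A real job would drive the browser here; the stand-in just reports success.
        return (website, test_case, True, "")

    def run_all(websites, test_cases, max_parallel):
        jobs = [(w, t) for w in websites for t in test_cases]
        pool = multiprocessing.Pool(min(max_parallel, len(jobs)))
        try:
            return pool.map(run_one, jobs)  # list of (website, test_case, success, reason)
        finally:
            pool.close()
            pool.join()

The new run_tests.py below has the same shape: RunTestCaseOnWebsite is the job function, and the pool size is the minimum of tests_in_parallel and the number of jobs.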
Diffstat (limited to 'components/test')
-rw-r--r--  components/test/data/password_manager/automated_tests/environment.py |   7
-rw-r--r--  components/test/data/password_manager/automated_tests/run_tests.py   | 275
-rw-r--r--  components/test/data/password_manager/automated_tests/tests.py       |  28
3 files changed, 124 insertions, 186 deletions
diff --git a/components/test/data/password_manager/automated_tests/environment.py b/components/test/data/password_manager/automated_tests/environment.py
index fd52754..3743465 100644
--- a/components/test/data/password_manager/automated_tests/environment.py
+++ b/components/test/data/password_manager/automated_tests/environment.py
@@ -288,7 +288,12 @@ class Environment:
getattr(websitetest, test_case_name)()
except Exception as e:
successful = False
- error = e.message
+ # httplib.CannotSendRequest doesn't define a message, so
+ # type(e).__name__ at least logs the exception name as the reason.
+ # TODO(melandory): logging.exception(e) produces a meaningful result
+ # for httplib.CannotSendRequest, so we could try to propagate the fact
+ # that the reason is an exception to the logging phase.
+ error = "Exception %s %s" % (type(e).__name__, e)
self.tests_results.append(
(websitetest.name, test_case_name, successful, error))
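The environment.py change above matters for exceptions such as httplib.CannotSendRequest, which stringify to an empty message: the old `e.message` produced an empty failure reason, while the new format at least records the exception type. A small Python 2 illustration (not part of the commit):

    # Why the error string now includes type(e).__name__.
    import httplib

    try:
        raise httplib.CannotSendRequest()
    except Exception as e:
        old_reason = getattr(e, "message", "")                  # "" for CannotSendRequest, says nothing useful
        new_reason = "Exception %s %s" % (type(e).__name__, e)  # "Exception CannotSendRequest "
        print repr(old_reason)
        print repr(new_reason)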
diff --git a/components/test/data/password_manager/automated_tests/run_tests.py b/components/test/data/password_manager/automated_tests/run_tests.py
index d1118f0..ef114da 100644
--- a/components/test/data/password_manager/automated_tests/run_tests.py
+++ b/components/test/data/password_manager/automated_tests/run_tests.py
@@ -35,15 +35,20 @@ descending order of severity):
You have to set up appropriate logging handlers to have the logs appear.
"""
-import argparse
import ConfigParser
+import Queue
+import argparse
import logging
+import multiprocessing
import os
import shutil
-import subprocess
+import stopit
import tempfile
import time
+from threading import Thread
+from collections import defaultdict
+
import tests
@@ -51,126 +56,90 @@ import tests
# of logging.DEBUG, which is already used for detailed test debug messages.
SCRIPT_DEBUG = 9
+class Config:
+ test_cases_to_run = tests.TEST_CASES
+ save_only_fails = False
+ tests_to_run = tests.all_tests.keys()
+ max_tests_in_parallel = 1
-class TestRunner(object):
- """Runs tests for a single website."""
-
- def __init__(self, test_cmd, test_name):
- """Initialize the TestRunner.
-
- Args:
- test_cmd: List of command line arguments to be supplied to
- every test run.
- test_name: Test name (e.g., facebook).
- """
- self.logger = logging.getLogger("run_tests")
-
- self.profile_path = tempfile.mkdtemp()
- results = tempfile.NamedTemporaryFile(delete=False)
- self.results_path = results.name
- results.close()
- self.test_cmd = test_cmd + ["--profile-path", self.profile_path,
- "--save-path", self.results_path]
- self.test_name = test_name
- # TODO(vabr): Ideally we would replace timeout with something allowing
- # calling tests directly inside Python, and working on other platforms.
- #
- # The website test runs multiple scenarios, each one has an internal
- # timeout of 200s for waiting (see |remaining_time_to_wait| and
- # Wait() in websitetest.py). Expecting that not every scenario should
- # take 200s, the maximum time allocated for all of them is 300s.
- self.test_cmd = ["timeout", "300"] + self.test_cmd
-
- self.logger.log(SCRIPT_DEBUG,
- "TestRunner set up for test %s, command '%s', "
- "profile path %s, results file %s",
- self.test_name, self.test_cmd, self.profile_path,
- self.results_path)
-
- self.runner_process = None
- # The tests can be flaky. This is why we try to rerun up to 3 times.
- self.max_test_runs_left = 3
- self.failures = []
- self._run_test()
-
- def get_test_result(self):
- """Return the test results.
-
- Returns:
- (True, []) if the test passed.
- (False, list_of_failures) if the test failed.
- None if the test is still running.
- """
-
- test_running = self.runner_process and self.runner_process.poll() is None
- if test_running:
- return None
- # Test is not running, now we have to check if we want to start it again.
- if self._check_if_test_passed():
- self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name)
- return True, []
- if self.max_test_runs_left == 0:
- self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name)
- return False, self.failures
- self._run_test()
- return None
-
- def _check_if_test_passed(self):
- """Returns True if and only if the test passed."""
-
- success = False
- if os.path.isfile(self.results_path):
- with open(self.results_path, "r") as results:
- # TODO(vabr): Parse the results to make sure all scenarios succeeded
- # instead of hard-coding here the number of tests scenarios from
- # test.py:main.
- NUMBER_OF_TEST_SCENARIOS = 3
- passed_scenarios = 0
- for line in results:
- self.failures.append(line)
- passed_scenarios += line.count("successful='True'")
- success = passed_scenarios == NUMBER_OF_TEST_SCENARIOS
- if success:
- break
-
- self.logger.log(
- SCRIPT_DEBUG,
- "Test run of {0} has succeeded: {1}".format(self.test_name, success))
- return success
-
- def _run_test(self):
- """Executes the command to run the test."""
- with open(self.results_path, "w"):
- pass # Just clear the results file.
- shutil.rmtree(path=self.profile_path, ignore_errors=True)
- self.max_test_runs_left -= 1
- self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name)
- self.runner_process = subprocess.Popen(self.test_cmd)
-
-
-def _apply_defaults(config, defaults):
- """Adds default values from |defaults| to |config|.
-
- Note: This differs from ConfigParser's mechanism for providing defaults in
- two aspects:
- * The "defaults" here become explicit, and are associated with sections.
- * Sections get created for the added defaults where needed, that is, if
- they do not exist before.
+ def __init__(self, config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(config_path)
+ if config.has_option("run_options", "tests_in_parallel"):
+ self.max_tests_in_parallel = config.getint(
+ "run_options", "tests_in_parallel")
- Args:
- config: A ConfigParser instance to be updated
- defaults: A dictionary mapping (section_string, option_string) pairs
- to string values. For every section/option combination not already
- contained in |config|, the value from |defaults| is stored in |config|.
- """
- for (section, option) in defaults:
- if not config.has_section(section):
- config.add_section(section)
- if not config.has_option(section, option):
- config.set(section, option, defaults[(section, option)])
+ self.chrome_path = config.get("binaries", "chrome-path")
+ self.chromedriver_path = config.get("binaries", "chromedriver-path")
+ self.passwords_path = config.get("data_files", "passwords_path")
+
+ if config.has_option("run_options", "tests_to_run"):
+ self.tests_to_run = config.get("run_options", "tests_to_run").split(",")
+ if config.has_option("run_options", "test_cases_to_run"):
+ self.test_cases_to_run = config.get(
+ "run_options", "test_cases_to_run").split(",")
+ if (config.has_option("logging", "save-only-fails")):
+ self.save_only_fails = config.getboolean("logging", "save-only-fails")
-def run_tests(config_path):
+
+def LogResultsOfTestRun(config, results):
+ """ Logs |results| of a test run. """
+ logger = logging.getLogger("run_tests")
+ failed_tests = []
+ failed_tests_num = 0
+ for result in results:
+ website, test_case, success, reason = result
+ if not (config.save_only_fails and success):
+ logger.debug("Test case %s has %s on website %s", test_case,
+ {True: "passed", False: "failed"}[success], website)
+ if not success:
+ logger.debug("Reason of failure: %s", reason)
+
+ if not success:
+ failed_tests.append("%s.%s" % (website, test_case))
+ failed_tests_num += 1
+
+ logger.info("%d failed test cases out of %d, failing test cases: %s",
+ failed_tests_num, len(results),
+ sorted([name for name in failed_tests]))
+
+
+def RunTestCaseOnWebsite((website, test_case, config)):
+ """ Runs a |test_case| on a |website|. In case when |test_case| has
+ failed it tries to rerun it. If run takes too long, then it is stopped.
+ """
+
+ profile_path = tempfile.mkdtemp()
+ # The tests can be flaky. This is why we try to rerun up to 3 times.
+ attempts = 3
+ result = ("", "", False, "")
+ logger = logging.getLogger("run_tests")
+ for _ in xrange(attempts):
+ shutil.rmtree(path=profile_path, ignore_errors=True)
+ logger.log(SCRIPT_DEBUG, "Run of test case %s of website %s started",
+ test_case, website)
+ try:
+ with stopit.ThreadingTimeout(100) as timeout:
+ logger.log(SCRIPT_DEBUG,
+ "Run test with parameters: %s %s %s %s %s %s",
+ config.chrome_path, config.chromedriver_path,
+ profile_path, config.passwords_path,
+ website, test_case)
+ result = tests.RunTest(config.chrome_path, config.chromedriver_path,
+ profile_path, config.passwords_path,
+ website, test_case)[0]
+ if timeout.state != timeout.EXECUTED:
+ result = (website, test_case, False, "Timeout")
+ _, _, success, _ = result
+ if success:
+ return result
+ except Exception as e:
+ result = (website, test_case, False, e)
+ return result
+
+
+def RunTests(config_path):
"""Runs automated tests.
Runs the tests and returns the results through logging:
@@ -183,61 +152,21 @@ def run_tests(config_path):
config_path: The path to the config INI file. See the top of the file
for format description.
"""
- def has_test_run_finished(runner, result):
- result = runner.get_test_result()
- if result: # This test run is finished.
- status, log = result
- results.append((runner.test_name, status, log))
- return True
- else:
- return False
-
- defaults = {("run_options", "tests_in_parallel"): "1"}
- config = ConfigParser.ConfigParser()
- _apply_defaults(config, defaults)
- config.read(config_path)
- max_tests_in_parallel = config.getint("run_options", "tests_in_parallel")
- full_path = os.path.realpath(__file__)
- tests_dir = os.path.dirname(full_path)
- tests_path = os.path.join(tests_dir, "tests.py")
- test_name_idx = 2 # Index of "test_name_placeholder" below.
- general_test_cmd = ["python", tests_path, "test_name_placeholder",
- "--chrome-path", config.get("binaries", "chrome-path"),
- "--chromedriver-path",
- config.get("binaries", "chromedriver-path"),
- "--passwords-path",
- config.get("data_files", "passwords_path")]
- runners = []
- if config.has_option("run_options", "tests_to_run"):
- tests_to_run = config.get("run_options", "tests_to_run").split(",")
- else:
- tests_to_run = tests.all_tests.keys()
- if (config.has_option("logging", "save-only-failures") and
- config.getboolean("logging", "save-only-failures")):
- general_test_cmd.append("--save-only-failures")
-
- if config.has_option("run_options", "test_cases_to_run"):
- general_test_cmd += ["--test-cases-to-run",
- config.get("run_options", "test_cases_to_run").replace(",", " ")]
-
+ config = Config(config_path)
logger = logging.getLogger("run_tests")
- logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run),
- tests_to_run)
- results = [] # List of (name, bool_passed, failure_log).
- while len(runners) + len(tests_to_run) > 0:
- runners = [runner for runner in runners if not has_test_run_finished(
- runner, results)]
- while len(runners) < max_tests_in_parallel and len(tests_to_run):
- test_name = tests_to_run.pop()
- specific_test_cmd = list(general_test_cmd)
- specific_test_cmd[test_name_idx] = test_name
- runners.append(TestRunner(specific_test_cmd, test_name))
- time.sleep(1)
- failed_tests = [(name, log) for (name, passed, log) in results if not passed]
- logger.info("%d failed tests out of %d, failing tests: %s",
- len(failed_tests), len(results),
- [name for (name, _) in failed_tests])
- logger.debug("Logs of failing tests: %s", failed_tests)
+ logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(config.tests_to_run),
+ config.tests_to_run)
+ data = [(website, test_case, config)
+ for website in config.tests_to_run
+ for test_case in config.test_cases_to_run]
+ number_of_processes = min([config.max_tests_in_parallel,
+ len(config.test_cases_to_run) *
+ len(config.tests_to_run)])
+ p = multiprocessing.Pool(number_of_processes)
+ results = p.map(RunTestCaseOnWebsite, data)
+ p.close()
+ p.join()
+ LogResultsOfTestRun(config, results)
def main():
@@ -245,7 +174,7 @@ def main():
parser.add_argument("config_path", metavar="N",
help="Path to the config.ini file.")
args = parser.parse_args()
- run_tests(args.config_path)
+ RunTests(args.config_path)
if __name__ == "__main__":
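RunTestCaseOnWebsite above replaces the old "timeout 300" subprocess wrapper with an in-process, cross-platform timeout: each attempt runs under stopit.ThreadingTimeout, and a flaky test is attempted up to three times. A reduced sketch of that retry-with-timeout loop, assuming stopit's documented ThreadingTimeout context manager and a hypothetical flaky_job() callable in place of tests.RunTest:

    # Sketch of the retry-with-timeout loop; not part of the commit.
    import stopit

    def run_with_retries(flaky_job, attempts=3, seconds=100):
        result = (False, "not run")
        for _ in xrange(attempts):
            try:
                with stopit.ThreadingTimeout(seconds) as timeout_ctx:
                    result = (True, flaky_job())
                if timeout_ctx.state != timeout_ctx.EXECUTED:
                    result = (False, "Timeout")  # the attempt was interrupted
                if result[0]:
                    return result
            except Exception as e:
                result = (False, "Exception %s %s" % (type(e).__name__, e))
        return result

Running the tests in-process this way (importing tests and calling tests.RunTest directly instead of spawning "python tests.py ...") is what makes the timeout mechanism work the same on every platform.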
diff --git a/components/test/data/password_manager/automated_tests/tests.py b/components/test/data/password_manager/automated_tests/tests.py
index bddbafb..f8fe6ff 100644
--- a/components/test/data/password_manager/automated_tests/tests.py
+++ b/components/test/data/password_manager/automated_tests/tests.py
@@ -11,6 +11,9 @@ from environment import Environment
from websitetest import WebsiteTest
+TEST_CASES = ("PromptFailTest", "PromptSuccessTest", "SaveAndAutofillTest")
+
+
class Alexa(WebsiteTest):
def Login(self):
@@ -504,8 +507,10 @@ def SaveResults(environment_tests_results, environment_save_path,
with open(environment_save_path, "w") as save_file:
save_file.write(xml)
+
def RunTest(chrome_path, chromedriver_path, profile_path,
- environment_passwords_path, website_test_name, test_case_name):
+ environment_passwords_path, website_test_name,
+ test_case_name):
"""Runs the test for the specified website.
Args:
@@ -528,15 +533,16 @@ def RunTest(chrome_path, chromedriver_path, profile_path,
environment = Environment(chrome_path, chromedriver_path, profile_path,
environment_passwords_path,
enable_automatic_password_saving)
+ try:
+ if website_test_name in all_tests:
+ environment.AddWebsiteTest(all_tests[website_test_name])
+ else:
+ raise Exception("Test name {} is unknown.".format(website_test_name))
- if website_test_name in all_tests:
- environment.AddWebsiteTest(all_tests[website_test_name])
- else:
- raise Exception("Test name {} is unknown.".format(website_test_name))
-
- environment.RunTestsOnSites(test_case_name)
- environment.Quit()
- return environment.tests_results
+ environment.RunTestsOnSites(test_case_name)
+ return environment.tests_results
+ finally:
+ environment.Quit()
def main():
parser = argparse.ArgumentParser(
@@ -575,9 +581,7 @@ def main():
if args.save_path:
save_path = args.save_path
- test_cases_to_run = args.test_cases_to_run or\
- ("PromptFailTest", "PromptSuccessTest", "SaveAndAutofillTest")
-
+ test_cases_to_run = args.test_cases_to_run or TEST_CASES
for test_case in test_cases_to_run:
tests_results = RunTest(
args.chrome_path, args.chromedriver_path, args.profile_path,