# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Encapsulates running tests defined in tests.py.

Running this script requires passing the path to a config file of the
following structure:

  [data_files]
  passwords_path=<path to a file with passwords>
  [binaries]
  chrome-path=<chrome binary path>
  chromedriver-path=<chrome driver path>
  [run_options]
  # |tests_in_parallel| is optional, the default value is 1.
  tests_in_parallel=<number of parallel tests>
  # |tests_to_run| field is optional; if it is absent, all tests will be run.
  tests_to_run=<test names to run, comma delimited>
  # |test_cases_to_run| field is optional; if it is absent, all test cases
  # will be run.
  test_cases_to_run=<test case names to run, comma delimited>
  [logging]
  # |save-only-fails| is optional, the default is false.
  save-only-fails=<Boolean; when true, only results of failed tests are
                   logged>
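
  For example, a complete config might look like this (the paths below are
  placeholders, not real files):

    [data_files]
    passwords_path=/tmp/passwords.txt
    [binaries]
    chrome-path=/opt/chrome/chrome
    chromedriver-path=/opt/chrome/chromedriver
    [run_options]
    tests_in_parallel=2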

The script uses Python's logging library to report the test results,
as well as debugging information. It emits three levels of logs (in
descending order of severity):
  logging.INFO: Summary of the tests.
  logging.DEBUG: Details about test failures.
  SCRIPT_DEBUG (see below): Debug info of this script.
You have to set up appropriate logging handlers to have the logs appear.
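
For example, a caller can surface all of this script's logs on stderr with:

  logger = logging.getLogger("run_tests")
  logger.setLevel(SCRIPT_DEBUG)
  logger.addHandler(logging.StreamHandler())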
"""

import ConfigParser
import argparse
import logging
import multiprocessing
import shutil
import stopit
import tempfile

import tests


# Just below logging.DEBUG, use for this script's debug messages instead
# of logging.DEBUG, which is already used for detailed test debug messages.
SCRIPT_DEBUG = 9
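# A human-readable name can optionally be registered for this level, e.g.
# logging.addLevelName(SCRIPT_DEBUG, "SCRIPT_DEBUG"), so that it shows up
# in formatted log records.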

class Config(object):
  """Test run configuration read from the INI file (see module docstring)."""

  test_cases_to_run = tests.TEST_CASES
  save_only_fails = False
  tests_to_run = tests.all_tests.keys()
  max_tests_in_parallel = 1

  def __init__(self, config_path):
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    if config.has_option("run_options", "tests_in_parallel"):
      self.max_tests_in_parallel = config.getint(
          "run_options", "tests_in_parallel")

    self.chrome_path = config.get("binaries", "chrome-path")
    self.chromedriver_path = config.get("binaries", "chromedriver-path")
    self.passwords_path = config.get("data_files", "passwords_path")

    if config.has_option("run_options", "tests_to_run"):
      self.tests_to_run = config.get("run_options", "tests_to_run").split(",")

    if config.has_option("run_options", "test_cases_to_run"):
      self.test_cases_to_run = config.get(
          "run_options", "test_cases_to_run").split(",")

    if config.has_option("logging", "save-only-fails"):
      self.save_only_fails = config.getboolean("logging", "save-only-fails")


def LogResultsOfTestRun(config, results):
  """ Logs |results| of a test run. """
  logger = logging.getLogger("run_tests")
  failed_tests = []
  failed_tests_num = 0
  for result in results:
    website, test_case, success, reason = result
    if not (config.save_only_fails and success):
      logger.debug("Test case %s has %s on Website %s", test_case,
                   {True: "passed", False: "failed"}[success], website)
      if not success:
        logger.debug("Reason of failure: %s", reason)

    if not success:
      failed_tests.append("%s.%s" % (website, test_case))
      failed_tests_num += 1

  logger.info("%d failed test cases out of %d, failing test cases: %s",
              failed_tests_num, len(results),
              sorted(failed_tests))


def RunTestCaseOnWebsite((website, test_case, config)):
  """Runs |test_case| on |website|. If the run fails, it is retried; a run
  that takes too long is aborted and counted as a failure.
  """

  profile_path = tempfile.mkdtemp()
  # The tests can be flaky, so each test case is attempted up to 3 times.
  attempts = 3
  result = ("", "", False, "")
  logger = logging.getLogger("run_tests")
  for _ in xrange(attempts):
    shutil.rmtree(path=profile_path, ignore_errors=True)
    logger.log(SCRIPT_DEBUG, "Run of test case %s of website %s started",
               test_case, website)
    try:
      with stopit.ThreadingTimeout(100) as timeout:
        logger.log(SCRIPT_DEBUG,
                   "Run test with parameters: %s %s %s %s %s %s",
                   config.chrome_path, config.chromedriver_path,
                   profile_path, config.passwords_path,
                   website, test_case)
        result = tests.RunTest(config.chrome_path, config.chromedriver_path,
                               profile_path, config.passwords_path,
                               website, test_case)[0]
      if timeout.state != timeout.EXECUTED:
        result = (website, test_case, False, "Timeout")
      _, _, success, _ = result
      if success:
        return result
    except Exception as e:
      result = (website, test_case, False, e)
  return result


def RunTests(config_path):
  """Runs automated tests.

  Runs the tests and reports the results through logging:
  On the logging.INFO level, it reports a summary of how many tests
  passed and failed.
  On the logging.DEBUG level, it reports the failure logs, if any.
  (On SCRIPT_DEBUG it reports diagnostics for this script itself.)

  Args:
    config_path: The path to the config INI file. See the top of this file
      for the format description.
  """
  config = Config(config_path)
  logger = logging.getLogger("run_tests")
  logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(config.tests_to_run),
             config.tests_to_run)
  data = [(website, test_case, config)
          for website in config.tests_to_run
          for test_case in config.test_cases_to_run]
  number_of_processes = min(config.max_tests_in_parallel,
                            len(config.test_cases_to_run) *
                            len(config.tests_to_run))
  p = multiprocessing.Pool(number_of_processes)
  results = p.map(RunTestCaseOnWebsite, data)
  p.close()
  p.join()
  LogResultsOfTestRun(config, results)


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("config_path", metavar="N",
                      help="Path to the config.ini file.")
  args = parser.parse_args()
  RunTests(args.config_path)


if __name__ == "__main__":
  main()