| author | Paweł Hajdan, Jr <phajdan.jr@chromium.org> | 2014-11-05 16:35:51 +0100 |
|---|---|---|
| committer | Paweł Hajdan, Jr <phajdan.jr@chromium.org> | 2014-11-05 15:36:51 +0000 |
| commit | b0d9981737dbe92852366bf416f19dfcb978d6c6 (patch) | |
| tree | 3da725d010a1fac7e79e9dc6bba40fdc099f1638 | |
| parent | d1e250bc34f3e17256bcbe9a5a394a641a9eb804 (diff) | |
| download | chromium_src-b0d9981737dbe92852366bf416f19dfcb978d6c6.zip chromium_src-b0d9981737dbe92852366bf416f19dfcb978d6c6.tar.gz chromium_src-b0d9981737dbe92852366bf416f19dfcb978d6c6.tar.bz2 | |
Revert of Reland "Switch telemetry over to use typ to run the unit tests." (patchset #1 id:1 of https://codereview.chromium.org/707453002/)
Reason for revert:
This seems to return a 0 exit code even when tests are failing; see e.g. http://build.chromium.org/p/chromium.mac/builders/Mac10.9%20Tests%20%28dbg%29/builds/18/steps/telemetry_perf_unittests/logs/stdio . The previous build correctly reported the steps as failed (http://build.chromium.org/p/chromium.mac/buildstatus?builder=Mac10.9%20Tests%20%28dbg%29&number=17).
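For context, the pre-typ runner that this CL restores derives the process exit code from the aggregated JSON results rather than from the progress reporter, so a run with failures cannot exit 0. A minimal sketch of that convention — the helper name mirrors `ExitCodeFromFullResults` in `telemetry/unittest/json_results.py` in the diff below, and the example payload is invented:

```python
# Minimal sketch (not part of this CL): the exit code is computed from the
# summary counts in the full_results.json payload.
def ExitCodeFromFullResults(full_results):
  # 'num_failures_by_type' is the summary dict written into full_results.json.
  return 1 if full_results['num_failures_by_type']['FAIL'] else 0


if __name__ == '__main__':
  # Invented example: two failures should make the run exit non-zero.
  example = {'num_failures_by_type': {'FAIL': 2, 'PASS': 40, 'SKIP': 1}}
  assert ExitCodeFromFullResults(example) == 1
```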
Original issue's description:
> Reland "Switch telemetry over to use typ to run the unit tests."
>
> Original review: https://codereview.chromium.org/659293003
>
> Using typ allows us to run the tests in parallel and share
> the logic for parsing the results and uploading them to the
> flakiness dashboard with other python test steps.
>
> TBR=dtu@chromium.org, tonyg@chromium.org, nduca@chromium.org, dpranke@chromium.org
> BUG=402172, 388256
>
> Committed: https://chromium.googlesource.com/chromium/src/+/fc4a811ffe26653019e2213947146a721620e0fc
TBR=dpranke@chromium.org,dtu@chromium.org,nduca@chromium.org,tonyg@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=402172, 388256
Review URL: https://codereview.chromium.org/708483002
Cr-Commit-Position: refs/heads/master@{#302804}
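The original description above argues for typ because it runs the unit tests in parallel and shares the results-parsing and upload logic with other Python test steps. For orientation only, here is a rough sketch of the typ wiring this revert removes, condensed from the deleted code in `run_tests.py` further down; the wrapper function and its parameters are illustrative and not part of either CL:

```python
import sys

import typ  # third_party/typ, the dependency this revert drops


def RunWithTyp(top_level_dir, jobs, retry_limit=3):
  # Condensed from the deleted RunTestsCommand.Run(); browser selection,
  # result upload and error handling are omitted.
  runner = typ.Runner()
  runner.args.top_level_dir = top_level_dir
  # Telemetry scaled parallelism back to a quarter of the requested jobs
  # because many of its tests are long-running.
  runner.args.jobs = max(int(jobs) // 4, 1)
  runner.args.retry_limit = retry_limit
  ret, _, _ = runner.run()
  return ret


if __name__ == '__main__':
  sys.exit(RunWithTyp('.', jobs=4))
```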
| -rwxr-xr-x | tools/perf/run_tests | 23 |
| -rw-r--r-- | tools/telemetry/PRESUBMIT.py | 4 |
| -rw-r--r-- | tools/telemetry/bootstrap_deps | 47 |
| -rwxr-xr-x | tools/telemetry/run_tests | 22 |
| -rw-r--r-- | tools/telemetry/telemetry/decorators.py | 78 |
| -rw-r--r-- | tools/telemetry/telemetry/unittest/browser_test_case.py | 45 |
| -rw-r--r-- | tools/telemetry/telemetry/unittest/json_results.py | 247 |
| -rw-r--r-- | tools/telemetry/telemetry/unittest/run_chromeos_tests.py | 44 |
| -rw-r--r-- | tools/telemetry/telemetry/unittest/run_tests.py | 257 |
| -rw-r--r-- | tools/telemetry/telemetry/unittest/run_tests_unittest.py | 36 |
| -rw-r--r-- | tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py | 9 |
11 files changed, 510 insertions, 302 deletions
diff --git a/tools/perf/run_tests b/tools/perf/run_tests
index 1ebfe32..6497020 100755
--- a/tools/perf/run_tests
+++ b/tools/perf/run_tests
@@ -9,22 +9,17 @@
 This script DOES NOT run benchmarks. run_benchmark does that.
 """
 
 import os
-import subprocess
 import sys
 
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
 
-if __name__ == '__main__':
-  perf_dir = os.path.dirname(os.path.realpath(__file__))
-  telemetry_dir = os.path.realpath(os.path.join(perf_dir, '..', 'telemetry'))
+from telemetry.unittest import gtest_progress_reporter
+from telemetry.unittest import run_tests
 
-  env = os.environ.copy()
-  if 'PYTHONPATH' in env:
-    env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir
-  else:
-    env['PYTHONPATH'] = telemetry_dir
 
-  path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest',
-                                   'run_tests.py')
-  argv = ['--top-level-dir', perf_dir] + sys.argv[1:]
-  sys.exit(subprocess.call([sys.executable, path_to_run_tests] + argv,
-                           env=env))
+if __name__ == '__main__':
+  base_dir = os.path.dirname(os.path.realpath(__file__))
+  progress_reporters = [
+      gtest_progress_reporter.GTestProgressReporter(sys.stdout)]
+  run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters)
+  sys.exit(run_tests.RunTestsCommand.main())
diff --git a/tools/telemetry/PRESUBMIT.py b/tools/telemetry/PRESUBMIT.py
index c424658..52c8806 100644
--- a/tools/telemetry/PRESUBMIT.py
+++ b/tools/telemetry/PRESUBMIT.py
@@ -33,9 +33,7 @@ def _CommonChecks(input_api, output_api):
   return results
 
 
 def GetPathsToPrepend(input_api):
-  return [input_api.PresubmitLocalPath(),
-          os.path.join(input_api.PresubmitLocalPath(), os.path.pardir,
-                       os.path.pardir, 'third_party', 'typ')]
+  return [input_api.PresubmitLocalPath()]
 
 
 def RunWithPrependedPath(prepended_path, fn, *args):
   old_path = sys.path
diff --git a/tools/telemetry/bootstrap_deps b/tools/telemetry/bootstrap_deps
index d949ab8..7359ee9 100644
--- a/tools/telemetry/bootstrap_deps
+++ b/tools/telemetry/bootstrap_deps
@@ -5,23 +5,36 @@
 # This file specifies dependencies required to bootstrap Telemetry. It is in a
 # minimal version of the format used by other DEPS files that gclient can read,
 # but it should only be used to bootstrap Telemetry *outside* of a normal
-# Chrome checkout. In particular, the normal 'value' part of the python
-# dict is not used and hence does not contain real URLs for the repos.
+# Chrome checkout.
 
 deps = {
-  "src/tools/telemetry": "",
-  "src/build/android": "",
-  "src/build/util": "",
-  "src/chrome/test/data/extensions/profiles": "",
-  "src/third_party/android_testrunner": "",
-  "src/third_party/android_tools/sdk/platform-tools": "",
-  "src/third_party/chromite/ssh_keys": "",
-  "src/third_party/flot/jquery.flot.min.js": "",
-  "src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js": "",
-  "src/third_party/WebKit/PerformanceTests/resources/statistics.js": "",
-  "src/third_party/webpagereplay": "",
-  "src/third_party/trace-viewer": "",
-  "src/third_party/typ": "",
-  "src/tools/crx_id": "",
-  "src/tools/perf/unit-info.json": "",
+  "src/tools/telemetry":
+    "https://src.chromium.org/chrome/trunk/src/tools/telemetry",
+
+  "src/build/android":
+    "https://src.chromium.org/chrome/trunk/src/build/android",
+  "src/build/util":
+    "https://src.chromium.org/chrome/trunk/src/build/util",
+  "src/chrome/test/data/extensions/profiles":
+    "https://src.chromium.org/chrome/trunk/src/chrome/test/data/extensions/profiles",
+  "src/third_party/android_testrunner":
+    "https://src.chromium.org/chrome/trunk/src/third_party/android_testrunner",
+  "src/third_party/android_tools/sdk/platform-tools":
+    "https://src.chromium.org/chrome/trunk/src/third_party/android_tools/sdk/platform-tools",
+  "src/third_party/chromite/ssh_keys":
+    "https://src.chromium.org/chrome/trunk/src/third_party/chromite/ssh_keys",
+  "src/third_party/flot/jquery.flot.min.js":
+    "https://src.chromium.org/chrome/trunk/src/third_party/flot/jquery.flot.min.js",
+  "src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js":
+    "https://src.chromium.org/blink/trunk/PerformanceTests/resources/jquery.tablesorter.min.js",
+  "src/third_party/WebKit/PerformanceTests/resources/statistics.js":
+    "https://src.chromium.org/blink/trunk/PerformanceTests/resources/statistics.js",
+  "src/third_party/webpagereplay":
+    "https://web-page-replay.googlecode.com/svn/trunk",
+  "src/third_party/trace-viewer":
+    "https://trace-viewer.googlecode.com/svn/trunk",
+  "src/tools/crx_id":
+    "https://src.chromium.org/chrome/trunk/src/tools/crx_id",
+  "src/tools/perf/unit-info.json":
+    "https://src.chromium.org/chrome/trunk/src/tools/perf/unit-info.json"
 }
diff --git a/tools/telemetry/run_tests b/tools/telemetry/run_tests
index e2bb33c..96a0835 100755
--- a/tools/telemetry/run_tests
+++ b/tools/telemetry/run_tests
@@ -5,20 +5,14 @@
 import os
 import sys
-import subprocess
 
+from telemetry.unittest import gtest_progress_reporter
+from telemetry.unittest import run_tests
 
-if __name__ == '__main__':
-  telemetry_dir = os.path.dirname(os.path.realpath(__file__))
-
-  env = os.environ.copy()
-  if 'PYTHONPATH' in env:
-    env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir
-  else:
-    env['PYTHONPATH'] = telemetry_dir
 
-  path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest',
-                                   'run_tests.py')
-  argv = ['--top-level-dir', telemetry_dir] + sys.argv[1:]
-  sys.exit(subprocess.call([sys.executable, path_to_run_tests] + argv,
-                           env=env))
+if __name__ == '__main__':
+  base_dir = os.path.dirname(os.path.realpath(__file__))
+  progress_reporters = [
+      gtest_progress_reporter.GTestProgressReporter(sys.stdout)]
+  run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters)
+  sys.exit(run_tests.RunTestsCommand.main())
diff --git a/tools/telemetry/telemetry/decorators.py b/tools/telemetry/telemetry/decorators.py
index 9897984..0ac1408 100644
--- a/tools/telemetry/telemetry/decorators.py
+++ b/tools/telemetry/telemetry/decorators.py
@@ -92,31 +92,6 @@ def Enabled(*args):
   return _Enabled
 
 
-# TODO(dpranke): Remove if we don't need this.
-def Isolated(*args):
-  """Decorator for noting that tests must be run in isolation.
-
-  The test will be run by itself (not concurrently with any other tests)
-  if ANY of the args match the browser type, OS name, or OS version."""
-  def _Isolated(func):
-    if not isinstance(func, types.FunctionType):
-      func._isolated_strings = isolated_strings
-      return func
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-      func(*args, **kwargs)
-    wrapper._isolated_strings = isolated_strings
-    return wrapper
-  if len(args) == 1 and callable(args[0]):
-    isolated_strings = []
-    return _Isolated(args[0])
-  isolated_strings = list(args)
-  for isolated_string in isolated_strings:
-    # TODO(tonyg): Validate that these strings are recognized.
-    assert isinstance(isolated_string, str), 'Isolated accepts a list of strs'
-  return _Isolated
-
-
 def IsEnabled(test, possible_browser):
   """Returns True iff |test| is enabled given the |possible_browser|.
 
@@ -127,11 +102,6 @@ def IsEnabled(test, possible_browser):
       _enabled_strings attributes.
     possible_browser: A PossibleBrowser to check whether |test| may run against.
   """
-  should_skip, _ = ShouldSkip(test, possible_browser)
-  return not should_skip
-
-
-def ShouldSkip(test, possible_browser):
-  """Returns whether the test should be skipped and the reason for it."""
   platform_attributes = [a.lower() for a in [
       possible_browser.browser_type,
       possible_browser.platform.GetOSName(),
@@ -150,46 +120,28 @@
   if hasattr(test, '_disabled_strings'):
     disabled_strings = test._disabled_strings
     if not disabled_strings:
-      return True, ''  # No arguments to @Disabled means always disable.
+      return False  # No arguments to @Disabled means always disable.
     for disabled_string in disabled_strings:
       if disabled_string in platform_attributes:
-        return (True,
-                'Skipping %s because it is disabled for %s. '
-                'You are running %s.' % (name,
-                                         ' and '.join(disabled_strings),
-                                         ' '.join(platform_attributes)))
+        print (
+            'Skipping %s because it is disabled for %s. '
+            'You are running %s.' % (name,
+                                     ' and '.join(disabled_strings),
+                                     ' '.join(platform_attributes)))
+        return False
 
   if hasattr(test, '_enabled_strings'):
     enabled_strings = test._enabled_strings
     if not enabled_strings:
-      return False, None  # No arguments to @Enabled means always enable.
+      return True  # No arguments to @Enabled means always enable.
     for enabled_string in enabled_strings:
       if enabled_string in platform_attributes:
-        return False, None
-    return (True,
-            'Skipping %s because it is only enabled for %s. '
-            'You are running %s.' % (name,
-                                     ' or '.join(enabled_strings),
-                                     ' '.join(platform_attributes)))
-  return False, None
-
-  return False, None
-
-
-def ShouldBeIsolated(test, possible_browser):
-  platform_attributes = [a.lower() for a in [
-      possible_browser.browser_type,
-      possible_browser.platform.GetOSName(),
-      possible_browser.platform.GetOSVersionName(),
-  ]]
-  if possible_browser.supports_tab_control:
-    platform_attributes.append('has tabs')
-
-  if hasattr(test, '_isolated_strings'):
-    isolated_strings = test._isolated_strings
-    if not isolated_strings:
-      return True  # No arguments to @Isolated means always isolate.
-    for isolated_string in isolated_strings:
-      if isolated_string in platform_attributes:
         return True
+    print (
+        'Skipping %s because it is only enabled for %s. '
+        'You are running %s.' % (name,
+                                 ' or '.join(enabled_strings),
+                                 ' '.join(platform_attributes)))
     return False
-  return False
+
+  return True
diff --git a/tools/telemetry/telemetry/unittest/browser_test_case.py b/tools/telemetry/telemetry/unittest/browser_test_case.py
index c5d6e2d..ffb58ce 100644
--- a/tools/telemetry/telemetry/unittest/browser_test_case.py
+++ b/tools/telemetry/telemetry/unittest/browser_test_case.py
@@ -9,49 +9,28 @@
 from telemetry.core import browser_finder
 from telemetry.unittest import options_for_unittests
 from telemetry.util import path
 
-current_browser_options = None
-current_browser = None
-
-
-def teardown_browser():
-  global current_browser
-  global current_browser_options
-
-  if current_browser:
-    current_browser.Close()
-  current_browser = None
-  current_browser_options = None
-
 
 class BrowserTestCase(unittest.TestCase):
   @classmethod
   def setUpClass(cls):
-    global current_browser
-    global current_browser_options
-
     options = options_for_unittests.GetCopy()
-
     cls.CustomizeBrowserOptions(options.browser_options)
-    if not current_browser or (current_browser_options !=
-                               options.browser_options):
-      if current_browser:
-        teardown_browser()
+    browser_to_create = browser_finder.FindBrowser(options)
+    if not browser_to_create:
+      raise Exception('No browser found, cannot continue test.')
 
-      browser_to_create = browser_finder.FindBrowser(options)
-      if not browser_to_create:
-        raise Exception('No browser found, cannot continue test.')
-
-      try:
-        current_browser = browser_to_create.Create(options)
-        current_browser_options = options.browser_options
-      except:
-        cls.tearDownClass()
-        raise
-    cls._browser = current_browser
+    cls._browser = None
+    try:
+      cls._browser = browser_to_create.Create(options)
+    except:
+      cls.tearDownClass()
+      raise
 
   @classmethod
   def tearDownClass(cls):
-    pass
+    if cls._browser:
+      cls._browser.Close()
+      cls._browser = None
 
   @classmethod
   def CustomizeBrowserOptions(cls, options):
diff --git a/tools/telemetry/telemetry/unittest/json_results.py b/tools/telemetry/telemetry/unittest/json_results.py
new file mode 100644
index 0000000..256e5c9
--- /dev/null
+++ b/tools/telemetry/telemetry/unittest/json_results.py
@@ -0,0 +1,247 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import json
+import re
+import time
+import unittest
+import urllib2
+
+
+# TODO(dpranke): This code is largely cloned from, and redundant with,
+# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic
+# in test-webkitpy and run-webkit-tests. We should consolidate the
+# python TestResult parsing/converting/uploading code as much as possible.
+
+
+def AddOptions(parser):
+  parser.add_option('--metadata', action='append', default=[],
+                    help=('optional key=value metadata that will be stored '
+                          'in the results files (can be used for revision '
+                          'numbers, etc.)'))
+  parser.add_option('--write-full-results-to', metavar='FILENAME',
+                    action='store',
+                    help='The path to write the list of full results to.')
+  parser.add_option('--builder-name',
+                    help='The name of the builder as shown on the waterfall.')
+  parser.add_option('--master-name',
+                    help='The name of the buildbot master.')
+  parser.add_option("--test-results-server", default="",
+                    help=('If specified, upload full_results.json file to '
+                          'this server.'))
+  parser.add_option('--test-type',
+                    help=('Name of test type / step on the waterfall '
+                          '(e.g., "telemetry_unittests").'))
+
+
+def ValidateArgs(parser, args):
+  for val in args.metadata:
+    if '=' not in val:
+      parser.error('Error: malformed metadata "%s"' % val)
+
+  if (args.test_results_server and
+      (not args.builder_name or not args.master_name or not args.test_type)):
+    parser.error('Error: --builder-name, --master-name, and --test-type '
+                 'must be specified along with --test-result-server.')
+
+
+def WriteFullResultsIfNecessary(args, full_results):
+  if not args.write_full_results_to:
+    return
+
+  with open(args.write_full_results_to, 'w') as fp:
+    json.dump(full_results, fp, indent=2)
+    fp.write("\n")
+
+
+def UploadFullResultsIfNecessary(args, full_results):
+  if not args.test_results_server:
+    return False, ''
+
+  url = 'http://%s/testfile/upload' % args.test_results_server
+  attrs = [('builder', args.builder_name),
+           ('master', args.master_name),
+           ('testtype', args.test_type)]
+  content_type, data = _EncodeMultiPartFormData(attrs, full_results)
+  return _UploadData(url, data, content_type)
+
+
+TEST_SEPARATOR = '.'
+
+
+def FullResults(args, suite, results):
+  """Convert the unittest results to the Chromium JSON test result format.
+
+  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
+  """
+
+  full_results = {}
+  full_results['interrupted'] = False
+  full_results['path_delimiter'] = TEST_SEPARATOR
+  full_results['version'] = 3
+  full_results['seconds_since_epoch'] = time.time()
+  full_results['builder_name'] = args.builder_name or ''
+  for md in args.metadata:
+    key, val = md.split('=', 1)
+    full_results[key] = val
+
+  all_test_names = AllTestNames(suite)
+  sets_of_passing_test_names = map(PassingTestNames, results)
+  sets_of_failing_test_names = map(functools.partial(FailedTestNames, suite),
+                                   results)
+
+  # TODO(crbug.com/405379): This handles tests that are skipped via the
+  # unittest skip decorators (like skipUnless). The tests that are skipped via
+  # telemetry's decorators package are not included in the test suite at all so
+  # we need those to be passed in in order to include them.
+  skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0]
+                   - sets_of_failing_test_names[0])
+
+  num_tests = len(all_test_names)
+  num_failures = NumFailuresAfterRetries(suite, results)
+  num_skips = len(skipped_tests)
+  num_passes = num_tests - num_failures - num_skips
+  full_results['num_failures_by_type'] = {
+      'FAIL': num_failures,
+      'PASS': num_passes,
+      'SKIP': num_skips,
+  }
+
+  full_results['tests'] = {}
+
+  for test_name in all_test_names:
+    if test_name in skipped_tests:
+      value = {
+          'expected': 'SKIP',
+          'actual': 'SKIP',
+      }
+    else:
+      value = {
+          'expected': 'PASS',
+          'actual': ActualResultsForTest(test_name,
+                                         sets_of_failing_test_names,
+                                         sets_of_passing_test_names),
+      }
+    if value['actual'].endswith('FAIL'):
+      value['is_unexpected'] = True
+    _AddPathToTrie(full_results['tests'], test_name, value)
+
+  return full_results
+
+
+def ActualResultsForTest(test_name, sets_of_failing_test_names,
+                         sets_of_passing_test_names):
+  actuals = []
+  for retry_num in range(len(sets_of_failing_test_names)):
+    if test_name in sets_of_failing_test_names[retry_num]:
+      actuals.append('FAIL')
+    elif test_name in sets_of_passing_test_names[retry_num]:
+      assert ((retry_num == 0) or
+              (test_name in sets_of_failing_test_names[retry_num - 1])), (
+          'We should not have run a test that did not fail '
+          'on the previous run.')
+      actuals.append('PASS')
+
+  assert actuals, 'We did not find any result data for %s.' % test_name
+  return ' '.join(actuals)
+
+
+def ExitCodeFromFullResults(full_results):
+  return 1 if full_results['num_failures_by_type']['FAIL'] else 0
+
+
+def AllTestNames(suite):
+  test_names = []
+  # _tests is protected  pylint: disable=W0212
+  for test in suite._tests:
+    if isinstance(test, unittest.suite.TestSuite):
+      test_names.extend(AllTestNames(test))
+    else:
+      test_names.append(test.id())
+  return test_names
+
+
+def NumFailuresAfterRetries(suite, results):
+  return len(FailedTestNames(suite, results[-1]))
+
+
+def FailedTestNames(suite, result):
+  failed_test_names = set()
+  for test, error in result.failures + result.errors:
+    if isinstance(test, unittest.TestCase):
+      failed_test_names.add(test.id())
+    elif isinstance(test, unittest.suite._ErrorHolder):  # pylint: disable=W0212
+      # If there's an error in setUpClass or setUpModule, unittest gives us an
+      # _ErrorHolder object. We can parse the object's id for the class or
+      # module that failed, then find all tests in that class or module.
+      match = re.match('setUp[a-zA-Z]+ \\((.+)\\)', test.id())
+      assert match, "Don't know how to retry after this error:\n%s" % error
+      module_or_class = match.groups()[0]
+      failed_test_names |= _FindChildren(module_or_class, AllTestNames(suite))
+    else:
+      assert False, 'Unknown test type: %s' % test.__class__
+  return failed_test_names
+
+
+def _FindChildren(parent, potential_children):
+  children = set()
+  parent_name_parts = parent.split('.')
+  for potential_child in potential_children:
+    child_name_parts = potential_child.split('.')
+    if parent_name_parts == child_name_parts[:len(parent_name_parts)]:
+      children.add(potential_child)
+  return children
+
+
+def PassingTestNames(result):
+  return set(test.id() for test in result.successes)
+
+
+def _AddPathToTrie(trie, path, value):
+  if TEST_SEPARATOR not in path:
+    trie[path] = value
+    return
+  directory, rest = path.split(TEST_SEPARATOR, 1)
+  if directory not in trie:
+    trie[directory] = {}
+  _AddPathToTrie(trie[directory], rest, value)
+
+
+def _EncodeMultiPartFormData(attrs, full_results):
+  # Cloned from webkitpy/common/net/file_uploader.py
+  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+  CRLF = '\r\n'
+  lines = []
+
+  for key, value in attrs:
+    lines.append('--' + BOUNDARY)
+    lines.append('Content-Disposition: form-data; name="%s"' % key)
+    lines.append('')
+    lines.append(value)
+
+  lines.append('--' + BOUNDARY)
+  lines.append('Content-Disposition: form-data; name="file"; '
+               'filename="full_results.json"')
+  lines.append('Content-Type: application/json')
+  lines.append('')
+  lines.append(json.dumps(full_results))
+
+  lines.append('--' + BOUNDARY + '--')
+  lines.append('')
+  body = CRLF.join(lines)
+  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+  return content_type, body
+
+
+def _UploadData(url, data, content_type):
+  request = urllib2.Request(url, data, {'Content-Type': content_type})
+  try:
+    response = urllib2.urlopen(request)
+    if response.code == 200:
+      return False, ''
+    return True, ('Uploading the JSON results failed with %d: "%s"' %
+                  (response.code, response.read()))
+  except Exception as e:
+    return True, 'Uploading the JSON results raised "%s"\n' % str(e)
diff --git a/tools/telemetry/telemetry/unittest/run_chromeos_tests.py b/tools/telemetry/telemetry/unittest/run_chromeos_tests.py
index abf6f38..0912d84 100644
--- a/tools/telemetry/telemetry/unittest/run_chromeos_tests.py
+++ b/tools/telemetry/telemetry/unittest/run_chromeos_tests.py
@@ -3,7 +3,9 @@
 # found in the LICENSE file.
 import logging
 import os
+import sys
 
+from telemetry.unittest import gtest_progress_reporter
 from telemetry.unittest import run_tests
 from telemetry.core import util
 
@@ -12,29 +14,35 @@ def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
   stream = _LoggingOutputStream()
   error_string = ''
 
-  if unit_tests:
-    logging.info('Running telemetry unit tests with browser_type "%s".' %
-                 browser_type)
-    ret = _RunOneSetOfTests(browser_type, 'telemetry', unit_tests, stream)
-    if ret:
-      error_string += 'The unit tests failed.\n'
+  logging.info('Running telemetry unit tests with browser_type "%s".' %
+               browser_type)
+  ret = _RunOneSetOfTests(browser_type, 'telemetry',
+                          os.path.join('telemetry', 'telemetry'),
+                          unit_tests, stream)
+  if ret:
+    error_string += 'The unit tests failed.\n'
 
-  if perf_tests:
-    logging.info('Running telemetry perf tests with browser_type "%s".' %
-                 browser_type)
-    ret = _RunOneSetOfTests(browser_type, 'perf', perf_tests, stream)
-    if ret:
-      error_string = 'The perf tests failed.\n'
+  logging.info('Running telemetry perf tests with browser_type "%s".' %
+               browser_type)
+  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
+  if ret:
+    error_string = 'The perf tests failed.\n'
 
   return error_string
 
 
-def _RunOneSetOfTests(browser_type, dir_name, tests, stream):
-  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', dir_name)
-  args = ['--browser', browser_type,
-          '--top-level-dir', top_level_dir,
-          '--jobs', '1'] + tests
-  return run_tests.RunTestsCommand.main(args, stream=stream)
+def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
+  if not tests:
+    return
+  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
+  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
+
+  sys.path.append(top_level_dir)
+
+  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
+  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
+                                      output_formatters)
+  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)
 
 
 class _LoggingOutputStream(object):
diff --git a/tools/telemetry/telemetry/unittest/run_tests.py b/tools/telemetry/telemetry/unittest/run_tests.py
index 1a827a9..de825d2 100644
--- a/tools/telemetry/telemetry/unittest/run_tests.py
+++ b/tools/telemetry/telemetry/unittest/run_tests.py
@@ -1,19 +1,114 @@
 # Copyright 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import sys
+
+import logging
+import unittest
 
 from telemetry import decorators
 from telemetry.core import browser_finder
 from telemetry.core import browser_options
 from telemetry.core import command_line
-from telemetry.core import util
-from telemetry.unittest import options_for_unittests
-from telemetry.unittest import browser_test_case
+from telemetry.core import discover
+from telemetry.unittest import json_results
+from telemetry.unittest import progress_reporter
+
+
+class Config(object):
+  def __init__(self, top_level_dir, test_dirs, progress_reporters):
+    self._top_level_dir = top_level_dir
+    self._test_dirs = tuple(test_dirs)
+    self._progress_reporters = tuple(progress_reporters)
+
+  @property
+  def top_level_dir(self):
+    return self._top_level_dir
+
+  @property
+  def test_dirs(self):
+    return self._test_dirs
+
+  @property
+  def progress_reporters(self):
+    return self._progress_reporters
+
+
+def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
+  loader = unittest.defaultTestLoader
+  loader.suiteClass = progress_reporter.TestSuite
+
+  test_suites = []
+  modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
+  for module in modules:
+    if hasattr(module, 'suite'):
+      suite = module.suite()
+    else:
+      suite = loader.loadTestsFromModule(module)
+    if suite.countTestCases():
+      test_suites.append(suite)
+  return test_suites
+
+
+def FilterSuite(suite, predicate):
+  new_suite = suite.__class__()
+  for test in suite:
+    if isinstance(test, unittest.TestSuite):
+      subsuite = FilterSuite(test, predicate)
+      if subsuite.countTestCases():
+        new_suite.addTest(subsuite)
+    else:
+      assert isinstance(test, unittest.TestCase)
+      if predicate(test):
+        new_suite.addTest(test)
+
+  return new_suite
+
+
+def DiscoverTests(search_dirs, top_level_dir, possible_browser,
+                  selected_tests=None, selected_tests_are_exact=False,
+                  run_disabled_tests=False):
+  def IsTestSelected(test):
+    if selected_tests:
+      found = False
+      for name in selected_tests:
+        if selected_tests_are_exact:
+          if name == test.id():
+            found = True
+        else:
+          if name in test.id():
+            found = True
+      if not found:
+        return False
+    if run_disabled_tests:
+      return True
+    # pylint: disable=W0212
+    if not hasattr(test, '_testMethodName'):
+      return True
+    method = getattr(test, test._testMethodName)
+    return decorators.IsEnabled(method, possible_browser)
+
+  wrapper_suite = progress_reporter.TestSuite()
+  for search_dir in search_dirs:
+    wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py'))
+  return FilterSuite(wrapper_suite, IsTestSelected)
+
+
+def RestoreLoggingLevel(func):
+  def _LoggingRestoreWrapper(*args, **kwargs):
+    # Cache the current logging level, this needs to be done before calling
+    # parser.parse_args, which changes logging level based on verbosity
+    # setting.
+    logging_level = logging.getLogger().getEffectiveLevel()
+    try:
+      return func(*args, **kwargs)
+    finally:
+      # Restore logging level, which may be changed in parser.parse_args.
+      logging.getLogger().setLevel(logging_level)
+
+  return _LoggingRestoreWrapper
 
-util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'third_party', 'typ')
-import typ
 
+config = None
 
 
 class RunTestsCommand(command_line.OptparseCommand):
@@ -21,10 +116,6 @@ class RunTestsCommand(command_line.OptparseCommand):
 
   usage = '[test_name ...] [<options>]'
 
-  def __init__(self):
-    super(RunTestsCommand, self).__init__()
-    self.stream = sys.stdout
-
   @classmethod
   def CreateParser(cls):
     options = browser_options.BrowserFinderOptions()
@@ -40,24 +131,22 @@
                       dest='run_disabled_tests', action='store_true',
                       default=False,
                       help='Ignore @Disabled and @Enabled restrictions.')
+    parser.add_option('--retry-limit', type='int',
+                      help='Retry each failure up to N times'
+                           ' to de-flake things.')
     parser.add_option('--exact-test-filter', action='store_true',
                       default=False,
                       help='Treat test filter as exact matches (default is '
                            'substring matches).')
-
-    typ.ArgumentParser.add_option_group(parser,
-                                        "Options for running the tests",
-                                        running=True,
-                                        skip=['-d', '--path', '-v',
-                                              '--verbose'])
-    typ.ArgumentParser.add_option_group(parser,
-                                        "Options for reporting the results",
-                                        reporting=True)
+    json_results.AddOptions(parser)
 
   @classmethod
   def ProcessCommandLineArgs(cls, parser, args):
+    if args.verbosity == 0:
+      logging.getLogger().setLevel(logging.WARN)
+
     # We retry failures by default unless we're running a list of tests
     # explicitly.
-    if not args.retry_limit and not args.positional_args:
+    if args.retry_limit is None and not args.positional_args:
       args.retry_limit = 3
 
     try:
@@ -70,110 +159,50 @@ class RunTestsCommand(command_line.OptparseCommand):
                    'Re-run with --browser=list to see '
                    'available browser types.' % args.browser_type)
 
-  @classmethod
-  def main(cls, args=None, stream=None):  # pylint: disable=W0221
-    # We override the superclass so that we can hook in the 'stream' arg.
-    parser = cls.CreateParser()
-    cls.AddCommandLineArgs(parser)
-    options, positional_args = parser.parse_args(args)
-    options.positional_args = positional_args
-    cls.ProcessCommandLineArgs(parser, options)
-
-    obj = cls()
-    if stream is not None:
-      obj.stream = stream
-    return obj.Run(options)
+    json_results.ValidateArgs(parser, args)
 
   def Run(self, args):
     possible_browser = browser_finder.FindBrowser(args)
 
-    runner = typ.Runner()
-    if self.stream:
-      runner.host.stdout = self.stream
-
-    # Telemetry seems to overload the system if we run one test per core,
-    # so we scale things back a fair amount. Many of the telemetry tests
-    # are long-running, so there's a limit to how much parallelism we
-    # can effectively use for now anyway.
-    #
-    # It should be possible to handle multiple devices if we adjust
-    # the browser_finder code properly, but for now we only handle the one
-    # on Android and ChromeOS.
-    if possible_browser.platform.GetOSName() in ('android', 'chromeos'):
-      runner.args.jobs = 1
-    else:
-      runner.args.jobs = max(int(args.jobs) // 4, 1)
-
-    runner.args.metadata = args.metadata
-    runner.args.passthrough = args.passthrough
-    runner.args.retry_limit = args.retry_limit
-    runner.args.test_results_server = args.test_results_server
-    runner.args.test_type = args.test_type
-    runner.args.timing = args.timing
-    runner.args.top_level_dir = args.top_level_dir
-    runner.args.verbose = args.verbosity
-    runner.args.write_full_results_to = args.write_full_results_to
-    runner.args.write_trace_to = args.write_trace_to
-
-    runner.args.path.append(util.GetUnittestDataDir())
-
-    runner.classifier = GetClassifier(args, possible_browser)
-    runner.context = args
-    runner.setup_fn = _SetUpProcess
-    runner.teardown_fn = _TearDownProcess
-    runner.win_multiprocessing = typ.WinMultiprocessing.importable
-    try:
-      ret, _, _ = runner.run()
-    except KeyboardInterrupt:
-      print >> sys.stderr, "interrupted, exiting"
-      ret = 130
-    return ret
-
-
-def GetClassifier(args, possible_browser):
-  def ClassifyTest(test_set, test):
-    name = test.id()
-    if args.positional_args:
-      if _MatchesSelectedTest(name, args.positional_args,
-                              args.exact_test_filter):
-        assert hasattr(test, '_testMethodName')
-        method = getattr(test, test._testMethodName)  # pylint: disable=W0212
-        if decorators.ShouldBeIsolated(method, possible_browser):
-          test_set.isolated_tests.append(typ.TestInput(name))
-        else:
-          test_set.parallel_tests.append(typ.TestInput(name))
-    else:
-      assert hasattr(test, '_testMethodName')
-      method = getattr(test, test._testMethodName)  # pylint: disable=W0212
-      should_skip, reason = decorators.ShouldSkip(method, possible_browser)
-      if should_skip and not args.run_disabled_tests:
-        test_set.tests_to_skip.append(typ.TestInput(name, msg=reason))
-      elif decorators.ShouldBeIsolated(method, possible_browser):
-        test_set.isolated_tests.append(typ.TestInput(name))
-      else:
-        test_set.parallel_tests.append(typ.TestInput(name))
+    test_suite, result = self.RunOneSuite(possible_browser, args)
+
+    results = [result]
 
-  return ClassifyTest
+    failed_tests = json_results.FailedTestNames(test_suite, result)
+    retry_limit = args.retry_limit
 
+    while retry_limit and failed_tests:
+      args.positional_args = failed_tests
+      args.exact_test_filter = True
 
-def _MatchesSelectedTest(name, selected_tests, selected_tests_are_exact):
-  if not selected_tests:
-    return False
-  if selected_tests_are_exact:
-    return any(name in selected_tests)
-  else:
-    return any(test in name for test in selected_tests)
+      _, result = self.RunOneSuite(possible_browser, args)
+      results.append(result)
 
+      failed_tests = json_results.FailedTestNames(test_suite, result)
+      retry_limit -= 1
 
-def _SetUpProcess(child, context):  # pylint: disable=W0613
-  args = context
-  options_for_unittests.Push(args)
+    full_results = json_results.FullResults(args, test_suite, results)
+    json_results.WriteFullResultsIfNecessary(args, full_results)
 
+    err_occurred, err_str = json_results.UploadFullResultsIfNecessary(
+        args, full_results)
+    if err_occurred:
+      for line in err_str.splitlines():
+        logging.error(line)
+      return 1
 
-def _TearDownProcess(child, context):  # pylint: disable=W0613
-  browser_test_case.teardown_browser()
-  options_for_unittests.Pop()
+    return json_results.ExitCodeFromFullResults(full_results)
 
+  def RunOneSuite(self, possible_browser, args):
+    test_suite = DiscoverTests(config.test_dirs, config.top_level_dir,
+                               possible_browser, args.positional_args,
+                               args.exact_test_filter, args.run_disabled_tests)
+    runner = progress_reporter.TestRunner()
+    result = runner.run(test_suite, config.progress_reporters,
+                        args.repeat_count, args)
+    return test_suite, result
 
-if __name__ == '__main__':
-  RunTestsCommand.main()
+  @classmethod
+  @RestoreLoggingLevel
+  def main(cls, args=None):
+    return super(RunTestsCommand, cls).main(args)
diff --git a/tools/telemetry/telemetry/unittest/run_tests_unittest.py b/tools/telemetry/telemetry/unittest/run_tests_unittest.py
index 6db9331..ee02d93 100644
--- a/tools/telemetry/telemetry/unittest/run_tests_unittest.py
+++ b/tools/telemetry/telemetry/unittest/run_tests_unittest.py
@@ -4,17 +4,11 @@
 
 import unittest
 
+from telemetry import decorators
 from telemetry.core import util
 from telemetry.unittest import run_tests
 
 
-class MockArgs(object):
-  def __init__(self):
-    self.positional_args = []
-    self.exact_test_filter = True
-    self.run_disabled_tests = False
-
-
 class MockPossibleBrowser(object):
   def __init__(self, browser_type, os_name, os_version_name,
                supports_tab_control):
@@ -37,19 +31,25 @@ class MockPlatform(object):
 
 
 class RunTestsUnitTest(unittest.TestCase):
 
+  def setUp(self):
+    self.suite = unittest.TestSuite()
+    self.suite.addTests(run_tests.Discover(
+        util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py'))
+
   def _GetEnabledTests(self, browser_type, os_name, os_version_name,
                        supports_tab_control):
-
-    runner = run_tests.typ.Runner()
-    host = runner.host
-    runner.top_level_dir = util.GetTelemetryDir()
-    runner.args.tests = [host.join(util.GetTelemetryDir(),
-                                   'telemetry', 'unittest', 'disabled_cases.py')]
-    possible_browser = MockPossibleBrowser(
-        browser_type, os_name, os_version_name, supports_tab_control)
-    runner.classifier = run_tests.GetClassifier(MockArgs(), possible_browser)
-    _, test_set = runner.find_tests(runner.args)
-    return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
+    # pylint: disable=W0212
+    def MockPredicate(test):
+      method = getattr(test, test._testMethodName)
+      return decorators.IsEnabled(method, MockPossibleBrowser(
+          browser_type, os_name, os_version_name, supports_tab_control))
+
+    enabled_tests = set()
+    for i in run_tests.FilterSuite(self.suite, MockPredicate)._tests:
+      for j in i:
+        for k in j:
+          enabled_tests.add(k._testMethodName)
+    return enabled_tests
 
   def testSystemMacMavericks(self):
     self.assertEquals(
diff --git a/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py b/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
index faf975ce..f82583b 100644
--- a/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
+++ b/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
@@ -15,7 +15,6 @@ from telemetry.timeline import model as model_module
 from telemetry.timeline import async_slice
 from telemetry.unittest import options_for_unittests
 from telemetry.unittest import page_test_test_case
-from telemetry.unittest import browser_test_case
 from telemetry.value import scalar
 from telemetry.web_perf import timeline_based_measurement as tbm_module
 from telemetry.web_perf import timeline_interaction_record as tir_module
@@ -202,12 +201,9 @@ class TestTimelinebasedMeasurementPage(page_module.Page):
 
 class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
 
   def setUp(self):
-    browser_test_case.teardown_browser()
     self._options = options_for_unittests.GetCopy()
     self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
 
-  # This test is flaky when run in parallel on the mac: crbug.com/426676
-  @benchmark.Disabled('mac')
   def testSmoothnessTimelineBasedMeasurementForSmoke(self):
     ps = self.CreateEmptyPageSet()
     ps.AddPage(TestTimelinebasedMeasurementPage(
@@ -225,8 +221,6 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
                               'DrawerAnimation-frame_time_discrepancy')
     self.assertEquals(len(v), 1)
 
-  # This test is flaky when run in parallel on the mac: crbug.com/426676
-  @benchmark.Disabled('mac')
  def testFastTimelineBasedMeasurementForSmoke(self):
     ps = self.CreateEmptyPageSet()
     ps.AddPage(TestTimelinebasedMeasurementPage(
@@ -262,8 +256,7 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
     self.assertGreaterEqual(v[0].value, 200.0)
 
   # Disabled since mainthread_jank metric is not supported on windows platform.
-  # Also, flaky on the mac when run in parallel: crbug.com/426676
-  @benchmark.Disabled('win', 'mac')
+  @benchmark.Disabled('win')
   def testMainthreadJankTimelineBasedMeasurement(self):
     ps = self.CreateEmptyPageSet()
     ps.AddPage(TestTimelinebasedMeasurementPage(
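For readers who have not seen the flakiness-dashboard format before, this is roughly the shape of the full_results.json payload that `json_results.FullResults()` in the diff above assembles. Every test name, count, and the builder name below is invented for illustration:

```python
# Illustrative only: Chromium JSON test results (version 3) payload as built
# by json_results.FullResults(). Names, counts and timestamps are made up.
example_full_results = {
    'version': 3,
    'interrupted': False,
    'path_delimiter': '.',
    'seconds_since_epoch': 1415201751.0,
    'builder_name': 'Mac10.9 Tests (dbg)',
    'num_failures_by_type': {'FAIL': 1, 'PASS': 1, 'SKIP': 1},
    'tests': {
        # Test ids are split on '.' into a trie by _AddPathToTrie().
        'telemetry': {
            'unittest': {
                'disabled_cases': {
                    'DisabledCases': {  # hypothetical class name
                        'testAlwaysPasses': {'expected': 'PASS',
                                             'actual': 'PASS'},
                        # 'actual' lists one entry per run, including retries.
                        'testFlaky': {'expected': 'PASS',
                                      'actual': 'FAIL FAIL FAIL',
                                      'is_unexpected': True},
                        'testSkipped': {'expected': 'SKIP',
                                        'actual': 'SKIP'},
                    }
                }
            }
        }
    },
}
```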