author | dpranke <dpranke@chromium.org> | 2014-11-06 16:54:43 -0800
---|---|---
committer | Commit bot <commit-bot@chromium.org> | 2014-11-07 00:55:02 +0000
commit | 122c806963c35931970ea27e58ebb71e408c351b |
tree | 263a63a15d89a2363d074d338c8226c3e4596ad4 |
parent | 4c3b71b800b5cb02a6298e590b2c551c6ec5fe61 |
Attempt to switch telemetry to typ again.
TBR=dtu@chromium.org, phajdan.jr@chromium.org
BUG=388256
Review URL: https://codereview.chromium.org/708443003
Cr-Commit-Position: refs/heads/master@{#303141}
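
The gist of the change: both `tools/perf/run_tests` and `tools/telemetry/run_tests` stop importing telemetry's in-tree gtest-style runner and instead re-exec `telemetry/unittest/run_tests.py` in a subprocess, with the telemetry directory prepended to `PYTHONPATH`, and that script drives everything through `typ`. A condensed sketch of the wrapper pattern, paraphrased from the perf wrapper in the diff below (error handling and the exit-code print are simplified):

```python
#!/usr/bin/env python
# Sketch of the wrapper pattern both run_tests scripts now follow
# (condensed from the diff below).
import os
import subprocess
import sys

if __name__ == '__main__':
  this_dir = os.path.dirname(os.path.realpath(__file__))
  telemetry_dir = os.path.realpath(os.path.join(this_dir, '..', 'telemetry'))

  # Make the telemetry package importable in the child process.
  env = os.environ.copy()
  env['PYTHONPATH'] = os.pathsep.join(
      filter(None, [env.get('PYTHONPATH'), telemetry_dir]))

  # Delegate to the typ-based runner, pointing it at this directory's tests.
  run_tests_py = os.path.join(telemetry_dir, 'telemetry', 'unittest',
                              'run_tests.py')
  argv = ['--top-level-dir', this_dir] + sys.argv[1:]
  sys.exit(subprocess.call([sys.executable, run_tests_py] + argv, env=env))
```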
-rwxr-xr-x | tools/perf/run_tests | 24
-rw-r--r-- | tools/telemetry/PRESUBMIT.py | 4
-rw-r--r-- | tools/telemetry/bootstrap_deps | 47
-rwxr-xr-x | tools/telemetry/run_tests | 23
-rw-r--r-- | tools/telemetry/telemetry/decorators.py | 78
-rw-r--r-- | tools/telemetry/telemetry/unittest/browser_test_case.py | 45
-rw-r--r-- | tools/telemetry/telemetry/unittest/json_results.py | 247
-rw-r--r-- | tools/telemetry/telemetry/unittest/run_chromeos_tests.py | 44
-rw-r--r-- | tools/telemetry/telemetry/unittest/run_tests.py | 259
-rw-r--r-- | tools/telemetry/telemetry/unittest/run_tests_unittest.py | 36
-rw-r--r-- | tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py | 9
11 files changed, 306 insertions, 510 deletions
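
Most of the new code lands in `telemetry/unittest/run_tests.py`, whose `RunTestsCommand.Run` hands the discovered tests to `typ`. A condensed, illustrative version of that hand-off (the attribute names are the ones used in the diff; `RunWithTyp` is just a wrapper name for this sketch):

```python
# Condensed from RunTestsCommand.Run in the diff below: how the telemetry
# runner hands control to typ. Illustrative, not a drop-in replacement.
import sys

import typ  # third_party/typ, put on sys.path by this CL via util.AddDirToPythonPath


def RunWithTyp(args, possible_browser, classifier, setup_fn, teardown_fn,
               stream=sys.stdout):
  runner = typ.Runner()
  runner.host.stdout = stream

  # Telemetry tests are heavyweight, so run far fewer jobs than cores,
  # and only one job when a single device (Android/ChromeOS) is the target.
  if possible_browser.platform.GetOSName() in ('android', 'chromeos'):
    runner.args.jobs = 1
  else:
    runner.args.jobs = max(int(args.jobs) // 4, 1)

  runner.args.top_level_dir = args.top_level_dir
  runner.args.retry_limit = args.retry_limit

  # The classifier decides, per test, whether it runs in parallel, runs
  # isolated, or is skipped; setup/teardown hooks run once per worker process.
  runner.classifier = classifier
  runner.context = args
  runner.setup_fn = setup_fn
  runner.teardown_fn = teardown_fn
  runner.win_multiprocessing = typ.WinMultiprocessing.importable

  ret, _, _ = runner.run()
  return ret
```

The job-count scaling mirrors the comment in the diff: running one job per core overloads the system, and the remote-device configurations only drive a single browser instance for now.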
diff --git a/tools/perf/run_tests b/tools/perf/run_tests index 6497020..6b4d309 100755 --- a/tools/perf/run_tests +++ b/tools/perf/run_tests @@ -9,17 +9,23 @@ This script DOES NOT run benchmarks. run_benchmark does that. """ import os +import subprocess import sys -sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry')) -from telemetry.unittest import gtest_progress_reporter -from telemetry.unittest import run_tests +if __name__ == '__main__': + perf_dir = os.path.dirname(os.path.realpath(__file__)) + telemetry_dir = os.path.realpath(os.path.join(perf_dir, '..', 'telemetry')) + env = os.environ.copy() + if 'PYTHONPATH' in env: + env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir + else: + env['PYTHONPATH'] = telemetry_dir -if __name__ == '__main__': - base_dir = os.path.dirname(os.path.realpath(__file__)) - progress_reporters = [ - gtest_progress_reporter.GTestProgressReporter(sys.stdout)] - run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters) - sys.exit(run_tests.RunTestsCommand.main()) + path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest', + 'run_tests.py') + argv = ['--top-level-dir', perf_dir] + sys.argv[1:] + ret = subprocess.call([sys.executable, path_to_run_tests] + argv, env=env) + print 'run_tests exiting, ret = %d' % ret + sys.exit(ret) diff --git a/tools/telemetry/PRESUBMIT.py b/tools/telemetry/PRESUBMIT.py index 52c8806..c424658 100644 --- a/tools/telemetry/PRESUBMIT.py +++ b/tools/telemetry/PRESUBMIT.py @@ -33,7 +33,9 @@ def _CommonChecks(input_api, output_api): return results def GetPathsToPrepend(input_api): - return [input_api.PresubmitLocalPath()] + return [input_api.PresubmitLocalPath(), + os.path.join(input_api.PresubmitLocalPath(), os.path.pardir, + os.path.pardir, 'third_party', 'typ')] def RunWithPrependedPath(prepended_path, fn, *args): old_path = sys.path diff --git a/tools/telemetry/bootstrap_deps b/tools/telemetry/bootstrap_deps index 7359ee9..d949ab8 100644 --- a/tools/telemetry/bootstrap_deps +++ b/tools/telemetry/bootstrap_deps @@ -5,36 +5,23 @@ # This file specifies dependencies required to bootstrap Telemetry. It is in a # minimal version of the format used by other DEPS files that gclient can read, # but it should only be used to bootstrap Telemetry *outside* of a normal -# Chrome checkout. +# Chrome checkout. In particular, the normal 'value' part of the python +# dict is not used and hence does not contain real URLs for the repos. 
deps = { - "src/tools/telemetry": - "https://src.chromium.org/chrome/trunk/src/tools/telemetry", - - "src/build/android": - "https://src.chromium.org/chrome/trunk/src/build/android", - "src/build/util": - "https://src.chromium.org/chrome/trunk/src/build/util", - "src/chrome/test/data/extensions/profiles": - "https://src.chromium.org/chrome/trunk/src/chrome/test/data/extensions/profiles", - "src/third_party/android_testrunner": - "https://src.chromium.org/chrome/trunk/src/third_party/android_testrunner", - "src/third_party/android_tools/sdk/platform-tools": - "https://src.chromium.org/chrome/trunk/src/third_party/android_tools/sdk/platform-tools", - "src/third_party/chromite/ssh_keys": - "https://src.chromium.org/chrome/trunk/src/third_party/chromite/ssh_keys", - "src/third_party/flot/jquery.flot.min.js": - "https://src.chromium.org/chrome/trunk/src/third_party/flot/jquery.flot.min.js", - "src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js": - "https://src.chromium.org/blink/trunk/PerformanceTests/resources/jquery.tablesorter.min.js", - "src/third_party/WebKit/PerformanceTests/resources/statistics.js": - "https://src.chromium.org/blink/trunk/PerformanceTests/resources/statistics.js", - "src/third_party/webpagereplay": - "https://web-page-replay.googlecode.com/svn/trunk", - "src/third_party/trace-viewer": - "https://trace-viewer.googlecode.com/svn/trunk", - "src/tools/crx_id": - "https://src.chromium.org/chrome/trunk/src/tools/crx_id", - "src/tools/perf/unit-info.json": - "https://src.chromium.org/chrome/trunk/src/tools/perf/unit-info.json" + "src/tools/telemetry": "", + "src/build/android": "", + "src/build/util": "", + "src/chrome/test/data/extensions/profiles": "", + "src/third_party/android_testrunner": "", + "src/third_party/android_tools/sdk/platform-tools": "", + "src/third_party/chromite/ssh_keys": "", + "src/third_party/flot/jquery.flot.min.js": "", + "src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js": "", + "src/third_party/WebKit/PerformanceTests/resources/statistics.js": "", + "src/third_party/webpagereplay": "", + "src/third_party/trace-viewer": "", + "src/third_party/typ": "", + "src/tools/crx_id": "", + "src/tools/perf/unit-info.json": "", } diff --git a/tools/telemetry/run_tests b/tools/telemetry/run_tests index 96a0835..fbae923 100755 --- a/tools/telemetry/run_tests +++ b/tools/telemetry/run_tests @@ -5,14 +5,21 @@ import os import sys - -from telemetry.unittest import gtest_progress_reporter -from telemetry.unittest import run_tests +import subprocess if __name__ == '__main__': - base_dir = os.path.dirname(os.path.realpath(__file__)) - progress_reporters = [ - gtest_progress_reporter.GTestProgressReporter(sys.stdout)] - run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters) - sys.exit(run_tests.RunTestsCommand.main()) + telemetry_dir = os.path.dirname(os.path.realpath(__file__)) + + env = os.environ.copy() + if 'PYTHONPATH' in env: + env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir + else: + env['PYTHONPATH'] = telemetry_dir + + path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest', + 'run_tests.py') + argv = ['--top-level-dir', telemetry_dir] + sys.argv[1:] + ret = subprocess.call([sys.executable, path_to_run_tests] + argv, env=env) + print 'run_tests exiting, ret = %d' % ret + sys.exit(ret) diff --git a/tools/telemetry/telemetry/decorators.py b/tools/telemetry/telemetry/decorators.py index 0ac1408..9897984 100644 --- a/tools/telemetry/telemetry/decorators.py +++ 
b/tools/telemetry/telemetry/decorators.py @@ -92,6 +92,31 @@ def Enabled(*args): return _Enabled +# TODO(dpranke): Remove if we don't need this. +def Isolated(*args): + """Decorator for noting that tests must be run in isolation. + + The test will be run by itself (not concurrently with any other tests) + if ANY of the args match the browser type, OS name, or OS version.""" + def _Isolated(func): + if not isinstance(func, types.FunctionType): + func._isolated_strings = isolated_strings + return func + @functools.wraps(func) + def wrapper(*args, **kwargs): + func(*args, **kwargs) + wrapper._isolated_strings = isolated_strings + return wrapper + if len(args) == 1 and callable(args[0]): + isolated_strings = [] + return _Isolated(args[0]) + isolated_strings = list(args) + for isolated_string in isolated_strings: + # TODO(tonyg): Validate that these strings are recognized. + assert isinstance(isolated_string, str), 'Isolated accepts a list of strs' + return _Isolated + + def IsEnabled(test, possible_browser): """Returns True iff |test| is enabled given the |possible_browser|. @@ -102,6 +127,11 @@ def IsEnabled(test, possible_browser): _enabled_strings attributes. possible_browser: A PossibleBrowser to check whether |test| may run against. """ + should_skip, _ = ShouldSkip(test, possible_browser) + return not should_skip + +def ShouldSkip(test, possible_browser): + """Returns whether the test should be skipped and the reason for it.""" platform_attributes = [a.lower() for a in [ possible_browser.browser_type, possible_browser.platform.GetOSName(), @@ -120,28 +150,46 @@ def IsEnabled(test, possible_browser): if hasattr(test, '_disabled_strings'): disabled_strings = test._disabled_strings if not disabled_strings: - return False # No arguments to @Disabled means always disable. + return True, '' # No arguments to @Disabled means always disable. for disabled_string in disabled_strings: if disabled_string in platform_attributes: - print ( - 'Skipping %s because it is disabled for %s. ' - 'You are running %s.' % (name, - ' and '.join(disabled_strings), - ' '.join(platform_attributes))) - return False + return (True, + 'Skipping %s because it is disabled for %s. ' + 'You are running %s.' % (name, + ' and '.join(disabled_strings), + ' '.join(platform_attributes))) if hasattr(test, '_enabled_strings'): enabled_strings = test._enabled_strings if not enabled_strings: - return True # No arguments to @Enabled means always enable. + return False, None # No arguments to @Enabled means always enable. for enabled_string in enabled_strings: if enabled_string in platform_attributes: + return False, None + return (True, + 'Skipping %s because it is only enabled for %s. ' + 'You are running %s.' % (name, + ' or '.join(enabled_strings), + ' '.join(platform_attributes))) + return False, None + + return False, None + +def ShouldBeIsolated(test, possible_browser): + platform_attributes = [a.lower() for a in [ + possible_browser.browser_type, + possible_browser.platform.GetOSName(), + possible_browser.platform.GetOSVersionName(), + ]] + if possible_browser.supports_tab_control: + platform_attributes.append('has tabs') + + if hasattr(test, '_isolated_strings'): + isolated_strings = test._isolated_strings + if not isolated_strings: + return True # No arguments to @Isolated means always isolate. + for isolated_string in isolated_strings: + if isolated_string in platform_attributes: return True - print ( - 'Skipping %s because it is only enabled for %s. ' - 'You are running %s.' 
% (name, - ' or '.join(enabled_strings), - ' '.join(platform_attributes))) return False - - return True + return False diff --git a/tools/telemetry/telemetry/unittest/browser_test_case.py b/tools/telemetry/telemetry/unittest/browser_test_case.py index ffb58ce..c5d6e2d 100644 --- a/tools/telemetry/telemetry/unittest/browser_test_case.py +++ b/tools/telemetry/telemetry/unittest/browser_test_case.py @@ -9,28 +9,49 @@ from telemetry.core import browser_finder from telemetry.unittest import options_for_unittests from telemetry.util import path +current_browser_options = None +current_browser = None + + +def teardown_browser(): + global current_browser + global current_browser_options + + if current_browser: + current_browser.Close() + current_browser = None + current_browser_options = None + class BrowserTestCase(unittest.TestCase): @classmethod def setUpClass(cls): + global current_browser + global current_browser_options + options = options_for_unittests.GetCopy() + cls.CustomizeBrowserOptions(options.browser_options) - browser_to_create = browser_finder.FindBrowser(options) - if not browser_to_create: - raise Exception('No browser found, cannot continue test.') + if not current_browser or (current_browser_options != + options.browser_options): + if current_browser: + teardown_browser() - cls._browser = None - try: - cls._browser = browser_to_create.Create(options) - except: - cls.tearDownClass() - raise + browser_to_create = browser_finder.FindBrowser(options) + if not browser_to_create: + raise Exception('No browser found, cannot continue test.') + + try: + current_browser = browser_to_create.Create(options) + current_browser_options = options.browser_options + except: + cls.tearDownClass() + raise + cls._browser = current_browser @classmethod def tearDownClass(cls): - if cls._browser: - cls._browser.Close() - cls._browser = None + pass @classmethod def CustomizeBrowserOptions(cls, options): diff --git a/tools/telemetry/telemetry/unittest/json_results.py b/tools/telemetry/telemetry/unittest/json_results.py deleted file mode 100644 index 256e5c9..0000000 --- a/tools/telemetry/telemetry/unittest/json_results.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import functools -import json -import re -import time -import unittest -import urllib2 - - -# TODO(dpranke): This code is largely cloned from, and redundant with, -# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic -# in test-webkitpy and run-webkit-tests. We should consolidate the -# python TestResult parsing/converting/uploading code as much as possible. 
- - -def AddOptions(parser): - parser.add_option('--metadata', action='append', default=[], - help=('optional key=value metadata that will be stored ' - 'in the results files (can be used for revision ' - 'numbers, etc.)')) - parser.add_option('--write-full-results-to', metavar='FILENAME', - action='store', - help='The path to write the list of full results to.') - parser.add_option('--builder-name', - help='The name of the builder as shown on the waterfall.') - parser.add_option('--master-name', - help='The name of the buildbot master.') - parser.add_option("--test-results-server", default="", - help=('If specified, upload full_results.json file to ' - 'this server.')) - parser.add_option('--test-type', - help=('Name of test type / step on the waterfall ' - '(e.g., "telemetry_unittests").')) - - -def ValidateArgs(parser, args): - for val in args.metadata: - if '=' not in val: - parser.error('Error: malformed metadata "%s"' % val) - - if (args.test_results_server and - (not args.builder_name or not args.master_name or not args.test_type)): - parser.error('Error: --builder-name, --master-name, and --test-type ' - 'must be specified along with --test-result-server.') - - -def WriteFullResultsIfNecessary(args, full_results): - if not args.write_full_results_to: - return - - with open(args.write_full_results_to, 'w') as fp: - json.dump(full_results, fp, indent=2) - fp.write("\n") - - -def UploadFullResultsIfNecessary(args, full_results): - if not args.test_results_server: - return False, '' - - url = 'http://%s/testfile/upload' % args.test_results_server - attrs = [('builder', args.builder_name), - ('master', args.master_name), - ('testtype', args.test_type)] - content_type, data = _EncodeMultiPartFormData(attrs, full_results) - return _UploadData(url, data, content_type) - - -TEST_SEPARATOR = '.' - - -def FullResults(args, suite, results): - """Convert the unittest results to the Chromium JSON test result format. - - This matches run-webkit-tests (the layout tests) and the flakiness dashboard. - """ - - full_results = {} - full_results['interrupted'] = False - full_results['path_delimiter'] = TEST_SEPARATOR - full_results['version'] = 3 - full_results['seconds_since_epoch'] = time.time() - full_results['builder_name'] = args.builder_name or '' - for md in args.metadata: - key, val = md.split('=', 1) - full_results[key] = val - - all_test_names = AllTestNames(suite) - sets_of_passing_test_names = map(PassingTestNames, results) - sets_of_failing_test_names = map(functools.partial(FailedTestNames, suite), - results) - - # TODO(crbug.com/405379): This handles tests that are skipped via the - # unittest skip decorators (like skipUnless). The tests that are skipped via - # telemetry's decorators package are not included in the test suite at all so - # we need those to be passed in in order to include them. 
- skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0] - - sets_of_failing_test_names[0]) - - num_tests = len(all_test_names) - num_failures = NumFailuresAfterRetries(suite, results) - num_skips = len(skipped_tests) - num_passes = num_tests - num_failures - num_skips - full_results['num_failures_by_type'] = { - 'FAIL': num_failures, - 'PASS': num_passes, - 'SKIP': num_skips, - } - - full_results['tests'] = {} - - for test_name in all_test_names: - if test_name in skipped_tests: - value = { - 'expected': 'SKIP', - 'actual': 'SKIP', - } - else: - value = { - 'expected': 'PASS', - 'actual': ActualResultsForTest(test_name, - sets_of_failing_test_names, - sets_of_passing_test_names), - } - if value['actual'].endswith('FAIL'): - value['is_unexpected'] = True - _AddPathToTrie(full_results['tests'], test_name, value) - - return full_results - - -def ActualResultsForTest(test_name, sets_of_failing_test_names, - sets_of_passing_test_names): - actuals = [] - for retry_num in range(len(sets_of_failing_test_names)): - if test_name in sets_of_failing_test_names[retry_num]: - actuals.append('FAIL') - elif test_name in sets_of_passing_test_names[retry_num]: - assert ((retry_num == 0) or - (test_name in sets_of_failing_test_names[retry_num - 1])), ( - 'We should not have run a test that did not fail ' - 'on the previous run.') - actuals.append('PASS') - - assert actuals, 'We did not find any result data for %s.' % test_name - return ' '.join(actuals) - - -def ExitCodeFromFullResults(full_results): - return 1 if full_results['num_failures_by_type']['FAIL'] else 0 - - -def AllTestNames(suite): - test_names = [] - # _tests is protected pylint: disable=W0212 - for test in suite._tests: - if isinstance(test, unittest.suite.TestSuite): - test_names.extend(AllTestNames(test)) - else: - test_names.append(test.id()) - return test_names - - -def NumFailuresAfterRetries(suite, results): - return len(FailedTestNames(suite, results[-1])) - - -def FailedTestNames(suite, result): - failed_test_names = set() - for test, error in result.failures + result.errors: - if isinstance(test, unittest.TestCase): - failed_test_names.add(test.id()) - elif isinstance(test, unittest.suite._ErrorHolder): # pylint: disable=W0212 - # If there's an error in setUpClass or setUpModule, unittest gives us an - # _ErrorHolder object. We can parse the object's id for the class or - # module that failed, then find all tests in that class or module. 
- match = re.match('setUp[a-zA-Z]+ \\((.+)\\)', test.id()) - assert match, "Don't know how to retry after this error:\n%s" % error - module_or_class = match.groups()[0] - failed_test_names |= _FindChildren(module_or_class, AllTestNames(suite)) - else: - assert False, 'Unknown test type: %s' % test.__class__ - return failed_test_names - - -def _FindChildren(parent, potential_children): - children = set() - parent_name_parts = parent.split('.') - for potential_child in potential_children: - child_name_parts = potential_child.split('.') - if parent_name_parts == child_name_parts[:len(parent_name_parts)]: - children.add(potential_child) - return children - - -def PassingTestNames(result): - return set(test.id() for test in result.successes) - - -def _AddPathToTrie(trie, path, value): - if TEST_SEPARATOR not in path: - trie[path] = value - return - directory, rest = path.split(TEST_SEPARATOR, 1) - if directory not in trie: - trie[directory] = {} - _AddPathToTrie(trie[directory], rest, value) - - -def _EncodeMultiPartFormData(attrs, full_results): - # Cloned from webkitpy/common/net/file_uploader.py - BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' - CRLF = '\r\n' - lines = [] - - for key, value in attrs: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"' % key) - lines.append('') - lines.append(value) - - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="file"; ' - 'filename="full_results.json"') - lines.append('Content-Type: application/json') - lines.append('') - lines.append(json.dumps(full_results)) - - lines.append('--' + BOUNDARY + '--') - lines.append('') - body = CRLF.join(lines) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - - -def _UploadData(url, data, content_type): - request = urllib2.Request(url, data, {'Content-Type': content_type}) - try: - response = urllib2.urlopen(request) - if response.code == 200: - return False, '' - return True, ('Uploading the JSON results failed with %d: "%s"' % - (response.code, response.read())) - except Exception as e: - return True, 'Uploading the JSON results raised "%s"\n' % str(e) diff --git a/tools/telemetry/telemetry/unittest/run_chromeos_tests.py b/tools/telemetry/telemetry/unittest/run_chromeos_tests.py index 0912d84..abf6f38 100644 --- a/tools/telemetry/telemetry/unittest/run_chromeos_tests.py +++ b/tools/telemetry/telemetry/unittest/run_chromeos_tests.py @@ -3,9 +3,7 @@ # found in the LICENSE file. import logging import os -import sys -from telemetry.unittest import gtest_progress_reporter from telemetry.unittest import run_tests from telemetry.core import util @@ -14,35 +12,29 @@ def RunTestsForChromeOS(browser_type, unit_tests, perf_tests): stream = _LoggingOutputStream() error_string = '' - logging.info('Running telemetry unit tests with browser_type "%s".' % - browser_type) - ret = _RunOneSetOfTests(browser_type, 'telemetry', - os.path.join('telemetry', 'telemetry'), - unit_tests, stream) - if ret: - error_string += 'The unit tests failed.\n' + if unit_tests: + logging.info('Running telemetry unit tests with browser_type "%s".' % + browser_type) + ret = _RunOneSetOfTests(browser_type, 'telemetry', unit_tests, stream) + if ret: + error_string += 'The unit tests failed.\n' - logging.info('Running telemetry perf tests with browser_type "%s".' 
% - browser_type) - ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream) - if ret: - error_string = 'The perf tests failed.\n' + if perf_tests: + logging.info('Running telemetry perf tests with browser_type "%s".' % + browser_type) + ret = _RunOneSetOfTests(browser_type, 'perf', perf_tests, stream) + if ret: + error_string = 'The perf tests failed.\n' return error_string -def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream): - if not tests: - return - top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir) - sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir) - - sys.path.append(top_level_dir) - - output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)] - run_tests.config = run_tests.Config(top_level_dir, [sub_dir], - output_formatters) - return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests) +def _RunOneSetOfTests(browser_type, dir_name, tests, stream): + top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', dir_name) + args = ['--browser', browser_type, + '--top-level-dir', top_level_dir, + '--jobs', '1'] + tests + return run_tests.RunTestsCommand.main(args, stream=stream) class _LoggingOutputStream(object): diff --git a/tools/telemetry/telemetry/unittest/run_tests.py b/tools/telemetry/telemetry/unittest/run_tests.py index de825d2..0f41ec7 100644 --- a/tools/telemetry/telemetry/unittest/run_tests.py +++ b/tools/telemetry/telemetry/unittest/run_tests.py @@ -1,114 +1,19 @@ # Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. - -import logging -import unittest +import sys from telemetry import decorators from telemetry.core import browser_finder from telemetry.core import browser_options from telemetry.core import command_line -from telemetry.core import discover -from telemetry.unittest import json_results -from telemetry.unittest import progress_reporter - - -class Config(object): - def __init__(self, top_level_dir, test_dirs, progress_reporters): - self._top_level_dir = top_level_dir - self._test_dirs = tuple(test_dirs) - self._progress_reporters = tuple(progress_reporters) - - @property - def top_level_dir(self): - return self._top_level_dir - - @property - def test_dirs(self): - return self._test_dirs - - @property - def progress_reporters(self): - return self._progress_reporters - - -def Discover(start_dir, top_level_dir=None, pattern='test*.py'): - loader = unittest.defaultTestLoader - loader.suiteClass = progress_reporter.TestSuite - - test_suites = [] - modules = discover.DiscoverModules(start_dir, top_level_dir, pattern) - for module in modules: - if hasattr(module, 'suite'): - suite = module.suite() - else: - suite = loader.loadTestsFromModule(module) - if suite.countTestCases(): - test_suites.append(suite) - return test_suites - - -def FilterSuite(suite, predicate): - new_suite = suite.__class__() - for test in suite: - if isinstance(test, unittest.TestSuite): - subsuite = FilterSuite(test, predicate) - if subsuite.countTestCases(): - new_suite.addTest(subsuite) - else: - assert isinstance(test, unittest.TestCase) - if predicate(test): - new_suite.addTest(test) - - return new_suite - - -def DiscoverTests(search_dirs, top_level_dir, possible_browser, - selected_tests=None, selected_tests_are_exact=False, - run_disabled_tests=False): - def IsTestSelected(test): - if selected_tests: - found = False - for name in selected_tests: - if 
selected_tests_are_exact: - if name == test.id(): - found = True - else: - if name in test.id(): - found = True - if not found: - return False - if run_disabled_tests: - return True - # pylint: disable=W0212 - if not hasattr(test, '_testMethodName'): - return True - method = getattr(test, test._testMethodName) - return decorators.IsEnabled(method, possible_browser) - - wrapper_suite = progress_reporter.TestSuite() - for search_dir in search_dirs: - wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py')) - return FilterSuite(wrapper_suite, IsTestSelected) - - -def RestoreLoggingLevel(func): - def _LoggingRestoreWrapper(*args, **kwargs): - # Cache the current logging level, this needs to be done before calling - # parser.parse_args, which changes logging level based on verbosity - # setting. - logging_level = logging.getLogger().getEffectiveLevel() - try: - return func(*args, **kwargs) - finally: - # Restore logging level, which may be changed in parser.parse_args. - logging.getLogger().setLevel(logging_level) - - return _LoggingRestoreWrapper +from telemetry.core import util +from telemetry.unittest import options_for_unittests +from telemetry.unittest import browser_test_case +util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'third_party', 'typ') -config = None +import typ class RunTestsCommand(command_line.OptparseCommand): @@ -116,6 +21,10 @@ class RunTestsCommand(command_line.OptparseCommand): usage = '[test_name ...] [<options>]' + def __init__(self): + super(RunTestsCommand, self).__init__() + self.stream = sys.stdout + @classmethod def CreateParser(cls): options = browser_options.BrowserFinderOptions() @@ -131,22 +40,24 @@ class RunTestsCommand(command_line.OptparseCommand): dest='run_disabled_tests', action='store_true', default=False, help='Ignore @Disabled and @Enabled restrictions.') - parser.add_option('--retry-limit', type='int', - help='Retry each failure up to N times' - ' to de-flake things.') parser.add_option('--exact-test-filter', action='store_true', default=False, help='Treat test filter as exact matches (default is ' 'substring matches).') - json_results.AddOptions(parser) + + typ.ArgumentParser.add_option_group(parser, + "Options for running the tests", + running=True, + skip=['-d', '--path', '-v', + '--verbose']) + typ.ArgumentParser.add_option_group(parser, + "Options for reporting the results", + reporting=True) @classmethod def ProcessCommandLineArgs(cls, parser, args): - if args.verbosity == 0: - logging.getLogger().setLevel(logging.WARN) - # We retry failures by default unless we're running a list of tests # explicitly. - if args.retry_limit is None and not args.positional_args: + if not args.retry_limit and not args.positional_args: args.retry_limit = 3 try: @@ -159,50 +70,112 @@ class RunTestsCommand(command_line.OptparseCommand): 'Re-run with --browser=list to see ' 'available browser types.' % args.browser_type) - json_results.ValidateArgs(parser, args) + @classmethod + def main(cls, args=None, stream=None): # pylint: disable=W0221 + # We override the superclass so that we can hook in the 'stream' arg. 
+ parser = cls.CreateParser() + cls.AddCommandLineArgs(parser) + options, positional_args = parser.parse_args(args) + options.positional_args = positional_args + cls.ProcessCommandLineArgs(parser, options) + + obj = cls() + if stream is not None: + obj.stream = stream + return obj.Run(options) def Run(self, args): possible_browser = browser_finder.FindBrowser(args) - test_suite, result = self.RunOneSuite(possible_browser, args) - - results = [result] + runner = typ.Runner() + if self.stream: + runner.host.stdout = self.stream + + # Telemetry seems to overload the system if we run one test per core, + # so we scale things back a fair amount. Many of the telemetry tests + # are long-running, so there's a limit to how much parallelism we + # can effectively use for now anyway. + # + # It should be possible to handle multiple devices if we adjust + # the browser_finder code properly, but for now we only handle the one + # on Android and ChromeOS. + if possible_browser.platform.GetOSName() in ('android', 'chromeos'): + runner.args.jobs = 1 + else: + runner.args.jobs = max(int(args.jobs) // 4, 1) + + runner.args.metadata = args.metadata + runner.args.passthrough = args.passthrough + runner.args.retry_limit = args.retry_limit + runner.args.test_results_server = args.test_results_server + runner.args.test_type = args.test_type + runner.args.timing = args.timing + runner.args.top_level_dir = args.top_level_dir + runner.args.verbose = args.verbosity + runner.args.write_full_results_to = args.write_full_results_to + runner.args.write_trace_to = args.write_trace_to + + runner.args.path.append(util.GetUnittestDataDir()) + + runner.classifier = GetClassifier(args, possible_browser) + runner.context = args + runner.setup_fn = _SetUpProcess + runner.teardown_fn = _TearDownProcess + runner.win_multiprocessing = typ.WinMultiprocessing.importable + try: + ret, _, _ = runner.run() + except KeyboardInterrupt: + print >> sys.stderr, "interrupted, exiting" + ret = 130 + return ret + + +def GetClassifier(args, possible_browser): + def ClassifyTest(test_set, test): + name = test.id() + if args.positional_args: + if _MatchesSelectedTest(name, args.positional_args, + args.exact_test_filter): + assert hasattr(test, '_testMethodName') + method = getattr(test, test._testMethodName) # pylint: disable=W0212 + if decorators.ShouldBeIsolated(method, possible_browser): + test_set.isolated_tests.append(typ.TestInput(name)) + else: + test_set.parallel_tests.append(typ.TestInput(name)) + else: + assert hasattr(test, '_testMethodName') + method = getattr(test, test._testMethodName) # pylint: disable=W0212 + should_skip, reason = decorators.ShouldSkip(method, possible_browser) + if should_skip and not args.run_disabled_tests: + test_set.tests_to_skip.append(typ.TestInput(name, msg=reason)) + elif decorators.ShouldBeIsolated(method, possible_browser): + test_set.isolated_tests.append(typ.TestInput(name)) + else: + test_set.parallel_tests.append(typ.TestInput(name)) - failed_tests = json_results.FailedTestNames(test_suite, result) - retry_limit = args.retry_limit + return ClassifyTest - while retry_limit and failed_tests: - args.positional_args = failed_tests - args.exact_test_filter = True - _, result = self.RunOneSuite(possible_browser, args) - results.append(result) +def _MatchesSelectedTest(name, selected_tests, selected_tests_are_exact): + if not selected_tests: + return False + if selected_tests_are_exact: + return any(name in selected_tests) + else: + return any(test in name for test in selected_tests) - failed_tests = 
json_results.FailedTestNames(test_suite, result) - retry_limit -= 1 - full_results = json_results.FullResults(args, test_suite, results) - json_results.WriteFullResultsIfNecessary(args, full_results) +def _SetUpProcess(child, context): # pylint: disable=W0613 + args = context + options_for_unittests.Push(args) - err_occurred, err_str = json_results.UploadFullResultsIfNecessary( - args, full_results) - if err_occurred: - for line in err_str.splitlines(): - logging.error(line) - return 1 - return json_results.ExitCodeFromFullResults(full_results) +def _TearDownProcess(child, context): # pylint: disable=W0613 + browser_test_case.teardown_browser() + options_for_unittests.Pop() - def RunOneSuite(self, possible_browser, args): - test_suite = DiscoverTests(config.test_dirs, config.top_level_dir, - possible_browser, args.positional_args, - args.exact_test_filter, args.run_disabled_tests) - runner = progress_reporter.TestRunner() - result = runner.run(test_suite, config.progress_reporters, - args.repeat_count, args) - return test_suite, result - @classmethod - @RestoreLoggingLevel - def main(cls, args=None): - return super(RunTestsCommand, cls).main(args) +if __name__ == '__main__': + ret_code = RunTestsCommand.main() + print 'run_tests.py exiting, ret_code = %d' % ret_code + sys.exit(ret_code) diff --git a/tools/telemetry/telemetry/unittest/run_tests_unittest.py b/tools/telemetry/telemetry/unittest/run_tests_unittest.py index ee02d93..6db9331 100644 --- a/tools/telemetry/telemetry/unittest/run_tests_unittest.py +++ b/tools/telemetry/telemetry/unittest/run_tests_unittest.py @@ -4,11 +4,17 @@ import unittest -from telemetry import decorators from telemetry.core import util from telemetry.unittest import run_tests +class MockArgs(object): + def __init__(self): + self.positional_args = [] + self.exact_test_filter = True + self.run_disabled_tests = False + + class MockPossibleBrowser(object): def __init__(self, browser_type, os_name, os_version_name, supports_tab_control): @@ -31,25 +37,19 @@ class MockPlatform(object): class RunTestsUnitTest(unittest.TestCase): - def setUp(self): - self.suite = unittest.TestSuite() - self.suite.addTests(run_tests.Discover( - util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py')) - def _GetEnabledTests(self, browser_type, os_name, os_version_name, supports_tab_control): - # pylint: disable=W0212 - def MockPredicate(test): - method = getattr(test, test._testMethodName) - return decorators.IsEnabled(method, MockPossibleBrowser( - browser_type, os_name, os_version_name, supports_tab_control)) - - enabled_tests = set() - for i in run_tests.FilterSuite(self.suite, MockPredicate)._tests: - for j in i: - for k in j: - enabled_tests.add(k._testMethodName) - return enabled_tests + + runner = run_tests.typ.Runner() + host = runner.host + runner.top_level_dir = util.GetTelemetryDir() + runner.args.tests = [host.join(util.GetTelemetryDir(), + 'telemetry', 'unittest', 'disabled_cases.py')] + possible_browser = MockPossibleBrowser( + browser_type, os_name, os_version_name, supports_tab_control) + runner.classifier = run_tests.GetClassifier(MockArgs(), possible_browser) + _, test_set = runner.find_tests(runner.args) + return set(test.name.split('.')[-1] for test in test_set.parallel_tests) def testSystemMacMavericks(self): self.assertEquals( diff --git a/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py b/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py index f82583b..faf975ce 100644 --- 
a/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py +++ b/tools/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py @@ -15,6 +15,7 @@ from telemetry.timeline import model as model_module from telemetry.timeline import async_slice from telemetry.unittest import options_for_unittests from telemetry.unittest import page_test_test_case +from telemetry.unittest import browser_test_case from telemetry.value import scalar from telemetry.web_perf import timeline_based_measurement as tbm_module from telemetry.web_perf import timeline_interaction_record as tir_module @@ -201,9 +202,12 @@ class TestTimelinebasedMeasurementPage(page_module.Page): class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase): def setUp(self): + browser_test_case.teardown_browser() self._options = options_for_unittests.GetCopy() self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF + # This test is flaky when run in parallel on the mac: crbug.com/426676 + @benchmark.Disabled('mac') def testSmoothnessTimelineBasedMeasurementForSmoke(self): ps = self.CreateEmptyPageSet() ps.AddPage(TestTimelinebasedMeasurementPage( @@ -221,6 +225,8 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase): 'DrawerAnimation-frame_time_discrepancy') self.assertEquals(len(v), 1) + # This test is flaky when run in parallel on the mac: crbug.com/426676 + @benchmark.Disabled('mac') def testFastTimelineBasedMeasurementForSmoke(self): ps = self.CreateEmptyPageSet() ps.AddPage(TestTimelinebasedMeasurementPage( @@ -256,7 +262,8 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase): self.assertGreaterEqual(v[0].value, 200.0) # Disabled since mainthread_jank metric is not supported on windows platform. - @benchmark.Disabled('win') + # Also, flaky on the mac when run in parallel: crbug.com/426676 + @benchmark.Disabled('win', 'mac') def testMainthreadJankTimelineBasedMeasurement(self): ps = self.CreateEmptyPageSet() ps.AddPage(TestTimelinebasedMeasurementPage( |
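
A closing note on the scheduling hooks this CL adds to `telemetry/decorators.py`: `@Isolated` marks a test to run by itself, `ShouldSkip` returns a `(should_skip, reason)` pair, and `GetClassifier` in `run_tests.py` uses them to route each test into typ's skip, isolated, or parallel buckets. A small hypothetical test case showing how the decorators are meant to be applied (`'mac'` and `'android'` are example platform strings, not an exhaustive list):

```python
# Hypothetical test case illustrating the decorators used by GetClassifier;
# the platform strings below are examples only.
import unittest

from telemetry import decorators


class ExampleTelemetryTest(unittest.TestCase):

  @decorators.Isolated              # bare form: always run this test by itself
  def testUsesLotsOfMemory(self):
    pass

  @decorators.Isolated('android')   # isolate only when running on Android
  def testExercisesWholeDevice(self):
    pass

  @decorators.Disabled('mac')       # GetClassifier turns this into a typ skip,
  def testFlakyOnMac(self):         # with ShouldSkip() supplying the reason text
    pass
```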