author     dpranke@chromium.org <dpranke@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-01-21 02:22:42 +0000
committer  dpranke@chromium.org <dpranke@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-01-21 02:22:42 +0000
commit     0b22e83c01cb9023e1f9412cfe29f435230f2787 (patch)
tree       4e6869ac460f61a9d3892d5d0c4903a5a473cbe9 /webkit
parent     724f05b66a915f443a49eef8d0f0df2601229572 (diff)
Reindent all the code to 4-space indentation to prepare for upstreaming and conversion
to PEP 8 compliance. (Change 2 of 4).
BUG=none
R=eseidel@chromium.org
TEST=none
Review URL: http://codereview.chromium.org/555016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@36720 0039d316-1c4b-4281-b951-d872f2087c98
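The mechanical effect of the reindent is easiest to see side by side. The sketch below is illustrative only and paraphrases the dedup-tests.py hunk rather than quoting the patch verbatim; PEP 8 specifies 4 spaces per indentation level, where these scripts previously used 2.

```python
# Before this change (2-space indentation):
for cluster in hashes.values():
  if len(cluster) < 2:
    continue

# After this change (4-space indentation, per PEP 8):
for cluster in hashes.values():
    if len(cluster) < 2:
        continue
```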
Diffstat (limited to 'webkit')
-rwxr-xr-x | webkit/tools/layout_tests/dedup-tests.py | 30
-rw-r--r-- | webkit/tools/layout_tests/rebaseline.py | 1669
-rwxr-xr-x | webkit/tools/layout_tests/run_webkit_tests.py | 3036
-rwxr-xr-x | webkit/tools/layout_tests/test_output_formatter.py | 146
-rwxr-xr-x | webkit/tools/layout_tests/test_output_xml_to_json.py | 195
-rw-r--r-- | webkit/tools/layout_tests/update_expectations_from_dashboard.py | 842
-rw-r--r-- | webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py | 705
7 files changed, 3348 insertions, 3275 deletions
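As an aside (not part of this patch), whether a file already uses 4-space indentation can be spot-checked with a few lines of Python. The helper below is a hypothetical, deliberately crude check (it does not special-case legitimate continuation-line indents), written in the same Python 2 dialect as the scripts in this diff.

```python
import sys

def check_indentation(path):
    """Report lines whose leading-space count is not a multiple of four."""
    bad = []
    for lineno, line in enumerate(open(path), 1):
        stripped = line.lstrip(' ')
        indent = len(line) - len(stripped)
        if stripped.strip() and indent % 4 != 0:
            bad.append((lineno, indent))
    return bad

if __name__ == '__main__':
    for path in sys.argv[1:]:
        for lineno, indent in check_indentation(path):
            print '%s:%d: indented %d spaces' % (path, lineno, indent)
```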
diff --git a/webkit/tools/layout_tests/dedup-tests.py b/webkit/tools/layout_tests/dedup-tests.py index 6e1f635..0165e40 100755 --- a/webkit/tools/layout_tests/dedup-tests.py +++ b/webkit/tools/layout_tests/dedup-tests.py @@ -26,24 +26,24 @@ hashes = collections.defaultdict(set) # Fill in the map. cmd = ['git', 'ls-tree', '-r', 'HEAD', 'webkit/data/layout_tests/'] try: - git = subprocess.Popen(cmd, stdout=subprocess.PIPE) + git = subprocess.Popen(cmd, stdout=subprocess.PIPE) except OSError, e: - if e.errno == 2: # No such file or directory. - print >>sys.stderr, "Error: 'No such file' when running git." - print >>sys.stderr, "This script requires git." - sys.exit(1) - raise e + if e.errno == 2: # No such file or directory. + print >> sys.stderr, "Error: 'No such file' when running git." + print >> sys.stderr, "This script requires git." + sys.exit(1) + raise e for line in git.stdout: - attrs, file = line.strip().split('\t') - _, _, hash = attrs.split(' ') - hashes[hash].add(file) + attrs, file = line.strip().split('\t') + _, _, hash = attrs.split(' ') + hashes[hash].add(file) # Dump out duplicated files. for cluster in hashes.values(): - if len(cluster) < 2: - continue - for file in cluster: - if '/chromium-linux/' in file: - if file.replace('/chromium-linux/', '/chromium-win/') in cluster: - print file + if len(cluster) < 2: + continue + for file in cluster: + if '/chromium-linux/' in file: + if file.replace('/chromium-linux/', '/chromium-win/') in cluster: + print file diff --git a/webkit/tools/layout_tests/rebaseline.py b/webkit/tools/layout_tests/rebaseline.py index 81104fc..392180ae2 100644 --- a/webkit/tools/layout_tests/rebaseline.py +++ b/webkit/tools/layout_tests/rebaseline.py @@ -51,932 +51,961 @@ ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel', 'mac-canary': 'webkit-rel-mac-webkit-org', 'linux-canary': 'webkit-rel-linux-webkit-org'} -def RunShellWithReturnCode(command, print_output=False): - """Executes a command and returns the output and process return code. - - Args: - command: program and arguments. - print_output: if true, print the command results to standard output. - - Returns: - command output, return code - """ - - # Use a shell for subcommands on Windows to get a PATH search. - use_shell = sys.platform.startswith('win') - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=use_shell) - if print_output: - output_array = [] - while True: - line = p.stdout.readline() - if not line: - break - if print_output: - print line.strip('\n') - output_array.append(line) - output = ''.join(output_array) - else: - output = p.stdout.read() - p.wait() - p.stdout.close() - - return output, p.returncode - -def RunShell(command, print_output=False): - """Executes a command and returns the output. - - Args: - command: program and arguments. - print_output: if true, print the command results to standard output. - - Returns: - command output - """ - - output, return_code = RunShellWithReturnCode(command, print_output) - return output - -def LogDashedString(text, platform, logging_level=logging.INFO): - """Log text message with dashes on both sides.""" - - msg = text - if platform: - msg += ': ' + platform - if len(msg) < 78: - dashes = '-' * ((78 - len(msg)) / 2) - msg = '%s %s %s' % (dashes, msg, dashes) - - if logging_level == logging.ERROR: - logging.error(msg) - elif logging_level == logging.WARNING: - logging.warn(msg) - else: - logging.info(msg) - - -def SetupHtmlDirectory(html_directory): - """Setup the directory to store html results. 
- - All html related files are stored in the "rebaseline_html" subdirectory. - - Args: - html_directory: parent directory that stores the rebaselining results. - If None, a temp directory is created. - - Returns: - the directory that stores the html related rebaselining results. - """ - - if not html_directory: - html_directory = tempfile.mkdtemp() - elif not os.path.exists(html_directory): - os.mkdir(html_directory) - html_directory = os.path.join(html_directory, 'rebaseline_html') - logging.info('Html directory: "%s"', html_directory) - - if os.path.exists(html_directory): - shutil.rmtree(html_directory, True) - logging.info('Deleted file at html directory: "%s"', html_directory) - - if not os.path.exists(html_directory): - os.mkdir(html_directory) - return html_directory - - -def GetResultFileFullpath(html_directory, baseline_filename, platform, - result_type): - """Get full path of the baseline result file. - - Args: - html_directory: directory that stores the html related files. - baseline_filename: name of the baseline file. - platform: win, linux or mac - result_type: type of the baseline result: '.txt', '.png'. - - Returns: - Full path of the baseline file for rebaselining result comparison. - """ - - base, ext = os.path.splitext(baseline_filename) - result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) - fullpath = os.path.join(html_directory, result_filename) - logging.debug(' Result file full path: "%s".', fullpath) - return fullpath - - -class Rebaseliner(object): - """Class to produce new baselines for a given platform.""" - - REVISION_REGEX = r'<a href=\"(\d+)/\">' - - def __init__(self, platform, options): - self._file_dir = path_utils.GetAbsolutePath(os.path.dirname(sys.argv[0])) - self._platform = platform - self._options = options - self._rebaselining_tests = [] - self._rebaselined_tests = [] +def RunShellWithReturnCode(command, print_output=False): + """Executes a command and returns the output and process return code. - # Create tests and expectations helper which is used to: - # -. compile list of tests that need rebaselining. - # -. update the tests in test_expectations file after rebaseline is done. - self._test_expectations = test_expectations.TestExpectations(None, - self._file_dir, - platform, - False, - False) + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. - self._repo_type = self._GetRepoType() + Returns: + command output, return code + """ - def Run(self, backup): - """Run rebaseline process.""" + # Use a shell for subcommands on Windows to get a PATH search. 
+ use_shell = sys.platform.startswith('win') + p = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=use_shell) + if print_output: + output_array = [] + while True: + line = p.stdout.readline() + if not line: + break + if print_output: + print line.strip('\n') + output_array.append(line) + output = ''.join(output_array) + else: + output = p.stdout.read() + p.wait() + p.stdout.close() - LogDashedString('Compiling rebaselining tests', self._platform) - if not self._CompileRebaseliningTests(): - return True + return output, p.returncode - LogDashedString('Downloading archive', self._platform) - archive_file = self._DownloadBuildBotArchive() - logging.info('') - if not archive_file: - logging.error('No archive found.') - return False - LogDashedString('Extracting and adding new baselines', self._platform) - if not self._ExtractAndAddNewBaselines(archive_file): - return False +def RunShell(command, print_output=False): + """Executes a command and returns the output. - LogDashedString('Updating rebaselined tests in file', self._platform) - self._UpdateRebaselinedTestsInFile(backup) - logging.info('') + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. - if len(self._rebaselining_tests) != len(self._rebaselined_tests): - logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' - 'REBASELINED.') - logging.warning(' Total tests needing rebaselining: %d', - len(self._rebaselining_tests)) - logging.warning(' Total tests rebaselined: %d', - len(self._rebaselined_tests)) - return False + Returns: + command output + """ - logging.warning('All tests needing rebaselining were successfully ' - 'rebaselined.') + output, return_code = RunShellWithReturnCode(command, print_output) + return output - return True - def GetRebaseliningTests(self): - return self._rebaselining_tests +def LogDashedString(text, platform, logging_level=logging.INFO): + """Log text message with dashes on both sides.""" + + msg = text + if platform: + msg += ': ' + platform + if len(msg) < 78: + dashes = '-' * ((78 - len(msg)) / 2) + msg = '%s %s %s' % (dashes, msg, dashes) + + if logging_level == logging.ERROR: + logging.error(msg) + elif logging_level == logging.WARNING: + logging.warn(msg) + else: + logging.info(msg) - def _GetRepoType(self): - """Get the repository type that client is using.""" - output, return_code = RunShellWithReturnCode(['svn', 'info'], False) - if return_code == 0: - return REPO_SVN +def SetupHtmlDirectory(html_directory): + """Setup the directory to store html results. - return REPO_UNKNOWN + All html related files are stored in the "rebaseline_html" subdirectory. - def _CompileRebaseliningTests(self): - """Compile list of tests that need rebaselining for the platform. + Args: + html_directory: parent directory that stores the rebaselining results. + If None, a temp directory is created. Returns: - List of tests that need rebaselining or - None if there is no such test. + the directory that stores the html related rebaselining results. 
""" - self._rebaselining_tests = self._test_expectations.GetRebaseliningFailures() - if not self._rebaselining_tests: - logging.warn('No tests found that need rebaselining.') - return None + if not html_directory: + html_directory = tempfile.mkdtemp() + elif not os.path.exists(html_directory): + os.mkdir(html_directory) + + html_directory = os.path.join(html_directory, 'rebaseline_html') + logging.info('Html directory: "%s"', html_directory) - logging.info('Total number of tests needing rebaselining for "%s": "%d"', - self._platform, len(self._rebaselining_tests)) + if os.path.exists(html_directory): + shutil.rmtree(html_directory, True) + logging.info('Deleted file at html directory: "%s"', html_directory) - test_no = 1 - for test in self._rebaselining_tests: - logging.info(' %d: %s', test_no, test) - test_no += 1 + if not os.path.exists(html_directory): + os.mkdir(html_directory) + return html_directory - return self._rebaselining_tests - def _GetLatestRevision(self, url): - """Get the latest layout test revision number from buildbot. +def GetResultFileFullpath(html_directory, baseline_filename, platform, + result_type): + """Get full path of the baseline result file. Args: - url: Url to retrieve layout test revision numbers. + html_directory: directory that stores the html related files. + baseline_filename: name of the baseline file. + platform: win, linux or mac + result_type: type of the baseline result: '.txt', '.png'. Returns: - latest revision or - None on failure. + Full path of the baseline file for rebaselining result comparison. """ - logging.debug('Url to retrieve revision: "%s"', url) + base, ext = os.path.splitext(baseline_filename) + result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) + fullpath = os.path.join(html_directory, result_filename) + logging.debug(' Result file full path: "%s".', fullpath) + return fullpath + - f = urllib.urlopen(url) - content = f.read() - f.close() +class Rebaseliner(object): + """Class to produce new baselines for a given platform.""" + + REVISION_REGEX = r'<a href=\"(\d+)/\">' + + def __init__(self, platform, options): + self._file_dir = path_utils.GetAbsolutePath( + os.path.dirname(sys.argv[0])) + self._platform = platform + self._options = options + self._rebaselining_tests = [] + self._rebaselined_tests = [] + + # Create tests and expectations helper which is used to: + # -. compile list of tests that need rebaselining. + # -. update the tests in test_expectations file after rebaseline + # is done. 
+ self._test_expectations = \ + test_expectations.TestExpectations(None, + self._file_dir, + platform, + False, + False) + + self._repo_type = self._GetRepoType() + + def Run(self, backup): + """Run rebaseline process.""" + + LogDashedString('Compiling rebaselining tests', self._platform) + if not self._CompileRebaseliningTests(): + return True - revisions = re.findall(self.REVISION_REGEX, content) - if not revisions: - logging.error('Failed to find revision, content: "%s"', content) - return None + LogDashedString('Downloading archive', self._platform) + archive_file = self._DownloadBuildBotArchive() + logging.info('') + if not archive_file: + logging.error('No archive found.') + return False - revisions.sort(key=int) - logging.info('Latest revision: "%s"', revisions[len(revisions) - 1]) - return revisions[len(revisions) - 1] + LogDashedString('Extracting and adding new baselines', self._platform) + if not self._ExtractAndAddNewBaselines(archive_file): + return False - def _GetArchiveDirName(self, platform, webkit_canary): - """Get name of the layout test archive directory. + LogDashedString('Updating rebaselined tests in file', self._platform) + self._UpdateRebaselinedTestsInFile(backup) + logging.info('') + + if len(self._rebaselining_tests) != len(self._rebaselined_tests): + logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' + 'REBASELINED.') + logging.warning(' Total tests needing rebaselining: %d', + len(self._rebaselining_tests)) + logging.warning(' Total tests rebaselined: %d', + len(self._rebaselined_tests)) + return False - Returns: - Directory name or - None on failure - """ + logging.warning('All tests needing rebaselining were successfully ' + 'rebaselined.') - if webkit_canary: - platform += '-canary' + return True - if platform in ARCHIVE_DIR_NAME_DICT: - return ARCHIVE_DIR_NAME_DICT[platform] - else: - logging.error('Cannot find platform key %s in archive directory name ' - 'dictionary', platform) - return None + def GetRebaseliningTests(self): + return self._rebaselining_tests - def _GetArchiveUrl(self): - """Generate the url to download latest layout test archive. + def _GetRepoType(self): + """Get the repository type that client is using.""" - Returns: - Url to download archive or - None on failure - """ + output, return_code = RunShellWithReturnCode(['svn', 'info'], False) + if return_code == 0: + return REPO_SVN - dir_name = self._GetArchiveDirName(self._platform, - self._options.webkit_canary) - if not dir_name: - return None + return REPO_UNKNOWN - logging.debug('Buildbot platform dir name: "%s"', dir_name) + def _CompileRebaseliningTests(self): + """Compile list of tests that need rebaselining for the platform. - url_base = '%s/%s/' % (self._options.archive_url, dir_name) - latest_revision = self._GetLatestRevision(url_base) - if latest_revision is None or latest_revision <= 0: - return None + Returns: + List of tests that need rebaselining or + None if there is no such test. + """ - archive_url = ('%s%s/layout-test-results.zip' % (url_base, - latest_revision)) - logging.info('Archive url: "%s"', archive_url) - return archive_url + self._rebaselining_tests = \ + self._test_expectations.GetRebaseliningFailures() + if not self._rebaselining_tests: + logging.warn('No tests found that need rebaselining.') + return None - def _DownloadBuildBotArchive(self): - """Download layout test archive file from buildbot. 
+ logging.info('Total number of tests needing rebaselining ' + 'for "%s": "%d"', self._platform, + len(self._rebaselining_tests)) - Returns: - True if download succeeded or - False otherwise. - """ + test_no = 1 + for test in self._rebaselining_tests: + logging.info(' %d: %s', test_no, test) + test_no += 1 - url = self._GetArchiveUrl() - if url is None: - return None + return self._rebaselining_tests - fn = urllib.urlretrieve(url)[0] - logging.info('Archive downloaded and saved to file: "%s"', fn) - return fn + def _GetLatestRevision(self, url): + """Get the latest layout test revision number from buildbot. - def _ExtractAndAddNewBaselines(self, archive_file): - """Extract new baselines from archive and add them to SVN repository. + Args: + url: Url to retrieve layout test revision numbers. - Args: - archive_file: full path to the archive file. + Returns: + latest revision or + None on failure. + """ - Returns: - List of tests that have been rebaselined or - None on failure. - """ + logging.debug('Url to retrieve revision: "%s"', url) - zip_file = zipfile.ZipFile(archive_file, 'r') - zip_namelist = zip_file.namelist() - - logging.debug('zip file namelist:') - for name in zip_namelist: - logging.debug(' ' + name) - - platform = path_utils.PlatformName(self._platform) - logging.debug('Platform dir: "%s"', platform) - - test_no = 1 - self._rebaselined_tests = [] - for test in self._rebaselining_tests: - logging.info('Test %d: %s', test_no, test) - - found = False - svn_error = False - test_basename = os.path.splitext(test)[0] - for suffix in BASELINE_SUFFIXES: - archive_test_name = 'layout-test-results/%s-actual%s' % (test_basename, - suffix) - logging.debug(' Archive test file name: "%s"', archive_test_name) - if not archive_test_name in zip_namelist: - logging.info(' %s file not in archive.', suffix) - continue - - found = True - logging.info(' %s file found in archive.', suffix) - - # Extract new baseline from archive and save it to a temp file. - data = zip_file.read(archive_test_name) - temp_fd, temp_name = tempfile.mkstemp(suffix) - f = os.fdopen(temp_fd, 'wb') - f.write(data) + f = urllib.urlopen(url) + content = f.read() f.close() - expected_filename = '%s-expected%s' % (test_basename, suffix) - expected_fullpath = os.path.join( - path_utils.ChromiumBaselinePath(platform), expected_filename) - expected_fullpath = os.path.normpath(expected_fullpath) - logging.debug(' Expected file full path: "%s"', expected_fullpath) - - # TODO(victorw): for now, the rebaselining tool checks whether - # or not THIS baseline is duplicate and should be skipped. - # We could improve the tool to check all baselines in upper and lower - # levels and remove all duplicated baselines. - if self._IsDupBaseline(temp_name, - expected_fullpath, - test, - suffix, - self._platform): - os.remove(temp_name) - self._DeleteBaseline(expected_fullpath) - continue - - # Create the new baseline directory if it doesn't already exist. 
- path_utils.MaybeMakeDirectory(os.path.dirname(expected_fullpath)) - - shutil.move(temp_name, expected_fullpath) - - if not self._SvnAdd(expected_fullpath): - svn_error = True - elif suffix != '.checksum': - self._CreateHtmlBaselineFiles(expected_fullpath) - - if not found: - logging.warn(' No new baselines found in archive.') - else: - if svn_error: - logging.warn(' Failed to add baselines to SVN.') - else: - logging.info(' Rebaseline succeeded.') - self._rebaselined_tests.append(test) - - test_no += 1 + revisions = re.findall(self.REVISION_REGEX, content) + if not revisions: + logging.error('Failed to find revision, content: "%s"', content) + return None - zip_file.close() - os.remove(archive_file) + revisions.sort(key=int) + logging.info('Latest revision: "%s"', revisions[len(revisions) - 1]) + return revisions[len(revisions) - 1] - return self._rebaselined_tests + def _GetArchiveDirName(self, platform, webkit_canary): + """Get name of the layout test archive directory. - def _IsDupBaseline(self, new_baseline, baseline_path, test, suffix, platform): - """Check whether a baseline is duplicate and can fallback to same - baseline for another platform. For example, if a test has same baseline - on linux and windows, then we only store windows baseline and linux - baseline will fallback to the windows version. + Returns: + Directory name or + None on failure + """ - Args: - expected_filename: baseline expectation file name. - test: test name. - suffix: file suffix of the expected results, including dot; e.g. '.txt' - or '.png'. - platform: baseline platform 'mac', 'win' or 'linux'. + if webkit_canary: + platform += '-canary' - Returns: - True if the baseline is unnecessary. - False otherwise. - """ - test_filepath = os.path.join(path_utils.LayoutTestsDir(), test) - all_baselines = path_utils.ExpectedBaselines(test_filepath, - suffix, - platform, - True) - for (fallback_dir, fallback_file) in all_baselines: - if fallback_dir and fallback_file: - fallback_fullpath = os.path.normpath( - os.path.join(fallback_dir, fallback_file)) - if fallback_fullpath.lower() != baseline_path.lower(): - if not self._DiffBaselines(new_baseline, fallback_fullpath): - logging.info(' Found same baseline at %s', fallback_fullpath) + if platform in ARCHIVE_DIR_NAME_DICT: + return ARCHIVE_DIR_NAME_DICT[platform] + else: + logging.error('Cannot find platform key %s in archive ' + 'directory name dictionary', platform) + return None + + def _GetArchiveUrl(self): + """Generate the url to download latest layout test archive. + + Returns: + Url to download archive or + None on failure + """ + + dir_name = self._GetArchiveDirName(self._platform, + self._options.webkit_canary) + if not dir_name: + return None + + logging.debug('Buildbot platform dir name: "%s"', dir_name) + + url_base = '%s/%s/' % (self._options.archive_url, dir_name) + latest_revision = self._GetLatestRevision(url_base) + if latest_revision is None or latest_revision <= 0: + return None + + archive_url = ('%s%s/layout-test-results.zip' % (url_base, + latest_revision)) + logging.info('Archive url: "%s"', archive_url) + return archive_url + + def _DownloadBuildBotArchive(self): + """Download layout test archive file from buildbot. + + Returns: + True if download succeeded or + False otherwise. 
+ """ + + url = self._GetArchiveUrl() + if url is None: + return None + + fn = urllib.urlretrieve(url)[0] + logging.info('Archive downloaded and saved to file: "%s"', fn) + return fn + + def _ExtractAndAddNewBaselines(self, archive_file): + """Extract new baselines from archive and add them to SVN repository. + + Args: + archive_file: full path to the archive file. + + Returns: + List of tests that have been rebaselined or + None on failure. + """ + + zip_file = zipfile.ZipFile(archive_file, 'r') + zip_namelist = zip_file.namelist() + + logging.debug('zip file namelist:') + for name in zip_namelist: + logging.debug(' ' + name) + + platform = path_utils.PlatformName(self._platform) + logging.debug('Platform dir: "%s"', platform) + + test_no = 1 + self._rebaselined_tests = [] + for test in self._rebaselining_tests: + logging.info('Test %d: %s', test_no, test) + + found = False + svn_error = False + test_basename = os.path.splitext(test)[0] + for suffix in BASELINE_SUFFIXES: + archive_test_name = ('layout-test-results/%s-actual%s' % + (test_basename, suffix)) + logging.debug(' Archive test file name: "%s"', + archive_test_name) + if not archive_test_name in zip_namelist: + logging.info(' %s file not in archive.', suffix) + continue + + found = True + logging.info(' %s file found in archive.', suffix) + + # Extract new baseline from archive and save it to a temp file. + data = zip_file.read(archive_test_name) + temp_fd, temp_name = tempfile.mkstemp(suffix) + f = os.fdopen(temp_fd, 'wb') + f.write(data) + f.close() + + expected_filename = '%s-expected%s' % (test_basename, suffix) + expected_fullpath = os.path.join( + path_utils.ChromiumBaselinePath(platform), + expected_filename) + expected_fullpath = os.path.normpath(expected_fullpath) + logging.debug(' Expected file full path: "%s"', + expected_fullpath) + + # TODO(victorw): for now, the rebaselining tool checks whether + # or not THIS baseline is duplicate and should be skipped. + # We could improve the tool to check all baselines in upper + # and lower + # levels and remove all duplicated baselines. + if self._IsDupBaseline(temp_name, + expected_fullpath, + test, + suffix, + self._platform): + os.remove(temp_name) + self._DeleteBaseline(expected_fullpath) + continue + + # Create the new baseline directory if it doesn't already + # exist. + path_utils.MaybeMakeDirectory( + os.path.dirname(expected_fullpath)) + + shutil.move(temp_name, expected_fullpath) + + if not self._SvnAdd(expected_fullpath): + svn_error = True + elif suffix != '.checksum': + self._CreateHtmlBaselineFiles(expected_fullpath) + + if not found: + logging.warn(' No new baselines found in archive.') + else: + if svn_error: + logging.warn(' Failed to add baselines to SVN.') + else: + logging.info(' Rebaseline succeeded.') + self._rebaselined_tests.append(test) + + test_no += 1 + + zip_file.close() + os.remove(archive_file) + + return self._rebaselined_tests + + def _IsDupBaseline(self, new_baseline, baseline_path, test, suffix, + platform): + """Check whether a baseline is duplicate and can fallback to same + baseline for another platform. For example, if a test has same + baseline on linux and windows, then we only store windows + baseline and linux baseline will fallback to the windows version. + + Args: + expected_filename: baseline expectation file name. + test: test name. + suffix: file suffix of the expected results, including dot; + e.g. '.txt' or '.png'. + platform: baseline platform 'mac', 'win' or 'linux'. + + Returns: + True if the baseline is unnecessary. 
+ False otherwise. + """ + test_filepath = os.path.join(path_utils.LayoutTestsDir(), test) + all_baselines = path_utils.ExpectedBaselines(test_filepath, + suffix, + platform, + True) + for (fallback_dir, fallback_file) in all_baselines: + if fallback_dir and fallback_file: + fallback_fullpath = os.path.normpath( + os.path.join(fallback_dir, fallback_file)) + if fallback_fullpath.lower() != baseline_path.lower(): + if not self._DiffBaselines(new_baseline, + fallback_fullpath): + logging.info(' Found same baseline at %s', + fallback_fullpath) + return True + else: + return False + + return False + + def _DiffBaselines(self, file1, file2): + """Check whether two baselines are different. + + Args: + file1, file2: full paths of the baselines to compare. + + Returns: + True if two files are different or have different extensions. + False otherwise. + """ + + ext1 = os.path.splitext(file1)[1].upper() + ext2 = os.path.splitext(file2)[1].upper() + if ext1 != ext2: + logging.warn('Files to compare have different ext. ' + 'File1: %s; File2: %s', file1, file2) return True - else: - return False - - return False - - def _DiffBaselines(self, file1, file2): - """Check whether two baselines are different. - - Args: - file1, file2: full paths of the baselines to compare. - Returns: - True if two files are different or have different extensions. - False otherwise. - """ - - ext1 = os.path.splitext(file1)[1].upper() - ext2 = os.path.splitext(file2)[1].upper() - if ext1 != ext2: - logging.warn('Files to compare have different ext. File1: %s; File2: %s', - file1, file2) - return True - - if ext1 == '.PNG': - return image_diff.ImageDiff(self._platform, '').DiffFiles(file1, - file2) - else: - return text_diff.TestTextDiff(self._platform, '').DiffFiles(file1, - file2) - - def _DeleteBaseline(self, filename): - """Remove the file from repository and delete it from disk. - - Args: - filename: full path of the file to delete. - """ + if ext1 == '.PNG': + return image_diff.ImageDiff(self._platform, '').DiffFiles(file1, + file2) + else: + return text_diff.TestTextDiff(self._platform, '').DiffFiles(file1, + file2) - if not filename or not os.path.isfile(filename): - return + def _DeleteBaseline(self, filename): + """Remove the file from repository and delete it from disk. - if self._repo_type == REPO_SVN: - parent_dir, basename = os.path.split(filename) - original_dir = os.getcwd() - os.chdir(parent_dir) - RunShell(['svn', 'delete', '--force', basename], False) - os.chdir(original_dir) - else: - os.remove(filename) + Args: + filename: full path of the file to delete. + """ - def _UpdateRebaselinedTestsInFile(self, backup): - """Update the rebaselined tests in test expectations file. + if not filename or not os.path.isfile(filename): + return - Args: - backup: if True, backup the original test expectations file. + if self._repo_type == REPO_SVN: + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + RunShell(['svn', 'delete', '--force', basename], False) + os.chdir(original_dir) + else: + os.remove(filename) - Returns: - no - """ + def _UpdateRebaselinedTestsInFile(self, backup): + """Update the rebaselined tests in test expectations file. - if self._rebaselined_tests: - self._test_expectations.RemovePlatformFromFile(self._rebaselined_tests, - self._platform, - backup) - else: - logging.info('No test was rebaselined so nothing to remove.') + Args: + backup: if True, backup the original test expectations file. 
- def _SvnAdd(self, filename): - """Add the file to SVN repository. + Returns: + no + """ - Args: - filename: full path of the file to add. + if self._rebaselined_tests: + self._test_expectations.RemovePlatformFromFile( + self._rebaselined_tests, self._platform, backup) + else: + logging.info('No test was rebaselined so nothing to remove.') - Returns: - True if the file already exists in SVN or is sucessfully added to SVN. - False otherwise. - """ + def _SvnAdd(self, filename): + """Add the file to SVN repository. - if not filename: - return False - - parent_dir, basename = os.path.split(filename) - if self._repo_type != REPO_SVN or parent_dir == filename: - logging.info("No svn checkout found, skip svn add.") - return True - - original_dir = os.getcwd() - os.chdir(parent_dir) - status_output = RunShell(['svn', 'status', basename], False) - os.chdir(original_dir) - output = status_output.upper() - if output.startswith('A') or output.startswith('M'): - logging.info(' File already added to SVN: "%s"', filename) - return True - - if output.find('IS NOT A WORKING COPY') >= 0: - logging.info(' File is not a working copy, add its parent: "%s"', - parent_dir) - return self._SvnAdd(parent_dir) - - os.chdir(parent_dir) - add_output = RunShell(['svn', 'add', basename], True) - os.chdir(original_dir) - output = add_output.upper().rstrip() - if output.startswith('A') and output.find(basename.upper()) >= 0: - logging.info(' Added new file: "%s"', filename) - self._SvnPropSet(filename) - return True - - if (not status_output) and (add_output.upper().find( - 'ALREADY UNDER VERSION CONTROL') >= 0): - logging.info(' File already under SVN and has no change: "%s"', filename) - return True - - logging.warn(' Failed to add file to SVN: "%s"', filename) - logging.warn(' Svn status output: "%s"', status_output) - logging.warn(' Svn add output: "%s"', add_output) - return False - - def _SvnPropSet(self, filename): - """Set the baseline property + Args: + filename: full path of the file to add. - Args: - filename: full path of the file to add. + Returns: + True if the file already exists in SVN or is sucessfully added + to SVN. + False otherwise. + """ - Returns: - True if the file already exists in SVN or is sucessfully added to SVN. - False otherwise. - """ - ext = os.path.splitext(filename)[1].upper() - if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM': - return - - parent_dir, basename = os.path.split(filename) - original_dir = os.getcwd() - os.chdir(parent_dir) - if ext == '.PNG': - cmd = [ 'svn', 'pset', 'svn:mime-type', 'image/png', basename ] - else: - cmd = [ 'svn', 'pset', 'svn:eol-style', 'LF', basename ] + if not filename: + return False - logging.debug(' Set svn prop: %s', ' '.join(cmd)) - RunShell(cmd, False) - os.chdir(original_dir) + parent_dir, basename = os.path.split(filename) + if self._repo_type != REPO_SVN or parent_dir == filename: + logging.info("No svn checkout found, skip svn add.") + return True - def _CreateHtmlBaselineFiles(self, baseline_fullpath): - """Create baseline files (old, new and diff) in html directory. + original_dir = os.getcwd() + os.chdir(parent_dir) + status_output = RunShell(['svn', 'status', basename], False) + os.chdir(original_dir) + output = status_output.upper() + if output.startswith('A') or output.startswith('M'): + logging.info(' File already added to SVN: "%s"', filename) + return True - The files are used to compare the rebaselining results. 
+ if output.find('IS NOT A WORKING COPY') >= 0: + logging.info(' File is not a working copy, add its parent: "%s"', + parent_dir) + return self._SvnAdd(parent_dir) + + os.chdir(parent_dir) + add_output = RunShell(['svn', 'add', basename], True) + os.chdir(original_dir) + output = add_output.upper().rstrip() + if output.startswith('A') and output.find(basename.upper()) >= 0: + logging.info(' Added new file: "%s"', filename) + self._SvnPropSet(filename) + return True - Args: - baseline_fullpath: full path of the expected baseline file. - """ + if (not status_output) and (add_output.upper().find( + 'ALREADY UNDER VERSION CONTROL') >= 0): + logging.info(' File already under SVN and has no change: "%s"', + filename) + return True - if not baseline_fullpath or not os.path.exists(baseline_fullpath): - return - - # Copy the new baseline to html directory for result comparison. - baseline_filename = os.path.basename(baseline_fullpath) - new_file = GetResultFileFullpath(self._options.html_directory, - baseline_filename, - self._platform, - 'new') - shutil.copyfile(baseline_fullpath, new_file) - logging.info(' Html: copied new baseline file from "%s" to "%s".', - baseline_fullpath, new_file) - - # Get the old baseline from SVN and save to the html directory. - output = RunShell(['svn', 'cat', '-r', 'BASE', baseline_fullpath]) - if (not output) or (output.upper().rstrip().endswith( - 'NO SUCH FILE OR DIRECTORY')): - logging.info(' No base file: "%s"', baseline_fullpath) - return - base_file = GetResultFileFullpath(self._options.html_directory, - baseline_filename, - self._platform, - 'old') - f = open(base_file, 'wb') - f.write(output) - f.close() - logging.info(' Html: created old baseline file: "%s".', - base_file) - - # Get the diff between old and new baselines and save to the html directory. - if baseline_filename.upper().endswith('.TXT'): - # If the user specified a custom diff command in their svn config file, - # then it'll be used when we do svn diff, which we don't want to happen - # since we want the unified diff. Using --diff-cmd=diff doesn't always - # work, since they can have another diff executable in their path that - # gives different line endings. So we use a bogus temp directory as the - # config directory, which gets around these problems. - if sys.platform.startswith("win"): - parent_dir = tempfile.gettempdir() - else: - parent_dir = sys.path[0] # tempdir is not secure. - bogus_dir = os.path.join(parent_dir, "temp_svn_config") - logging.debug(' Html: temp config dir: "%s".', bogus_dir) - if not os.path.exists(bogus_dir): - os.mkdir(bogus_dir) - delete_bogus_dir = True - else: - delete_bogus_dir = False - - output = RunShell(["svn", "diff", "--config-dir", bogus_dir, - baseline_fullpath]) - if output: - diff_file = GetResultFileFullpath(self._options.html_directory, + logging.warn(' Failed to add file to SVN: "%s"', filename) + logging.warn(' Svn status output: "%s"', status_output) + logging.warn(' Svn add output: "%s"', add_output) + return False + + def _SvnPropSet(self, filename): + """Set the baseline property + + Args: + filename: full path of the file to add. + + Returns: + True if the file already exists in SVN or is sucessfully added + to SVN. + False otherwise. 
+ """ + ext = os.path.splitext(filename)[1].upper() + if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM': + return + + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + if ext == '.PNG': + cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename] + else: + cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename] + + logging.debug(' Set svn prop: %s', ' '.join(cmd)) + RunShell(cmd, False) + os.chdir(original_dir) + + def _CreateHtmlBaselineFiles(self, baseline_fullpath): + """Create baseline files (old, new and diff) in html directory. + + The files are used to compare the rebaselining results. + + Args: + baseline_fullpath: full path of the expected baseline file. + """ + + if not baseline_fullpath or not os.path.exists(baseline_fullpath): + return + + # Copy the new baseline to html directory for result comparison. + baseline_filename = os.path.basename(baseline_fullpath) + new_file = GetResultFileFullpath(self._options.html_directory, + baseline_filename, + self._platform, + 'new') + shutil.copyfile(baseline_fullpath, new_file) + logging.info(' Html: copied new baseline file from "%s" to "%s".', + baseline_fullpath, new_file) + + # Get the old baseline from SVN and save to the html directory. + output = RunShell(['svn', 'cat', '-r', 'BASE', baseline_fullpath]) + if (not output) or (output.upper().rstrip().endswith( + 'NO SUCH FILE OR DIRECTORY')): + logging.info(' No base file: "%s"', baseline_fullpath) + return + base_file = GetResultFileFullpath(self._options.html_directory, baseline_filename, self._platform, - 'diff') - f = open(diff_file, 'wb') + 'old') + f = open(base_file, 'wb') f.write(output) f.close() - logging.info(' Html: created baseline diff file: "%s".', - diff_file) + logging.info(' Html: created old baseline file: "%s".', + base_file) + + # Get the diff between old and new baselines and save to the html dir. + if baseline_filename.upper().endswith('.TXT'): + # If the user specified a custom diff command in their svn config + # file, then it'll be used when we do svn diff, which we don't want + # to happen since we want the unified diff. Using --diff-cmd=diff + # doesn't always work, since they can have another diff executable + # in their path that gives different line endings. So we use a + # bogus temp directory as the config directory, which gets + # around these problems. + if sys.platform.startswith("win"): + parent_dir = tempfile.gettempdir() + else: + parent_dir = sys.path[0] # tempdir is not secure. 
+ bogus_dir = os.path.join(parent_dir, "temp_svn_config") + logging.debug(' Html: temp config dir: "%s".', bogus_dir) + if not os.path.exists(bogus_dir): + os.mkdir(bogus_dir) + delete_bogus_dir = True + else: + delete_bogus_dir = False + + output = RunShell(["svn", "diff", "--config-dir", bogus_dir, + baseline_fullpath]) + if output: + diff_file = GetResultFileFullpath(self._options.html_directory, + baseline_filename, + self._platform, + 'diff') + f = open(diff_file, 'wb') + f.write(output) + f.close() + logging.info(' Html: created baseline diff file: "%s".', + diff_file) + + if delete_bogus_dir: + shutil.rmtree(bogus_dir, True) + logging.debug(' Html: removed temp config dir: "%s".', + bogus_dir) - if delete_bogus_dir: - shutil.rmtree(bogus_dir, True) - logging.debug(' Html: removed temp config dir: "%s".', bogus_dir) class HtmlGenerator(object): - """Class to generate rebaselining result comparison html.""" - - HTML_REBASELINE = ('<html>' - '<head>' - '<style>' - 'body {font-family: sans-serif;}' - '.mainTable {background: #666666;}' - '.mainTable td , .mainTable th {background: white;}' - '.detail {margin-left: 10px; margin-top: 3px;}' - '</style>' - '<title>Rebaselining Result Comparison (%(time)s)</title>' - '</head>' - '<body>' - '<h2>Rebaselining Result Comparison (%(time)s)</h2>' - '%(body)s' - '</body>' - '</html>') - HTML_NO_REBASELINING_TESTS = '<p>No tests found that need rebaselining.</p>' - HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>' - '%s</table><br>') - HTML_TR_TEST = ('<tr>' - '<th style="background-color: #CDECDE; border-bottom: ' - '1px solid black; font-size: 18pt; font-weight: bold" ' - 'colspan="5">' - '<a href="%s">%s</a>' - '</th>' - '</tr>') - HTML_TEST_DETAIL = ('<div class="detail">' - '<tr>' - '<th width="100">Baseline</th>' - '<th width="100">Platform</th>' - '<th width="200">Old</th>' - '<th width="200">New</th>' - '<th width="150">Difference</th>' - '</tr>' - '%s' - '</div>') - HTML_TD_NOLINK = '<td align=center><a>%s</a></td>' - HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>' - HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">' - '<img style="width: 200" src="%(uri)s" /></a></td>') - HTML_TR = '<tr>%s</tr>' - - def __init__(self, options, platforms, rebaselining_tests): - self._html_directory = options.html_directory - self._platforms = platforms - self._rebaselining_tests = rebaselining_tests - self._html_file = os.path.join(options.html_directory, 'rebaseline.html') - - def GenerateHtml(self): - """Generate html file for rebaselining result comparison.""" - - logging.info('Generating html file') - - html_body = '' - if not self._rebaselining_tests: - html_body += self.HTML_NO_REBASELINING_TESTS - else: - tests = list(self._rebaselining_tests) - tests.sort() - - test_no = 1 - for test in tests: - logging.info('Test %d: %s', test_no, test) - html_body += self._GenerateHtmlForOneTest(test) + """Class to generate rebaselining result comparison html.""" + + HTML_REBASELINE = ('<html>' + '<head>' + '<style>' + 'body {font-family: sans-serif;}' + '.mainTable {background: #666666;}' + '.mainTable td , .mainTable th {background: white;}' + '.detail {margin-left: 10px; margin-top: 3px;}' + '</style>' + '<title>Rebaselining Result Comparison (%(time)s)' + '</title>' + '</head>' + '<body>' + '<h2>Rebaselining Result Comparison (%(time)s)</h2>' + '%(body)s' + '</body>' + '</html>') + HTML_NO_REBASELINING_TESTS = ( + '<p>No tests found that need rebaselining.</p>') + HTML_TABLE_TEST = ('<table class="mainTable" 
cellspacing=1 cellpadding=5>' + '%s</table><br>') + HTML_TR_TEST = ('<tr>' + '<th style="background-color: #CDECDE; border-bottom: ' + '1px solid black; font-size: 18pt; font-weight: bold" ' + 'colspan="5">' + '<a href="%s">%s</a>' + '</th>' + '</tr>') + HTML_TEST_DETAIL = ('<div class="detail">' + '<tr>' + '<th width="100">Baseline</th>' + '<th width="100">Platform</th>' + '<th width="200">Old</th>' + '<th width="200">New</th>' + '<th width="150">Difference</th>' + '</tr>' + '%s' + '</div>') + HTML_TD_NOLINK = '<td align=center><a>%s</a></td>' + HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>' + HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">' + '<img style="width: 200" src="%(uri)s" /></a></td>') + HTML_TR = '<tr>%s</tr>' + + def __init__(self, options, platforms, rebaselining_tests): + self._html_directory = options.html_directory + self._platforms = platforms + self._rebaselining_tests = rebaselining_tests + self._html_file = os.path.join(options.html_directory, + 'rebaseline.html') + + def GenerateHtml(self): + """Generate html file for rebaselining result comparison.""" + + logging.info('Generating html file') + + html_body = '' + if not self._rebaselining_tests: + html_body += self.HTML_NO_REBASELINING_TESTS + else: + tests = list(self._rebaselining_tests) + tests.sort() - html = self.HTML_REBASELINE % ({'time': time.asctime(), 'body': html_body}) - logging.debug(html) + test_no = 1 + for test in tests: + logging.info('Test %d: %s', test_no, test) + html_body += self._GenerateHtmlForOneTest(test) - f = open(self._html_file, 'w') - f.write(html) - f.close() + html = self.HTML_REBASELINE % ({'time': time.asctime(), + 'body': html_body}) + logging.debug(html) - logging.info('Baseline comparison html generated at "%s"', - self._html_file) + f = open(self._html_file, 'w') + f.write(html) + f.close() - def ShowHtml(self): - """Launch the rebaselining html in brwoser.""" + logging.info('Baseline comparison html generated at "%s"', + self._html_file) - logging.info('Launching html: "%s"', self._html_file) + def ShowHtml(self): + """Launch the rebaselining html in brwoser.""" - html_uri = path_utils.FilenameToUri(self._html_file) - webbrowser.open(html_uri, 1) + logging.info('Launching html: "%s"', self._html_file) - logging.info('Html launched.') + html_uri = path_utils.FilenameToUri(self._html_file) + webbrowser.open(html_uri, 1) - def _GenerateBaselineLinks(self, test_basename, suffix, platform): - """Generate links for baseline results (old, new and diff). + logging.info('Html launched.') - Args: - test_basename: base filename of the test - suffix: baseline file suffixes: '.txt', '.png' - platform: win, linux or mac + def _GenerateBaselineLinks(self, test_basename, suffix, platform): + """Generate links for baseline results (old, new and diff). 
- Returns: - html links for showing baseline results (old, new and diff) - """ + Args: + test_basename: base filename of the test + suffix: baseline file suffixes: '.txt', '.png' + platform: win, linux or mac - baseline_filename = '%s-expected%s' % (test_basename, suffix) - logging.debug(' baseline filename: "%s"', baseline_filename) - - new_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'new') - logging.info(' New baseline file: "%s"', new_file) - if not os.path.exists(new_file): - logging.info(' No new baseline file: "%s"', new_file) - return '' - - old_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'old') - logging.info(' Old baseline file: "%s"', old_file) - if suffix == '.png': - html_td_link = self.HTML_TD_LINK_IMG - else: - html_td_link = self.HTML_TD_LINK + Returns: + html links for showing baseline results (old, new and diff) + """ - links = '' - if os.path.exists(old_file): - links += html_td_link % {'uri': path_utils.FilenameToUri(old_file), - 'name': baseline_filename} - else: - logging.info(' No old baseline file: "%s"', old_file) - links += self.HTML_TD_NOLINK % '' - - links += html_td_link % {'uri': path_utils.FilenameToUri(new_file), - 'name': baseline_filename} - - diff_file = GetResultFileFullpath(self._html_directory, - baseline_filename, - platform, - 'diff') - logging.info(' Baseline diff file: "%s"', diff_file) - if os.path.exists(diff_file): - links += html_td_link % {'uri': path_utils.FilenameToUri(diff_file), - 'name': 'Diff'} - else: - logging.info(' No baseline diff file: "%s"', diff_file) - links += self.HTML_TD_NOLINK % '' + baseline_filename = '%s-expected%s' % (test_basename, suffix) + logging.debug(' baseline filename: "%s"', baseline_filename) - return links + new_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'new') + logging.info(' New baseline file: "%s"', new_file) + if not os.path.exists(new_file): + logging.info(' No new baseline file: "%s"', new_file) + return '' - def _GenerateHtmlForOneTest(self, test): - """Generate html for one rebaselining test. + old_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'old') + logging.info(' Old baseline file: "%s"', old_file) + if suffix == '.png': + html_td_link = self.HTML_TD_LINK_IMG + else: + html_td_link = self.HTML_TD_LINK - Args: - test: layout test name + links = '' + if os.path.exists(old_file): + links += html_td_link % {'uri': path_utils.FilenameToUri(old_file), + 'name': baseline_filename} + else: + logging.info(' No old baseline file: "%s"', old_file) + links += self.HTML_TD_NOLINK % '' - Returns: - html that compares baseline results for the test. 
- """ + links += html_td_link % {'uri': path_utils.FilenameToUri(new_file), + 'name': baseline_filename} - test_basename = os.path.basename(os.path.splitext(test)[0]) - logging.info(' basename: "%s"', test_basename) - rows = [] - for suffix in BASELINE_SUFFIXES: - if suffix == '.checksum': - continue - - logging.info(' Checking %s files', suffix) - for platform in self._platforms: - links = self._GenerateBaselineLinks(test_basename, suffix, platform) - if links: - row = self.HTML_TD_NOLINK % self._GetBaselineResultType(suffix) - row += self.HTML_TD_NOLINK % platform - row += links - logging.debug(' html row: %s', row) - - rows.append(self.HTML_TR % row) - - if rows: - test_path = os.path.join(path_utils.LayoutTestsDir(), test) - html = self.HTML_TR_TEST % (path_utils.FilenameToUri(test_path), test) - html += self.HTML_TEST_DETAIL % ' '.join(rows) - - logging.debug(' html for test: %s', html) - return self.HTML_TABLE_TEST % html - - return '' - - def _GetBaselineResultType(self, suffix): - """Name of the baseline result type.""" - - if suffix == '.png': - return 'Pixel' - elif suffix == '.txt': - return 'Render Tree' - else: - return 'Other' + diff_file = GetResultFileFullpath(self._html_directory, + baseline_filename, + platform, + 'diff') + logging.info(' Baseline diff file: "%s"', diff_file) + if os.path.exists(diff_file): + links += html_td_link % {'uri': path_utils.FilenameToUri( + diff_file), 'name': 'Diff'} + else: + logging.info(' No baseline diff file: "%s"', diff_file) + links += self.HTML_TD_NOLINK % '' + + return links + + def _GenerateHtmlForOneTest(self, test): + """Generate html for one rebaselining test. + + Args: + test: layout test name + + Returns: + html that compares baseline results for the test. + """ + + test_basename = os.path.basename(os.path.splitext(test)[0]) + logging.info(' basename: "%s"', test_basename) + rows = [] + for suffix in BASELINE_SUFFIXES: + if suffix == '.checksum': + continue + + logging.info(' Checking %s files', suffix) + for platform in self._platforms: + links = self._GenerateBaselineLinks(test_basename, suffix, + platform) + if links: + row = self.HTML_TD_NOLINK % self._GetBaselineResultType( + suffix) + row += self.HTML_TD_NOLINK % platform + row += links + logging.debug(' html row: %s', row) + + rows.append(self.HTML_TR % row) + + if rows: + test_path = os.path.join(path_utils.LayoutTestsDir(), test) + html = self.HTML_TR_TEST % (path_utils.FilenameToUri(test_path), + test) + html += self.HTML_TEST_DETAIL % ' '.join(rows) + + logging.debug(' html for test: %s', html) + return self.HTML_TABLE_TEST % html + + return '' + + def _GetBaselineResultType(self, suffix): + """Name of the baseline result type.""" + + if suffix == '.png': + return 'Pixel' + elif suffix == '.txt': + return 'Render Tree' + else: + return 'Other' def main(): - """Main function to produce new baselines.""" - - option_parser = optparse.OptionParser() - option_parser.add_option('-v', '--verbose', - action='store_true', - default=False, - help='include debug-level logging.') - - option_parser.add_option('-p', '--platforms', - default='mac,win,win-xp,win-vista,linux', - help=('Comma delimited list of platforms that need ' - 'rebaselining.')) - - option_parser.add_option('-u', '--archive_url', - default=('http://build.chromium.org/buildbot/' - 'layout_test_results'), - help=('Url to find the layout test result archive ' - 'file.')) - - option_parser.add_option('-w', '--webkit_canary', - action='store_true', - default=False, - help=('If True, pull baselines from webkit.org ' - 
'canary bot.')) - - option_parser.add_option('-b', '--backup', - action='store_true', - default=False, - help=('Whether or not to backup the original test ' - 'expectations file after rebaseline.')) - - option_parser.add_option('-d', '--html_directory', - default='', - help=('The directory that stores the results for ' - 'rebaselining comparison.')) - - options = option_parser.parse_args()[0] - - # Set up our logging format. - log_level = logging.INFO - if options.verbose: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, - format=('%(asctime)s %(filename)s:%(lineno)-3d ' - '%(levelname)s %(message)s'), - datefmt='%y%m%d %H:%M:%S') - - # Verify 'platforms' option is valid - if not options.platforms: - logging.error('Invalid "platforms" option. --platforms must be specified ' - 'in order to rebaseline.') - sys.exit(1) - platforms = [p.strip().lower() for p in options.platforms.split(',')] - for platform in platforms: - if not platform in REBASELINE_PLATFORM_ORDER: - logging.error('Invalid platform: "%s"' % (platform)) - sys.exit(1) - - # Adjust the platform order so rebaseline tool is running at the order of - # 'mac', 'win' and 'linux'. This is in same order with layout test baseline - # search paths. It simplifies how the rebaseline tool detects duplicate - # baselines. Check _IsDupBaseline method for details. - rebaseline_platforms = [] - for platform in REBASELINE_PLATFORM_ORDER: - if platform in platforms: - rebaseline_platforms.append(platform) - - options.html_directory = SetupHtmlDirectory(options.html_directory) - - rebaselining_tests = set() - backup = options.backup - for platform in rebaseline_platforms: - rebaseliner = Rebaseliner(platform, options) - - logging.info('') - LogDashedString('Rebaseline started', platform) - if rebaseliner.Run(backup): - # Only need to backup one original copy of test expectation file. - backup = False - LogDashedString('Rebaseline done', platform) - else: - LogDashedString('Rebaseline failed', platform, logging.ERROR) + """Main function to produce new baselines.""" + + option_parser = optparse.OptionParser() + option_parser.add_option('-v', '--verbose', + action='store_true', + default=False, + help='include debug-level logging.') + + option_parser.add_option('-p', '--platforms', + default='mac,win,win-xp,win-vista,linux', + help=('Comma delimited list of platforms ' + 'that need rebaselining.')) + + option_parser.add_option('-u', '--archive_url', + default=('http://build.chromium.org/buildbot/' + 'layout_test_results'), + help=('Url to find the layout test result archive' + ' file.')) + + option_parser.add_option('-w', '--webkit_canary', + action='store_true', + default=False, + help=('If True, pull baselines from webkit.org ' + 'canary bot.')) + + option_parser.add_option('-b', '--backup', + action='store_true', + default=False, + help=('Whether or not to backup the original test' + ' expectations file after rebaseline.')) + + option_parser.add_option('-d', '--html_directory', + default='', + help=('The directory that stores the results for' + ' rebaselining comparison.')) + + options = option_parser.parse_args()[0] + + # Set up our logging format. + log_level = logging.INFO + if options.verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, + format=('%(asctime)s %(filename)s:%(lineno)-3d ' + '%(levelname)s %(message)s'), + datefmt='%y%m%d %H:%M:%S') + + # Verify 'platforms' option is valid + if not options.platforms: + logging.error('Invalid "platforms" option. 
--platforms must be ' + 'specified in order to rebaseline.') + sys.exit(1) + platforms = [p.strip().lower() for p in options.platforms.split(',')] + for platform in platforms: + if not platform in REBASELINE_PLATFORM_ORDER: + logging.error('Invalid platform: "%s"' % (platform)) + sys.exit(1) + + # Adjust the platform order so rebaseline tool is running at the order of + # 'mac', 'win' and 'linux'. This is in same order with layout test baseline + # search paths. It simplifies how the rebaseline tool detects duplicate + # baselines. Check _IsDupBaseline method for details. + rebaseline_platforms = [] + for platform in REBASELINE_PLATFORM_ORDER: + if platform in platforms: + rebaseline_platforms.append(platform) + + options.html_directory = SetupHtmlDirectory(options.html_directory) + + rebaselining_tests = set() + backup = options.backup + for platform in rebaseline_platforms: + rebaseliner = Rebaseliner(platform, options) + + logging.info('') + LogDashedString('Rebaseline started', platform) + if rebaseliner.Run(backup): + # Only need to backup one original copy of test expectation file. + backup = False + LogDashedString('Rebaseline done', platform) + else: + LogDashedString('Rebaseline failed', platform, logging.ERROR) - rebaselining_tests |= set(rebaseliner.GetRebaseliningTests()) + rebaselining_tests |= set(rebaseliner.GetRebaseliningTests()) - logging.info('') - LogDashedString('Rebaselining result comparison started', None) - html_generator = HtmlGenerator(options, - rebaseline_platforms, - rebaselining_tests) - html_generator.GenerateHtml() - html_generator.ShowHtml() - LogDashedString('Rebaselining result comparison done', None) + logging.info('') + LogDashedString('Rebaselining result comparison started', None) + html_generator = HtmlGenerator(options, + rebaseline_platforms, + rebaselining_tests) + html_generator.GenerateHtml() + html_generator.ShowHtml() + LogDashedString('Rebaselining result comparison done', None) - sys.exit(0) + sys.exit(0) if '__main__' == __name__: - main() + main() diff --git a/webkit/tools/layout_tests/run_webkit_tests.py b/webkit/tools/layout_tests/run_webkit_tests.py index 3f66e87..cfca88b 100755 --- a/webkit/tools/layout_tests/run_webkit_tests.py +++ b/webkit/tools/layout_tests/run_webkit_tests.py @@ -66,1542 +66,1592 @@ BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" TestExpectationsFile = test_expectations.TestExpectationsFile + class TestInfo: - """Groups information about a test for easy passing of data.""" - def __init__(self, filename, timeout): - """Generates the URI and stores the filename and timeout for this test. - Args: - filename: Full path to the test. - timeout: Timeout for running the test in TestShell. - """ - self.filename = filename - self.uri = path_utils.FilenameToUri(filename) - self.timeout = timeout - expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') - try: - self.image_hash = open(expected_hash_file, "r").read() - except IOError, e: - if errno.ENOENT != e.errno: - raise - self.image_hash = None + """Groups information about a test for easy passing of data.""" + + def __init__(self, filename, timeout): + """Generates the URI and stores the filename and timeout for this test. + Args: + filename: Full path to the test. + timeout: Timeout for running the test in TestShell. 
+ """ + self.filename = filename + self.uri = path_utils.FilenameToUri(filename) + self.timeout = timeout + expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') + try: + self.image_hash = open(expected_hash_file, "r").read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + self.image_hash = None class ResultSummary(object): - """A class for partitioning the test results we get into buckets. - - This class is basically a glorified struct and it's private to this file - so we don't bother with any information hiding.""" - def __init__(self, expectations, test_files): - self.total = len(test_files) - self.remaining = self.total - self.expectations = expectations - self.expected = 0 - self.unexpected = 0 - self.tests_by_expectation = {} - self.tests_by_timeline = {} - self.results = {} - self.unexpected_results = {} - self.failures = {} - self.tests_by_expectation[test_expectations.SKIP] = set() - for expectation in TestExpectationsFile.EXPECTATIONS.values(): - self.tests_by_expectation[expectation] = set() - for timeline in TestExpectationsFile.TIMELINES.values(): - self.tests_by_timeline[timeline] = expectations.GetTestsWithTimeline( - timeline) - - def Add(self, test, failures, result, expected): - """Add a result into the appropriate bin. - - Args: - test: test file name - failures: list of failure objects from test execution - result: result of test (PASS, IMAGE, etc.). - expected: whether the result was what we expected it to be. - """ - - self.tests_by_expectation[result].add(test) - self.results[test] = result - self.remaining -= 1 - if len(failures): - self.failures[test] = failures - if expected: - self.expected += 1 - else: - self.unexpected_results[test] = result - self.unexpected += 1 + """A class for partitioning the test results we get into buckets. + + This class is basically a glorified struct and it's private to this file + so we don't bother with any information hiding.""" + + def __init__(self, expectations, test_files): + self.total = len(test_files) + self.remaining = self.total + self.expectations = expectations + self.expected = 0 + self.unexpected = 0 + self.tests_by_expectation = {} + self.tests_by_timeline = {} + self.results = {} + self.unexpected_results = {} + self.failures = {} + self.tests_by_expectation[test_expectations.SKIP] = set() + for expectation in TestExpectationsFile.EXPECTATIONS.values(): + self.tests_by_expectation[expectation] = set() + for timeline in TestExpectationsFile.TIMELINES.values(): + self.tests_by_timeline[timeline] = ( + expectations.GetTestsWithTimeline(timeline)) + + def Add(self, test, failures, result, expected): + """Add a result into the appropriate bin. + + Args: + test: test file name + failures: list of failure objects from test execution + result: result of test (PASS, IMAGE, etc.). + expected: whether the result was what we expected it to be. + """ + + self.tests_by_expectation[result].add(test) + self.results[test] = result + self.remaining -= 1 + if len(failures): + self.failures[test] = failures + if expected: + self.expected += 1 + else: + self.unexpected_results[test] = result + self.unexpected += 1 class TestRunner: - """A class for managing running a series of tests on a series of layout test - files.""" - - HTTP_SUBDIR = os.sep.join(['', 'http', '']) - WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) - - # The per-test timeout in milliseconds, if no --time-out-ms option was given - # to run_webkit_tests. This should correspond to the default timeout in - # test_shell.exe. 
- DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 - - NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 - - def __init__(self, options, meter): - """Initialize test runner data structures. - - Args: - options: a dictionary of command line options - meter: a MeteredStream object to record updates to. - """ - self._options = options - self._meter = meter - - if options.use_apache: - self._http_server = apache_http_server.LayoutTestApacheHttpd( - options.results_directory) - else: - self._http_server = http_server.Lighttpd(options.results_directory) - - self._websocket_server = websocket_server.PyWebSocket( - options.results_directory) - # disable wss server. need to install pyOpenSSL on buildbots. - # self._websocket_secure_server = websocket_server.PyWebSocket( - # options.results_directory, use_tls=True, port=9323) - - # a list of TestType objects - self._test_types = [] - - # a set of test files, and the same tests as a list - self._test_files = set() - self._test_files_list = None - self._file_dir = path_utils.GetAbsolutePath(os.path.dirname(sys.argv[0])) - self._result_queue = Queue.Queue() - - # These are used for --log detailed-progress to track status by directory. - self._current_dir = None - self._current_progress_str = "" - self._current_test_number = 0 - - def __del__(self): - logging.debug("flushing stdout") - sys.stdout.flush() - logging.debug("flushing stderr") - sys.stderr.flush() - logging.debug("stopping http server") - # Stop the http server. - self._http_server.Stop() - # Stop the Web Socket / Web Socket Secure servers. - self._websocket_server.Stop() - # self._websocket_secure_server.Stop() - - def GatherFilePaths(self, paths): - """Find all the files to test. - - Args: - paths: a list of globs to use instead of the defaults.""" - self._test_files = test_files.GatherTestFiles(paths) - - def ParseExpectations(self, platform, is_debug_mode): - """Parse the expectations from the test_list files and return a data - structure holding them. Throws an error if the test_list files have invalid - syntax. - """ - if self._options.lint_test_files: - test_files = None - else: - test_files = self._test_files - - try: - self._expectations = test_expectations.TestExpectations(test_files, - self._file_dir, platform, is_debug_mode, - self._options.lint_test_files) - return self._expectations - except Exception, err: - if self._options.lint_test_files: - print str(err) - else: - raise err - - def PrepareListsAndPrintOutput(self, write): - """Create appropriate subsets of test lists and returns a ResultSummary - object. Also prints expected test counts. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - """ - - # Remove skipped - both fixable and ignored - files from the - # top-level list of files to test. - num_all_test_files = len(self._test_files) - write("Found: %d tests" % (len(self._test_files))) - skipped = set() - if num_all_test_files > 1 and not self._options.force: - skipped = self._expectations.GetTestsWithResultType( - test_expectations.SKIP) - self._test_files -= skipped - - # Create a sorted list of test files so the subset chunk, if used, contains - # alphabetically consecutive tests. - self._test_files_list = list(self._test_files) - if self._options.randomize_order: - random.shuffle(self._test_files_list) - else: - self._test_files_list.sort() - - # If the user specifies they just want to run a subset of the tests, - # just grab a subset of the non-skipped tests. 
- if self._options.run_chunk or self._options.run_part: - chunk_value = self._options.run_chunk or self._options.run_part - test_files = self._test_files_list - try: - (chunk_num, chunk_len) = chunk_value.split(":") - chunk_num = int(chunk_num) - assert(chunk_num >= 0) - test_size = int(chunk_len) - assert(test_size > 0) - except: - logging.critical("invalid chunk '%s'" % chunk_value) - sys.exit(1) - - # Get the number of tests - num_tests = len(test_files) - - # Get the start offset of the slice. - if self._options.run_chunk: - chunk_len = test_size - # In this case chunk_num can be really large. We need to make the - # slave fit in the current number of tests. - slice_start = (chunk_num * chunk_len) % num_tests - else: - # Validate the data. - assert(test_size <= num_tests) - assert(chunk_num <= test_size) - - # To count the chunk_len, and make sure we don't skip some tests, we - # round to the next value that fits exacly all the parts. - rounded_tests = num_tests - if rounded_tests % test_size != 0: - rounded_tests = num_tests + test_size - (num_tests % test_size) - - chunk_len = rounded_tests / test_size - slice_start = chunk_len * (chunk_num - 1) - # It does not mind if we go over test_size. - - # Get the end offset of the slice. - slice_end = min(num_tests, slice_start + chunk_len) - - files = test_files[slice_start:slice_end] - - tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( - (slice_end - slice_start), slice_start, slice_end, num_tests) - write(tests_run_msg) - - # If we reached the end and we don't have enough tests, we run some - # from the beginning. - if self._options.run_chunk and (slice_end - slice_start < chunk_len): - extra = 1 + chunk_len - (slice_end - slice_start) - extra_msg = ' last chunk is partial, appending [0:%d]' % extra - write(extra_msg) - tests_run_msg += "\n" + extra_msg - files.extend(test_files[0:extra]) - tests_run_filename = os.path.join(self._options.results_directory, - "tests_run.txt") - tests_run_file = open(tests_run_filename, "w") - tests_run_file.write(tests_run_msg + "\n") - tests_run_file.close() - - len_skip_chunk = int(len(files) * len(skipped) / - float(len(self._test_files))) - skip_chunk_list = list(skipped)[0:len_skip_chunk] - skip_chunk = set(skip_chunk_list) - - # Update expectations so that the stats are calculated correctly. - # We need to pass a list that includes the right # of skipped files - # to ParseExpectations so that ResultSummary() will get the correct - # stats. So, we add in the subset of skipped files, and then subtract - # them back out. 
- self._test_files_list = files + skip_chunk_list - self._test_files = set(self._test_files_list) - - self._expectations = self.ParseExpectations( - path_utils.PlatformName(), options.target == 'Debug') - - self._test_files = set(files) - self._test_files_list = files - else: - skip_chunk = skipped - - result_summary = ResultSummary(self._expectations, - self._test_files | skip_chunk) - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.PASS, "passes") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.FAIL, "failures") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.FLAKY, "flaky") - self._PrintExpectedResultsOfType(write, result_summary, - test_expectations.SKIP, "skipped") - - - if self._options.force: - write('Running all tests, including skips (--force)') - else: - # Note that we don't actually run the skipped tests (they were - # subtracted out of self._test_files, above), but we stub out the - # results here so the statistics can remain accurate. - for test in skip_chunk: - result_summary.Add(test, [], test_expectations.SKIP, expected=True) - write("") - - return result_summary - - def AddTestType(self, test_type): - """Add a TestType to the TestRunner.""" - self._test_types.append(test_type) - - def _GetDirForTestFile(self, test_file): - """Returns the highest-level directory by which to shard the given test - file.""" - index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) - - test_file = test_file[index + len('LayoutTests/'):] - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - test_file = test_file_parts[1] - - # The http tests are very stable on mac/linux. - # TODO(ojan): Make the http server on Windows be apache so we can turn - # shard the http tests there as well. Switching to apache is what made them - # stable on linux/mac. - return_value = directory - while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) and - test_file.find(os.sep) >= 0): - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - return_value = os.path.join(return_value, directory) - test_file = test_file_parts[1] - - return return_value - - def _GetTestInfoForFile(self, test_file): - """Returns the appropriate TestInfo object for the file. Mostly this is used - for looking up the timeout value (in ms) to use for the given test.""" - if self._expectations.HasModifier(test_file, test_expectations.SLOW): - return TestInfo(test_file, self._options.slow_time_out_ms) - return TestInfo(test_file, self._options.time_out_ms) - - def _GetTestFileQueue(self, test_files): - """Create the thread safe queue of lists of (test filenames, test URIs) - tuples. Each TestShellThread pulls a list from this queue and runs those - tests in order before grabbing the next available list. - - Shard the lists by directory. This helps ensure that tests that depend - on each other (aka bad tests!) continue to run together as most - cross-tests dependencies tend to occur within the same directory. - - Return: - The Queue of lists of TestInfo objects. 
- """ - - if (self._options.experimental_fully_parallel or - self._IsSingleThreaded()): - filename_queue = Queue.Queue() - for test_file in test_files: - filename_queue.put(('.', [self._GetTestInfoForFile(test_file)])) - return filename_queue - - tests_by_dir = {} - for test_file in test_files: - directory = self._GetDirForTestFile(test_file) - tests_by_dir.setdefault(directory, []) - tests_by_dir[directory].append(self._GetTestInfoForFile(test_file)) - - # Sort by the number of tests in the dir so that the ones with the most - # tests get run first in order to maximize parallelization. Number of tests - # is a good enough, but not perfect, approximation of how long that set of - # tests will take to run. We can't just use a PriorityQueue until we move - # to Python 2.6. - test_lists = [] - http_tests = None - for directory in tests_by_dir: - test_list = tests_by_dir[directory] - # Keep the tests in alphabetical order. - # TODO: Remove once tests are fixed so they can be run in any order. - test_list.reverse() - test_list_tuple = (directory, test_list) - if directory == 'LayoutTests' + os.sep + 'http': - http_tests = test_list_tuple - else: - test_lists.append(test_list_tuple) - test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) - - # Put the http tests first. There are only a couple hundred of them, but - # each http test takes a very long time to run, so sorting by the number - # of tests doesn't accurately capture how long they take to run. - if http_tests: - test_lists.insert(0, http_tests) - - filename_queue = Queue.Queue() - for item in test_lists: - filename_queue.put(item) - return filename_queue - - def _GetTestShellArgs(self, index): - """Returns the tuple of arguments for tests and for test_shell.""" - shell_args = [] - test_args = test_type_base.TestArguments() - if not self._options.no_pixel_tests: - png_path = os.path.join(self._options.results_directory, - "png_result%s.png" % index) - shell_args.append("--pixel-tests=" + png_path) - test_args.png_path = png_path - - test_args.new_baseline = self._options.new_baseline - - test_args.show_sources = self._options.sources - - if self._options.startup_dialog: - shell_args.append('--testshell-startup-dialog') - - if self._options.gp_fault_error_box: - shell_args.append('--gp-fault-error-box') - - return (test_args, shell_args) - - def _ContainsTests(self, subdir): - for test_file in self._test_files_list: - if test_file.find(subdir) >= 0: - return True - return False + """A class for managing running a series of tests on a series of layout + test files.""" - def _InstantiateTestShellThreads(self, test_shell_binary, test_files, - result_summary): - """Instantitates and starts the TestShellThread(s). + HTTP_SUBDIR = os.sep.join(['', 'http', '']) + WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) - Return: - The list of threads. - """ - test_shell_command = [test_shell_binary] - - if self._options.wrapper: - # This split() isn't really what we want -- it incorrectly will - # split quoted strings within the wrapper argument -- but in - # practice it shouldn't come up and the --help output warns - # about it anyway. - test_shell_command = self._options.wrapper.split() + test_shell_command - - filename_queue = self._GetTestFileQueue(test_files) - - # Instantiate TestShellThreads and start them. - threads = [] - for i in xrange(int(self._options.num_test_shells)): - # Create separate TestTypes instances for each thread. 
- test_types = [] - for t in self._test_types: - test_types.append(t(self._options.platform, - self._options.results_directory)) - - test_args, shell_args = self._GetTestShellArgs(i) - thread = test_shell_thread.TestShellThread(filename_queue, - self._result_queue, - test_shell_command, - test_types, - test_args, - shell_args, - self._options) - if self._IsSingleThreaded(): - thread.RunInMainThread(self, result_summary) - else: - thread.start() - threads.append(thread) - - return threads - - def _StopLayoutTestHelper(self, proc): - """Stop the layout test helper and closes it down.""" - if proc: - logging.debug("Stopping layout test helper") - proc.stdin.write("x\n") - proc.stdin.close() - proc.wait() - - def _IsSingleThreaded(self): - """Returns whether we should run all the tests in the main thread.""" - return int(self._options.num_test_shells) == 1 - - def _RunTests(self, test_shell_binary, file_list, result_summary): - """Runs the tests in the file_list. - - Return: A tuple (failures, thread_timings, test_timings, - individual_test_timings) - failures is a map from test to list of failure types - thread_timings is a list of dicts with the total runtime of each thread - with 'name', 'num_tests', 'total_time' properties - test_timings is a list of timings for each sharded subdirectory of the - form [time, directory_name, num_tests] - individual_test_timings is a list of run times for each test in the form - {filename:filename, test_run_time:test_run_time} - result_summary: summary object to populate with the results - """ - threads = self._InstantiateTestShellThreads(test_shell_binary, file_list, - result_summary) - - # Wait for the threads to finish and collect test failures. - failures = {} - test_timings = {} - individual_test_timings = [] - thread_timings = [] - try: - for thread in threads: - while thread.isAlive(): - # Let it timeout occasionally so it can notice a KeyboardInterrupt - # Actually, the timeout doesn't really matter: apparently it - # suffices to not use an indefinite blocking join for it to - # be interruptible by KeyboardInterrupt. - thread.join(0.1) - self.UpdateSummary(result_summary) - thread_timings.append({ 'name': thread.getName(), - 'num_tests': thread.GetNumTests(), - 'total_time': thread.GetTotalTime()}); - test_timings.update(thread.GetDirectoryTimingStats()) - individual_test_timings.extend(thread.GetIndividualTestStats()) - except KeyboardInterrupt: - for thread in threads: - thread.Cancel() - self._StopLayoutTestHelper(layout_test_helper_proc) - raise - for thread in threads: - # Check whether a TestShellThread died before normal completion. - exception_info = thread.GetExceptionInfo() - if exception_info is not None: - # Re-raise the thread's exception here to make it clear that - # testing was aborted. Otherwise, the tests that did not run - # would be assumed to have passed. - raise exception_info[0], exception_info[1], exception_info[2] - - # Make sure we pick up any remaining tests. - self.UpdateSummary(result_summary) - return (thread_timings, test_timings, individual_test_timings) - - def Run(self, result_summary): - """Run all our tests on all our test files. - - For each test file, we run each test type. If there are any failures, we - collect them for reporting. + # The per-test timeout in milliseconds, if no --time-out-ms option was + # given to run_webkit_tests. This should correspond to the default timeout + # in test_shell.exe. + DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 - Args: - result_summary: a summary object tracking the test results. 
+ NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 - Return: - We return nonzero if there are regressions compared to the last run. - """ - if not self._test_files: - return 0 - start_time = time.time() - test_shell_binary = path_utils.TestShellPath(self._options.target) + def __init__(self, options, meter): + """Initialize test runner data structures. - # Start up any helper needed - layout_test_helper_proc = None - if not options.no_pixel_tests: - helper_path = path_utils.LayoutTestHelperPath(self._options.target) - if len(helper_path): - logging.debug("Starting layout helper %s" % helper_path) - layout_test_helper_proc = subprocess.Popen([helper_path], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=None) - is_ready = layout_test_helper_proc.stdout.readline() - if not is_ready.startswith('ready'): - logging.error("layout_test_helper failed to be ready") - - # Check that the system dependencies (themes, fonts, ...) are correct. - if not self._options.nocheck_sys_deps: - proc = subprocess.Popen([test_shell_binary, - "--check-layout-test-sys-deps"]) - if proc.wait() != 0: - logging.info("Aborting because system dependencies check failed.\n" - "To override, invoke with --nocheck-sys-deps") - sys.exit(1) + Args: + options: a dictionary of command line options + meter: a MeteredStream object to record updates to. + """ + self._options = options + self._meter = meter - if self._ContainsTests(self.HTTP_SUBDIR): - self._http_server.Start() - - if self._ContainsTests(self.WEBSOCKET_SUBDIR): - self._websocket_server.Start() - # self._websocket_secure_server.Start() - - thread_timings, test_timings, individual_test_timings = ( - self._RunTests(test_shell_binary, self._test_files_list, - result_summary)) - - # We exclude the crashes from the list of results to retry, because - # we want to treat even a potentially flaky crash as an error. - failures = self._GetFailures(result_summary, include_crashes=False) - retries = 0 - retry_summary = result_summary - while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and len(failures)): - logging.debug("Retrying %d unexpected failure(s)" % len(failures)) - retries += 1 - retry_summary = ResultSummary(self._expectations, failures.keys()) - self._RunTests(test_shell_binary, failures.keys(), retry_summary) - failures = self._GetFailures(retry_summary, include_crashes=True) - - self._StopLayoutTestHelper(layout_test_helper_proc) - end_time = time.time() - - write = CreateLoggingWriter(self._options, 'timing') - self._PrintTimingStatistics(write, end_time - start_time, - thread_timings, test_timings, - individual_test_timings, - result_summary) - - self._meter.update("") - - if self._options.verbose: - # We write this block to stdout for compatibility with the buildbot - # log parser, which only looks at stdout, not stderr :( - write = lambda s: sys.stdout.write("%s\n" % s) - else: - write = CreateLoggingWriter(self._options, 'actual') - - self._PrintResultSummary(write, result_summary) - - sys.stdout.flush() - sys.stderr.flush() - - if (LOG_DETAILED_PROGRESS in self._options.log or - (LOG_UNEXPECTED in self._options.log and - result_summary.total != result_summary.expected)): - print - - # This summary data gets written to stdout regardless of log level - self._PrintOneLineSummary(result_summary.total, result_summary.expected) - - unexpected_results = self._SummarizeUnexpectedResults(result_summary, - retry_summary) - self._PrintUnexpectedResults(unexpected_results) - - # Write the same data to log files. 
- self._WriteJSONFiles(unexpected_results, result_summary, - individual_test_timings) - - # Write the summary to disk (results.html) and maybe open the test_shell - # to this file. - wrote_results = self._WriteResultsHtmlFile(result_summary) - if not self._options.noshow_results and wrote_results: - self._ShowResultsHtmlFile() - - # Ignore flaky failures and unexpected passes so we don't turn the - # bot red for those. - return unexpected_results['num_regressions'] - - def UpdateSummary(self, result_summary): - """Update the summary while running tests.""" - while True: - try: - (test, fail_list) = self._result_queue.get_nowait() - result = test_failures.DetermineResultType(fail_list) - expected = self._expectations.MatchesAnExpectedResult(test, result) - result_summary.Add(test, fail_list, result, expected) - if (LOG_DETAILED_PROGRESS in self._options.log and - (self._options.experimental_fully_parallel or - self._IsSingleThreaded())): - self._DisplayDetailedProgress(result_summary) + if options.use_apache: + self._http_server = apache_http_server.LayoutTestApacheHttpd( + options.results_directory) else: - if not expected and LOG_UNEXPECTED in self._options.log: - self._PrintUnexpectedTestResult(test, result) - self._DisplayOneLineProgress(result_summary) - except Queue.Empty: - return - - def _DisplayOneLineProgress(self, result_summary): - """Displays the progress through the test run.""" - self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % - (result_summary.expected, result_summary.unexpected, - result_summary.remaining)) - - def _DisplayDetailedProgress(self, result_summary): - """Display detailed progress output where we print the directory name - and one dot for each completed test. This is triggered by - "--log detailed-progress".""" - if self._current_test_number == len(self._test_files_list): - return - - next_test = self._test_files_list[self._current_test_number] - next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) - if self._current_progress_str == "": - self._current_progress_str = "%s: " % (next_dir) - self._current_dir = next_dir - - while next_test in result_summary.results: - if next_dir != self._current_dir: - self._meter.write("%s\n" % (self._current_progress_str)) - self._current_progress_str = "%s: ." % (next_dir) - self._current_dir = next_dir - else: - self._current_progress_str += "." - - if (next_test in result_summary.unexpected_results and - LOG_UNEXPECTED in self._options.log): - result = result_summary.unexpected_results[next_test] - self._meter.write("%s\n" % self._current_progress_str) - self._PrintUnexpectedTestResult(next_test, result) - self._current_progress_str = "%s: " % self._current_dir - - self._current_test_number += 1 - if self._current_test_number == len(self._test_files_list): - break - - next_test = self._test_files_list[self._current_test_number] - next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) - - if result_summary.remaining: - remain_str = " (%d)" % (result_summary.remaining) - self._meter.update("%s%s" % (self._current_progress_str, remain_str)) - else: - self._meter.write("%s\n" % (self._current_progress_str)) - - def _GetFailures(self, result_summary, include_crashes): - """Filters a dict of results and returns only the failures. - - Args: - result_summary: the results of the test run - include_crashes: whether crashes are included in the output. - We use False when finding the list of failures to retry - to see if the results were flaky. 
Although the crashes may also be - flaky, we treat them as if they aren't so that they're not ignored. - Returns: - a dict of files -> results - """ - failed_results = {} - for test, result in result_summary.unexpected_results.iteritems(): - if (result == test_expectations.PASS or - result == test_expectations.CRASH and not include_crashes): - continue - failed_results[test] = result - - return failed_results - - def _SummarizeUnexpectedResults(self, result_summary, retry_summary): - """Summarize any unexpected results as a dict. - - TODO(dpranke): split this data structure into a separate class? + self._http_server = http_server.Lighttpd(options.results_directory) + + self._websocket_server = websocket_server.PyWebSocket( + options.results_directory) + # disable wss server. need to install pyOpenSSL on buildbots. + # self._websocket_secure_server = websocket_server.PyWebSocket( + # options.results_directory, use_tls=True, port=9323) + + # a list of TestType objects + self._test_types = [] + + # a set of test files, and the same tests as a list + self._test_files = set() + self._test_files_list = None + self._file_dir = path_utils.GetAbsolutePath( + os.path.dirname(sys.argv[0])) + self._result_queue = Queue.Queue() + + # These are used for --log detailed-progress to track status by + # directory. + self._current_dir = None + self._current_progress_str = "" + self._current_test_number = 0 + + def __del__(self): + logging.debug("flushing stdout") + sys.stdout.flush() + logging.debug("flushing stderr") + sys.stderr.flush() + logging.debug("stopping http server") + # Stop the http server. + self._http_server.Stop() + # Stop the Web Socket / Web Socket Secure servers. + self._websocket_server.Stop() + # self._websocket_secure_server.Stop() + + def GatherFilePaths(self, paths): + """Find all the files to test. + + Args: + paths: a list of globs to use instead of the defaults.""" + self._test_files = test_files.GatherTestFiles(paths) + + def ParseExpectations(self, platform, is_debug_mode): + """Parse the expectations from the test_list files and return a data + structure holding them. Throws an error if the test_list files have + invalid syntax.""" + if self._options.lint_test_files: + test_files = None + else: + test_files = self._test_files + + try: + self._expectations = test_expectations.TestExpectations(test_files, + self._file_dir, platform, is_debug_mode, + self._options.lint_test_files) + return self._expectations + except Exception, err: + if self._options.lint_test_files: + print str(err) + else: + raise err + + def PrepareListsAndPrintOutput(self, write): + """Create appropriate subsets of test lists and returns a + ResultSummary object. Also prints expected test counts. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + """ + + # Remove skipped - both fixable and ignored - files from the + # top-level list of files to test. + num_all_test_files = len(self._test_files) + write("Found: %d tests" % (len(self._test_files))) + skipped = set() + if num_all_test_files > 1 and not self._options.force: + skipped = self._expectations.GetTestsWithResultType( + test_expectations.SKIP) + self._test_files -= skipped + + # Create a sorted list of test files so the subset chunk, + # if used, contains alphabetically consecutive tests. 
+ self._test_files_list = list(self._test_files) + if self._options.randomize_order: + random.shuffle(self._test_files_list) + else: + self._test_files_list.sort() + + # If the user specifies they just want to run a subset of the tests, + # just grab a subset of the non-skipped tests. + if self._options.run_chunk or self._options.run_part: + chunk_value = self._options.run_chunk or self._options.run_part + test_files = self._test_files_list + try: + (chunk_num, chunk_len) = chunk_value.split(":") + chunk_num = int(chunk_num) + assert(chunk_num >= 0) + test_size = int(chunk_len) + assert(test_size > 0) + except: + logging.critical("invalid chunk '%s'" % chunk_value) + sys.exit(1) + + # Get the number of tests + num_tests = len(test_files) + + # Get the start offset of the slice. + if self._options.run_chunk: + chunk_len = test_size + # In this case chunk_num can be really large. We need + # to make the slave fit in the current number of tests. + slice_start = (chunk_num * chunk_len) % num_tests + else: + # Validate the data. + assert(test_size <= num_tests) + assert(chunk_num <= test_size) + + # To count the chunk_len, and make sure we don't skip + # some tests, we round to the next value that fits exactly + # all the parts. + rounded_tests = num_tests + if rounded_tests % test_size != 0: + rounded_tests = (num_tests + test_size - + (num_tests % test_size)) + + chunk_len = rounded_tests / test_size + slice_start = chunk_len * (chunk_num - 1) + # It does not mind if we go over test_size. + + # Get the end offset of the slice. + slice_end = min(num_tests, slice_start + chunk_len) + + files = test_files[slice_start:slice_end] + + tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( + (slice_end - slice_start), slice_start, slice_end, num_tests) + write(tests_run_msg) + + # If we reached the end and we don't have enough tests, we run some + # from the beginning. + if (self._options.run_chunk and + (slice_end - slice_start < chunk_len)): + extra = 1 + chunk_len - (slice_end - slice_start) + extra_msg = (' last chunk is partial, appending [0:%d]' % + extra) + write(extra_msg) + tests_run_msg += "\n" + extra_msg + files.extend(test_files[0:extra]) + tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = open(tests_run_filename, "w") + tests_run_file.write(tests_run_msg + "\n") + tests_run_file.close() + + len_skip_chunk = int(len(files) * len(skipped) / + float(len(self._test_files))) + skip_chunk_list = list(skipped)[0:len_skip_chunk] + skip_chunk = set(skip_chunk_list) + + # Update expectations so that the stats are calculated correctly. + # We need to pass a list that includes the right # of skipped files + # to ParseExpectations so that ResultSummary() will get the correct + # stats. So, we add in the subset of skipped files, and then + # subtract them back out. 
+ self._test_files_list = files + skip_chunk_list + self._test_files = set(self._test_files_list) + + self._expectations = self.ParseExpectations( + path_utils.PlatformName(), options.target == 'Debug') + + self._test_files = set(files) + self._test_files_list = files + else: + skip_chunk = skipped + + result_summary = ResultSummary(self._expectations, + self._test_files | skip_chunk) + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.PASS, "passes") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.FAIL, "failures") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.FLAKY, "flaky") + self._PrintExpectedResultsOfType(write, result_summary, + test_expectations.SKIP, "skipped") + + + if self._options.force: + write('Running all tests, including skips (--force)') + else: + # Note that we don't actually run the skipped tests (they were + # subtracted out of self._test_files, above), but we stub out the + # results here so the statistics can remain accurate. + for test in skip_chunk: + result_summary.Add(test, [], test_expectations.SKIP, + expected=True) + write("") + + return result_summary + + def AddTestType(self, test_type): + """Add a TestType to the TestRunner.""" + self._test_types.append(test_type) + + def _GetDirForTestFile(self, test_file): + """Returns the highest-level directory by which to shard the given + test file.""" + index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) + + test_file = test_file[index + len('LayoutTests/'):] + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + test_file = test_file_parts[1] + + # The http tests are very stable on mac/linux. + # TODO(ojan): Make the http server on Windows be apache so we can + # turn shard the http tests there as well. Switching to apache is + # what made them stable on linux/mac. + return_value = directory + while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) + and test_file.find(os.sep) >= 0): + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + return_value = os.path.join(return_value, directory) + test_file = test_file_parts[1] + + return return_value + + def _GetTestInfoForFile(self, test_file): + """Returns the appropriate TestInfo object for the file. Mostly this + is used for looking up the timeout value (in ms) to use for the given + test.""" + if self._expectations.HasModifier(test_file, test_expectations.SLOW): + return TestInfo(test_file, self._options.slow_time_out_ms) + return TestInfo(test_file, self._options.time_out_ms) + + def _GetTestFileQueue(self, test_files): + """Create the thread safe queue of lists of (test filenames, test URIs) + tuples. Each TestShellThread pulls a list from this queue and runs + those tests in order before grabbing the next available list. + + Shard the lists by directory. This helps ensure that tests that depend + on each other (aka bad tests!) continue to run together as most + cross-tests dependencies tend to occur within the same directory. + + Return: + The Queue of lists of TestInfo objects. 
+        """
+
+        if (self._options.experimental_fully_parallel or
+            self._IsSingleThreaded()):
+            filename_queue = Queue.Queue()
+            for test_file in test_files:
+                filename_queue.put(('.', [self._GetTestInfoForFile(test_file)]))
+            return filename_queue
+
+        tests_by_dir = {}
+        for test_file in test_files:
+            directory = self._GetDirForTestFile(test_file)
+            tests_by_dir.setdefault(directory, [])
+            tests_by_dir[directory].append(self._GetTestInfoForFile(test_file))
+
+        # Sort by the number of tests in the dir so that the ones with the
+        # most tests get run first in order to maximize parallelization.
+        # Number of tests is a good enough, but not perfect, approximation
+        # of how long that set of tests will take to run. We can't just use
+        # a PriorityQueue until we move to Python 2.6.
+        test_lists = []
+        http_tests = None
+        for directory in tests_by_dir:
+            test_list = tests_by_dir[directory]
+            # Keep the tests in alphabetical order.
+            # TODO: Remove once tests are fixed so they can be run in any
+            # order.
+            test_list.reverse()
+            test_list_tuple = (directory, test_list)
+            if directory == 'LayoutTests' + os.sep + 'http':
+                http_tests = test_list_tuple
+            else:
+                test_lists.append(test_list_tuple)
+        test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+        # Put the http tests first. There are only a couple hundred of them,
+        # but each http test takes a very long time to run, so sorting by the
+        # number of tests doesn't accurately capture how long they take to run.
+        if http_tests:
+            test_lists.insert(0, http_tests)
+
+        filename_queue = Queue.Queue()
+        for item in test_lists:
+            filename_queue.put(item)
+        return filename_queue
+
+    def _GetTestShellArgs(self, index):
+        """Returns the tuple of arguments for tests and for test_shell."""
+        shell_args = []
+        test_args = test_type_base.TestArguments()
+        if not self._options.no_pixel_tests:
+            png_path = os.path.join(self._options.results_directory,
+                                    "png_result%s.png" % index)
+            shell_args.append("--pixel-tests=" + png_path)
+            test_args.png_path = png_path
+
+        test_args.new_baseline = self._options.new_baseline
+
+        test_args.show_sources = self._options.sources
+
+        if self._options.startup_dialog:
+            shell_args.append('--testshell-startup-dialog')
+
+        if self._options.gp_fault_error_box:
+            shell_args.append('--gp-fault-error-box')
+
+        return (test_args, shell_args)
+
+    def _ContainsTests(self, subdir):
+        for test_file in self._test_files_list:
+            if test_file.find(subdir) >= 0:
+                return True
+        return False
+
+    def _InstantiateTestShellThreads(self, test_shell_binary, test_files,
+                                     result_summary):
+        """Instantitates and starts the TestShellThread(s).
+
+        Return:
+            The list of threads.
+        """
+        test_shell_command = [test_shell_binary]
+
+        if self._options.wrapper:
+            # This split() isn't really what we want -- it incorrectly will
+            # split quoted strings within the wrapper argument -- but in
+            # practice it shouldn't come up and the --help output warns
+            # about it anyway.
+            test_shell_command = (self._options.wrapper.split() +
+                                  test_shell_command)
+
+        filename_queue = self._GetTestFileQueue(test_files)
+
+        # Instantiate TestShellThreads and start them.
+        threads = []
+        for i in xrange(int(self._options.num_test_shells)):
+            # Create separate TestTypes instances for each thread.
+ test_types = [] + for t in self._test_types: + test_types.append(t(self._options.platform, + self._options.results_directory)) + + test_args, shell_args = self._GetTestShellArgs(i) + thread = test_shell_thread.TestShellThread(filename_queue, + self._result_queue, + test_shell_command, + test_types, + test_args, + shell_args, + self._options) + if self._IsSingleThreaded(): + thread.RunInMainThread(self, result_summary) + else: + thread.start() + threads.append(thread) + + return threads + + def _StopLayoutTestHelper(self, proc): + """Stop the layout test helper and closes it down.""" + if proc: + logging.debug("Stopping layout test helper") + proc.stdin.write("x\n") + proc.stdin.close() + proc.wait() + + def _IsSingleThreaded(self): + """Returns whether we should run all the tests in the main thread.""" + return int(self._options.num_test_shells) == 1 + + def _RunTests(self, test_shell_binary, file_list, result_summary): + """Runs the tests in the file_list. + + Return: A tuple (failures, thread_timings, test_timings, + individual_test_timings) + failures is a map from test to list of failure types + thread_timings is a list of dicts with the total runtime + of each thread with 'name', 'num_tests', 'total_time' properties + test_timings is a list of timings for each sharded subdirectory + of the form [time, directory_name, num_tests] + individual_test_timings is a list of run times for each test + in the form {filename:filename, test_run_time:test_run_time} + result_summary: summary object to populate with the results + """ + threads = self._InstantiateTestShellThreads(test_shell_binary, + file_list, + result_summary) + + # Wait for the threads to finish and collect test failures. + failures = {} + test_timings = {} + individual_test_timings = [] + thread_timings = [] + try: + for thread in threads: + while thread.isAlive(): + # Let it timeout occasionally so it can notice a + # KeyboardInterrupt. Actually, the timeout doesn't + # really matter: apparently it suffices to not use + # an indefinite blocking join for it to + # be interruptible by KeyboardInterrupt. + thread.join(0.1) + self.UpdateSummary(result_summary) + thread_timings.append({'name': thread.getName(), + 'num_tests': thread.GetNumTests(), + 'total_time': thread.GetTotalTime()}) + test_timings.update(thread.GetDirectoryTimingStats()) + individual_test_timings.extend(thread.GetIndividualTestStats()) + except KeyboardInterrupt: + for thread in threads: + thread.Cancel() + self._StopLayoutTestHelper(layout_test_helper_proc) + raise + for thread in threads: + # Check whether a TestShellThread died before normal completion. + exception_info = thread.GetExceptionInfo() + if exception_info is not None: + # Re-raise the thread's exception here to make it clear that + # testing was aborted. Otherwise, the tests that did not run + # would be assumed to have passed. + raise exception_info[0], exception_info[1], exception_info[2] + + # Make sure we pick up any remaining tests. + self.UpdateSummary(result_summary) + return (thread_timings, test_timings, individual_test_timings) + + def Run(self, result_summary): + """Run all our tests on all our test files. + + For each test file, we run each test type. If there are any failures, + we collect them for reporting. + + Args: + result_summary: a summary object tracking the test results. + + Return: + We return nonzero if there are regressions compared to the last run. 
+ """ + if not self._test_files: + return 0 + start_time = time.time() + test_shell_binary = path_utils.TestShellPath(self._options.target) + + # Start up any helper needed + layout_test_helper_proc = None + if not options.no_pixel_tests: + helper_path = path_utils.LayoutTestHelperPath(self._options.target) + if len(helper_path): + logging.debug("Starting layout helper %s" % helper_path) + layout_test_helper_proc = subprocess.Popen( + [helper_path], stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=None) + is_ready = layout_test_helper_proc.stdout.readline() + if not is_ready.startswith('ready'): + logging.error("layout_test_helper failed to be ready") + + # Check that the system dependencies (themes, fonts, ...) are correct. + if not self._options.nocheck_sys_deps: + proc = subprocess.Popen([test_shell_binary, + "--check-layout-test-sys-deps"]) + if proc.wait() != 0: + logging.info("Aborting because system dependencies check " + "failed.\n To override, invoke with " + "--nocheck-sys-deps") + sys.exit(1) + + if self._ContainsTests(self.HTTP_SUBDIR): + self._http_server.Start() + + if self._ContainsTests(self.WEBSOCKET_SUBDIR): + self._websocket_server.Start() + # self._websocket_secure_server.Start() + + thread_timings, test_timings, individual_test_timings = ( + self._RunTests(test_shell_binary, self._test_files_list, + result_summary)) + + # We exclude the crashes from the list of results to retry, because + # we want to treat even a potentially flaky crash as an error. + failures = self._GetFailures(result_summary, include_crashes=False) + retries = 0 + retry_summary = result_summary + while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and + len(failures)): + logging.debug("Retrying %d unexpected failure(s)" % len(failures)) + retries += 1 + retry_summary = ResultSummary(self._expectations, failures.keys()) + self._RunTests(test_shell_binary, failures.keys(), retry_summary) + failures = self._GetFailures(retry_summary, include_crashes=True) + + self._StopLayoutTestHelper(layout_test_helper_proc) + end_time = time.time() + + write = CreateLoggingWriter(self._options, 'timing') + self._PrintTimingStatistics(write, end_time - start_time, + thread_timings, test_timings, + individual_test_timings, + result_summary) + + self._meter.update("") + + if self._options.verbose: + # We write this block to stdout for compatibility with the + # buildbot log parser, which only looks at stdout, not stderr :( + write = lambda s: sys.stdout.write("%s\n" % s) + else: + write = CreateLoggingWriter(self._options, 'actual') + + self._PrintResultSummary(write, result_summary) + + sys.stdout.flush() + sys.stderr.flush() + + if (LOG_DETAILED_PROGRESS in self._options.log or + (LOG_UNEXPECTED in self._options.log and + result_summary.total != result_summary.expected)): + print + + # This summary data gets written to stdout regardless of log level + self._PrintOneLineSummary(result_summary.total, + result_summary.expected) + + unexpected_results = self._SummarizeUnexpectedResults(result_summary, + retry_summary) + self._PrintUnexpectedResults(unexpected_results) + + # Write the same data to log files. + self._WriteJSONFiles(unexpected_results, result_summary, + individual_test_timings) + + # Write the summary to disk (results.html) and maybe open the + # test_shell to this file. 
+ wrote_results = self._WriteResultsHtmlFile(result_summary) + if not self._options.noshow_results and wrote_results: + self._ShowResultsHtmlFile() + + # Ignore flaky failures and unexpected passes so we don't turn the + # bot red for those. + return unexpected_results['num_regressions'] + + def UpdateSummary(self, result_summary): + """Update the summary while running tests.""" + while True: + try: + (test, fail_list) = self._result_queue.get_nowait() + result = test_failures.DetermineResultType(fail_list) + expected = self._expectations.MatchesAnExpectedResult(test, + result) + result_summary.Add(test, fail_list, result, expected) + if (LOG_DETAILED_PROGRESS in self._options.log and + (self._options.experimental_fully_parallel or + self._IsSingleThreaded())): + self._DisplayDetailedProgress(result_summary) + else: + if not expected and LOG_UNEXPECTED in self._options.log: + self._PrintUnexpectedTestResult(test, result) + self._DisplayOneLineProgress(result_summary) + except Queue.Empty: + return + + def _DisplayOneLineProgress(self, result_summary): + """Displays the progress through the test run.""" + self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % + (result_summary.expected, result_summary.unexpected, + result_summary.remaining)) + + def _DisplayDetailedProgress(self, result_summary): + """Display detailed progress output where we print the directory name + and one dot for each completed test. This is triggered by + "--log detailed-progress".""" + if self._current_test_number == len(self._test_files_list): + return + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) + if self._current_progress_str == "": + self._current_progress_str = "%s: " % (next_dir) + self._current_dir = next_dir + + while next_test in result_summary.results: + if next_dir != self._current_dir: + self._meter.write("%s\n" % (self._current_progress_str)) + self._current_progress_str = "%s: ." % (next_dir) + self._current_dir = next_dir + else: + self._current_progress_str += "." + + if (next_test in result_summary.unexpected_results and + LOG_UNEXPECTED in self._options.log): + result = result_summary.unexpected_results[next_test] + self._meter.write("%s\n" % self._current_progress_str) + self._PrintUnexpectedTestResult(next_test, result) + self._current_progress_str = "%s: " % self._current_dir + + self._current_test_number += 1 + if self._current_test_number == len(self._test_files_list): + break + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname( + path_utils.RelativeTestFilename(next_test)) + + if result_summary.remaining: + remain_str = " (%d)" % (result_summary.remaining) + self._meter.update("%s%s" % + (self._current_progress_str, remain_str)) + else: + self._meter.write("%s\n" % (self._current_progress_str)) + + def _GetFailures(self, result_summary, include_crashes): + """Filters a dict of results and returns only the failures. + + Args: + result_summary: the results of the test run + include_crashes: whether crashes are included in the output. + We use False when finding the list of failures to retry + to see if the results were flaky. Although the crashes may also be + flaky, we treat them as if they aren't so that they're not ignored. 
+ Returns: + a dict of files -> results + """ + failed_results = {} + for test, result in result_summary.unexpected_results.iteritems(): + if (result == test_expectations.PASS or + result == test_expectations.CRASH and not include_crashes): + continue + failed_results[test] = result + + return failed_results + + def _SummarizeUnexpectedResults(self, result_summary, retry_summary): + """Summarize any unexpected results as a dict. + + TODO(dpranke): split this data structure into a separate class? + + Args: + result_summary: summary object from initial test runs + retry_summary: summary object from final test run of retried tests + Returns: + A dictionary containing a summary of the unexpected results from the + run, with the following fields: + 'version': a version indicator (1 in this version) + 'fixable': # of fixable tests (NOW - PASS) + 'skipped': # of skipped tests (NOW & SKIPPED) + 'num_regressions': # of non-flaky failures + 'num_flaky': # of flaky failures + 'num_passes': # of unexpected passes + 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} + """ + results = {} + results['version'] = 1 + + tbe = result_summary.tests_by_expectation + tbt = result_summary.tests_by_timeline + results['fixable'] = len(tbt[test_expectations.NOW] - + tbe[test_expectations.PASS]) + results['skipped'] = len(tbt[test_expectations.NOW] & + tbe[test_expectations.SKIP]) + + num_passes = 0 + num_flaky = 0 + num_regressions = 0 + keywords = {} + for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): + keywords[v] = k.upper() + + tests = {} + for filename, result in result_summary.unexpected_results.iteritems(): + # Note that if a test crashed in the original run, we ignore + # whether or not it crashed when we retried it (if we retried it), + # and always consider the result not flaky. + test = path_utils.RelativeTestFilename(filename) + expected = self._expectations.GetExpectationsString(filename) + actual = [keywords[result]] + + if result == test_expectations.PASS: + num_passes += 1 + elif result == test_expectations.CRASH: + num_regressions += 1 + else: + if filename not in retry_summary.unexpected_results: + actual.extend( + self._expectations.GetExpectationsString( + filename).split(" ")) + num_flaky += 1 + else: + retry_result = retry_summary.unexpected_results[filename] + if result != retry_result: + actual.append(keywords[retry_result]) + num_flaky += 1 + else: + num_regressions += 1 + + tests[test] = {} + tests[test]['expected'] = expected + tests[test]['actual'] = " ".join(actual) + + results['tests'] = tests + results['num_passes'] = num_passes + results['num_flaky'] = num_flaky + results['num_regressions'] = num_regressions + + return results + + def _WriteJSONFiles(self, unexpected_results, result_summary, + individual_test_timings): + """Writes the results of the test run as JSON files into the results + dir. + + There are three different files written into the results dir: + unexpected_results.json: A short list of any unexpected results. + This is used by the buildbots to display results. + expectations.json: This is used by the flakiness dashboard. + results.json: A full list of the results - used by the flakiness + dashboard and the aggregate results dashboard. + + Args: + unexpected_results: dict of unexpected results + result_summary: full summary object + individual_test_timings: list of test times (used by the flakiness + dashboard). + """ + logging.debug("Writing JSON files in %s." 
% + self._options.results_directory) + unexpected_file = open(os.path.join(self._options.results_directory, + "unexpected_results.json"), "w") + unexpected_file.write(simplejson.dumps(unexpected_results, + sort_keys=True, indent=2)) + unexpected_file.close() + + # Write a json file of the test_expectations.txt file for the layout + # tests dashboard. + expectations_file = open(os.path.join(self._options.results_directory, + "expectations.json"), "w") + expectations_json = \ + self._expectations.GetExpectationsJsonForAllPlatforms() + expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");") + expectations_file.close() + + json_layout_results_generator.JSONLayoutResultsGenerator( + self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + BUILDER_BASE_URL, individual_test_timings, + self._expectations, result_summary, self._test_files_list) + + logging.debug("Finished writing JSON files.") + + def _PrintExpectedResultsOfType(self, write, result_summary, result_type, + result_type_str): + """Print the number of the tests in a given result class. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary - the object containing all the results to report on + result_type - the particular result type to report in the summary. + result_type_str - a string description of the result_type. + """ + tests = self._expectations.GetTestsWithResultType(result_type) + now = result_summary.tests_by_timeline[test_expectations.NOW] + wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] + defer = result_summary.tests_by_timeline[test_expectations.DEFER] + + # We use a fancy format string in order to print the data out in a + # nicely-aligned table. + fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" + % (self._NumDigits(now), self._NumDigits(defer), + self._NumDigits(wontfix))) + write(fmtstr % (len(tests), result_type_str, len(tests & now), + len(tests & defer), len(tests & wontfix))) + + def _NumDigits(self, num): + """Returns the number of digits needed to represent the length of a + sequence.""" + ndigits = 1 + if len(num): + ndigits = int(math.log10(len(num))) + 1 + return ndigits + + def _PrintTimingStatistics(self, write, total_time, thread_timings, + directory_test_timings, individual_test_timings, + result_summary): + """Record timing-specific information for the test run. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + total_time: total elapsed time (in seconds) for the test run + thread_timings: wall clock time each thread ran for + directory_test_timings: timing by directory + individual_test_timings: timing by file + result_summary: summary object for the test run + """ + write("Test timing:") + write(" %6.2f total testing time" % total_time) + write("") + write("Thread timing:") + cuml_time = 0 + for t in thread_timings: + write(" %10s: %5d tests, %6.2f secs" % + (t['name'], t['num_tests'], t['total_time'])) + cuml_time += t['total_time'] + write(" %6.2f cumulative, %6.2f optimal" % + (cuml_time, cuml_time / int(self._options.num_test_shells))) + write("") + + self._PrintAggregateTestStatistics(write, individual_test_timings) + self._PrintIndividualTestTimes(write, individual_test_timings, + result_summary) + self._PrintDirectoryTimings(write, directory_test_timings) + + def _PrintAggregateTestStatistics(self, write, individual_test_timings): + """Prints aggregate statistics (e.g. 
median, mean, etc.) for all tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. + """ + test_types = individual_test_timings[0].time_for_diffs.keys() + times_for_test_shell = [] + times_for_diff_processing = [] + times_per_test_type = {} + for test_type in test_types: + times_per_test_type[test_type] = [] + + for test_stats in individual_test_timings: + times_for_test_shell.append(test_stats.test_run_time) + times_for_diff_processing.append( + test_stats.total_time_for_all_diffs) + time_for_diffs = test_stats.time_for_diffs + for test_type in test_types: + times_per_test_type[test_type].append( + time_for_diffs[test_type]) + + self._PrintStatisticsForTestTimings(write, + "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) + self._PrintStatisticsForTestTimings(write, + "PER TEST DIFF PROCESSING TIMES (seconds):", + times_for_diff_processing) + for test_type in test_types: + self._PrintStatisticsForTestTimings(write, + "PER TEST TIMES BY TEST TYPE: %s" % test_type, + times_per_test_type[test_type]) + + def _PrintIndividualTestTimes(self, write, individual_test_timings, + result_summary): + """Prints the run times for slow, timeout and crash tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. + result_summary: summary object for test run + """ + # Reverse-sort by the time spent in test_shell. + individual_test_timings.sort(lambda a, b: + cmp(b.test_run_time, a.test_run_time)) + + num_printed = 0 + slow_tests = [] + timeout_or_crash_tests = [] + unexpected_slow_tests = [] + for test_tuple in individual_test_timings: + filename = test_tuple.filename + is_timeout_crash_or_slow = False + if self._expectations.HasModifier(filename, + test_expectations.SLOW): + is_timeout_crash_or_slow = True + slow_tests.append(test_tuple) + + if filename in result_summary.failures: + result = result_summary.results[filename] + if (result == test_expectations.TIMEOUT or + result == test_expectations.CRASH): + is_timeout_crash_or_slow = True + timeout_or_crash_tests.append(test_tuple) + + if (not is_timeout_crash_or_slow and + num_printed < self._options.num_slow_tests_to_log): + num_printed = num_printed + 1 + unexpected_slow_tests.append(test_tuple) + + write("") + self._PrintTestListTiming(write, "%s slowest tests that are not " + "marked as SLOW and did not timeout/crash:" % + self._options.num_slow_tests_to_log, unexpected_slow_tests) + write("") + self._PrintTestListTiming(write, "Tests marked as SLOW:", slow_tests) + write("") + self._PrintTestListTiming(write, "Tests that timed out or crashed:", + timeout_or_crash_tests) + write("") + + def _PrintTestListTiming(self, write, title, test_list): + """Print timing info for each test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: section heading + test_list: tests that fall in this section + """ + write(title) + for test_tuple in test_list: + filename = test_tuple.filename[len( + path_utils.LayoutTestsDir()) + 1:] + filename = filename.replace('\\', '/') + test_run_time = round(test_tuple.test_run_time, 1) + write(" %s took %s seconds" % (filename, test_run_time)) + + def _PrintDirectoryTimings(self, write, directory_test_timings): + """Print timing info by directory for any directories that + take > 10 seconds to run. 
+ + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + directory_test_timing: time info for each directory + """ + timings = [] + for directory in directory_test_timings: + num_tests, time_for_directory = directory_test_timings[directory] + timings.append((round(time_for_directory, 1), directory, + num_tests)) + timings.sort() + + write("Time to process slowest subdirectories:") + min_seconds_to_print = 10 + for timing in timings: + if timing[0] > min_seconds_to_print: + write(" %s took %s seconds to run %s tests." % (timing[1], + timing[0], timing[2])) + write("") + + def _PrintStatisticsForTestTimings(self, write, title, timings): + """Prints the median, mean and standard deviation of the values in + timings. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: Title for these timings. + timings: A list of floats representing times. + """ + write(title) + timings.sort() + + num_tests = len(timings) + percentile90 = timings[int(.9 * num_tests)] + percentile99 = timings[int(.99 * num_tests)] + + if num_tests % 2 == 1: + median = timings[((num_tests - 1) / 2) - 1] + else: + lower = timings[num_tests / 2 - 1] + upper = timings[num_tests / 2] + median = (float(lower + upper)) / 2 + + mean = sum(timings) / num_tests + + for time in timings: + sum_of_deviations = math.pow(time - mean, 2) + + std_deviation = math.sqrt(sum_of_deviations / num_tests) + write(" Median: %6.3f" % median) + write(" Mean: %6.3f" % mean) + write(" 90th percentile: %6.3f" % percentile90) + write(" 99th percentile: %6.3f" % percentile99) + write(" Standard dev: %6.3f" % std_deviation) + write("") + + def _PrintResultSummary(self, write, result_summary): + """Print a short summary about how many tests passed. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: information to log + """ + failed = len(result_summary.failures) + skipped = len( + result_summary.tests_by_expectation[test_expectations.SKIP]) + total = result_summary.total + passed = total - failed - skipped + pct_passed = 0.0 + if total > 0: + pct_passed = float(passed) * 100 / total + + write("") + write("=> Results: %d/%d tests passed (%.1f%%)" % + (passed, total, pct_passed)) + write("") + self._PrintResultSummaryEntry(write, result_summary, + test_expectations.NOW, "Tests to be fixed for the current release") + + write("") + self._PrintResultSummaryEntry(write, result_summary, + test_expectations.DEFER, + "Tests we'll fix in the future if they fail (DEFER)") + + write("") + self._PrintResultSummaryEntry(write, result_summary, + test_expectations.WONTFIX, + "Tests that will only be fixed if they crash (WONTFIX)") + + def _PrintResultSummaryEntry(self, write, result_summary, timeline, + heading): + """Print a summary block of results for a particular timeline of test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: summary to print results for + timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
+ heading: a textual description of the timeline + """ + total = len(result_summary.tests_by_timeline[timeline]) + not_passing = (total - + len(result_summary.tests_by_expectation[test_expectations.PASS] & + result_summary.tests_by_timeline[timeline])) + write("=> %s (%d):" % (heading, not_passing)) + + for result in TestExpectationsFile.EXPECTATION_ORDER: + if result == test_expectations.PASS: + continue + results = (result_summary.tests_by_expectation[result] & + result_summary.tests_by_timeline[timeline]) + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] + if not_passing and len(results): + pct = len(results) * 100.0 / not_passing + write(" %5d %-24s (%4.1f%%)" % (len(results), + desc[len(results) != 1], pct)) + + def _PrintOneLineSummary(self, total, expected): + """Print a one-line summary of the test run to stdout. + + Args: + total: total number of tests run + expected: number of expected results + """ + unexpected = total - expected + if unexpected == 0: + print "All %d tests ran as expected." % expected + elif expected == 1: + print "1 test ran as expected, %d didn't:" % unexpected + else: + print "%d tests ran as expected, %d didn't:" % (expected, + unexpected) + + def _PrintUnexpectedResults(self, unexpected_results): + """Prints any unexpected results in a human-readable form to stdout.""" + passes = {} + flaky = {} + regressions = {} + + if len(unexpected_results['tests']): + print "" + + for test, results in unexpected_results['tests'].iteritems(): + actual = results['actual'].split(" ") + expected = results['expected'].split(" ") + if actual == ['PASS']: + if 'CRASH' in expected: + _AddToDictOfLists(passes, 'Expected to crash, but passed', + test) + elif 'TIMEOUT' in expected: + _AddToDictOfLists(passes, + 'Expected to timeout, but passed', test) + else: + _AddToDictOfLists(passes, 'Expected to fail, but passed', + test) + elif len(actual) > 1: + # We group flaky tests by the first actual result we got. 
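A minimal sketch of the bucketing that `_PrintUnexpectedResults` performs around this point, collapsing the separate crash/timeout messages the real code prints for unexpected passes: results are split into unexpected passes, flaky tests (more than one actual result, grouped by the first one seen), and regressions, using the same dict-of-lists helper as `_AddToDictOfLists`. The `unexpected_results` argument is assumed to have the {'tests': {name: {'actual': ..., 'expected': ...}}} shape built elsewhere in this script.

def _add_to_dict_of_lists(d, key, value):
    d.setdefault(key, []).append(value)


def bucket_unexpected_results(unexpected_results):
    """Split unexpected results into passes, flaky tests and regressions."""
    passes, flaky, regressions = {}, {}, {}
    for test, results in unexpected_results['tests'].iteritems():
        actual = results['actual'].split(" ")
        if actual == ['PASS']:
            _add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
        elif len(actual) > 1:
            # Flaky: group by the first actual result that was seen.
            _add_to_dict_of_lists(flaky, actual[0], test)
        else:
            _add_to_dict_of_lists(regressions, results['actual'], test)
    return passes, flaky, regressions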
+ _AddToDictOfLists(flaky, actual[0], test) + else: + _AddToDictOfLists(regressions, results['actual'], test) + + if len(passes): + for key, tests in passes.iteritems(): + print "%s: (%d)" % (key, len(tests)) + tests.sort() + for test in tests: + print " %s" % test + print + + if len(flaky): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in flaky.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Unexpected flakiness: %s (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + + for test in tests: + result = unexpected_results['tests'][test] + actual = result['actual'].split(" ") + expected = result['expected'].split(" ") + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + new_expectations_list = list(set(actual) | set(expected)) + print " %s = %s" % (test, " ".join(new_expectations_list)) + print + + if len(regressions): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in regressions.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Regressions: Unexpected %s : (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + for test in tests: + print " %s = %s" % (test, key) + print + + if len(unexpected_results['tests']) and self._options.verbose: + print "-" * 78 + + def _PrintUnexpectedTestResult(self, test, result): + """Prints one unexpected test result line.""" + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] + self._meter.write(" %s -> unexpected %s\n" % + (path_utils.RelativeTestFilename(test), desc)) + + def _WriteResultsHtmlFile(self, result_summary): + """Write results.html which is a summary of tests that failed. + + Args: + result_summary: a summary of the results :) + + Returns: + True if any results were written (since expected failures may be + omitted) + """ + # test failures + if self._options.full_results_html: + test_files = result_summary.failures.keys() + else: + unexpected_failures = self._GetFailures(result_summary, + include_crashes=True) + test_files = unexpected_failures.keys() + if not len(test_files): + return False - Args: - result_summary: summary object from initial test runs - retry_summary: summary object from final test run of retried tests - Returns: - A dictionary containing a summary of the unexpected results from the - run, with the following fields: - 'version': a version indicator (1 in this version) - 'fixable': # of fixable tests (NOW - PASS) - 'skipped': # of skipped tests (NOW & SKIPPED) - 'num_regressions': # of non-flaky failures - 'num_flaky': # of flaky failures - 'num_passes': # of unexpected passes - 'tests': a dict of tests -> { 'expected' : '...', 'actual' : '...' } - """ - results = {} - results['version'] = 1 - - tbe = result_summary.tests_by_expectation - tbt = result_summary.tests_by_timeline - results['fixable'] = len(tbt[test_expectations.NOW] - - tbe[test_expectations.PASS]) - results['skipped'] = len(tbt[test_expectations.NOW] & - tbe[test_expectations.SKIP]) - - num_passes = 0 - num_flaky = 0 - num_regressions = 0 - keywords = {} - for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): - keywords[v] = k.upper() - - tests = {} - for filename, result in result_summary.unexpected_results.iteritems(): - # Note that if a test crashed in the original run, we ignore whether or - # not it crashed when we retried it (if we retried it), and always - # consider the result not flaky. 
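The removed comment above spells out how the retry run decides between "flaky" and "regression": a crash is always a regression, a test whose retried result differs from the original (or that behaved as expected when retried) is flaky, and a repeated unexpected result is a regression. A rough standalone sketch of that decision, with plain strings standing in for the `test_expectations` constants and `retry_unexpected` standing in for `retry_summary.unexpected_results`:

def classify_unexpected_result(result, filename, retry_unexpected):
    """Return 'pass', 'flaky' or 'regression' for one unexpected result."""
    if result == 'PASS':
        return 'pass'
    if result == 'CRASH':
        # Crashes are never treated as flaky, even if the retry differed.
        return 'regression'
    if filename not in retry_unexpected:
        # The test behaved as expected on the retry, so it is flaky.
        return 'flaky'
    if retry_unexpected[filename] != result:
        return 'flaky'
    return 'regression'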
- test = path_utils.RelativeTestFilename(filename) - expected = self._expectations.GetExpectationsString(filename) - actual = [keywords[result]] - - if result == test_expectations.PASS: - num_passes += 1 - elif result == test_expectations.CRASH: - num_regressions += 1 - else: - if filename not in retry_summary.unexpected_results: - actual.extend( - self._expectations.GetExpectationsString(filename).split(" ")) - num_flaky += 1 + out_filename = os.path.join(self._options.results_directory, + "results.html") + out_file = open(out_filename, 'w') + # header + if self._options.full_results_html: + h2 = "Test Failures" else: - retry_result = retry_summary.unexpected_results[filename] - if result != retry_result: - actual.append(keywords[retry_result]) - num_flaky += 1 - else: - num_regressions += 1 - - tests[test] = {} - tests[test]['expected'] = expected - tests[test]['actual'] = " ".join(actual) - - results['tests'] = tests - results['num_passes'] = num_passes - results['num_flaky'] = num_flaky - results['num_regressions'] = num_regressions - - return results - - def _WriteJSONFiles(self, unexpected_results, result_summary, - individual_test_timings): - """Writes the results of the test run as JSON files into the results dir. - - There are three different files written into the results dir: - unexpected_results.json: A short list of any unexpected results. This - is used by the buildbots to display results. - expectations.json: This is used by the flakiness dashboard. - results.json: A full list of the results - used by the flakiness - dashboard and the aggregate results dashboard. + h2 = "Unexpected Test Failures" + out_file.write("<html><head><title>Layout Test Results (%(time)s)" + "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n" + % {'h2': h2, 'time': time.asctime()}) + + test_files.sort() + for test_file in test_files: + test_failures = result_summary.failures.get(test_file, []) + out_file.write("<p><a href='%s'>%s</a><br />\n" + % (path_utils.FilenameToUri(test_file), + path_utils.RelativeTestFilename(test_file))) + for failure in test_failures: + out_file.write(" %s<br/>" + % failure.ResultHtmlOutput( + path_utils.RelativeTestFilename(test_file))) + out_file.write("</p>\n") + + # footer + out_file.write("</body></html>\n") + return True - Args: - unexpected_results: dict of unexpected results - result_summary: full summary object - individual_test_timings: list of test times (used by the flakiness - dashboard). - """ - logging.debug("Writing JSON files in %s." % self._options.results_directory) - unexpected_file = open(os.path.join(self._options.results_directory, - "unexpected_results.json"), "w") - unexpected_file.write(simplejson.dumps(unexpected_results, sort_keys=True, - indent=2)) - unexpected_file.close() - - # Write a json file of the test_expectations.txt file for the layout tests - # dashboard. 
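As the `_WriteJSONFiles` docstring above notes, three files land in the results directory: `unexpected_results.json` for the buildbots, `expectations.json` for the flakiness dashboard (wrapped in an `ADD_EXPECTATIONS(...)` call), and the full `results.json` produced by `json_layout_results_generator`. A minimal sketch of the first two writes, using the stdlib `json` module in place of the bundled `simplejson` these scripts import:

import json
import os


def write_dashboard_json(results_directory, unexpected_results,
                         expectations_json):
    """Write the small JSON files read by the buildbots and the dashboard."""
    unexpected_file = open(os.path.join(results_directory,
                                        "unexpected_results.json"), "w")
    unexpected_file.write(json.dumps(unexpected_results, sort_keys=True,
                                     indent=2))
    unexpected_file.close()

    # Script-style wrapper, matching the ADD_EXPECTATIONS(...) call above.
    expectations_file = open(os.path.join(results_directory,
                                          "expectations.json"), "w")
    expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");")
    expectations_file.close()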
- expectations_file = open(os.path.join(self._options.results_directory, - "expectations.json"), "w") - expectations_json = self._expectations.GetExpectationsJsonForAllPlatforms() - expectations_file.write(("ADD_EXPECTATIONS(" + expectations_json + ");")) - expectations_file.close() - - json_layout_results_generator.JSONLayoutResultsGenerator( - self._options.builder_name, self._options.build_name, - self._options.build_number, self._options.results_directory, - BUILDER_BASE_URL, individual_test_timings, - self._expectations, result_summary, self._test_files_list) - - logging.debug("Finished writing JSON files.") - - def _PrintExpectedResultsOfType(self, write, result_summary, result_type, - result_type_str): - """Print the number of the tests in a given result class. + def _ShowResultsHtmlFile(self): + """Launches the test shell open to the results.html page.""" + results_filename = os.path.join(self._options.results_directory, + "results.html") + subprocess.Popen([path_utils.TestShellPath(self._options.target), + path_utils.FilenameToUri(results_filename)]) - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary - the object containing all the results to report on - result_type - the particular result type to report in the summary. - result_type_str - a string description of the result_type. - """ - tests = self._expectations.GetTestsWithResultType(result_type) - now = result_summary.tests_by_timeline[test_expectations.NOW] - wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] - defer = result_summary.tests_by_timeline[test_expectations.DEFER] - - # We use a fancy format string in order to print the data out in a - # nicely-aligned table. - fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" % - (self._NumDigits(now), self._NumDigits(defer), - self._NumDigits(wontfix))) - write(fmtstr % - (len(tests), result_type_str, len(tests & now), len(tests & defer), - len(tests & wontfix))) - - def _NumDigits(self, num): - """Returns the number of digits needed to represent the length of a - sequence.""" - ndigits = 1 - if len(num): - ndigits = int(math.log10(len(num))) + 1 - return ndigits - - def _PrintTimingStatistics(self, write, total_time, thread_timings, - directory_test_timings, individual_test_timings, - result_summary): - """Record timing-specific information for the test run. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. 
- total_time: total elapsed time (in seconds) for the test run - thread_timings: wall clock time each thread ran for - directory_test_timings: timing by directory - individual_test_timings: timing by file - result_summary: summary object for the test run - """ - write("Test timing:") - write(" %6.2f total testing time" % total_time) - write("") - write("Thread timing:") - cuml_time = 0 - for t in thread_timings: - write(" %10s: %5d tests, %6.2f secs" % - (t['name'], t['num_tests'], t['total_time'])) - cuml_time += t['total_time'] - write(" %6.2f cumulative, %6.2f optimal" % - (cuml_time, cuml_time / int(self._options.num_test_shells))) - write("") +def _AddToDictOfLists(dict, key, value): + dict.setdefault(key, []).append(value) - self._PrintAggregateTestStatistics(write, individual_test_timings) - self._PrintIndividualTestTimes(write, individual_test_timings, - result_summary) - self._PrintDirectoryTimings(write, directory_test_timings) - def _PrintAggregateTestStatistics(self, write, individual_test_timings): - """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - individual_test_timings: List of test_shell_thread.TestStats for all - tests. - """ - test_types = individual_test_timings[0].time_for_diffs.keys() - times_for_test_shell = [] - times_for_diff_processing = [] - times_per_test_type = {} - for test_type in test_types: - times_per_test_type[test_type] = [] - - for test_stats in individual_test_timings: - times_for_test_shell.append(test_stats.test_run_time) - times_for_diff_processing.append(test_stats.total_time_for_all_diffs) - time_for_diffs = test_stats.time_for_diffs - for test_type in test_types: - times_per_test_type[test_type].append(time_for_diffs[test_type]) - - self._PrintStatisticsForTestTimings(write, - "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) - self._PrintStatisticsForTestTimings(write, - "PER TEST DIFF PROCESSING TIMES (seconds):", times_for_diff_processing) - for test_type in test_types: - self._PrintStatisticsForTestTimings(write, - "PER TEST TIMES BY TEST TYPE: %s" % test_type, - times_per_test_type[test_type]) - - def _PrintIndividualTestTimes(self, write, individual_test_timings, - result_summary): - """Prints the run times for slow, timeout and crash tests. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - individual_test_timings: List of test_shell_thread.TestStats for all - tests. - result_summary: summary object for test run - """ - # Reverse-sort by the time spent in test_shell. 
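`_PrintExpectedResultsOfType` and `_NumDigits`, shown earlier in this hunk, build the summary format string in two passes so the count columns stay aligned: the first pass measures how many digits each column can need, and the second splices those widths in via the escaped `%%%dd` placeholders. A small illustration of the idiom (the sample lists are made up):

import math


def num_digits(seq):
    """Digits needed to print len(seq)."""
    if len(seq):
        return int(math.log10(len(seq))) + 1
    return 1

now, defer, wontfix = range(120), range(8), range(3000)
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
          % (num_digits(now), num_digits(defer), num_digits(wontfix)))
# fmtstr is now "Expect: %5d %-8s (%3d now, %1d defer, %4d wontfix)"
print fmtstr % (42, "FAIL", 40, 1, 1)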
- individual_test_timings.sort(lambda a, b: - cmp(b.test_run_time, a.test_run_time)) - - num_printed = 0 - slow_tests = [] - timeout_or_crash_tests = [] - unexpected_slow_tests = [] - for test_tuple in individual_test_timings: - filename = test_tuple.filename - is_timeout_crash_or_slow = False - if self._expectations.HasModifier(filename, test_expectations.SLOW): - is_timeout_crash_or_slow = True - slow_tests.append(test_tuple) - - if filename in result_summary.failures: - result = result_summary.results[filename] - if (result == test_expectations.TIMEOUT or - result == test_expectations.CRASH): - is_timeout_crash_or_slow = True - timeout_or_crash_tests.append(test_tuple) - - if (not is_timeout_crash_or_slow and - num_printed < self._options.num_slow_tests_to_log): - num_printed = num_printed + 1 - unexpected_slow_tests.append(test_tuple) +def ReadTestFiles(files): + tests = [] + for file in files: + for line in open(file): + line = test_expectations.StripComments(line) + if line: + tests.append(line) + return tests - write("") - self._PrintTestListTiming(write, "%s slowest tests that are not marked " - "as SLOW and did not timeout/crash:" % - self._options.num_slow_tests_to_log, unexpected_slow_tests) - write("") - self._PrintTestListTiming(write, "Tests marked as SLOW:", slow_tests) - write("") - self._PrintTestListTiming(write, "Tests that timed out or crashed:", - timeout_or_crash_tests) - write("") - def _PrintTestListTiming(self, write, title, test_list): - """Print timing info for each test. +def CreateLoggingWriter(options, log_option): + """Returns a write() function that will write the string to logging.info() + if comp was specified in --log or if --verbose is true. Otherwise the + message is dropped. Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - title: section heading - test_list: tests that fall in this section + options: list of command line options from optparse + log_option: option to match in options.log in order for the messages + to be logged (e.g., 'actual' or 'expected') """ - write(title) - for test_tuple in test_list: - filename = test_tuple.filename[len(path_utils.LayoutTestsDir()) + 1:] - filename = filename.replace('\\', '/') - test_run_time = round(test_tuple.test_run_time, 1) - write(" %s took %s seconds" % (filename, test_run_time)) + if options.verbose or log_option in options.log.split(","): + return logging.info + return lambda str: 1 - def _PrintDirectoryTimings(self, write, directory_test_timings): - """Print timing info by directory for any directories that take > 10 seconds - to run. - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - directory_test_timing: time info for each directory - """ - timings = [] - for directory in directory_test_timings: - num_tests, time_for_directory = directory_test_timings[directory] - timings.append((round(time_for_directory, 1), directory, num_tests)) - timings.sort() - - write("Time to process slowest subdirectories:") - min_seconds_to_print = 10 - for timing in timings: - if timing[0] > min_seconds_to_print: - write(" %s took %s seconds to run %s tests." % (timing[1], timing[0], - timing[2])) - write("") +def main(options, args): + """Run the tests. Will call sys.exit when complete. - def _PrintStatisticsForTestTimings(self, write, title, timings): - """Prints the median, mean and standard deviation of the values in timings. Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. 
- title: Title for these timings. - timings: A list of floats representing times. + options: a dictionary of command line options + args: a list of sub directories or files to test """ - write(title) - timings.sort() - num_tests = len(timings) - percentile90 = timings[int(.9 * num_tests)] - percentile99 = timings[int(.99 * num_tests)] + if options.sources: + options.verbose = True + + # Set up our logging format. + meter = metered_stream.MeteredStream(options.verbose, sys.stderr) + log_fmt = '%(message)s' + log_datefmt = '%y%m%d %H:%M:%S' + log_level = logging.INFO + if options.verbose: + log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' + '%(message)s') + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, + stream=meter) + + if not options.target: + if options.debug: + options.target = "Debug" + else: + options.target = "Release" - if num_tests % 2 == 1: - median = timings[((num_tests - 1) / 2) - 1] - else: - lower = timings[num_tests / 2 - 1] - upper = timings[num_tests / 2] - median = (float(lower + upper)) / 2 + if not options.use_apache: + options.use_apache = sys.platform in ('darwin', 'linux2') - mean = sum(timings) / num_tests + if options.results_directory.startswith("/"): + # Assume it's an absolute path and normalize. + options.results_directory = path_utils.GetAbsolutePath( + options.results_directory) + else: + # If it's a relative path, make the output directory relative to + # Debug or Release. + basedir = path_utils.PathFromBase('webkit') + options.results_directory = path_utils.GetAbsolutePath( + os.path.join(basedir, options.target, options.results_directory)) + + if options.clobber_old_results: + # Just clobber the actual test results directories since the other + # files in the results directory are explicitly used for cross-run + # tracking. + path = os.path.join(options.results_directory, 'LayoutTests') + if os.path.exists(path): + shutil.rmtree(path) + + # Ensure platform is valid and force it to the form 'chromium-<platform>'. + options.platform = path_utils.PlatformName(options.platform) + + if not options.num_test_shells: + # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. + options.num_test_shells = platform_utils.GetNumCores() + + write = CreateLoggingWriter(options, 'config') + write("Running %s test_shells in parallel" % options.num_test_shells) + + if not options.time_out_ms: + if options.target == "Debug": + options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) + else: + options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) + + options.slow_time_out_ms = str(5 * int(options.time_out_ms)) + write("Regular timeout: %s, slow test timeout: %s" % + (options.time_out_ms, options.slow_time_out_ms)) + + # Include all tests if none are specified. + new_args = [] + for arg in args: + if arg and arg != '': + new_args.append(arg) + + paths = new_args + if not paths: + paths = [] + if options.test_list: + paths += ReadTestFiles(options.test_list) + + # Create the output directory if it doesn't already exist. + path_utils.MaybeMakeDirectory(options.results_directory) + meter.update("Gathering files ...") + + test_runner = TestRunner(options, meter) + test_runner.GatherFilePaths(paths) + + if options.lint_test_files: + # Creating the expecations for each platform/target pair does all the + # test list parsing and ensures it's correct syntax (e.g. no dupes). 
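The `_PrintStatisticsForTestTimings` hunks in this file report the median, mean, 90th/99th percentiles and standard deviation of the sorted timings. A compact standalone version is sketched below; it accumulates the squared deviations over every timing and indexes the true middle element for odd-length lists, whereas the loop in the original overwrites `sum_of_deviations` on each iteration and subtracts one from the odd-length median index.

import math


def timing_statistics(timings):
    """Return (median, mean, pct90, pct99, stddev) for a list of float times."""
    timings = sorted(timings)
    n = len(timings)
    percentile90 = timings[int(.9 * n)]
    percentile99 = timings[int(.99 * n)]

    if n % 2 == 1:
        median = timings[(n - 1) / 2]
    else:
        median = (timings[n / 2 - 1] + timings[n / 2]) / 2.0

    mean = sum(timings) / n
    sum_of_deviations = sum(math.pow(t - mean, 2) for t in timings)
    std_deviation = math.sqrt(sum_of_deviations / n)
    return median, mean, percentile90, percentile99, std_deviation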
+ for platform in TestExpectationsFile.PLATFORMS: + test_runner.ParseExpectations(platform, is_debug_mode=True) + test_runner.ParseExpectations(platform, is_debug_mode=False) + print ("If there are no fail messages, errors or exceptions, then the " + "lint succeeded.") + sys.exit(0) - for time in timings: - sum_of_deviations = math.pow(time - mean, 2) + try: + test_shell_binary_path = path_utils.TestShellPath(options.target) + except path_utils.PathNotFound: + print "\nERROR: test_shell is not found. Be sure that you have built" + print "it and that you are using the correct build. This script" + print "will run the Release one by default. Use --debug to use the" + print "Debug build.\n" + sys.exit(1) - std_deviation = math.sqrt(sum_of_deviations / num_tests) - write(" Median: %6.3f" % median) - write(" Mean: %6.3f" % mean) - write(" 90th percentile: %6.3f" % percentile90) - write(" 99th percentile: %6.3f" % percentile99) - write(" Standard dev: %6.3f" % std_deviation) + write = CreateLoggingWriter(options, "config") + write("Using platform '%s'" % options.platform) + write("Placing test results in %s" % options.results_directory) + if options.new_baseline: + write("Placing new baselines in %s" % + path_utils.ChromiumBaselinePath(options.platform)) + write("Using %s build at %s" % (options.target, test_shell_binary_path)) + if options.no_pixel_tests: + write("Not running pixel tests") write("") - def _PrintResultSummary(self, write, result_summary): - """Print a short summary to the output file about how many tests passed. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary: information to log - """ - failed = len(result_summary.failures) - skipped = len(result_summary.tests_by_expectation[test_expectations.SKIP]) - total = result_summary.total - passed = total - failed - skipped - pct_passed = 0.0 - if total > 0: - pct_passed = float(passed) * 100 / total - - write(""); - write("=> Results: %d/%d tests passed (%.1f%%)" % - (passed, total, pct_passed)) - write(""); - self._PrintResultSummaryEntry(write, result_summary, test_expectations.NOW, - "Tests to be fixed for the current release") - - write(""); - self._PrintResultSummaryEntry(write, result_summary, - test_expectations.DEFER, - "Tests we'll fix in the future if they fail (DEFER)") - - write(""); - self._PrintResultSummaryEntry(write, result_summary, - test_expectations.WONTFIX, - "Tests that will only be fixed if they crash (WONTFIX)") - - def _PrintResultSummaryEntry(self, write, result_summary, timeline, heading): - """Print a summary block of results for a particular timeline of test. - - Args: - write: A callback to write info to (e.g., a LoggingWriter) or - sys.stdout.write. - result_summary: summary to print results for - timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
- heading: a textual description of the timeline - """ - total = len(result_summary.tests_by_timeline[timeline]) - not_passing = (total - - len(result_summary.tests_by_expectation[test_expectations.PASS] & - result_summary.tests_by_timeline[timeline])) - write("=> %s (%d):" % (heading, not_passing)) - - for result in TestExpectationsFile.EXPECTATION_ORDER: - if result == test_expectations.PASS: - continue - results = (result_summary.tests_by_expectation[result] & - result_summary.tests_by_timeline[timeline]) - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] - if not_passing and len(results): - pct = len(results) * 100.0 / not_passing - write(" %5d %-24s (%4.1f%%)" % (len(results), - desc[len(results) != 1], pct)) - - def _PrintOneLineSummary(self, total, expected): - """Print a one-line summary of the test run to stdout. + meter.update("Parsing expectations ...") + test_runner.ParseExpectations(options.platform, options.target == 'Debug') - Args: - total: total number of tests run - expected: number of expected results - """ - unexpected = total - expected - if unexpected == 0: - print "All %d tests ran as expected." % expected - elif expected == 1: - print "1 test ran as expected, %d didn't:" % unexpected - else: - print "%d tests ran as expected, %d didn't:" % (expected, unexpected) - - def _PrintUnexpectedResults(self, unexpected_results): - """Prints any unexpected results in a human-readable form to stdout.""" - passes = {} - flaky = {} - regressions = {} - - if len(unexpected_results['tests']): - print "" - - for test, results in unexpected_results['tests'].iteritems(): - actual = results['actual'].split(" ") - expected = results['expected'].split(" ") - if actual == ['PASS']: - if 'CRASH' in expected: - _AddToDictOfLists(passes, 'Expected to crash, but passed', test) - elif 'TIMEOUT' in expected: - _AddToDictOfLists(passes, 'Expected to timeout, but passed', test) - else: - _AddToDictOfLists(passes, 'Expected to fail, but passed', test) - elif len(actual) > 1: - # We group flaky tests by the first actual result we got. 
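One small idiom in `_PrintResultSummaryEntry` above is easy to miss: `desc[len(results) != 1]` indexes what is presumably a (singular, plural) description pair with a boolean, since False and True act as 0 and 1. For instance, with a made-up description pair:

desc = ("test timed out", "tests timed out")   # hypothetical (singular, plural)
for count in (1, 3):
    print "%d %s" % (count, desc[count != 1])
# prints "1 test timed out" then "3 tests timed out"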
- _AddToDictOfLists(flaky, actual[0], test) - else: - _AddToDictOfLists(regressions, results['actual'], test) - - if len(passes): - for key, tests in passes.iteritems(): - print "%s: (%d)" % (key, len(tests)) - tests.sort() - for test in tests: - print " %s" % test - print - - if len(flaky): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in flaky.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - print "Unexpected flakiness: %s (%d)" % ( - descriptions[result][1], len(tests)) - tests.sort() - - for test in tests: - actual = unexpected_results['tests'][test]['actual'].split(" ") - expected = unexpected_results['tests'][test]['expected'].split(" ") - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - new_expectations_list = list(set(actual) | set(expected)) - print " %s = %s" % (test, " ".join(new_expectations_list)) - print - - if len(regressions): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in regressions.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - print "Regressions: Unexpected %s : (%d)" % ( - descriptions[result][1], len(tests)) - tests.sort() - for test in tests: - print " %s = %s" % (test, key) - print - - if len(unexpected_results['tests']) and self._options.verbose: - print "-" * 78 - - def _PrintUnexpectedTestResult(self, test, result): - """Prints one unexpected test result line.""" - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] - self._meter.write(" %s -> unexpected %s\n" % - (path_utils.RelativeTestFilename(test), desc)) - - def _WriteResultsHtmlFile(self, result_summary): - """Write results.html which is a summary of tests that failed. + meter.update("Preparing tests ...") + write = CreateLoggingWriter(options, "expected") + result_summary = test_runner.PrepareListsAndPrintOutput(write) - Args: - result_summary: a summary of the results :) - - Returns: - True if any results were written (since expected failures may be omitted) - """ - # test failures - if self._options.full_results_html: - test_files = result_summary.failures.keys() - else: - unexpected_failures = self._GetFailures(result_summary, - include_crashes=True) - test_files = unexpected_failures.keys() - if not len(test_files): - return False - - out_filename = os.path.join(self._options.results_directory, - "results.html") - out_file = open(out_filename, 'w') - # header - if self._options.full_results_html: - h2 = "Test Failures" - else: - h2 = "Unexpected Test Failures" - out_file.write("<html><head><title>Layout Test Results (%(time)s)</title>" - "</head><body><h2>%(h2)s (%(time)s)</h2>\n" - % {'h2': h2, 'time': time.asctime()}) - - test_files.sort() - for test_file in test_files: - test_failures = result_summary.failures.get(test_file, []) - out_file.write("<p><a href='%s'>%s</a><br />\n" - % (path_utils.FilenameToUri(test_file), - path_utils.RelativeTestFilename(test_file))) - for failure in test_failures: - out_file.write(" %s<br/>" - % failure.ResultHtmlOutput( - path_utils.RelativeTestFilename(test_file))) - out_file.write("</p>\n") - - # footer - out_file.write("</body></html>\n") - return True - - def _ShowResultsHtmlFile(self): - """Launches the test shell open to the results.html page.""" - results_filename = os.path.join(self._options.results_directory, - "results.html") - subprocess.Popen([path_utils.TestShellPath(self._options.target), - path_utils.FilenameToUri(results_filename)]) + if 'cygwin' == sys.platform: + logging.warn("#" * 40) + logging.warn("# 
UNEXPECTED PYTHON VERSION") + logging.warn("# This script should be run using the version of python") + logging.warn("# in third_party/python_24/") + logging.warn("#" * 40) + sys.exit(1) + # Delete the disk cache if any to ensure a clean test run. + cachedir = os.path.split(test_shell_binary_path)[0] + cachedir = os.path.join(cachedir, "cache") + if os.path.exists(cachedir): + shutil.rmtree(cachedir) -def _AddToDictOfLists(dict, key, value): - dict.setdefault(key, []).append(value) - -def ReadTestFiles(files): - tests = [] - for file in files: - for line in open(file): - line = test_expectations.StripComments(line) - if line: tests.append(line) - return tests + test_runner.AddTestType(text_diff.TestTextDiff) + if not options.no_pixel_tests: + test_runner.AddTestType(image_diff.ImageDiff) + if options.fuzzy_pixel_tests: + test_runner.AddTestType(fuzzy_image_diff.FuzzyImageDiff) -def CreateLoggingWriter(options, log_option): - """Returns a write() function that will write the string to logging.info() - if comp was specified in --log or if --verbose is true. Otherwise the - message is dropped. - - Args: - options: list of command line options from optparse - log_option: option to match in options.log in order for the messages to be - logged (e.g., 'actual' or 'expected') - """ - if options.verbose or log_option in options.log.split(","): - return logging.info - return lambda str: 1 + meter.update("Starting ...") + has_new_failures = test_runner.Run(result_summary) -def main(options, args): - """Run the tests. Will call sys.exit when complete. - - Args: - options: a dictionary of command line options - args: a list of sub directories or files to test - """ - - if options.sources: - options.verbose = True - - # Set up our logging format. - meter = metered_stream.MeteredStream(options.verbose, sys.stderr) - log_fmt = '%(message)s' - log_datefmt = '%y%m%d %H:%M:%S' - log_level = logging.INFO - if options.verbose: - log_fmt = '%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s %(message)s' - log_level = logging.DEBUG - logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, - stream=meter) - - if not options.target: - if options.debug: - options.target = "Debug" - else: - options.target = "Release" - - if not options.use_apache: - options.use_apache = sys.platform in ('darwin', 'linux2') - - if options.results_directory.startswith("/"): - # Assume it's an absolute path and normalize. - options.results_directory = path_utils.GetAbsolutePath( - options.results_directory) - else: - # If it's a relative path, make the output directory relative to Debug or - # Release. - basedir = path_utils.PathFromBase('webkit') - options.results_directory = path_utils.GetAbsolutePath( - os.path.join(basedir, options.target, options.results_directory)) - - if options.clobber_old_results: - # Just clobber the actual test results directories since the other files - # in the results directory are explicitly used for cross-run tracking. - path = os.path.join(options.results_directory, 'LayoutTests') - if os.path.exists(path): - shutil.rmtree(path) - - # Ensure platform is valid and force it to the form 'chromium-<platform>'. - options.platform = path_utils.PlatformName(options.platform) - - if not options.num_test_shells: - # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. 
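`CreateLoggingWriter`, visible in this hunk, is a tiny factory: if the requested category appears in `--log` (or `--verbose` is set) it hands back `logging.info`, otherwise a function that silently drops the message, so callers can write unconditionally. A sketch of the same pattern:

import logging


def create_logging_writer(options, log_option):
    """Return a write(msg) callable; messages are dropped unless enabled."""
    if options.verbose or log_option in options.log.split(","):
        return logging.info
    return lambda msg: None

Callers then use the result uniformly, as in the `main()` hunks: `write = CreateLoggingWriter(options, 'config')` followed by `write("Running %s test_shells in parallel" % options.num_test_shells)`.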
- options.num_test_shells = platform_utils.GetNumCores() - - write = CreateLoggingWriter(options, 'config') - write("Running %s test_shells in parallel" % options.num_test_shells) - - if not options.time_out_ms: - if options.target == "Debug": - options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) - else: - options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) - - options.slow_time_out_ms = str(5 * int(options.time_out_ms)) - write("Regular timeout: %s, slow test timeout: %s" % - (options.time_out_ms, options.slow_time_out_ms)) - - # Include all tests if none are specified. - new_args = [] - for arg in args: - if arg and arg != '': - new_args.append(arg) - - paths = new_args - if not paths: - paths = [] - if options.test_list: - paths += ReadTestFiles(options.test_list) - - # Create the output directory if it doesn't already exist. - path_utils.MaybeMakeDirectory(options.results_directory) - meter.update("Gathering files ...") - - test_runner = TestRunner(options, meter) - test_runner.GatherFilePaths(paths) - - if options.lint_test_files: - # Creating the expecations for each platform/target pair does all the - # test list parsing and ensures it's correct syntax (e.g. no dupes). - for platform in TestExpectationsFile.PLATFORMS: - test_runner.ParseExpectations(platform, is_debug_mode=True) - test_runner.ParseExpectations(platform, is_debug_mode=False) - print ("If there are no fail messages, errors or exceptions, then the " - "lint succeeded.") - sys.exit(0) - - try: - test_shell_binary_path = path_utils.TestShellPath(options.target) - except path_utils.PathNotFound: - print "\nERROR: test_shell is not found. Be sure that you have built it" - print "and that you are using the correct build. This script will run the" - print "Release one by default. Use --debug to use the Debug build.\n" - sys.exit(1) - - write = CreateLoggingWriter(options, "config") - write("Using platform '%s'" % options.platform) - write("Placing test results in %s" % options.results_directory) - if options.new_baseline: - write("Placing new baselines in %s" % - path_utils.ChromiumBaselinePath(options.platform)) - write("Using %s build at %s" % (options.target, test_shell_binary_path)) - if options.no_pixel_tests: - write("Not running pixel tests") - write("") - - meter.update("Parsing expectations ...") - test_runner.ParseExpectations(options.platform, options.target == 'Debug') - - meter.update("Preparing tests ...") - write = CreateLoggingWriter(options, "expected") - result_summary = test_runner.PrepareListsAndPrintOutput(write) - - if 'cygwin' == sys.platform: - logging.warn("#" * 40) - logging.warn("# UNEXPECTED PYTHON VERSION") - logging.warn("# This script should be run using the version of python") - logging.warn("# in third_party/python_24/") - logging.warn("#" * 40) - sys.exit(1) - - # Delete the disk cache if any to ensure a clean test run. 
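The timeout handling in `main()` above derives everything from one base value: Debug builds get twice the default per-test timeout, and the slow-test timeout is five times whatever the regular timeout ends up being. Roughly, with an assumed placeholder for `TestRunner.DEFAULT_TEST_TIMEOUT_MS` (its real value is defined elsewhere in this file):

DEFAULT_TEST_TIMEOUT_MS = 10 * 1000   # placeholder value for illustration only


def compute_timeouts(target, time_out_ms=None):
    """Return (regular, slow) per-test timeouts in milliseconds, as strings."""
    if not time_out_ms:
        if target == "Debug":
            time_out_ms = str(2 * DEFAULT_TEST_TIMEOUT_MS)
        else:
            time_out_ms = str(DEFAULT_TEST_TIMEOUT_MS)
    slow_time_out_ms = str(5 * int(time_out_ms))
    return time_out_ms, slow_time_out_ms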
- cachedir = os.path.split(test_shell_binary_path)[0] - cachedir = os.path.join(cachedir, "cache") - if os.path.exists(cachedir): - shutil.rmtree(cachedir) - - test_runner.AddTestType(text_diff.TestTextDiff) - if not options.no_pixel_tests: - test_runner.AddTestType(image_diff.ImageDiff) - if options.fuzzy_pixel_tests: - test_runner.AddTestType(fuzzy_image_diff.FuzzyImageDiff) - - meter.update("Starting ...") - has_new_failures = test_runner.Run(result_summary) - - logging.debug("Exit status: %d" % has_new_failures) - sys.exit(has_new_failures) + logging.debug("Exit status: %d" % has_new_failures) + sys.exit(has_new_failures) if '__main__' == __name__: - option_parser = optparse.OptionParser() - option_parser.add_option("", "--no-pixel-tests", action="store_true", - default=False, - help="disable pixel-to-pixel PNG comparisons") - option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", - default=False, - help="Also use fuzzy matching to compare pixel test" - " outputs.") - option_parser.add_option("", "--results-directory", - default="layout-test-results", - help="Output results directory source dir," - " relative to Debug or Release") - option_parser.add_option("", "--new-baseline", action="store_true", - default=False, - help="save all generated results as new baselines " - "into the platform directory, overwriting " - "whatever's already there.") - option_parser.add_option("", "--noshow-results", action="store_true", - default=False, help="don't launch the test_shell" - " with results after the tests are done") - option_parser.add_option("", "--full-results-html", action="store_true", - default=False, help="show all failures in " - "results.html, rather than only regressions") - option_parser.add_option("", "--clobber-old-results", action="store_true", - default=False, help="Clobbers test results from " - "previous runs.") - option_parser.add_option("", "--lint-test-files", action="store_true", - default=False, help="Makes sure the test files " - "parse for all configurations. Does not run any " - "tests.") - option_parser.add_option("", "--force", action="store_true", - default=False, - help="Run all tests, even those marked SKIP in the " - "test list") - option_parser.add_option("", "--num-test-shells", - help="Number of testshells to run in parallel.") - option_parser.add_option("", "--use-apache", action="store_true", - default=False, - help="Whether to use apache instead of lighttpd.") - option_parser.add_option("", "--time-out-ms", default=None, - help="Set the timeout for each test") - option_parser.add_option("", "--run-singly", action="store_true", - default=False, - help="run a separate test_shell for each test") - option_parser.add_option("", "--debug", action="store_true", default=False, - help="use the debug binary instead of the release " - "binary") - option_parser.add_option("", "--num-slow-tests-to-log", default=50, - help="Number of slow tests whose timings to print.") - option_parser.add_option("", "--platform", - help="Override the platform for expected results") - option_parser.add_option("", "--target", default="", - help="Set the build target configuration (overrides" - " --debug)") - option_parser.add_option("", "--log", action="store", - default="detailed-progress,unexpected", - help="log various types of data. 
The param should " - "be a comma-separated list of values from: " - "actual,config," + LOG_DETAILED_PROGRESS + - ",expected,timing," + LOG_UNEXPECTED + - " (defaults to --log detailed-progress,unexpected)") - option_parser.add_option("-v", "--verbose", action="store_true", - default=False, help="include debug-level logging") - option_parser.add_option("", "--sources", action="store_true", - help="show expected result file path for each test " - "(implies --verbose)") - option_parser.add_option("", "--startup-dialog", action="store_true", - default=False, - help="create a dialog on test_shell.exe startup") - option_parser.add_option("", "--gp-fault-error-box", action="store_true", - default=False, - help="enable Windows GP fault error box") - option_parser.add_option("", "--wrapper", - help="wrapper command to insert before invocations " - "of test_shell; option is split on whitespace " - "before running. (example: " - "--wrapper='valgrind --smc-check=all')") - option_parser.add_option("", "--test-list", action="append", - help="read list of tests to run from file", - metavar="FILE") - option_parser.add_option("", "--nocheck-sys-deps", action="store_true", - default=False, - help="Don't check the system dependencies (themes)") - option_parser.add_option("", "--randomize-order", action="store_true", - default=False, - help=("Run tests in random order (useful for " - "tracking down corruption)")) - option_parser.add_option("", "--run-chunk", - default=None, - help=("Run a specified chunk (n:l), the nth of len " - "l, of the layout tests")) - option_parser.add_option("", "--run-part", - default=None, - help=("Run a specified part (n:m), the nth of m" - " parts, of the layout tests")) - option_parser.add_option("", "--batch-size", - default=None, - help=("Run a the tests in batches (n), after every " - "n tests, the test shell is relaunched.")) - option_parser.add_option("", "--builder-name", - default="DUMMY_BUILDER_NAME", - help=("The name of the builder shown on the " - "waterfall running this script e.g. WebKit.")) - option_parser.add_option("", "--build-name", - default="DUMMY_BUILD_NAME", - help=("The name of the builder used in its path, " - "e.g. 
webkit-rel.")) - option_parser.add_option("", "--build-number", - default="DUMMY_BUILD_NUMBER", - help=("The build number of the builder running" - "this script.")) - option_parser.add_option("", "--experimental-fully-parallel", - action="store_true", default=False, - help="run all tests in parallel") - - options, args = option_parser.parse_args() - main(options, args) + option_parser = optparse.OptionParser() + option_parser.add_option("", "--no-pixel-tests", action="store_true", + default=False, + help="disable pixel-to-pixel PNG comparisons") + option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", + default=False, + help="Also use fuzzy matching to compare pixel " + "test outputs.") + option_parser.add_option("", "--results-directory", + default="layout-test-results", + help="Output results directory source dir," + " relative to Debug or Release") + option_parser.add_option("", "--new-baseline", action="store_true", + default=False, + help="save all generated results as new baselines" + " into the platform directory, overwriting " + "whatever's already there.") + option_parser.add_option("", "--noshow-results", action="store_true", + default=False, help="don't launch the test_shell" + " with results after the tests are done") + option_parser.add_option("", "--full-results-html", action="store_true", + default=False, help="show all failures in " + "results.html, rather than only regressions") + option_parser.add_option("", "--clobber-old-results", action="store_true", + default=False, help="Clobbers test results from " + "previous runs.") + option_parser.add_option("", "--lint-test-files", action="store_true", + default=False, help="Makes sure the test files " + "parse for all configurations. Does not run any " + "tests.") + option_parser.add_option("", "--force", action="store_true", + default=False, + help="Run all tests, even those marked SKIP " + "in the test list") + option_parser.add_option("", "--num-test-shells", + help="Number of testshells to run in parallel.") + option_parser.add_option("", "--use-apache", action="store_true", + default=False, + help="Whether to use apache instead of lighttpd.") + option_parser.add_option("", "--time-out-ms", default=None, + help="Set the timeout for each test") + option_parser.add_option("", "--run-singly", action="store_true", + default=False, + help="run a separate test_shell for each test") + option_parser.add_option("", "--debug", action="store_true", default=False, + help="use the debug binary instead of the release" + " binary") + option_parser.add_option("", "--num-slow-tests-to-log", default=50, + help="Number of slow tests whose timings " + "to print.") + option_parser.add_option("", "--platform", + help="Override the platform for expected results") + option_parser.add_option("", "--target", default="", + help="Set the build target configuration " + "(overrides --debug)") + option_parser.add_option("", "--log", action="store", + default="detailed-progress,unexpected", + help="log various types of data. 
The param should" + " be a comma-separated list of values from: " + "actual,config," + LOG_DETAILED_PROGRESS + + ",expected,timing," + LOG_UNEXPECTED + " " + "(defaults to " + + "--log detailed-progress,unexpected)") + option_parser.add_option("-v", "--verbose", action="store_true", + default=False, help="include debug-level logging") + option_parser.add_option("", "--sources", action="store_true", + help="show expected result file path for each " + "test (implies --verbose)") + option_parser.add_option("", "--startup-dialog", action="store_true", + default=False, + help="create a dialog on test_shell.exe startup") + option_parser.add_option("", "--gp-fault-error-box", action="store_true", + default=False, + help="enable Windows GP fault error box") + option_parser.add_option("", "--wrapper", + help="wrapper command to insert before " + "invocations of test_shell; option is split " + "on whitespace before running. (Example: " + "--wrapper='valgrind --smc-check=all')") + option_parser.add_option("", "--test-list", action="append", + help="read list of tests to run from file", + metavar="FILE") + option_parser.add_option("", "--nocheck-sys-deps", action="store_true", + default=False, + help="Don't check the system dependencies " + "(themes)") + option_parser.add_option("", "--randomize-order", action="store_true", + default=False, + help=("Run tests in random order (useful for " + "tracking down corruption)")) + option_parser.add_option("", "--run-chunk", + default=None, + help=("Run a specified chunk (n:l), the " + "nth of len l, of the layout tests")) + option_parser.add_option("", "--run-part", + default=None, + help=("Run a specified part (n:m), the nth of m" + " parts, of the layout tests")) + option_parser.add_option("", "--batch-size", + default=None, + help=("Run a the tests in batches (n), after " + "every n tests, the test shell is " + "relaunched.")) + option_parser.add_option("", "--builder-name", + default="DUMMY_BUILDER_NAME", + help=("The name of the builder shown on the " + "waterfall running this script e.g. " + "WebKit.")) + option_parser.add_option("", "--build-name", + default="DUMMY_BUILD_NAME", + help=("The name of the builder used in its path, " + "e.g. webkit-rel.")) + option_parser.add_option("", "--build-number", + default="DUMMY_BUILD_NUMBER", + help=("The build number of the builder running" + "this script.")) + option_parser.add_option("", "--experimental-fully-parallel", + action="store_true", default=False, + help="run all tests in parallel") + + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/test_output_formatter.py b/webkit/tools/layout_tests/test_output_formatter.py index 81e6302..f60dad1 100755 --- a/webkit/tools/layout_tests/test_output_formatter.py +++ b/webkit/tools/layout_tests/test_output_formatter.py @@ -17,89 +17,89 @@ from layout_package import html_generator DEFAULT_BUILDER = "Webkit" + def main(options, args): - if options.run_tests: - fft = failure_finder_test.FailureFinderTest() - return fft.runTests() + if options.run_tests: + fft = failure_finder_test.FailureFinderTest() + return fft.runTests() - # TODO(gwilson): Add a check that verifies the given platform exists. + # TODO(gwilson): Add a check that verifies the given platform exists. 
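These scripts all build their command lines the same way with the stdlib `optparse` module: each flag is declared through `option_parser.add_option(...)` with an `action`, a `default` and a `help` string, and `parse_args()` returns an `(options, args)` pair that is passed straight to `main()`. A stripped-down version of the pattern (the two flags shown are just examples):

import optparse


def main(options, args):
    if options.verbose:
        print "platform: %s" % options.platform


if __name__ == "__main__":
    option_parser = optparse.OptionParser()
    option_parser.add_option("-v", "--verbose", action="store_true",
                             default=False,
                             help="include debug-level logging")
    option_parser.add_option("", "--platform",
                             help="Override the platform for expected results")
    options, args = option_parser.parse_args()
    main(options, args)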
- finder = failure_finder.FailureFinder(options.build_number, - options.platform_builder, - (not options.include_expected), - options.test_regex, - options.output_dir, - int(options.max_failures), - options.verbose, - options.builder_log, - options.archive_log, - options.zip_file, - options.expectations_file) - finder.use_local_baselines = options.local - failure_list = finder.GetFailures() + finder = failure_finder.FailureFinder(options.build_number, + options.platform_builder, + (not options.include_expected), + options.test_regex, + options.output_dir, + int(options.max_failures), + options.verbose, + options.builder_log, + options.archive_log, + options.zip_file, + options.expectations_file) + finder.use_local_baselines = options.local + failure_list = finder.GetFailures() - if not failure_list: - print "Did not find any failures." - return + if not failure_list: + print "Did not find any failures." + return - generator = html_generator.HTMLGenerator(failure_list, - options.output_dir, - finder.build, - options.platform_builder, - (not options.include_expected)) - filename = generator.GenerateHTML() + generator = html_generator.HTMLGenerator(failure_list, + options.output_dir, + finder.build, + options.platform_builder, + (not options.include_expected)) + filename = generator.GenerateHTML() - if filename and options.verbose: - print "File created at %s" % filename + if filename and options.verbose: + print "File created at %s" % filename if __name__ == "__main__": - option_parser = optparse.OptionParser() - option_parser.add_option("-v", "--verbose", action = "store_true", - default = False, - help = "Display lots of output.") - option_parser.add_option("-i", "--include-expected", action = "store_true", - default = False, - help = "Include expected failures in output") - option_parser.add_option("-p", "--platform-builder", - default = DEFAULT_BUILDER, - help = "Use the given builder") - option_parser.add_option("-b", "--build-number", - default = None, - help = "Use the given build number") - option_parser.add_option("-t", "--test-regex", - default = None, - help = "Use the given regex to filter tests") - option_parser.add_option("-o", "--output-dir", - default = ".", - help = "Output files to given directory") - option_parser.add_option("-m", "--max-failures", - default = 100, - help = "Limit the maximum number of failures") - option_parser.add_option("-r", "--run-tests", action = "store_true", - default = False, - help = "Runs unit tests") - option_parser.add_option("-u", "--builder-log", - default = None, - help = ("Use the local builder log file instead of " - "scraping the buildbots")) - option_parser.add_option("-a", "--archive-log", - default = None, - help = ("Use the local archive log file instead of " - "scraping the buildbots")) - option_parser.add_option("-e", "--expectations-file", - default = None, - help = ("Use the local test expectations file " + option_parser = optparse.OptionParser() + option_parser.add_option("-v", "--verbose", action="store_true", + default=False, + help="Display lots of output.") + option_parser.add_option("-i", "--include-expected", action="store_true", + default=False, + help="Include expected failures in output") + option_parser.add_option("-p", "--platform-builder", + default=DEFAULT_BUILDER, + help="Use the given builder") + option_parser.add_option("-b", "--build-number", + default=None, + help="Use the given build number") + option_parser.add_option("-t", "--test-regex", + default=None, + help="Use the given regex to filter tests") + 
option_parser.add_option("-o", "--output-dir", + default=".", + help="Output files to given directory") + option_parser.add_option("-m", "--max-failures", + default=100, + help="Limit the maximum number of failures") + option_parser.add_option("-r", "--run-tests", action="store_true", + default=False, + help="Runs unit tests") + option_parser.add_option("-u", "--builder-log", + default=None, + help=("Use the local builder log file " + "instead of scraping the buildbots")) + option_parser.add_option("-a", "--archive-log", + default=None, + help=("Use the local archive log file " "instead of scraping the buildbots")) - option_parser.add_option("-z", "--zip-file", - default = None, - help = ("Use the local test output zip file " + option_parser.add_option("-e", "--expectations-file", + default=None, + help=("Use the local test expectations file " "instead of scraping the buildbots")) - option_parser.add_option("-l", "--local", action = "store_true", - default = False, - help = ("Use local baselines instead of scraping " + option_parser.add_option("-z", "--zip-file", + default=None, + help=("Use the local test output zip file " + "instead of scraping the buildbots")) + option_parser.add_option("-l", "--local", action="store_true", + default=False, + help=("Use local baselines instead of scraping " "baselines from source websites")) - options, args = option_parser.parse_args() - main(options, args) - + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/test_output_xml_to_json.py b/webkit/tools/layout_tests/test_output_xml_to_json.py index 61c6e24..bda1ff3 100755 --- a/webkit/tools/layout_tests/test_output_xml_to_json.py +++ b/webkit/tools/layout_tests/test_output_xml_to_json.py @@ -22,109 +22,114 @@ from layout_package import test_expectations # Builder base URL where we have the archived test results. 
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/gtest-results/" + class JSONGeneratorFromXML(object): - def __init__(self, options): - self._options = options - - # Check the results directory - if not os.path.exists(self._options.results_directory): - os.mkdir(self._options.results_directory) - - results_xml_file = None - try: - results_xml_file = open(self._options.input_results_xml) - except IOError, e: - logging.fatal("Cannot open file %s", self._options.input_results_xml) - sys.exit(1) - - summary = self._ParseTestResultsXML( - minidom.parse(results_xml_file).documentElement) - results_xml_file.close() - - json_results_generator.JSONResultsGenerator( - self._options.builder_name, self._options.build_name, - self._options.build_number, self._options.results_directory, - self._options.builder_base_url, - self._test_timings, - self._failures, self._passed_tests, self._skipped_tests, - self._tests_list) - - def _ParseTestResultsXML(self, node): - self._tests_list = set() - self._passed_tests = set() - self._skipped_tests = set() - self._test_timings = {} - self._failures = {} - - testcases = node.getElementsByTagName('testcase') - for testcase in testcases: - name = testcase.getAttribute('name') - classname = testcase.getAttribute('classname') - test_name = "%s.%s" % (classname, name) - - status = testcase.getAttribute('status') - if status == 'notrun': - if name.startswith('DISABLED_'): - self._skipped_tests.add(test_name) - continue - - failures = testcase.getElementsByTagName('failure') - if failures: - self._failures[test_name] = test_expectations.TEXT - else: - self._passed_tests.add(test_name) - - self._test_timings[test_name] = float(testcase.getAttribute('time')) - self._tests_list.add(test_name) + + def __init__(self, options): + self._options = options + + # Check the results directory + if not os.path.exists(self._options.results_directory): + os.mkdir(self._options.results_directory) + + results_xml_file = None + try: + results_xml_file = open(self._options.input_results_xml) + except IOError, e: + logging.fatal("Cannot open file %s", + self._options.input_results_xml) + sys.exit(1) + + summary = self._ParseTestResultsXML( + minidom.parse(results_xml_file).documentElement) + results_xml_file.close() + + json_results_generator.JSONResultsGenerator( + self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + self._options.builder_base_url, + self._test_timings, + self._failures, self._passed_tests, self._skipped_tests, + self._tests_list) + + def _ParseTestResultsXML(self, node): + self._tests_list = set() + self._passed_tests = set() + self._skipped_tests = set() + self._test_timings = {} + self._failures = {} + + testcases = node.getElementsByTagName('testcase') + for testcase in testcases: + name = testcase.getAttribute('name') + classname = testcase.getAttribute('classname') + test_name = "%s.%s" % (classname, name) + + status = testcase.getAttribute('status') + if status == 'notrun': + if name.startswith('DISABLED_'): + self._skipped_tests.add(test_name) + continue + + failures = testcase.getElementsByTagName('failure') + if failures: + self._failures[test_name] = test_expectations.TEXT + else: + self._passed_tests.add(test_name) + + self._test_timings[test_name] = float( + testcase.getAttribute('time')) + self._tests_list.add(test_name) def main(options, args): - """Parse the tests results and generate JSON files. + """Parse the tests results and generate JSON files. 
- Args: - options: a dictionary of command line options - args: a list of sub directories or files to test - """ + Args: + options: a dictionary of command line options + args: a list of sub directories or files to test + """ - if not options.test_type: - logging.error("--test-type needs to be specified.") - sys.exit(1) + if not options.test_type: + logging.error("--test-type needs to be specified.") + sys.exit(1) - canon_test_type = options.test_type.replace("-", "_") - if not options.input_results_xml: - options.input_results_xml = "%s.xml" % (canon_test_type) - if not options.builder_base_url: - options.builder_base_url = "%s%s/" % (BUILDER_BASE_URL, options.test_type) + canon_test_type = options.test_type.replace("-", "_") + if not options.input_results_xml: + options.input_results_xml = "%s.xml" % (canon_test_type) + if not options.builder_base_url: + options.builder_base_url = "%s%s/" % (BUILDER_BASE_URL, + options.test_type) - JSONGeneratorFromXML(options) + JSONGeneratorFromXML(options) - return + return if '__main__' == __name__: - option_parser = optparse.OptionParser() - option_parser.add_option("", "--test-type", default="", - help="Test type that generated the results XML," - " e.g. unit-tests.") - option_parser.add_option("", "--results-directory", default="./", - help="Output results directory source dir.") - option_parser.add_option("", "--input-results-xml", default="", - help="Test results xml file (input for us)." - " default is TEST_TYPE.xml") - option_parser.add_option("", "--builder-base-url", default="", - help=("A URL where we have the archived test " - "results. (default=%sTEST_TYPE_results/)" - % BUILDER_BASE_URL)) - option_parser.add_option("", "--builder-name", - default="DUMMY_BUILDER_NAME", - help="The name of the builder shown on the " - "waterfall running this script e.g. WebKit.") - option_parser.add_option("", "--build-name", - default="DUMMY_BUILD_NAME", - help="The name of the builder used in its path, " - "e.g. webkit-rel.") - option_parser.add_option("", "--build-number", - default="DUMMY_BUILD_NUMBER", - help="The build number of the builder running" - "this script.") - options, args = option_parser.parse_args() - main(options, args) + option_parser = optparse.OptionParser() + option_parser.add_option("", "--test-type", default="", + help="Test type that generated the results XML," + " e.g. unit-tests.") + option_parser.add_option("", "--results-directory", default="./", + help="Output results directory source dir.") + option_parser.add_option("", "--input-results-xml", default="", + help="Test results xml file (input for us)." + " default is TEST_TYPE.xml") + option_parser.add_option("", "--builder-base-url", default="", + help=("A URL where we have the archived test " + "results. (default=%sTEST_TYPE_results/)" + % BUILDER_BASE_URL)) + option_parser.add_option("", "--builder-name", + default="DUMMY_BUILDER_NAME", + help="The name of the builder shown on the " + "waterfall running this script e.g. WebKit.") + option_parser.add_option("", "--build-name", + default="DUMMY_BUILD_NAME", + help="The name of the builder used in its path, " + "e.g. 
webkit-rel.") + option_parser.add_option("", "--build-number", + default="DUMMY_BUILD_NUMBER", + help="The build number of the builder running" + "this script.") + options, args = option_parser.parse_args() + main(options, args) diff --git a/webkit/tools/layout_tests/update_expectations_from_dashboard.py b/webkit/tools/layout_tests/update_expectations_from_dashboard.py index 40c5521..b5774b6 100644 --- a/webkit/tools/layout_tests/update_expectations_from_dashboard.py +++ b/webkit/tools/layout_tests/update_expectations_from_dashboard.py @@ -7,7 +7,8 @@ and apply them to test_expectations.txt. Usage: -1. Go to http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/layout_tests/flakiness_dashboard.html#expectationsUpdate=true +1. Go to http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/ + layout_tests/flakiness_dashboard.html#expectationsUpdate=true 2. Copy-paste that JSON into a local file. 3. python update_expectations_from_dashboard.py path/to/local/file """ @@ -22,433 +23,454 @@ from layout_package import test_expectations sys.path.append(path_utils.PathFromBase('third_party')) import simplejson + def UpdateExpectations(expectations, updates): - expectations = ExpectationsUpdater(None, None, - 'WIN', False, False, expectations, True) - return expectations.UpdateBasedOnJSON(updates) + expectations = ExpectationsUpdater(None, None, + 'WIN', False, False, expectations, True) + return expectations.UpdateBasedOnJSON(updates) + class OptionsAndExpectationsHolder(object): - """Container for a list of options and a list of expectations for a given - test.""" - def __init__(self, options, expectations): - self.options = options - self.expectations = expectations + """Container for a list of options and a list of expectations for a given + test.""" + + def __init__(self, options, expectations): + self.options = options + self.expectations = expectations + class BuildInfo(OptionsAndExpectationsHolder): - """Container for a list of options and expectations for a given test as well - as a map from build_type (e.g. debug/release) to a list of platforms (e.g. - ["win", "linux"]). - """ - def __init__(self, options, expectations, build_info): - OptionsAndExpectationsHolder.__init__(self, options, expectations) - self.build_info = build_info + """Container for a list of options and expectations for a given test as + well as a map from build_type (e.g. debug/release) to a list of platforms + (e.g. ["win", "linux"]). + """ + + def __init__(self, options, expectations, build_info): + OptionsAndExpectationsHolder.__init__(self, options, expectations) + self.build_info = build_info + class ExpectationsUpdater(test_expectations.TestExpectationsFile): - """Class to update test_expectations.txt based on updates in the following - form: - {"test1.html": { - "WIN RELEASE": {"missing": "FAIL TIMEOUT", "extra": "CRASH"}} - "WIN DEBUG": {"missing": "FAIL TIMEOUT"}} - "test2.html": ... - } - """ - - def _GetBuildTypesAndPlatforms(self, options): - """Splits up the options list into three lists: platforms, build_types and - other_options.""" - platforms = [] - build_types = [] - other_options = [] - for option in options: - if option in self.PLATFORMS: - platforms.append(option) - elif option in self.BUILD_TYPES: - build_types.append(option) - else: - other_options.append(option) - - if not len(build_types): - build_types = self.BUILD_TYPES - - if not len(platforms): - # If there are no platforms specified, use the most generic version - # of each platform name so we don't have to dedupe them later. 
- platforms = self.BASE_PLATFORMS - - return (platforms, build_types, other_options) - - def _ApplyUpdatesToResults(self, test, results, update_json, expectations, - other_options): - """Applies the updates from the JSON to the existing results in - test_expectations. - Args: - test: The test to update. - results: The results object to update. - update_json: The parsed JSON object with the updates. - expectations: The existing expectatons for this test. - other_options: The existing modifiers for this test excluding platforms - and build_types. + """Class to update test_expectations.txt based on updates in the following + form: + {"test1.html": { + "WIN RELEASE": {"missing": "FAIL TIMEOUT", "extra": "CRASH"}} + "WIN DEBUG": {"missing": "FAIL TIMEOUT"}} + "test2.html": ... + } """ - updates = update_json[test] - for build_info in updates: - platform, build_type = build_info.lower().split(' ') - - # If the platform/build_type is not currently listed for the test, - # skip it as this platform/build_type may be listed in another line. - if platform not in results or build_type not in results[platform]: - continue - - these_results = results[platform][build_type] - these_updates = updates[build_info] - these_expectations = these_results.expectations - these_options = these_results.options - - self._ApplyExtraUpdates(these_updates, these_options, these_expectations) - self._ApplyMissingUpdates(test, these_updates, these_options, - these_expectations) - - def _ApplyExtraUpdates(self, updates, options, expectations): - """Remove extraneous expectations/options in the updates object to - the given options/expectations lists. - """ - if "extra" not in updates: - return - - items = updates["extra"].lower().split(' ') - for item in items: - if item in self.EXPECTATIONS: - if item in expectations: - expectations.remove(item) - else: - if item in options: - options.remove(item) - - def _ApplyMissingUpdates(self, test, updates, options, expectations): - """Apply an addition expectations/options in the updates object to - the given options/expectations lists. - """ - if "missing" not in updates: - return - - items = updates["missing"].lower().split(' ') - for item in items: - if item == 'other': - continue - - # Don't add TIMEOUT to SLOW tests. Automating that is too - # complicated instead, print out tests that need manual attention. - if ((item == "timeout" and - ("slow" in options or "slow" in items)) or - (item == "slow" and - ("timeout" in expectations or "timeout" in items))): - logging.info("NEEDS MANUAL ATTENTION: %s may need to be marked " - "TIMEOUT or SLOW." % test) - elif item in self.EXPECTATIONS: - if item not in expectations: - expectations.append(item) - if ("fail" in expectations and - (item == "image+text" or item == "image" or item == "text")): - expectations.remove("fail") - else: - if item not in options: - options.append(item) - - def _AppendPlatform(self, item, build_type, platform): - """Appends the give build_type and platform to the BuildInfo item. - """ - build_info = item.build_info - if build_type not in build_info: - build_info[build_type] = [] - build_info[build_type].append(platform) - - def _GetUpdatesDedupedByMatchingOptionsAndExpectations(self, results): - """Converts the results, which is - results[platforms][build_type] = OptionsAndExpectationsHolder - to BuildInfo objects, which dedupes platform/build_types that have the same - expectations and options. 
- """ - updates = [] - for platform in results: - for build_type in results[platform]: - options = results[platform][build_type].options - expectations = results[platform][build_type].expectations - found_match = False + def _GetBuildTypesAndPlatforms(self, options): + """Splits up the options list into three lists: platforms, + build_types and other_options.""" + platforms = [] + build_types = [] + other_options = [] + for option in options: + if option in self.PLATFORMS: + platforms.append(option) + elif option in self.BUILD_TYPES: + build_types.append(option) + else: + other_options.append(option) + + if not len(build_types): + build_types = self.BUILD_TYPES + + if not len(platforms): + # If there are no platforms specified, use the most generic version + # of each platform name so we don't have to dedup them later. + platforms = self.BASE_PLATFORMS + + return (platforms, build_types, other_options) + + def _ApplyUpdatesToResults(self, test, results, update_json, expectations, + other_options): + """Applies the updates from the JSON to the existing results in + test_expectations. + Args: + test: The test to update. + results: The results object to update. + update_json: The parsed JSON object with the updates. + expectations: The existing expectatons for this test. + other_options: The existing modifiers for this test + excluding platforms and build_types. + """ + updates = update_json[test] + for build_info in updates: + platform, build_type = build_info.lower().split(' ') + + # If the platform/build_type is not currently listed for the test, + # skip it as this platform/build_type may be listed in another + # line. + if platform not in results or build_type not in results[platform]: + continue + + these_results = results[platform][build_type] + these_updates = updates[build_info] + these_expectations = these_results.expectations + these_options = these_results.options + + self._ApplyExtraUpdates(these_updates, these_options, + these_expectations) + self._ApplyMissingUpdates(test, these_updates, these_options, + these_expectations) + + def _ApplyExtraUpdates(self, updates, options, expectations): + """Remove extraneous expectations/options in the updates object to + the given options/expectations lists. + """ + if "extra" not in updates: + return + + items = updates["extra"].lower().split(' ') + for item in items: + if item in self.EXPECTATIONS: + if item in expectations: + expectations.remove(item) + else: + if item in options: + options.remove(item) + + def _ApplyMissingUpdates(self, test, updates, options, expectations): + """Apply an addition expectations/options in the updates object to + the given options/expectations lists. + """ + if "missing" not in updates: + return + + items = updates["missing"].lower().split(' ') + for item in items: + if item == 'other': + continue + + # Don't add TIMEOUT to SLOW tests. Automating that is too + # complicated instead, print out tests that need manual attention. + if ((item == "timeout" and + ("slow" in options or "slow" in items)) or + (item == "slow" and + ("timeout" in expectations or "timeout" in items))): + logging.info("NEEDS MANUAL ATTENTION: %s may need " + "to be marked TIMEOUT or SLOW." 
% test) + elif item in self.EXPECTATIONS: + if item not in expectations: + expectations.append(item) + if ("fail" in expectations and + (item == "image+text" or item == "image" or + item == "text")): + expectations.remove("fail") + else: + if item not in options: + options.append(item) + + def _AppendPlatform(self, item, build_type, platform): + """Appends the give build_type and platform to the BuildInfo item. + """ + build_info = item.build_info + if build_type not in build_info: + build_info[build_type] = [] + build_info[build_type].append(platform) + + def _GetUpdatesDedupedByMatchingOptionsAndExpectations(self, results): + """Converts the results, which is + results[platforms][build_type] = OptionsAndExpectationsHolder + to BuildInfo objects, which dedupes platform/build_types that + have the same expectations and options. + """ + updates = [] + for platform in results: + for build_type in results[platform]: + options = results[platform][build_type].options + expectations = results[platform][build_type].expectations + + found_match = False + for update in updates: + if (update.options == options and + update.expectations == expectations): + self._AppendPlatform(update, build_type, platform) + found_match = True + break + + if found_match: + continue + + update = BuildInfo(options, expectations, {}) + self._AppendPlatform(update, build_type, platform) + updates.append(update) + + return self._RoundUpFlakyUpdates(updates) + + def _HasMajorityBuildConfigurations(self, candidate, candidate2): + """Returns true if the candidate BuildInfo represents all build + configurations except the single one listed in candidate2. + For example, if a test is FAIL TIMEOUT on all bots except WIN-Release, + where it is just FAIL. Or if a test is FAIL TIMEOUT on MAC-Release, + Mac-Debug and Linux-Release, but only FAIL on Linux-Debug. + """ + build_types = self.BUILD_TYPES[:] + build_info = candidate.build_info + if "release" not in build_info or "debug" not in build_info: + return None + + release_set = set(build_info["release"]) + debug_set = set(build_info["debug"]) + if len(release_set - debug_set) is 1: + full_set = release_set + partial_set = debug_set + needed_build_type = "debug" + elif len(debug_set - release_set) is 1: + full_set = debug_set + partial_set = release_set + needed_build_type = "release" + else: + return None + + build_info2 = candidate2.build_info + if needed_build_type not in build_info2: + return None + + build_type = None + for this_build_type in build_info2: + # Can only work if this candidate has one build_type. + if build_type: + return None + build_type = this_build_type + + if set(build_info2[needed_build_type]) == full_set - partial_set: + return full_set + else: + return None + + def _RoundUpFlakyUpdates(self, updates): + """Consolidates the updates into one update if 5/6 results are + flaky and the is a subset of the flaky results 6th just not + happening to flake or 3/4 results are flaky and the 4th has a + subset of the flaky results. 
+ """ + if len(updates) is not 2: + return updates + + item1, item2 = updates + candidate = None + candidate_platforms = self._HasMajorityBuildConfigurations(item1, + item2) + if candidate_platforms: + candidate = item1 + else: + candidate_platforms = self._HasMajorityBuildConfigurations(item1, + item2) + if candidate_platforms: + candidate = item2 + + if candidate: + options1 = set(item1.options) + options2 = set(item2.options) + expectations1 = set(item1.expectations) + if not len(expectations1): + expectations1.add("pass") + expectations2 = set(item2.expectations) + if not len(expectations2): + expectations2.add("pass") + + options_union = options1 | options2 + expectations_union = expectations1 | expectations2 + # If the options and expectations are equal to their respective + # unions then we can round up to include the 6th platform. + if (candidate == item1 and options1 == options_union and + expectations1 == expectations_union and len(expectations2) or + candidate == item2 and options2 == options_union and + expectations2 == expectations_union and len(expectations1)): + for build_type in self.BUILD_TYPES: + candidate.build_info[build_type] = list( + candidate_platforms) + updates = [candidate] + return updates + + def UpdateBasedOnJSON(self, update_json): + """Updates the expectations based on the update_json, which is of the + following form: + {"1.html": { + "WIN DEBUG": {"extra": "FAIL", "missing", "PASS"}, + "WIN RELEASE": {"extra": "FAIL"} + }} + """ + output = [] + + comment_lines = [] + removed_test_on_previous_line = False + lineno = 0 + for line in self._GetIterableExpectations(): + lineno += 1 + test, options, expectations = self.ParseExpectationsLine(line, + lineno) + + # If there are no updates for this test, then output the line + # unmodified. + if (test not in update_json): + if test: + self._WriteCompletedLines(output, comment_lines, line) + else: + if removed_test_on_previous_line: + removed_test_on_previous_line = False + comment_lines = [] + comment_lines.append(line) + continue + + platforms, build_types, other_options = \ + self._GetBuildTypesAndPlatforms(options) + + updates = update_json[test] + has_updates_for_this_line = False + for build_info in updates: + platform, build_type = build_info.lower().split(' ') + if platform in platforms and build_type in build_types: + has_updates_for_this_line = True + + # If the updates for this test don't apply for the platforms / + # build-types listed in this line, then output the line unmodified. + if not has_updates_for_this_line: + self._WriteCompletedLines(output, comment_lines, line) + continue + + results = {} + for platform in platforms: + results[platform] = {} + for build_type in build_types: + results[platform][build_type] = \ + OptionsAndExpectationsHolder(other_options[:], + expectations[:]) + + self._ApplyUpdatesToResults(test, results, update_json, + expectations, other_options) + + deduped_updates = \ + self._GetUpdatesDedupedByMatchingOptionsAndExpectations( + results) + removed_test_on_previous_line = not self._WriteUpdates(output, + comment_lines, test, deduped_updates) + # Append any comment/whitespace lines at the end of test_expectations. + output.extend(comment_lines) + return "".join(output) + + def _WriteUpdates(self, output, comment_lines, test, updates): + """Writes the updates to the output. + Args: + output: List to append updates to. + comment_lines: Comments that come before this test that should be + prepending iff any tests lines are written out. + test: The test being updating. 
+ updates: List of BuildInfo instances that represent the final values + for this test line.. + """ + wrote_any_lines = False for update in updates: - if (update.options == options and - update.expectations == expectations): - self._AppendPlatform(update, build_type, platform) - found_match = True - break; - - if found_match: - continue - - update = BuildInfo(options, expectations, {}) - self._AppendPlatform(update, build_type, platform) - updates.append(update) - - return self._RoundUpFlakyUpdates(updates) - - def _HasMajorityBuildConfigurations(self, candidate, candidate2): - """Returns true if the candidate BuildInfo represents all build - configurations except the single one listed in candidate2. - For example, if a test is FAIL TIMEOUT on all bots except WIN-Release, - where it is just FAIL. Or if a test is FAIL TIMEOUT on MAC-Release, - Mac-Debug and Linux-Release, but only FAIL on Linux-Debug. - """ - build_types = self.BUILD_TYPES[:] - build_info = candidate.build_info - if "release" not in build_info or "debug" not in build_info: - return None - - release_set = set(build_info["release"]) - debug_set = set(build_info["debug"]) - if len(release_set - debug_set) is 1: - full_set = release_set - partial_set = debug_set - needed_build_type = "debug" - elif len(debug_set - release_set) is 1: - full_set = debug_set - partial_set = release_set - needed_build_type = "release" - else: - return None - - build_info2 = candidate2.build_info - if needed_build_type not in build_info2: - return None - - build_type = None - for this_build_type in build_info2: - # Can only work if this candidate has one build_type. - if build_type: - return None - build_type = this_build_type - - if set(build_info2[needed_build_type]) == full_set - partial_set: - return full_set - else: - return None - - def _RoundUpFlakyUpdates(self, updates): - """Consolidates the updates into one update if 5/6 results are flaky and the - is a subset of the flaky results 6th just not happening to flake or 3/4 - results are flaky and the 4th has a subset of the flaky results. - """ - if len(updates) is not 2: - return updates - - item1, item2 = updates - candidate = None - candidate_platforms = self._HasMajorityBuildConfigurations(item1, item2) - if candidate_platforms: - candidate = item1 - else: - candidate_platforms = self._HasMajorityBuildConfigurations(item1, item2) - if candidate_platforms: - candidate = item2 - - if candidate: - options1 = set(item1.options) - options2 = set(item2.options) - expectations1 = set(item1.expectations) - if not len(expectations1): - expectations1.add("pass") - expectations2 = set(item2.expectations) - if not len(expectations2): - expectations2.add("pass") - - options_union = options1 | options2 - expectations_union = expectations1 | expectations2 - # If the options and expectations are equal to their respective unions - # then we can round up to include the 6th platform. 
- if (candidate == item1 and options1 == options_union and - expectations1 == expectations_union and len(expectations2) or - candidate == item2 and options2 == options_union and - expectations2 == expectations_union and len(expectations1)): - for build_type in self.BUILD_TYPES: - candidate.build_info[build_type] = list(candidate_platforms) - updates = [candidate] - return updates - - def UpdateBasedOnJSON(self, update_json): - """Updates the expectations based on the update_json, which is of the - following form: - {"1.html": { - "WIN DEBUG": {"extra": "FAIL", "missing", "PASS"}, - "WIN RELEASE": {"extra": "FAIL"} - }} - """ - output = [] - - comment_lines = [] - removed_test_on_previous_line = False - lineno = 0 - for line in self._GetIterableExpectations(): - lineno += 1 - test, options, expectations = self.ParseExpectationsLine(line, lineno) - - # If there are no updates for this test, then output the line unmodified. - if (test not in update_json): - if test: - self._WriteCompletedLines(output, comment_lines, line) + options = update.options + expectations = update.expectations + + has_meaningful_modifier = False + for option in options: + if option in self.MODIFIERS: + has_meaningful_modifier = True + break + + has_non_pass_expectation = False + for expectation in expectations: + if expectation != "pass": + has_non_pass_expectation = True + break + + # If this test is only left with platform, build_type, bug number + # and a PASS or no expectation, then we can exclude it from + # test_expectations. + if not has_meaningful_modifier and not has_non_pass_expectation: + continue + + if not has_non_pass_expectation: + expectations = ["pass"] + + missing_build_types = list(self.BUILD_TYPES) + sentinal = None + for build_type in update.build_info: + if not sentinal: + sentinal = update.build_info[build_type] + # Remove build_types where the list of platforms is equal. + if sentinal == update.build_info[build_type]: + missing_build_types.remove(build_type) + + has_all_build_types = not len(missing_build_types) + if has_all_build_types: + self._WriteLine(output, comment_lines, update, options, + build_type, expectations, test, + has_all_build_types) + wrote_any_lines = True + else: + for build_type in update.build_info: + self._WriteLine(output, comment_lines, update, options, + build_type, expectations, test, + has_all_build_types) + wrote_any_lines = True + + return wrote_any_lines + + def _WriteCompletedLines(self, output, comment_lines, test_line=None): + """Writes the comment_lines and test_line to the output and empties + out the comment_lines.""" + output.extend(comment_lines) + del comment_lines[:] + if test_line: + output.append(test_line) + + def _GetPlatform(self, platforms): + """Returns the platform to use. If all platforms are listed, then + return the empty string as that's what we want to list in + test_expectations.txt. + + Args: + platforms: List of lower-case platform names. 
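The module-level UpdateExpectations(expectations, updates) helper above is the entry point the unit tests later in this change exercise: updates is the dashboard JSON described in the class docstring, and the return value is the rewritten test_expectations text. A small usage sketch mirroring testRemoveFlakyExpectation further down, assuming update_expectations_from_dashboard and its layout_package dependencies are importable:

    import update_expectations_from_dashboard

    old_expectations = "BUG1 : 1.html = TIMEOUT FAIL\n"
    # The dashboard reports FAIL as extra on every bot, so it gets dropped.
    updates = {"1.html": {
        "WIN RELEASE": {"extra": "FAIL"},
        "WIN DEBUG": {"extra": "FAIL"},
        "LINUX RELEASE": {"extra": "FAIL"},
        "LINUX DEBUG": {"extra": "FAIL"},
        "MAC RELEASE": {"extra": "FAIL"},
        "MAC DEBUG": {"extra": "FAIL"}}}
    new_expectations = update_expectations_from_dashboard.UpdateExpectations(
        old_expectations, updates)
    # Per testRemoveFlakyExpectation, this yields "BUG1 : 1.html = TIMEOUT\n".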
+ """ + platforms.sort() + if platforms == list(self.BASE_PLATFORMS): + return "" else: - if removed_test_on_previous_line: - removed_test_on_previous_line = False - comment_lines = [] - comment_lines.append(line) - continue - - platforms, build_types, other_options = self._GetBuildTypesAndPlatforms( - options) - - updates = update_json[test] - has_updates_for_this_line = False - for build_info in updates: - platform, build_type = build_info.lower().split(' ') - if platform in platforms and build_type in build_types: - has_updates_for_this_line = True - - # If the updates for this test don't apply for the platforms/build-types - # listed in this line, then output the line unmodified. - if not has_updates_for_this_line: - self._WriteCompletedLines(output, comment_lines, line) - continue - - results = {} - for platform in platforms: - results[platform] = {} - for build_type in build_types: - results[platform][build_type] = OptionsAndExpectationsHolder( - other_options[:], expectations[:]) - - self._ApplyUpdatesToResults(test, results, update_json, expectations, - other_options) - - deduped_updates = self._GetUpdatesDedupedByMatchingOptionsAndExpectations( - results) - removed_test_on_previous_line = not self._WriteUpdates(output, - comment_lines, test, deduped_updates) - # Append any comment/whitespace lines at the end of test_expectations. - output.extend(comment_lines) - return "".join(output) - - def _WriteUpdates(self, output, comment_lines, test, updates): - """Writes the updates to the output. - Args: - output: List to append updates to. - comment_lines: Comments that come before this test that should be - prepending iff any tests lines are written out. - test: The test being updating. - updates: List of BuildInfo instances that represent the final values - for this test line.. - """ - wrote_any_lines = False - for update in updates: - options = update.options - expectations = update.expectations - - has_meaningful_modifier = False - for option in options: - if option in self.MODIFIERS: - has_meaningful_modifier = True - break - - has_non_pass_expectation = False - for expectation in expectations: - if expectation != "pass": - has_non_pass_expectation = True - break - - # If this test is only left with platform, build_type, bug number - # and a PASS or no expectation, then we can exclude it from - # test_expectations. - if not has_meaningful_modifier and not has_non_pass_expectation: - continue - - if not has_non_pass_expectation: - expectations = ["pass"] - - missing_build_types = list(self.BUILD_TYPES) - sentinal = None - for build_type in update.build_info: - if not sentinal: - sentinal = update.build_info[build_type] - # Remove build_types where the list of platforms is equal. - if sentinal == update.build_info[build_type]: - missing_build_types.remove(build_type) - - has_all_build_types = not len(missing_build_types) - if has_all_build_types: - self._WriteLine(output, comment_lines, update, options, build_type, - expectations, test, has_all_build_types) - wrote_any_lines = True - else: - for build_type in update.build_info: - self._WriteLine(output, comment_lines, update, options, build_type, - expectations, test, has_all_build_types) - wrote_any_lines = True - - return wrote_any_lines - - def _WriteCompletedLines(self, output, comment_lines, test_line=None): - """Writes the comment_lines and test_line to the output and empties out - the comment_lines. 
- """ - output.extend(comment_lines) - del comment_lines[:] - if test_line: - output.append(test_line) - - def _GetPlatform(self, platforms): - """Returns the platform to use. If all platforms are listed, then return - the empty string as that's what we want to list in test_expectations.txt. - Args: - platforms: List of lower-case platform names. - """ - platforms.sort() - if platforms == list(self.BASE_PLATFORMS): - return "" - else: - return " ".join(platforms) - - def _WriteLine(self, output, comment_lines, update, options, build_type, - expectations, test, exclude_build_type): - """Writes a test_expectations.txt line. - Args: - output: List to append new lines to. - comment_lines: List of lines to prepend before the new line. - update: The update object. - """ - line = options[:] + return " ".join(platforms) + + def _WriteLine(self, output, comment_lines, update, options, build_type, + expectations, test, exclude_build_type): + """Writes a test_expectations.txt line. + Args: + output: List to append new lines to. + comment_lines: List of lines to prepend before the new line. + update: The update object. + """ + line = options[:] + + platforms = self._GetPlatform(update.build_info[build_type]) + if platforms: + line.append(platforms) + if not exclude_build_type: + line.append(build_type) - platforms = self._GetPlatform(update.build_info[build_type]) - if platforms: - line.append(platforms) - if not exclude_build_type: - line.append(build_type) + line = [x.upper() for x in line] + expectations = [x.upper() for x in expectations] - line = [x.upper() for x in line] - expectations = [x.upper() for x in expectations] + line = line + [":", test, "="] + expectations + self._WriteCompletedLines(output, comment_lines, " ".join(line) + "\n") - line = line + [":", test, "="] + expectations - self._WriteCompletedLines(output, comment_lines, " ".join(line) + "\n") def main(): - logging.basicConfig(level=logging.INFO, - format='%(message)s') + logging.basicConfig(level=logging.INFO, + format='%(message)s') - updates = simplejson.load(open(sys.argv[1])) + updates = simplejson.load(open(sys.argv[1])) - path_to_expectations = path_utils.GetAbsolutePath( - os.path.dirname(sys.argv[0])) - path_to_expectations = os.path.join(path_to_expectations, - "test_expectations.txt") + path_to_expectations = path_utils.GetAbsolutePath( + os.path.dirname(sys.argv[0])) + path_to_expectations = os.path.join(path_to_expectations, + "test_expectations.txt") - old_expectations = open(path_to_expectations).read() - new_expectations = UpdateExpectations(old_expectations, updates) - open(path_to_expectations, 'w').write(new_expectations) + old_expectations = open(path_to_expectations).read() + new_expectations = UpdateExpectations(old_expectations, updates) + open(path_to_expectations, 'w').write(new_expectations) if '__main__' == __name__: - main() + main() diff --git a/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py b/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py index 5ac7786..102054d 100644 --- a/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py +++ b/webkit/tools/layout_tests/update_expectations_from_dashboard_unittest.py @@ -12,375 +12,342 @@ import unittest import update_expectations_from_dashboard -class UpdateExpectationsUnittest(unittest.TestCase): - ########################################################################### - # Tests - def testKeepsUnmodifiedLines(self): - expectations = """// Ensure comments and newlines don't get stripped. 
-BUG1 SLOW : 1.html = PASS - -BUG2 : 2.html = FAIL TIMEOUT -""" - - expected_results = """// Ensure comments and newlines don't get stripped. -BUG1 SLOW : 1.html = PASS - -BUG2 : 2.html = FAIL TIMEOUT -""" - - updates = [] - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveFlakyExpectation(self): - expectations = "BUG1 : 1.html = TIMEOUT FAIL\n" - expected_results = "BUG1 : 1.html = TIMEOUT\n" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectationSlowTest(self): - expectations = "BUG1 SLOW : 1.html = FAIL\n" - expected_results = "BUG1 SLOW : 1.html = PASS\n" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "" - updates = {"1.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX RELEASE": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - "MAC RELEASE": {"extra": "FAIL"}, - "MAC DEBUG": {"extra": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveExpectationFromOnePlatform(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = """BUG1 MAC WIN DEBUG : 1.html = FAIL -BUG1 RELEASE : 1.html = FAIL -""" - updates = {"1.html": { - "LINUX DEBUG": {"extra": "FAIL"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveSlow(self): - expectations = "BUG1 SLOW : 1.html = PASS\n" - expected_results = "" - updates = {"1.html": { - "WIN RELEASE": {"extra": "SLOW"}, - "WIN DEBUG": {"extra": "SLOW"}, - "LINUX RELEASE": {"extra": "SLOW"}, - "LINUX DEBUG": {"extra": "SLOW"}, - "MAC RELEASE": {"extra": "SLOW"}, - "MAC DEBUG": {"extra": "SLOW"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddFlakyExpectation(self): - expectations = "BUG1 : 1.html = TIMEOUT\n" - expected_results = "BUG1 : 1.html = TIMEOUT FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExpectationSlowTest(self): - expectations = "BUG1 SLOW : 1.html = PASS\n" - expected_results = "BUG1 SLOW : 1.html = PASS FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExpectation(self): - # not yet implemented - return - - expectations = "" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - "WIN DEBUG": {"missing": "FAIL"}, - "LINUX RELEASE": {"missing": "FAIL"}, - "LINUX 
DEBUG": {"missing": "FAIL"}, - "MAC RELEASE": {"missing": "FAIL"}, - "MAC DEBUG": {"missing": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExpectationForOnePlatform(self): - expectations = "BUG1 WIN : 1.html = TIMEOUT\n" - expected_results = "BUG1 WIN : 1.html = TIMEOUT\n" - # TODO(ojan): Once we add currently unlisted tests, this expect results - # for this test should be: - #expected_results = """BUG1 WIN : 1.html = TIMEOUT - #BUG_AUTO LINUX DEBUG : 1.html = TIMEOUT - #""" - updates = {"1.html": { - "LINUX DEBUG": {"missing": "TIMEOUT"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddSlow(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 SLOW : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "SLOW"}, - "WIN DEBUG": {"missing": "SLOW"}, - "LINUX RELEASE": {"missing": "SLOW"}, - "LINUX DEBUG": {"missing": "SLOW"}, - "MAC RELEASE": {"missing": "SLOW"}, - "MAC DEBUG": {"missing": "SLOW"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddRemoveMultipleExpectations(self): - expectations = """BUG1 WIN : 1.html = FAIL -BUG2 MAC : 1.html = FAIL""" - expected_results = """BUG1 SLOW WIN : 1.html = FAIL -BUG2 MAC : 1.html = TIMEOUT\n""" - # TODO(ojan): Once we add currently unlisted tests, this expect results - # for this test should be: - #expected_results = """BUG1 SLOW WIN : 1.html = FAIL - #BUG_AUTO LINUX SLOW : 1.html = PASS - #BUG2 MAC : 1.html = TIMEOUT - #""" - - updates = {"1.html": { - "WIN RELEASE": {"missing": "SLOW"}, - "WIN DEBUG": {"missing": "SLOW"}, - "LINUX RELEASE": {"missing": "SLOW"}, - "LINUX DEBUG": {"missing": "SLOW"}, - "MAC RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, - "MAC DEBUG": {"missing": "TIMEOUT", "extra": "FAIL"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddExistingExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "FAIL"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddImageOrTextToFailExpectation(self): - expectations = """BUG1 WIN RELEASE : 1.html = FAIL -BUG1 MAC RELEASE : 1.html = FAIL -BUG1 LINUX RELEASE : 1.html = FAIL -BUG1 LINUX DEBUG : 1.html = TIMEOUT -""" - expected_results = """BUG1 WIN RELEASE : 1.html = IMAGE+TEXT -BUG1 MAC RELEASE : 1.html = IMAGE -BUG1 LINUX RELEASE : 1.html = TEXT -BUG1 LINUX DEBUG : 1.html = TIMEOUT IMAGE+TEXT -""" - updates = {"1.html": { - "WIN RELEASE": {"missing": "IMAGE+TEXT"}, - "MAC RELEASE": {"missing": "IMAGE"}, - "LINUX RELEASE": {"missing": "TEXT"}, - "LINUX DEBUG": {"missing": "IMAGE+TEXT"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddOther(self): - # Other is a catchall for more obscure expectations results. - # We should never add it to test_expectations. 
- expectations = "BUG1 WIN RELEASE : 1.html = FAIL\n" - expected_results = "BUG1 WIN RELEASE : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "OTHER"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testRemoveNonExistantExpectation(self): - expectations = "BUG1 : 1.html = FAIL\n" - expected_results = "BUG1 : 1.html = FAIL\n" - updates = {"1.html": { - "WIN RELEASE": {"extra": "TIMEOUT"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testUpdateSomePlatforms(self): - expectations = "BUG1 DEBUG : 1.html = TEXT PASS\n" - # TODO(ojan): Once we add currently unlisted tests, the expect results - # for this test should include the missing bits for RELEASE. - expected_results = "BUG1 LINUX DEBUG : 1.html = TEXT PASS\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "PASS TEXT"}, - "WIN DEBUG":{"extra": "MISSING TEXT"}, - "MAC RELEASE":{"missing": "PASS TEXT"}, - "MAC DEBUG":{"extra": "MISSING TEXT"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddTimeoutToSlowTest(self): - # SLOW tests needing TIMEOUT need manual updating. Should just print - # a log and not modify the test. - expectations = "BUG1 SLOW : 1.html = TEXT\n" - expected_results = "BUG1 SLOW : 1.html = TEXT\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "TIMEOUT"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testAddSlowToTimeoutTest(self): - # SLOW tests needing TIMEOUT need manual updating. Should just print - # a log and not modify the test. - expectations = "BUG1 : 1.html = TIMEOUT\n" - expected_results = "BUG1 : 1.html = TIMEOUT\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "SLOW"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testIncludeLastPlatformInFlakiness(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations are a - # subset of the other 5/6, then give them all the same expectations. - expectations = "BUG2 : 1.html = FAIL\n" - expected_results = "BUG2 : 1.html = FAIL TIMEOUT\n" - updates = {"1.html": { - "WIN RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, - "WIN DEBUG": {"missing": "TIMEOUT"}, - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "LINUX DEBUG": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testIncludeLastPlatformInFlakinessThreeOutOfFour(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations are a - # subset of the other 5/6, then give them all the same expectations. - expectations = "BUG2 MAC LINUX : 1.html = FAIL\n" - expected_results = "BUG2 LINUX MAC : 1.html = FAIL TIMEOUT\n" - updates = {"1.html": { - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testExcludeLastPlatformFromFlakiness(self): - # If a test is flaky on 5/6 platforms and the 6th's expectations are not a - # subset of the other 5/6, then don't give them all the same expectations. 
- expectations = "BUG1 : 1.html = FAIL\n" - expected_results = """BUG1 DEBUG : 1.html = FAIL TIMEOUT -BUG1 LINUX MAC RELEASE : 1.html = FAIL TIMEOUT -BUG1 WIN RELEASE : 1.html = FAIL CRASH -""" - updates = {"1.html": { - "WIN RELEASE" : {"missing": "CRASH"}, - "WIN DEBUG": {"missing": "TIMEOUT"}, - "LINUX RELEASE": {"missing": "TIMEOUT"}, - "LINUX DEBUG": {"missing": "TIMEOUT"}, - "MAC RELEASE": {"missing": "TIMEOUT"}, - "MAC DEBUG": {"missing": "TIMEOUT"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testStripComments(self): - expectations = """BUG1 : 1.html = TIMEOUT - -// Comment/whitespace should be removed when the test is. -BUG2 WIN RELEASE : 2.html = TEXT - -// Comment/whitespace after test should remain. - -BUG2 MAC : 2.html = TEXT - -// Comment/whitespace at end of file should remain. -""" - expected_results = """BUG1 : 1.html = TIMEOUT - -// Comment/whitespace after test should remain. - -BUG2 MAC DEBUG : 2.html = TEXT - -// Comment/whitespace at end of file should remain. -""" - updates = {"2.html": { - "WIN RELEASE": {"extra": "TEXT"}, - "MAC RELEASE": {"extra": "TEXT"} - }} - self.updateExpectations(expectations, updates, expected_results) - - def testLeaveComments(self): - expectations = """BUG1 : 1.html = TIMEOUT - -// Comment/whitespace should remain. -BUG2 : 2.html = FAIL PASS -""" - expected_results = """BUG1 : 1.html = TIMEOUT - -// Comment/whitespace should remain. -BUG2 MAC DEBUG : 2.html = FAIL PASS -BUG2 LINUX MAC RELEASE : 2.html = FAIL PASS -""" - updates = {"2.html": { - "WIN RELEASE": {"extra": "FAIL"}, - "WIN DEBUG": {"extra": "FAIL"}, - "LINUX DEBUG": {"extra": "FAIL"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - def testLeaveCommentsIfNoWhitespaceAfterTest(self): - expectations = """// Comment/whitespace should remain. -BUG2 WIN RELEASE : 2.html = TEXT -BUG2 : 1.html = IMAGE -""" - expected_results = """// Comment/whitespace should remain. -BUG2 : 1.html = IMAGE -""" - updates = {"2.html": { - "WIN RELEASE": {"extra": "TEXT"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - - def testLeavesUnmodifiedExpectationsUntouched(self): - # Ensures tests that would just change sort order of a line are noops. - expectations = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" - expected_results = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" - updates = {"1.html": { - "MAC RELEASE": {"missing": "SLOW"}, - }} - self.updateExpectations(expectations, updates, expected_results) - - ########################################################################### - # Helper functions - def updateExpectations(self, expectations, updates, expected_results): - results = update_expectations_from_dashboard.UpdateExpectations( - expectations, updates) - self.assertEqual(expected_results, results) +class UpdateExpectationsUnittest(unittest.TestCase): + ########################################################################### + # Tests + + def testKeepsUnmodifiedLines(self): + expectations = """// Ensure comments and newlines don't get stripped. + BUG1 SLOW : 1.html = PASS + + BUG2 : 2.html = FAIL TIMEOUT + """ + exp_results = """// Ensure comments and newlines don't get stripped. 
+ BUG1 SLOW : 1.html = PASS + + BUG2 : 2.html = FAIL TIMEOUT + """ + + updates = [] + self.updateExpectations(expectations, updates, exp_results) + + def testRemoveFlakyExpectation(self): + expectations = "BUG1 : 1.html = TIMEOUT FAIL\n" + expected_results = "BUG1 : 1.html = TIMEOUT\n" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectationSlowTest(self): + expectations = "BUG1 SLOW : 1.html = FAIL\n" + expected_results = "BUG1 SLOW : 1.html = PASS\n" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "" + updates = {"1.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX RELEASE": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}, + "MAC RELEASE": {"extra": "FAIL"}, + "MAC DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveExpectationFromOnePlatform(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = """BUG1 MAC WIN DEBUG : 1.html = FAIL + BUG1 RELEASE : 1.html = FAIL + """ + updates = {"1.html": {"LINUX DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveSlow(self): + expectations = "BUG1 SLOW : 1.html = PASS\n" + expected_results = "" + updates = {"1.html": { + "WIN RELEASE": {"extra": "SLOW"}, + "WIN DEBUG": {"extra": "SLOW"}, + "LINUX RELEASE": {"extra": "SLOW"}, + "LINUX DEBUG": {"extra": "SLOW"}, + "MAC RELEASE": {"extra": "SLOW"}, + "MAC DEBUG": {"extra": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddFlakyExpectation(self): + expectations = "BUG1 : 1.html = TIMEOUT\n" + expected_results = "BUG1 : 1.html = TIMEOUT FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExpectationSlowTest(self): + expectations = "BUG1 SLOW : 1.html = PASS\n" + expected_results = "BUG1 SLOW : 1.html = PASS FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExpectation(self): + # not yet implemented + return + + expectations = "" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "FAIL"}, + "WIN DEBUG": {"missing": "FAIL"}, + "LINUX RELEASE": {"missing": "FAIL"}, + "LINUX DEBUG": {"missing": "FAIL"}, + "MAC RELEASE": {"missing": "FAIL"}, + "MAC DEBUG": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + 
+ def testAddExpectationForOnePlatform(self): + expectations = "BUG1 WIN : 1.html = TIMEOUT\n" + expected_results = "BUG1 WIN : 1.html = TIMEOUT\n" + # TODO(ojan): Once we add currently unlisted tests, this expect results + # for this test should be: + #expected_results = """BUG1 WIN : 1.html = TIMEOUT + #BUG_AUTO LINUX DEBUG : 1.html = TIMEOUT + #""" + updates = {"1.html": {"LINUX DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddSlow(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 SLOW : 1.html = FAIL\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "SLOW"}, + "WIN DEBUG": {"missing": "SLOW"}, + "LINUX RELEASE": {"missing": "SLOW"}, + "LINUX DEBUG": {"missing": "SLOW"}, + "MAC RELEASE": {"missing": "SLOW"}, + "MAC DEBUG": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddRemoveMultipleExpectations(self): + expectations = """BUG1 WIN : 1.html = FAIL + BUG2 MAC : 1.html = FAIL""" + expected_results = """BUG1 SLOW WIN : 1.html = FAIL + BUG2 MAC : 1.html = TIMEOUT\n""" + # TODO(ojan): Once we add currently unlisted tests, this expect results + # for this test should be: + #expected_results = """BUG1 SLOW WIN : 1.html = FAIL + #BUG_AUTO LINUX SLOW : 1.html = PASS + #BUG2 MAC : 1.html = TIMEOUT + #""" + + updates = {"1.html": { + "WIN RELEASE": {"missing": "SLOW"}, + "WIN DEBUG": {"missing": "SLOW"}, + "LINUX RELEASE": {"missing": "SLOW"}, + "LINUX DEBUG": {"missing": "SLOW"}, + "MAC RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, + "MAC DEBUG": {"missing": "TIMEOUT", "extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddExistingExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddImageOrTextToFailExpectation(self): + expectations = """BUG1 WIN RELEASE : 1.html = FAIL + BUG1 MAC RELEASE : 1.html = FAIL + BUG1 LINUX RELEASE : 1.html = FAIL + BUG1 LINUX DEBUG : 1.html = TIMEOUT + """ + expected_results = """BUG1 WIN RELEASE : 1.html = IMAGE+TEXT + BUG1 MAC RELEASE : 1.html = IMAGE + BUG1 LINUX RELEASE : 1.html = TEXT + BUG1 LINUX DEBUG : 1.html = TIMEOUT IMAGE+TEXT + """ + updates = {"1.html": { + "WIN RELEASE": {"missing": "IMAGE+TEXT"}, + "MAC RELEASE": {"missing": "IMAGE"}, + "LINUX RELEASE": {"missing": "TEXT"}, + "LINUX DEBUG": {"missing": "IMAGE+TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddOther(self): + # Other is a catchall for more obscure expectations results. + # We should never add it to test_expectations. + expectations = "BUG1 WIN RELEASE : 1.html = FAIL\n" + expected_results = "BUG1 WIN RELEASE : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "OTHER"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testRemoveNonExistantExpectation(self): + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = "BUG1 : 1.html = FAIL\n" + updates = {"1.html": {"WIN RELEASE": {"extra": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testUpdateSomePlatforms(self): + expectations = "BUG1 DEBUG : 1.html = TEXT PASS\n" + # TODO(ojan): Once we add currently unlisted tests, the expect results + # for this test should include the missing bits for RELEASE. 
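Every expectation line these tests manipulate has the shape "MODIFIERS : test = EXPECTATIONS", and ExpectationsUpdater leans on the inherited ParseExpectationsLine to break a line into its test, options and expectations. The sketch below only illustrates that format; it is not the real parser and ignores comments, blank lines and malformed input:

    def split_expectation_line(line):
        # "BUG1 SLOW WIN : 1.html = PASS FAIL" ->
        #   (['bug1', 'slow', 'win'], '1.html', ['pass', 'fail'])
        modifiers, rest = line.split(':', 1)
        test, expectations = rest.split('=', 1)
        return (modifiers.lower().split(),
                test.strip(),
                expectations.lower().split())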
+ expected_results = "BUG1 LINUX DEBUG : 1.html = TEXT PASS\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "PASS TEXT"}, + "WIN DEBUG": {"extra": "MISSING TEXT"}, + "MAC RELEASE": {"missing": "PASS TEXT"}, + "MAC DEBUG": {"extra": "MISSING TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddTimeoutToSlowTest(self): + # SLOW tests needing TIMEOUT need manual updating. Should just print + # a log and not modify the test. + expectations = "BUG1 SLOW : 1.html = TEXT\n" + expected_results = "BUG1 SLOW : 1.html = TEXT\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testAddSlowToTimeoutTest(self): + # SLOW tests needing TIMEOUT need manual updating. Should just print + # a log and not modify the test. + expectations = "BUG1 : 1.html = TIMEOUT\n" + expected_results = "BUG1 : 1.html = TIMEOUT\n" + updates = {"1.html": {"WIN RELEASE": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testIncludeLastPlatformInFlakiness(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations are a + # subset of the other 5/6, then give them all the same expectations. + expectations = "BUG2 : 1.html = FAIL\n" + expected_results = "BUG2 : 1.html = FAIL TIMEOUT\n" + updates = {"1.html": { + "WIN RELEASE": {"missing": "TIMEOUT", "extra": "FAIL"}, + "WIN DEBUG": {"missing": "TIMEOUT"}, + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "LINUX DEBUG": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testIncludeLastPlatformInFlakinessThreeOutOfFour(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations are a + # subset of the other 5/6, then give them all the same expectations. + expectations = "BUG2 MAC LINUX : 1.html = FAIL\n" + expected_results = "BUG2 LINUX MAC : 1.html = FAIL TIMEOUT\n" + updates = {"1.html": { + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testExcludeLastPlatformFromFlakiness(self): + # If a test is flaky on 5/6 platforms and the 6th's expectations + # are not a subset of the other 5/6, then don't give them + # all the same expectations. + expectations = "BUG1 : 1.html = FAIL\n" + expected_results = """BUG1 DEBUG : 1.html = FAIL TIMEOUT + BUG1 LINUX MAC RELEASE : 1.html = FAIL TIMEOUT + BUG1 WIN RELEASE : 1.html = FAIL CRASH + """ + updates = {"1.html": { + "WIN RELEASE": {"missing": "CRASH"}, + "WIN DEBUG": {"missing": "TIMEOUT"}, + "LINUX RELEASE": {"missing": "TIMEOUT"}, + "LINUX DEBUG": {"missing": "TIMEOUT"}, + "MAC RELEASE": {"missing": "TIMEOUT"}, + "MAC DEBUG": {"missing": "TIMEOUT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testStripComments(self): + expectations = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should be removed when the test is. + BUG2 WIN RELEASE : 2.html = TEXT + + // Comment/whitespace after test should remain. + + BUG2 MAC : 2.html = TEXT + + // Comment/whitespace at end of file should remain. + """ + expected_results = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace after test should remain. + + BUG2 MAC DEBUG : 2.html = TEXT + + // Comment/whitespace at end of file should remain. 
+ """ + updates = {"2.html": { + "WIN RELEASE": {"extra": "TEXT"}, + "MAC RELEASE": {"extra": "TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeaveComments(self): + expectations = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should remain. + BUG2 : 2.html = FAIL PASS + """ + expected_results = """BUG1 : 1.html = TIMEOUT + + // Comment/whitespace should remain. + BUG2 MAC DEBUG : 2.html = FAIL PASS + BUG2 LINUX MAC RELEASE : 2.html = FAIL PASS + """ + updates = {"2.html": { + "WIN RELEASE": {"extra": "FAIL"}, + "WIN DEBUG": {"extra": "FAIL"}, + "LINUX DEBUG": {"extra": "FAIL"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeaveCommentsIfNoWhitespaceAfterTest(self): + expectations = """// Comment/whitespace should remain. + BUG2 WIN RELEASE : 2.html = TEXT + BUG2 : 1.html = IMAGE + """ + expected_results = """// Comment/whitespace should remain. + BUG2 : 1.html = IMAGE + """ + updates = {"2.html": {"WIN RELEASE": {"extra": "TEXT"}}} + self.updateExpectations(expectations, updates, expected_results) + + def testLeavesUnmodifiedExpectationsUntouched(self): + # Ensures tests that would just change sort order of a line are noops. + expectations = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" + expected_results = "BUG1 WIN LINUX : 1.html = TIMEOUT\n" + updates = {"1.html": {"MAC RELEASE": {"missing": "SLOW"}}} + self.updateExpectations(expectations, updates, expected_results) + + ########################################################################### + # Helper functions + + def updateExpectations(self, expectations, updates, expected_results): + results = update_expectations_from_dashboard.UpdateExpectations( + expectations, updates) + self.assertEqual(expected_results, results) if '__main__' == __name__: - unittest.main() + unittest.main() |