author    anandc@chromium.org <anandc@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-03-20 21:00:46 +0000
committer anandc@chromium.org <anandc@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-03-20 21:00:46 +0000
commit    5bb2512e215d0d7c3419ccb40b59a66b7754a20d (patch)
tree      51b2bea19cb7e9fb90a8e6098db36288178be08b /tools/perf_expectations
parent    a7f1b4db6463f347844f9395c21dd7cf3fc837f0 (diff)
The regularly running perf-AV tests require re-baselining of expectations
about once a week. The steps involved in re-baselining are:

1.) Identify the tests to update, based on reported results.
2.) Figure out reva and revb values, which are the start and end revision
    numbers for the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json that relate to the tests to be
    updated, so that they may be used as input to make_expectations.py.

This CL adds a Python script to perform the last step listed above.

BUG=172930
Review URL: https://chromiumcodereview.appspot.com/12101002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@189395 0039d316-1c4b-4281-b951-d872f2087c98
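For illustration, the per-entry transformation the new script performs is roughly this (a minimal sketch, assuming perf_expectations.json has already been parsed into a plain dict; the entry and revision numbers are taken from the examples in this CL):

  # Sketch: prepare one expectation entry for re-baselining.
  entry = {"reva": 163299, "revb": 164141, "type": "absolute",
           "better": "higher", "improve": 0, "regress": 0,
           "sha1": "54d94538"}
  entry["reva"] = 12345    # new start of the revision range
  entry["revb"] = 23456    # new end of the revision range
  entry.pop("sha1", None)  # a missing sha1 marks the test as needing an update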
Diffstat (limited to 'tools/perf_expectations')
-rwxr-xr-x  tools/perf_expectations/make_expectations.py                  10
-rw-r--r--  tools/perf_expectations/sample_test_cases.json                 28
-rw-r--r--  tools/perf_expectations/update_perf_expectations.py           263
-rw-r--r--  tools/perf_expectations/update_perf_expectations_unittest.py  204
4 files changed, 502 insertions(+), 3 deletions(-)
diff --git a/tools/perf_expectations/make_expectations.py b/tools/perf_expectations/make_expectations.py
index 997be97..6b9efff 100755
--- a/tools/perf_expectations/make_expectations.py
+++ b/tools/perf_expectations/make_expectations.py
@@ -101,7 +101,7 @@ def GetRowDigest(rowdata, key):
   return sha1.hexdigest()[0:8]
 
 
-def WriteJson(filename, data, keys):
+def WriteJson(filename, data, keys, calculate_sha1=True):
   """Write a list of |keys| in |data| to the file specified in |filename|."""
   try:
     file = open(filename, 'w')
@@ -112,8 +112,12 @@ def WriteJson(filename, data, keys):
   jsondata = []
   for key in keys:
     rowdata = GetRowData(data, key)
-    # Include an updated checksum.
-    rowdata.append('"sha1": "%s"' % GetRowDigest(rowdata, key))
+    if calculate_sha1:
+      # Include an updated checksum.
+      rowdata.append('"sha1": "%s"' % GetRowDigest(rowdata, key))
+    else:
+      if 'sha1' in data[key]:
+        rowdata.append('"sha1": "%s"' % (data[key]['sha1']))
     jsondata.append('"%s": {%s}' % (key, ', '.join(rowdata)))
   jsondata.append('"load": true')
   jsontext = '{%s\n}' % ',\n '.join(jsondata)
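The new calculate_sha1 parameter keeps WriteJson backward compatible: by default it recomputes each row's checksum as before, while the new script below passes calculate_sha1=False so that existing checksums are carried over verbatim and entries whose sha1 was removed stay unmarked. A hedged sketch of the two calling modes (the file name, data, and keys values are placeholders):

  # Default mode: recompute and embed a fresh checksum for every row.
  WriteJson('perf_expectations.json', data, keys)
  # Preservation mode: keep existing checksums; rows whose 'sha1' was removed
  # are written without one, flagging them for re-baselining.
  WriteJson('perf_expectations.json', data, keys, calculate_sha1=False)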
diff --git a/tools/perf_expectations/sample_test_cases.json b/tools/perf_expectations/sample_test_cases.json
new file mode 100644
index 0000000..0fb7462
--- /dev/null
+++ b/tools/perf_expectations/sample_test_cases.json
@@ -0,0 +1,28 @@
+{"linux-release/media_tests_av_perf/audio_latency/latency": {"reva": 180005, "revb": 180520, "type": "absolute", "better": "lower", "improve": 190, "regress": 222, "sha1": "fc9815d5"},
+"linux-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fb8157f9"},
+"linux-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "c0fb3421"},
+"linux-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fa9582d3"},
+"linux-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 166, "regress": 231, "sha1": "ca3a7a47"},
+"linux-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
+"linux-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
+"linux-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
+"linux-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
+"linux-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
+"linux-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
+"win-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "646c02f2"},
+"win-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "46c97b57"},
+"win-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "9b709aab"},
+"win-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 174, "regress": 204, "sha1": "4c0270a6"},
+"win-release/media_tests_av_perf/fps/crowd1080.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 53, "regress": 43, "sha1": "7ad49461"},
+"win-release/media_tests_av_perf/fps/crowd2160.webm": {"reva": 176330, "revb": 176978, "type": "absolute", "better": "higher", "improve": 26.0399945997, "regress": 25.9062437562, "sha1": "700526a9"},
+"win-release/media_tests_av_perf/fps/crowd360.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 51, "regress": 47, "sha1": "7f8ef21c"},
+"win-release/media_tests_av_perf/fps/crowd480.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 50, "regress": 47, "sha1": "5dc96881"},
+"win-release/media_tests_av_perf/fps/crowd720.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 52, "regress": 47, "sha1": "4fcfb653"},
+"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "54d94538"},
+"win-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "113aef17"},
+"win-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "a22847d0"},
+"win-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "6ee2e716"},
+"win-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 32, "regress": 26, "sha1": "dfadb872"},
+"win-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "530c5bf5"},
+"win-release/media_tests_av_perf/fps/tulip2.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "35b91c8e"}
+}
\ No newline at end of file
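The entries above are consumed through the JSON helpers in make_expectations.py, as the unit tests below do. A minimal loading sketch (assumes the working directory is tools/perf_expectations so the relative path resolves):

  import make_expectations as perf_ex_lib

  # Parse the sample file into a dict keyed by test path.
  all_tests = perf_ex_lib.ConvertJsonIntoDict(
      perf_ex_lib.ReadFile('sample_test_cases.json'))
  # For example, the win-release tulip2.m4a fps entry carries sha1 '54d94538'.
  print all_tests['win-release/media_tests_av_perf/fps/tulip2.m4a']['sha1']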
diff --git a/tools/perf_expectations/update_perf_expectations.py b/tools/perf_expectations/update_perf_expectations.py
new file mode 100644
index 0000000..d1ce983
--- /dev/null
+++ b/tools/perf_expectations/update_perf_expectations.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Prepare tests that require re-baselining for input to make_expectations.py.
+
+The regularly running perf-AV tests require re-baselineing of expectations
+about once a week. The steps involved in rebaselining are:
+
+1.) Identify the tests to update, based off reported e-mail results.
+2.) Figure out reva and revb values, which is the starting and ending revision
+ numbers for the range that we should use to obtain new thresholds.
+3.) Modify lines in perf_expectations.json referring to the tests to be updated,
+ so that they may be used as input to make_expectations.py.
+
+This script automates the last step above.
+
+Here's a sample line from perf_expectations.json:
+
+"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
+"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
+"regress": 0, "sha1": "54d94538"},
+
+To get the above test ready for input to make_expectations.py, it should become:
+
+"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
+"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
+"regress": 0},
+
+Examples:
+
+1.) To update the test specified above and get baseline
+values using the revision range 12345 and 23456, run this script with a command
+line like this:
+ python update_perf_expectations.py -f \
+ win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
+Or, using an input file,
+where the input file contains a single line with text
+ win-release/media_tests_av_perf/fps/tulip2.m4a
+run with this command line:
+ python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
+
+2.) Let's say you want to update all seek tests on windows, and get baseline
+values using the revision range 12345 and 23456.
+Run this script with this command line:
+ python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
+ --reva 12345 --revb 23456
+Or:
+ python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
+ --revb 23456
+
+Or, using an input file,
+where the input file contains a single line with text win-release/.*/seek/:
+ python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
+
+3.) Similarly, if you want to update seek tests on all platforms
+ python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
+ --revb 23456
+
+"""
+
+import logging
+from optparse import OptionParser
+import os
+import re
+
+import make_expectations as perf_ex_lib
+
+# Default logging is INFO. Use -v to enable DEBUG logging.
+_DEFAULT_LOG_LEVEL = logging.INFO
+
+
+def GetTestsToUpdate(contents, all_test_keys):
+  """Parses input contents and obtains tests to be re-baselined.
+
+  Args:
+    contents: string containing contents of the input file.
+    all_test_keys: list of keys of the test dictionary.
+
+  Returns:
+    A list of keys for tests that should be updated.
+  """
+  # Each line of the input file specifies a test case to update.
+  tests_list = []
+  for test_case_filter in contents.splitlines():
+    # Skip any empty lines.
+    if test_case_filter:
+      # Sample expected line:
+      # win-release/media_tests_av_perf/seek/\
+      # CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
+      # Or, if a reg-ex, then a sample line is:
+      # win-release/media_tests_av_perf/seek.*
+      # Strip any leading spaces if they exist in the input file.
+      logging.debug('Trying to match %s', test_case_filter)
+      tests_list.extend(GetMatchingTests(test_case_filter.strip(),
+                                         all_test_keys))
+  return tests_list
+
+
+def GetMatchingTests(tests_to_update, all_test_keys):
+  """Parses input reg-ex filter and obtains tests to be re-baselined.
+
+  Args:
+    tests_to_update: reg-ex string specifying tests to be updated.
+    all_test_keys: list of keys of tests dictionary.
+
+  Returns:
+    A list of keys for tests that should be updated.
+  """
+  tests_list = []
+  search_string = re.compile(tests_to_update)
+  # Get matching tests from the dictionary of tests.
+  for test_key in all_test_keys:
+    if search_string.match(test_key):
+      tests_list.append(test_key)
+      logging.debug('%s will be updated', test_key)
+  logging.info('%s tests found matching reg-ex: %s', len(tests_list),
+               tests_to_update)
+  return tests_list
+
+
+def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
+  """Modifies the values of tests that are to be re-baselined:
+  sets reva and revb to the specified new values and removes sha1.
+
+  Args:
+    tests_to_update: list of tests to be updated.
+    all_tests: dictionary of all tests.
+    reva: oldest revision in the range to use for new values.
+    revb: newest revision in the range to use for new values.
+
+  Raises:
+    ValueError: if reva or revb is not a valid int, or if either
+        of them is negative.
+  """
+  reva = int(reva)
+  revb = int(revb)
+
+  if reva < 0 or revb < 0:
+    raise ValueError('Revision values should be positive.')
+  # Ensure reva is less than revb.
+  # (This is similar to the check done in make_expectations.py.)
+  if revb < reva:
+    temp = revb
+    revb = reva
+    reva = temp
+  for test_key in tests_to_update:
+    # Get the original test from the dictionary of tests.
+    test_value = all_tests[test_key]
+    if test_value:
+      # Sample line in perf_expectations.json:
+      # "linux-release/media_tests_av_perf/dropped_frames/crowd360.webm":\
+      #     {"reva": 155180, "revb": 155280, "type": "absolute", \
+      #     "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
+      # Set the new revision range.
+      test_value['reva'] = reva
+      test_value['revb'] = revb
+      # Remove sha1 to indicate this test requires an update.
+      # Check first to make sure it exists.
+      if 'sha1' in test_value:
+        del test_value['sha1']
+    else:
+      logging.warning('%s does not exist.', test_key)
+  logging.info('Done preparing tests for update.')
+
+
+def GetCommandLineOptions():
+  """Parses command line arguments.
+
+  Returns:
+    An options object containing command line arguments and their values.
+  """
+  parser = OptionParser()
+
+  parser.add_option('--reva', dest='reva', type='int',
+                    help='Starting revision of the new range.',
+                    metavar='START_REVISION')
+  parser.add_option('--revb', dest='revb', type='int',
+                    help='Ending revision of the new range.',
+                    metavar='END_REVISION')
+  parser.add_option('-f', dest='tests_filter',
+                    help='Regex to use for filtering tests to be updated. '
+                    'Exactly one of -f or -i must be provided.',
+                    metavar='FILTER', default='')
+  parser.add_option('-i', dest='input_file',
+                    help='Path to a file with reg-exes for tests to update. '
+                    'Exactly one of -f or -i must be provided.',
+                    metavar='INPUT_FILE', default='')
+  parser.add_option('--config', dest='config_file',
+                    default=perf_ex_lib.DEFAULT_CONFIG_FILE,
+                    help='Set the config file to FILE.', metavar='FILE')
+  parser.add_option('-v', dest='verbose', action='store_true', default=False,
+                    help='Enable verbose output.')
+  options = parser.parse_args()[0]
+  return options
+
+
+def Main():
+  """Main driver function."""
+  options = GetCommandLineOptions()
+
+  _SetLogger(options.verbose)
+  # Do some command-line validation.
+  if not options.input_file and not options.tests_filter:
+    logging.error('At least one of input-file or test-filter must be provided.')
+    exit(1)
+  if options.input_file and options.tests_filter:
+    logging.error('Specify only one of input file or test-filter.')
+    exit(1)
+  if not options.reva or not options.revb:
+    logging.error('Start and end revision of range must be specified.')
+    exit(1)
+
+  # Load config.
+  config = perf_ex_lib.ConvertJsonIntoDict(
+      perf_ex_lib.ReadFile(options.config_file))
+
+  # Obtain the perf expectations file from the config file.
+  perf_file = os.path.join(
+      os.path.dirname(options.config_file), config['perf_file'])
+
+  # We should have all the information we require now.
+  # On to the real thang.
+  # First, get all the existing tests from the original perf_expectations file.
+  all_tests = perf_ex_lib.ConvertJsonIntoDict(
+      perf_ex_lib.ReadFile(perf_file))
+  all_test_keys = all_tests.keys()
+  # Remove the load key, because we don't want to modify it.
+  all_test_keys.remove('load')
+  # Keep tests sorted, like in the original file.
+  all_test_keys.sort()
+
+  # Next, get all tests that have been identified for an update.
+  tests_to_update = []
+  if options.input_file:
+    # Tests to update have been specified in an input_file.
+    # Get contents of file.
+    tests_filter = perf_ex_lib.ReadFile(options.input_file)
+  elif options.tests_filter:
+    # Tests to update have been specified as a reg-ex filter.
+    tests_filter = options.tests_filter
+
+  # Get tests to update based on filter specified.
+  tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
+  logging.info('Done obtaining matching tests.')
+
+  # Now, prepare tests for update.
+  PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)
+
+  # Finally, write modified tests back to perf_expectations file.
+  perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
+                        calculate_sha1=False)
+  logging.info('Done writing tests for update to %s.', perf_file)
+
+
+def _SetLogger(verbose):
+  log_level = _DEFAULT_LOG_LEVEL
+  if verbose:
+    log_level = logging.DEBUG
+  logging.basicConfig(level=log_level, format='%(message)s')
+
+
+if __name__ == '__main__':
+  Main()
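Besides the command-line entry point, the helpers can be driven as a library, much as the unit tests in the next file do. A minimal sketch (the filter and revision numbers are placeholders; assumes the working directory is tools/perf_expectations):

  import make_expectations as perf_ex_lib
  import update_perf_expectations as upe_mod

  all_tests = perf_ex_lib.ConvertJsonIntoDict(
      perf_ex_lib.ReadFile('sample_test_cases.json'))
  # Match a subset of tests and rewrite their revision range in place.
  keys = sorted(k for k in all_tests if k != 'load')
  matches = upe_mod.GetMatchingTests('win-release/.*/fps/.*', keys)
  upe_mod.PrepareTestsForUpdate(matches, all_tests, 12345, 23456)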
diff --git a/tools/perf_expectations/update_perf_expectations_unittest.py b/tools/perf_expectations/update_perf_expectations_unittest.py
new file mode 100644
index 0000000..9e54b89
--- /dev/null
+++ b/tools/perf_expectations/update_perf_expectations_unittest.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unit tests for update_perf_expectations."""
+import copy
+from StringIO import StringIO
+import unittest
+import make_expectations as perf_ex_lib
+import update_perf_expectations as upe_mod
+
+
+# A separate .json file contains the list of test cases we'll use.
+# The tests used to be defined inline here, but are >80 characters in length.
+# Now they are expected to be defined in file ./sample_test_cases.json.
+# Create a dictionary of tests using .json file.
+all_tests = perf_ex_lib.ConvertJsonIntoDict(
+    perf_ex_lib.ReadFile('sample_test_cases.json'))
+# Get all keys.
+all_tests_keys = all_tests.keys()
+
+
+def VerifyPreparedTests(self, tests_to_update, reva, revb):
+  # Work with a copy of the set of tests.
+  all_tests_copy = copy.deepcopy(all_tests)
+  upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests_copy, reva, revb)
+  # Make sure reva < revb.
+  if reva > revb:
+    temp = reva
+    reva = revb
+    revb = temp
+  # Run through all tests and make sure only those that were
+  # specified to be modified had their 'sha1' value removed.
+  for test_key in all_tests_keys:
+    new_test_value = all_tests_copy[test_key]
+    original_test_value = all_tests[test_key]
+    if test_key in tests_to_update:
+      # Make sure there is no "sha1".
+      self.assertFalse('sha1' in new_test_value)
+      # Make sure reva and revb values are correctly set.
+      self.assertEqual(reva, new_test_value['reva'])
+      self.assertEqual(revb, new_test_value['revb'])
+    else:
+      # Make sure there is an "sha1" value.
+      self.assertTrue('sha1' in new_test_value)
+      # Make sure the sha1, reva and revb values have not changed.
+      self.assertEqual(original_test_value['sha1'], new_test_value['sha1'])
+      self.assertEqual(original_test_value['reva'], new_test_value['reva'])
+      self.assertEqual(original_test_value['revb'], new_test_value['revb'])
+
+class UpdatePerfExpectationsTest(unittest.TestCase):
+  def testFilterMatch(self):
+    """Verifies different regular-expression test filters."""
+    self.maxDiff = None
+    # Tests to update specified by a single literal string.
+    tests_to_update = 'win-release/media_tests_av_perf/fps/tulip2.webm'
+    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
+    self.assertEqual(expected_tests_list,
+                     upe_mod.GetMatchingTests(tests_to_update,
+                                              all_tests_keys))
+
+    # Tests to update specified by a single reg-ex.
+    tests_to_update = 'win-release/media_tests_av_perf/fps.*'
+    expected_tests_list = [
+        'win-release/media_tests_av_perf/fps/crowd1080.webm',
+        'win-release/media_tests_av_perf/fps/crowd2160.webm',
+        'win-release/media_tests_av_perf/fps/crowd360.webm',
+        'win-release/media_tests_av_perf/fps/crowd480.webm',
+        'win-release/media_tests_av_perf/fps/crowd720.webm',
+        'win-release/media_tests_av_perf/fps/tulip2.m4a',
+        'win-release/media_tests_av_perf/fps/tulip2.mp3',
+        'win-release/media_tests_av_perf/fps/tulip2.mp4',
+        'win-release/media_tests_av_perf/fps/tulip2.ogg',
+        'win-release/media_tests_av_perf/fps/tulip2.ogv',
+        'win-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/tulip2.webm']
+    actual_list = upe_mod.GetMatchingTests(tests_to_update,
+                                           all_tests_keys)
+    actual_list.sort()
+    self.assertEqual(expected_tests_list, actual_list)
+
+    # Tests to update are specified by a single reg-ex, spanning multiple OSes.
+    tests_to_update = '.*-release/media_tests_av_perf/fps.*'
+    expected_tests_list = [
+        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
+        'linux-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/crowd1080.webm',
+        'win-release/media_tests_av_perf/fps/crowd2160.webm',
+        'win-release/media_tests_av_perf/fps/crowd360.webm',
+        'win-release/media_tests_av_perf/fps/crowd480.webm',
+        'win-release/media_tests_av_perf/fps/crowd720.webm',
+        'win-release/media_tests_av_perf/fps/tulip2.m4a',
+        'win-release/media_tests_av_perf/fps/tulip2.mp3',
+        'win-release/media_tests_av_perf/fps/tulip2.mp4',
+        'win-release/media_tests_av_perf/fps/tulip2.ogg',
+        'win-release/media_tests_av_perf/fps/tulip2.ogv',
+        'win-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/tulip2.webm']
+    actual_list = upe_mod.GetMatchingTests(tests_to_update,
+                                           all_tests_keys)
+    actual_list.sort()
+    self.assertEqual(expected_tests_list, actual_list)
+
+  def testLinesFromInputFile(self):
+    """Verifies different string formats specified in input file."""
+
+    # Tests to update have been specified by a single literal string in
+    # an input file.
+    # Use the StringIO class to mock a file object.
+    lines_from_file = StringIO(
+        'win-release/media_tests_av_perf/fps/tulip2.webm')
+    contents = lines_from_file.read()
+    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
+    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
+    actual_list.sort()
+    self.assertEqual(expected_tests_list, actual_list)
+    lines_from_file.close()
+
+    # Tests to update specified by a single reg-ex in an input file.
+    lines_from_file = StringIO(
+        'win-release/media_tests_av_perf/fps/tulip2.*\n')
+    contents = lines_from_file.read()
+    expected_tests_list = [
+        'win-release/media_tests_av_perf/fps/tulip2.m4a',
+        'win-release/media_tests_av_perf/fps/tulip2.mp3',
+        'win-release/media_tests_av_perf/fps/tulip2.mp4',
+        'win-release/media_tests_av_perf/fps/tulip2.ogg',
+        'win-release/media_tests_av_perf/fps/tulip2.ogv',
+        'win-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/tulip2.webm']
+    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
+    actual_list.sort()
+    self.assertEqual(expected_tests_list, actual_list)
+    lines_from_file.close()
+
+    # Tests to update specified by multiple lines in an input file.
+    lines_from_file = StringIO(
+        '.*-release/media_tests_av_perf/fps/tulip2.*\n'
+        'win-release/media_tests_av_perf/dropped_fps/tulip2.*\n'
+        'linux-release/media_tests_av_perf/audio_latency/latency')
+    contents = lines_from_file.read()
+    expected_tests_list = [
+        'linux-release/media_tests_av_perf/audio_latency/latency',
+        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
+        'linux-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
+        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
+        'win-release/media_tests_av_perf/fps/tulip2.m4a',
+        'win-release/media_tests_av_perf/fps/tulip2.mp3',
+        'win-release/media_tests_av_perf/fps/tulip2.mp4',
+        'win-release/media_tests_av_perf/fps/tulip2.ogg',
+        'win-release/media_tests_av_perf/fps/tulip2.ogv',
+        'win-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/tulip2.webm']
+    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
+    actual_list.sort()
+    self.assertEqual(expected_tests_list, actual_list)
+    lines_from_file.close()
+
+  def testPreparingForUpdate(self):
+    """Verifies that tests to be modified are changed as expected."""
+    tests_to_update = [
+        'linux-release/media_tests_av_perf/audio_latency/latency',
+        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
+        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
+        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
+        'linux-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
+        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
+        'win-release/media_tests_av_perf/fps/tulip2.mp3',
+        'win-release/media_tests_av_perf/fps/tulip2.mp4',
+        'win-release/media_tests_av_perf/fps/tulip2.ogg',
+        'win-release/media_tests_av_perf/fps/tulip2.ogv',
+        'win-release/media_tests_av_perf/fps/tulip2.wav',
+        'win-release/media_tests_av_perf/fps/tulip2.webm']
+    # Test regular positive integers.
+    reva = 12345
+    revb = 54321
+    VerifyPreparedTests(self, tests_to_update, reva, revb)
+    # Test negative values.
+    reva = -54321
+    revb = 12345
+    with self.assertRaises(ValueError):
+      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
+    # Test reva greater than revb.
+    reva = 54321
+    revb = 12345
+    upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
+    # Test non-integer values.
+    reva = 'sds'
+    revb = 12345
+    with self.assertRaises(ValueError):
+      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
+
+
+if __name__ == '__main__':
+  unittest.main()
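Since the tests read sample_test_cases.json by a relative path, they are presumably meant to be run from tools/perf_expectations:

  python update_perf_expectations_unittest.py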