summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xtools/auto_bisect/bisect_perf_regression.py55
-rw-r--r--tools/auto_bisect/bisect_perf_regression_test.py87
-rwxr-xr-xtools/run-bisect-perf-regression.py6
3 files changed, 124 insertions, 24 deletions
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index f8765e1..e80fd43 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -1518,7 +1518,6 @@ class BisectPerformanceMetrics(object):
print 'Something went wrong while updating DEPS file. [%s]' % e
return False
-
def CreateDEPSPatch(self, depot, revision):
"""Modifies DEPS and returns diff as text.
@@ -1664,6 +1663,14 @@ class BisectPerformanceMetrics(object):
'std_dev': 0.0,
'values': [0.0]
}
+
+ # When debug_fake_first_test_mean is set, its value is returned as the mean
+ # and the flag is cleared so that further calls behave as if it wasn't
+ # set (returning the fake_results dict as defined above).
+ if self.opts.debug_fake_first_test_mean:
+ fake_results['mean'] = float(self.opts.debug_fake_first_test_mean)
+ self.opts.debug_fake_first_test_mean = 0
+
return (fake_results, success_code)
# For Windows platform set posix=False, to parse windows paths correctly.
@@ -2414,10 +2421,10 @@ class BisectPerformanceMetrics(object):
# Perform the performance tests on the good and bad revisions, to get
# reference values.
bad_results, good_results = self.GatherReferenceValues(good_revision,
- bad_revision,
- command_to_run,
- metric,
- target_depot)
+ bad_revision,
+ command_to_run,
+ metric,
+ target_depot)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
@@ -2436,12 +2443,34 @@ class BisectPerformanceMetrics(object):
good_results[0])
return results
-
# We need these reference values to determine if later runs should be
# classified as pass or fail.
known_bad_value = bad_results[0]
known_good_value = good_results[0]
+ # Check the direction of improvement only if the improvement_direction
+ # option is set to a specific direction (1 for higher is better or -1 for
+ # lower is better).
+ improvement_dir = self.opts.improvement_direction
+ if improvement_dir:
+ higher_is_better = improvement_dir > 0
+ if higher_is_better:
+ message = "Expecting higher values to be better for this metric, "
+ else:
+ message = "Expecting lower values to be better for this metric, "
+ metric_increased = known_bad_value['mean'] > known_good_value['mean']
+ if metric_increased:
+ message += "and the metric appears to have increased. "
+ else:
+ message += "and the metric appears to have decreased. "
+ if ((higher_is_better and metric_increased) or
+ (not higher_is_better and not metric_increased)):
+ results.error = (message + 'Then, the test results for the ends of '
+ 'the given \'good\' - \'bad\' range of revisions '
+ 'represent an improvement (and not a regression).')
+ return results
+ print message, "Therefore we continue to bisect."
+
# Can just mark the good and bad revisions explicitly here since we
# already know the results.
bad_revision_data = revision_data[revision_list[0]]
@@ -2939,12 +2968,14 @@ class BisectOptions(object):
self.debug_ignore_build = None
self.debug_ignore_sync = None
self.debug_ignore_perf_test = None
+ self.debug_fake_first_test_mean = 0
self.gs_bucket = None
self.target_arch = 'ia32'
self.target_build_type = 'Release'
self.builder_host = None
self.builder_port = None
self.bisect_mode = BISECT_MODE_MEAN
+ self.improvement_direction = 0
@staticmethod
def _CreateCommandLineParser():
@@ -2978,6 +3009,12 @@ class BisectOptions(object):
type='str',
help='The desired metric to bisect on. For example ' +
'"vm_rss_final_b/vm_rss_f_b"')
+ group.add_option('-d', '--improvement_direction',
+ type='int',
+ default=0,
+ help='An integer number representing the direction of ' +
+ 'improvement. 1 for higher is better, -1 for lower is ' +
+ 'better, 0 for ignore (default).')
group.add_option('-r', '--repeat_test_count',
type='int',
default=20,
@@ -3098,6 +3135,12 @@ class BisectOptions(object):
group.add_option('--debug_ignore_perf_test',
action='store_true',
help='DEBUG: Don\'t perform performance tests.')
+ group.add_option('--debug_fake_first_test_mean',
+ type='int',
+ default=0,
+ help=('DEBUG: When faking performance tests, return this '
+ 'value as the mean of the first performance test, '
+ 'and return a mean of 0.0 for further tests.'))
parser.add_option_group(group)
return parser
diff --git a/tools/auto_bisect/bisect_perf_regression_test.py b/tools/auto_bisect/bisect_perf_regression_test.py
index a4ba389..c7d98e0 100644
--- a/tools/auto_bisect/bisect_perf_regression_test.py
+++ b/tools/auto_bisect/bisect_perf_regression_test.py
@@ -17,9 +17,8 @@ import mock
import source_control
-def _GetBisectPerformanceMetricsInstance():
- """Returns an instance of the BisectPerformanceMetrics class."""
- options_dict = {
+# Default options for the dry run
+DEFAULT_OPTIONS = {
'debug_ignore_build': True,
'debug_ignore_sync': True,
'debug_ignore_perf_test': True,
@@ -28,12 +27,53 @@ def _GetBisectPerformanceMetricsInstance():
'good_revision': 280000,
'bad_revision': 280005,
}
+
+
+def _GetBisectPerformanceMetricsInstance(options_dict):
+ """Returns an instance of the BisectPerformanceMetrics class."""
bisect_options = bisect_perf_regression.BisectOptions.FromDict(options_dict)
bisect_instance = bisect_perf_regression.BisectPerformanceMetrics(
bisect_options)
return bisect_instance
+def _GetExtendedOptions(d, f):
+ """Returns a copy of the default options dict plus some options."""
+ result = dict(DEFAULT_OPTIONS)
+ result.update({
+ 'improvement_direction': d,
+ 'debug_fake_first_test_mean': f})
+ return result
+
+
+def _GenericDryRun(options, print_results=False):
+ """Performs a dry run of the bisector.
+
+ Args:
+ options: Dictionary containing the options for the bisect instance.
+ print_results: Boolean telling whether to call FormatAndPrintResults.
+
+ Returns:
+ The results dictionary as returned by the bisect Run method.
+ """
+ # Disable rmtree to avoid deleting local trees.
+ old_rmtree = shutil.rmtree
+ try:
+ shutil.rmtree = lambda path, onerror: None
+ bisect_instance = _GetBisectPerformanceMetricsInstance(options)
+ results = bisect_instance.Run(bisect_instance.opts.command,
+ bisect_instance.opts.bad_revision,
+ bisect_instance.opts.good_revision,
+ bisect_instance.opts.metric)
+
+ if print_results:
+ bisect_instance.FormatAndPrintResults(results)
+
+ return results
+ finally:
+ shutil.rmtree = old_rmtree
+
+
class BisectPerfRegressionTest(unittest.TestCase):
"""Test case for other functions and classes in bisect-perf-regression.py."""
@@ -257,18 +297,29 @@ class BisectPerfRegressionTest(unittest.TestCase):
This serves as a smoke test to catch errors in the basic execution of the
script.
"""
- # Disable rmtree to avoid deleting local trees.
- old_rmtree = shutil.rmtree
- try:
- shutil.rmtree = lambda path, onerror: None
- bisect_instance = _GetBisectPerformanceMetricsInstance()
- results = bisect_instance.Run(bisect_instance.opts.command,
- bisect_instance.opts.bad_revision,
- bisect_instance.opts.good_revision,
- bisect_instance.opts.metric)
- bisect_instance.FormatAndPrintResults(results)
- finally:
- shutil.rmtree = old_rmtree
+ _GenericDryRun(DEFAULT_OPTIONS, True)
+
+ def testBisectImprovementDirectionFails(self):
+ """Dry run of a bisect with an improvement instead of regression."""
+
+ # Test result goes from 0 to 100 where higher is better
+ results = _GenericDryRun(_GetExtendedOptions(1, 100))
+ self.assertIsNotNone(results.error)
+ self.assertIn('not a regression', results.error)
+ # Test result goes from 0 to -100 where lower is better
+ results = _GenericDryRun(_GetExtendedOptions(-1, -100))
+ self.assertIsNotNone(results.error)
+ self.assertIn('not a regression', results.error)
+
+ def testBisectImprovementDirectionSucceeds(self):
+ """Bisects with improvement direction matching regression range."""
+ # Test result goes from 0 to 100 where lower is better
+ results = _GenericDryRun(_GetExtendedOptions(-1, 100))
+ self.assertIsNone(results.error)
+ # Test result goes from 0 to -100 where higher is better
+ results = _GenericDryRun(_GetExtendedOptions(1, -100))
+ self.assertIsNone(results.error)
+
def testGetCommitPosition(self):
cp_git_rev = '7017a81991de983e12ab50dfc071c70e06979531'
@@ -278,21 +329,21 @@ class BisectPerfRegressionTest(unittest.TestCase):
self.assertEqual(291467, source_control.GetCommitPosition(svn_git_rev))
def testGetCommitPositionForV8(self):
- bisect_instance = _GetBisectPerformanceMetricsInstance()
+ bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
v8_rev = '21d700eedcdd6570eff22ece724b63a5eefe78cb'
depot_path = os.path.join(bisect_instance.src_cwd, 'v8')
self.assertEqual(
23634, source_control.GetCommitPosition(v8_rev, depot_path))
def testGetCommitPositionForWebKit(self):
- bisect_instance = _GetBisectPerformanceMetricsInstance()
+ bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
wk_rev = 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61'
depot_path = os.path.join(bisect_instance.src_cwd, 'third_party', 'WebKit')
self.assertEqual(
181660, source_control.GetCommitPosition(wk_rev, depot_path))
def testUpdateDepsContent(self):
- bisect_instance = _GetBisectPerformanceMetricsInstance()
+ bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
deps_file = 'DEPS'
# We are intentionally reading DEPS file contents instead of string literal
# with few lines from DEPS because to check if the format we are expecting
diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py
index aa3251d..344e36b 100755
--- a/tools/run-bisect-perf-regression.py
+++ b/tools/run-bisect-perf-regression.py
@@ -207,6 +207,9 @@ def _CreateBisectOptionsFromConfig(config):
if config.has_key('goma_dir'):
opts_dict['goma_dir'] = config['goma_dir']
+ if config.has_key('improvement_direction'):
+ opts_dict['improvement_direction'] = int(config['improvement_direction'])
+
opts_dict['build_preference'] = 'ninja'
opts_dict['output_buildbot_annotations'] = True
@@ -403,6 +406,9 @@ def _RunBisectionScript(
if config.has_key('bisect_mode'):
cmd.extend(['--bisect_mode', config['bisect_mode']])
+ if config.has_key('improvement_direction'):
+ cmd.extend(['-d', str(config['improvement_direction'])])
+
cmd.extend(['--build_preference', 'ninja'])
if '--browser=cros' in config['command']: