author     prasadv <prasadv@chromium.org>  2015-06-23 15:27:44 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-06-23 22:28:26 +0000
commit     a98c2e971a8a9b6326bc7e92a9c10a4f0a76d15c (patch)
tree       fb019c5c534de5ffa8d4ade2ed0690599b253c51 /tools/auto_bisect
parent     bb59fd8618c95da40d8cace3209cc7d4095e7613 (diff)
Make bisect abort early when the return codes for the known good and known
bad revisions are the same.

For bisect mode = return_code, there is no point in bisecting revisions that
return the same return code; in such cases we abort the bisect early.

BUG=502433
Review URL: https://codereview.chromium.org/1205663002
Cr-Commit-Position: refs/heads/master@{#335778}
Diffstat (limited to 'tools/auto_bisect')
-rwxr-xr-x  tools/auto_bisect/bisect_perf_regression.py        9
-rw-r--r--  tools/auto_bisect/bisect_perf_regression_test.py  46
-rw-r--r--  tools/auto_bisect/bisect_printer.py               11
3 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index ca97d67..41db6ae 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -2307,9 +2307,18 @@ class BisectPerformanceMetrics(object):
 
     # We need these reference values to determine if later runs should be
    # classified as pass or fail.
+
     known_bad_value = bad_results[0]
     known_good_value = good_results[0]
 
+    # Abort the bisect early when the return codes for the known good
+    # and known bad revisions are the same.
+    if (self._IsBisectModeReturnCode() and
+        known_bad_value['mean'] == known_good_value['mean']):
+      return BisectResults(abort_reason=('known good and known bad revisions '
+          'returned same return code (return code=%s). '
+          'Continuing bisect might not yield any results.' %
+          known_bad_value['mean']))
     # Check the direction of improvement only if the improvement_direction
     # option is set to a specific direction (1 for higher is better or -1 for
     # lower is better).
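
The guard compares the 'mean' field of the two reference results, which in
return_code mode holds the collapsed exit status rather than a measured
value. A minimal sketch of the decision, not part of the commit, using
hypothetical result dicts shaped like the ones bisect produces:

# Sketch only: in return_code mode 'mean' is the collapsed exit status
# (0 = test passed, nonzero = test failed).
known_good_value = {'mean': 1}  # hypothetical: the known-good revision failed
known_bad_value = {'mean': 1}   # the known-bad revision failed too

if known_bad_value['mean'] == known_good_value['mean']:
  # Both endpoints behave identically, so no revision between them can be
  # blamed for a change; continuing the bisect would be pointless.
  print('Aborting: both endpoints returned %s' % known_bad_value['mean'])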
diff --git a/tools/auto_bisect/bisect_perf_regression_test.py b/tools/auto_bisect/bisect_perf_regression_test.py
index 276cdeb..4a7b37f 100644
--- a/tools/auto_bisect/bisect_perf_regression_test.py
+++ b/tools/auto_bisect/bisect_perf_regression_test.py
@@ -114,13 +114,19 @@ DEFAULT_OPTIONS = {
 
 # that use _MockRunTests.
 _MockResultsGenerator = (x for x in [])
 
+def _MakeMockRunTests(bisect_mode_is_return_code=False):
+  def _MockRunTests(*args, **kwargs):  # pylint: disable=unused-argument
+    return _FakeTestResult(
+        _MockResultsGenerator.next(), bisect_mode_is_return_code)
 
-def _MockRunTests(*args, **kwargs):  # pylint: disable=unused-argument
-  return _FakeTestResult(_MockResultsGenerator.next())
+  return _MockRunTests
 
 
-def _FakeTestResult(values):
-  result_dict = {'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': values}
+def _FakeTestResult(values, bisect_mode_is_return_code):
+  mean = 0.0
+  if bisect_mode_is_return_code:
+    mean = 0 if (all(v == 0 for v in values)) else 1
+  result_dict = {'mean': mean, 'std_err': 0.0, 'std_dev': 0.0, 'values': values}
   success_code = 0
   return (result_dict, success_code)
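
A quick check of what the reworked helper returns, not from the commit
itself; it shows how raw per-run return codes collapse into the 'mean' that
the abort guard reads:

# Sketch only: 'mean' is 0 only when every sampled run returned 0.
result_dict, success_code = _FakeTestResult([0, 0, 0], True)
assert result_dict['mean'] == 0  # all runs passed
result_dict, success_code = _FakeTestResult([0, 1, 0], True)
assert result_dict['mean'] == 1  # any failing run marks the revision bad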
@@ -375,7 +381,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
     global _MockResultsGenerator
     _MockResultsGenerator = (r for r in results)
     bisect_class = bisect_perf_regression.BisectPerformanceMetrics
     original_run_tests = bisect_class.RunPerformanceTestAndParseResults
-    bisect_class.RunPerformanceTestAndParseResults = _MockRunTests
+    bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests()
     try:
       dry_run_results = _GenericDryRun(_GetExtendedOptions(0, 0, False))
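
Calling the factory with no argument keeps the legacy behavior for the
existing tests: bisect_mode_is_return_code defaults to False, so the fake
mean stays 0.0. A one-line illustration, not part of the commit:

# Sketch only: the default call preserves the old mock; passing True opts
# into return-code semantics for the new tests below.
legacy_mock = _MakeMockRunTests()
return_code_mock = _MakeMockRunTests(bisect_mode_is_return_code=True)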
@@ -406,6 +412,36 @@ class BisectPerfRegressionTest(unittest.TestCase):
   def testBisectNotAborted_MultipleValues(self):
     self.assertFalse(self._CheckAbortsEarly(MULTIPLE_VALUES))
 
+  def _CheckAbortsEarlyForReturnCode(self, results):
+    """Returns True if the bisect job would abort early in return code mode."""
+    global _MockResultsGenerator
+    _MockResultsGenerator = (r for r in results)
+    bisect_class = bisect_perf_regression.BisectPerformanceMetrics
+    original_run_tests = bisect_class.RunPerformanceTestAndParseResults
+    bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests(True)
+    options = dict(DEFAULT_OPTIONS)
+    options.update({'bisect_mode': 'return_code'})
+    try:
+      dry_run_results = _GenericDryRun(options)
+    except StopIteration:
+      # If StopIteration was raised, a value beyond the first two reference
+      # values was requested, so the job was not aborted.
+      return False
+    finally:
+      bisect_class.RunPerformanceTestAndParseResults = original_run_tests
+
+    # If the job was aborted, there should be a warning about it.
+    if ('known good and known bad revisions returned same' in
+        dry_run_results.abort_reason):
+      return True
+    return False
+
+  def testBisectAbortOn_SameReturnCode(self):
+    self.assertTrue(self._CheckAbortsEarlyForReturnCode([[0,0,0], [0,0,0]]))
+
+  def testBisectNotAbortedOn_DifferentReturnCode(self):
+    self.assertFalse(self._CheckAbortsEarlyForReturnCode([[1,1,1], [0,0,0]]))
+
   def testGetCommitPosition(self):
     cp_git_rev = '7017a81991de983e12ab50dfc071c70e06979531'
     self.assertEqual(291765, source_control.GetCommitPosition(cp_git_rev))
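
The StopIteration branch above relies on _MockResultsGenerator yielding
exactly the two reference result sets: any request for a third set means the
bisect ran past the early-abort check. A minimal sketch of that generator
behavior (Python 2, matching the .next() calls used by the mock):

# Sketch only: a two-item generator exhausts on the third request.
gen = (r for r in [[0, 0, 0], [0, 0, 0]])
gen.next()  # reference values for the first endpoint revision
gen.next()  # reference values for the second endpoint revision
gen.next()  # raises StopIteration, i.e. the job did not abort early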
diff --git a/tools/auto_bisect/bisect_printer.py b/tools/auto_bisect/bisect_printer.py
index f23d662..9b92320 100644
--- a/tools/auto_bisect/bisect_printer.py
+++ b/tools/auto_bisect/bisect_printer.py
@@ -124,14 +124,21 @@ class BisectPrinter(object):
         last_broken_rev, 100, final_step=False)
 
   def _PrintAbortResults(self, abort_reason):
-
     if self.opts.output_buildbot_annotations:
       bisect_utils.OutputAnnotationStepStart('Results')
+
+    # The metric string in the config is not split in return code mode.
+    if (self.opts.metric and
+        self.opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE):
+      metric = '/'.join(self.opts.metric)
+    else:
+      metric = self.opts.metric
+
     print ABORT_REASON_TEMPLATE % {
         'abort_reason': abort_reason,
         'bug_id': self.opts.bug_id or 'NOT SPECIFIED',
         'command': self.opts.command,
-        'metric': '/'.join(self.opts.metric),
+        'metric': metric,
         'good_revision': self.opts.good_revision,
         'bad_revision': self.opts.bad_revision,
     }
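
The new guard matters because, as the added comment notes, opts.metric stays
an unsplit string in return code mode, and joining a string iterates its
characters. A hypothetical illustration of the breakage the old unconditional
join would cause (metric names invented for the example):

# Sketch only, hypothetical metric values (Python 2 print statements to
# match the surrounding file).
print '/'.join(['startup', 'warm'])  # normal mode, split metric: 'startup/warm'
print '/'.join('startup')            # unsplit string: 's/t/a/r/t/u/p'
# '/'.join(None) would raise TypeError, hence the 'self.opts.metric and' test.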