summaryrefslogtreecommitdiffstats
path: root/tools/auto_bisect
diff options
context:
space:
mode:
authorqyearsley <qyearsley@chromium.org>2015-06-19 11:20:00 -0700
committerCommit bot <commit-bot@chromium.org>2015-06-19 18:20:52 +0000
commitaa0a6946ec4d465cc5fbfd4b983b2409fb340c37 (patch)
tree9262099333050d4b0535bcdcd97b1cc716767842 /tools/auto_bisect
parent655573935b960e333e9a6f510a76502269c9f16e (diff)
downloadchromium_src-aa0a6946ec4d465cc5fbfd4b983b2409fb340c37.zip
chromium_src-aa0a6946ec4d465cc5fbfd4b983b2409fb340c37.tar.gz
chromium_src-aa0a6946ec4d465cc5fbfd4b983b2409fb340c37.tar.bz2
When calling ConfidenceScore, pass parameters directly instead of using * notation.
Also, update a comment above one place where ConfidenceScore is called to make it clearer. Review URL: https://codereview.chromium.org/1189193002 Cr-Commit-Position: refs/heads/master@{#335298}
Diffstat (limited to 'tools/auto_bisect')
-rw-r--r--tools/auto_bisect/bisect_results.py18
1 file changed, 8 insertions, 10 deletions
diff --git a/tools/auto_bisect/bisect_results.py b/tools/auto_bisect/bisect_results.py
index 926479d..f6ba0d8 100644
--- a/tools/auto_bisect/bisect_results.py
+++ b/tools/auto_bisect/bisect_results.py
@@ -105,9 +105,9 @@ class BisectResults(object):
'Failed to re-test reverted culprit CL against ToT.')
return
- confidence_params = (results_reverted[0]['values'],
- results_tot[0]['values'])
- confidence = BisectResults.ConfidenceScore(*confidence_params)
+ confidence = BisectResults.ConfidenceScore(
+ results_reverted[0]['values'],
+ results_tot[0]['values'])
self.retest_results_tot = RevisionState('ToT', 'n/a', 0)
self.retest_results_tot.value = results_tot[0]
@@ -247,14 +247,12 @@ class BisectResults(object):
[working_mean, broken_mean]) /
max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
- # Give a "confidence" in the bisect. Currently, we consider the values of
- # only the revisions at the breaking range (last known good and first known
- # bad) see the note in the docstring for FindBreakingRange.
- confidence_params = (
+ # Give a "confidence" in the bisect culprit by seeing whether the results
+ # of the culprit revision and the revision before that appear to be
+ # statistically significantly different.
+ confidence = cls.ConfidenceScore(
sum([first_working_rev.value['values']], []),
- sum([last_broken_rev.value['values']], [])
- )
- confidence = cls.ConfidenceScore(*confidence_params)
+ sum([last_broken_rev.value['values']], []))
bad_greater_than_good = mean_of_bad_runs > mean_of_good_runs