path: root/tools/bisect-perf-regression.py
author     prasadv@chromium.org <prasadv@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-07-11 23:37:18 +0000
committer  prasadv@chromium.org <prasadv@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-07-11 23:37:18 +0000
commit     c000352c3631f56902ace22a323f3311fb6bc2e1 (patch)
tree       f62f287a5e7af8035231740fa6f352f70c00a455 /tools/bisect-perf-regression.py
parent     165c595228e74a6e2d1b34ab23f5782ea0f852c2 (diff)
Reformat bisect results output.
Reformat the bisect results output to give detailed information regarding the bisect job. Includes clear instructions for the repro steps.

BUG=383669,383935
NOTRY=true

Review URL: https://codereview.chromium.org/388623002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@282735 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools/bisect-perf-regression.py')
-rwxr-xr-x  tools/bisect-perf-regression.py  167
1 file changed, 126 insertions(+), 41 deletions(-)
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py
index e6ba663..5d9bf06 100755
--- a/tools/bisect-perf-regression.py
+++ b/tools/bisect-perf-regression.py
@@ -183,6 +183,71 @@ BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
+# The perf dashboard specifically looks for the string
+# "Estimated Confidence: 95%" to decide whether or not
+# to cc the author(s). If you change this, please update the perf
+# dashboard as well.
+RESULTS_BANNER = """
+===== BISECT JOB RESULTS =====
+Status: %(status)s
+
+Test Command: %(command)s
+Test Metric: %(metrics)s
+Relative Change: %(change)s
+Estimated Confidence: %(confidence)d%%"""
+
+# The perf dashboard specifically looks for the string
+# "Author : " to parse out who to cc on a bug. If you change the
+# formatting here, please update the perf dashboard as well.
+RESULTS_REVISION_INFO = """
+===== SUSPECTED CL(s) =====
+Subject : %(subject)s
+Author : %(author)s%(email_info)s%(commit_info)s
+Date : %(cl_date)s"""
+
+REPRO_STEPS_LOCAL = """
+==== INSTRUCTIONS TO REPRODUCE ====
+To run locally:
+$%(command)s"""
+
+REPRO_STEPS_TRYJOB = """
+To reproduce on Performance trybot:
+1. Create new git branch or check out existing branch.
+2. Edit tools/run-perf-test.cfg (instructions in file) or \
+third_party/WebKit/Tools/run-perf-test.cfg.
+ a) Take care to strip any src/ directories from the head of \
+relative path names.
+ b) On desktop, only --browser=release is supported, on android \
+--browser=android-chromium-testshell.
+ c) Test command to use: %(command)s
+3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
+committed locally to run-perf-test.cfg.
+ Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
+ $ git cl upload --bypass-hooks
+4. Send your try job to the tryserver. \
+[Please make sure to use appropriate bot to reproduce]
+ $ git cl try -m tryserver.chromium.perf -b <bot>
+
+For more details please visit \nhttps://sites.google.com/a/chromium.org/dev/\
+developers/performance-try-bots"""
+
+RESULTS_THANKYOU = """
+===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
+Visit http://www.chromium.org/developers/core-principles for Chrome's policy
+on perf regressions.
+Contact chrome-perf-dashboard-team with any questions or suggestions about
+bisecting.
+                .------.
+                .---.   \  \==)
+                |PERF\  \   \\
+                | ---------'-------'-----------.
+                . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
+                \______________.-------._______________)
+                               /        /
+                              /       /
+                             /      /==)
+                            ._______."""
+
def _AddAdditionalDepotInfo(depot_info):
"""Adds additional depot info to the global depot variables."""
@@ -3208,19 +3273,45 @@ class BisectPerformanceMetrics(object):
# dashboard as well.
print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
+ def _ConfidenceLevelStatus(self, results_dict):
+ if not results_dict['confidence']:
+ return None
+ confidence_status = 'Successful with %(level)s confidence%(warning)s.'
+ if results_dict['confidence'] >= 95:
+ level = 'high'
+ else:
+ level = 'low'
+ warning = ' and warnings'
+ if not self.warnings:
+ warning = ''
+ return confidence_status % {'level': level, 'warning': warning}
+
+ def _PrintThankYou(self):
+ print RESULTS_THANKYOU
+
def _PrintBanner(self, results_dict):
- print
- print " __o_\___ Aw Snap! We hit a speed bump!"
- print "=-O----O-'__.~.___________________________________"
- print
if self._IsBisectModeReturnCode():
- print ('Bisect reproduced a change in return codes while running the '
- 'performance test.')
+ metrics = 'N/A'
+ change = 'Yes'
else:
- print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
- '%s metric.' % (results_dict['regression_size'],
- results_dict['regression_std_err'], '/'.join(self.opts.metric)))
- self._PrintConfidence(results_dict)
+ metrics = '/'.join(self.opts.metric)
+ change = '%.02f%% (+/-%.02f%%)' % (
+ results_dict['regression_size'], results_dict['regression_std_err'])
+
+ if results_dict['culprit_revisions'] and results_dict['confidence']:
+ status = self._ConfidenceLevelStatus(results_dict)
+ else:
+ status = 'Failure, could not reproduce.'
+ change = 'Bisect could not reproduce a change.'
+
+ print RESULTS_BANNER % {
+ 'status': status,
+ 'command': self.opts.command,
+ 'metrics': metrics,
+ 'change': change,
+ 'confidence': results_dict['confidence'],
+ }
+
def _PrintFailedBanner(self, results_dict):
print
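For clarity, a free-standing sketch of the confidence wording that _ConfidenceLevelStatus (added above) produces, with the confidence value and warning list passed in directly instead of coming from results_dict and self.warnings:

    def confidence_level_status(confidence, warnings):
        # No confidence value means no status line at all.
        if not confidence:
            return None
        if confidence >= 95:
            level, warning = 'high', ''
        else:
            level, warning = 'low', ' and warnings' if warnings else ''
        return 'Successful with %s confidence%s.' % (level, warning)

    print confidence_level_status(96, [])         # Successful with high confidence.
    print confidence_level_status(80, ['noisy'])  # Successful with low confidence and warnings.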
@@ -3246,25 +3337,22 @@ class BisectPerformanceMetrics(object):
return ''
def _PrintRevisionInfo(self, cl, info, depot=None):
- # The perf dashboard specifically looks for the string
- # "Author : " to parse out who to cc on a bug. If you change the
- # formatting here, please update the perf dashboard as well.
- print
- print 'Subject : %s' % info['subject']
- print 'Author : %s' % info['author']
+ email_info = ''
if not info['email'].startswith(info['author']):
- print 'Email : %s' % info['email']
+ email_info = '\nEmail : %s' % info['email']
commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
if commit_link:
- print 'Link : %s' % commit_link
+ commit_info = '\nLink : %s' % commit_link
else:
- print
- print 'Failed to parse svn revision from body:'
- print
- print info['body']
- print
- print 'Commit : %s' % cl
- print 'Date : %s' % info['date']
+ commit_info = ('\nFailed to parse svn revision from body:\n%s' %
+ info['body'])
+ print RESULTS_REVISION_INFO % {
+ 'subject': info['subject'],
+ 'author': info['author'],
+ 'email_info': email_info,
+ 'commit_info': commit_info,
+ 'cl_date': info['date']
+ }
def _PrintTableRow(self, column_widths, row_data):
assert len(column_widths) == len(row_data)
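Note how the optional fields work in the rewritten _PrintRevisionInfo: email_info and commit_info carry their own leading \n, so when either is an empty string its line simply disappears from RESULTS_REVISION_INFO. A condensed sketch with made-up values (spacing approximate):

    revision_info = ('Subject : %(subject)s\n'
                     'Author  : %(author)s%(email_info)s%(commit_info)s\n'
                     'Date    : %(cl_date)s')
    print revision_info % {
        'subject': 'Speed up frame scheduling',   # hypothetical CL
        'author': 'someone@chromium.org',
        'email_info': '',                         # author string already holds the email
        'commit_info': '\nLink    : http://src.chromium.org/viewvc/chrome?revision=282000',
        'cl_date': 'Fri Jul 11 15:00:00 2014',
    }
    # The Email line is omitted entirely; the Link line appears on its own row.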
@@ -3318,9 +3406,9 @@ class BisectPerformanceMetrics(object):
final_step=True):
print
if final_step:
- print 'Tested commits:'
+ print '===== TESTED COMMITS ====='
else:
- print 'Partial results:'
+ print '===== PARTIAL RESULTS ====='
self._PrintTestedCommitsHeader()
state = 0
for current_id, current_data in revision_data_sorted:
@@ -3354,12 +3442,12 @@ class BisectPerformanceMetrics(object):
self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
def _PrintReproSteps(self):
- print
- print 'To reproduce locally:'
- print '$ ' + self.opts.command
+ command = '$ ' + self.opts.command
if bisect_utils.IsTelemetryCommand(self.opts.command):
- print
- print 'Also consider passing --profiler=list to see available profilers.'
+ command += ('\nAlso consider passing --profiler=list to see available '
+ 'profilers.')
+ print REPRO_STEPS_LOCAL % {'command': command}
+ print REPRO_STEPS_TRYJOB % {'command': command}
def _PrintOtherRegressions(self, other_regressions, revision_data):
print
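A toy version of the flow in the new _PrintReproSteps, using a stand-in template and a hard-coded Telemetry check (the real code uses the REPRO_STEPS_* templates above, bisect_utils.IsTelemetryCommand and self.opts.command):

    repro_steps_local = ('\n==== INSTRUCTIONS TO REPRODUCE ====\n'
                         'To run locally:\n%(command)s')

    opts_command = 'tools/perf/run_benchmark smoothness.top_25'  # hypothetical
    command = '$ ' + opts_command
    if 'run_benchmark' in opts_command:  # stand-in for IsTelemetryCommand()
        command += ('\nAlso consider passing --profiler=list to see available '
                    'profilers.')
    print repro_steps_local % {'command': command}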
@@ -3413,7 +3501,7 @@ class BisectPerformanceMetrics(object):
print
print 'WARNINGS:'
for w in set(self.warnings):
- print ' !!! %s' % w
+ print ' ! %s' % w
def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
other_regressions = []
@@ -3614,26 +3702,23 @@ class BisectPerformanceMetrics(object):
# bugs. If you change this, please update the perf dashboard as well.
bisect_utils.OutputAnnotationStepStart('Results')
+ self._PrintBanner(results_dict)
+ self._PrintWarnings()
+
if results_dict['culprit_revisions'] and results_dict['confidence']:
- self._PrintBanner(results_dict)
for culprit in results_dict['culprit_revisions']:
cl, info, depot = culprit
self._PrintRevisionInfo(cl, info, depot)
- self._PrintReproSteps()
if results_dict['other_regressions']:
self._PrintOtherRegressions(results_dict['other_regressions'],
revision_data)
- else:
- self._PrintFailedBanner(results_dict)
- self._PrintReproSteps()
-
self._PrintTestedCommitsTable(revision_data_sorted,
results_dict['first_working_revision'],
results_dict['last_broken_revision'],
results_dict['confidence'])
self._PrintStepTime(revision_data_sorted)
- self._PrintWarnings()
-
+ self._PrintReproSteps()
+ self._PrintThankYou()
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
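Taken together, the final hunk now prints the same report skeleton whether or not the bisect succeeded; roughly as follows (section bodies omitted; the suspected-CL and other-regressions parts only appear when a culprit was found):

    def print_report_skeleton(found_culprit):
        print '===== BISECT JOB RESULTS ====='                      # _PrintBanner
        print 'WARNINGS: ...'                                       # _PrintWarnings (if any)
        if found_culprit:
            print '===== SUSPECTED CL(s) ====='                     # _PrintRevisionInfo
        print '===== TESTED COMMITS ====='                          # _PrintTestedCommitsTable
        print '==== INSTRUCTIONS TO REPRODUCE ===='                 # _PrintReproSteps
        print '===== THANK YOU FOR CHOOSING BISECT AIRLINES ====='  # _PrintThankYou

    print_report_skeleton(found_culprit=True)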