-rwxr-xr-x  tools/bisect-perf-regression.py      | 17
-rwxr-xr-x  tools/run-bisect-perf-regression.py  | 45
2 files changed, 37 insertions, 25 deletions
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py
index 363aa1b..9e1c71b 100755
--- a/tools/bisect-perf-regression.py
+++ b/tools/bisect-perf-regression.py
@@ -1878,6 +1878,7 @@ class BisectPerformanceMetrics(object):
command_to_run: The command to be run to execute the performance test.
metric: The metric to parse out from the results of the performance test.
This is the result chart name and trace name, separated by slash.
+ May be None for perf try jobs.
reset_on_first_run: If True, pass the flag --reset-results on first run.
upload_on_last_run: If True, pass the flag --upload-results on last run.
results_label: A value for the option flag --results-label.
@@ -1951,7 +1952,7 @@ class BisectPerformanceMetrics(object):
if self.opts.output_buildbot_annotations:
print output
- if self._IsBisectModeUsingMetric():
+ if metric and self._IsBisectModeUsingMetric():
metric_values += _ParseMetricValuesFromOutput(metric, output)
# If we're bisecting on a metric (ie, changes in the mean or
# standard deviation) and no metric values are produced, bail out.
@@ -1964,7 +1965,7 @@ class BisectPerformanceMetrics(object):
if elapsed_minutes >= self.opts.max_time_minutes:
break
- if len(metric_values) == 0:
+ if metric and len(metric_values) == 0:
err_text = 'Metric %s was not found in the test output.' % metric
# TODO(qyearsley): Consider also getting and displaying a list of metrics
# that were found in the output here.
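Both guards above rely on Python's short-circuiting 'and': when metric is None, as it now can be for perf try jobs, both the parsing step and the missing-metric error are skipped. A minimal self-contained sketch of the pattern (the token-scraping below is illustrative, not the real _ParseMetricValuesFromOutput):

    def collect_metric_values(metric, output):
      # With no metric, neither parsing nor the "not found" error can fire.
      metric_values = []
      if metric:
        # Stand-in parser: pull numeric tokens out of the test output.
        metric_values = [float(t) for t in output.split()
                         if t.replace('.', '', 1).isdigit()]
      if metric and len(metric_values) == 0:
        raise RuntimeError('Metric %s was not found in the test output.' % metric)
      return metric_values

    print(collect_metric_values(None, 'RESULT foo: 1.5'))           # []
    print(collect_metric_values('chart/trace', 'RESULT foo: 1.5'))  # [1.5]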
@@ -1972,6 +1973,7 @@ class BisectPerformanceMetrics(object):
# If we're bisecting on return codes, we're really just looking for zero vs
# non-zero.
+ values = {}
if self._IsBisectModeReturnCode():
# If any of the return codes is non-zero, output 1.
overall_return_code = 0 if (
@@ -1987,7 +1989,7 @@ class BisectPerformanceMetrics(object):
print 'Results of performance test: Command returned with %d' % (
overall_return_code)
print
- else:
+ elif metric:
# Need to get the average value if there were multiple values.
truncated_mean = math_utils.TruncatedMean(
metric_values, self.opts.truncate_percent)
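Together with the new 'values = {}' initialization, the change from 'else:' to 'elif metric:' gives the function a well-defined empty result when there is no metric and the bisect is not on return codes; the old 'else:' would have tried to take a truncated mean of an empty list. A simplified sketch of the three outcomes (result shapes are illustrative):

    def summarize(metric, metric_values, is_return_code_mode):
      values = {}  # defined even when neither branch below fires
      if is_return_code_mode:
        # Any non-zero return code makes the overall result 1.
        overall = 0 if all(v == 0 for v in metric_values) else 1
        values = {'mean': overall, 'values': [overall]}
      elif metric:
        values = {'mean': sum(metric_values) / float(len(metric_values)),
                  'values': metric_values}
      return values

    print(summarize(None, [], False))                   # {} - metric-less try job
    print(summarize('chart/trace', [1.0, 3.0], False))  # {'mean': 2.0, ...}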
@@ -3592,11 +3594,12 @@ class BisectOptions(object):
assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
setattr(opts, k, v)
- metric_values = opts.metric.split('/')
- if len(metric_values) != 2:
- raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
+ if opts.metric:
+ metric_values = opts.metric.split('/')
+ if len(metric_values) != 2:
+ raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
+ opts.metric = metric_values

- opts.metric = metric_values
opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
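With the validation wrapped in 'if opts.metric:', a chart/trace string is still split and checked, while a missing metric passes through as None instead of crashing on None.split. A standalone sketch of the resulting behavior (the function name is hypothetical):

    def normalize_metric(metric):
      # None (a perf try job) passes through; anything else must be chart/trace.
      if metric:
        metric_values = metric.split('/')
        if len(metric_values) != 2:
          raise RuntimeError('Invalid metric specified: [%s]' % metric)
        return metric_values
      return metric

    print(normalize_metric('times/page_load_time'))  # ['times', 'page_load_time']
    print(normalize_metric(None))                    # None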
diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py
index 58edfc8..9095ae2 100755
--- a/tools/run-bisect-perf-regression.py
+++ b/tools/run-bisect-perf-regression.py
@@ -151,7 +151,6 @@ def _ValidatePerfConfigFile(config_contents):
"""
valid_parameters = [
'command',
- 'metric',
'repeat_count',
'truncate_percent',
'max_time_minutes',
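Removing 'metric' from this list is what stops the perf-config check from insisting on the key. Assuming the validator simply requires each listed parameter to be present in the config, a guess at its shape rather than the actual implementation, the effect looks like:

    valid_parameters = ['command', 'repeat_count', 'truncate_percent',
                        'max_time_minutes']

    def is_valid(config):
      # Hypothetical check: every listed parameter must appear in the config.
      return all(p in config for p in valid_parameters)

    print(is_valid({'command': 'x', 'repeat_count': '', 'truncate_percent': '',
                    'max_time_minutes': ''}))  # True, even with no 'metric' key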
@@ -195,7 +194,7 @@ def _CreateBisectOptionsFromConfig(config):
print config['command']
opts_dict = {}
opts_dict['command'] = config['command']
- opts_dict['metric'] = config['metric']
+ opts_dict['metric'] = config.get('metric')
if config['repeat_count']:
opts_dict['repeat_test_count'] = int(config['repeat_count'])
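The switch from config['metric'] to config.get('metric') is what makes the key optional: subscripting raises KeyError for a missing key, while dict.get returns None (or a supplied default). For instance:

    config = {'command': './tools/perf/run_benchmark foo'}  # hypothetical config
    print(config.get('metric'))  # None - no KeyError for the absent key
    # config['metric'] here would raise KeyError: 'metric'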
@@ -312,25 +311,32 @@ def _RunPerformanceTest(config, path_to_file):
cloud_file_link = ''
# Calculate the % difference in the means of the 2 runs.
- percent_diff_in_means = (results_with_patch[0]['mean'] /
- max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
- std_err = math_utils.PooledStandardError(
- [results_with_patch[0]['values'], results_without_patch[0]['values']])
+ percent_diff_in_means = None
+ std_err = None
+ if (results_with_patch[0].has_key('mean') and
+ results_with_patch[0].has_key('values')):
+ percent_diff_in_means = (results_with_patch[0]['mean'] /
+ max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
+ std_err = math_utils.PooledStandardError(
+ [results_with_patch[0]['values'], results_without_patch[0]['values']])
bisect_utils.OutputAnnotationStepClosed()
- bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
- (percent_diff_in_means, std_err))
- print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
- 'Std. Error'.center(20, ' '))
- print ' %s %s %s' % ('Patch'.center(10, ' '),
- ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
- ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
- print ' %s %s %s' % ('No Patch'.center(10, ' '),
- ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
- ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
- if cloud_file_link:
+ if percent_diff_in_means is not None and std_err is not None:
+ bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
+ (percent_diff_in_means, std_err))
+ print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
+ 'Std. Error'.center(20, ' '))
+ print ' %s %s %s' % ('Patch'.center(10, ' '),
+ ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
+ ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
+ print ' %s %s %s' % ('No Patch'.center(10, ' '),
+ ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
+ ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
+ if cloud_file_link:
+ bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
+ bisect_utils.OutputAnnotationStepClosed()
+ elif cloud_file_link:
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
- bisect_utils.OutputAnnotationStepClosed()
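The results step now computes and prints the comparison only when the patched run actually produced 'mean' and 'values' entries (has_key is the Python 2 spelling of the 'in' membership test); otherwise it falls through to just linking the HTML results. A condensed sketch of the guard with made-up numbers:

    results_with_patch = {'mean': 105.0, 'values': [104.0, 106.0]}
    results_without_patch = {'mean': 100.0, 'values': [99.0, 101.0]}

    percent_diff_in_means = None
    if 'mean' in results_with_patch and 'values' in results_with_patch:
      percent_diff_in_means = (results_with_patch['mean'] /
          max(0.0001, results_without_patch['mean'])) * 100.0 - 100.0

    print(percent_diff_in_means)  # 5.0; stays None when the stats are absent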
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
@@ -385,6 +391,9 @@ def _RunBisectionScript(
'--working_directory', working_directory,
'--output_buildbot_annotations']
+ if config.get('metric'):
+ cmd.extend(['-m', config['metric']])
+
if config['repeat_count']:
cmd.extend(['-r', config['repeat_count']])
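On the driver side the same config.get('metric') idiom decides whether to pass -m at all, so metric-less perf try jobs simply omit the flag rather than passing an empty value. A minimal sketch of the conditional command assembly (paths and values are placeholders):

    config = {'command': './run_benchmark foo', 'metric': None,
              'repeat_count': '20'}

    cmd = ['python', 'tools/bisect-perf-regression.py',
           '--output_buildbot_annotations']
    if config.get('metric'):
      cmd.extend(['-m', config['metric']])
    if config['repeat_count']:
      cmd.extend(['-r', config['repeat_count']])

    print(cmd)  # no '-m' pair when metric is None or missing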