author     simonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-10-22 00:10:24 +0000
committer  simonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-10-22 00:10:24 +0000
commit     774a58c3ad4fb5f93bae4ed761cddf1b90365ff3
tree       c02940eb3f22f4396e39b1805fe664b3752ad008
parent     32a3e53d38529dd409a090ddb21eec37b5864371
First pass at a performance try bot.

Piggy-backed a performance comparison mode onto the existing bisect
architecture. The script will build and run the specified performance
test, revert any local changes, build, and run again.

At the moment, only Chromium changes can be perf-tested. Future CLs will
add the ability to perf-test Blink changes.

Waiting on http://codereview.chromium.org/27413002 and
http://codereview.chromium.org/26179009.

BUG=
TEST= Modify run-perf-test.cfg, commit, and run
      git try --user=<user> -b linux_perf_bisect
      Expect the bot to run with and without the patch, i.e.
      http://build.chromium.org/p/tryserver.chromium/builders/linux_perf_bisect/builds/387

Review URL: https://codereview.chromium.org/27165006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@229963 0039d316-1c4b-4281-b951-d872f2087c98
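For context, the TEST= steps amount to filling in the (normally empty) config
dict in tools/run-perf-test.cfg locally and sending the patch to the try bot.
A minimal sketch of a filled-in config, using hypothetical values drawn from
the samples in the new file:

  # tools/run-perf-test.cfg -- local edit only; the checked-in file keeps every
  # field empty, and the PRESUBMIT check below rejects attempts to land values.
  config = {
    'command': './tools/perf/run_measurement -v --browser=release kraken',
    'metric': 'Total/Total',
    'repeat_count': '20',
    'max_time_minutes': '20',
    'truncate_percent': '25',
  }

After committing the edit locally, git try --user=<user> -b linux_perf_bisect
kicks off a with-patch build and run, a gclient revert, then a without-patch
build and run, and the bot reports the difference in the means.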
-rw-r--r--   tools/PRESUBMIT.py                   |  12
-rwxr-xr-x   tools/bisect-perf-regression.py      |  35
-rw-r--r--   tools/bisect_utils.py                |  13
-rwxr-xr-x   tools/run-bisect-perf-regression.py  | 297
-rw-r--r--   tools/run-perf-test.cfg              |  78
5 files changed, 372 insertions(+), 63 deletions(-)
diff --git a/tools/PRESUBMIT.py b/tools/PRESUBMIT.py
index 0157231..aff6048f 100644
--- a/tools/PRESUBMIT.py
+++ b/tools/PRESUBMIT.py
@@ -2,21 +2,23 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Top-level presubmit script for bisect trybot.
+"""Top-level presubmit script for bisect/perf trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
+import os
-def _ExamineBisectConfigFile(input_api, output_api):
+def _ExamineConfigFiles(input_api):
for f in input_api.AffectedFiles():
- if not f.LocalPath().endswith('run-bisect-perf-regression.cfg'):
+ if (not f.LocalPath().endswith('run-bisect-perf-regression.cfg') and
+ not f.LocalPath().endswith('run-perf-test.cfg')):
continue
try:
- cfg_file = imp.load_source('config', 'run-bisect-perf-regression.cfg')
+ cfg_file = imp.load_source('config', os.path.basename(f.LocalPath()))
for k, v in cfg_file.config.iteritems():
if v:
@@ -27,7 +29,7 @@ def _ExamineBisectConfigFile(input_api, output_api):
return None
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
- results = _ExamineBisectConfigFile(input_api, output_api)
+ results = _ExamineConfigFiles(input_api)
if results:
return [output_api.PresubmitError(
'The bisection config file should only contain a config dict with '
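The check above now covers both run-bisect-perf-regression.cfg and
run-perf-test.cfg: each affected config is loaded as a Python module and the
presubmit fails if any field in its config dict has a value. A standalone
sketch of that rule (the helper name is illustrative, not the actual presubmit
code):

  import imp
  import os

  def _ConfigFileIsClean(path_to_cfg):
    # Load the .cfg as a Python module (the presubmit runs from tools/, so the
    # basename resolves correctly) and verify every config field is empty.
    cfg = imp.load_source('config', os.path.basename(path_to_cfg))
    return not any(v for v in cfg.config.itervalues())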
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py
index 862f490..0d01def 100755
--- a/tools/bisect-perf-regression.py
+++ b/tools/bisect-perf-regression.py
@@ -35,6 +35,7 @@ An example usage (using git hashes):
"""
+import copy
import datetime
import errno
import imp
@@ -1235,7 +1236,8 @@ class BisectPerformanceMetrics(object):
return False
return True
- def RunPerformanceTestAndParseResults(self, command_to_run, metric):
+ def RunPerformanceTestAndParseResults(self, command_to_run, metric,
+ reset_on_first_run=False, upload_on_last_run=False, results_label=None):
"""Runs a performance test on the current revision by executing the
'command_to_run' and parses the results.
@@ -1261,10 +1263,11 @@ class BisectPerformanceMetrics(object):
# If running a telemetry test for cros, insert the remote ip, and
# identity parameters.
- if self.opts.target_platform == 'cros':
- if 'tools/perf/run_' in args[0]:
- args.append('--remote=%s' % self.opts.cros_remote_ip)
- args.append('--identity=%s' % CROS_TEST_KEY_PATH)
+ is_telemetry = ('tools/perf/run_' in command_to_run or
+ 'tools\\perf\\run_' in command_to_run)
+ if self.opts.target_platform == 'cros' and is_telemetry:
+ args.append('--remote=%s' % self.opts.cros_remote_ip)
+ args.append('--identity=%s' % CROS_TEST_KEY_PATH)
cwd = os.getcwd()
os.chdir(self.src_cwd)
@@ -1272,10 +1275,19 @@ class BisectPerformanceMetrics(object):
start_time = time.time()
metric_values = []
+ output_of_all_runs = ''
for i in xrange(self.opts.repeat_test_count):
# Can ignore the return code since if the tests fail, it won't return 0.
try:
- (output, return_code) = RunProcessAndRetrieveOutput(args)
+ current_args = copy.copy(args)
+ if is_telemetry:
+ if i == 0 and reset_on_first_run:
+ current_args.append('--reset-results')
+ elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
+ current_args.append('--upload-results')
+ if results_label:
+ current_args.append('--results-label=%s' % results_label)
+ (output, return_code) = RunProcessAndRetrieveOutput(current_args)
except OSError, e:
if e.errno == errno.ENOENT:
err_text = ("Something went wrong running the performance test. "
@@ -1289,6 +1301,7 @@ class BisectPerformanceMetrics(object):
return (err_text, -1)
raise
+ output_of_all_runs += output
if self.opts.output_buildbot_annotations:
print output
@@ -1318,10 +1331,10 @@ class BisectPerformanceMetrics(object):
print 'Results of performance test: %12f %12f' % (
truncated_mean, standard_err)
print
- return (values, 0)
+ return (values, 0, output_of_all_runs)
else:
return ('Invalid metric specified, or no values returned from '
- 'performance test.', -1)
+ 'performance test.', -1, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
"""Finds all dependant revisions and depots that need to be synced for a
@@ -2718,9 +2731,9 @@ class BisectOptions(object):
opts = BisectOptions()
for k, v in values.iteritems():
- assert hasattr(opts, name_to_attr[k]), 'Invalid %s attribute in '\
- 'BisectOptions.' % name_to_attr[k]
- setattr(opts, name_to_attr[k], v)
+ assert hasattr(opts, k), 'Invalid %s attribute in '\
+ 'BisectOptions.' % k
+ setattr(opts, k, v)
metric_values = opts.metric.split('/')
if len(metric_values) != 2:
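With the change above, RunPerformanceTestAndParseResults now returns a 3-tuple
(parsed values, return code, concatenated stdout of all runs) instead of a
pair, and telemetry runs can be asked to reset, label, and upload their
results. A hedged sketch of the calling pattern used by the new perf-test path
(variable names are illustrative):

  # b is a BisectPerformanceMetrics instance, opts a BisectOptions.
  values, return_code, output_of_all_runs = b.RunPerformanceTestAndParseResults(
      opts.command, opts.metric, reset_on_first_run=True, results_label='Patch')
  if return_code:
    # On failure the first element is an error string rather than a value dict.
    raise RuntimeError(values)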
diff --git a/tools/bisect_utils.py b/tools/bisect_utils.py
index adef6ba..c958f35 100644
--- a/tools/bisect_utils.py
+++ b/tools/bisect_utils.py
@@ -89,6 +89,19 @@ def OutputAnnotationStepClosed():
sys.stdout.flush()
+def OutputAnnotationStepLink(label, url):
+ """Outputs appropriate annotation to print a link.
+
+ Args:
+ label: The name to print.
+ url: The url to print.
+ """
+ print
+ print '@@@STEP_LINK@%s@%s@@@' % (label, url)
+ print
+ sys.stdout.flush()
+
+
def CreateAndChangeToSourceDirectory(working_directory):
"""Creates a directory 'bisect' as a subdirectory of 'working_directory'. If
the function is successful, the current working directory will change to that
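OutputAnnotationStepLink simply wraps the buildbot STEP_LINK annotation; the
perf-test path uses it to link the uploaded HTML results. For example (the URL
is hypothetical):

  import bisect_utils

  bisect_utils.OutputAnnotationStepLink(
      'HTML Results',
      'https://storage.googleapis.com/chromium-telemetry/html-results/example.html')
  # Emits: @@@STEP_LINK@HTML Results@https://storage.googleapis.com/...@@@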
diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py
index 0181ef7..e8490ad 100755
--- a/tools/run-bisect-perf-regression.py
+++ b/tools/run-bisect-perf-regression.py
@@ -19,26 +19,93 @@ import subprocess
import sys
import traceback
+import bisect_utils
+bisect = imp.load_source('bisect-perf-regression',
+ os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
+ 'bisect-perf-regression.py'))
+
+
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
-def LoadConfigFile(path_to_file):
- """Attempts to load the file 'run-bisect-perf-regression.cfg' as a module
+
+class Goma(object):
+
+ def __init__(self, path_to_goma):
+ self._abs_path_to_goma = None
+ self._abs_path_to_goma_file = None
+ if path_to_goma:
+ self._abs_path_to_goma = os.path.abspath(path_to_goma)
+ self._abs_path_to_goma_file = self._GetExecutablePath(
+ self._abs_path_to_goma)
+
+ def __enter__(self):
+ if self._HasGOMAPath():
+ self._SetupAndStart()
+ return self
+
+ def __exit__(self, *_):
+ if self._HasGOMAPath():
+ self._Stop()
+
+ def _HasGOMAPath(self):
+ return bool(self._abs_path_to_goma)
+
+ def _GetExecutablePath(self, path_to_goma):
+ if os.name == 'nt':
+ return os.path.join(path_to_goma, 'goma_ctl.bat')
+ else:
+ return os.path.join(path_to_goma, 'goma_ctl.sh')
+
+ def _SetupEnvVars(self):
+ if os.name == 'nt':
+ os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
+ ' cl.exe')
+ os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
+ ' cl.exe')
+ else:
+ os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
+ os.environ['PATH']])
+
+ def _SetupAndStart(self):
+ """Sets up GOMA and launches it.
+
+ Args:
+ path_to_goma: Path to goma directory.
+
+ Returns:
+ True if successful."""
+ self._SetupEnvVars()
+
+ # Sometimes goma is lingering around if something went bad on a previous
+ # run. Stop it before starting a new process. Can ignore the return code
+ # since it will return an error if it wasn't running.
+ self._Stop()
+
+ if subprocess.call([self._abs_path_to_goma_file, 'start']):
+ raise RuntimeError('GOMA failed to start.')
+
+ def _Stop(self):
+ subprocess.call([self._abs_path_to_goma_file, 'stop'])
+
+
+
+def _LoadConfigFile(path_to_file):
+ """Attempts to load the specified config file as a module
and grab the global config dict.
Args:
- path_to_file: Path to the run-bisect-perf-regression.cfg file.
+ path_to_file: Path to the file.
Returns:
The config dict which should be formatted as follows:
{'command': string, 'good_revision': string, 'bad_revision': string
- 'metric': string}.
+ 'metric': string, etc...}.
Returns None on failure.
"""
try:
local_vars = {}
- execfile(os.path.join(path_to_file, 'run-bisect-perf-regression.cfg'),
- local_vars)
+ execfile(path_to_file, local_vars)
return local_vars['config']
except:
@@ -48,7 +115,152 @@ def LoadConfigFile(path_to_file):
return None
-def RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
+def _OutputFailedResults(text_to_print):
+ bisect_utils.OutputAnnotationStepStart('Results - Failed')
+ print
+ print text_to_print
+ print
+ bisect_utils.OutputAnnotationStepClosed()
+
+
+def _CreateBisectOptionsFromConfig(config):
+ opts_dict = {}
+ opts_dict['command'] = config['command']
+ opts_dict['metric'] = config['metric']
+
+ if config['repeat_count']:
+ opts_dict['repeat_test_count'] = int(config['repeat_count'])
+
+ if config['truncate_percent']:
+ opts_dict['truncate_percent'] = int(config['truncate_percent'])
+
+ if config['max_time_minutes']:
+ opts_dict['max_time_minutes'] = int(config['max_time_minutes'])
+
+ if config.has_key('use_goma'):
+ opts_dict['use_goma'] = config['use_goma']
+
+ opts_dict['build_preference'] = 'ninja'
+ opts_dict['output_buildbot_annotations'] = True
+
+ if '--browser=cros' in config['command']:
+ opts_dict['target_platform'] = 'cros'
+
+ if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
+ opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
+ opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
+ else:
+      raise RuntimeError('Cros build selected, but BISECT_CROS_IP or '
+          'BISECT_CROS_BOARD undefined.')
+ elif 'android' in config['command']:
+ opts_dict['target_platform'] = 'android'
+
+ return bisect.BisectOptions.FromDict(opts_dict)
+
+
+def _RunPerformanceTest(config, path_to_file):
+ # Bisect script expects to be run from src
+ os.chdir(os.path.join(path_to_file, '..'))
+
+ bisect_utils.OutputAnnotationStepStart('Building With Patch')
+
+ opts = _CreateBisectOptionsFromConfig(config)
+ b = bisect.BisectPerformanceMetrics(None, opts)
+
+ if bisect_utils.RunGClient(['runhooks']):
+ raise RuntimeError('Failed to run gclient runhooks')
+
+ if not b.BuildCurrentRevision('chromium'):
+ raise RuntimeError('Patched version failed to build.')
+
+ bisect_utils.OutputAnnotationStepClosed()
+ bisect_utils.OutputAnnotationStepStart('Running With Patch')
+
+ results_with_patch = b.RunPerformanceTestAndParseResults(
+ opts.command, opts.metric, reset_on_first_run=True, results_label='Patch')
+
+ if results_with_patch[1]:
+ raise RuntimeError('Patched version failed to run performance test.')
+
+ bisect_utils.OutputAnnotationStepClosed()
+
+ bisect_utils.OutputAnnotationStepStart('Reverting Patch')
+ if bisect_utils.RunGClient(['revert']):
+    raise RuntimeError('Failed to run gclient revert')
+ bisect_utils.OutputAnnotationStepClosed()
+
+ bisect_utils.OutputAnnotationStepStart('Building Without Patch')
+
+ if bisect_utils.RunGClient(['runhooks']):
+ raise RuntimeError('Failed to run gclient runhooks')
+
+ if not b.BuildCurrentRevision('chromium'):
+ raise RuntimeError('Unpatched version failed to build.')
+
+ bisect_utils.OutputAnnotationStepClosed()
+ bisect_utils.OutputAnnotationStepStart('Running Without Patch')
+
+ results_without_patch = b.RunPerformanceTestAndParseResults(
+ opts.command, opts.metric, upload_on_last_run=True, results_label='ToT')
+
+ if results_without_patch[1]:
+ raise RuntimeError('Unpatched version failed to run performance test.')
+
+ # Find the link to the cloud stored results file.
+ output = results_without_patch[2]
+ cloud_file_link = [t for t in output.splitlines()
+ if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
+ if cloud_file_link:
+ cloud_file_link = cloud_file_link[0]
+ else:
+ cloud_file_link = ''
+
+ # Calculate the % difference in the means of the 2 runs.
+ percent_diff_in_means = (results_with_patch[0]['mean'] /
+ max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
+ std_err = bisect.CalculatePooledStandardError(
+ [results_with_patch[0]['values'], results_without_patch[0]['values']])
+
+ bisect_utils.OutputAnnotationStepClosed()
+ bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
+ (percent_diff_in_means, std_err))
+ print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
+ 'Std. Error'.center(20, ' '))
+ print ' %s %s %s' % ('Patch'.center(10, ' '),
+ ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
+ ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
+ print ' %s %s %s' % ('No Patch'.center(10, ' '),
+ ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
+ ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
+ if cloud_file_link:
+ bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
+ bisect_utils.OutputAnnotationStepClosed()
+
+
+def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
+ """Attempts to build and run the current revision with and without the
+ current patch, with the parameters passed in.
+
+ Args:
+ config: The config read from run-perf-test.cfg.
+ path_to_file: Path to the bisect-perf-regression.py script.
+ path_to_goma: Path to goma directory.
+
+ Returns:
+ 0 on success, otherwise 1.
+ """
+ try:
+ with Goma(path_to_goma) as goma:
+ config['use_goma'] = bool(path_to_goma)
+ _RunPerformanceTest(config, path_to_file)
+ return 0
+ except RuntimeError, e:
+ bisect_utils.OutputAnnotationStepClosed()
+ _OutputFailedResults('Error: %s' % e.message)
+ return 1
+
+
+def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
"""Attempts to execute src/tools/bisect-perf-regression.py with the parameters
passed in.
@@ -98,37 +310,13 @@ def RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
if 'android' in config['command']:
cmd.extend(['--target_platform', 'android'])
- goma_file = ''
if path_to_goma:
- path_to_goma = os.path.abspath(path_to_goma)
-
- if os.name == 'nt':
- os.environ['CC'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe'
- os.environ['CXX'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe'
- goma_file = os.path.join(path_to_goma, 'goma_ctl.bat')
- else:
- os.environ['PATH'] = os.pathsep.join([path_to_goma, os.environ['PATH']])
- goma_file = os.path.join(path_to_goma, 'goma_ctl.sh')
-
cmd.append('--use_goma')
- # Sometimes goma is lingering around if something went bad on a previous
- # run. Stop it before starting a new process. Can ignore the return code
- # since it will return an error if it wasn't running.
- subprocess.call([goma_file, 'stop'])
-
- return_code = subprocess.call([goma_file, 'start'])
- if return_code:
- print 'Error: goma failed to start.'
- print
- return return_code
-
cmd = [str(c) for c in cmd]
- return_code = subprocess.call(cmd)
-
- if path_to_goma:
- subprocess.call([goma_file, 'stop'])
+ with Goma(path_to_goma) as goma:
+ return_code = subprocess.call(cmd)
if return_code:
print 'Error: bisect-perf-regression.py returned with error %d' %\
@@ -156,23 +344,38 @@ def main():
'builds will be enabled.')
(opts, args) = parser.parse_args()
- if not opts.working_directory:
- print 'Error: missing required parameter: --working_directory'
- print
- parser.print_help()
- return 1
+ path_to_current_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
+ path_to_bisect_cfg = os.path.join(path_to_current_directory,
+ 'run-bisect-perf-regression.cfg')
- path_to_file = os.path.abspath(os.path.dirname(sys.argv[0]))
+ config = _LoadConfigFile(path_to_bisect_cfg)
- config = LoadConfigFile(path_to_file)
- if not config:
- print 'Error: Could not load config file. Double check your changes to '\
- 'run-bisect-perf-regression.cfg for syntax errors.'
- print
- return 1
+ # Check if the config is empty
+ config_has_values = [v for v in config.values() if v]
+
+ if config and config_has_values:
+ if not opts.working_directory:
+ print 'Error: missing required parameter: --working_directory'
+ print
+ parser.print_help()
+ return 1
+
+ return _RunBisectionScript(config, opts.working_directory,
+ path_to_current_directory, opts.path_to_goma)
+ else:
+ path_to_perf_cfg = os.path.join(
+ os.path.abspath(os.path.dirname(sys.argv[0])), 'run-perf-test.cfg')
+
+ config = _LoadConfigFile(path_to_perf_cfg)
- return RunBisectionScript(config, opts.working_directory, path_to_file,
- opts.path_to_goma)
+ if config:
+ return _SetupAndRunPerformanceTest(config, path_to_current_directory,
+ opts.path_to_goma)
+ else:
+      print 'Error: Could not load config file. Double check your changes to '\
+          'run-perf-test.cfg for syntax errors.'
+ print
+ return 1
if __name__ == '__main__':
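The new Goma class turns the old inline goma_ctl start/stop handling into a
context manager shared by the bisect and perf-test paths; constructed with an
empty path it does nothing. A sketch of its use, as in _RunBisectionScript
(the path and command are illustrative):

  import subprocess

  cmd = ['python', 'bisect-perf-regression.py']  # placeholder command
  with Goma('/path/to/goma'):
    # On entry: any stale goma process is stopped, CC/CXX (Windows) or PATH
    # (elsewhere) are pointed at goma, and goma_ctl start is called.
    return_code = subprocess.call(cmd)
  # On exit: goma_ctl stop is called, even if the block raised.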
diff --git a/tools/run-perf-test.cfg b/tools/run-perf-test.cfg
new file mode 100644
index 0000000..69bdc5a
--- /dev/null
+++ b/tools/run-perf-test.cfg
@@ -0,0 +1,78 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Config file for Run Performance Test Bot
+
+This config file is intended for anyone who wants to run a remote performance
+test. Modify the config below with the command that runs the performance test,
+the metric you're interested in, and the repeat/discard parameters. You can
+then run git try <bot>.
+
+Changes to this file should never be submitted.
+
+Args:
+ 'command': This is the full command line to pass to the
+ bisect-perf-regression.py script in order to execute the test.
+ 'metric': The name of the metric to parse out from the results of the
+ performance test. You can retrieve the metric by looking at the stdio of
+ the performance test. Look for lines of the format:
+
+ RESULT <graph>: <trace>= <value> <units>
+
+ The metric name is "<graph>/<trace>".
+ 'repeat_count': The number of times to repeat the performance test.
+ 'max_time_minutes': The script will attempt to run the performance test
+ "repeat_count" times, unless it exceeds "max_time_minutes".
+ 'truncate_percent': Discard the highest/lowest percent of values from the
+   performance test results.
+
+Sample config:
+
+config = {
+ 'command': './out/Release/performance_ui_tests' +
+ ' --gtest_filter=PageCyclerTest.Intl1File',
+ 'metric': 'times/t',
+ 'repeat_count': '20',
+ 'max_time_minutes': '20',
+ 'truncate_percent': '25',
+}
+
+On Windows:
+  - If you're calling a python script you will need to add "python" to the
+    command:
+
+config = {
+ 'command': 'python tools/perf/run_measurement -v --browser=release kraken',
+ 'metric': 'Total/Total',
+ 'repeat_count': '20',
+ 'max_time_minutes': '20',
+ 'truncate_percent': '25',
+}
+
+
+On ChromeOS:
+ - Script accepts either ChromeOS versions, or unix timestamps as revisions.
+ - You don't need to specify --identity and --remote, they will be added to
+ the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
+
+config = {
+ 'command': './tools/perf/run_measurement -v '\
+ '--browser=cros-chrome-guest '\
+ 'dromaeo tools/perf/page_sets/dromaeo/jslibstylejquery.json',
+ 'metric': 'jslib/jslib',
+ 'repeat_count': '20',
+ 'max_time_minutes': '20',
+ 'truncate_percent': '25',
+}
+
+"""
+
+config = {
+ 'command': '',
+ 'metric': '',
+ 'repeat_count': '',
+ 'max_time_minutes': '',
+ 'truncate_percent': '',
+}
+
+# Workaround git try issue, see crbug.com/257689
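For reference, the 'metric' field names a graph/trace pair printed by the test
itself; a stdout line such as

  RESULT times: t= 123.4 ms

(illustrative values) corresponds to the metric 'times/t' in the sample config
above.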