author | simonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-10-22 00:10:24 +0000
committer | simonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-10-22 00:10:24 +0000
commit | 774a58c3ad4fb5f93bae4ed761cddf1b90365ff3 (patch)
tree | c02940eb3f22f4396e39b1805fe664b3752ad008 /tools/run-bisect-perf-regression.py
parent | 32a3e53d38529dd409a090ddb21eec37b5864371 (diff)
download | chromium_src-774a58c3ad4fb5f93bae4ed761cddf1b90365ff3.zip chromium_src-774a58c3ad4fb5f93bae4ed761cddf1b90365ff3.tar.gz chromium_src-774a58c3ad4fb5f93bae4ed761cddf1b90365ff3.tar.bz2
First pass performance try bot.
Piggy-backed a performance comparison mode onto the existing bisect architecture. The script builds and runs the specified performance test, reverts any local changes, builds again, and runs the test a second time. At the moment, only Chromium changes can be perf-tested. Future CLs will add the ability to perf-test Blink changes.
Waiting on http://codereview.chromium.org/27413002, http://codereview.chromium.org/26179009.
BUG=
TEST=
Modify run-perf-test.cfg, commit, and run git try --user=<user> -b linux_perf_bisect
Expect the bot to run the test with and without the patch, e.g. http://build.chromium.org/p/tryserver.chromium/builders/linux_perf_bisect/builds/387
Review URL: https://codereview.chromium.org/27165006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@229963 0039d316-1c4b-4281-b951-d872f2087c98
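For context, the perf try mode reads tools/run-perf-test.cfg, which _LoadConfigFile evaluates with execfile() and which must define a global config dict. Below is a minimal sketch of such a file; the keys mirror what _CreateBisectOptionsFromConfig in the diff reads (command, metric, repeat_count, max_time_minutes, truncate_percent), while the command and metric values are placeholders for illustration and are not taken from this CL.

# run-perf-test.cfg -- evaluated with execfile(), so it is plain Python.
# Only the keys are dictated by the script; all values here are placeholders.
config = {
  'command': './tools/perf/run_measurement smoothness page_sets/top_25.json',  # hypothetical test command
  'metric': 'mean_frame_time/mean_frame_time',  # hypothetical chart/trace name
  'repeat_count': '20',        # how many times to repeat the test
  'max_time_minutes': '20',    # cap on total runtime per pass
  'truncate_percent': '25',    # percent of outliers truncated from the mean
}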
Diffstat (limited to 'tools/run-bisect-perf-regression.py')
-rwxr-xr-x | tools/run-bisect-perf-regression.py | 297
1 file changed, 250 insertions, 47 deletions
diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py
index 0181ef7..e8490ad 100755
--- a/tools/run-bisect-perf-regression.py
+++ b/tools/run-bisect-perf-regression.py
@@ -19,26 +19,93 @@ import subprocess
 import sys
 import traceback
 
+import bisect_utils
+bisect = imp.load_source('bisect-perf-regression',
+    os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
+    'bisect-perf-regression.py'))
+
+
 CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
 CROS_IP_ENV = 'BISECT_CROS_IP'
 
-def LoadConfigFile(path_to_file):
-  """Attempts to load the file 'run-bisect-perf-regression.cfg' as a module
+
+class Goma(object):
+
+  def __init__(self, path_to_goma):
+    self._abs_path_to_goma = None
+    self._abs_path_to_goma_file = None
+    if path_to_goma:
+      self._abs_path_to_goma = os.path.abspath(path_to_goma)
+      self._abs_path_to_goma_file = self._GetExecutablePath(
+          self._abs_path_to_goma)
+
+  def __enter__(self):
+    if self._HasGOMAPath():
+      self._SetupAndStart()
+    return self
+
+  def __exit__(self, *_):
+    if self._HasGOMAPath():
+      self._Stop()
+
+  def _HasGOMAPath(self):
+    return bool(self._abs_path_to_goma)
+
+  def _GetExecutablePath(self, path_to_goma):
+    if os.name == 'nt':
+      return os.path.join(path_to_goma, 'goma_ctl.bat')
+    else:
+      return os.path.join(path_to_goma, 'goma_ctl.sh')
+
+  def _SetupEnvVars(self):
+    if os.name == 'nt':
+      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
+                          ' cl.exe')
+      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
+                           ' cl.exe')
+    else:
+      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
+                                            os.environ['PATH']])
+
+  def _SetupAndStart(self):
+    """Sets up GOMA and launches it.
+
+    Args:
+      path_to_goma: Path to goma directory.
+
+    Returns:
+      True if successful."""
+    self._SetupEnvVars()
+
+    # Sometimes goma is lingering around if something went bad on a previous
+    # run. Stop it before starting a new process. Can ignore the return code
+    # since it will return an error if it wasn't running.
+    self._Stop()
+
+    if subprocess.call([self._abs_path_to_goma_file, 'start']):
+      raise RuntimeError('GOMA failed to start.')
+
+  def _Stop(self):
+    subprocess.call([self._abs_path_to_goma_file, 'stop'])
+
+
+
+def _LoadConfigFile(path_to_file):
+  """Attempts to load the specified config file as a module
   and grab the global config dict.
 
   Args:
-    path_to_file: Path to the run-bisect-perf-regression.cfg file.
+    path_to_file: Path to the file.
 
   Returns:
     The config dict which should be formatted as follows:
     {'command': string, 'good_revision': string, 'bad_revision': string
-     'metric': string}.
+     'metric': string, etc...}.
     Returns None on failure.
   """
   try:
     local_vars = {}
-    execfile(os.path.join(path_to_file, 'run-bisect-perf-regression.cfg'),
-             local_vars)
+    execfile(path_to_file, local_vars)
 
     return local_vars['config']
   except:
@@ -48,7 +115,152 @@ def LoadConfigFile(path_to_file):
     return None
 
 
-def RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
+def _OutputFailedResults(text_to_print):
+  bisect_utils.OutputAnnotationStepStart('Results - Failed')
+  print
+  print text_to_print
+  print
+  bisect_utils.OutputAnnotationStepClosed()
+
+
+def _CreateBisectOptionsFromConfig(config):
+  opts_dict = {}
+  opts_dict['command'] = config['command']
+  opts_dict['metric'] = config['metric']
+
+  if config['repeat_count']:
+    opts_dict['repeat_test_count'] = int(config['repeat_count'])
+
+  if config['truncate_percent']:
+    opts_dict['truncate_percent'] = int(config['truncate_percent'])
+
+  if config['max_time_minutes']:
+    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])
+
+  if config.has_key('use_goma'):
+    opts_dict['use_goma'] = config['use_goma']
+
+  opts_dict['build_preference'] = 'ninja'
+  opts_dict['output_buildbot_annotations'] = True
+
+  if '--browser=cros' in config['command']:
+    opts_dict['target_platform'] = 'cros'
+
+    if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
+      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
+      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
+    else:
+      raise RuntimeError('Cros build selected, but BISECT_CROS_IP or'
+                         'BISECT_CROS_BOARD undefined.')
+  elif 'android' in config['command']:
+    opts_dict['target_platform'] = 'android'
+
+  return bisect.BisectOptions.FromDict(opts_dict)
+
+
+def _RunPerformanceTest(config, path_to_file):
+  # Bisect script expects to be run from src
+  os.chdir(os.path.join(path_to_file, '..'))
+
+  bisect_utils.OutputAnnotationStepStart('Building With Patch')
+
+  opts = _CreateBisectOptionsFromConfig(config)
+  b = bisect.BisectPerformanceMetrics(None, opts)
+
+  if bisect_utils.RunGClient(['runhooks']):
+    raise RuntimeError('Failed to run gclient runhooks')
+
+  if not b.BuildCurrentRevision('chromium'):
+    raise RuntimeError('Patched version failed to build.')
+
+  bisect_utils.OutputAnnotationStepClosed()
+  bisect_utils.OutputAnnotationStepStart('Running With Patch')
+
+  results_with_patch = b.RunPerformanceTestAndParseResults(
+      opts.command, opts.metric, reset_on_first_run=True, results_label='Patch')
+
+  if results_with_patch[1]:
+    raise RuntimeError('Patched version failed to run performance test.')
+
+  bisect_utils.OutputAnnotationStepClosed()
+
+  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
+  if bisect_utils.RunGClient(['revert']):
+    raise RuntimeError('Failed to run gclient runhooks')
+  bisect_utils.OutputAnnotationStepClosed()
+
+  bisect_utils.OutputAnnotationStepStart('Building Without Patch')
+
+  if bisect_utils.RunGClient(['runhooks']):
+    raise RuntimeError('Failed to run gclient runhooks')
+
+  if not b.BuildCurrentRevision('chromium'):
+    raise RuntimeError('Unpatched version failed to build.')
+
+  bisect_utils.OutputAnnotationStepClosed()
+  bisect_utils.OutputAnnotationStepStart('Running Without Patch')
+
+  results_without_patch = b.RunPerformanceTestAndParseResults(
+      opts.command, opts.metric, upload_on_last_run=True, results_label='ToT')
+
+  if results_without_patch[1]:
+    raise RuntimeError('Unpatched version failed to run performance test.')
+
+  # Find the link to the cloud stored results file.
+  output = results_without_patch[2]
+  cloud_file_link = [t for t in output.splitlines()
+      if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
+  if cloud_file_link:
+    cloud_file_link = cloud_file_link[0]
+  else:
+    cloud_file_link = ''
+
+  # Calculate the % difference in the means of the 2 runs.
+  percent_diff_in_means = (results_with_patch[0]['mean'] /
+      max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
+  std_err = bisect.CalculatePooledStandardError(
+      [results_with_patch[0]['values'], results_without_patch[0]['values']])
+
+  bisect_utils.OutputAnnotationStepClosed()
+  bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
+      (percent_diff_in_means, std_err))
+  print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
+      'Std. Error'.center(20, ' '))
+  print ' %s %s %s' % ('Patch'.center(10, ' '),
+      ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
+      ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
+  print ' %s %s %s' % ('No Patch'.center(10, ' '),
+      ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
+      ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
+  if cloud_file_link:
+    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
+  bisect_utils.OutputAnnotationStepClosed()
+
+
+def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
+  """Attempts to build and run the current revision with and without the
+  current patch, with the parameters passed in.
+
+  Args:
+    config: The config read from run-perf-test.cfg.
+    path_to_file: Path to the bisect-perf-regression.py script.
+    path_to_goma: Path to goma directory.
+
+  Returns:
+    0 on success, otherwise 1.
+  """
+  try:
+    with Goma(path_to_goma) as goma:
+      config['use_goma'] = bool(path_to_goma)
+      _RunPerformanceTest(config, path_to_file)
+    return 0
+  except RuntimeError, e:
+    bisect_utils.OutputAnnotationStepClosed()
+    _OutputFailedResults('Error: %s' % e.message)
+    return 1
+
+
+def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
   """Attempts to execute src/tools/bisect-perf-regression.py with the
   parameters passed in.
 
@@ -98,37 +310,13 @@ def RunBisectionScript(config, working_directory, path_to_file, path_to_goma):
   if 'android' in config['command']:
     cmd.extend(['--target_platform', 'android'])
 
-  goma_file = ''
   if path_to_goma:
-    path_to_goma = os.path.abspath(path_to_goma)
-
-    if os.name == 'nt':
-      os.environ['CC'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe'
-      os.environ['CXX'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe'
-      goma_file = os.path.join(path_to_goma, 'goma_ctl.bat')
-    else:
-      os.environ['PATH'] = os.pathsep.join([path_to_goma, os.environ['PATH']])
-      goma_file = os.path.join(path_to_goma, 'goma_ctl.sh')
-
     cmd.append('--use_goma')
 
-    # Sometimes goma is lingering around if something went bad on a previous
-    # run. Stop it before starting a new process. Can ignore the return code
-    # since it will return an error if it wasn't running.
-    subprocess.call([goma_file, 'stop'])
-
-    return_code = subprocess.call([goma_file, 'start'])
-    if return_code:
-      print 'Error: goma failed to start.'
-      print
-      return return_code
-
   cmd = [str(c) for c in cmd]
 
-  return_code = subprocess.call(cmd)
-
-  if path_to_goma:
-    subprocess.call([goma_file, 'stop'])
+  with Goma(path_to_goma) as goma:
+    return_code = subprocess.call(cmd)
 
   if return_code:
     print 'Error: bisect-perf-regression.py returned with error %d' %\
@@ -156,23 +344,38 @@ def main():
                     'builds will be enabled.')
   (opts, args) = parser.parse_args()
 
-  if not opts.working_directory:
-    print 'Error: missing required parameter: --working_directory'
-    print
-    parser.print_help()
-    return 1
+  path_to_current_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
+  path_to_bisect_cfg = os.path.join(path_to_current_directory,
+                                    'run-bisect-perf-regression.cfg')
 
-  path_to_file = os.path.abspath(os.path.dirname(sys.argv[0]))
+  config = _LoadConfigFile(path_to_bisect_cfg)
 
-  config = LoadConfigFile(path_to_file)
-  if not config:
-    print 'Error: Could not load config file. Double check your changes to '\
-          'run-bisect-perf-regression.cfg for syntax errors.'
-    print
-    return 1
+  # Check if the config is empty
+  config_has_values = [v for v in config.values() if v]
+
+  if config and config_has_values:
+    if not opts.working_directory:
+      print 'Error: missing required parameter: --working_directory'
+      print
+      parser.print_help()
+      return 1
+
+    return _RunBisectionScript(config, opts.working_directory,
+                               path_to_current_directory, opts.path_to_goma)
+  else:
+    path_to_perf_cfg = os.path.join(
+        os.path.abspath(os.path.dirname(sys.argv[0])), 'run-perf-test.cfg')
+
+    config = _LoadConfigFile(path_to_perf_cfg)
 
-  return RunBisectionScript(config, opts.working_directory, path_to_file,
-                            opts.path_to_goma)
+    if config:
+      return _SetupAndRunPerformanceTest(config, path_to_current_directory,
+                                         opts.path_to_goma)
+    else:
+      print 'Error: Could not load config file. Double check your changes to '\
+            'run-bisect-perf-regression.cfg for syntax errors.'
+      print
+      return 1
 
 
 if __name__ == '__main__':
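As the new main() shows, run-bisect-perf-regression.cfg is loaded first; if it contains any non-empty values the script runs a bisect via _RunBisectionScript, otherwise it falls back to run-perf-test.cfg and the with/without-patch comparison via _SetupAndRunPerformanceTest. A minimal sketch of the bisect-side config follows, using only the keys named in the _LoadConfigFile docstring; all values are placeholders for illustration, not taken from this CL.

# run-bisect-perf-regression.cfg -- also execfile()'d into a global 'config' dict.
# On the perf try bot every value is left empty, which is what makes main()
# fall through to run-perf-test.cfg and the comparison mode added in this CL.
config = {
  'command': './tools/perf/run_measurement smoothness page_sets/top_25.json',  # hypothetical command
  'good_revision': '228222',  # placeholder revision known to be good
  'bad_revision': '228288',   # placeholder revision showing the regression
  'metric': 'mean_frame_time/mean_frame_time',  # hypothetical metric
}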