summaryrefslogtreecommitdiffstats
path: root/tools/run-bisect-perf-regression.py
diff options
context:
space:
mode:
authorsimonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-10-22 21:58:38 +0000
committersimonhatch@chromium.org <simonhatch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-10-22 21:58:38 +0000
commitb3c0ee731210b12a18adc0362212341218462b83 (patch)
treed895ef242f04f3aca03ce26ab32ed33fbda82ba6 /tools/run-bisect-perf-regression.py
parent6fcf47f96fb0f62723a0cd2f8b0dcab3aa9a5160 (diff)
downloadchromium_src-b3c0ee731210b12a18adc0362212341218462b83.zip
chromium_src-b3c0ee731210b12a18adc0362212341218462b83.tar.gz
chromium_src-b3c0ee731210b12a18adc0362212341218462b83.tar.bz2
Added check for Blink's perf cfg file. The bisect/perf bots work by modifying a .cfg file with the parameters of the job (i.e., revision, command, repetitions, etc.) and submitting that as a patch to the bot. Since a single patch can't span both Blink and Chromium, we need a run-perf-test.cfg in each depot that we're likely to try running perf tests from.
Waiting on: https://codereview.chromium.org/33553003/ BUG= NOTRY=true Review URL: https://codereview.chromium.org/34723005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@230222 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools/run-bisect-perf-regression.py')
-rwxr-xr-xtools/run-bisect-perf-regression.py29
1 file changed, 17 insertions, 12 deletions
diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py
index e8490ad..a34e088 100755
--- a/tools/run-bisect-perf-regression.py
+++ b/tools/run-bisect-perf-regression.py
@@ -112,7 +112,7 @@ def _LoadConfigFile(path_to_file):
print
traceback.print_exc()
print
- return None
+ return {}
def _OutputFailedResults(text_to_print):
@@ -363,19 +363,24 @@ def main():
return _RunBisectionScript(config, opts.working_directory,
path_to_current_directory, opts.path_to_goma)
else:
- path_to_perf_cfg = os.path.join(
- os.path.abspath(os.path.dirname(sys.argv[0])), 'run-perf-test.cfg')
+ perf_cfg_files = ['run-perf-test.cfg', os.path.join('..', 'third_party',
+ 'WebKit', 'Tools', 'run-perf-test.cfg')]
- config = _LoadConfigFile(path_to_perf_cfg)
+ for current_perf_cfg_file in perf_cfg_files:
+ path_to_perf_cfg = os.path.join(
+ os.path.abspath(os.path.dirname(sys.argv[0])), current_perf_cfg_file)
- if config:
- return _SetupAndRunPerformanceTest(config, path_to_current_directory,
- opts.path_to_goma)
- else:
- print 'Error: Could not load config file. Double check your changes to '\
- 'run-bisect-perf-regression.cfg for syntax errors.'
- print
- return 1
+ config = _LoadConfigFile(path_to_perf_cfg)
+ config_has_values = [v for v in config.values() if v]
+
+ if config and config_has_values:
+ return _SetupAndRunPerformanceTest(config, path_to_current_directory,
+ opts.path_to_goma)
+
+ print 'Error: Could not load config file. Double check your changes to '\
+ 'run-bisect-perf-regression.cfg/run-perf-test.cfg for syntax errors.'
+ print
+ return 1
if __name__ == '__main__':