diff options
author | bulach@chromium.org <bulach@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-11-13 04:03:29 +0000 |
---|---|---|
committer | bulach@chromium.org <bulach@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-11-13 04:03:29 +0000 |
commit | ad32f3154d4760ed383f1dfb1fd4d7f5ecb19969 (patch) | |
tree | 6e7066438d8b294048d15563746f7a69bcaf12a8 /build | |
parent | bf622718f4cc2120b993230813c01dc6d58dbf75 (diff) | |
download | chromium_src-ad32f3154d4760ed383f1dfb1fd4d7f5ecb19969.zip chromium_src-ad32f3154d4760ed383f1dfb1fd4d7f5ecb19969.tar.gz chromium_src-ad32f3154d4760ed383f1dfb1fd4d7f5ecb19969.tar.bz2 |
Android: fix --single-step option for test_runner.py perf.
Due to the way buildbot constructs the command, rather than having a single string
for the command, use --single-step -- cmd args
NOTRY=true
BUG=318369
Review URL: https://codereview.chromium.org/68103010
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@234744 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'build')
-rwxr-xr-x | build/android/test_runner.py | 27 |
1 file changed, 18 insertions, 9 deletions
diff --git a/build/android/test_runner.py b/build/android/test_runner.py index 9dbe7b9..fa3e785 100755 --- a/build/android/test_runner.py +++ b/build/android/test_runner.py @@ -436,12 +436,13 @@ def AddPerfTestOptions(option_parser): option_parser.usage = '%prog perf [options]' option_parser.commands_dict = {} option_parser.example = ('%prog perf ' - '[--single-step command] or ' + '[--single-step -- command args] or ' '[--steps perf_steps.json] or ' - '[--print-step step]') + '[--print-step step]') option_parser.add_option( '--single-step', + action='store_true', help='Execute the given command with retries, but only print the result ' 'for the "most successful" round.') option_parser.add_option( @@ -468,7 +469,7 @@ def AddPerfTestOptions(option_parser): AddCommonOptions(option_parser) -def ProcessPerfTestOptions(options, error_func): +def ProcessPerfTestOptions(options, args, error_func): """Processes all perf test options. Args: @@ -484,10 +485,13 @@ def ProcessPerfTestOptions(options, error_func): [options.steps, options.print_step, options.single_step])) if count != 1: error_func('Please specify one of: --steps, --print-step, --single-step.') + single_step = None + if options.single_step: + single_step = ' '.join(args[2:]) return perf_test_options.PerfOptions( options.steps, options.flaky_steps, options.print_step, options.no_timeout, options.test_filter, options.dry_run, - options.single_step) + single_step) def _RunGTests(options, error_func, devices): @@ -628,10 +632,10 @@ def _RunMonkeyTests(options, error_func, devices): return exit_code -def _RunPerfTests(options, error_func, devices): +def _RunPerfTests(options, args, error_func, devices): """Subcommand of RunTestsCommands which runs perf tests.""" - perf_options = ProcessPerfTestOptions(options, error_func) - # Just print the results from a single previously executed step. 
+ perf_options = ProcessPerfTestOptions(options, args, error_func) + # Just print the results from a single previously executed step. if perf_options.print_step: return perf_test_runner.PrintTestOutput(perf_options.print_step) @@ -696,9 +700,14 @@ def RunTestsCommand(command, options, args, option_parser): """ # Check for extra arguments - if len(args) > 2: + if len(args) > 2 and command != 'perf': option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) return constants.ERROR_EXIT_CODE + if command == 'perf': + if ((options.single_step and len(args) <= 2) or + (not options.single_step and len(args) > 2)): + option_parser.error('Unrecognized arguments: %s' % (' '.join(args))) + return constants.ERROR_EXIT_CODE ProcessCommonOptions(options) @@ -717,7 +726,7 @@ def RunTestsCommand(command, options, args, option_parser): elif command == 'monkey': return _RunMonkeyTests(options, option_parser.error, devices) elif command == 'perf': - return _RunPerfTests(options, option_parser.error, devices) + return _RunPerfTests(options, args, option_parser.error, devices) else: raise Exception('Unknown test type.') |