author     dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-09-09 20:44:54 +0000
committer  dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-09-09 20:44:54 +0000
commit     5b23fc9cc83f4ce0e4367f79c727825b2e08bbbf (patch)
tree       f5c6678554c9418f38f3387aa11bb26fc2647787 /chrome/test/chromeos/autotest
parent     169d260383f7cba5ef4587e04500dd73410c7ce6 (diff)
PyAuto-based autotest desktopui_PyAutoPerfTests now supports command-line args.
Arguments to autotests are given with the '-a' ('--args') flag.
This CL adds support for handling the following arguments to
desktopui_PyAutoPerfTests:
1) Specify which perf test to run, using the same mechanism used to specify a
particular test in pyauto (e.g., -a "perf.TabPerfTest.testNewTab").
2) Specify the number of iterations to perform in each test (e.g., -a "-i 5").
The arguments can also be combined, e.g., -a "perf.TabPerfTest.testNewTab -i 5"
(see the parsing sketch below).
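
To make the combined form concrete, here is a minimal, self-contained sketch
(standard library only) of how an optparse-based parser like the one this CL
adds splits such an argument string: the '-i' value is consumed as an option
and the pyauto test name is left over as a positional argument. The parser
setup mirrors the parse_args() method in the diff below; the input list is
illustrative.

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('-i', '--num_iterations', dest='num_iterations',
                      type='int', default=0,
                      help='Number of iterations for perf measurements.')

    # Roughly what -a "perf.TabPerfTest.testNewTab -i 5" delivers:
    options, test_args = parser.parse_args(['perf.TabPerfTest.testNewTab',
                                            '-i', '5'])
    assert options.num_iterations == 5
    assert ' '.join(test_args) == 'perf.TabPerfTest.testNewTab'

Because optparse allows options and positionals to be interspersed, the test
name and '-i 5' can appear in either order inside the quoted -a string.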
BUG=chromium-os:18185
TEST=None
Review URL: http://codereview.chromium.org/7867004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100483 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/test/chromeos/autotest')
2 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/control b/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/control
index be13bb2..0bfcb25 100644
--- a/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/control
+++ b/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/control
@@ -16,4 +16,4 @@ This is a wrapper test for Chrome pyauto-based performance tests.
 http://dev.chromium.org/developers/testing/pyauto
 """
 
-job.run_test('desktopui_PyAutoPerfTests')
+job.run_test('desktopui_PyAutoPerfTests', args=args)
diff --git a/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/desktopui_PyAutoPerfTests.py b/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/desktopui_PyAutoPerfTests.py
index 6cfdfe0..73d322d 100644
--- a/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/desktopui_PyAutoPerfTests.py
+++ b/chrome/test/chromeos/autotest/files/client/site_tests/desktopui_PyAutoPerfTests/desktopui_PyAutoPerfTests.py
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import optparse
 import os
 import pwd
 import re
@@ -63,8 +64,20 @@ class desktopui_PyAutoPerfTests(chrome_test.ChromeTestBase):
         open(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE, 'w').close()
         assert os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
 
-    def run_once(self):
+    def parse_args(self, args):
+        """Parses input arguments to this autotest."""
+        parser = optparse.OptionParser()
+        parser.add_option('-i', '--num_iterations', dest='num_iterations',
+                          type='int', default=0,
+                          help='Number of iterations for perf measurements. '
+                               'Defaults to the value given in perf.py.')
+        return parser.parse_args(args)
+
+    def run_once(self, args=[]):
         """Runs the PyAuto performance tests."""
+        options, test_args = self.parse_args(args)
+        test_args = ' '.join(test_args)
+
         # Enable Chrome testing interface and login to a default account.
         deps_dir = os.path.join(self.autodir, 'deps')
         pyautolib_dir = os.path.join(self.cr_source_dir,
@@ -78,10 +91,14 @@ class desktopui_PyAutoPerfTests(chrome_test.ChromeTestBase):
         # Run the PyAuto performance tests.
         functional_cmd = cros_ui.xcommand_as(
             '%s/chrome_test/test_src/chrome/test/functional/'
-            'pyauto_functional.py --suite=CHROMEOS_PERF -v' % deps_dir)
+            'pyauto_functional.py --suite=CHROMEOS_PERF -v %s' % (
+                deps_dir, test_args))
+        environment = os.environ.copy()
+        if options.num_iterations:
+            environment['NUM_ITERATIONS'] = str(options.num_iterations)
         proc = subprocess.Popen(
             functional_cmd, shell=True, stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT)
+            stderr=subprocess.STDOUT, env=environment)
         output = proc.communicate()[0]
         print output  # Ensure pyauto test output is stored in autotest logs.
         if proc.returncode != 0:
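
Note how the iteration count is handed off: run_once() exports NUM_ITERATIONS
into the child process environment only when -i was given (the default of 0 is
falsy, so the variable stays unset and perf.py keeps its own default, per the
option's help text). The consuming side is not part of this CL; the following
is a hedged sketch of how perf.py might read the variable — the helper name
and default value here are hypothetical.

    import os

    _DEFAULT_NUM_ITERATIONS = 50  # hypothetical default; perf.py defines its own

    def _get_num_iterations():
        # An exported NUM_ITERATIONS (set by the autotest wrapper above)
        # overrides the built-in default.
        return int(os.environ.get('NUM_ITERATIONS', _DEFAULT_NUM_ITERATIONS))

Passing the override through the environment rather than argv keeps
pyauto_functional.py's command line free for the test name, which run_once()
appends directly to functional_cmd.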