author    nirnimesh@chromium.org <nirnimesh@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-03-31 01:25:39 +0000
committer nirnimesh@chromium.org <nirnimesh@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-03-31 01:25:39 +0000
commit    6420f7715a34c7bdd0bf3ab64a62224c753d9a14 (patch)
tree      af8b1d82ccb8cc04b22215c5cd269678ccfafdc2
parent    ad74022bb41f8afed397ae6c72b105a7ec0ff060 (diff)
First media performance PyAuto test checkin. The aim of this checkin is to run
these performance tests using local media files and make the resulting data
available on Buildbot.

Review URL: http://codereview.chromium.org/6312171

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@79944 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  chrome/test/data/media/dataset.csv                        6
-rw-r--r--  chrome/test/data/media/player_for_perf.html              68
-rwxr-xr-x  chrome/test/functional/media_perf.py                    131
-rwxr-xr-x  chrome/test/functional/media_playbacktime.py             30
-rwxr-xr-x  chrome/test/functional/media_test_base.py               199
-rwxr-xr-x  chrome/test/functional/media_test_runner.py             142
-rw-r--r--  chrome/test/functional/ui_perf_test_measure_thread.py    94
-rw-r--r--  chrome/test/functional/ui_perf_test_utils.py            252
-rw-r--r--  chrome/test/functional/ui_perf_test_utils_unittest.py   121
9 files changed, 1043 insertions, 0 deletions
diff --git a/chrome/test/data/media/dataset.csv b/chrome/test/data/media/dataset.csv
new file mode 100644
index 0000000..0ce17ad
--- /dev/null
+++ b/chrome/test/data/media/dataset.csv
@@ -0,0 +1,6 @@
+"comment","tag(video or audio)","filename","filename_nickname",
+"video","bear_silent.ogv","bear_silent.ogv"
+"video","bear_silent.webm","bear_silent.webm"
+"video","bear.ogv","bear.ogv"
+"audio","bear.wav","bear.wav"
+"video","bear.webm","bear.webm"
diff --git a/chrome/test/data/media/player_for_perf.html b/chrome/test/data/media/player_for_perf.html
new file mode 100644
index 0000000..a963323
--- /dev/null
+++ b/chrome/test/data/media/player_for_perf.html
@@ -0,0 +1,68 @@
+<!--
+This HTML file contains a player div that is used for performance testing
+(chrome/test/functional/media_perf.py).
+The query string should contain the following information
+(delimited by "="):
+  tag (required): the HTML media tag to use ("video" or "audio").
+  media file (required): the media file name
+    (should be in the same directory as this file).
+  t (optional): appends a timestamped "t" parameter to bypass the media cache.
+
+  Example: "player_for_perf.html?video=foo.webm=t"
+-->
+<html>
+<body>
+<div id="player_container"></div>
+<script>
+var player = null;
+function InstallEventHandler(event, action) {
+ player.addEventListener(event, function(e) {
+ eval(action);
+ }, false);
+}
+
+// Parse the location and load the media file accordingly.
+var url = window.location.href;
+var url_parts = url.split('?');
+
+// Make sure the URL is of the form "player_for_perf.html?query".
+var ok = false;
+if (url_parts.length > 1) {
+ var query = url_parts[1];
+ var query_parts = query.split('=');
+ if (query_parts.length >= 2) {
+ var tag = query_parts[0];
+ var media_url = query_parts[1];
+ if (query_parts.length == 3) {
+      // If there is a third parameter at the end,
+      // append a timestamped "t" query parameter to the media URL
+      // so the media cache is bypassed.
+ var cold = query_parts[2];
+ if (cold != '') {
+ media_url += '?t=' + (new Date()).getTime();
+ }
+ }
+ if (tag == 'audio' || tag == 'video') {
+ ok = true;
+ var container = document.getElementById('player_container');
+ container.innerHTML = '<' + tag + ' controls id="player"></' + tag + '>';
+ player = document.getElementById('player');
+
+ // Install event handlers.
+ InstallEventHandler('error',
+ 'document.title = "ERROR = " + player.error.code');
+ InstallEventHandler('playing', 'document.title = "PLAYING"');
+ InstallEventHandler('ended', 'document.title = "END"');
+
+ // Starts the player.
+ player.src = media_url;
+ player.play();
+ }
+ }
+}
+if (!ok) {
+ document.title = 'FAILED';
+}
+</script>
+</body>
+</html>
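
For context, a minimal sketch of how a PyAuto test is expected to drive this
page (it mirrors the calls made by media_test_base.py later in this change;
the class name, media file, and timeout below are illustrative):

    import os

    import pyauto_functional  # Must be imported before pyauto.
    import pyauto


    class ExamplePlayerTest(pyauto.PyUITest):  # hypothetical test class
      def testPlayToEnd(self):
        url = self.GetFileURLForDataPath(
            os.path.join('media', 'player_for_perf.html')) + '?video=bear.webm'
        self.NavigateToURL(url)
        # player_for_perf.html reports progress through document.title:
        # 'PLAYING', then 'END' (or 'ERROR = <code>' / 'FAILED').
        self.WaitUntil(lambda: self.GetDOMValue('document.title') == 'END',
                       10000)


    if __name__ == '__main__':
      pyauto_functional.Main()
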
diff --git a/chrome/test/functional/media_perf.py b/chrome/test/functional/media_perf.py
new file mode 100755
index 0000000..36a9b63
--- /dev/null
+++ b/chrome/test/functional/media_perf.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Performance test for HTML5 media tag.
+
+This PyAuto powered script plays media (video or audio) files (using HTML5 tag
+embedded in player.html file) and measures CPU and memory usage using psutil
+library in a different thread using UIPerfTestMeasureThread class.
+The parameters needed to run this performance test are passed in the form of
+environment variables (such as the number of runs). media_perf_runner.py is
+used for generating these variables (PyAuto does not support direct
+parameters).
+
+Ref: http://code.google.com/p/psutil/wiki/Documentation
+"""
+import os
+import time
+
+import pyauto_functional # Must be imported before pyauto.
+import pyauto
+
+from media_test_base import MediaTestBase
+from ui_perf_test_measure_thread import UIPerfTestMeasureThread
+from ui_perf_test_utils import UIPerfTestUtils
+
+
+class MediaPerformanceTest(MediaTestBase):
+ """Tests for basic media performance."""
+  # Since PyAuto does not support command-line arguments, we have to rely on
+  # environment variables. The following are the names of the environment
+  # variables that are used in the tests.
+  # Define the interval for the measurement.
+  MEASURE_INTERVAL_ENV_NAME = 'MEASURE_INTERVALS'
+  # Default time interval (in seconds) between measurements.
+  DEFAULT_MEASURE_INTERVAL = 1
+ TIMEOUT = 10000
+
+  # These predefined names come from the psutil library,
+  # except for 'measure-time', which is the time of the measurement
+  # relative to the start of the test run.
+ CHROME_PROCESS_INFO_NAMES = ['measure-time',
+ 'pct-cpu',
+                               # pct-cpu: a float representing the process CPU
+                               # utilization as a percentage. When interval is
+                               # > 0.0, it compares CPU times elapsed before
+                               # and after the interval (blocking).
+ 'cpu-user',
+ 'cpu-system',
+                               # cpu-user, cpu-system: the process CPU user
+                               # and system times, i.e. the time in seconds
+                               # spent in user and system mode respectively.
+ 'memory-rss',
+ 'memory-vms',
+                               # memory-rss, memory-vms: RSS (Resident Set
+                               # Size) and VMS (Virtual Memory Size) in MB
+                               # (converted from bytes by GetResourceInfo).
+ 'pct-process-memory']
+ # pct-process-memory: compare physical system memory to process resident
+ # memory and calculate process memory utilization as a percentage.
+ CHROME_PROCESS_INFO_UNITS = ['sec',
+ 'percent',
+ 'load',
+ 'load',
+ 'MB',
+ 'MB',
+ 'percent']
+ # Instance variables.
+ run_counter = 0
+ chrome_renderer_process_infos = []
+ measure_thread = None
+
+ def testHTML5MediaTag(self):
+    """Test the HTML5 media tag."""
+ MediaTestBase.ExecuteTest(self)
+
+ def PreAllRunsProcess(self):
+ """A method to execute before all runs."""
+ MediaTestBase.PreAllRunsProcess(self)
+ self.chrome_renderer_process_infos = []
+ for i in range(self.number_of_runs):
+ self.chrome_renderer_process_infos.append([])
+
+ def PostAllRunsProcess(self):
+ """A method to execute after all runs."""
+ MediaTestBase.PostAllRunsProcess(self)
+ print UIPerfTestUtils.PrintMeasuredData(
+ measured_data_list=self.chrome_renderer_process_infos,
+ measured_data_name_list=self.CHROME_PROCESS_INFO_NAMES,
+ measured_data_unit_list=self.CHROME_PROCESS_INFO_UNITS,
+ remove_first_result=self.remove_first_result,
+ parameter_string=self.parameter_str,
+ title=self.media_filename_nickname)
+
+ def PreEachRunProcess(self, run_counter):
+ """A method to execute before each run.
+
+ Starts a thread that measures the performance.
+
+ Args:
+ run_counter: a counter for each run.
+ """
+ MediaTestBase.PreEachRunProcess(self, run_counter)
+
+ self.run_counter = run_counter
+    measure_intervals = os.getenv(self.MEASURE_INTERVAL_ENV_NAME,
+                                  self.DEFAULT_MEASURE_INTERVAL)
+    # Start the measurement thread with the configured interval.
+    self.measure_thread = UIPerfTestMeasureThread(float(measure_intervals))
+    self.measure_thread.start()
+
+ def PostEachRunProcess(self, run_counter):
+ """A method to execute after each run.
+
+ Terminates the measuring thread and records the measurement in
+ measure_thread.chrome_renderer_process_info.
+
+ Args:
+ run_counter: a counter for each run.
+ """
+ MediaTestBase.PostEachRunProcess(self, run_counter)
+ # Record the measurement data.
+ self.chrome_renderer_process_infos[run_counter] = (
+ self.measure_thread.chrome_renderer_process_info)
+    # Signal the measurement thread to stop, then join it.
+ self.measure_thread.stop_measurement = True
+ self.measure_thread.join(self.TIMEOUT)
+
+
+if __name__ == '__main__':
+ pyauto_functional.Main()
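
For reference, a rough sketch of invoking this test by hand; normally
media_test_runner.py sets these environment variables, and the values below
are only illustrative:

    import os
    import subprocess

    env = dict(os.environ)
    env.update({'HTML_TAG': 'video',
                'MEDIA_FILENAME': 'bear.webm',
                'MEDIA_FILENAME_NICKNAME': 'bear.webm',
                'N_RUNS': '3',
                'MEASURE_INTERVALS': '1'})
    subprocess.call(['python', 'media_perf.py'], env=env)
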
diff --git a/chrome/test/functional/media_playbacktime.py b/chrome/test/functional/media_playbacktime.py
new file mode 100755
index 0000000..12630d2
--- /dev/null
+++ b/chrome/test/functional/media_playbacktime.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Simple test for HTML5 media tag to measure playback time.
+
+This PyAuto powered script plays media (video or audio) files (using HTML5 tag
+embedded in player.html file) and records whole playback time. The parameters
+needed to run this performance test are passed in the form of environment
+variables (such as the number of runs). media_perf_runner.py is used for
+generating these variables (PyAuto does not support direct parameters).
+"""
+import pyauto_functional # Must be imported before pyauto.
+import pyauto
+
+from media_test_base import MediaTestBase
+
+
+class MediaPlaybackTimeTest(MediaTestBase):
+ """Test class to record playback time."""
+
+ def testHTML5MediaTag(self):
+    """Test the HTML5 media tag."""
+ MediaTestBase.ExecuteTest(self)
+
+
+if __name__ == '__main__':
+ pyauto_functional.Main()
diff --git a/chrome/test/functional/media_test_base.py b/chrome/test/functional/media_test_base.py
new file mode 100755
index 0000000..3f60f0b
--- /dev/null
+++ b/chrome/test/functional/media_test_base.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A framework to run PyAuto HTML media tests.
+
+This PyAuto-powered script plays media (video or audio) files using the HTML5
+tag embedded in player_for_perf.html. The parameters needed to run this test
+are passed in the form of environment variables (such as the number of runs).
+media_test_runner.py is used for generating these variables
+(PyAuto does not support direct parameters).
+"""
+import os
+import time
+
+import pyauto_functional # Must be imported before pyauto.
+import pyauto
+
+from ui_perf_test_utils import UIPerfTestUtils
+
+
+class MediaTestBase(pyauto.PyUITest):
+  """A base class for media-related PyAuto tests.
+
+  This class is meant to be the base class for all media-related PyAuto tests
+  and provides useful functionality to run tests in conjunction with
+  player_for_perf.html, which contains the basic HTML and JavaScript for media
+  tests. The main test method (ExecuteTest()) contains execution loops to get
+  averaged measured data over several runs under the same conditions. This
+  class also defines several pre/post-processing hooks that can be overridden.
+  """
+  # Since PyAuto does not support command-line arguments, we have to rely on
+  # environment variables. The following are the names of the environment
+  # variables that are used in the tests. PLAYER_HTML_URL_NICKNAME is used to
+ # display the result output in compact form (e.g., "local", "remote").
+ PLAYER_HTML_URL_NICKNAME_ENV_NAME = 'PLAYER_HTML_URL_NICKNAME'
+ # Default base url nick name used to display the result in case it is not
+  # Default base URL nickname used to display the result in case it is not
+  # specified by the environment variable.
+ PLAYER_HTML_URL_ENV_NAME = 'PLAYER_HTML_URL'
+ # Use this when you want to add extra information in result output.
+ EXTRA_NICKNAME_ENV_NAME = 'EXTRA_NICKNAME'
+ # Use this when you do not want to report the first result output.
+ # First result includes time to start up the browser.
+ REMOVE_FIRST_RESULT_ENV_NAME = 'REMOVE_FIRST_RESULT'
+  # Add a timestamped t parameter to the query string to disable media cache.
+ ADD_T_PARAMETER_ENV_NAME = 'ADD_T_PARAMETER'
+ # Print out only playback time information (neither CPU nor memory).
+ PRINT_ONLY_TIME_ENV_NAME = 'PRINT_ONLY_TIME'
+ # Define the number of tries.
+ N_RUNS_ENV_NAME = 'N_RUNS'
+ # Define tag name in HTML (either video or audio).
+ MEDIA_TAG_ENV_NAME = 'HTML_TAG'
+ # Define media file name.
+ MEDIA_FILENAME_ENV_NAME = 'MEDIA_FILENAME'
+ # Define media file nickname that is used for display.
+ MEDIA_FILENAME_NICKNAME_ENV_NAME = 'MEDIA_FILENAME_NICKNAME'
+ # Default values used for default case.
+ DEFAULT_MEDIA_TAG_NAME = 'video'
+ DEFAULT_MEDIA_FILENAME = 'bear_silent.ogv'
+ DEFAULT_NUMBER_OF_RUNS = 3
+ TIMEOUT = 10000
+  # Instance variables that are used across methods.
+ number_of_runs = 0
+ url = ''
+ parameter_str = ''
+ times = []
+ media_filename = ''
+ media_filename_nickname = ''
+
+ def _GetMediaURLAndParameterString(self, media_filename):
+    """Get the media URL and the parameter string.
+
+    If the player HTML URL is specified in an environment variable, it is
+    used. Otherwise, the local media data directory is used for the URL.
+    The parameter string is built from the environment variables.
+
+ Args:
+ media_filename: the file name for the media (video/audio) with extension.
+
+ Returns:
+ a tuple of media_url (with proper query string) and a parameter string,
+ which is used for performance result display.
+ """
+ # Read environment variables.
+ player_html_url = os.getenv(self.PLAYER_HTML_URL_ENV_NAME, 'DEFAULT')
+ player_html_url_nickname = os.getenv(
+ self.PLAYER_HTML_URL_NICKNAME_ENV_NAME,
+ self.DEFAULT_PLAYER_HTML_URL_NICKNAME)
+ extra_nickname = os.getenv(self.EXTRA_NICKNAME_ENV_NAME, '')
+    # This parameter tricks the media cache into thinking
+    # it's a new file every time.
+    # However, it does not seem to make much difference in
+    # performance.
+ add_t_parameter = os.getenv(self.ADD_T_PARAMETER_ENV_NAME) in ('Y', 'y')
+ # Print only playback time data.
+ print_only_time = os.getenv(self.PRINT_ONLY_TIME_ENV_NAME) in ('Y', 'y')
+ tag = os.getenv(self.MEDIA_TAG_ENV_NAME, self.DEFAULT_MEDIA_TAG_NAME)
+ if add_t_parameter:
+ # This can be any string and setting this disables the media cache.
+ t_para_query_str = '=t_para'
+ else:
+ t_para_query_str = ''
+ query_str = tag + '=' + media_filename + t_para_query_str
+ if player_html_url_nickname == self.DEFAULT_PLAYER_HTML_URL_NICKNAME:
+ # Default is local file under DataDir().
+ file_url = self.GetFileURLForDataPath(
+ os.path.join('media', 'player_for_perf.html'))
+ url = file_url + '?' + query_str
+ else:
+ url = player_html_url + '?' + query_str
+ parameter_str = 'tpara_%s-%s-%s' % (str(add_t_parameter),
+ player_html_url_nickname,
+ extra_nickname)
+ return url, parameter_str
+
+ def ExecuteTest(self):
+ """Test HTML5 Media Tag."""
+
+ def _VideoEnded():
+ """Determine if the video ended.
+
+ When the video has finished playing, its title is updated by player.html.
+
+ Returns:
+ True if the video has ended.
+ """
+ return self.GetDOMValue('document.title').strip() == 'END'
+
+ self.PreAllRunsProcess()
+ for run_counter in range(self.number_of_runs):
+ self.PreEachRunProcess(run_counter)
+ self.NavigateToURL(self.url)
+ self.WaitUntil(lambda: _VideoEnded(),
+ self.TIMEOUT)
+ self.PostEachRunProcess(run_counter)
+
+ self.PostAllRunsProcess()
+
+  # The following methods may be overridden in subclasses.
+  # It is good practice for overrides to still call these base
+  # implementations.
+
+ def PreAllRunsProcess(self):
+ """A method to be executed before all runs.
+
+ The default behavior is to read parameters for the tests and initialize
+ variables.
+ """
+ self.media_filename = os.getenv(self.MEDIA_FILENAME_ENV_NAME,
+ self.DEFAULT_MEDIA_FILENAME)
+ self.remove_first_result = (
+ os.getenv(self.REMOVE_FIRST_RESULT_ENV_NAME) in ('Y', 'y'))
+ self.number_of_runs = int(os.getenv(self.N_RUNS_ENV_NAME,
+ self.DEFAULT_NUMBER_OF_RUNS))
+ self.url, self.parameter_str = self._GetMediaURLAndParameterString(
+ self.media_filename)
+ self.times = []
+
+ def PostAllRunsProcess(self):
+ """A method to execute after all runs.
+
+ The default behavior is to print out the playback time data.
+ """
+
+ self.media_filename_nickname = os.getenv(
+ self.MEDIA_FILENAME_NICKNAME_ENV_NAME, self.media_filename)
+    # Print out playback time for each run; the first result is dropped in
+    # PostEachRunProcess() when REMOVE_FIRST_RESULT is set.
+    print UIPerfTestUtils.PrintResultsImpl(
+        measurement='playback-' + self.parameter_str, modifier='',
+        trace=self.media_filename_nickname, values=self.times, units='sec')
+
+ def PreEachRunProcess(self, run_counter):
+ """A method to execute before each run.
+
+ The default behavior is to record start time.
+
+ Args:
+ run_counter: counter for each run.
+ """
+ self.start = time.time()
+
+ def PostEachRunProcess(self, run_counter):
+ """A method to execute after each run.
+
+ The default behavior is to calculate and store playback time for each run.
+
+ Args:
+ run_counter: counter for each run.
+ """
+ if not self.remove_first_result or run_counter > 0:
+ self.times.append(time.time() - self.start)
+
+
+if __name__ == '__main__':
+ pyauto_functional.Main()
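
A new media test builds on this base class by overriding the hooks above. A
minimal sketch, assuming a hypothetical subclass that adds per-run logging
(media_playbacktime.py above is the simplest real example):

    import pyauto_functional  # Must be imported before pyauto.
    import pyauto

    from media_test_base import MediaTestBase


    class ExampleMediaTest(MediaTestBase):  # hypothetical subclass
      """Example subclass that logs the playback time of each run."""

      def testHTML5MediaTag(self):
        MediaTestBase.ExecuteTest(self)

      def PostEachRunProcess(self, run_counter):
        # Keep the default behavior (it records the playback time), then log.
        MediaTestBase.PostEachRunProcess(self, run_counter)
        if self.times:
          print 'run %d took %.3f sec' % (run_counter, self.times[-1])


    if __name__ == '__main__':
      pyauto_functional.Main()
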
diff --git a/chrome/test/functional/media_test_runner.py b/chrome/test/functional/media_test_runner.py
new file mode 100755
index 0000000..cf597e3
--- /dev/null
+++ b/chrome/test/functional/media_test_runner.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A module to execute a subclass of MediaTastBase class.
+
+This executes a media test class (a subclass of MediaTastBase class) with
+different configuration (parameters) which are passed in the form of
+environment variables (e.g., the number of runs). The location of the
+subclass is passed as one of arguments. An example of invocation is
+"./media_test_runner.py -p ./media_perf.py". In this example,
+media_perf.py will be invoked using the default set of parameters.
+The list of possible combinations of parameters are: T parameter
+for media cache is set/non-set, Chrome flag is set/non-set, data element
+in data source file (CSV file - its content is list form or its content is
+in matrix form),
+"""
+
+import copy
+import csv
+import os
+from optparse import OptionParser
+import shlex
+import sys
+from subprocess import Popen
+
+from media_test_matrix import MediaTestMatrix
+
+EXTRA_NICKNAMES = ['nocache', 'cache']
+# Disable/enable media_cache.
+CHROME_FLAGS = ['--chrome-flags=\'--media-cache-size=1\'', '']
+# T parameter is passed to player.html to disable/enable cache.
+ADD_T_PARAMETERS = ['Y', 'N']
+DEFAULT_PERF_PROG_NAME = 'media_perf.py'
+DEFAULT_PLAYER_HTML_URL = 'DEFAULT'
+DEFAULT_PLAYER_HTML_URL_NICKNAME = 'local'
+PRINT_ONLY_TIME = 'Y'
+REMOVE_FIRST_RESULT = 'N'
+DEFAULT_NUMBER_OF_RUNS = 3
+DEFAULT_MEASURE_INTERVALS = 3
+
+
+def main():
+ input_filename = os.path.join(os.pardir, 'data', 'media', 'dataset.csv')
+ parser = OptionParser()
+ # TODO(imasaki@chromium.org): add parameter verification.
+ parser.add_option(
+ '-i', '--input', dest='input_filename', default=input_filename,
+ help='Data source file (file contents in list form) [defaults to "%s"]' %
+ input_filename, metavar='FILE')
+ parser.add_option(
+ '-s', '--input_matrix', dest='input_matrix_filename',
+ help='Data source file (file contents in matrix form)', metavar='FILE')
+ parser.add_option('-t', '--input_matrix_testcase',
+ dest='input_matrix_testcase_name',
+ help='Run particular test in matrix')
+ parser.add_option('-x', '--video_matrix_home_url',
+ default='',
+ dest='video_matrix_home_url',
+ help='Video Matrix home URL')
+ parser.add_option('-p', '--perf_prog_name', dest='perf_prog_name',
+ default=DEFAULT_PERF_PROG_NAME,
+ help='Performance main program name [defaults to "%s"]' %
+ DEFAULT_PERF_PROG_NAME, metavar='FILE')
+ parser.add_option('-b', '--player_html_url', dest='player_html_url',
+ default=DEFAULT_PLAYER_HTML_URL,
+ help='Player.html URL [defaults to "%s"] ' %
+ DEFAULT_PLAYER_HTML_URL, metavar='FILE')
+ parser.add_option('-u', '--player_html_url_nickname',
+ dest='player_html_url_nickname',
+ default=DEFAULT_PLAYER_HTML_URL_NICKNAME,
+ help='Player.html Nickname [defaults to "%s"]' %
+ DEFAULT_PLAYER_HTML_URL_NICKNAME)
+ parser.add_option('-n', '--number_of_runs', dest='number_of_runs',
+ default=DEFAULT_NUMBER_OF_RUNS,
+ help='The number of runs [defaults to "%d"]' %
+ DEFAULT_NUMBER_OF_RUNS)
+ parser.add_option('-m', '--measure_intervals', dest='measure_intervals',
+ default=DEFAULT_MEASURE_INTERVALS,
+ help='Interval for measurement data [defaults to "%d"]' %
+ DEFAULT_MEASURE_INTERVALS)
+ parser.add_option('-o', '--test-one-combination', dest='one_combination',
+ default=True, # Currently default is True
+ # since we want to test only 1 combination.
+ help='Run only one parameter combination')
+ options, args = parser.parse_args()
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ test_data_list = []
+ if options.input_matrix_filename is None:
+    input_file = open(options.input_filename, 'rb')
+    test_data_list = csv.reader(input_file)
+ # First line contains headers that can be skipped.
+ test_data_list.next()
+ else:
+ # Video_matrix_home_url requires "/" at the end.
+ if not options.video_matrix_home_url.endswith('/'):
+ options.video_matrix_home_url += '/'
+ media_test_matrix = MediaTestMatrix()
+ media_test_matrix.ReadData(options.input_matrix_filename)
+ all_data_list = media_test_matrix.GenerateAllMediaInfosInCompactForm(
+ True, options.video_matrix_home_url)
+ if options.input_matrix_testcase_name is None:
+ # Use all test cases.
+ test_data_list = all_data_list
+ else:
+ # Choose particular test case.
+ media_info = MediaTestMatrix.LookForMediaInfoInCompactFormByNickName(
+ all_data_list, options.input_matrix_testcase_name)
+ if media_info is not None:
+ test_data_list.append(media_info)
+ for tag, filename, nickname in test_data_list:
+ for j in range(len(CHROME_FLAGS)):
+ for k in range(len(ADD_T_PARAMETERS)):
+ parent_envs = copy.deepcopy(os.environ)
+ envs = {
+ 'HTML_TAG': tag,
+ 'MEDIA_FILENAME': filename,
+ 'MEDIA_FILENAME_NICKNAME': nickname,
+ 'PLAYER_HTML_URL': options.player_html_url,
+ 'PLAYER_HTML_URL_NICKNAME': options.player_html_url_nickname,
+ 'EXTRA_NICKNAME': EXTRA_NICKNAMES[j],
+ 'ADD_T_PARAMETER': ADD_T_PARAMETERS[k],
+ 'PRINT_ONLY_TIME': PRINT_ONLY_TIME,
+ 'N_RUNS': str(options.number_of_runs),
+ 'REMOVE_FIRST_RESULT': REMOVE_FIRST_RESULT,
+ 'MEASURE_INTERVALS': str(options.measure_intervals),
+ }
+ envs.update(parent_envs)
+        # The command runs through the shell, so join the program name and
+        # the flag into a single string (a list would drop the flag).
+        cmd = ' '.join([options.perf_prog_name, CHROME_FLAGS[j]])
+ proc = Popen(cmd, env=envs, shell=True)
+ proc.communicate()
+ if options.one_combination:
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
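
Given the options defined above, a few plausible invocations (the matrix file
name, test-case nickname, and home URL are illustrative) are:

    ./media_test_runner.py
    ./media_test_runner.py -p media_playbacktime.py -n 5
    ./media_test_runner.py -s matrix.csv -t bear -x http://host/media/
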
diff --git a/chrome/test/functional/ui_perf_test_measure_thread.py b/chrome/test/functional/ui_perf_test_measure_thread.py
new file mode 100644
index 0000000..3bfa43f
--- /dev/null
+++ b/chrome/test/functional/ui_perf_test_measure_thread.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Thread Module to take measurement (CPU, memory) at certain intervals.
+
+This class has a while loop with sleep. On every iteration it takes
+measurements. The while loop exits when a member variable (stop_measurement)
+is flicked. The parent thread has to set stop_measurement to True, and do
+thread.join() to wait for this thread to terminate.
+"""
+from threading import Thread
+import time
+
+from ui_perf_test_utils import UIPerfTestUtils
+
+
+class UIPerfTestMeasureThread(Thread):
+ """A class to take measurements (CPU, memory) at certain intervals."""
+ # Instance variables that are used across methods.
+ chrome_renderer_process_info = []
+ stop_measurement = False
+ start_time = 0
+
+ def __init__(self, time_interval=1.0):
+ """Init for UIPerfTestMeasureThread.
+
+ Args:
+      time_interval: measurement interval (in seconds). Note that the
+        interval cannot be exact because of thread scheduling and timing
+        jitter.
+ """
+ Thread.__init__(self)
+ self.time_interval = time_interval
+ self.chrome_renderer_process_info = []
+
+ def run(self):
+ """Run method that contains loops for measurement."""
+ self.start_time = time.time()
+    while True:
+      if self.stop_measurement:
+        break
+      measure_start_time = time.time()
+      self._TakeMeasurement()
+      measure_elapsed_time = time.time() - measure_start_time
+      # measure_elapsed_time is already in seconds, same units as
+      # time_interval, so subtract it directly.
+      time_interval = self.time_interval - measure_elapsed_time
+      if time_interval > 0:
+        time.sleep(time_interval)
+
+ def _TakeMeasurement(self):
+ """Take CPU and memory measurement for Chrome renderer process.
+
+ After the measurement, append them to chrome_renderer_process_info
+ for presentation later.
+ """
+ info = UIPerfTestUtils.GetChromeRendererProcessInfo(self.start_time)
+    # GetChromeRendererProcessInfo returns an empty list when no renderer
+    # process is found, so skip empty results.
+    if info:
+ self.chrome_renderer_process_info.append(info)
+
+
+def Main():
+ """Test this thread using sample data and Chrome process information.
+
+ You have to start Chrome before you run this.
+ """
+ chrome_renderer_process_infos = []
+ for i in range(1):
+ # Pre-processing.
+ measure_thread = UIPerfTestMeasureThread()
+ measure_thread.start()
+ # Emulate process to be measured by sleeping.
+ time.sleep(5)
+ # Post-processing.
+ measure_thread.stop_measurement = True
+ measure_thread.join(5)
+    chrome_renderer_process_infos.append(
+        measure_thread.chrome_renderer_process_info)
+
+ chrome_process_info_names = ['measure-time', 'pct-cpu', 'cpu-user',
+ 'cpu-system', 'memory-rss', 'memory-vms',
+ 'pct-process-memory']
+ chrome_process_info_units = ['sec', 'percent', 'load',
+ 'load', 'MB', 'MB', 'percent']
+ print UIPerfTestUtils.PrintMeasuredData(
+ chrome_renderer_process_infos,
+ chrome_process_info_names,
+ chrome_process_info_units,
+ False, 'p', 'title')
+
+
+if __name__ == "__main__":
+ Main()
diff --git a/chrome/test/functional/ui_perf_test_utils.py b/chrome/test/functional/ui_perf_test_utils.py
new file mode 100644
index 0000000..a89b6b5
--- /dev/null
+++ b/chrome/test/functional/ui_perf_test_utils.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module for performance testing using the psutil library.
+
+Ref: http://code.google.com/p/psutil/wiki/Documentation
+
+Most of this module is ported from chrome/test/startup/startup_test.cc and
+chrome/test/ui/ui_perf_test.[h,cc]. We try to stay close to the original C++
+code so that, when the original C++ code changes, it is easy to update this
+module.
+"""
+
+# Standard library imports.
+import re
+import time
+
+# Third-party imports.
+import psutil
+
+
+class UIPerfTestUtils:
+ """Static utility functions for performance testing."""
+
+ @staticmethod
+ def ConvertDataListToString(data_list):
+ """Convert data array to string that can be used for results on BuildBot.
+
+ Full accuracy of the results coming from the psutil library is not needed
+ for Perf on BuildBot. For now, we show 5 digits here. This function goes
+ through the elements in the data_list and does the conversion as well as
+ adding a prefix and suffix.
+
+ Args:
+ data_list: data list contains measured data from perf test.
+
+ Returns:
+ a string that can be used for perf result shown on Buildbot.
+ """
+ output = '['
+ for data in data_list:
+ output += ('%.5f' % data) + ', '
+ # Remove the last ', '.
+ if output.endswith(', '):
+ output = output[:-2]
+ output += ']'
+ return output
+
+ @staticmethod
+ def PrintResultsImpl(measurement, modifier, trace, values, units):
+    """Print results in a format that can be displayed on BuildBot.
+
+    The following formats are acceptable (they can be shown on BuildBot):
+ <*>RESULT <graph_name>: <trace_name>= <value> <units>
+ <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>
+ <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...,] <units>
+
+ Args:
+ measurement: measurement string (such as a parameter list).
+ modifier: modifier string (such as a file name).
+ trace: trace string (not currently used).
+      values: a list of values that are displayed as "[value1, value2, ...]".
+ units: units of values such as "sec" or "msec".
+
+ Returns:
+      an output string that contains all the information.
+ """
+ output_string = '%sRESULT %s%s: %s= %s %s' % (
+ '', measurement, modifier, trace,
+ UIPerfTestUtils.ConvertDataListToString(values), units)
+ return output_string
+
+ @staticmethod
+ def FindProcesses(process_name):
+ """Find processes for a given process name.
+
+ Args:
+ process_name: a process name string to find.
+
+ Returns:
+ a list of psutil process instances that are associated with the given
+ process name.
+ """
+ target_process_list = []
+ for pid in psutil.get_pid_list():
+ try:
+ p = psutil.Process(pid)
+        # An exact match does not work, so check for a substring match.
+ if process_name in p.name:
+ target_process_list.append(p)
+ except psutil.NoSuchProcess:
+ # Do nothing since the process is already terminated
+ pass
+ return target_process_list
+
+ @staticmethod
+ def GetResourceInfo(process, start_time):
+ """Get resource information coming from psutil.
+
+ This calls corresponding functions in psutil and parses the results.
+
+ TODO(imasaki@chromium.org): Modify this function so that it's not
+ hard-coded to return 7 pieces of information. Instead, you have the
+ caller somehow indicate the number and types of information it needs.
+ Then the function finds and returns the requested info.
+
+    Args:
+      process: psutil's Process instance.
+      start_time: the time when the program starts (used for recording
+        measured_time).
+
+ Returns:
+ a process info tuple: measured_time, cpu_time in percent,
+ user cpu time, system cpu time, resident memory size,
+ virtual memory size, and memory usage. None is returned if the
+ resource info cannot be identified.
+ """
+ try:
+ measured_time = time.time()
+ cpu_percent = process.get_cpu_percent(interval=1.0)
+ memory_percent = process.get_memory_percent()
+ m1 = re.search(r'cputimes\(user=(\S+),\s+system=(\S+)\)',
+ str(process.get_cpu_times()))
+ m2 = re.search(r'meminfo\(rss=(\S+),\s+vms=(\S+)\)',
+ str(process.get_memory_info()))
+
+ cputimes_user = float(m1.group(1))
+ cputimes_system = float(m1.group(2))
+
+ # Convert Bytes to MBytes.
+ memory_rss = float(m2.group(1)) / 1000000
+ memory_vms = float(m2.group(2)) / 1000000
+
+ return (measured_time - start_time, cpu_percent, cputimes_user,
+ cputimes_system, memory_rss, memory_vms, memory_percent)
+
+ except psutil.NoSuchProcess:
+ # Do nothing since the process is already terminated.
+ # This may happen due to race condition.
+ return None
+
+ @staticmethod
+ def IsChromeRendererProcess(process):
+ """Check whether the given process is a Chrome Renderer process.
+
+ Args:
+ process: a psutil's Process instance.
+
+ Returns:
+ True if process is a Chrome renderer process. False otherwise.
+ """
+ for line in process.cmdline:
+ if 'type=renderer' in line:
+ return True
+ return False
+
+ @staticmethod
+ def GetChromeRendererProcessInfo(start_time):
+    """Get Chrome renderer process information using psutil.
+
+    Args:
+      start_time: the time when the program starts (used for recording
+        measured_time).
+
+    Returns:
+ a renderer process info tuple: measured_time, cpu_time in
+ percent, user cpu time, system cpu time, resident memory size, virtual
+ memory size, and memory usage. Or returns an empty list if the Chrome
+ renderer process is not found.
+ """
+ chrome_process_list = UIPerfTestUtils.FindProcesses('chrome')
+ for p in chrome_process_list:
+ if UIPerfTestUtils.IsChromeRendererProcess(p):
+ # Return the first renderer process's resource info.
+ resource_info = UIPerfTestUtils.GetResourceInfo(p, start_time)
+ if resource_info is not None:
+ return resource_info
+ return []
+
+ @staticmethod
+ def __getMaxDataLength(chrome_renderer_process_infos):
+    """Get the maximum data length of the renderer process info.
+
+    This method is necessary since each run may have a different data length,
+    so the maximum is needed to avoid dropping data.
+
+    Args:
+      chrome_renderer_process_infos: a list of measured data (CPU and memory)
+        taken at certain intervals over several runs. Each run contains
+        several measurements:
+        info -> 0th run -> 0th time -> time stamp, CPU data and memory data
+                        -> 1st time -> time stamp, CPU data and memory data
+                        .....
+             -> 1st run -> 0th time -> time stamp, CPU data and memory data
+        Each run may have a different number of measurements.
+
+ Returns:
+ max data length among all runs.
+ """
+ maximum = len(chrome_renderer_process_infos[0])
+ for info in chrome_renderer_process_infos:
+ if maximum < len(info):
+ maximum = len(info)
+ return maximum
+
+ @staticmethod
+ def PrintMeasuredData(measured_data_list, measured_data_name_list,
+ measured_data_unit_list, remove_first_result,
+ parameter_string, title):
+    """Format the measured results from all runs into a string that can be
+    shown on BuildBot.
+
+ Args:
+ measured_data_list: measured_data_list that contains a list of measured
+ data at certain intervals over several runs. Each run should contain
+ the timestamp of the measured time as well.
+ info -> 0th run -> 0th time -> list of measured data
+ (defined in measured_data_name_list)
+ -> 1st time -> list of measured data
+ .....
+ -> 1st run -> 0th time -> list of measured data
+        Each run may have a different number of measurements.
+      measured_data_name_list: a list of names for the elements of
+        measured_data_list (such as 'measure-time', 'pct-cpu'). The size of
+        this list should be the same as the size of measured_data_unit_list.
+      measured_data_unit_list: a list of names of the units for the elements
+        of measured_data_list. The size of this list should be the same as
+        the size of measured_data_name_list.
+ remove_first_result: a boolean for removing the first result
+ (the first result contains browser startup time).
+ parameter_string: a string that contains all parameters used.
+      title: a title string for identifying perf data.
+
+    Returns:
+      an output string that contains all the information.
+    """
+ output_string = ''
+ for i in range(len(measured_data_name_list)):
+ max_data_length = UIPerfTestUtils.__getMaxDataLength(
+ measured_data_list)
+ for time_index in range(max_data_length):
+ psutil_data = []
+ for counter in range(len(measured_data_list)):
+ if not remove_first_result or counter > 0:
+ data_length_for_each = (
+ len(measured_data_list[counter]))
+ if (data_length_for_each > time_index):
+ data = measured_data_list[counter][time_index][i]
+ psutil_data.append(data)
+ name = measured_data_name_list[i] + '-' + str(time_index)
+ output_string += UIPerfTestUtils.PrintResultsImpl(
+ parameter_string + '-', name, title, psutil_data,
+ measured_data_unit_list[i]) + '\n'
+ return output_string
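
As a quick orientation (Chrome must already be running, and the names below
are only illustrative), these utilities can be exercised on their own roughly
like this:

    import time

    from ui_perf_test_utils import UIPerfTestUtils

    start_time = time.time()
    # Sample the first Chrome renderer process once. The result is a 7-tuple:
    # (measure-time, pct-cpu, cpu-user, cpu-system, memory-rss, memory-vms,
    #  pct-process-memory), or an empty list if no renderer is found.
    info = UIPerfTestUtils.GetChromeRendererProcessInfo(start_time)
    if info:
      print UIPerfTestUtils.PrintResultsImpl('example-', 'pct-cpu', 'renderer',
                                             [info[1]], 'percent')
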
diff --git a/chrome/test/functional/ui_perf_test_utils_unittest.py b/chrome/test/functional/ui_perf_test_utils_unittest.py
new file mode 100644
index 0000000..b67d425
--- /dev/null
+++ b/chrome/test/functional/ui_perf_test_utils_unittest.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+import unittest
+
+from ui_perf_test_utils import UIPerfTestUtils
+
+
+class TestUIPerfUtils(unittest.TestCase):
+  """Test the UIPerfTestUtils class."""
+
+ def testConvertDataListToString(self):
+ times = [1.023344324, 2.3233333, 2.442324444]
+ output_string = UIPerfTestUtils.ConvertDataListToString(times)
+ self.assertEqual(output_string, '[1.02334, 2.32333, 2.44232]',
+ 'result output is wrong')
+
+ def testPrintResultsImpl(self):
+    """Test the PrintResultsImpl method."""
+ times = [1.023, 2.323, 2.44232]
+ output_string = UIPerfTestUtils.PrintResultsImpl('playback', '', 'bear',
+ times, 'ms')
+ self.assertEqual(output_string,
+ 'RESULT playback: bear= [1.02300, 2.32300, 2.44232] ms',
+ 'result output is wrong')
+
+ def testPrintResultsImplEmptyData(self):
+    """Test the PrintResultsImpl method with empty data."""
+ times = []
+ output_string = UIPerfTestUtils.PrintResultsImpl('playback', '', 'bear',
+ times, 'ms')
+ self.assertEqual(output_string,
+ 'RESULT playback: bear= [] ms',
+ 'result output is wrong')
+
+ def testFindProcessesAndGetResourceInfo(self):
+ """Test FindProcesses and GetResourceInfo methods.
+
+    A python process should be found while this script runs. Assert that all
+    elements in the process info are not None.
+ """
+    processes = UIPerfTestUtils.FindProcesses('python')
+    self.assertTrue(len(processes) > 0, 'python process cannot be found')
+    info = UIPerfTestUtils.GetResourceInfo(processes[0], time.time())
+ self._AssertProcessInfo(info)
+
+ def GetChromeRendererProcessInfo(self):
+ """Test GetChromeRendererProcessInfo method.
+
+    You must start Chrome before you run this test; otherwise, it fails.
+ So, this test is not included in the unit test (i.e., the method name
+ does not start with "test").
+
+ TODO(imasaki@chromium.org): find a way to start Chrome automatically.
+ """
+ start_time = time.time()
+ info = UIPerfTestUtils.GetChromeRendererProcessInfo(start_time)
+ self._AssertProcessInfo(info)
+
+ def _AssertProcessInfo(self, info):
+    """Assert process info has the correct length and no element is None."""
+ # See UIPerfTestUtils.chrome_process_info_names.
+ self.assertEqual(len(info), 7, 'the length of info should be 7')
+ for i in range(len(info)):
+ self.assertTrue(info[i] is not None, 'process info has None data')
+
+ def _CreateFakeProcessInfo(self, time, process_info_length):
+ """Create fake process info for testing.
+
+    Args:
+      time: time used for measured_time.
+      process_info_length: the number of elements to put in the fake info.
+
+ Returns:
+ a process info with some data for testing.
+ """
+ chrome_renderer_process_info = []
+ for i in range(process_info_length):
+ chrome_renderer_process_info.append(i + time)
+ return chrome_renderer_process_info
+
+ def testPrintMeasuredData(self):
+ # Build process info for testing.
+ chrome_renderer_process_infos = []
+ run_info1 = []
+ run_info1.append(self._CreateFakeProcessInfo(10, 7))
+ run_info1.append(self._CreateFakeProcessInfo(20, 7))
+ chrome_renderer_process_infos.append(run_info1)
+ run_info2 = []
+ run_info2.append(self._CreateFakeProcessInfo(10, 7))
+ chrome_renderer_process_infos.append(run_info2)
+ chrome_process_info_names = ['measure-time', 'pct-cpu', 'cpu-user',
+ 'cpu-system', 'memory-rss', 'memory-vms',
+ 'pct-process-memory']
+ chrome_process_info_units = ['sec', 'percent', 'load',
+ 'load', 'MB', 'MB', 'percent']
+ output_string = UIPerfTestUtils.PrintMeasuredData(
+ chrome_renderer_process_infos,
+ chrome_process_info_names,
+ chrome_process_info_units,
+ False, 'p', 'title')
+ expected_output_string = (
+ 'RESULT p-measure-time-0: title= [10.00000, 10.00000] sec\n'
+ 'RESULT p-measure-time-1: title= [20.00000] sec\n'
+ 'RESULT p-pct-cpu-0: title= [11.00000, 11.00000] percent\n'
+ 'RESULT p-pct-cpu-1: title= [21.00000] percent\n'
+ 'RESULT p-cpu-user-0: title= [12.00000, 12.00000] load\n'
+ 'RESULT p-cpu-user-1: title= [22.00000] load\n'
+ 'RESULT p-cpu-system-0: title= [13.00000, 13.00000] load\n'
+ 'RESULT p-cpu-system-1: title= [23.00000] load\n'
+ 'RESULT p-memory-rss-0: title= [14.00000, 14.00000] MB\n'
+ 'RESULT p-memory-rss-1: title= [24.00000] MB\n'
+ 'RESULT p-memory-vms-0: title= [15.00000, 15.00000] MB\n'
+ 'RESULT p-memory-vms-1: title= [25.00000] MB\n'
+ 'RESULT p-pct-process-memory-0: title= [16.00000, 16.00000] percent\n'
+ 'RESULT p-pct-process-memory-1: title= [26.00000] percent\n')
+ self.assertEqual(output_string, expected_output_string,
+ 'output string is wrong')