author | dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-01-05 00:58:33 +0000
committer | dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-01-05 00:58:33 +0000
commit | e2ae971ff70e0a6ee1cab35750fae2900628ed40 (patch)
tree | 98ea62d45ced41877720b2ca56466df21de1291a
parent | ca0e201094b6f34d4703b3a8daa88e5328c60bc4 (diff)
download | chromium_src-e2ae971ff70e0a6ee1cab35750fae2900628ed40.zip, chromium_src-e2ae971ff70e0a6ee1cab35750fae2900628ed40.tar.gz, chromium_src-e2ae971ff70e0a6ee1cab35750fae2900628ed40.tar.bz2
Support long-running graphs in output of pyauto perf tests, and add a new Gmail test.
I'm adding a new Gmail test that I plan to get running continuously
for the Chrome Endure effort. Once it is running, I intend to remove the
other sample Gmail memory bloat test in perf.py.
BUG=chromium-os:22535
TEST=None
Review URL: http://codereview.chromium.org/9034036
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116421 0039d316-1c4b-4281-b951-d872f2087c98
-rwxr-xr-x | chrome/test/functional/perf.py | 151
-rw-r--r-- | chrome/test/functional/perf_endure.py | 197
2 files changed, 341 insertions(+), 7 deletions(-)
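For reference, the long-running output path added here works by passing a list of (x, y) tuples together with a units_x label to _OutputPerfGraphValue(), as the new Gmail test does. A minimal sketch of such a caller, with illustrative numbers that are not taken from this change:

    # Accumulated (iteration, measurement) pairs from a long-running test.
    heap_size_results = [(0, 52.3), (1, 55.0), (2, 57.8)]  # illustrative values
    self._OutputPerfGraphValue(
        'HeapSize', heap_size_results, 'MB', graph_name='Gmail-Heap',
        units_x='iteration')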
diff --git a/chrome/test/functional/perf.py b/chrome/test/functional/perf.py
index 1cd5764..858b655 100755
--- a/chrome/test/functional/perf.py
+++ b/chrome/test/functional/perf.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -29,6 +29,7 @@ import os
 import posixpath
 import re
 import SimpleHTTPServer
+import simplejson
 import SocketServer
 import sys
 import tempfile
@@ -66,6 +67,20 @@ class BasePerfTest(pyauto.PyUITest):
     if 'MAX_TIMEOUT_COUNT' in os.environ:
       self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
     self._timeout_count = 0
+
+    # For users who want to see local perf graphs for Chrome when running the
+    # tests on their own machines.
+    self._local_perf_dir = None
+    if 'LOCAL_PERF_DIR' in os.environ:
+      self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
+      if not os.path.exists(self._local_perf_dir):
+        self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
+                  'but this directory does not exist.' % self._local_perf_dir)
+    # When outputting perf graph information on-the-fly for Chrome, this
+    # variable lets us know whether a perf measurement is for a new test
+    # execution, or the current test execution.
+    self._seen_graph_lines = {}
+
     pyauto.PyUITest.setUp(self)
 
   def _AppendTab(self, url):
@@ -113,8 +128,100 @@ class BasePerfTest(pyauto.PyUITest):
       std_dev = math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))
     return avg, std_dev
 
+  def _OutputDataForStandaloneGraphing(self, graph_name, description, value,
+                                       units, units_x):
+    """Outputs perf measurement data to a local folder to be graphed.
+
+    This function only applies to Chrome desktop, and assumes that environment
+    variable 'LOCAL_PERF_DIR' has been specified and refers to a valid directory
+    on the local machine.
+
+    Args:
+      graph_name: A string name for the graph associated with this performance
+                  value.
+      description: A string description of the performance value. Should not
+                   include spaces.
+      value: Either a single numeric value representing a performance
+             measurement, or else a list of (x, y) tuples representing one or
+             more long-running performance measurements, where 'x' is an x-axis
+             value (such as an iteration number) and 'y' is the corresponding
+             performance measurement. If a list of tuples is given, then the
+             |units_x| argument must also be specified.
+      units: A string representing the units of the performance measurement(s).
+             Should not include spaces.
+      units_x: A string representing the units of the x-axis values associated
+               with the performance measurements, such as 'iteration' if the x
+               values are iteration numbers. If this argument is specified,
+               then the |value| argument must be a list of (x, y) tuples.
+    """
+    # Update graphs.dat.
+    existing_graphs = []
+    graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
+    if os.path.exists(graphs_file):
+      with open(graphs_file, 'r') as f:
+        existing_graphs = simplejson.loads(f.read())
+    is_new_graph = True
+    for graph in existing_graphs:
+      if graph['name'] == graph_name:
+        is_new_graph = False
+        break
+    if is_new_graph:
+      new_graph = {
+        'name': graph_name,
+        'units': units,
+        'important': False,
+      }
+      if units_x:
+        new_graph['units_x'] = units_x
+      existing_graphs.append(new_graph)
+      with open(graphs_file, 'w') as f:
+        f.write(simplejson.dumps(existing_graphs))
+      os.chmod(graphs_file, 0755)
+
+    # Update data file for this particular graph.
+    data_file_name = graph_name + '-summary.dat'
+    existing_lines = []
+    data_file = os.path.join(self._local_perf_dir, data_file_name)
+    if os.path.exists(data_file):
+      with open(data_file, 'r') as f:
+        existing_lines = f.readlines()
+      existing_lines = map(lambda x: x.strip(), existing_lines)
+    if units_x:
+      points = []
+      for point in value:
+        points.append([str(point[0]), str(point[1])])
+      new_traces = {
+        description: points
+      }
+    else:
+      new_traces = {
+        description: [str(value), str(0.0)]
+      }
+    revision = 1
+    if existing_lines:
+      revision = int(eval(existing_lines[0])['rev']) + 1
+    new_line = {
+      'traces': new_traces,
+      'rev': revision
+    }
+
+    seen_key = graph_name + '|' + description
+    if seen_key in self._seen_graph_lines:
+      # Update results for the most recent revision.
+      new_line['rev'] = int(eval(existing_lines[0])['rev'])
+      existing_lines[0] = new_line
+    else:
+      # New results for a new revision.
+      existing_lines.insert(0, new_line)
+      self._seen_graph_lines[seen_key] = True
+
+    existing_lines = map(str, existing_lines)
+    with open(data_file, 'w') as f:
+      f.write('\n'.join(existing_lines))
+    os.chmod(data_file, 0755)
+
   def _OutputPerfGraphValue(self, description, value, units,
-                            graph_name='Default-Graph'):
+                            graph_name='Default-Graph', units_x=None):
     """Outputs a performance value to have it graphed on the performance bots.
 
     The output format differs, depending on whether the current platform is
@@ -131,13 +238,28 @@ class BasePerfTest(pyauto.PyUITest):
     Args:
       description: A string description of the performance value. Should not
                    include spaces.
-      value: A numeric value representing a single performance measurement.
-      units: A string representing the units of the performance value. Should
-             not include spaces.
+      value: Either a single numeric value representing a performance
+             measurement, or a list of (x, y) tuples representing one or
+             more long-running performance measurements, where 'x' is an x-axis
+             value (such as an iteration number) and 'y' is the corresponding
+             performance measurement. If a list of tuples is given, the
+             |units_x| argument must also be specified.
+      units: A string representing the units of the performance measurement(s).
+             Should not include spaces.
       graph_name: A string name for the graph associated with this performance
                   value. Only used on Chrome desktop.
-
+      units_x: A string representing the units of the x-axis values associated
+               with the performance measurements, such as 'iteration' if the x
+               values are iteration numbers. If this argument is specified,
+               then the |value| argument must be a list of (x, y) tuples.
     """
+    if isinstance(value, list):
+      assert units_x
+    if units_x:
+      assert isinstance(value, list)
+
+    # TODO(dennisjeffrey): Support long-running performance measurements on
+    # ChromeOS: crosbug.com/21881.
     if self.IsChromeOS():
       perf_key = '%s_%s' % (units, description)
       if len(perf_key) > 30:
@@ -149,7 +271,19 @@ class BasePerfTest(pyauto.PyUITest):
                                 self._PERF_OUTPUT_MARKER_POST)
       sys.stdout.flush()
     else:
-      pyauto_utils.PrintPerfResult(graph_name, description, value, units)
+      if units_x:
+        # TODO(dennisjeffrey): Once changes to the Chrome graphing
+        # infrastructure are committed to support graphs for long-running perf
+        # tests (crosbug.com/21881), revise the output format in the following
+        # line if necessary.
+        pyauto_utils.PrintPerfResult(graph_name, description, value,
+                                     units + ' ' + units_x)
+      else:
+        pyauto_utils.PrintPerfResult(graph_name, description, value, units)
+
+    if self._local_perf_dir:
+      self._OutputDataForStandaloneGraphing(
+          graph_name, description, value, units, units_x)
 
   def _PrintSummaryResults(self, description, values, units,
                            graph_name='Default-Graph'):
@@ -1059,6 +1193,9 @@ class MemoryBloatTest(BasePerfTest):
     self._snapshot_results.append(snapshot[0])
     return elapsed_time
 
+  # TODO(dennisjeffrey): Remove this test once pyauto test
+  # perf_endure.ChromeEndureGmailTest.testGmailComposeDiscard starts running
+  # continuously.
   def GmailBloat(self):
     """Interact with Gmail while periodically taking v8 heap snapshots.
 
diff --git a/chrome/test/functional/perf_endure.py b/chrome/test/functional/perf_endure.py
new file mode 100644
index 0000000..21ac58c
--- /dev/null
+++ b/chrome/test/functional/perf_endure.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Performance tests for Chrome Endure (long-running perf tests on Chrome).
+"""
+
+import logging
+import time
+
+import perf
+import perf_snapshot
+import pyauto_functional  # Must be imported before pyauto.
+import pyauto
+
+
+class ChromeEndureGmailTest(perf.BasePerfTest):
+  """Long-running performance tests for Chrome."""
+
+  def setUp(self):
+    perf.BasePerfTest.setUp(self)
+
+    # Set up an object that takes v8 heap snapshots of the first opened tab
+    # (index 0).
+    self._snapshotter = perf_snapshot.PerformanceSnapshotter()
+    self._snapshot_results = []
+
+  def ExtraChromeFlags(self):
+    """Ensures Chrome is launched with custom flags.
+
+    Returns:
+      A list of extra flags to pass to Chrome when it is launched.
+    """
+    # Ensure Chrome enables remote debugging on port 9222. This is required to
+    # take v8 heap snapshots of tabs in Chrome.
+    return (perf.BasePerfTest.ExtraChromeFlags(self) +
+            ['--remote-debugging-port=9222'])
+
+  def _TakeHeapSnapshot(self):
+    """Takes a v8 heap snapshot using |self._snapshotter| and stores the result.
+
+    This function will fail the current test if no snapshot can be taken.
+
+    Returns:
+      The number of seconds it took to take the heap snapshot.
+    """
+    start_time = time.time()
+    snapshot = self._snapshotter.HeapSnapshot()
+    elapsed_time = time.time() - start_time
+    self.assertTrue(snapshot, msg='Failed to take a v8 heap snapshot.')
+    self._snapshot_results.append(snapshot[0])
+    return elapsed_time
+
+  def testGmailComposeDiscard(self):
+    """Interact with Gmail while periodically taking v8 heap snapshots.
+
+    This test continually composes/discards an e-mail using Gmail, and
+    periodically takes v8 heap snapshots that may reveal memory bloat.
+    """
+    # The following cannot yet be imported on ChromeOS.
+    import selenium.common.exceptions
+    from selenium.webdriver.support.ui import WebDriverWait
+
+    # Log into a test Google account and open up Gmail.
+    self._LoginToGoogleAccount()
+    self.NavigateToURL('http://www.gmail.com')
+    loaded_tab_title = self.GetActiveTabTitle()
+    self.assertTrue('Gmail' in loaded_tab_title,
+                    msg='Loaded tab title does not contain "Gmail": "%s"' %
+                        loaded_tab_title)
+
+    driver = self.NewWebDriver()
+    # Any call to wait.until() will raise an exception if the timeout is hit.
+    wait = WebDriverWait(driver, timeout=60)
+
+    def _SwitchToCanvasFrame(driver):
+      """Switch the WebDriver to Gmail's 'canvas_frame', if it's available.
+
+      Args:
+        driver: A selenium.webdriver.remote.webdriver.WebDriver object.
+
+      Returns:
+        True, if the switch to Gmail's 'canvas_frame' is successful, or
+        False if not.
+      """
+      try:
+        driver.switch_to_frame('canvas_frame')
+        return True
+      except selenium.common.exceptions.NoSuchFrameException:
+        return False
+
+    def _GetElement(find_by, value):
+      """Gets a WebDriver element object from the webpage DOM.
+
+      Args:
+        find_by: A callable that queries WebDriver for an element from the DOM.
+        value: A string value that can be passed to the |find_by| callable.
+
+      Returns:
+        The identified WebDriver element object, if found in the DOM, or
+        None, otherwise.
+      """
+      try:
+        return find_by(value)
+      except selenium.common.exceptions.NoSuchElementException:
+        return None
+
+    # Wait until Gmail's 'canvas_frame' loads and the 'Inbox' link is present.
+    # TODO(dennisjeffrey): Check with the Gmail team to see if there's a better
+    # way to tell when the webpage is ready for user interaction.
+    wait.until(_SwitchToCanvasFrame)  # Raises exception if the timeout is hit.
+    # Wait for the inbox to appear.
+    wait.until(lambda _: _GetElement(
+        driver.find_element_by_partial_link_text, 'Inbox'))
+
+    # Interact with Gmail for awhile. Here, we repeat the following sequence of
+    # interactions: click the "Compose" button, enter some text into the "To"
+    # field, enter some text into the "Subject" field, then click the "Discard"
+    # button to discard the message.
+    heap_size_results = []
+    node_count_results = []
+    snapshot_iteration = 0
+    num_iterations = 501
+    for i in xrange(num_iterations):
+      logging.info('Chrome interaction iteration %d of %d.' % (
+          i + 1, num_iterations))
+
+      compose_button = wait.until(lambda _: _GetElement(
+          driver.find_element_by_xpath,
+          '//div[text()="COMPOSE"]'))
+      compose_button.click()
+
+      to_field = wait.until(lambda _: _GetElement(
+          driver.find_element_by_name, 'to'))
+      to_field.send_keys('nobody@nowhere.com')
+
+      subject_field = wait.until(lambda _: _GetElement(
+          driver.find_element_by_name, 'subject'))
+      subject_field.send_keys('This message is about to be discarded')
+
+      discard_button = wait.until(lambda _: _GetElement(
+          driver.find_element_by_xpath,
+          '//div[text()="Discard"]'))
+      discard_button.click()
+
+      # Wait for the message to be discarded, assumed to be true after the
+      # "To" field is removed from the webpage DOM.
+      wait.until(lambda _: not _GetElement(
+          driver.find_element_by_name, 'to'))
+
+      # Snapshot after the first iteration, then every 50 iterations after that.
+      if i % 50 == 0:
+        logging.info('Taking heap snapshot...')
+        sec_to_snapshot = self._TakeHeapSnapshot()
+        logging.info('Snapshot taken (%.2f sec).' % sec_to_snapshot)
+
+        assert len(self._snapshot_results) >= 1
+        base_timestamp = self._snapshot_results[0]['timestamp']
+
+        snapshot_info = self._snapshot_results[-1]  # Get the last snapshot.
+        logging.info('Snapshot time: %.2f sec' % (
+            snapshot_info['timestamp'] - base_timestamp))
+        heap_size = snapshot_info['total_heap_size'] / (1024.0 * 1024.0)
+        logging.info(' Total heap size: %.2f MB' % heap_size)
+        heap_size_results.append((snapshot_iteration, heap_size))
+
+        node_count = snapshot_info['total_node_count']
+        logging.info(' Total node count: %d nodes' % node_count)
+        node_count_results.append((snapshot_iteration, node_count))
+        snapshot_iteration += 1
+
+    self._OutputPerfGraphValue(
+        'HeapSize', heap_size_results, 'MB', graph_name='Gmail-Heap',
+        units_x='iteration')
+    self._OutputPerfGraphValue(
+        'TotalNodeCount', node_count_results, 'nodes',
+        graph_name='Gmail-Nodes', units_x='iteration')
+
+    # Output other snapshot results.
+    assert len(self._snapshot_results) >= 1
+    max_heap_size = 0
+    max_node_count = 0
+    for index, snapshot_info in enumerate(self._snapshot_results):
+      heap_size = snapshot_info['total_heap_size'] / (1024.0 * 1024.0)
+      if heap_size > max_heap_size:
+        max_heap_size = heap_size
+      node_count = snapshot_info['total_node_count']
+      if node_count > max_node_count:
+        max_node_count = node_count
+    self._OutputPerfGraphValue(
+        'HeapSize', max_heap_size, 'MB', graph_name='Gmail-Heap-Max')
+    self._OutputPerfGraphValue(
+        'TotalNodeCount', max_node_count, 'nodes', graph_name='Gmail-Nodes-Max')
+
+
+if __name__ == '__main__':
+  pyauto_functional.Main()
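For anyone setting LOCAL_PERF_DIR to graph results locally, _OutputDataForStandaloneGraphing() maintains one graphs.dat file plus one <graph_name>-summary.dat file per graph. A rough sketch of the resulting files after a single run of the Gmail test (the numbers are illustrative; per the code above, graphs.dat holds JSON while each summary line is a stringified Python dict, newest revision first):

    graphs.dat:
        [{"name": "Gmail-Heap", "units": "MB", "important": false, "units_x": "iteration"}]

    Gmail-Heap-summary.dat:
        {'traces': {'HeapSize': [['0', '52.3'], ['1', '55.0']]}, 'rev': 1}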