author    dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-10-30 01:03:09 +0000
committer dennisjeffrey@chromium.org <dennisjeffrey@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-10-30 01:03:09 +0000
commit    04215706b90be3d983d4f003340ac0465f9447de (patch)
tree      0fadcaa5da5bf3f4462df352a035f60e1fa376bf /chrome/test/functional
parent    1f18bfe815cb85d18e42fdc80692828be710ad31 (diff)
Pyauto-based perf tests now also output perf results in buildbot format on chromeOS.
When the pyauto-based perf tests are run on chromeOS, they previously output
perf results only in the format understood by autotest: a string description
paired with a single numeric perf value. Buildbot, however, expects a
different format consisting of a graph name, a description, units, and one or
more numeric perf values (which can later be averaged and have standard
deviations computed). With this change, perf tests run on chromeOS output
results in both the autotest format and the buildbot format, so that both
systems can scan the tests' stdout to determine which perf keyvals to record.
This is needed because there are plans to run these perf tests on chromeOS
via buildbot.

BUG=None
TEST=Verified that one of the perf tests still runs on chromeOS with these
changes, and that results are written to stdout in both autotest and buildbot
format.

Review URL: https://chromiumcodereview.appspot.com/11264051

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@164803 0039d316-1c4b-4281-b951-d872f2087c98
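For reference, below is a minimal sketch of the two output styles the commit
message contrasts. It is illustrative only: the marker strings are
placeholders standing in for perf.py's _PERF_OUTPUT_MARKER_PRE and
_PERF_OUTPUT_MARKER_POST, and the 'RESULT' line layout is an assumption about
what pyauto_utils.PrintPerfResult writes for buildbot, not verbatim output.

# Illustrative sketch only -- not part of this commit.
def print_autotest_result(perf_key, value):
  # Autotest format: one key and one numeric value, wrapped in marker strings
  # so autotest can locate the result when scanning stdout.
  marker_pre = '<marker-pre>'    # placeholder for _PERF_OUTPUT_MARKER_PRE
  marker_post = '<marker-post>'  # placeholder for _PERF_OUTPUT_MARKER_POST
  print('\n%s(\'%s\', %f)%s' % (marker_pre, perf_key, value, marker_post))

def print_buildbot_result(graph_name, description, values, units):
  # Buildbot format: graph name, trace description, units, and one or more
  # values; passing the full list lets buildbot average the values and
  # compute a standard deviation later.
  value_str = '[%s]' % ', '.join(str(v) for v in values)
  print('RESULT %s: %s= %s %s' % (graph_name, description, value_str, units))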
Diffstat (limited to 'chrome/test/functional')
-rwxr-xr-x  chrome/test/functional/perf.py | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/chrome/test/functional/perf.py b/chrome/test/functional/perf.py
index 4868199..6e5454a 100755
--- a/chrome/test/functional/perf.py
+++ b/chrome/test/functional/perf.py
@@ -414,15 +414,16 @@ class BasePerfTest(pyauto.PyUITest):
       assert isinstance(value, list)
 
     if self.IsChromeOS():
-      # ChromeOS results don't support lists.
+      # Autotest doesn't support result lists.
+      autotest_value = value
       if (isinstance(value, list) and value[0] is not None and
           not isinstance(value[0], tuple)):
-        value = Mean(value)
+        autotest_value = Mean(value)
 
       if units_x:
         # TODO(dennisjeffrey): Support long-running performance measurements on
         # ChromeOS in a way that can be graphed: crosbug.com/21881.
-        pyauto_utils.PrintPerfResult(graph_name, description, value,
+        pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
                                      units + ' ' + units_x)
       else:
         # Output short-running performance results in a format understood by
@@ -433,8 +434,14 @@ class BasePerfTest(pyauto.PyUITest):
                           '(length 30) when added to the autotest database.',
                           perf_key, perf_key[:30])
         print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
-                                      perf_key, value,
+                                      perf_key, autotest_value,
                                       self._PERF_OUTPUT_MARKER_POST)
+
+        # Also output results in the format recognized by buildbot, for cases
+        # in which these tests are run on chromeOS through buildbot. Since
+        # buildbot supports result lists, it's ok for |value| to be a list here.
+        pyauto_utils.PrintPerfResult(graph_name, description, value, units)
+
         sys.stdout.flush()
     else:
       # TODO(dmikurube): Support stacked graphs in PrintPerfResult.
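Continuing the sketch above with hypothetical names and values, a run on
chromeOS would now emit both lines to stdout: the autotest line carries a
single number (the mean of the list), while the buildbot line carries the
full list of values.

# Hypothetical usage of the sketch above; the key, graph name, and values
# are invented for illustration.
values = [210.0, 215.5, 212.3]                  # e.g. page-load times in ms
print_autotest_result('milliseconds_PageLoad', sum(values) / len(values))
print_buildbot_result('page_load', 'PageLoad', values, 'milliseconds')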