author    simonhatch <simonhatch@chromium.org>    2014-11-27 15:33:03 -0800
committer Commit bot <commit-bot@chromium.org>    2014-11-27 23:33:31 +0000
commit    be8da0c1118ded22b80c1aa9c4f89cbfe5d74024 (patch)
tree      be1036c1cac526ad2b886a265faf4f532dfd594b /build/android
parent    5dd7bbe34534f83c55c58e7666c78f28e882acc0 (diff)
Add chartjson support to android test_runner.

This is needed so that we can retrieve the chartjson output from android perf tests to display on the waterfall and send to the dashboard (once chartjson is enabled).

To be followed up by: https://codereview.chromium.org/753073007/

BUG=422174

Review URL: https://codereview.chromium.org/759753008

Cr-Commit-Position: refs/heads/master@{#306029}
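As a rough sketch of the intended usage (the steps file and step name below are hypothetical; only the flags come from this change), one invocation runs the perf steps and caches each step's chartjson, and a second invocation writes the cached chartjson for a given step out to a file:

    # Run the perf steps, caching each step's results-chart.json (sketch).
    build/android/test_runner.py perf --steps=perf_steps.json --collect-chartjson-data

    # Later, write the cached chartjson for one step to a file.
    build/android/test_runner.py perf --print-step=my_perf_step \
        --output-chartjson-data=/tmp/results-chart.json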
Diffstat (limited to 'build/android')
-rw-r--r--  build/android/pylib/perf/test_options.py   2
-rw-r--r--  build/android/pylib/perf/test_runner.py    32
-rwxr-xr-x  build/android/test_runner.py               15
3 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/build/android/pylib/perf/test_options.py b/build/android/pylib/perf/test_options.py
index b04d748..0a0ace0 100644
--- a/build/android/pylib/perf/test_options.py
+++ b/build/android/pylib/perf/test_options.py
@@ -15,4 +15,6 @@ PerfOptions = collections.namedtuple('PerfOptions', [
    'test_filter',
    'dry_run',
    'single_step',
+    'collect_chartjson_data',
+    'output_chartjson_data',
])
diff --git a/build/android/pylib/perf/test_runner.py b/build/android/pylib/perf/test_runner.py
index c8563a6..f70358d 100644
--- a/build/android/pylib/perf/test_runner.py
+++ b/build/android/pylib/perf/test_runner.py
@@ -52,7 +52,9 @@ import json
import logging
import os
import pickle
+import shutil
import sys
+import tempfile
import threading
import time
@@ -73,6 +75,14 @@ def OutputJsonList(json_input, json_output):
  return 0


+def OutputChartjson(test_name, json_file_name):
+  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
+  with file(file_name, 'r') as f:
+    persisted_result = pickle.load(f)
+  with open(json_file_name, 'w') as o:
+    o.write(persisted_result['chartjson'])
+
+
def PrintTestOutput(test_name):
  """Helper method to print the output of previously executed test_name.
@@ -168,6 +178,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
    self._max_shard = max_shard
    self._tests = tests
    self._flaky_tests = flaky_tests
+    self._output_dir = None

  @staticmethod
  def _IsBetter(result):
@@ -198,6 +209,19 @@ class TestRunner(base_test_runner.BaseTestRunner):
                 test_name, self.device_serial, affinity, self._shard_index)
    return False

+  def _CleanupOutputDirectory(self):
+    if self._output_dir:
+      shutil.rmtree(self._output_dir, ignore_errors=True)
+      self._output_dir = None
+
+  def _ReadChartjsonOutput(self):
+    if not self._output_dir:
+      return ''
+
+    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
+    with open(json_output_path) as f:
+      return f.read()
+
  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.
@@ -220,6 +244,11 @@ class TestRunner(base_test_runner.BaseTestRunner):
    cmd = ('%s --device %s' %
           (self._tests['steps'][test_name]['cmd'],
            self.device_serial))
+
+    if self._options.collect_chartjson_data:
+      self._output_dir = tempfile.mkdtemp()
+      cmd = cmd + ' --output-dir=%s' % self._output_dir
+
    logging.info('%s : %s', test_name, cmd)
    start_time = datetime.datetime.now()
@@ -241,10 +270,12 @@ class TestRunner(base_test_runner.BaseTestRunner):
    try:
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
+      json_output = self._ReadChartjsonOutput()
    except cmd_helper.TimeoutError as e:
      exit_code = -1
      output = str(e)
    finally:
+      self._CleanupOutputDirectory()
      if self._options.single_step:
        logfile.stop()
    end_time = datetime.datetime.now()
@@ -277,6 +308,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
    persisted_result = {
        'name': test_name,
        'output': output,
+        'chartjson': json_output,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index b483d2b..7d11e72 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -546,6 +546,14 @@ def AddPerfTestOptions(option_parser):
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  option_parser.add_option(
+      '--collect-chartjson-data',
+      action='store_true',
+      help='Cache the chartjson output from each step for later use.')
+  option_parser.add_option(
+      '--output-chartjson-data',
+      default='',
+      help='Write out chartjson into the given file.')
+  option_parser.add_option(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
@@ -585,7 +593,8 @@ def ProcessPerfTestOptions(options, args, error_func):
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.output_json_list,
      options.print_step, options.no_timeout, options.test_filter,
-      options.dry_run, single_step)
+      options.dry_run, single_step, options.collect_chartjson_data,
+      options.output_chartjson_data)


def AddPythonTestOptions(option_parser):
@@ -767,6 +776,10 @@ def _RunPerfTests(options, args, error_func):
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

+  if perf_options.output_chartjson_data:
+    return perf_test_runner.OutputChartjson(
+        perf_options.print_step, perf_options.output_chartjson_data)
+
  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)
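For context, here is a minimal standalone sketch of the round trip this change sets up. PERF_OUTPUT_DIR stands in for constants.PERF_OUTPUT_DIR, both helper names and the step name are hypothetical, and the real code pickles a larger per-step result dict:

import os
import pickle
import tempfile

PERF_OUTPUT_DIR = tempfile.mkdtemp()  # stand-in for constants.PERF_OUTPUT_DIR

def persist_step_result(test_name, chartjson_text):
  # Mirrors what _LaunchPerfTest persists per step: the chartjson string
  # cached by _ReadChartjsonOutput rides along in the pickled result dict.
  persisted_result = {
      'name': test_name,
      'chartjson': chartjson_text,
  }
  with open(os.path.join(PERF_OUTPUT_DIR, test_name), 'wb') as f:
    pickle.dump(persisted_result, f)

def output_chartjson(test_name, json_file_name):
  # Mirrors OutputChartjson above: unpickle the step's result and write the
  # cached chartjson string into the requested output file.
  with open(os.path.join(PERF_OUTPUT_DIR, test_name), 'rb') as f:
    persisted_result = pickle.load(f)
  with open(json_file_name, 'w') as o:
    o.write(persisted_result['chartjson'])

persist_step_result('my_perf_step', '{"charts": {}}')
output_chartjson('my_perf_step', os.path.join(PERF_OUTPUT_DIR, 'out.json'))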