author     jbudorick <jbudorick@chromium.org>    2014-12-01 10:07:54 -0800
committer  Commit bot <commit-bot@chromium.org>  2014-12-01 18:08:14 +0000
commit     b8c4207c564baf7c01297852ef3ead70faac6a2d (patch)
tree       8efdb803369b17ea2f9c24be99998998c616939e /build
parent     228d4f48130956a302bea24f3379caa660e5d995 (diff)
[Android] Implement generic JSON results that match base/test/launcher.
BUG=428729

Review URL: https://codereview.chromium.org/757683002

Cr-Commit-Position: refs/heads/master@{#306207}
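For reference, the summary written by the new pylib.results.json_results module mirrors the JSON emitted by base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON. A minimal sketch of the resulting structure, reconstructed only from the keys used in this change (the real launcher output carries additional fields, and the values here are illustrative):

    # Sketch of the summary produced by GenerateJsonResultsFile, reconstructed
    # from the keys in json_results.py below; values are illustrative only.
    EXAMPLE_SUMMARY = {
        'global_tags': [],
        'all_tests': ['test.package.TestName'],
        'disabled_tests': [],
        'per_iteration_data': [
            {
                'test.package.TestName': [
                    {
                        'status': 'SUCCESS',  # or FAILURE, SKIPPED, CRASH, TIMEOUT, UNKNOWN
                        'elapsed_time_ms': 123,
                        'output_snippet': '',
                        'losless_snippet': '',  # spelling intentionally matches the launcher
                        'output_snippet_base64': '',
                    },
                ],
            },
        ],
    }
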
Diffstat (limited to 'build')
-rw-r--r--  build/android/PRESUBMIT.py                                                   |   1
-rw-r--r--  build/android/pylib/constants.py                                             |   1
-rw-r--r--  build/android/pylib/results/__init__.py                                      |   3
-rw-r--r--  build/android/pylib/results/flakiness_dashboard/__init__.py                  |   3
-rw-r--r--  build/android/pylib/results/flakiness_dashboard/json_results_generator.py (renamed from build/android/pylib/utils/json_results_generator.py) |   0
-rw-r--r--  build/android/pylib/results/flakiness_dashboard/results_uploader.py (renamed from build/android/pylib/utils/flakiness_dashboard_results_uploader.py) |   3
-rw-r--r--  build/android/pylib/results/json_results.py                                  |  73
-rwxr-xr-x  build/android/pylib/results/json_results_test.py                             | 133
-rw-r--r--  build/android/pylib/results/report_results.py (renamed from build/android/pylib/utils/report_results.py) |   4
-rwxr-xr-x  build/android/test_runner.py                                                 |  31
10 files changed, 245 insertions, 7 deletions
diff --git a/build/android/PRESUBMIT.py b/build/android/PRESUBMIT.py
index f226b37..7bce71b 100644
--- a/build/android/PRESUBMIT.py
+++ b/build/android/PRESUBMIT.py
@@ -64,6 +64,7 @@ def CommonChecks(input_api, output_api):
J('pylib', 'device', 'device_utils_test.py'),
J('pylib', 'gtest', 'test_package_test.py'),
J('pylib', 'instrumentation', 'test_runner_test.py'),
+ J('pylib', 'results', 'json_results_test.py'),
J('pylib', 'utils', 'md5sum_test.py'),
],
env=pylib_test_env))
diff --git a/build/android/pylib/constants.py b/build/android/pylib/constants.py
index 5cba3af..2f2953a 100644
--- a/build/android/pylib/constants.py
+++ b/build/android/pylib/constants.py
@@ -188,6 +188,7 @@ PYTHON_UNIT_TEST_SUITES = {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'pylib.device.device_utils_test',
+ 'pylib.results.json_results_test',
'pylib.utils.md5sum_test',
]
},
diff --git a/build/android/pylib/results/__init__.py b/build/android/pylib/results/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/results/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/results/flakiness_dashboard/__init__.py b/build/android/pylib/results/flakiness_dashboard/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/results/flakiness_dashboard/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/utils/json_results_generator.py b/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
index e5c433d..e5c433d 100644
--- a/build/android/pylib/utils/json_results_generator.py
+++ b/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
diff --git a/build/android/pylib/utils/flakiness_dashboard_results_uploader.py b/build/android/pylib/results/flakiness_dashboard/results_uploader.py
index ff286b6..856fa9c 100644
--- a/build/android/pylib/utils/flakiness_dashboard_results_uploader.py
+++ b/build/android/pylib/results/flakiness_dashboard/results_uploader.py
@@ -12,10 +12,9 @@ import tempfile
import xml
-#TODO(craigdh): pylib/utils/ should not depend on pylib/.
from pylib import cmd_helper
from pylib import constants
-from pylib.utils import json_results_generator
+from pylib.results.flakiness_dashboard import json_results_generator
from pylib.utils import repo_utils
diff --git a/build/android/pylib/results/json_results.py b/build/android/pylib/results/json_results.py
new file mode 100644
index 0000000..c34244e
--- /dev/null
+++ b/build/android/pylib/results/json_results.py
@@ -0,0 +1,73 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+
+from pylib.base import base_test_result
+
+
+def GenerateResultsDict(test_run_result):
+ """Create a results dict from |test_run_result| suitable for writing to JSON.
+ Args:
+ test_run_result: a base_test_result.TestRunResults object.
+ Returns:
+ A results dict that mirrors the one generated by
+ base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
+ """
+ assert isinstance(test_run_result, base_test_result.TestRunResults)
+
+ def status_as_string(s):
+ if s == base_test_result.ResultType.PASS:
+ return 'SUCCESS'
+ elif s == base_test_result.ResultType.SKIP:
+ return 'SKIPPED'
+ elif s == base_test_result.ResultType.FAIL:
+ return 'FAILURE'
+ elif s == base_test_result.ResultType.CRASH:
+ return 'CRASH'
+ elif s == base_test_result.ResultType.TIMEOUT:
+ return 'TIMEOUT'
+ elif s == base_test_result.ResultType.UNKNOWN:
+ return 'UNKNOWN'
+
+ def generate_iteration_data(t):
+ return {
+ t.GetName(): [
+ {
+ 'status': status_as_string(t.GetType()),
+ 'elapsed_time_ms': t.GetDuration(),
+ 'output_snippet': '',
+ 'losless_snippet': '',
+ 'output_snippet_base64': '',
+ }
+ ]
+ }
+
+ all_tests_tuple, per_iteration_data_tuple = zip(
+ *[(t.GetName(), generate_iteration_data(t))
+ for t in test_run_result.GetAll()])
+
+ return {
+ 'global_tags': [],
+ 'all_tests': list(all_tests_tuple),
+ # TODO(jbudorick): Add support for disabled tests within base_test_result.
+ 'disabled_tests': [],
+ 'per_iteration_data': list(per_iteration_data_tuple),
+ }
+
+
+def GenerateJsonResultsFile(test_run_result, file_path):
+ """Write |test_run_result| to JSON.
+
+ This emulates the format of the JSON emitted by
+ base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
+
+ Args:
+ test_run_result: a base_test_result.TestRunResults object.
+ file_path: The path to the JSON file to write.
+ """
+ with open(file_path, 'w') as json_result_file:
+ json_result_file.write(json.dumps(GenerateResultsDict(test_run_result)))
+
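As a rough usage sketch of the new module (assuming it is run from build/android so the pylib imports resolve, i.e. the same environment the test runner sets up; the output path is illustrative):

    from pylib.base import base_test_result
    from pylib.results import json_results

    # Build a TestRunResults object the same way the test runners do.
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(
        'test.package.TestName', base_test_result.ResultType.PASS, duration=123))

    # Serialize to the launcher-style summary JSON.
    json_results.GenerateJsonResultsFile(results, '/tmp/results.json')
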
diff --git a/build/android/pylib/results/json_results_test.py b/build/android/pylib/results/json_results_test.py
new file mode 100755
index 0000000..1bc730d
--- /dev/null
+++ b/build/android/pylib/results/json_results_test.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.results import json_results
+
+
+class JsonResultsTest(unittest.TestCase):
+
+ def testGenerateResultsDict_passedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.PASS)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict(all_results)
+ self.assertEquals(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+ self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEquals('SUCCESS', test_iteration_result['status'])
+
+ def testGenerateResultsDict_skippedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.SKIP)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict(all_results)
+ self.assertEquals(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+ self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEquals('SKIPPED', test_iteration_result['status'])
+
+ def testGenerateResultsDict_failedResult(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.FAIL)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict(all_results)
+ self.assertEquals(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+ self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEquals('FAILURE', test_iteration_result['status'])
+
+ def testGenerateResultsDict_duration(self):
+ result = base_test_result.BaseTestResult(
+ 'test.package.TestName', base_test_result.ResultType.PASS, duration=123)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict(all_results)
+ self.assertEquals(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+ self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+ self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('elapsed_time_ms' in test_iteration_result)
+ self.assertEquals(123, test_iteration_result['elapsed_time_ms'])
+
+ def testGenerateResultsDict_multipleResults(self):
+ result1 = base_test_result.BaseTestResult(
+ 'test.package.TestName1', base_test_result.ResultType.PASS)
+ result2 = base_test_result.BaseTestResult(
+ 'test.package.TestName2', base_test_result.ResultType.PASS)
+
+ all_results = base_test_result.TestRunResults()
+ all_results.AddResult(result1)
+ all_results.AddResult(result2)
+
+ results_dict = json_results.GenerateResultsDict(all_results)
+ self.assertEquals(
+ ['test.package.TestName1', 'test.package.TestName2'],
+ results_dict['all_tests'])
+ self.assertEquals(2, len(results_dict['per_iteration_data']))
+
+ expected_tests = set([
+ 'test.package.TestName1',
+ 'test.package.TestName2',
+ ])
+
+ for iteration_result in results_dict['per_iteration_data']:
+ self.assertEquals(1, len(iteration_result))
+ name = iteration_result.keys()[0]
+ self.assertTrue(name in expected_tests)
+ expected_tests.remove(name)
+ self.assertEquals(1, len(iteration_result[name]))
+
+ test_iteration_result = iteration_result[name][0]
+ self.assertTrue('status' in test_iteration_result)
+ self.assertEquals('SUCCESS', test_iteration_result['status'])
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
+
diff --git a/build/android/pylib/utils/report_results.py b/build/android/pylib/results/report_results.py
index 8f81c95..4c9518e 100644
--- a/build/android/pylib/utils/report_results.py
+++ b/build/android/pylib/results/report_results.py
@@ -9,7 +9,7 @@ import os
import re
from pylib import constants
-from pylib.utils import flakiness_dashboard_results_uploader
+from pylib.results.flakiness_dashboard import results_uploader
def _LogToFile(results, test_type, suite_name):
@@ -58,7 +58,7 @@ def _LogToFlakinessDashboard(results, test_type, test_package,
logging.warning('Invalid test type')
return
- flakiness_dashboard_results_uploader.Upload(
+ results_uploader.Upload(
results, flakiness_server, dashboard_test_type)
except Exception as e:
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 7d11e72..f38b76e 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -39,11 +39,12 @@ from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
+from pylib.results import json_results
+from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import command_option_parser
-from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper
@@ -94,6 +95,9 @@ def AddCommonOptions(option_parser):
group.add_option('--adb-path',
help=('Specify the absolute path of the adb binary that '
'should be used.'))
+ group.add_option('--json-results-file', dest='json_results_file',
+ help='If set, will dump results in JSON format '
+ 'to specified file.')
option_parser.add_option_group(group)
@@ -161,8 +165,7 @@ def AddGTestOptions(option_parser):
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
- # TODO(gkanwar): Move these to Common Options once we have the plumbing
- # in our other test types to handle these commands
+
AddCommonOptions(option_parser)
AddDeviceOptions(option_parser)
@@ -647,6 +650,9 @@ def _RunGTests(options, devices):
test_package=suite_name,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
@@ -666,6 +672,9 @@ def _RunLinkerTests(options, devices):
test_type='Linker test',
test_package='ChromiumLinkerTest')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -717,6 +726,9 @@ def _RunInstrumentationTests(options, error_func, devices):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -737,6 +749,9 @@ def _RunUIAutomatorTests(options, error_func, devices):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -764,6 +779,9 @@ def _RunMonkeyTests(options, error_func, devices):
test_type='Monkey',
test_package='Monkey')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -799,6 +817,9 @@ def _RunPerfTests(options, args, error_func):
test_type='Perf',
test_package='Perf')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
if perf_options.single_step:
return perf_test_runner.PrintTestOutput('single_step')
@@ -939,6 +960,10 @@ def RunTestsInPlatformMode(command, options, option_parser):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(
+ results, options.json_results_file)
+
return results
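
Downstream consumers (e.g. bot recipes or local scripts) can then read the file passed via --json-results-file with nothing beyond the json stdlib. A minimal Python 2 sketch, matching the Python version this codebase targets; the path and the set of statuses treated as passing are illustrative assumptions:

    import json

    with open('/tmp/results.json') as f:
        summary = json.load(f)

    # Collect tests that had any non-passing run across iterations.
    failing = set()
    for iteration in summary['per_iteration_data']:
        for test_name, runs in iteration.iteritems():
            if any(r['status'] not in ('SUCCESS', 'SKIPPED') for r in runs):
                failing.add(test_name)

    print '%d of %d tests failed' % (len(failing), len(summary['all_tests']))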